repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
GalaxyTab4/bliss_kernel_samsung_s3ve3g | net/ipv6/netfilter/ip6t_rpfilter.c | 5054 | 3433 | /*
* Copyright (c) 2011 Florian Westphal <fw@strlen.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/route.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <linux/netfilter/xt_rpfilter.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("Xtables: IPv6 reverse path filter match");
static bool rpfilter_addr_unicast(const struct in6_addr *addr)
{
int addr_type = ipv6_addr_type(addr);
return addr_type & IPV6_ADDR_UNICAST;
}
static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
const struct net_device *dev, u8 flags)
{
struct rt6_info *rt;
struct ipv6hdr *iph = ipv6_hdr(skb);
bool ret = false;
struct flowi6 fl6 = {
.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
.flowi6_proto = iph->nexthdr,
.daddr = iph->saddr,
};
int lookup_flags;
if (rpfilter_addr_unicast(&iph->daddr)) {
memcpy(&fl6.saddr, &iph->daddr, sizeof(struct in6_addr));
lookup_flags = RT6_LOOKUP_F_HAS_SADDR;
} else {
lookup_flags = 0;
}
fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
if ((flags & XT_RPFILTER_LOOSE) == 0) {
fl6.flowi6_oif = dev->ifindex;
lookup_flags |= RT6_LOOKUP_F_IFACE;
}
rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags);
if (rt->dst.error)
goto out;
if (rt->rt6i_flags & (RTF_REJECT|RTF_ANYCAST))
goto out;
if (rt->rt6i_flags & RTF_LOCAL) {
ret = flags & XT_RPFILTER_ACCEPT_LOCAL;
goto out;
}
if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
ret = true;
out:
dst_release(&rt->dst);
return ret;
}
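/*
 * Illustrative summary of the reverse lookup above: the flow is built
 * with daddr = iph->saddr, i.e. we ask the FIB "how would a reply to
 * this packet's source be routed?".  In strict mode (no
 * XT_RPFILTER_LOOSE) flowi6_oif pins the lookup to the ingress device,
 * so the match succeeds only if replies would leave through the same
 * interface the packet arrived on; in loose mode any route back to the
 * source is enough.
 */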
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rpfilter_info *info = par->matchinfo;
int saddrtype;
struct ipv6hdr *iph;
bool invert = info->flags & XT_RPFILTER_INVERT;
if (par->in->flags & IFF_LOOPBACK)
return true ^ invert;
iph = ipv6_hdr(skb);
saddrtype = ipv6_addr_type(&iph->saddr);
if (unlikely(saddrtype == IPV6_ADDR_ANY))
return true ^ invert; /* not routable: forward path will drop it */
return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert;
}
static int rpfilter_check(const struct xt_mtchk_param *par)
{
const struct xt_rpfilter_info *info = par->matchinfo;
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
if (info->flags & options) {
pr_info("unknown options encountered");
return -EINVAL;
}
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "raw") != 0) {
pr_info("match only valid in the \'raw\' "
"or \'mangle\' tables, not \'%s\'.\n", par->table);
return -EINVAL;
}
return 0;
}
static struct xt_match rpfilter_mt_reg __read_mostly = {
.name = "rpfilter",
.family = NFPROTO_IPV6,
.checkentry = rpfilter_check,
.match = rpfilter_mt,
.matchsize = sizeof(struct xt_rpfilter_info),
.hooks = (1 << NF_INET_PRE_ROUTING),
.me = THIS_MODULE
};
static int __init rpfilter_mt_init(void)
{
return xt_register_match(&rpfilter_mt_reg);
}
static void __exit rpfilter_mt_exit(void)
{
xt_unregister_match(&rpfilter_mt_reg);
}
module_init(rpfilter_mt_init);
module_exit(rpfilter_mt_exit);
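/*
 * Example userspace usage (illustrative; the long options mirror the
 * XT_RPFILTER_* flags handled above):
 *
 *   ip6tables -t raw -A PREROUTING -m rpfilter --invert -j DROP
 *
 * drops packets that fail the reverse path test.  --loose,
 * --accept-local and --validmark correspond to XT_RPFILTER_LOOSE,
 * XT_RPFILTER_ACCEPT_LOCAL and XT_RPFILTER_VALID_MARK respectively.
 */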
| gpl-2.0 |
tarunkapadia93/android_kernel_xiaomi_cancro | drivers/staging/speakup/i18n.c | 7358 | 18885 | /* Internationalization implementation. Includes definitions of English
* string arrays, and the i18n pointer. */
#include <linux/slab.h> /* For kmalloc. */
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/string.h>
#include "speakup.h"
#include "spk_priv.h"
static char *speakup_msgs[MSG_LAST_INDEX];
static char *speakup_default_msgs[MSG_LAST_INDEX] = {
[MSG_BLANK] = "blank",
[MSG_IAM_ALIVE] = "I'm aLive!",
[MSG_YOU_KILLED_SPEAKUP] = "You killed speakup!",
[MSG_HEY_THATS_BETTER] = "hey. That's better!",
[MSG_YOU_TURNED_ME_OFF] = "You turned me off!",
[MSG_PARKED] = "parked!",
[MSG_UNPARKED] = "unparked!",
[MSG_MARK] = "mark",
[MSG_CUT] = "cut",
[MSG_MARK_CLEARED] = "mark, cleared",
[MSG_PASTE] = "paste",
[MSG_BRIGHT] = "bright",
[MSG_ON_BLINKING] = "on blinking",
[MSG_OFF] = "off",
[MSG_ON] = "on",
[MSG_NO_WINDOW] = "no window",
[MSG_CURSORING_OFF] = "cursoring off",
[MSG_CURSORING_ON] = "cursoring on",
[MSG_HIGHLIGHT_TRACKING] = "highlight tracking",
[MSG_READ_WINDOW] = "read windo",
[MSG_READ_ALL] = "read all",
[MSG_EDIT_DONE] = "edit done",
[MSG_WINDOW_ALREADY_SET] = "window already set, clear then reset",
[MSG_END_BEFORE_START] = "error end before start",
[MSG_WINDOW_CLEARED] = "window cleared",
[MSG_WINDOW_SILENCED] = "window silenced",
[MSG_WINDOW_SILENCE_DISABLED] = "window silence disabled",
[MSG_ERROR] = "error",
[MSG_GOTO_CANCELED] = "goto canceled",
[MSG_GOTO] = "go to?",
[MSG_LEAVING_HELP] = "leaving help",
[MSG_IS_UNASSIGNED] = "is unassigned",
[MSG_HELP_INFO] =
"press space to exit, up or down to scroll, or a letter to go to a command",
[MSG_EDGE_TOP] = "top,",
[MSG_EDGE_BOTTOM] = "bottom,",
[MSG_EDGE_LEFT] = "left,",
[MSG_EDGE_RIGHT] = "right,",
[MSG_NUMBER] = "number",
[MSG_SPACE] = "space",
[MSG_START] = "start",
[MSG_END] = "end",
[MSG_CTRL] = "control-",
[MSG_DISJUNCTION] = "or",
/* Messages with embedded format specifiers. */
[MSG_POS_INFO] = "line %ld, col %ld, t t y %d",
[MSG_CHAR_INFO] = "hex %02x, decimal %d",
[MSG_REPEAT_DESC] = "times %d .",
[MSG_REPEAT_DESC2] = "repeated %d .",
[MSG_WINDOW_LINE] = "window is line %d",
[MSG_WINDOW_BOUNDARY] = "%s at line %d, column %d",
[MSG_EDIT_PROMPT] = "edit %s, press space when done",
[MSG_NO_COMMAND] = "no commands for %c",
[MSG_KEYDESC] = "is %s",
/* Control keys. */
/* Most of these duplicate the entries in state names. */
[MSG_CTL_SHIFT] = "shift",
[MSG_CTL_ALTGR] = "altgr",
[MSG_CTL_CONTROL] = "control",
[MSG_CTL_ALT] = "ault",
[MSG_CTL_LSHIFT] = "l shift",
[MSG_CTL_SPEAKUP] = "speakup",
[MSG_CTL_LCONTROL] = "l control",
[MSG_CTL_RCONTROL] = "r control",
[MSG_CTL_CAPSSHIFT] = "caps shift",
/* Color names. */
[MSG_COLOR_BLACK] = "black",
[MSG_COLOR_BLUE] = "blue",
[MSG_COLOR_GREEN] = "green",
[MSG_COLOR_CYAN] = "cyan",
[MSG_COLOR_RED] = "red",
[MSG_COLOR_MAGENTA] = "magenta",
[MSG_COLOR_YELLOW] = "yellow",
[MSG_COLOR_WHITE] = "white",
[MSG_COLOR_GREY] = "grey",
/* Names of key states. */
[MSG_STATE_DOUBLE] = "double",
[MSG_STATE_SPEAKUP] = "speakup",
[MSG_STATE_ALT] = "alt",
[MSG_STATE_CONTROL] = "ctrl",
[MSG_STATE_ALTGR] = "altgr",
[MSG_STATE_SHIFT] = "shift",
/* Key names. */
[MSG_KEYNAME_ESC] = "escape",
[MSG_KEYNAME_1] = "1",
[MSG_KEYNAME_2] = "2",
[MSG_KEYNAME_3] = "3",
[MSG_KEYNAME_4] = "4",
[MSG_KEYNAME_5] = "5",
[MSG_KEYNAME_6] = "6",
[MSG_KEYNAME_7] = "7",
[MSG_KEYNAME_8] = "8",
[MSG_KEYNAME_9] = "9",
[MSG_KEYNAME_0] = "0",
[MSG_KEYNAME_DASH] = "minus",
[MSG_KEYNAME_EQUAL] = "equal",
[MSG_KEYNAME_BS] = "back space",
[MSG_KEYNAME_TAB] = "tab",
[MSG_KEYNAME_Q] = "q",
[MSG_KEYNAME_W] = "w",
[MSG_KEYNAME_E] = "e",
[MSG_KEYNAME_R] = "r",
[MSG_KEYNAME_T] = "t",
[MSG_KEYNAME_Y] = "y",
[MSG_KEYNAME_U] = "u",
[MSG_KEYNAME_I] = "i",
[MSG_KEYNAME_O] = "o",
[MSG_KEYNAME_P] = "p",
[MSG_KEYNAME_LEFTBRACE] = "left brace",
[MSG_KEYNAME_RIGHTBRACE] = "right brace",
[MSG_KEYNAME_ENTER] = "enter",
[MSG_KEYNAME_LEFTCTRL] = "left control",
[MSG_KEYNAME_A] = "a",
[MSG_KEYNAME_S] = "s",
[MSG_KEYNAME_D] = "d",
[MSG_KEYNAME_F] = "f",
[MSG_KEYNAME_G] = "g",
[MSG_KEYNAME_H] = "h",
[MSG_KEYNAME_J] = "j",
[MSG_KEYNAME_K] = "k",
[MSG_KEYNAME_L] = "l",
[MSG_KEYNAME_SEMICOLON] = "semicolon",
[MSG_KEYNAME_SINGLEQUOTE] = "apostrophe",
[MSG_KEYNAME_GRAVE] = "accent",
[MSG_KEYNAME_LEFTSHFT] = "left shift",
[MSG_KEYNAME_BACKSLASH] = "back slash",
[MSG_KEYNAME_Z] = "z",
[MSG_KEYNAME_X] = "x",
[MSG_KEYNAME_C] = "c",
[MSG_KEYNAME_V] = "v",
[MSG_KEYNAME_B] = "b",
[MSG_KEYNAME_N] = "n",
[MSG_KEYNAME_M] = "m",
[MSG_KEYNAME_COMMA] = "comma",
[MSG_KEYNAME_DOT] = "dot",
[MSG_KEYNAME_SLASH] = "slash",
[MSG_KEYNAME_RIGHTSHFT] = "right shift",
[MSG_KEYNAME_KPSTAR] = "keypad asterisk",
[MSG_KEYNAME_LEFTALT] = "left alt",
[MSG_KEYNAME_SPACE] = "space",
[MSG_KEYNAME_CAPSLOCK] = "caps lock",
[MSG_KEYNAME_F1] = "f1",
[MSG_KEYNAME_F2] = "f2",
[MSG_KEYNAME_F3] = "f3",
[MSG_KEYNAME_F4] = "f4",
[MSG_KEYNAME_F5] = "f5",
[MSG_KEYNAME_F6] = "f6",
[MSG_KEYNAME_F7] = "f7",
[MSG_KEYNAME_F8] = "f8",
[MSG_KEYNAME_F9] = "f9",
[MSG_KEYNAME_F10] = "f10",
[MSG_KEYNAME_NUMLOCK] = "num lock",
[MSG_KEYNAME_SCROLLLOCK] = "scroll lock",
[MSG_KEYNAME_KP7] = "keypad 7",
[MSG_KEYNAME_KP8] = "keypad 8",
[MSG_KEYNAME_KP9] = "keypad 9",
[MSG_KEYNAME_KPMINUS] = "keypad minus",
[MSG_KEYNAME_KP4] = "keypad 4",
[MSG_KEYNAME_KP5] = "keypad 5",
[MSG_KEYNAME_KP6] = "keypad 6",
[MSG_KEYNAME_KPPLUS] = "keypad plus",
[MSG_KEYNAME_KP1] = "keypad 1",
[MSG_KEYNAME_KP2] = "keypad 2",
[MSG_KEYNAME_KP3] = "keypad 3",
[MSG_KEYNAME_KP0] = "keypad 0",
[MSG_KEYNAME_KPDOT] = "keypad dot",
[MSG_KEYNAME_103RD] = "103rd",
[MSG_KEYNAME_F13] = "f13",
[MSG_KEYNAME_102ND] = "102nd",
[MSG_KEYNAME_F11] = "f11",
[MSG_KEYNAME_F12] = "f12",
[MSG_KEYNAME_F14] = "f14",
[MSG_KEYNAME_F15] = "f15",
[MSG_KEYNAME_F16] = "f16",
[MSG_KEYNAME_F17] = "f17",
[MSG_KEYNAME_F18] = "f18",
[MSG_KEYNAME_F19] = "f19",
[MSG_KEYNAME_F20] = "f20",
[MSG_KEYNAME_KPENTER] = "keypad enter",
[MSG_KEYNAME_RIGHTCTRL] = "right control",
[MSG_KEYNAME_KPSLASH] = "keypad slash",
[MSG_KEYNAME_SYSRQ] = "sysrq",
[MSG_KEYNAME_RIGHTALT] = "right alt",
[MSG_KEYNAME_LF] = "line feed",
[MSG_KEYNAME_HOME] = "home",
[MSG_KEYNAME_UP] = "up",
[MSG_KEYNAME_PGUP] = "page up",
[MSG_KEYNAME_LEFT] = "left",
[MSG_KEYNAME_RIGHT] = "right",
[MSG_KEYNAME_END] = "end",
[MSG_KEYNAME_DOWN] = "down",
[MSG_KEYNAME_PGDN] = "page down",
[MSG_KEYNAME_INS] = "insert",
[MSG_KEYNAME_DEL] = "delete",
[MSG_KEYNAME_MACRO] = "macro",
[MSG_KEYNAME_MUTE] = "mute",
[MSG_KEYNAME_VOLDOWN] = "volume down",
[MSG_KEYNAME_VOLUP] = "volume up",
[MSG_KEYNAME_POWER] = "power",
[MSG_KEYNAME_KPEQUAL] = "keypad equal",
[MSG_KEYNAME_KPPLUSDASH] = "keypad plusminus",
[MSG_KEYNAME_PAUSE] = "pause",
[MSG_KEYNAME_F21] = "f21",
[MSG_KEYNAME_F22] = "f22",
[MSG_KEYNAME_F23] = "f23",
[MSG_KEYNAME_F24] = "f24",
[MSG_KEYNAME_KPCOMMA] = "keypad comma",
[MSG_KEYNAME_LEFTMETA] = "left meta",
[MSG_KEYNAME_RIGHTMETA] = "right meta",
[MSG_KEYNAME_COMPOSE] = "compose",
[MSG_KEYNAME_STOP] = "stop",
[MSG_KEYNAME_AGAIN] = "again",
[MSG_KEYNAME_PROPS] = "props",
[MSG_KEYNAME_UNDO] = "undo",
[MSG_KEYNAME_FRONT] = "front",
[MSG_KEYNAME_COPY] = "copy",
[MSG_KEYNAME_OPEN] = "open",
[MSG_KEYNAME_PASTE] = "paste",
[MSG_KEYNAME_FIND] = "find",
[MSG_KEYNAME_CUT] = "cut",
[MSG_KEYNAME_HELP] = "help",
[MSG_KEYNAME_MENU] = "menu",
[MSG_KEYNAME_CALC] = "calc",
[MSG_KEYNAME_SETUP] = "setup",
[MSG_KEYNAME_SLEEP] = "sleep",
[MSG_KEYNAME_WAKEUP] = "wakeup",
[MSG_KEYNAME_FILE] = "file",
[MSG_KEYNAME_SENDFILE] = "send file",
[MSG_KEYNAME_DELFILE] = "delete file",
[MSG_KEYNAME_XFER] = "transfer",
[MSG_KEYNAME_PROG1] = "prog1",
[MSG_KEYNAME_PROG2] = "prog2",
[MSG_KEYNAME_WWW] = "www",
[MSG_KEYNAME_MSDOS] = "msdos",
[MSG_KEYNAME_COFFEE] = "coffee",
[MSG_KEYNAME_DIRECTION] = "direction",
[MSG_KEYNAME_CYCLEWINDOWS] = "cycle windows",
[MSG_KEYNAME_MAIL] = "mail",
[MSG_KEYNAME_BOOKMARKS] = "bookmarks",
[MSG_KEYNAME_COMPUTER] = "computer",
[MSG_KEYNAME_BACK] = "back",
[MSG_KEYNAME_FORWARD] = "forward",
[MSG_KEYNAME_CLOSECD] = "close cd",
[MSG_KEYNAME_EJECTCD] = "eject cd",
[MSG_KEYNAME_EJECTCLOSE] = "eject close cd",
[MSG_KEYNAME_NEXTSONG] = "next song",
[MSG_KEYNAME_PLAYPAUSE] = "play pause",
[MSG_KEYNAME_PREVSONG] = "previous song",
[MSG_KEYNAME_STOPCD] = "stop cd",
[MSG_KEYNAME_RECORD] = "record",
[MSG_KEYNAME_REWIND] = "rewind",
[MSG_KEYNAME_PHONE] = "phone",
[MSG_KEYNAME_ISO] = "iso",
[MSG_KEYNAME_CONFIG] = "config",
[MSG_KEYNAME_HOMEPG] = "home page",
[MSG_KEYNAME_REFRESH] = "refresh",
[MSG_KEYNAME_EXIT] = "exit",
[MSG_KEYNAME_MOVE] = "move",
[MSG_KEYNAME_EDIT] = "edit",
[MSG_KEYNAME_SCROLLUP] = "scroll up",
[MSG_KEYNAME_SCROLLDN] = "scroll down",
[MSG_KEYNAME_KPLEFTPAR] = "keypad left paren",
[MSG_KEYNAME_KPRIGHTPAR] = "keypad right paren",
/* Function names. */
[MSG_FUNCNAME_ATTRIB_BLEEP_DEC] = "attribute bleep decrement",
[MSG_FUNCNAME_ATTRIB_BLEEP_INC] = "attribute bleep increment",
[MSG_FUNCNAME_BLEEPS_DEC] = "bleeps decrement",
[MSG_FUNCNAME_BLEEPS_INC] = "bleeps increment",
[MSG_FUNCNAME_CHAR_FIRST] = "character, first",
[MSG_FUNCNAME_CHAR_LAST] = "character, last",
[MSG_FUNCNAME_CHAR_CURRENT] = "character, say current",
[MSG_FUNCNAME_CHAR_HEX_AND_DEC] = "character, say hex and decimal",
[MSG_FUNCNAME_CHAR_NEXT] = "character, say next",
[MSG_FUNCNAME_CHAR_PHONETIC] = "character, say phonetic",
[MSG_FUNCNAME_CHAR_PREVIOUS] = "character, say previous",
[MSG_FUNCNAME_CURSOR_PARK] = "cursor park",
[MSG_FUNCNAME_CUT] = "cut",
[MSG_FUNCNAME_EDIT_DELIM] = "edit delimiters",
[MSG_FUNCNAME_EDIT_EXNUM] = "edit exnum",
[MSG_FUNCNAME_EDIT_MOST] = "edit most",
[MSG_FUNCNAME_EDIT_REPEATS] = "edit repeats",
[MSG_FUNCNAME_EDIT_SOME] = "edit some",
[MSG_FUNCNAME_GOTO] = "go to",
[MSG_FUNCNAME_GOTO_BOTTOM] = "go to bottom edge",
[MSG_FUNCNAME_GOTO_LEFT] = "go to left edge",
[MSG_FUNCNAME_GOTO_RIGHT] = "go to right edge",
[MSG_FUNCNAME_GOTO_TOP] = "go to top edge",
[MSG_FUNCNAME_HELP] = "help",
[MSG_FUNCNAME_LINE_SAY_CURRENT] = "line, say current",
[MSG_FUNCNAME_LINE_SAY_NEXT] = "line, say next",
[MSG_FUNCNAME_LINE_SAY_PREVIOUS] = "line, say previous",
[MSG_FUNCNAME_LINE_SAY_WITH_INDENT] = "line, say with indent",
[MSG_FUNCNAME_PASTE] = "paste",
[MSG_FUNCNAME_PITCH_DEC] = "pitch decrement",
[MSG_FUNCNAME_PITCH_INC] = "pitch increment",
[MSG_FUNCNAME_PUNC_DEC] = "punctuation decrement",
[MSG_FUNCNAME_PUNC_INC] = "punctuation increment",
[MSG_FUNCNAME_PUNC_LEVEL_DEC] = "punc level decrement",
[MSG_FUNCNAME_PUNC_LEVEL_INC] = "punc level increment",
[MSG_FUNCNAME_QUIET] = "quiet",
[MSG_FUNCNAME_RATE_DEC] = "rate decrement",
[MSG_FUNCNAME_RATE_INC] = "rate increment",
[MSG_FUNCNAME_READING_PUNC_DEC] = "reading punctuation decrement",
[MSG_FUNCNAME_READING_PUNC_INC] = "reading punctuation increment",
[MSG_FUNCNAME_SAY_ATTRIBUTES] = "say attributes",
[MSG_FUNCNAME_SAY_FROM_LEFT] = "say from left",
[MSG_FUNCNAME_SAY_FROM_TOP] = "say from top",
[MSG_FUNCNAME_SAY_POSITION] = "say position",
[MSG_FUNCNAME_SAY_SCREEN] = "say screen",
[MSG_FUNCNAME_SAY_TO_BOTTOM] = "say to bottom",
[MSG_FUNCNAME_SAY_TO_RIGHT] = "say to right",
[MSG_FUNCNAME_SPEAKUP] = "speakup",
[MSG_FUNCNAME_SPEAKUP_LOCK] = "speakup lock",
[MSG_FUNCNAME_SPEAKUP_OFF] = "speakup off",
[MSG_FUNCNAME_SPEECH_KILL] = "speech kill",
[MSG_FUNCNAME_SPELL_DELAY_DEC] = "spell delay decrement",
[MSG_FUNCNAME_SPELL_DELAY_INC] = "spell delay increment",
[MSG_FUNCNAME_SPELL_WORD] = "spell word",
[MSG_FUNCNAME_SPELL_WORD_PHONETICALLY] = "spell word phoneticly",
[MSG_FUNCNAME_TONE_DEC] = "tone decrement",
[MSG_FUNCNAME_TONE_INC] = "tone increment",
[MSG_FUNCNAME_VOICE_DEC] = "voice decrement",
[MSG_FUNCNAME_VOICE_INC] = "voice increment",
[MSG_FUNCNAME_VOLUME_DEC] = "volume decrement",
[MSG_FUNCNAME_VOLUME_INC] = "volume increment",
[MSG_FUNCNAME_WINDOW_CLEAR] = "window, clear",
[MSG_FUNCNAME_WINDOW_SAY] = "window, say",
[MSG_FUNCNAME_WINDOW_SET] = "window, set",
[MSG_FUNCNAME_WINDOW_SILENCE] = "window, silence",
[MSG_FUNCNAME_WORD_SAY_CURRENT] = "word, say current",
[MSG_FUNCNAME_WORD_SAY_NEXT] = "word, say next",
[MSG_FUNCNAME_WORD_SAY_PREVIOUS] = "word, say previous",
};
static struct msg_group_t all_groups[] = {
{
.name = "ctl_keys",
.start = MSG_CTL_START,
.end = MSG_CTL_END,
},
{
.name = "colors",
.start = MSG_COLORS_START,
.end = MSG_COLORS_END,
},
{
.name = "formatted",
.start = MSG_FORMATTED_START,
.end = MSG_FORMATTED_END,
},
{
.name = "function_names",
.start = MSG_FUNCNAMES_START,
.end = MSG_FUNCNAMES_END,
},
{
.name = "key_names",
.start = MSG_KEYNAMES_START,
.end = MSG_KEYNAMES_END,
},
{
.name = "announcements",
.start = MSG_ANNOUNCEMENTS_START,
.end = MSG_ANNOUNCEMENTS_END,
},
{
.name = "states",
.start = MSG_STATES_START,
.end = MSG_STATES_END,
},
};
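/*
 * These group names are the handles user space uses to address a whole
 * block of messages at once, e.g. when loading a translation or when
 * resetting a group back to the English defaults via reset_msg_group()
 * below.
 */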
static const int num_groups = sizeof(all_groups) / sizeof(struct msg_group_t);
char *msg_get(enum msg_index_t index)
{
char *ch;
ch = speakup_msgs[index];
return ch;
}
/*
* Function: next_specifier
* Finds the start of the next format specifier in the argument string.
* Return value: pointer to start of format
* specifier, or NULL if no specifier exists.
*/
static char *next_specifier(char *input)
{
int found = 0;
char *next_percent = input;
while ((next_percent != NULL) && !found) {
next_percent = strchr(next_percent, '%');
if (next_percent != NULL) {
/* skip over doubled percent signs */
while ((next_percent[0] == '%')
&& (next_percent[1] == '%'))
next_percent += 2;
if (*next_percent == '%')
found = 1;
else if (*next_percent == '\0')
next_percent = NULL;
}
}
return next_percent;
}
/* Skip over 0 or more flags. */
static char *skip_flags(char *input)
{
while ((*input != '\0') && strchr(" 0+-#", *input))
input++;
return input;
}
/* Skip over width.precision, if it exists. */
static char *skip_width(char *input)
{
while (isdigit(*input))
input++;
if (*input == '.') {
input++;
while (isdigit(*input))
input++;
}
return input;
}
/*
* Skip past the end of the conversion part.
* Note that this code only accepts a handful of conversion specifiers:
* c d s x and ld. Not accidental; these are exactly the ones used in
* the default group of formatted messages.
*/
static char *skip_conversion(char *input)
{
if ((input[0] == 'l') && (input[1] == 'd'))
input += 2;
else if ((*input != '\0') && strchr("cdsx", *input))
input++;
return input;
}
/*
* Function: find_specifier_end
* Return a pointer to the end of the format specifier.
*/
static char *find_specifier_end(char *input)
{
input++; /* Advance over %. */
input = skip_flags(input);
input = skip_width(input);
input = skip_conversion(input);
return input;
}
/*
* Function: compare_specifiers
* Compare the format specifiers pointed to by *input1 and *input2.
* Return 1 if they are the same, 0 otherwise. Advance *input1 and *input2
* so that they point to the character following the end of the specifier.
*/
static int compare_specifiers(char **input1, char **input2)
{
int same = 0;
char *end1 = find_specifier_end(*input1);
char *end2 = find_specifier_end(*input2);
size_t length1 = end1 - *input1;
size_t length2 = end2 - *input2;
if ((length1 == length2) && !memcmp(*input1, *input2, length1))
same = 1;
*input1 = end1;
*input2 = end2;
return same;
}
/*
* Function: fmt_validate
* Check that two format strings contain the same number of format specifiers,
* and that the order of specifiers is the same in both strings.
* Return 1 if the condition holds, 0 if it doesn't.
*/
static int fmt_validate(char *template, char *user)
{
int valid = 1;
int still_comparing = 1;
char *template_ptr = template;
char *user_ptr = user;
while (still_comparing && valid) {
template_ptr = next_specifier(template_ptr);
user_ptr = next_specifier(user_ptr);
if (template_ptr && user_ptr) {
/* Both have at least one more specifier. */
valid = compare_specifiers(&template_ptr, &user_ptr);
} else {
/* No more format specifiers in one or both strings. */
still_comparing = 0;
/* See if one has more specifiers than the other. */
if (template_ptr || user_ptr)
valid = 0;
}
}
return valid;
}
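/*
 * Worked example (illustrative): with the default MSG_POS_INFO
 * template "line %ld, col %ld, t t y %d",
 *
 *   fmt_validate(template, "row %ld, column %ld, terminal %d") == 1
 *   fmt_validate(template, "line %d, col %ld, t t y %ld")      == 0
 *
 * because the second replacement swaps %d and %ld; specifiers must
 * match in both count and order.
 */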
/*
* Function: msg_set
* Description: Add a user-supplied message to the user_messages array.
* The message text is copied to a memory area allocated with kmalloc.
* If the function fails, then user_messages is untouched.
* Arguments:
* - index: a message number, as found in i18n.h.
* - text: text of message. Not NUL-terminated.
* - length: number of bytes in text.
* Failure conditions:
* -EINVAL - Invalid format specifiers in formatted message or illegal index.
* -ENOMEM - Unable to allocate memory.
*/
ssize_t msg_set(enum msg_index_t index, char *text, size_t length)
{
int rc = 0;
char *newstr = NULL;
unsigned long flags;
if ((index >= MSG_FIRST_INDEX) && (index < MSG_LAST_INDEX)) {
newstr = kmalloc(length + 1, GFP_KERNEL);
if (newstr) {
memcpy(newstr, text, length);
newstr[length] = '\0';
if ((index >= MSG_FORMATTED_START
&& index <= MSG_FORMATTED_END)
&& !fmt_validate(speakup_default_msgs[index],
newstr)) {
return -EINVAL;
}
spk_lock(flags);
if (speakup_msgs[index] != speakup_default_msgs[index])
kfree(speakup_msgs[index]);
speakup_msgs[index] = newstr;
spk_unlock(flags);
} else {
rc = -ENOMEM;
}
} else {
rc = -EINVAL;
}
return rc;
}
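/*
 * Illustrative call (not from the original file): replace the goto
 * prompt with a custom string.  Per the contract above, length counts
 * the text bytes only; msg_set() adds the NUL itself.
 *
 *   const char *s = "jump to?";
 *   ssize_t rc = msg_set(MSG_GOTO, (char *)s, strlen(s));
 *   if (rc < 0)
 *           ;  // -EINVAL or -ENOMEM; user_messages left untouched
 */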
/*
* Find a message group, given its name. Return a pointer to the structure
* if found, or NULL otherwise.
*/
struct msg_group_t *find_msg_group(const char *group_name)
{
struct msg_group_t *group = NULL;
int i;
for (i = 0; i < num_groups; i++) {
if (!strcmp(all_groups[i].name, group_name)) {
group = &all_groups[i];
break;
}
}
return group;
}
void reset_msg_group(struct msg_group_t *group)
{
unsigned long flags;
enum msg_index_t i;
spk_lock(flags);
for (i = group->start; i <= group->end; i++) {
if (speakup_msgs[i] != speakup_default_msgs[i])
kfree(speakup_msgs[i]);
speakup_msgs[i] = speakup_default_msgs[i];
}
spk_unlock(flags);
}
/* Called at initialization time, to establish default messages. */
void initialize_msgs(void)
{
memcpy(speakup_msgs, speakup_default_msgs,
sizeof(speakup_default_msgs));
}
/* Free user-supplied strings when module is unloaded: */
void free_user_msgs(void)
{
enum msg_index_t index;
unsigned long flags;
spk_lock(flags);
for (index = MSG_FIRST_INDEX; index < MSG_LAST_INDEX; index++) {
if (speakup_msgs[index] != speakup_default_msgs[index]) {
kfree(speakup_msgs[index]);
speakup_msgs[index] = speakup_default_msgs[index];
}
}
spk_unlock(flags);
}
| gpl-2.0 |
ManhIT-CMB/SpaceX-Kernel-Exynos7420 | sound/pci/echoaudio/darla24_dsp.c | 12478 | 3872 | /***************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
int err;
DE_INIT(("init_hw() - Darla24\n"));
if (snd_BUG_ON((subdevice_id & 0xfff0) != DARLA24))
return -ENODEV;
if ((err = init_dsp_comm_page(chip))) {
DE_INIT(("init_hw - could not initialize DSP comm page\n"));
return err;
}
chip->device_id = device_id;
chip->subdevice_id = subdevice_id;
chip->bad_board = TRUE;
chip->dsp_code_to_load = FW_DARLA24_DSP;
/* Since this card has no ASIC, mark it as loaded so everything
works OK */
chip->asic_loaded = TRUE;
chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
ECHO_CLOCK_BIT_ESYNC;
if ((err = load_firmware(chip)) < 0)
return err;
chip->bad_board = FALSE;
DE_INIT(("init_hw done\n"));
return err;
}
static int set_mixer_defaults(struct echoaudio *chip)
{
return init_line_levels(chip);
}
static u32 detect_input_clocks(const struct echoaudio *chip)
{
u32 clocks_from_dsp, clock_bits;
/* Map the DSP clock detect bits to the generic driver clock
detect bits */
clocks_from_dsp = le32_to_cpu(chip->comm_page->status_clocks);
clock_bits = ECHO_CLOCK_BIT_INTERNAL;
if (clocks_from_dsp & GLDM_CLOCK_DETECT_BIT_ESYNC)
clock_bits |= ECHO_CLOCK_BIT_ESYNC;
return clock_bits;
}
/* The Darla24 has no ASIC. Just do nothing */
static int load_asic(struct echoaudio *chip)
{
return 0;
}
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
u8 clock;
switch (rate) {
case 96000:
clock = GD24_96000;
break;
case 88200:
clock = GD24_88200;
break;
case 48000:
clock = GD24_48000;
break;
case 44100:
clock = GD24_44100;
break;
case 32000:
clock = GD24_32000;
break;
case 22050:
clock = GD24_22050;
break;
case 16000:
clock = GD24_16000;
break;
case 11025:
clock = GD24_11025;
break;
case 8000:
clock = GD24_8000;
break;
default:
DE_ACT(("set_sample_rate: Error, invalid sample rate %d\n",
rate));
return -EINVAL;
}
if (wait_handshake(chip))
return -EIO;
DE_ACT(("set_sample_rate: %d clock %d\n", rate, clock));
chip->sample_rate = rate;
/* Override the sample rate if this card is set to Echo sync. */
if (chip->input_clock == ECHO_CLOCK_ESYNC)
clock = GD24_EXT_SYNC;
chip->comm_page->sample_rate = cpu_to_le32(rate); /* ignored by the DSP ? */
chip->comm_page->gd_clock_state = clock;
clear_handshake(chip);
return send_vector(chip, DSP_VC_SET_GD_AUDIO_STATE);
}
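/*
 * The sequence above is the generic Echo DSP handshake: wait for the
 * DSP to finish the previous command (wait_handshake), write the new
 * state into the shared comm page, clear the handshake flag, then kick
 * the DSP with a vector command (DSP_VC_SET_GD_AUDIO_STATE).
 * set_input_clock() below reuses it indirectly by re-running
 * set_sample_rate() after changing chip->input_clock.
 */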
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
if (snd_BUG_ON(clock != ECHO_CLOCK_INTERNAL &&
clock != ECHO_CLOCK_ESYNC))
return -EINVAL;
chip->input_clock = clock;
return set_sample_rate(chip, chip->sample_rate);
}
| gpl-2.0 |
guillermocorrea/wcf-wssecurity | PruebaDeceval/Infraestructure/openssl-1.0.1e/openssl-1.0.1e/test/igetest.c | 191 | 16986 | /* test/igetest.c -*- mode:C; c-file-style: "eay" -*- */
/* ====================================================================
* Copyright (c) 2006 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
*/
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#define TEST_SIZE 128
#define BIG_TEST_SIZE 10240
static void hexdump(FILE *f,const char *title,const unsigned char *s,int l)
{
int n=0;
fprintf(f,"%s",title);
for( ; n < l ; ++n)
{
if((n%16) == 0)
fprintf(f,"\n%04x",n);
fprintf(f," %02x",s[n]);
}
fprintf(f,"\n");
}
#define MAX_VECTOR_SIZE 64
struct ige_test
{
const unsigned char key[16];
const unsigned char iv[32];
const unsigned char in[MAX_VECTOR_SIZE];
const unsigned char out[MAX_VECTOR_SIZE];
const size_t length;
const int encrypt;
};
static struct ige_test const ige_test_vectors[] = {
{ { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, /* key */
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, /* iv */
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, /* in */
{ 0x1a, 0x85, 0x19, 0xa6, 0x55, 0x7b, 0xe6, 0x52,
0xe9, 0xda, 0x8e, 0x43, 0xda, 0x4e, 0xf4, 0x45,
0x3c, 0xf4, 0x56, 0xb4, 0xca, 0x48, 0x8a, 0xa3,
0x83, 0xc7, 0x9c, 0x98, 0xb3, 0x47, 0x97, 0xcb }, /* out */
32, AES_ENCRYPT }, /* test vector 0 */
{ { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
0x61, 0x6e, 0x20, 0x69, 0x6d, 0x70, 0x6c, 0x65 }, /* key */
{ 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x20, 0x6f, 0x66, 0x20, 0x49, 0x47, 0x45,
0x20, 0x6d, 0x6f, 0x64, 0x65, 0x20, 0x66, 0x6f,
0x72, 0x20, 0x4f, 0x70, 0x65, 0x6e, 0x53, 0x53 }, /* iv */
{ 0x4c, 0x2e, 0x20, 0x4c, 0x65, 0x74, 0x27, 0x73,
0x20, 0x68, 0x6f, 0x70, 0x65, 0x20, 0x42, 0x65,
0x6e, 0x20, 0x67, 0x6f, 0x74, 0x20, 0x69, 0x74,
0x20, 0x72, 0x69, 0x67, 0x68, 0x74, 0x21, 0x0a }, /* in */
{ 0x99, 0x70, 0x64, 0x87, 0xa1, 0xcd, 0xe6, 0x13,
0xbc, 0x6d, 0xe0, 0xb6, 0xf2, 0x4b, 0x1c, 0x7a,
0xa4, 0x48, 0xc8, 0xb9, 0xc3, 0x40, 0x3e, 0x34,
0x67, 0xa8, 0xca, 0xd8, 0x93, 0x40, 0xf5, 0x3b }, /* out */
32, AES_DECRYPT }, /* test vector 1 */
};
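/*
 * Background (informal): AES-IGE chains both the previous ciphertext
 * and the previous plaintext block, roughly
 *
 *   c[i] = E_k(p[i] ^ c[i-1]) ^ p[i-1]
 *
 * which is why the IV above is two AES blocks (32 bytes): it seeds
 * c[-1] and p[-1].  The bidirectional variant below uses two keys and
 * a four-block (64-byte) IV.
 */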
struct bi_ige_test
{
const unsigned char key1[32];
const unsigned char key2[32];
const unsigned char iv[64];
const unsigned char in[MAX_VECTOR_SIZE];
const unsigned char out[MAX_VECTOR_SIZE];
const size_t keysize;
const size_t length;
const int encrypt;
};
static struct bi_ige_test const bi_ige_test_vectors[] = {
{ { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, /* key1 */
{ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f }, /* key2 */
{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f }, /* iv */
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, /* in */
{ 0x14, 0x40, 0x6f, 0xae, 0xa2, 0x79, 0xf2, 0x56,
0x1f, 0x86, 0xeb, 0x3b, 0x7d, 0xff, 0x53, 0xdc,
0x4e, 0x27, 0x0c, 0x03, 0xde, 0x7c, 0xe5, 0x16,
0x6a, 0x9c, 0x20, 0x33, 0x9d, 0x33, 0xfe, 0x12 }, /* out */
16, 32, AES_ENCRYPT }, /* test vector 0 */
{ { 0x58, 0x0a, 0x06, 0xe9, 0x97, 0x07, 0x59, 0x5c,
0x9e, 0x19, 0xd2, 0xa7, 0xbb, 0x40, 0x2b, 0x7a,
0xc7, 0xd8, 0x11, 0x9e, 0x4c, 0x51, 0x35, 0x75,
0x64, 0x28, 0x0f, 0x23, 0xad, 0x74, 0xac, 0x37 }, /* key1 */
{ 0xd1, 0x80, 0xa0, 0x31, 0x47, 0xa3, 0x11, 0x13,
0x86, 0x26, 0x9e, 0x6d, 0xff, 0xaf, 0x72, 0x74,
0x5b, 0xa2, 0x35, 0x81, 0xd2, 0xa6, 0x3d, 0x21,
0x67, 0x7b, 0x58, 0xa8, 0x18, 0xf9, 0x72, 0xe4 }, /* key2 */
{ 0x80, 0x3d, 0xbd, 0x4c, 0xe6, 0x7b, 0x06, 0xa9,
0x53, 0x35, 0xd5, 0x7e, 0x71, 0xc1, 0x70, 0x70,
0x74, 0x9a, 0x00, 0x28, 0x0c, 0xbf, 0x6c, 0x42,
0x9b, 0xa4, 0xdd, 0x65, 0x11, 0x77, 0x7c, 0x67,
0xfe, 0x76, 0x0a, 0xf0, 0xd5, 0xc6, 0x6e, 0x6a,
0xe7, 0x5e, 0x4c, 0xf2, 0x7e, 0x9e, 0xf9, 0x20,
0x0e, 0x54, 0x6f, 0x2d, 0x8a, 0x8d, 0x7e, 0xbd,
0x48, 0x79, 0x37, 0x99, 0xff, 0x27, 0x93, 0xa3 }, /* iv */
{ 0xf1, 0x54, 0x3d, 0xca, 0xfe, 0xb5, 0xef, 0x1c,
0x4f, 0xa6, 0x43, 0xf6, 0xe6, 0x48, 0x57, 0xf0,
0xee, 0x15, 0x7f, 0xe3, 0xe7, 0x2f, 0xd0, 0x2f,
0x11, 0x95, 0x7a, 0x17, 0x00, 0xab, 0xa7, 0x0b,
0xbe, 0x44, 0x09, 0x9c, 0xcd, 0xac, 0xa8, 0x52,
0xa1, 0x8e, 0x7b, 0x75, 0xbc, 0xa4, 0x92, 0x5a,
0xab, 0x46, 0xd3, 0x3a, 0xa0, 0xd5, 0x35, 0x1c,
0x55, 0xa4, 0xb3, 0xa8, 0x40, 0x81, 0xa5, 0x0b}, /* in */
{ 0x42, 0xe5, 0x28, 0x30, 0x31, 0xc2, 0xa0, 0x23,
0x68, 0x49, 0x4e, 0xb3, 0x24, 0x59, 0x92, 0x79,
0xc1, 0xa5, 0xcc, 0xe6, 0x76, 0x53, 0xb1, 0xcf,
0x20, 0x86, 0x23, 0xe8, 0x72, 0x55, 0x99, 0x92,
0x0d, 0x16, 0x1c, 0x5a, 0x2f, 0xce, 0xcb, 0x51,
0xe2, 0x67, 0xfa, 0x10, 0xec, 0xcd, 0x3d, 0x67,
0xa5, 0xe6, 0xf7, 0x31, 0x26, 0xb0, 0x0d, 0x76,
0x5e, 0x28, 0xdc, 0x7f, 0x01, 0xc5, 0xa5, 0x4c}, /* out */
32, 64, AES_ENCRYPT }, /* test vector 1 */
};
static int run_test_vectors(void)
{
unsigned int n;
int errs = 0;
for(n=0 ; n < sizeof(ige_test_vectors)/sizeof(ige_test_vectors[0]) ; ++n)
{
const struct ige_test * const v = &ige_test_vectors[n];
AES_KEY key;
unsigned char buf[MAX_VECTOR_SIZE];
unsigned char iv[AES_BLOCK_SIZE*2];
assert(v->length <= MAX_VECTOR_SIZE);
if(v->encrypt == AES_ENCRYPT)
AES_set_encrypt_key(v->key, 8*sizeof v->key, &key);
else
AES_set_decrypt_key(v->key, 8*sizeof v->key, &key);
memcpy(iv, v->iv, sizeof iv);
AES_ige_encrypt(v->in, buf, v->length, &key, iv, v->encrypt);
if(memcmp(v->out, buf, v->length))
{
printf("IGE test vector %d failed\n", n);
hexdump(stdout, "key", v->key, sizeof v->key);
hexdump(stdout, "iv", v->iv, sizeof v->iv);
hexdump(stdout, "in", v->in, v->length);
hexdump(stdout, "expected", v->out, v->length);
hexdump(stdout, "got", buf, v->length);
++errs;
}
/* try with in == out */
memcpy(iv, v->iv, sizeof iv);
memcpy(buf, v->in, v->length);
AES_ige_encrypt(buf, buf, v->length, &key, iv, v->encrypt);
if(memcmp(v->out, buf, v->length))
{
printf("IGE test vector %d failed (with in == out)\n", n);
hexdump(stdout, "key", v->key, sizeof v->key);
hexdump(stdout, "iv", v->iv, sizeof v->iv);
hexdump(stdout, "in", v->in, v->length);
hexdump(stdout, "expected", v->out, v->length);
hexdump(stdout, "got", buf, v->length);
++errs;
}
}
for(n=0 ; n < sizeof(bi_ige_test_vectors)/sizeof(bi_ige_test_vectors[0])
; ++n)
{
const struct bi_ige_test * const v = &bi_ige_test_vectors[n];
AES_KEY key1;
AES_KEY key2;
unsigned char buf[MAX_VECTOR_SIZE];
assert(v->length <= MAX_VECTOR_SIZE);
if(v->encrypt == AES_ENCRYPT)
{
AES_set_encrypt_key(v->key1, 8*v->keysize, &key1);
AES_set_encrypt_key(v->key2, 8*v->keysize, &key2);
}
else
{
AES_set_decrypt_key(v->key1, 8*v->keysize, &key1);
AES_set_decrypt_key(v->key2, 8*v->keysize, &key2);
}
AES_bi_ige_encrypt(v->in, buf, v->length, &key1, &key2, v->iv,
v->encrypt);
if(memcmp(v->out, buf, v->length))
{
printf("Bidirectional IGE test vector %d failed\n", n);
hexdump(stdout, "key 1", v->key1, sizeof v->key1);
hexdump(stdout, "key 2", v->key2, sizeof v->key2);
hexdump(stdout, "iv", v->iv, sizeof v->iv);
hexdump(stdout, "in", v->in, v->length);
hexdump(stdout, "expected", v->out, v->length);
hexdump(stdout, "got", buf, v->length);
++errs;
}
}
return errs;
}
int main(int argc, char **argv)
{
unsigned char rkey[16];
unsigned char rkey2[16];
AES_KEY key;
AES_KEY key2;
unsigned char plaintext[BIG_TEST_SIZE];
unsigned char ciphertext[BIG_TEST_SIZE];
unsigned char checktext[BIG_TEST_SIZE];
unsigned char iv[AES_BLOCK_SIZE*4];
unsigned char saved_iv[AES_BLOCK_SIZE*4];
int err = 0;
unsigned int n;
unsigned matches;
assert(BIG_TEST_SIZE >= TEST_SIZE);
RAND_pseudo_bytes(rkey, sizeof rkey);
RAND_pseudo_bytes(plaintext, sizeof plaintext);
RAND_pseudo_bytes(iv, sizeof iv);
memcpy(saved_iv, iv, sizeof saved_iv);
/* Forward IGE only... */
/* Straight encrypt/decrypt */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
AES_ige_encrypt(plaintext, ciphertext, TEST_SIZE, &key, iv,
AES_ENCRYPT);
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(ciphertext, checktext, TEST_SIZE, &key, iv,
AES_DECRYPT);
if(memcmp(checktext, plaintext, TEST_SIZE))
{
printf("Encrypt+decrypt doesn't match\n");
hexdump(stdout, "Plaintext", plaintext, TEST_SIZE);
hexdump(stdout, "Checktext", checktext, TEST_SIZE);
++err;
}
/* Now check encrypt chaining works */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(plaintext, ciphertext, TEST_SIZE/2, &key, iv,
AES_ENCRYPT);
AES_ige_encrypt(plaintext+TEST_SIZE/2,
ciphertext+TEST_SIZE/2, TEST_SIZE/2,
&key, iv, AES_ENCRYPT);
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(ciphertext, checktext, TEST_SIZE, &key, iv,
AES_DECRYPT);
if(memcmp(checktext, plaintext, TEST_SIZE))
{
printf("Chained encrypt+decrypt doesn't match\n");
hexdump(stdout, "Plaintext", plaintext, TEST_SIZE);
hexdump(stdout, "Checktext", checktext, TEST_SIZE);
++err;
}
/* And check decrypt chaining */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(plaintext, ciphertext, TEST_SIZE/2, &key, iv,
AES_ENCRYPT);
AES_ige_encrypt(plaintext+TEST_SIZE/2,
ciphertext+TEST_SIZE/2, TEST_SIZE/2,
&key, iv, AES_ENCRYPT);
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(ciphertext, checktext, TEST_SIZE/2, &key, iv,
AES_DECRYPT);
AES_ige_encrypt(ciphertext+TEST_SIZE/2,
checktext+TEST_SIZE/2, TEST_SIZE/2, &key, iv,
AES_DECRYPT);
if(memcmp(checktext, plaintext, TEST_SIZE))
{
printf("Chained encrypt+chained decrypt doesn't match\n");
hexdump(stdout, "Plaintext", plaintext, TEST_SIZE);
hexdump(stdout, "Checktext", checktext, TEST_SIZE);
++err;
}
/* make sure garble extends forwards only */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(plaintext, ciphertext, sizeof plaintext, &key, iv,
AES_ENCRYPT);
/* corrupt halfway through */
++ciphertext[sizeof ciphertext/2];
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
memcpy(iv, saved_iv, sizeof iv);
AES_ige_encrypt(ciphertext, checktext, sizeof checktext, &key, iv,
AES_DECRYPT);
matches=0;
for(n=0 ; n < sizeof checktext ; ++n)
if(checktext[n] == plaintext[n])
++matches;
if(matches > sizeof checktext/2+sizeof checktext/100)
{
printf("More than 51%% matches after garbling\n");
++err;
}
if(matches < sizeof checktext/2)
{
printf("Garble extends backwards!\n");
++err;
}
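/*
 * Why 51%: the corruption sits at the midpoint, so with forward-only
 * garbling the first half (~50%) must decrypt correctly, while random
 * garbage in the second half matches a given plaintext byte with
 * probability 1/256 -- well under the extra 1% of slack allowed here.
 * More than 51% matching means the garble failed to propagate forward;
 * fewer than 50% means it leaked backwards.
 */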
/* Bi-directional IGE */
/* Note that we don't have to recover the IV, because chaining isn't */
/* possible with biIGE, so the IV is not updated. */
RAND_pseudo_bytes(rkey2, sizeof rkey2);
/* Straight encrypt/decrypt */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_encrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_bi_ige_encrypt(plaintext, ciphertext, TEST_SIZE, &key, &key2, iv,
AES_ENCRYPT);
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_decrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_bi_ige_encrypt(ciphertext, checktext, TEST_SIZE, &key, &key2, iv,
AES_DECRYPT);
if(memcmp(checktext, plaintext, TEST_SIZE))
{
printf("Encrypt+decrypt doesn't match\n");
hexdump(stdout, "Plaintext", plaintext, TEST_SIZE);
hexdump(stdout, "Checktext", checktext, TEST_SIZE);
++err;
}
/* make sure garble extends both ways */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_encrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_ige_encrypt(plaintext, ciphertext, sizeof plaintext, &key, iv,
AES_ENCRYPT);
/* corrupt halfway through */
++ciphertext[sizeof ciphertext/2];
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_decrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_ige_encrypt(ciphertext, checktext, sizeof checktext, &key, iv,
AES_DECRYPT);
matches=0;
for(n=0 ; n < sizeof checktext ; ++n)
if(checktext[n] == plaintext[n])
++matches;
if(matches > sizeof checktext/100)
{
printf("More than 1%% matches after bidirectional garbling\n");
++err;
}
/* make sure garble extends both ways (2) */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_encrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_ige_encrypt(plaintext, ciphertext, sizeof plaintext, &key, iv,
AES_ENCRYPT);
/* corrupt right at the end */
++ciphertext[sizeof ciphertext-1];
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_decrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_ige_encrypt(ciphertext, checktext, sizeof checktext, &key, iv,
AES_DECRYPT);
matches=0;
for(n=0 ; n < sizeof checktext ; ++n)
if(checktext[n] == plaintext[n])
++matches;
if(matches > sizeof checktext/100)
{
printf("More than 1%% matches after bidirectional garbling (2)\n");
++err;
}
/* make sure garble extends both ways (3) */
AES_set_encrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_encrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_ige_encrypt(plaintext, ciphertext, sizeof plaintext, &key, iv,
AES_ENCRYPT);
/* corrupt right at the start */
++ciphertext[0];
AES_set_decrypt_key(rkey, 8*sizeof rkey, &key);
AES_set_decrypt_key(rkey2, 8*sizeof rkey2, &key2);
AES_ige_encrypt(ciphertext, checktext, sizeof checktext, &key, iv,
AES_DECRYPT);
matches=0;
for(n=0 ; n < sizeof checktext ; ++n)
if(checktext[n] == plaintext[n])
++matches;
if(matches > sizeof checktext/100)
{
printf("More than 1%% matches after bidirectional garbling (3)\n");
++err;
}
err += run_test_vectors();
return err;
}
| gpl-2.0 |
FrostBite-Android/android_kernel_samsung_smdk4412 | drivers/scsi/hpsa.c | 703 | 127406 | /*
* Disk Array driver for HP Smart Array SAS controllers
* Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
*
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <asm/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
* product = Marketing Name for the board
* access = Address of the struct of function pointers
*/
static struct board_type products[] = {
{0x3241103C, "Smart Array P212", &SA5_access},
{0x3243103C, "Smart Array P410", &SA5_access},
{0x3245103C, "Smart Array P410i", &SA5_access},
{0x3247103C, "Smart Array P411", &SA5_access},
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
{0x3350103C, "Smart Array", &SA5_access},
{0x3351103C, "Smart Array", &SA5_access},
{0x3352103C, "Smart Array", &SA5_access},
{0x3353103C, "Smart Array", &SA5_access},
{0x3354103C, "Smart Array", &SA5_access},
{0x3355103C, "Smart Array", &SA5_access},
{0x3356103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
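/*
 * Example decode: board_id 0x3241103C is (PCI subsystem device ID
 * 0x3241 << 16) | (subsystem vendor ID 0x103C), matching the
 * {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241} entry in
 * hpsa_pci_device_id[] above, i.e. a Smart Array P212.
 */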
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);
#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
int qdepth, int reason);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
unsigned long *priv = shost_priv(sdev->host);
return (struct ctlr_info *) *priv;
}
static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
unsigned long *priv = shost_priv(sh);
return (struct ctlr_info *) *priv;
}
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c)
{
if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
return 0;
switch (c->err_info->SenseInfo[12]) {
case STATE_CHANGED:
dev_warn(&h->pdev->dev, "hpsa%d: a state change "
"detected, command retried\n", h->ctlr);
break;
case LUN_FAILED:
dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
"detected, action required\n", h->ctlr);
break;
case REPORT_LUNS_CHANGED:
dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
"changed, action required\n", h->ctlr);
/*
* Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
*/
break;
case POWER_OR_RESET:
dev_warn(&h->pdev->dev, "hpsa%d: a power on "
"or device reset detected\n", h->ctlr);
break;
case UNIT_ATTENTION_CLEARED:
dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
"cleared by another initiator\n", h->ctlr);
break;
default:
dev_warn(&h->pdev->dev, "hpsa%d: unknown "
"unit attention detected\n", h->ctlr);
break;
}
return 1;
}
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
hpsa_scan_start(h->scsi_host);
return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
unsigned char *fwrev;
h = shost_to_hba(shost);
if (!h->hba_inquiry_data)
return 0;
fwrev = &h->hba_inquiry_data[32];
return snprintf(buf, 20, "%c%c%c%c\n",
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ctlr_info *h = shost_to_hba(shost);
return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
static ssize_t host_show_transport_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
return snprintf(buf, 20, "%s\n",
h->transMethod & CFGTBL_Trans_Performant ?
"performant" : "simple");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
0x324a103C, /* Smart Array P712m */
0x324b103C, /* SmartArray P711m */
0x3223103C, /* Smart Array P800 */
0x3234103C, /* Smart Array P400 */
0x3235103C, /* Smart Array P400i */
0x3211103C, /* Smart Array E200i */
0x3212103C, /* Smart Array E200 */
0x3213103C, /* Smart Array E200i */
0x3214103C, /* Smart Array E200i */
0x3215103C, /* Smart Array E200i */
0x3237103C, /* Smart Array E500 */
0x323D103C, /* Smart Array P700m */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
* it. If we reset the one controlling the cache, the other will
* likely not be happy. Just forbid resetting this conjoined mess.
* The 640x isn't really supported by hpsa anyway.
*/
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
static int ctlr_is_hard_resettable(u32 board_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
if (unresettable_controller[i] == board_id)
return 0;
return 1;
}
static int ctlr_is_soft_resettable(u32 board_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
if (soft_unresettable_controller[i] == board_id)
return 0;
return 1;
}
static int ctlr_is_resettable(u32 board_id)
{
return ctlr_is_hard_resettable(board_id) ||
ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
return (scsi3addr[3] & 0xC0) == 0x40;
}
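/*
 * Addressing note: in the 8-byte LUN address used by this driver, the
 * top two bits of byte 3 select the addressing mode, and 01b (0x40)
 * marks a logical volume -- that is what the test above checks.
 * Physical (peripheral-addressed) devices and the controller LUN
 * itself (see is_hba_lunid()) fail it.
 */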
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t l = 0;
unsigned char rlevel;
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
unsigned long flags;
sdev = to_scsi_device(dev);
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->lock, flags);
hdev = sdev->hostdata;
if (!hdev) {
spin_unlock_irqrestore(&h->lock, flags);
return -ENODEV;
}
/* Is this even a logical drive? */
if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
spin_unlock_irqrestore(&h->lock, flags);
l = snprintf(buf, PAGE_SIZE, "N/A\n");
return l;
}
rlevel = hdev->raid_level;
spin_unlock_irqrestore(&h->lock, flags);
if (rlevel > RAID_UNKNOWN)
rlevel = RAID_UNKNOWN;
l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
return l;
}
static ssize_t lunid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
unsigned long flags;
unsigned char lunid[8];
sdev = to_scsi_device(dev);
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->lock, flags);
hdev = sdev->hostdata;
if (!hdev) {
spin_unlock_irqrestore(&h->lock, flags);
return -ENODEV;
}
memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
lunid[0], lunid[1], lunid[2], lunid[3],
lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
unsigned long flags;
unsigned char sn[16];
sdev = to_scsi_device(dev);
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->lock, flags);
hdev = sdev->hostdata;
if (!hdev) {
spin_unlock_irqrestore(&h->lock, flags);
return -ENODEV;
}
memcpy(sn, hdev->device_id, sizeof(sn));
spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 16 * 2 + 2,
"%02X%02X%02X%02X%02X%02X%02X%02X"
"%02X%02X%02X%02X%02X%02X%02X%02X\n",
sn[0], sn[1], sn[2], sn[3],
sn[4], sn[5], sn[6], sn[7],
sn[8], sn[9], sn[10], sn[11],
sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
NULL,
};
static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_rescan,
&dev_attr_firmware_revision,
&dev_attr_commands_outstanding,
&dev_attr_transport_mode,
&dev_attr_resettable,
NULL,
};
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
.name = "hpsa",
.proc_name = "hpsa",
.queuecommand = hpsa_scsi_queue_command,
.scan_start = hpsa_scan_start,
.scan_finished = hpsa_scan_finished,
.change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
};
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h)
{
u32 a;
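/* Performant mode: bit 0 of each reply-ring entry is a cycle bit.
 * An entry is fresh when that bit matches our reply_pool_wraparound
 * value; we toggle our copy each time we wrap (below), mirroring the
 * producer flipping the bit each lap around the ring.
 */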
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h);
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
(h->reply_pool_head)++;
h->commands_outstanding--;
} else {
a = FIFO_EMPTY;
}
/* Check for wraparound */
if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
h->reply_pool_head = h->reply_pool;
h->reply_pool_wraparound ^= 1;
}
return a;
}
/* set_performant_mode: Modify the tag for cciss performant mode.
 * Set bit 0 for pull model, bits 3-1 for block fetch
 * register number.
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
if (likely(h->transMethod & CFGTBL_Trans_Performant))
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
struct CommandList *c)
{
unsigned long flags;
set_performant_mode(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
start_io(h);
spin_unlock_irqrestore(&h->lock, flags);
}
static inline void removeQ(struct CommandList *c)
{
if (WARN_ON(list_empty(&c->list)))
return;
list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}
static inline int is_scsi_rev_5(struct ctlr_info *h)
{
if (!h->hba_inquiry_data)
return 0;
if ((h->hba_inquiry_data[2] & 0x07) == 5)
return 1;
return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
unsigned char scsi3addr[], int bus, int *target, int *lun)
{
/* finds an unused bus, target, lun for a new physical device
* assumes h->devlock is held
*/
int i, found = 0;
DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
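/* The >>3 below converts bits to bytes; assumes
 * HPSA_MAX_SCSI_DEVS_PER_HBA is a multiple of 8.
 */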
memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
set_bit(h->dev[i]->target, lun_taken);
}
for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
if (!test_bit(i, lun_taken)) {
/* *bus = 1; */
*target = i;
*lun = 0;
found = 1;
break;
}
}
return !found;
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *device,
struct hpsa_scsi_dev_t *added[], int *nadded)
{
/* assumes h->devlock is held */
int n = h->ndevices;
int i;
unsigned char addr1[8], addr2[8];
struct hpsa_scsi_dev_t *sd;
if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
dev_err(&h->pdev->dev, "too many devices, some will be "
"inaccessible.\n");
return -1;
}
/* physical devices do not have lun or target assigned until now. */
if (device->lun != -1)
/* Logical device, lun is already assigned. */
goto lun_assigned;
/* If this device is a non-zero lun of a multi-lun device,
 * byte 4 of the 8-byte LUN addr will contain the logical
 * unit no, zero otherwise.
 */
if (device->scsi3addr[4] == 0) {
/* This is not a non-zero lun of a multi-lun device */
if (hpsa_find_target_lun(h, device->scsi3addr,
device->bus, &device->target, &device->lun) != 0)
return -1;
goto lun_assigned;
}
/* This is a non-zero lun of a multi-lun device.
* Search through our list and find the device which
* has the same 8 byte LUN address, excepting byte 4.
* Assign the same bus and target for this new LUN.
* Use the logical unit number from the firmware.
*/
memcpy(addr1, device->scsi3addr, 8);
addr1[4] = 0;
for (i = 0; i < n; i++) {
sd = h->dev[i];
memcpy(addr2, sd->scsi3addr, 8);
addr2[4] = 0;
/* differ only in byte 4? */
if (memcmp(addr1, addr2, 8) == 0) {
device->bus = sd->bus;
device->target = sd->target;
device->lun = device->scsi3addr[4];
break;
}
}
if (device->lun == -1) {
dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
" suspect firmware bug or unsupported hardware "
"configuration.\n");
return -1;
}
lun_assigned:
h->dev[n] = device;
h->ndevices++;
added[*nadded] = device;
(*nadded)++;
/* Initially (before registering with the scsi layer) we don't
 * know our hostno, and we don't want to print anything the first
 * time anyway (the scsi layer's inquiries will show that info).
 */
/* if (hostno != -1) */
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
scsi_device_type(device->devtype), hostno,
device->bus, device->target, device->lun);
return 0;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry,
struct hpsa_scsi_dev_t *added[], int *nadded,
struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
/*
* New physical devices won't have target/lun assigned yet
* so we need to preserve the values in the slot we are replacing.
*/
if (new_entry->target == -1) {
new_entry->target = h->dev[entry]->target;
new_entry->lun = h->dev[entry]->lun;
}
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
/* assumes h->devlock is held */
int i;
struct hpsa_scsi_dev_t *sd;
BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
sd = h->dev[entry];
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
for (i = entry; i < h->ndevices-1; i++)
h->dev[i] = h->dev[i+1];
h->ndevices--;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
sd->lun);
}
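/* Compare two 8-byte SCSI3 addresses bytewise. */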
#define SCSI3ADDR_EQ(a, b) ( \
(a)[7] == (b)[7] && \
(a)[6] == (b)[6] && \
(a)[5] == (b)[5] && \
(a)[4] == (b)[4] && \
(a)[3] == (b)[3] && \
(a)[2] == (b)[2] && \
(a)[1] == (b)[1] && \
(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
struct hpsa_scsi_dev_t *added)
{
/* called when scsi_add_device fails in order to re-adjust
* h->dev[] to match the mid layer's view.
*/
unsigned long flags;
int i, j;
spin_lock_irqsave(&h->lock, flags);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i] == added) {
for (j = i; j < h->ndevices-1; j++)
h->dev[j] = h->dev[j+1];
h->ndevices--;
break;
}
}
spin_unlock_irqrestore(&h->lock, flags);
kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
struct hpsa_scsi_dev_t *dev2)
{
/* we compare everything except lun and target as these
* are not yet assigned. Compare parts likely
* to differ first
*/
if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
sizeof(dev1->scsi3addr)) != 0)
return 0;
if (memcmp(dev1->device_id, dev2->device_id,
sizeof(dev1->device_id)) != 0)
return 0;
if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
return 0;
if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
return 0;
if (dev1->devtype != dev2->devtype)
return 0;
if (dev1->bus != dev2->bus)
return 0;
return 1;
}
/* Find needle in haystack. If exact match found, return DEVICE_SAME,
* and return needle location in *index. If scsi3addr matches, but not
* vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
* location in *index. If needle not found, return DEVICE_NOT_FOUND.
*/
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
struct hpsa_scsi_dev_t *haystack[], int haystack_size,
int *index)
{
int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
for (i = 0; i < haystack_size; i++) {
if (haystack[i] == NULL) /* previously removed. */
continue;
if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
*index = i;
if (device_is_the_same(needle, haystack[i]))
return DEVICE_SAME;
else
return DEVICE_CHANGED;
}
}
*index = -1;
return DEVICE_NOT_FOUND;
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
/* sd contains scsi3 addresses and devtypes, and inquiry
* data. This function takes what's in sd to be the current
* reality and updates h->dev[] to reflect that reality.
*/
int i, entry, device_change, changes = 0;
struct hpsa_scsi_dev_t *csd;
unsigned long flags;
struct hpsa_scsi_dev_t **added, **removed;
int nadded, nremoved;
struct Scsi_Host *sh = NULL;
added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
GFP_KERNEL);
removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
GFP_KERNEL);
if (!added || !removed) {
dev_warn(&h->pdev->dev, "out of memory in "
"adjust_hpsa_scsi_table\n");
goto free_and_out;
}
spin_lock_irqsave(&h->devlock, flags);
/* find any devices in h->dev[] that are not in
* sd[] and remove them from h->dev[], and for any
* devices which have changed, remove the old device
* info and add the new device info.
*/
i = 0;
nremoved = 0;
nadded = 0;
while (i < h->ndevices) {
csd = h->dev[i];
device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
if (device_change == DEVICE_NOT_FOUND) {
changes++;
hpsa_scsi_remove_entry(h, hostno, i,
removed, &nremoved);
continue; /* remove ^^^, hence i not incremented */
} else if (device_change == DEVICE_CHANGED) {
changes++;
hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
added, &nadded, removed, &nremoved);
/* Set it to NULL to prevent it from being freed
* at the bottom of hpsa_update_scsi_devices()
*/
sd[entry] = NULL;
}
i++;
}
/* Now, make sure every device listed in sd[] is also
* listed in h->dev[], adding them if they aren't found
*/
for (i = 0; i < nsds; i++) {
if (!sd[i]) /* if already added above. */
continue;
device_change = hpsa_scsi_find_entry(sd[i], h->dev,
h->ndevices, &entry);
if (device_change == DEVICE_NOT_FOUND) {
changes++;
if (hpsa_scsi_add_entry(h, hostno, sd[i],
added, &nadded) != 0)
break;
sd[i] = NULL; /* prevent from being freed later. */
} else if (device_change == DEVICE_CHANGED) {
/* should never happen... */
changes++;
dev_warn(&h->pdev->dev,
"device unexpectedly changed.\n");
/* but if it does happen, we just ignore that device */
}
}
spin_unlock_irqrestore(&h->devlock, flags);
/* Don't notify the scsi mid layer of any changes the first time
 * through (or if there are no changes); scsi_scan_host() will do
 * it later, the first time through.
 */
if (hostno == -1 || !changes)
goto free_and_out;
sh = h->scsi_host;
/* Notify scsi mid layer of any removed devices */
for (i = 0; i < nremoved; i++) {
struct scsi_device *sdev =
scsi_device_lookup(sh, removed[i]->bus,
removed[i]->target, removed[i]->lun);
if (sdev != NULL) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
} else {
/* We don't expect to get here.
 * Future cmds to this device will get a selection
 * timeout as if the device were gone.
 */
dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
" for removal.", hostno, removed[i]->bus,
removed[i]->target, removed[i]->lun);
}
kfree(removed[i]);
removed[i] = NULL;
}
/* Notify scsi mid layer of any added devices */
for (i = 0; i < nadded; i++) {
if (scsi_add_device(sh, added[i]->bus,
added[i]->target, added[i]->lun) == 0)
continue;
dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
"device not added.\n", hostno, added[i]->bus,
added[i]->target, added[i]->lun);
/* now we have to remove it from h->dev,
* since it didn't get added to scsi mid layer
*/
fixup_botched_add(h, added[i]);
}
free_and_out:
kfree(added);
kfree(removed);
}
/*
 * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
int bus, int target, int lun)
{
int i;
struct hpsa_scsi_dev_t *sd;
for (i = 0; i < h->ndevices; i++) {
sd = h->dev[i];
if (sd->bus == bus && sd->target == target && sd->lun == lun)
return sd;
}
return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd;
unsigned long flags;
struct ctlr_info *h;
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->devlock, flags);
sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
sdev_id(sdev), sdev->lun);
if (sd != NULL)
sdev->hostdata = sd;
spin_unlock_irqrestore(&h->devlock, flags);
return 0;
}
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
int i;
if (!h->cmd_sg_list)
return;
for (i = 0; i < h->nr_cmds; i++) {
kfree(h->cmd_sg_list[i]);
h->cmd_sg_list[i] = NULL;
}
kfree(h->cmd_sg_list);
h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
int i;
if (h->chainsize <= 0)
return 0;
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
if (!h->cmd_sg_list)
return -ENOMEM;
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
if (!h->cmd_sg_list[i])
goto clean;
}
return 0;
clean:
hpsa_free_sg_chain_blocks(h);
return -ENOMEM;
}
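/* SG chaining: when a command needs more SG entries than fit in the
 * on-board list, the last on-board slot is turned into a chain
 * descriptor (Ext = HPSA_SG_CHAIN) pointing at a separately
 * DMA-mapped block that holds the remaining entries.
 */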
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
u64 temp64;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
chain_block = h->cmd_sg_list[c->cmdindex];
chain_sg->Ext = HPSA_SG_CHAIN;
chain_sg->Len = sizeof(*chain_sg) *
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg;
union u64bit temp64;
if (c->Header.SGTotal <= h->max_cmd_sg_entries)
return;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
temp64.val32.lower = chain_sg->Addr.lower;
temp64.val32.upper = chain_sg->Addr.upper;
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
static void complete_scsi_command(struct CommandList *cp)
{
struct scsi_cmnd *cmd;
struct ctlr_info *h;
struct ErrorInfo *ei;
unsigned char sense_key;
unsigned char asc; /* additional sense code */
unsigned char ascq; /* additional sense code qualifier */
unsigned long sense_data_size;
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
h = cp->h;
scsi_dma_unmap(cmd); /* undo the DMA mappings */
if (cp->Header.SGTotal > h->max_cmd_sg_entries)
hpsa_unmap_sg_chain_block(h, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
sense_data_size = SCSI_SENSE_BUFFERSIZE;
else
sense_data_size = sizeof(ei->SenseInfo);
if (ei->SenseLen < sense_data_size)
sense_data_size = ei->SenseLen;
memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
cmd->scsi_done(cmd);
cmd_free(h, cp);
return;
}
/* an error has occurred */
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
if (ei->ScsiStatus) {
/* Get sense key */
sense_key = 0xf & ei->SenseInfo[2];
/* Get additional sense code */
asc = ei->SenseInfo[12];
/* Get additional sense code qualifier */
ascq = ei->SenseInfo[13];
}
if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
if (check_for_unit_attention(h, cp)) {
cmd->result = DID_SOFT_ERROR << 16;
break;
}
if (sense_key == ILLEGAL_REQUEST) {
/*
* SCSI REPORT_LUNS is commonly unsupported on
* Smart Array. Suppress noisy complaint.
*/
if (cp->Request.CDB[0] == REPORT_LUNS)
break;
/* If ASC/ASCQ indicate a Logical Unit
 * Not Supported condition, just warn.
 */
if ((asc == 0x25) && (ascq == 0x0)) {
dev_warn(&h->pdev->dev, "cp %p "
"has check condition\n", cp);
break;
}
}
if (sense_key == NOT_READY) {
/* If sense is Not Ready and ASC/ASCQ indicate
 * Logical Unit Not Ready, Manual Intervention
 * Required
 */
if ((asc == 0x04) && (ascq == 0x03)) {
dev_warn(&h->pdev->dev, "cp %p "
"has check condition: unit "
"not ready, manual "
"intervention required\n", cp);
break;
}
}
if (sense_key == ABORTED_COMMAND) {
/* Aborted command is retryable */
dev_warn(&h->pdev->dev, "cp %p "
"has check condition: aborted command: "
"ASC: 0x%x, ASCQ: 0x%x\n",
cp, asc, ascq);
cmd->result = DID_SOFT_ERROR << 16;
break;
}
/* Must be some other type of check condition */
dev_warn(&h->pdev->dev, "cp %p has check condition: "
"unknown type: "
"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
"Returning result: 0x%x, "
"cmd=[%02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x]\n",
cp, sense_key, asc, ascq,
cmd->result,
cmd->cmnd[0], cmd->cmnd[1],
cmd->cmnd[2], cmd->cmnd[3],
cmd->cmnd[4], cmd->cmnd[5],
cmd->cmnd[6], cmd->cmnd[7],
cmd->cmnd[8], cmd->cmnd[9],
cmd->cmnd[10], cmd->cmnd[11],
cmd->cmnd[12], cmd->cmnd[13],
cmd->cmnd[14], cmd->cmnd[15]);
break;
}
/* Problem was not a check condition
* Pass it up to the upper layers...
*/
if (ei->ScsiStatus) {
dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
"Returning result: 0x%x\n",
cp, ei->ScsiStatus,
sense_key, asc, ascq,
cmd->result);
} else { /* scsi status is zero??? How??? */
dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
"Returning no connection.\n", cp);
/* Ordinarily, this case should never happen,
* but there is a bug in some released firmware
* revisions that allows it to happen if, for
* example, a 4100 backplane loses power and
* the tape drive is in it. We assume that
* it's a fatal error of some kind because we
* can't show that it wasn't. We will make it
* look like selection timeout since that is
* the most common reason for this to occur,
* and it's severe enough.
*/
cmd->result = DID_NO_CONNECT << 16;
}
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
break;
case CMD_DATA_OVERRUN:
dev_warn(&h->pdev->dev, "cp %p has"
" completed with data overrun "
"reported\n", cp);
break;
case CMD_INVALID: {
/* print_bytes(cp, sizeof(*cp), 1, 0);
print_cmd(cp); */
/* We get CMD_INVALID if you address a non-existent device
* instead of a selection timeout (no response). You will
* see this if you yank out a drive, then try to access it.
* This is kind of a shame because it means that any other
* CMD_INVALID (e.g. driver bug) will get interpreted as a
* missing target. */
cmd->result = DID_NO_CONNECT << 16;
}
break;
case CMD_PROTOCOL_ERR:
dev_warn(&h->pdev->dev, "cp %p has "
"protocol error \n", cp);
break;
case CMD_HARDWARE_ERR:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
break;
case CMD_CONNECTION_LOST:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
break;
case CMD_ABORTED:
cmd->result = DID_ABORT << 16;
dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
cp, ei->ScsiStatus);
break;
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
cmd->result = DID_RESET << 16;
dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
"abort\n", cp);
break;
case CMD_TIMEOUT:
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
break;
case CMD_UNABORTABLE:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "Command unabortable\n");
break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
cp, ei->CommandStatus);
}
cmd->scsi_done(cmd);
cmd_free(h, cp);
}
static int hpsa_scsi_detect(struct ctlr_info *h)
{
struct Scsi_Host *sh;
int error;
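/* Note: only pointer-sized private hostdata is needed here
 * (hence sizeof(h)); sh->hostdata[0] stores the ctlr_info
 * pointer, set below.
 */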
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
if (sh == NULL)
goto fail;
sh->io_port = 0;
sh->n_io_port = 0;
sh->this_id = -1;
sh->max_channel = 3;
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
sh->cmd_per_lun = h->nr_cmds;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
goto fail_host_put;
scsi_scan_host(sh);
return 0;
fail_host_put:
dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
" failed for controller %d\n", h->ctlr);
scsi_host_put(sh);
return error;
fail:
dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
" failed for controller %d\n", h->ctlr);
return -ENOMEM;
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
struct CommandList *c, int sg_used, int data_direction)
{
int i;
union u64bit addr64;
for (i = 0; i < sg_used; i++) {
addr64.val32.lower = c->SG[i].Addr.lower;
addr64.val32.upper = c->SG[i].Addr.upper;
pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
data_direction);
}
}
static void hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
int data_direction)
{
u64 addr64;
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
return;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
struct CommandList *c)
{
DECLARE_COMPLETION_ONSTACK(wait);
c->waiting = &wait;
enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
}
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
struct CommandList *c, int data_direction)
{
int retry_count = 0;
do {
memset(c->err_info, 0, sizeof(*c->err_info));
hpsa_scsi_do_simple_cmd_core(h, c);
retry_count++;
} while (check_for_unit_attention(h, c) && retry_count <= 3);
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
struct ErrorInfo *ei;
struct device *d = &cp->h->pdev->dev;
ei = cp->err_info;
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
dev_warn(d, "cmd %p has completed with errors\n", cp);
dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
ei->ScsiStatus);
if (ei->ScsiStatus == 0)
dev_warn(d, "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
"reported incorrectly due to a known "
"firmware bug, circa July, 2001.)\n");
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
dev_info(d, "UNDERRUN\n");
break;
case CMD_DATA_OVERRUN:
dev_warn(d, "cp %p has completed with data overrun\n", cp);
break;
case CMD_INVALID: {
/* controller unfortunately reports SCSI passthru's
* to non-existent targets as invalid commands.
*/
dev_warn(d, "cp %p is reported invalid (probably means "
"target device no longer present)\n", cp);
/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
print_cmd(cp); */
}
break;
case CMD_PROTOCOL_ERR:
dev_warn(d, "cp %p has protocol error \n", cp);
break;
case CMD_HARDWARE_ERR:
/* cmd->result = DID_ERROR << 16; */
dev_warn(d, "cp %p had hardware error\n", cp);
break;
case CMD_CONNECTION_LOST:
dev_warn(d, "cp %p had connection lost\n", cp);
break;
case CMD_ABORTED:
dev_warn(d, "cp %p was aborted\n", cp);
break;
case CMD_ABORT_FAILED:
dev_warn(d, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
break;
case CMD_TIMEOUT:
dev_warn(d, "cp %p timed out\n", cp);
break;
case CMD_UNABORTABLE:
dev_warn(d, "Command unabortable\n");
break;
default:
dev_warn(d, "cp %p returned unknown status %x\n", cp,
ei->CommandStatus);
}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
unsigned char page, unsigned char *buf,
unsigned char bufsize)
{
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -ENOMEM;
}
fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
cmd_special_free(h, c);
return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -ENOMEM;
}
fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
if (ei->CommandStatus != 0) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
cmd_special_free(h, c);
return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
unsigned char *scsi3addr, unsigned char *raid_level)
{
int rc;
unsigned char *buf;
*raid_level = RAID_UNKNOWN;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return;
rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
if (rc == 0)
*raid_level = buf[8];
if (*raid_level > RAID_UNKNOWN)
*raid_level = RAID_UNKNOWN;
kfree(buf);
return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
unsigned char *device_id, int buflen)
{
int rc;
unsigned char *buf;
if (buflen > 16)
buflen = 16;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -1;
rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
kfree(buf);
return rc != 0;
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
struct ReportLUNdata *buf, int bufsize,
int extended_response)
{
int rc = IO_OK;
struct CommandList *c;
unsigned char scsi3addr[8];
struct ErrorInfo *ei;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -1;
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
buf, bufsize, 0, scsi3addr, TYPE_CMD);
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
cmd_special_free(h, c);
return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportLUNdata *buf,
int bufsize, int extended_response)
{
return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
struct ReportLUNdata *buf, int bufsize)
{
return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
int bus, int target, int lun)
{
device->bus = bus;
device->target = target;
device->lun = lun;
}
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
{
#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
unsigned char *inq_buff;
unsigned char *obdr_sig;
inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (!inq_buff)
goto bail_out;
/* Do an inquiry to the device to see what it is. */
if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
/* Inquiry failed (msg printed already) */
dev_err(&h->pdev->dev,
"hpsa_update_device_info: inquiry failed\n");
goto bail_out;
}
this_device->devtype = (inq_buff[0] & 0x1f);
memcpy(this_device->scsi3addr, scsi3addr, 8);
memcpy(this_device->vendor, &inq_buff[8],
sizeof(this_device->vendor));
memcpy(this_device->model, &inq_buff[16],
sizeof(this_device->model));
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
hpsa_get_device_id(h, scsi3addr, this_device->device_id,
sizeof(this_device->device_id));
if (this_device->devtype == TYPE_DISK &&
is_logical_dev_addr_mode(scsi3addr))
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
else
this_device->raid_level = RAID_UNKNOWN;
if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
* by looking for "$DR-10" at offset 43 in inquiry data.
*/
obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
strncmp(obdr_sig, OBDR_TAPE_SIG,
OBDR_SIG_LEN) == 0);
}
kfree(inq_buff);
return 0;
bail_out:
kfree(inq_buff);
return 1;
}
static unsigned char *msa2xxx_model[] = {
"MSA2012",
"MSA2024",
"MSA2312",
"MSA2324",
"P2000 G3 SAS",
NULL,
};
static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
int i;
for (i = 0; msa2xxx_model[i]; i++)
if (strncmp(device->model, msa2xxx_model[i],
strlen(msa2xxx_model[i])) == 0)
return 1;
return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
* Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
* volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
* Logical drive target and lun are assigned at this time, but
* physical device lun and target assignment are deferred (assigned
* in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
*/
static void figure_bus_target_lun(struct ctlr_info *h,
u8 *lunaddrbytes, int *bus, int *target, int *lun,
struct hpsa_scsi_dev_t *device)
{
u32 lunid;
if (is_logical_dev_addr_mode(lunaddrbytes)) {
/* logical device */
if (unlikely(is_scsi_rev_5(h))) {
/* p1210m, logical drives lun assignments
* match SCSI REPORT LUNS data.
*/
lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
*bus = 0;
*target = 0;
*lun = (lunid & 0x3fff) + 1;
} else {
/* not p1210m... */
lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
if (is_msa2xxx(h, device)) {
/* msa2xxx way, put logicals on bus 1
* and match target/lun numbers box
* reports.
*/
*bus = 1;
*target = (lunid >> 16) & 0x3fff;
*lun = lunid & 0x00ff;
} else {
/* Traditional smart array way. */
*bus = 0;
*lun = 0;
*target = lunid & 0x3fff;
}
}
} else {
/* physical device */
if (is_hba_lunid(lunaddrbytes)) {
if (unlikely(is_scsi_rev_5(h))) {
*bus = 0; /* put p1210m ctlr at 0,0,0 */
*target = 0;
*lun = 0;
return;
} else
*bus = 3; /* traditional smartarray */
} else
*bus = 2; /* physical disk */
*target = -1;
*lun = -1; /* we will fill these in later. */
}
}
/*
* If there is no lun 0 on a target, linux won't find any devices.
* For the MSA2xxx boxes, we have to manually detect the enclosure
* which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
* it for some reason. *tmpdevice is the target we're adding,
* this_device is a pointer into the current element of currentsd[]
* that we're building up in hpsa_update_scsi_devices(), below.
* lunzerobits is a bitmap that tracks which targets already have a
* lun 0 assigned.
* Returns 1 if an enclosure was added, 0 if not.
*/
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
struct hpsa_scsi_dev_t *tmpdevice,
struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
int bus, int target, int lun, unsigned long lunzerobits[],
int *nmsa2xxx_enclosures)
{
unsigned char scsi3addr[8];
if (test_bit(target, lunzerobits))
return 0; /* There is already a lun 0 on this target. */
if (!is_logical_dev_addr_mode(lunaddrbytes))
return 0; /* It's the logical targets that may lack lun 0. */
if (!is_msa2xxx(h, tmpdevice))
return 0; /* It's only the MSA2xxx that have this problem. */
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
return 0;
memset(scsi3addr, 0, 8);
scsi3addr[3] = target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
if (is_scsi_rev_5(h))
return 0; /* p1210m doesn't need to do this. */
#define MAX_MSA2XXX_ENCLOSURES 32
if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
"enclosures exceeded. Check your hardware "
"configuration.");
return 0;
}
if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
return 0;
(*nmsa2xxx_enclosures)++;
hpsa_set_bus_target_lun(this_device, bus, target, 0);
set_bit(target, lunzerobits);
return 1;
}
/*
* Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
* logdev. The number of luns in physdev and logdev are returned in
* *nphysicals and *nlogicals, respectively.
* Returns 0 on success, -1 otherwise.
*/
static int hpsa_gather_lun_info(struct ctlr_info *h,
int reportlunsize,
struct ReportLUNdata *physdev, u32 *nphysicals,
struct ReportLUNdata *logdev, u32 *nlogicals)
{
if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
return -1;
}
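/* LUNListLength is a big-endian byte count; each LUN entry is 8 bytes. */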
*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
if (*nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
*nphysicals - HPSA_MAX_PHYS_LUN);
*nphysicals = HPSA_MAX_PHYS_LUN;
}
if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
return -1;
}
*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
/* Reject Logicals in excess of our max capability. */
if (*nlogicals > HPSA_MAX_LUN) {
dev_warn(&h->pdev->dev,
"maximum logical LUNs (%d) exceeded. "
"%d LUNs ignored.\n", HPSA_MAX_LUN,
*nlogicals - HPSA_MAX_LUN);
*nlogicals = HPSA_MAX_LUN;
}
if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev,
"maximum logical + physical LUNs (%d) exceeded. "
"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
}
return 0;
}
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
struct ReportLUNdata *logdev_list)
{
/* Helper function, figure out where the LUN ID info is coming from
* given index i, lists of physical and logical devices, where in
* the list the raid controller is supposed to appear (first or last)
*/
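/* Index layout: if the controller is reported first
 * (raid_ctlr_position == 0) it occupies index 0 and shifts the
 * physical entries up by one; otherwise physicals come first and
 * the controller is appended after the logicals.
 */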
int logicals_start = nphysicals + (raid_ctlr_position == 0);
int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
if (i == raid_ctlr_position)
return RAID_CTLR_LUNID;
if (i < logicals_start)
return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
if (i < last_device)
return &logdev_list->LUN[i - nphysicals -
(raid_ctlr_position == 0)][0];
BUG();
return NULL;
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
* that some devices have changed, so we do a report
* physical luns and report logical luns cmd, and adjust
* our list of devices accordingly.
*
* The scsi3addr's of devices won't change so long as the
* adapter is not reset. That means we can rescan and
* tell which devices we already know about, vs. new
* devices, vs. disappearing devices.
*/
struct ReportLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
int i, nmsa2xxx_enclosures, ndevs_to_allocate;
int bus, target, lun;
int raid_ctlr_position;
DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
memset(lunzerobits, 0, sizeof(lunzerobits));
if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
logdev_list, &nlogicals))
goto out;
/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
* but each of them 4 times through different paths. The plus 1
* is for the RAID controller.
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
__FILE__, __LINE__);
goto out;
}
ndev_allocated++;
}
if (unlikely(is_scsi_rev_5(h)))
raid_ctlr_position = 0;
else
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
nmsa2xxx_enclosures = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
/* Figure out where the LUN ID info is coming from */
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
i, nphysicals, nlogicals, physdev_list, logdev_list);
/* skip masked physical devices. */
if (lunaddrbytes[3] & 0xC0 &&
i < nphysicals + (raid_ctlr_position == 0))
continue;
/* Get device type, vendor, model, device id */
if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR))
continue; /* skip it if we can't talk to it. */
figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
tmpdevice);
this_device = currentsd[ncurrent];
/*
* For the msa2xxx boxes, we have to insert a LUN 0 which
* doesn't show up in CCISS_REPORT_PHYSICAL data, but there
* is nonetheless an enclosure device there. We have to
* present that otherwise linux won't find anything if
* there is no lun 0.
*/
if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
lunaddrbytes, bus, target, lun, lunzerobits,
&nmsa2xxx_enclosures)) {
ncurrent++;
this_device = currentsd[ncurrent];
}
*this_device = *tmpdevice;
hpsa_set_bus_target_lun(this_device, bus, target, lun);
switch (this_device->devtype) {
case TYPE_ROM:
/* We don't *really* support actual CD-ROM devices,
* just "One Button Disaster Recovery" tape drive
* which temporarily pretends to be a CD-ROM drive.
* So we check that the device is really an OBDR tape
* device by checking for "$DR-10" in bytes 43-48 of
* the inquiry data.
*/
if (is_OBDR)
ncurrent++;
break;
case TYPE_DISK:
if (i < nphysicals)
break;
ncurrent++;
break;
case TYPE_TAPE:
case TYPE_MEDIUM_CHANGER:
ncurrent++;
break;
case TYPE_RAID:
/* Only present the Smartarray HBA as a RAID controller.
* If it's a RAID controller other than the HBA itself
* (an external RAID controller, MSA500 or similar)
* don't present it.
*/
if (!is_hba_lunid(lunaddrbytes))
break;
ncurrent++;
break;
default:
break;
}
if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
break;
}
adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
kfree(tmpdevice);
for (i = 0; i < ndev_allocated; i++)
kfree(currentsd[i]);
kfree(currentsd);
kfree(physdev_list);
kfree(logdev_list);
}
/* hpsa_scatter_gather takes a struct scsi_cmnd (cmd) and does the
 * PCI DMA mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
struct CommandList *cp,
struct scsi_cmnd *cmd)
{
unsigned int len;
struct scatterlist *sg;
u64 addr64;
int use_sg, i, sg_index, chained;
struct SGDescriptor *curr_sg;
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
use_sg = scsi_dma_map(cmd);
if (use_sg < 0)
return use_sg;
if (!use_sg)
goto sglist_finished;
curr_sg = cp->SG;
chained = 0;
sg_index = 0;
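/* If more entries remain than fit on-board, switch to this
 * command's pre-allocated chain block once we reach the last
 * on-board slot; that slot is later rewritten as a chain
 * descriptor by hpsa_map_sg_chain_block().
 */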
scsi_for_each_sg(cmd, sg, use_sg, i) {
if (i == h->max_cmd_sg_entries - 1 &&
use_sg > h->max_cmd_sg_entries) {
chained = 1;
curr_sg = h->cmd_sg_list[cp->cmdindex];
sg_index = 0;
}
addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
curr_sg->Len = len;
curr_sg->Ext = 0; /* we are not chaining */
curr_sg++;
}
if (use_sg + chained > h->maxSG)
h->maxSG = use_sg + chained;
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
hpsa_map_sg_chain_block(h, cp);
return 0;
}
sglist_finished:
cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
return 0;
}
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
unsigned char scsi3addr[8];
struct CommandList *c;
unsigned long flags;
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
done(cmd);
return 0;
}
memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
/* Need a lock as this is being allocated from the pool */
spin_lock_irqsave(&h->lock, flags);
c = cmd_alloc(h);
spin_unlock_irqrestore(&h->lock, flags);
if (c == NULL) { /* trouble... */
dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
/* Fill in the command list header */
cmd->scsi_done = done; /* save this for use by completion code */
/* save c in case we have to abort it */
cmd->host_scribble = (unsigned char *) c;
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
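/* The tag encodes the command's pool index so the completion path
 * can look the command up directly; DIRECT_LOOKUP_BIT marks tags
 * that support this.
 */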
c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
/* Fill in the request block... */
c->Request.Timeout = 0;
memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
c->Request.CDBLen = cmd->cmd_len;
memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
c->Request.Type.Type = TYPE_CMD;
c->Request.Type.Attribute = ATTR_SIMPLE;
switch (cmd->sc_data_direction) {
case DMA_TO_DEVICE:
c->Request.Type.Direction = XFER_WRITE;
break;
case DMA_FROM_DEVICE:
c->Request.Type.Direction = XFER_READ;
break;
case DMA_NONE:
c->Request.Type.Direction = XFER_NONE;
break;
case DMA_BIDIRECTIONAL:
/* This can happen if a buggy application does a scsi passthru
* and sets both inlen and outlen to non-zero. ( see
* ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
*/
c->Request.Type.Direction = XFER_RSVD;
/* This is technically wrong, and hpsa controllers should
* reject it with CMD_INVALID, which is the most correct
* response, but non-fibre backends appear to let it
* slide by, and give the same results as if this field
* were set correctly. Either way is acceptable for
* our purposes here.
*/
break;
default:
dev_err(&h->pdev->dev, "unknown data direction: %d\n",
cmd->sc_data_direction);
BUG();
break;
}
if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
cmd_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
}
static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
static void hpsa_scan_start(struct Scsi_Host *sh)
{
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
if (h->scan_finished)
break;
spin_unlock_irqrestore(&h->scan_lock, flags);
wait_event(h->scan_wait_queue, h->scan_finished);
/* Note: We don't need to worry about a race between this
* thread and driver unload because the midlayer will
* have incremented the reference count, so unload won't
* happen if we're in here.
*/
}
h->scan_finished = 0; /* mark scan as in progress */
spin_unlock_irqrestore(&h->scan_lock, flags);
hpsa_update_scsi_devices(h, h->scsi_host->host_no);
spin_lock_irqsave(&h->scan_lock, flags);
h->scan_finished = 1; /* mark scan as finished. */
wake_up_all(&h->scan_wait_queue);
spin_unlock_irqrestore(&h->scan_lock, flags);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time)
{
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
int finished;
spin_lock_irqsave(&h->scan_lock, flags);
finished = h->scan_finished;
spin_unlock_irqrestore(&h->scan_lock, flags);
return finished;
}
static int hpsa_change_queue_depth(struct scsi_device *sdev,
int qdepth, int reason)
{
struct ctlr_info *h = sdev_to_hba(sdev);
if (reason != SCSI_QDEPTH_DEFAULT)
return -ENOTSUPP;
if (qdepth < 1)
qdepth = 1;
else if (qdepth > h->nr_cmds)
qdepth = h->nr_cmds;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
/* we are being forcibly unloaded, and may not refuse. */
scsi_remove_host(h->scsi_host);
scsi_host_put(h->scsi_host);
h->scsi_host = NULL;
}
static int hpsa_register_scsi(struct ctlr_info *h)
{
int rc;
rc = hpsa_scsi_detect(h);
if (rc != 0)
dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
" hpsa_scsi_detect(), rc is %d\n", rc);
return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
unsigned char lunaddr[])
{
int rc = 0;
int count = 0;
int waittime = 1; /* seconds */
struct CommandList *c;
c = cmd_special_alloc(h);
if (!c) {
dev_warn(&h->pdev->dev, "out of memory in "
"wait_for_device_to_become_ready.\n");
return IO_ERROR;
}
/* Send test unit ready until device ready, or give up. */
while (count < HPSA_TUR_RETRY_LIMIT) {
/* Wait for a bit. do this first, because if we send
* the TUR right away, the reset will just abort it.
*/
msleep(1000 * waittime);
count++;
/* Increase wait time with each try, up to a point. */
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
/* Send the Test Unit Ready */
fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
if (c->err_info->CommandStatus == CMD_SUCCESS)
break;
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
(c->err_info->SenseInfo[2] == NO_SENSE ||
c->err_info->SenseInfo[2] == UNIT_ATTENTION))
break;
dev_warn(&h->pdev->dev, "waiting %d secs "
"for device to become ready.\n", waittime);
rc = 1; /* device not ready. */
}
if (rc)
dev_warn(&h->pdev->dev, "giving up on device.\n");
else
dev_warn(&h->pdev->dev, "device is ready.\n");
cmd_special_free(h, c);
return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
*/
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
if (h == NULL) /* paranoia */
return FAILED;
dev = scsicmd->device->hostdata;
if (!dev) {
dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
"device lookup failed.\n");
return FAILED;
}
dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
/* send a reset to the SCSI LUN which the command was sent to */
rc = hpsa_send_reset(h, dev->scsi3addr);
if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
return SUCCESS;
dev_warn(&h->pdev->dev, "resetting device failed.\n");
return FAILED;
}
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
* which ones are free or in use. Lock must be held when calling this.
* cmd_free() is the complement.
*/
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
struct CommandList *c;
int i;
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
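/* Find a free slot in the bitmap, then claim it atomically;
 * retry if it was taken between the find and the set.
 */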
do {
i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
if (i == h->nr_cmds)
return NULL;
} while (test_and_set_bit
(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
c = h->cmd_pool + i;
memset(c, 0, sizeof(*c));
cmd_dma_handle = h->cmd_pool_dhandle
+ i * sizeof(*c);
c->err_info = h->errinfo_pool + i;
memset(c->err_info, 0, sizeof(*c->err_info));
err_dma_handle = h->errinfo_pool_dhandle
+ i * sizeof(*c->err_info);
h->nr_allocs++;
c->cmdindex = i;
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
c->ErrDesc.Addr.upper = temp64.val32.upper;
c->ErrDesc.Len = sizeof(*c->err_info);
c->h = h;
return c;
}
/* For operations that can wait (kmalloc may sleep), this routine
 * can be called instead. The lock need not be held to call
 * cmd_special_alloc. cmd_special_free() is the complement.
 */
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
struct CommandList *c;
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
if (c == NULL)
return NULL;
memset(c, 0, sizeof(*c));
c->cmdindex = -1;
c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
&err_dma_handle);
if (c->err_info == NULL) {
pci_free_consistent(h->pdev,
sizeof(*c), c, cmd_dma_handle);
return NULL;
}
memset(c->err_info, 0, sizeof(*c->err_info));
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
c->ErrDesc.Addr.upper = temp64.val32.upper;
c->ErrDesc.Len = sizeof(*c->err_info);
c->h = h;
return c;
}
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
int i;
i = c - h->cmd_pool;
clear_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
h->nr_frees++;
}
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
union u64bit temp64;
temp64.val32.lower = c->ErrDesc.Addr.lower;
temp64.val32.upper = c->ErrDesc.Addr.upper;
pci_free_consistent(h->pdev, sizeof(*c->err_info),
c->err_info, (dma_addr_t) temp64.val);
pci_free_consistent(h->pdev, sizeof(*c),
c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT
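/* 32-bit compat shims: rebuild each 32-bit ioctl struct as its
 * 64-bit counterpart in user-accessible scratch space
 * (compat_alloc_user_space()) and hand it to the native hpsa_ioctl().
 */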
static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
IOCTL32_Command_struct __user *arg32 =
(IOCTL32_Command_struct __user *) arg;
IOCTL_Command_struct arg64;
IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
int err;
u32 cp;
memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
err |= copy_from_user(&arg64.Request, &arg32->Request,
sizeof(arg64.Request));
err |= copy_from_user(&arg64.error_info, &arg32->error_info,
sizeof(arg64.error_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(cp, &arg32->buf);
arg64.buf = compat_ptr(cp);
err |= copy_to_user(p, &arg64, sizeof(arg64));
if (err)
return -EFAULT;
err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
if (err)
return err;
err |= copy_in_user(&arg32->error_info, &p->error_info,
sizeof(arg32->error_info));
if (err)
return -EFAULT;
return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
int cmd, void *arg)
{
BIG_IOCTL32_Command_struct __user *arg32 =
(BIG_IOCTL32_Command_struct __user *) arg;
BIG_IOCTL_Command_struct arg64;
BIG_IOCTL_Command_struct __user *p =
compat_alloc_user_space(sizeof(arg64));
int err;
u32 cp;
memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
err |= copy_from_user(&arg64.Request, &arg32->Request,
sizeof(arg64.Request));
err |= copy_from_user(&arg64.error_info, &arg32->error_info,
sizeof(arg64.error_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(arg64.malloc_size, &arg32->malloc_size);
err |= get_user(cp, &arg32->buf);
arg64.buf = compat_ptr(cp);
err |= copy_to_user(p, &arg64, sizeof(arg64));
if (err)
return -EFAULT;
err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
if (err)
return err;
err |= copy_in_user(&arg32->error_info, &p->error_info,
sizeof(arg32->error_info));
if (err)
return -EFAULT;
return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
switch (cmd) {
case CCISS_GETPCIINFO:
case CCISS_GETINTINFO:
case CCISS_SETINTINFO:
case CCISS_GETNODENAME:
case CCISS_SETNODENAME:
case CCISS_GETHEARTBEAT:
case CCISS_GETBUSTYPES:
case CCISS_GETFIRMVER:
case CCISS_GETDRIVVER:
case CCISS_REVALIDVOLS:
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
case CCISS_REGNEWD:
case CCISS_RESCANDISK:
case CCISS_GETLUNINFO:
return hpsa_ioctl(dev, cmd, arg);
case CCISS_PASSTHRU32:
return hpsa_ioctl32_passthru(dev, cmd, arg);
case CCISS_BIG_PASSTHRU32:
return hpsa_ioctl32_big_passthru(dev, cmd, arg);
default:
return -ENOIOCTLCMD;
}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
struct hpsa_pci_info pciinfo;
if (!argp)
return -EINVAL;
pciinfo.domain = pci_domain_nr(h->pdev->bus);
pciinfo.bus = h->pdev->bus->number;
pciinfo.dev_fn = h->pdev->devfn;
pciinfo.board_id = h->board_id;
if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
return -EFAULT;
return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
DriverVer_type DriverVer;
unsigned char vmaj, vmin, vsubmin;
int rc;
rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
&vmaj, &vmin, &vsubmin);
if (rc != 3) {
dev_info(&h->pdev->dev, "driver version string '%s' "
"unrecognized.", HPSA_DRIVER_VERSION);
vmaj = 0;
vmin = 0;
vsubmin = 0;
}
DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
if (!argp)
return -EINVAL;
if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
return -EFAULT;
return 0;
}
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
IOCTL_Command_struct iocommand;
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
if (!argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
return -EFAULT;
if ((iocommand.buf_size < 1) &&
(iocommand.Request.Type.Direction != XFER_NONE)) {
return -EINVAL;
}
if (iocommand.buf_size > 0) {
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -ENOMEM;
if (iocommand.Request.Type.Direction == XFER_WRITE) {
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
kfree(buff);
return -EFAULT;
}
} else {
memset(buff, 0, iocommand.buf_size);
}
}
c = cmd_special_alloc(h);
if (c == NULL) {
kfree(buff);
return -ENOMEM;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
/* Fill in Command Header */
c->Header.ReplyQueue = 0; /* unused in simple mode */
if (iocommand.buf_size > 0) { /* buffer to fill */
c->Header.SGList = 1;
c->Header.SGTotal = 1;
} else { /* no buffers to fill */
c->Header.SGList = 0;
c->Header.SGTotal = 0;
}
memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
/* use the kernel address of the cmd block for the tag */
c->Header.Tag.lower = c->busaddr;
/* Fill in Request block */
memcpy(&c->Request, &iocommand.Request,
sizeof(c->Request));
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
c->SG[0].Ext = 0; /* we are not chaining*/
}
hpsa_scsi_do_simple_cmd_core(h, c);
if (iocommand.buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
kfree(buff);
cmd_special_free(h, c);
return -EFAULT;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
cmd_special_free(h, c);
return -EFAULT;
}
}
kfree(buff);
cmd_special_free(h, c);
return 0;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
BIG_IOCTL_Command_struct *ioc;
struct CommandList *c;
unsigned char **buff = NULL;
int *buff_size = NULL;
union u64bit temp64;
BYTE sg_used = 0;
int status = 0;
int i;
u32 left;
u32 sz;
BYTE __user *data_ptr;
if (!argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
}
if (copy_from_user(ioc, argp, sizeof(*ioc))) {
status = -EFAULT;
goto cleanup1;
}
if ((ioc->buf_size < 1) &&
(ioc->Request.Type.Direction != XFER_NONE)) {
status = -EINVAL;
goto cleanup1;
}
/* Check kmalloc limits using all SGs */
if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
status = -EINVAL;
goto cleanup1;
}
if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
status = -EINVAL;
goto cleanup1;
}
buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
if (!buff) {
status = -ENOMEM;
goto cleanup1;
}
buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
if (!buff_size) {
status = -ENOMEM;
goto cleanup1;
}
left = ioc->buf_size;
data_ptr = ioc->buf;
while (left) {
sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
buff_size[sg_used] = sz;
buff[sg_used] = kmalloc(sz, GFP_KERNEL);
if (buff[sg_used] == NULL) {
status = -ENOMEM;
goto cleanup1;
}
if (ioc->Request.Type.Direction == XFER_WRITE) {
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
status = -EFAULT;	/* copy_from_user failure is a fault */
goto cleanup1;
}
} else
memset(buff[sg_used], 0, sz);
left -= sz;
data_ptr += sz;
sg_used++;
}
c = cmd_special_alloc(h);
if (c == NULL) {
status = -ENOMEM;
goto cleanup1;
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
c->Header.SGList = c->Header.SGTotal = sg_used;
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
c->Header.Tag.lower = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
if (ioc->buf_size > 0) {
int i;
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
/* we are not chaining */
c->SG[i].Ext = 0;
}
}
hpsa_scsi_do_simple_cmd_core(h, c);
if (sg_used)
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
if (copy_to_user(argp, ioc, sizeof(*ioc))) {
cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
if (copy_to_user(ptr, buff[i], buff_size[i])) {
cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
ptr += buff_size[i];
}
}
cmd_special_free(h, c);
status = 0;
cleanup1:
if (buff) {
for (i = 0; i < sg_used; i++)
kfree(buff[i]);
kfree(buff);
}
kfree(buff_size);
kfree(ioc);
return status;
}
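/*
 * Worked example of the chunking above (illustrative numbers): with
 * ioc->malloc_size = 8192 and ioc->buf_size = 20480, the while loop builds
 * three kernel buffers of 8192, 8192 and 4096 bytes (sg_used = 3), each
 * mapped as one SG element; buf_size may never exceed
 * malloc_size * MAXSGENTRIES, which the checks at the top enforce.
 */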
static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c)
{
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
(void) check_for_unit_attention(h, c);
}
/*
* ioctl
*/
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
struct ctlr_info *h;
void __user *argp = (void __user *)arg;
h = sdev_to_hba(dev);
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
case CCISS_REGNEWD:
hpsa_scan_start(h->scsi_host);
return 0;
case CCISS_GETPCIINFO:
return hpsa_getpciinfo_ioctl(h, argp);
case CCISS_GETDRIVVER:
return hpsa_getdrivver_ioctl(h, argp);
case CCISS_PASSTHRU:
return hpsa_passthru_ioctl(h, argp);
case CCISS_BIG_PASSTHRU:
return hpsa_big_passthru_ioctl(h, argp);
default:
return -ENOTTY;
}
}
static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
unsigned char *scsi3addr, u8 reset_type)
{
struct CommandList *c;
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
enqueue_cmd_and_start_io(h, c);
/* Don't wait for completion, the reset won't complete. Don't free
* the command either. This is the last command we will send before
* re-initializing everything, so it doesn't matter and won't leak.
*/
return 0;
}
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
int pci_dir = XFER_NONE;
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
if (buff != NULL && size > 0) {
c->Header.SGList = 1;
c->Header.SGTotal = 1;
} else {
c->Header.SGList = 0;
c->Header.SGTotal = 0;
}
c->Header.Tag.lower = c->busaddr;
memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
c->Request.Type.Type = cmd_type;
if (cmd_type == TYPE_CMD) {
switch (cmd) {
case HPSA_INQUIRY:
/* are we trying to read a vital product page */
if (page_code != 0) {
c->Request.CDB[1] = 0x01;
c->Request.CDB[2] = page_code;
}
c->Request.CDBLen = 6;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
c->Request.Timeout = 0;
c->Request.CDB[0] = HPSA_INQUIRY;
c->Request.CDB[4] = size & 0xFF;
break;
case HPSA_REPORT_LOG:
case HPSA_REPORT_PHYS:
/* Talking to the controller, so it's a physical command:
mode = 00, target = 0. Nothing to write.
*/
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
c->Request.Timeout = 0;
c->Request.CDB[0] = cmd;
c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
c->Request.CDB[7] = (size >> 16) & 0xFF;
c->Request.CDB[8] = (size >> 8) & 0xFF;
c->Request.CDB[9] = size & 0xFF;
break;
case HPSA_CACHE_FLUSH:
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_WRITE;
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
break;
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
return;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
case HPSA_DEVICE_RESET_MSG:
c->Request.CDBLen = 16;
c->Request.Type.Type = 1; /* It is a MSG not a CMD */
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0; /* Don't time out */
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd;
c->Request.CDB[1] = 0x03; /* Reset target above */
/* If bytes 4-7 are zero, it means reset the */
/* LunID device */
c->Request.CDB[4] = 0x00;
c->Request.CDB[5] = 0x00;
c->Request.CDB[6] = 0x00;
c->Request.CDB[7] = 0x00;
break;
default:
dev_warn(&h->pdev->dev, "unknown message type %d\n",
cmd);
BUG();
}
} else {
dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
BUG();
}
switch (c->Request.Type.Direction) {
case XFER_READ:
pci_dir = PCI_DMA_FROMDEVICE;
break;
case XFER_WRITE:
pci_dir = PCI_DMA_TODEVICE;
break;
case XFER_NONE:
pci_dir = PCI_DMA_NONE;
break;
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
hpsa_map_one(h->pdev, c, buff, size, pci_dir);
return;
}
/*
* Map (physical) PCI mem into (virtual) kernel space
*/
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
void __iomem *page_remapped = ioremap(page_base, page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
}
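/*
 * Worked example (assuming 4 KiB pages): remap_pci_mem(0xfd001234, 0x250)
 * computes page_base = 0xfd001000 and page_offs = 0x234, calls
 * ioremap(0xfd001000, 0x484), and returns the mapping plus 0x234, so the
 * caller gets a pointer to exactly the requested physical address.
 */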
/* Takes cmds off the submission queue and sends them to the hardware,
* then puts them on the queue of cmds waiting for completion.
*/
static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
/* Get the first entry from the Request Q */
removeQ(c);
h->Qdepth--;
/* Tell the controller execute command */
h->access.submit_command(h, c);
/* Put job onto the completed Q */
addQ(&h->cmpQ, c);
}
}
static inline unsigned long get_next_completion(struct ctlr_info *h)
{
return h->access.command_completed(h);
}
static inline bool interrupt_pending(struct ctlr_info *h)
{
return h->access.intr_pending(h);
}
static inline long interrupt_not_for_us(struct ctlr_info *h)
{
return (h->access.intr_pending(h) == 0) ||
(h->interrupts_enabled == 0);
}
static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
u32 raw_tag)
{
if (unlikely(tag_index >= h->nr_cmds)) {
dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
return 1;
}
return 0;
}
static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
{
removeQ(c);
if (likely(c->cmd_type == CMD_SCSI))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
}
static inline u32 hpsa_tag_contains_index(u32 tag)
{
return tag & DIRECT_LOOKUP_BIT;
}
static inline u32 hpsa_tag_to_index(u32 tag)
{
return tag >> DIRECT_LOOKUP_SHIFT;
}
static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return tag & ~HPSA_SIMPLE_ERROR_BITS;
return tag & ~HPSA_PERF_ERROR_BITS;
}
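/*
 * Illustrative tag arithmetic (the real DIRECT_LOOKUP_* values live in
 * hpsa_cmd.h; a shift of 5 is assumed here purely for the example): for a
 * performant-mode raw tag of 0x345, hpsa_tag_to_index() would give
 * 0x345 >> 5 = 0x1a, and with HPSA_PERF_ERROR_BITS = 0x1f,
 * hpsa_tag_discard_error_bits() would return 0x345 & ~0x1f = 0x340.
 */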
/* process completion of an indexed ("direct lookup") command */
static inline u32 process_indexed_cmd(struct ctlr_info *h,
u32 raw_tag)
{
u32 tag_index;
struct CommandList *c;
tag_index = hpsa_tag_to_index(raw_tag);
if (bad_tag(h, tag_index, raw_tag))
return next_command(h);
c = h->cmd_pool + tag_index;
finish_cmd(c, raw_tag);
return next_command(h);
}
/* process completion of a non-indexed command */
static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
u32 raw_tag)
{
u32 tag;
struct CommandList *c = NULL;
tag = hpsa_tag_discard_error_bits(h, raw_tag);
list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
finish_cmd(c, raw_tag);
return next_command(h);
}
}
bad_tag(h, h->nr_cmds + 1, raw_tag);
return next_command(h);
}
/* Some controllers, like p400, will give us one interrupt
* after a soft reset, even if we turned interrupts off.
* Only need to check for this in the hpsa_xxx_discard_completions
* functions.
*/
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
if (likely(!reset_devices))
return 0;
if (likely(h->interrupts_enabled))
return 0;
dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
"(known firmware bug.) Ignoring.\n");
return 1;
}
static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
if (ignore_bogus_interrupt(h))
return IRQ_NONE;
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY)
raw_tag = next_command(h);
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
if (ignore_bogus_interrupt(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY)
raw_tag = next_command(h);
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY) {
if (hpsa_tag_contains_index(raw_tag))
raw_tag = process_indexed_cmd(h, raw_tag);
else
raw_tag = process_nonindexed_cmd(h, raw_tag);
}
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
spin_lock_irqsave(&h->lock, flags);
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY) {
if (hpsa_tag_contains_index(raw_tag))
raw_tag = process_indexed_cmd(h, raw_tag);
else
raw_tag = process_nonindexed_cmd(h, raw_tag);
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
* in simple mode, not performant mode due to the tag lookup.
* We only ever use this immediately after a controller reset.
*/
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
struct Command {
struct CommandListHeader CommandHeader;
struct RequestBlock Request;
struct ErrDescriptor ErrorDescriptor;
};
struct Command *cmd;
static const size_t cmd_sz = sizeof(*cmd) +
sizeof(cmd->ErrorDescriptor);
dma_addr_t paddr64;
uint32_t paddr32, tag;
void __iomem *vaddr;
int i, err;
vaddr = pci_ioremap_bar(pdev, 0);
if (vaddr == NULL)
return -ENOMEM;
/* The Inbound Post Queue only accepts 32-bit physical addresses for the
* CCISS commands, so they must be allocated from the lower 4GiB of
* memory.
*/
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
return -ENOMEM;
}
cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
if (cmd == NULL) {
iounmap(vaddr);
return -ENOMEM;
}
/* This must fit, because of the 32-bit consistent DMA mask. Also,
* although there's no guarantee, we assume that the address is at
* least 4-byte aligned (most likely, it's page-aligned).
*/
paddr32 = paddr64;
cmd->CommandHeader.ReplyQueue = 0;
cmd->CommandHeader.SGList = 0;
cmd->CommandHeader.SGTotal = 0;
cmd->CommandHeader.Tag.lower = paddr32;
cmd->CommandHeader.Tag.upper = 0;
memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
cmd->Request.CDBLen = 16;
cmd->Request.Type.Type = TYPE_MSG;
cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
cmd->Request.Type.Direction = XFER_NONE;
cmd->Request.Timeout = 0; /* Don't time out */
cmd->Request.CDB[0] = opcode;
cmd->Request.CDB[1] = type;
memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
cmd->ErrorDescriptor.Addr.upper = 0;
cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
iounmap(vaddr);
/* we leak the DMA buffer here ... no choice since the controller could
* still complete the command.
*/
if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
opcode, type);
return -ETIMEDOUT;
}
pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
if (tag & HPSA_ERROR_BIT) {
dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
opcode, type);
return -EIO;
}
dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
opcode, type);
return 0;
}
#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void __iomem *vaddr, u32 use_doorbell)
{
u16 pmcsr;
int pos;
if (use_doorbell) {
/* For everything after the P600, the PCI power state method
* of resetting the controller doesn't work, so we have this
* other way using the doorbell register.
*/
dev_info(&pdev->dev, "using doorbell to reset controller\n");
writel(use_doorbell, vaddr + SA5_DOORBELL);
} else { /* Try to do it the PCI power state way */
/* Quoting from the Open CISS Specification: "The Power
* Management Control/Status Register (CSR) controls the power
* state of the device. The normal operating state is D0,
* CSR=00h. The software off state is D3, CSR=03h. To reset
* the controller, place the interface device in D3 then to D0,
* this causes a secondary PCI reset which will reset the
* controller." */
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
dev_err(&pdev->dev,
"hpsa_reset_controller: "
"PCI PM not supported\n");
return -ENODEV;
}
dev_info(&pdev->dev, "using PCI PM to reset controller\n");
/* enter the D3hot power management state */
pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_D3hot;
pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
msleep(500);
/* enter the D0 power management state */
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_D0;
pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
/*
* The P600 requires a small delay when changing states.
* Otherwise we may think the board did not reset and we bail.
* This is for kdump only and is particular to the P600.
*/
msleep(500);
}
return 0;
}
static __devinit void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
}
static __devinit int write_driver_ver_to_cfgtable(
struct CfgTable __iomem *cfgtable)
{
char *driver_version;
int i, size = sizeof(cfgtable->driver_version);
driver_version = kmalloc(size, GFP_KERNEL);
if (!driver_version)
return -ENOMEM;
init_driver_version(driver_version, size);
for (i = 0; i < size; i++)
writeb(driver_version[i], &cfgtable->driver_version[i]);
kfree(driver_version);
return 0;
}
static __devinit void read_driver_ver_from_cfgtable(
struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
{
int i;
for (i = 0; i < sizeof(cfgtable->driver_version); i++)
driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
static __devinit int controller_reset_failed(
struct CfgTable __iomem *cfgtable)
{
char *driver_ver, *old_driver_ver;
int rc, size = sizeof(cfgtable->driver_version);
old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
if (!old_driver_ver)
return -ENOMEM;
driver_ver = old_driver_ver + size;
/* After a reset, the 32 bytes of "driver version" in the cfgtable
* should have been changed, otherwise we know the reset failed.
*/
init_driver_version(old_driver_ver, size);
read_driver_ver_from_cfgtable(cfgtable, driver_ver);
rc = !memcmp(driver_ver, old_driver_ver, size);
kfree(old_driver_ver);
return rc;
}
/* This does a hard reset of the controller using PCI power management
* states or the using the doorbell register.
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support;
int rc;
struct CfgTable __iomem *cfgtable;
u32 use_doorbell;
u32 board_id;
u16 command_register;
/* For controllers as old as the P600, this is very nearly
* the same thing as
*
* pci_save_state(pci_dev);
* pci_set_power_state(pci_dev, PCI_D3hot);
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
*/
rc = hpsa_lookup_board_id(pdev, &board_id);
if (rc < 0 || !ctlr_is_resettable(board_id)) {
dev_warn(&pdev->dev, "Not resetting device.\n");
return -ENODEV;
}
/* if controller is soft- but not hard resettable... */
if (!ctlr_is_hard_resettable(board_id))
return -ENOTSUPP; /* try soft reset later. */
/* Save the PCI command register */
pci_read_config_word(pdev, 4, &command_register);
/* Turn the board off. This is so that later pci_restore_state()
* won't turn the board on before the rest of config space is ready.
*/
pci_disable_device(pdev);
pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
if (rc)
return rc;
vaddr = remap_pci_mem(paddr, 0x250);
if (!vaddr)
return -ENOMEM;
/* find cfgtable in order to check if reset via doorbell is supported */
rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
&cfg_base_addr_index, &cfg_offset);
if (rc)
goto unmap_vaddr;
cfgtable = remap_pci_mem(pci_resource_start(pdev,
cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
if (!cfgtable) {
rc = -ENOMEM;
goto unmap_vaddr;
}
rc = write_driver_ver_to_cfgtable(cfgtable);
if (rc)
goto unmap_vaddr;
/* If reset via doorbell register is supported, use that.
* There are two such methods. Favor the newest method.
*/
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
if (use_doorbell) {
use_doorbell = DOORBELL_CTLR_RESET2;
} else {
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
if (use_doorbell) {
dev_warn(&pdev->dev, "Controller claims that "
"'Bit 2 doorbell reset' is "
"supported, but not 'bit 5 doorbell reset'. "
"Firmware update is recommended.\n");
rc = -ENOTSUPP; /* try soft reset */
goto unmap_cfgtable;
}
}
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
pci_restore_state(pdev);
rc = pci_enable_device(pdev);
if (rc) {
dev_warn(&pdev->dev, "failed to enable device.\n");
goto unmap_cfgtable;
}
pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
/* Wait for board to become not ready, then ready. */
dev_info(&pdev->dev, "Waiting for board to reset.\n");
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
if (rc) {
dev_warn(&pdev->dev,
"failed waiting for board to reset."
" Will try soft reset.\n");
rc = -ENOTSUPP; /* Not expected, but try soft reset later */
goto unmap_cfgtable;
}
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
if (rc) {
dev_warn(&pdev->dev,
"failed waiting for board to become ready "
"after hard reset\n");
goto unmap_cfgtable;
}
rc = controller_reset_failed(cfgtable);
if (rc < 0)
goto unmap_cfgtable;
if (rc) {
dev_warn(&pdev->dev, "Unable to successfully reset "
"controller. Will try soft reset.\n");
rc = -ENOTSUPP;
} else {
dev_info(&pdev->dev, "board ready after hard reset.\n");
}
unmap_cfgtable:
iounmap(cfgtable);
unmap_vaddr:
iounmap(vaddr);
return rc;
}
/*
* We cannot read the structure directly; for portability we must use
* the io functions.
* This is for debug only.
*/
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
int i;
char temp_name[17];
dev_info(dev, "Controller Configuration information\n");
dev_info(dev, "------------------------------------\n");
for (i = 0; i < 4; i++)
temp_name[i] = readb(&(tb->Signature[i]));
temp_name[4] = '\0';
dev_info(dev, " Signature = %s\n", temp_name);
dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
dev_info(dev, " Transport methods supported = 0x%x\n",
readl(&(tb->TransportSupport)));
dev_info(dev, " Transport methods active = 0x%x\n",
readl(&(tb->TransportActive)));
dev_info(dev, " Requested transport Method = 0x%x\n",
readl(&(tb->HostWrite.TransportRequest)));
dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
readl(&(tb->HostWrite.CoalIntDelay)));
dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
readl(&(tb->HostWrite.CoalIntCount)));
dev_info(dev, " Max outstanding commands = 0x%d\n",
readl(&(tb->CmdsOutMax)));
dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
for (i = 0; i < 16; i++)
temp_name[i] = readb(&(tb->ServerName[i]));
temp_name[16] = '\0';
dev_info(dev, " Server Name = %s\n", temp_name);
dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
readl(&(tb->HeartBeat)));
#endif /* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
int i, offset, mem_type, bar_type;
if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
return 0;
offset = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
offset += 4;
else {
mem_type = pci_resource_flags(pdev, i) &
PCI_BASE_ADDRESS_MEM_TYPE_MASK;
switch (mem_type) {
case PCI_BASE_ADDRESS_MEM_TYPE_32:
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
offset += 4; /* 32 bit */
break;
case PCI_BASE_ADDRESS_MEM_TYPE_64:
offset += 8;
break;
default: /* reserved in PCI 2.2 */
dev_warn(&pdev->dev,
"base address is invalid\n");
return -1;
break;
}
}
if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
return i + 1;
}
return -1;
}
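/*
 * Worked example: to locate PCI_BASE_ADDRESS_2 (8 bytes past BAR 0 in
 * config space) when resource 0 is a 64-bit memory BAR, the loop adds 8
 * for resource 0, sees offset == 8, and returns resource index 1; a
 * 64-bit BAR occupies two config-space slots but only one resource.
 */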
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use IO-APIC mode.
*/
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
int err;
struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
{0, 2}, {0, 3}
};
/* Some boards advertise MSI but don't really support it */
if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
(h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
dev_info(&h->pdev->dev, "MSIX\n");
err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
if (!err) {
h->intr[0] = hpsa_msix_entries[0].vector;
h->intr[1] = hpsa_msix_entries[1].vector;
h->intr[2] = hpsa_msix_entries[2].vector;
h->intr[3] = hpsa_msix_entries[3].vector;
h->msix_vector = 1;
return;
}
if (err > 0) {
dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
"available\n", err);
goto default_int_mode;
} else {
dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
err);
goto default_int_mode;
}
}
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
dev_info(&h->pdev->dev, "MSI\n");
if (!pci_enable_msi(h->pdev))
h->msi_vector = 1;
else
dev_warn(&h->pdev->dev, "MSI init failed\n");
}
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
h->intr[h->intr_mode] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
int i;
u32 subsystem_vendor_id, subsystem_device_id;
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id;
for (i = 0; i < ARRAY_SIZE(products); i++)
if (*board_id == products[i].board_id)
return i;
if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
!hpsa_allow_any) {
dev_warn(&pdev->dev, "unrecognized board ID: "
"0x%08x, ignoring.\n", *board_id);
return -ENODEV;
}
return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
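/*
 * Example of the board_id composition above: subsystem device 0x3241 and
 * subsystem vendor 0x103c (HP) combine to board_id 0x3241103c, which the
 * products[] table earlier in this file maps to a specific Smart Array
 * model.
 */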
static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
u16 command;
(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
return ((command & PCI_COMMAND_MEMORY) == 0);
}
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar)
{
int i;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
/* addressing mode bits already removed */
*memory_bar = pci_resource_start(pdev, i);
dev_dbg(&pdev->dev, "memory BAR = %lx\n",
*memory_bar);
return 0;
}
dev_warn(&pdev->dev, "no memory BAR found\n");
return -ENODEV;
}
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
void __iomem *vaddr, int wait_for_ready)
{
int i, iterations;
u32 scratchpad;
if (wait_for_ready)
iterations = HPSA_BOARD_READY_ITERATIONS;
else
iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
for (i = 0; i < iterations; i++) {
scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
if (wait_for_ready) {
if (scratchpad == HPSA_FIRMWARE_READY)
return 0;
} else {
if (scratchpad != HPSA_FIRMWARE_READY)
return 0;
}
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
}
dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
u64 *cfg_offset)
{
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
*cfg_base_addr &= (u32) 0x0000ffff;
*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
if (*cfg_base_addr_index == -1) {
dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
return -ENODEV;
}
return 0;
}
static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
u32 trans_offset;
int rc;
rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
&cfg_base_addr_index, &cfg_offset);
if (rc)
return rc;
h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
if (!h->cfgtable)
return -ENOMEM;
rc = write_driver_ver_to_cfgtable(h->cfgtable);
if (rc)
return rc;
/* Find performant mode table. */
trans_offset = readl(&h->cfgtable->TransMethodOffset);
h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index)+cfg_offset+trans_offset,
sizeof(*h->transtable));
if (!h->transtable)
return -ENOMEM;
return 0;
}
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
/* Limit commands in memory limited kdump scenario. */
if (reset_devices && h->max_commands > 32)
h->max_commands = 32;
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
"Using 16. Ensure that firmware is up to date.\n",
h->max_commands);
h->max_commands = 16;
}
}
/* Interrogate the hardware for some limits:
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
*/
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
hpsa_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
/*
* Limit in-command s/g elements to 32 to save dma'able memory.
* However, the spec says if 0, use 31
*/
h->max_cmd_sg_entries = 31;
if (h->maxsgentries > 512) {
h->max_cmd_sg_entries = 32;
h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
h->maxsgentries--; /* save one for chain pointer */
} else {
h->maxsgentries = 31; /* default to traditional values */
h->chainsize = 0;
}
}
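/*
 * Worked example: if the controller reports maxsgentries = 2048, then
 * max_cmd_sg_entries = 32, chainsize = 2048 - 32 + 1 = 2017, and
 * maxsgentries is decremented to 2047 because one in-command entry is
 * reserved for the chain pointer.
 */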
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
(readb(&h->cfgtable->Signature[1]) != 'I') ||
(readb(&h->cfgtable->Signature[2]) != 'S') ||
(readb(&h->cfgtable->Signature[3]) != 'S')) {
dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
return true;
}
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
u32 prefetch;
prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
prefetch |= 0x100;
writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}
/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
* in a prefetch beyond physical memory.
*/
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
u32 dma_prefetch;
if (h->board_id != 0x3225103C)
return;
dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
dma_prefetch |= 0x8000;
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
u32 doorbell_value;
unsigned long flags;
/* under certain very rare conditions, this can take a while.
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
spin_lock_irqsave(&h->lock, flags);
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
spin_unlock_irqrestore(&h->lock, flags);
if (!(doorbell_value & CFGTBL_ChangeReq))
break;
/* delay and try again */
usleep_range(10000, 20000);
}
}
static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & SIMPLE_MODE))
return -ENOTSUPP;
h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
/* Update the field, and then ring the doorbell */
writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
print_cfg_table(&h->pdev->dev, h->cfgtable);
if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
dev_warn(&h->pdev->dev,
"unable to get board into simple mode\n");
return -ENODEV;
}
h->transMethod = CFGTBL_Trans_Simple;
return 0;
}
static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
int prod_index, err;
prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
if (prod_index < 0)
return -ENODEV;
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
if (hpsa_board_disabled(h->pdev)) {
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
return -ENODEV;
}
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
err = pci_enable_device(h->pdev);
if (err) {
dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
return err;
}
err = pci_request_regions(h->pdev, "hpsa");
if (err) {
dev_err(&h->pdev->dev,
"cannot obtain PCI resources, aborting\n");
return err;
}
hpsa_interrupt_mode(h);
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto err_out_free_res;
h->vaddr = remap_pci_mem(h->paddr, 0x250);
if (!h->vaddr) {
err = -ENOMEM;
goto err_out_free_res;
}
err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = hpsa_find_cfgtables(h);
if (err)
goto err_out_free_res;
hpsa_find_board_params(h);
if (!hpsa_CISS_signature_present(h)) {
err = -ENODEV;
goto err_out_free_res;
}
hpsa_enable_scsi_prefetch(h);
hpsa_p600_dma_prefetch_quirk(h);
err = hpsa_enter_simple_mode(h);
if (err)
goto err_out_free_res;
return 0;
err_out_free_res:
if (h->transtable)
iounmap(h->transtable);
if (h->cfgtable)
iounmap(h->cfgtable);
if (h->vaddr)
iounmap(h->vaddr);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(h->pdev);
return err;
}
static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
int rc;
#define HBA_INQUIRY_BYTE_COUNT 64
h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
if (!h->hba_inquiry_data)
return;
rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
if (rc != 0) {
kfree(h->hba_inquiry_data);
h->hba_inquiry_data = NULL;
}
}
static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
if (!reset_devices)
return 0;
/* Reset the controller with a PCI power-cycle or via doorbell */
rc = hpsa_kdump_hard_reset_controller(pdev);
/* -ENOTSUPP here means we cannot reset the controller
* but it's already (and still) up and running in
* "performant mode". Or, it might be 640x, which can't reset
* due to concerns about shared bbwc between 6402/6404 pair.
*/
if (rc == -ENOTSUPP)
return rc; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
/* Now try to get the controller to respond to a no-op */
dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
if (hpsa_noop(pdev) == 0)
break;
else
dev_warn(&pdev->dev, "no-op failed%s\n",
(i < 11 ? "; re-trying" : ""));
}
return 0;
}
static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
h->cmd_pool_bits = kzalloc(
DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
sizeof(unsigned long), GFP_KERNEL);
h->cmd_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(*h->cmd_pool),
&(h->cmd_pool_dhandle));
h->errinfo_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(*h->errinfo_pool),
&(h->errinfo_pool_dhandle));
if ((h->cmd_pool_bits == NULL)
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
dev_err(&h->pdev->dev, "out of memory in %s", __func__);
return -ENOMEM;
}
return 0;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
kfree(h->cmd_pool_bits);
if (h->cmd_pool)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
if (h->errinfo_pool)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
}
static int hpsa_request_irq(struct ctlr_info *h,
irqreturn_t (*msixhandler)(int, void *),
irqreturn_t (*intxhandler)(int, void *))
{
int rc;
if (h->msix_vector || h->msi_vector)
rc = request_irq(h->intr[h->intr_mode], msixhandler,
IRQF_DISABLED, h->devname, h);
else
rc = request_irq(h->intr[h->intr_mode], intxhandler,
IRQF_DISABLED, h->devname, h);
if (rc) {
dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
return -ENODEV;
}
return 0;
}
static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
HPSA_RESET_TYPE_CONTROLLER)) {
dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
return -EIO;
}
dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
return -1;
}
dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
dev_warn(&h->pdev->dev, "Board failed to become ready "
"after soft reset.\n");
return -1;
}
return 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
else if (h->msi_vector)
pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
kfree(h->blockFetchTable);
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
if (h->vaddr)
iounmap(h->vaddr);
if (h->transtable)
iounmap(h->transtable);
if (h->cfgtable)
iounmap(h->cfgtable);
pci_release_regions(h->pdev);
kfree(h);
}
static int __devinit hpsa_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int dac, rc;
struct ctlr_info *h;
int try_soft_reset = 0;
unsigned long flags;
if (number_of_controllers == 0)
printk(KERN_INFO DRIVER_NAME "\n");
rc = hpsa_init_reset_devices(pdev);
if (rc) {
if (rc != -ENOTSUPP)
return rc;
/* If the reset fails in a particular way (it has no way to do
* a proper hard reset, so returns -ENOTSUPP) we can try to do
* a soft reset once we get the controller configured up to the
* point that it can accept a command.
*/
try_soft_reset = 1;
rc = 0;
}
reinit_after_soft_reset:
/* Command structures must be aligned on a 32-byte boundary because
* the 5 lower bits of the address are used by the hardware and by
* the driver. See comments in hpsa.h for more info.
*/
#define COMMANDLIST_ALIGNMENT 32
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->pdev = pdev;
h->busy_initializing = 1;
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->cmpQ);
INIT_LIST_HEAD(&h->reqQ);
spin_lock_init(&h->lock);
spin_lock_init(&h->scan_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
sprintf(h->devname, "hpsa%d", number_of_controllers);
h->ctlr = number_of_controllers;
number_of_controllers++;
/* configure PCI DMA stuff */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc == 0) {
dac = 1;
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc == 0) {
dac = 0;
} else {
dev_err(&pdev->dev, "no suitable DMA available\n");
goto clean1;
}
}
/* make sure the board interrupts are off */
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
goto clean2;
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
h->intr[h->intr_mode], dac ? "" : " not");
if (hpsa_allocate_cmd_pool(h))
goto clean4;
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
init_waitqueue_head(&h->scan_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
pci_set_drvdata(pdev, h);
h->ndevices = 0;
h->scsi_host = NULL;
spin_lock_init(&h->devlock);
hpsa_put_ctlr_into_performant_mode(h);
/* At this point, the controller is ready to take commands.
* Now, if reset_devices and the hard reset didn't work, try
* the soft reset and see if that works.
*/
if (try_soft_reset) {
/* This is kind of gross. We may or may not get a completion
* from the soft reset command, and if we do, then the value
* from the fifo may or may not be valid. So, we wait 10 secs
* after the reset throwing away any completions we get during
* that time. Unregister the interrupt handler and register
* fake ones to scoop up any residual completions.
*/
spin_lock_irqsave(&h->lock, flags);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_unlock_irqrestore(&h->lock, flags);
free_irq(h->intr[h->intr_mode], h);
rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
hpsa_intx_discard_completions);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to request_irq after "
"soft reset.\n");
goto clean4;
}
rc = hpsa_kdump_soft_reset(h);
if (rc)
/* Neither hard nor soft reset worked, we're hosed. */
goto clean4;
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
"Waiting for stale completions to drain.\n");
h->access.set_intr_mask(h, HPSA_INTR_ON);
msleep(10000);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
rc = controller_reset_failed(h->cfgtable);
if (rc)
dev_info(&h->pdev->dev,
"Soft reset appears to have failed.\n");
/* since the controller's reset, we have to go back and re-init
* everything. Easiest to just forget what we've done and do it
* all over again.
*/
hpsa_undo_allocations_after_kdump_soft_reset(h);
try_soft_reset = 0;
if (rc)
/* don't go to clean4, we already unallocated */
return -ENODEV;
goto reinit_after_soft_reset;
}
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
h->busy_initializing = 0;
return 1;
clean4:
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
h->busy_initializing = 0;
kfree(h);
return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
char *flush_buf;
struct CommandList *c;
flush_buf = kzalloc(4, GFP_KERNEL);
if (!flush_buf)
return;
c = cmd_special_alloc(h);
if (!c) {
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
RAID_CTLR_LUNID, TYPE_CMD);
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
out_of_memory:
kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
struct ctlr_info *h;
h = pci_get_drvdata(pdev);
/* Turn board interrupts off and send the flush-cache command so
* that all data in the battery-backed cache is written out to
* disk before power is removed.
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
else if (h->msi_vector)
pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
}
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
if (pci_get_drvdata(pdev) == NULL) {
dev_err(&pdev->dev, "unable to remove device \n");
return;
}
h = pci_get_drvdata(pdev);
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
hpsa_shutdown(pdev);
iounmap(h->vaddr);
iounmap(h->transtable);
iounmap(h->cfgtable);
hpsa_free_sg_chain_blocks(h);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool, h->errinfo_pool_dhandle);
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
kfree(h->cmd_pool_bits);
kfree(h->blockFetchTable);
kfree(h->hba_inquiry_data);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
__attribute__((unused)) pm_message_t state)
{
return -ENOSYS;
}
static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
.name = "hpsa",
.probe = hpsa_init_one,
.remove = __devexit_p(hpsa_remove_one),
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
.suspend = hpsa_suspend,
.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
* scatter gather elements supported) and bucket[],
* which is an array of 8 integers. The bucket[] array
* contains 8 different DMA transfer sizes (in 16
* byte increments) which the controller uses to fetch
* commands. This function fills in bucket_map[], which
* maps a given number of scatter gather elements to one of
* the 8 DMA transfer sizes. The point of it is to allow the
* controller to only do as much DMA as needed to fetch the
* command, with the DMA transfer size encoded in the lower
* bits of the command address.
*/
static void calc_bucket_map(int bucket[], int num_buckets,
int nsgs, int *bucket_map)
{
int i, j, b, size;
/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
/* Note, bucket_map must have nsgs+1 entries. */
for (i = 0; i <= nsgs; i++) {
/* Compute size of a command with i SG entries */
size = i + MINIMUM_TRANSFER_BLOCKS;
b = num_buckets; /* Assume the biggest bucket */
/* Find the bucket that is just big enough */
for (j = 0; j < 8; j++) {
if (bucket[j] >= size) {
b = j;
break;
}
}
/* for a command with i SG entries, use bucket b. */
bucket_map[i] = b;
}
}
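/*
 * Worked example with the bft[] used below (5, 6, 8, 10, 12, 20, 28, 36
 * once bft[7] is set for max_sg_entries = 32): a command with 0 SG
 * entries needs 0 + 4 = 4 blocks, so bucket_map[0] = 0 (size 5); with 7
 * SG entries it needs 11 blocks, so bucket_map[7] = 4 (size 12); with 31
 * it needs 35 blocks, so bucket_map[31] = 7 (size 36).
 */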
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
u32 use_short_tags)
{
int i;
unsigned long register_value;
/* This is a bit complicated. There are 8 registers on
* the controller which we write to tell it 8 different
* sizes of commands which there may be. It's a way of
* reducing the DMA done to fetch each command. Encoded into
* each command's tag are 3 bits which communicate to the controller
* which of the eight sizes that command fits within. The size of
* each command depends on how many scatter gather entries there are.
* Each SG entry requires 16 bytes. The eight registers are programmed
* with the number of 16-byte blocks a command of that size requires.
* The smallest command possible requires 5 such 16 byte blocks.
* The largest command possible requires MAXSGENTRIES + 4 16-byte
* blocks. Note, this only extends to the SG entries contained
* within the command block, and does not extend to chained blocks
* of SG elements. bft[] contains the eight values we write to
* the registers. They are not evenly distributed, but have more
* sizes for small commands, and fewer sizes for larger commands.
*/
int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
* 8 = 4 s/g entry or 16k
* 10 = 6 s/g entry or 24k
*/
h->reply_pool_wraparound = 1; /* spec: init to 1 */
/* Controller spec: zero out this buffer. */
memset(h->reply_pool, 0, h->reply_pool_size);
h->reply_pool_head = h->reply_pool;
bft[7] = h->max_sg_entries + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
for (i = 0; i < 8; i++)
writel(bft[i], &h->transtable->BlockFetch[i]);
/* size of controller ring buffer */
writel(h->max_commands, &h->transtable->RepQSize);
writel(1, &h->transtable->RepQCount);
writel(0, &h->transtable->RepQCtrAddrLow32);
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
register_value = readl(&(h->cfgtable->TransportActive));
if (!(register_value & CFGTBL_Trans_Performant)) {
dev_warn(&h->pdev->dev, "unable to get board into"
" performant mode\n");
return;
}
/* Change the access methods to the performant access methods */
h->access = SA5_performant_access;
h->transMethod = CFGTBL_Trans_Performant;
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
if (hpsa_simple_mode)
return;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
hpsa_get_max_perf_mode_cmds(h);
h->max_sg_entries = 32;
/* Performant mode ring buffer and supporting data structures */
h->reply_pool_size = h->max_commands * sizeof(u64);
h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
&(h->reply_pool_dhandle));
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
sizeof(u32)), GFP_KERNEL);
if ((h->reply_pool == NULL)
|| (h->blockFetchTable == NULL))
goto clean_up;
hpsa_enter_performant_mode(h,
trans_support & CFGTBL_Trans_use_short_tags);
return;
clean_up:
if (h->reply_pool)
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
kfree(h->blockFetchTable);
}
/*
* This is it. Register the PCI driver information for the cards we control;
* the OS will call our registered routines when it finds one of our cards.
*/
static int __init hpsa_init(void)
{
return pci_register_driver(&hpsa_pci_driver);
}
static void __exit hpsa_cleanup(void)
{
pci_unregister_driver(&hpsa_pci_driver);
}
module_init(hpsa_init);
module_exit(hpsa_cleanup);
| gpl-2.0 |
delafer/YP-GI1CW | net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 703 | 9098 | /*
* Copyright (C)2003,2004 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Author:
* Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
*/
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/seq_file.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
const struct icmp6hdr *hp;
struct icmp6hdr _hdr;
hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (hp == NULL)
return false;
tuple->dst.u.icmp.type = hp->icmp6_type;
tuple->src.u.icmp.id = hp->icmp6_identifier;
tuple->dst.u.icmp.code = hp->icmp6_code;
return true;
}
/* Entries hold (type + 1) so an unused slot reads as 0; see the worked example below the table. */
static const u_int8_t invmap[] = {
[ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
[ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
[ICMPV6_NI_QUERY - 128] = ICMPV6_NI_REPLY + 1,
[ICMPV6_NI_REPLY - 128] = ICMPV6_NI_QUERY + 1
};
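/*
 * Worked example: ICMPV6_ECHO_REQUEST (128) indexes slot 0, which holds
 * ICMPV6_ECHO_REPLY + 1 = 130; icmpv6_invert_tuple() subtracts the 1 and
 * yields type 129 (echo reply), while a zero slot means "no inverse".
 */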
static const u_int8_t noct_valid_new[] = {
[ICMPV6_MGM_QUERY - 130] = 1,
[ICMPV6_MGM_REPORT - 130] = 1,
[ICMPV6_MGM_REDUCTION - 130] = 1,
[NDISC_ROUTER_SOLICITATION - 130] = 1,
[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
[ICMPV6_MLD2_REPORT - 130] = 1
};
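/*
 * Example of the "- 130" indexing: ICMPV6_MGM_QUERY (130) is slot 0 and
 * NDISC_NEIGHBOUR_SOLICITATION (135) is slot 5, so icmpv6_error() below
 * marks such packets as untracked instead of creating conntrack entries.
 */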
static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig)
{
int type = orig->dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(invmap) || !invmap[type])
return false;
tuple->src.u.icmp.id = orig->src.u.icmp.id;
tuple->dst.u.icmp.type = invmap[type] - 1;
tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
return true;
}
/* Print out the per-protocol part of the tuple. */
static int icmpv6_print_tuple(struct seq_file *s,
const struct nf_conntrack_tuple *tuple)
{
return seq_printf(s, "type=%u code=%u id=%u ",
tuple->dst.u.icmp.type,
tuple->dst.u.icmp.code,
ntohs(tuple->src.u.icmp.id));
}
/* Returns verdict for packet, or -1 for invalid. */
static int icmpv6_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
unsigned int hooknum)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff)
{
static const u_int8_t valid_new[] = {
[ICMPV6_ECHO_REQUEST - 128] = 1,
[ICMPV6_NI_QUERY - 128] = 1
};
int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
/* Can't create a new ICMPv6 `conn' with this. */
pr_debug("icmpv6: can't create new conn with type %u\n",
type + 128);
nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
if (LOG_INVALID(nf_ct_net(ct), IPPROTO_ICMPV6))
nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
"nf_ct_icmpv6: invalid new with type %d ",
type + 128);
return false;
}
return true;
}
static int
icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int icmp6off,
enum ip_conntrack_info *ctinfo,
unsigned int hooknum)
{
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_l4proto *inproto;
u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
NF_CT_ASSERT(skb->nfct == NULL);
/* Are they talking about one of our connections? */
if (!nf_ct_get_tuplepr(skb,
skb_network_offset(skb)
+ sizeof(struct ipv6hdr)
+ sizeof(struct icmp6hdr),
PF_INET6, &origtuple)) {
pr_debug("icmpv6_error: Can't get tuple\n");
return -NF_ACCEPT;
}
/* rcu_read_lock()ed by nf_hook_slow */
inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum);
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
if (!nf_ct_invert_tuple(&intuple, &origtuple,
&nf_conntrack_l3proto_ipv6, inproto)) {
pr_debug("icmpv6_error: Can't invert tuple\n");
return -NF_ACCEPT;
}
*ctinfo = IP_CT_RELATED;
h = nf_conntrack_find_get(net, zone, &intuple);
if (!h) {
pr_debug("icmpv6_error: no match\n");
return -NF_ACCEPT;
} else {
if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
*ctinfo += IP_CT_IS_REPLY;
}
/* Update skb to refer to this connection */
skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
skb->nfctinfo = *ctinfo;
return -NF_ACCEPT;
}
static int
icmpv6_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
{
const struct icmp6hdr *icmp6h;
struct icmp6hdr _ih;
int type;
icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmp6h == NULL) {
if (LOG_INVALID(net, IPPROTO_ICMPV6))
nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
"nf_ct_icmpv6: short packet ");
return -NF_ACCEPT;
}
if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
if (LOG_INVALID(net, IPPROTO_ICMPV6))
nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
"nf_ct_icmpv6: ICMPv6 checksum failed ");
return -NF_ACCEPT;
}
type = icmp6h->icmp6_type - 130;
if (type >= 0 && type < sizeof(noct_valid_new) &&
noct_valid_new[type]) {
skb->nfct = &nf_conntrack_untracked.ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
return NF_ACCEPT;
}
/* not an error message (informational types are >= 128)? */
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *t)
{
NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id);
NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type);
NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code);
return 0;
nla_put_failure:
return -1;
}
static const struct nla_policy icmpv6_nla_policy[CTA_PROTO_MAX+1] = {
[CTA_PROTO_ICMPV6_TYPE] = { .type = NLA_U8 },
[CTA_PROTO_ICMPV6_CODE] = { .type = NLA_U8 },
[CTA_PROTO_ICMPV6_ID] = { .type = NLA_U16 },
};
static int icmpv6_nlattr_to_tuple(struct nlattr *tb[],
struct nf_conntrack_tuple *tuple)
{
if (!tb[CTA_PROTO_ICMPV6_TYPE] ||
!tb[CTA_PROTO_ICMPV6_CODE] ||
!tb[CTA_PROTO_ICMPV6_ID])
return -EINVAL;
tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]);
tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]);
tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]);
if (tuple->dst.u.icmp.type < 128 ||
tuple->dst.u.icmp.type - 128 >= sizeof(invmap) ||
!invmap[tuple->dst.u.icmp.type - 128])
return -EINVAL;
return 0;
}
static int icmpv6_nlattr_tuple_size(void)
{
return nla_policy_len(icmpv6_nla_policy, CTA_PROTO_MAX + 1);
}
#endif
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *icmpv6_sysctl_header;
static struct ctl_table icmpv6_sysctl_table[] = {
{
.procname = "nf_conntrack_icmpv6_timeout",
.data = &nf_ct_icmpv6_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
#endif /* CONFIG_SYSCTL */
struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
{
.l3proto = PF_INET6,
.l4proto = IPPROTO_ICMPV6,
.name = "icmpv6",
.pkt_to_tuple = icmpv6_pkt_to_tuple,
.invert_tuple = icmpv6_invert_tuple,
.print_tuple = icmpv6_print_tuple,
.packet = icmpv6_packet,
.new = icmpv6_new,
.error = icmpv6_error,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
.nlattr_tuple_size = icmpv6_nlattr_tuple_size,
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
#ifdef CONFIG_SYSCTL
.ctl_table_header = &icmpv6_sysctl_header,
.ctl_table = icmpv6_sysctl_table,
#endif
};
| gpl-2.0 |
clemsyn/asusOC | arch/sparc/kernel/sun4d_irq.c | 959 | 14348 | /*
* arch/sparc/kernel/sun4d_irq.c:
* SS1000/SC2000 interrupt handling.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Heavily based on arch/sparc/kernel/irq.c.
*/
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
/* If you trust the current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */
struct sun4d_timer_regs {
u32 l10_timer_limit;
u32 l10_cur_countx;
u32 l10_limit_noclear;
u32 ctrl;
u32 l10_cur_count;
};
static struct sun4d_timer_regs __iomem *sun4d_timers;
#define TIMER_IRQ 10
#define MAX_STATIC_ALLOC 4
extern int static_irq_count;
static unsigned char sbus_tid[32];
static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;
static struct sbus_action {
struct irqaction *action;
/* For SMP this needs to be extended */
} *sbus_actions;
static int pil_to_sbus[] = {
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};
static int sbus_to_pil[] = {
0, 2, 3, 5, 7, 9, 11, 13,
};
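/*
 * Note (illustration, not in the original source): pil_to_sbus[] and
 * sbus_to_pil[] above are inverses over the seven interrupt levels an
 * SBUS device can raise. A hypothetical self-check that could run at
 * init time:
 */
#if 0	/* documentation only */
static void __init example_check_sbus_pil_maps(void)
{
	int sbusl;

	for (sbusl = 1; sbusl < 8; sbusl++)
		BUG_ON(pil_to_sbus[sbus_to_pil[sbusl]] != sbusl);
}
#endif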
static int nsbi;
/* Exported for sun4d_smp.c */
DEFINE_SPINLOCK(sun4d_imsk_lock);
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j = 0, k = 0, sbusl;
struct irqaction * action;
unsigned long flags;
#ifdef CONFIG_SMP
int x;
#endif
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
sbusl = pil_to_sbus[i];
if (!sbusl) {
action = *(i + irq_action);
if (!action)
goto out_unlock;
} else {
for (j = 0; j < nsbi; j++) {
for (k = 0; k < 4; k++)
if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
goto found_it;
}
goto out_unlock;
}
found_it: seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(x)
seq_printf(p, "%10u ",
kstat_cpu(cpu_logical_map(x)).irqs[i]);
#endif
seq_printf(p, "%c %s",
(action->flags & IRQF_DISABLED) ? '+' : ' ',
action->name);
action = action->next;
for (;;) {
for (; action; action = action->next) {
seq_printf(p, ",%s %s",
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
}
if (!sbusl) break;
k++;
if (k < 4)
action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
else {
j++;
if (j == nsbi) break;
k = 0;
action = sbus_actions [(j << 5) + (sbusl << 2)].action;
}
}
seq_putc(p, '\n');
}
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
return 0;
}
void sun4d_free_irq(unsigned int irq, void *dev_id)
{
struct irqaction *action, **actionp;
struct irqaction *tmp = NULL;
unsigned long flags;
spin_lock_irqsave(&irq_action_lock, flags);
if (irq < 15)
actionp = irq + irq_action;
else
actionp = &(sbus_actions[irq - (1 << 5)].action);
action = *actionp;
if (!action) {
printk("Trying to free free IRQ%d\n",irq);
goto out_unlock;
}
if (dev_id) {
for (; action; action = action->next) {
if (action->dev_id == dev_id)
break;
tmp = action;
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
goto out_unlock;
}
if (action->flags & SA_STATIC_ALLOC)
{
/* This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
printk("Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
if (tmp)
tmp->next = action->next;
else
*actionp = action->next;
spin_unlock_irqrestore(&irq_action_lock, flags);
synchronize_irq(irq);
spin_lock_irqsave(&irq_action_lock, flags);
kfree(action);
if (!(*actionp))
__disable_irq(irq);
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
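/*
 * Sketch (not part of the original file): SBUS interrupt numbers in
 * this driver pack board, SBUS level and slot as
 *
 *	irq = 32 + (board << 5) + (sbus_level << 2) + slot
 *
 * which is why the routines above recover the fields with shifts and
 * masks. A hypothetical decoder making that explicit:
 */
#if 0	/* documentation only */
static void example_decode_sbus_irq(unsigned int irq,
				    int *board, int *sbusl, int *slot)
{
	*board = (irq >> 5) - 1;	/* cf. sbus_tid[(irq >> 5) - 1] */
	*sbusl = (irq >> 2) & 7;
	*slot  = irq & 3;
}
#endif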
extern void unexpected_irq(int, void *, struct pt_regs *);
void sun4d_handler_irq(int irq, struct pt_regs * regs)
{
struct pt_regs *old_regs;
struct irqaction * action;
int cpu = smp_processor_id();
/* SBUS IRQ level (1 - 7) */
int sbusl = pil_to_sbus[irq];
/* FIXME: Is this necessary?? */
cc_get_ipen();
cc_set_iclr(1 << irq);
old_regs = set_irq_regs(regs);
irq_enter();
kstat_cpu(cpu).irqs[irq]++;
if (!sbusl) {
action = *(irq + irq_action);
if (!action)
unexpected_irq(irq, NULL, regs);
do {
action->handler(irq, action->dev_id);
action = action->next;
} while (action);
} else {
int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
int sbino;
struct sbus_action *actionp;
unsigned mask, slot;
int sbil = (sbusl << 2);
bw_clear_intr_mask(sbusl, bus_mask);
/* Loop for each pending SBI */
for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
if (bus_mask & 1) {
mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
mask &= (0xf << sbil);
actionp = sbus_actions + (sbino << 5) + (sbil);
/* Loop for each pending SBI slot */
for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
if (mask & slot) {
mask &= ~slot;
action = actionp->action;
if (!action)
unexpected_irq(irq, NULL, regs);
do {
action->handler(irq, action->dev_id);
action = action->next;
} while (action);
release_sbi(SBI2DEVID(sbino), slot);
}
}
}
irq_exit();
set_irq_regs(old_regs);
}
int sun4d_request_irq(unsigned int irq,
irq_handler_t handler,
unsigned long irqflags, const char * devname, void *dev_id)
{
struct irqaction *action, *tmp = NULL, **actionp;
unsigned long flags;
int ret;
if(irq > 14 && irq < (1 << 5)) {
ret = -EINVAL;
goto out;
}
if (!handler) {
ret = -EINVAL;
goto out;
}
spin_lock_irqsave(&irq_action_lock, flags);
if (irq >= (1 << 5))
actionp = &(sbus_actions[irq - (1 << 5)].action);
else
actionp = irq + irq_action;
action = *actionp;
if (action) {
if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
for (tmp = action; tmp->next; tmp = tmp->next);
} else {
ret = -EBUSY;
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
ret = -EBUSY;
goto out_unlock;
}
action = NULL; /* Or else! */
}
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
if (irqflags & SA_STATIC_ALLOC) {
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
}
if (action == NULL)
action = kmalloc(sizeof(struct irqaction),
GFP_ATOMIC);
if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
action->handler = handler;
action->flags = irqflags;
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
if (tmp)
tmp->next = action;
else
*actionp = action;
__enable_irq(irq);
ret = 0;
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
out:
return ret;
}
static void sun4d_disable_irq(unsigned int irq)
{
int tid = sbus_tid[(irq >> 5) - 1];
unsigned long flags;
if (irq < NR_IRQS)
return;
spin_lock_irqsave(&sun4d_imsk_lock, flags);
cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}
static void sun4d_enable_irq(unsigned int irq)
{
int tid = sbus_tid[(irq >> 5) - 1];
unsigned long flags;
if (irq < NR_IRQS)
return;
spin_lock_irqsave(&sun4d_imsk_lock, flags);
cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}
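/*
 * Note (sketch, illustration only): sun4d_disable_irq() masks a level
 * by setting its bit in the target cpu's interrupt mask, and
 * sun4d_enable_irq() clears that bit again. The bit both routines
 * compute from the irq number is:
 */
#if 0	/* documentation only */
static unsigned int example_imsk_bit(unsigned int irq)
{
	return 1 << sbus_to_pil[(irq >> 2) & 7];	/* SBUS level -> PIL bit */
}
#endif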
#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
sun4d_send_ipi(cpu, level);
}
static void sun4d_clear_ipi(int cpu, int level)
{
}
static void sun4d_set_udt(int cpu)
{
}
/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
struct device_node *dp;
#ifdef DISTRIBUTE_IRQS
cpumask_t sbus_serving_map;
sbus_serving_map = cpu_present_map;
for_each_node_by_name(dp, "sbi") {
int board = of_getintprop_default(dp, "board#", 0);
if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
sbus_tid[board] = (board * 2 + 1);
else if (cpu_isset(board * 2, cpu_present_map))
sbus_tid[board] = (board * 2);
else if (cpu_isset(board * 2 + 1, cpu_present_map))
sbus_tid[board] = (board * 2 + 1);
else
sbus_tid[board] = 0xff;
if (sbus_tid[board] != 0xff)
cpu_clear(sbus_tid[board], sbus_serving_map);
}
for_each_node_by_name(dp, "sbi") {
int board = of_getintprop_default(dp, "board#", 0);
if (sbus_tid[board] == 0xff) {
int i = 31;
if (cpus_empty(sbus_serving_map))
sbus_serving_map = cpu_present_map;
while (cpu_isset(i, sbus_serving_map))
i--;
sbus_tid[board] = i;
cpu_clear(i, sbus_serving_map);
}
}
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
set_sbi_tid(devid, sbus_tid[board] << 3);
}
#else
int cpuid = cpu_logical_map(1);
if (cpuid == -1)
cpuid = cpu_logical_map(0);
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
sbus_tid[board] = cpuid;
set_sbi_tid(devid, cpuid << 3);
}
printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
}
#endif
static void sun4d_clear_clock_irq(void)
{
sbus_readl(&sun4d_timers->l10_timer_limit);
}
static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
bw_set_prof_limit(cpu, limit);
}
static void __init sun4d_load_profile_irqs(void)
{
int cpu = 0, mid;
while (!cpu_find_by_instance(cpu, NULL, &mid)) {
sun4d_load_profile_irq(mid >> 3, 0);
cpu++;
}
}
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
unsigned long flags;
extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
extern unsigned int real_irq_entry[], smp4d_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
/* Adjust so that we jump directly to smp4d_ticker */
lvl14_save[2] += smp4d_ticker - real_irq_entry;
/* For SMP we use the level 14 ticker; however, the bootup code
* has copied the firmware's level 14 vector into the boot cpu's
* trap table, so we must fix this now or we get squashed.
*/
local_irq_save(flags);
patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
trap_table->inst_one = lvl14_save[0];
trap_table->inst_two = lvl14_save[1];
trap_table->inst_three = lvl14_save[2];
trap_table->inst_four = lvl14_save[3];
local_flush_cache_all();
local_irq_restore(flags);
#endif
}
static void __init sun4d_init_timers(irq_handler_t counter_fn)
{
struct device_node *dp;
struct resource res;
const u32 *reg;
int err;
dp = of_find_node_by_name(NULL, "cpu-unit");
if (!dp) {
prom_printf("sun4d_init_timers: Unable to find cpu-unit\n");
prom_halt();
}
/* Which cpu-unit we use is arbitrary, we can view the bootbus timer
* registers via any cpu's mapping. The first 'reg' property is the
* bootbus.
*/
reg = of_get_property(dp, "reg", NULL);
of_node_put(dp);
if (!reg) {
prom_printf("sun4d_init_timers: No reg property\n");
prom_halt();
}
res.start = reg[1];
res.end = reg[2] - 1;
res.flags = reg[0] & 0xff;
sun4d_timers = of_ioremap(&res, BW_TIMER_LIMIT,
sizeof(struct sun4d_timer_regs), "user timer");
if (!sun4d_timers) {
prom_printf("sun4d_init_timers: Can't map timer regs\n");
prom_halt();
}
sbus_writel((((1000000/HZ) + 1) << 10), &sun4d_timers->l10_timer_limit);
master_l10_counter = &sun4d_timers->l10_cur_count;
err = request_irq(TIMER_IRQ, counter_fn,
(IRQF_DISABLED | SA_STATIC_ALLOC),
"timer", NULL);
if (err) {
prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err);
prom_halt();
}
sun4d_load_profile_irqs();
sun4d_fixup_trap_table();
}
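/*
 * Worked example (not in the original): the limit written to
 * l10_timer_limit above is the tick period in microseconds, plus one,
 * shifted into the counter field at bit 10. A hedged helper computing
 * it for an arbitrary tick rate:
 */
#if 0	/* documentation only */
static u32 example_l10_limit(unsigned int hz)
{
	return ((1000000 / hz) + 1) << 10;	/* hz=100 -> 10001 << 10 */
}
#endif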
void __init sun4d_init_sbi_irq(void)
{
struct device_node *dp;
int target_cpu = 0;
#ifdef CONFIG_SMP
target_cpu = boot_cpu_id;
#endif
nsbi = 0;
for_each_node_by_name(dp, "sbi")
nsbi++;
sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
if (!sbus_actions) {
prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
prom_halt();
}
for_each_node_by_name(dp, "sbi") {
int devid = of_getintprop_default(dp, "device-id", 0);
int board = of_getintprop_default(dp, "board#", 0);
unsigned int mask;
set_sbi_tid(devid, target_cpu << 3);
sbus_tid[board] = target_cpu;
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(devid, 0xffffffff);
if (mask) {
printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board);
release_sbi(devid, mask);
}
}
}
void __init sun4d_init_IRQ(void)
{
local_irq_disable();
BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
/* Cannot enable interrupts until OBP ticker is disabled. */
}
| gpl-2.0 |
Pafcholini/Nadia-kernel-KK-N910F-EUR-KK-N910FXXU1ANK4-Update1 | kernel/smpboot.c | 1983 | 7064 | /*
* Common SMP CPU bringup/teardown functions
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>
#include "smpboot.h"
#ifdef CONFIG_SMP
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
* For the hotplug case we keep the task structs around and reuse
* them.
*/
static DEFINE_PER_CPU(struct task_struct *, idle_threads);
struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
if (!tsk)
return ERR_PTR(-ENOMEM);
init_idle(tsk, cpu);
return tsk;
}
void __init idle_thread_set_boot_cpu(void)
{
per_cpu(idle_threads, smp_processor_id()) = current;
}
/**
* idle_init - Initialize the idle thread for a cpu
* @cpu: The cpu for which the idle thread should be initialized
*
* Creates the thread if it does not exist.
*/
static inline void idle_init(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
if (!tsk) {
tsk = fork_idle(cpu);
if (IS_ERR(tsk))
pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
else
per_cpu(idle_threads, cpu) = tsk;
}
}
/**
* idle_threads_init - Initialize idle threads for all cpus
*/
void __init idle_threads_init(void)
{
unsigned int cpu, boot_cpu;
boot_cpu = smp_processor_id();
for_each_possible_cpu(cpu) {
if (cpu != boot_cpu)
idle_init(cpu);
}
}
#endif
#endif /* #ifdef CONFIG_SMP */
static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);
struct smpboot_thread_data {
unsigned int cpu;
unsigned int status;
struct smp_hotplug_thread *ht;
};
enum {
HP_THREAD_NONE = 0,
HP_THREAD_ACTIVE,
HP_THREAD_PARKED,
};
/**
* smpboot_thread_fn - percpu hotplug thread loop function
* @data: thread data pointer
*
* Checks for thread stop and park conditions. Calls the necessary
* setup, cleanup, park and unpark functions for the registered
* thread.
*
* Returns 1 when the thread should exit, 0 otherwise.
*/
static int smpboot_thread_fn(void *data)
{
struct smpboot_thread_data *td = data;
struct smp_hotplug_thread *ht = td->ht;
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
preempt_disable();
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
preempt_enable();
if (ht->cleanup)
ht->cleanup(td->cpu, cpu_online(td->cpu));
kfree(td);
return 0;
}
if (kthread_should_park()) {
__set_current_state(TASK_RUNNING);
preempt_enable();
if (ht->park && td->status == HP_THREAD_ACTIVE) {
BUG_ON(td->cpu != smp_processor_id());
ht->park(td->cpu);
td->status = HP_THREAD_PARKED;
}
kthread_parkme();
/* We might have been woken for stop */
continue;
}
BUG_ON(td->cpu != smp_processor_id());
/* Check for state change setup */
switch (td->status) {
case HP_THREAD_NONE:
preempt_enable();
if (ht->setup)
ht->setup(td->cpu);
td->status = HP_THREAD_ACTIVE;
preempt_disable();
break;
case HP_THREAD_PARKED:
preempt_enable();
if (ht->unpark)
ht->unpark(td->cpu);
td->status = HP_THREAD_ACTIVE;
preempt_disable();
break;
}
if (!ht->thread_should_run(td->cpu)) {
preempt_enable();
schedule();
} else {
set_current_state(TASK_RUNNING);
preempt_enable();
ht->thread_fn(td->cpu);
}
}
}
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
struct smpboot_thread_data *td;
if (tsk)
return 0;
td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
if (!td)
return -ENOMEM;
td->cpu = cpu;
td->ht = ht;
tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
ht->thread_comm);
if (IS_ERR(tsk)) {
kfree(td);
return PTR_ERR(tsk);
}
get_task_struct(tsk);
*per_cpu_ptr(ht->store, cpu) = tsk;
if (ht->create) {
/*
* Make sure that the task has actually scheduled out
* into park position, before calling the create
* callback. At least the migration thread callback
* requires that the task is off the runqueue.
*/
if (!wait_task_inactive(tsk, TASK_PARKED))
WARN_ON(1);
else
ht->create(cpu);
}
return 0;
}
int smpboot_create_threads(unsigned int cpu)
{
struct smp_hotplug_thread *cur;
int ret = 0;
mutex_lock(&smpboot_threads_lock);
list_for_each_entry(cur, &hotplug_threads, list) {
ret = __smpboot_create_thread(cur, cpu);
if (ret)
break;
}
mutex_unlock(&smpboot_threads_lock);
return ret;
}
static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
if (ht->pre_unpark)
ht->pre_unpark(cpu);
kthread_unpark(tsk);
}
void smpboot_unpark_threads(unsigned int cpu)
{
struct smp_hotplug_thread *cur;
mutex_lock(&smpboot_threads_lock);
list_for_each_entry(cur, &hotplug_threads, list)
smpboot_unpark_thread(cur, cpu);
mutex_unlock(&smpboot_threads_lock);
}
static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
if (tsk && !ht->selfparking)
kthread_park(tsk);
}
void smpboot_park_threads(unsigned int cpu)
{
struct smp_hotplug_thread *cur;
mutex_lock(&smpboot_threads_lock);
list_for_each_entry_reverse(cur, &hotplug_threads, list)
smpboot_park_thread(cur, cpu);
mutex_unlock(&smpboot_threads_lock);
}
static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
unsigned int cpu;
/* We also need to destroy the parked threads of offline cpus */
for_each_possible_cpu(cpu) {
struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
if (tsk) {
kthread_stop(tsk);
put_task_struct(tsk);
*per_cpu_ptr(ht->store, cpu) = NULL;
}
}
}
/**
* smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
* @plug_thread: Hotplug thread descriptor
*
* Creates and starts the threads on all online cpus.
*/
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
unsigned int cpu;
int ret = 0;
mutex_lock(&smpboot_threads_lock);
for_each_online_cpu(cpu) {
ret = __smpboot_create_thread(plug_thread, cpu);
if (ret) {
smpboot_destroy_threads(plug_thread);
goto out;
}
smpboot_unpark_thread(plug_thread, cpu);
}
list_add(&plug_thread->list, &hotplug_threads);
out:
mutex_unlock(&smpboot_threads_lock);
return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
/**
* smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
* @plug_thread: Hotplug thread descriptor
*
* Stops all threads on all possible cpus.
*/
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
get_online_cpus();
mutex_lock(&smpboot_threads_lock);
list_del(&plug_thread->list);
smpboot_destroy_threads(plug_thread);
mutex_unlock(&smpboot_threads_lock);
put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
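/*
 * Usage sketch (hypothetical client, not part of this file): a user
 * of this API declares per-cpu task storage plus the two mandatory
 * callbacks, then registers the descriptor once; all names below are
 * invented for illustration:
 */
#if 0	/* documentation only */
static DEFINE_PER_CPU(struct task_struct *, example_tasks);

static int example_should_run(unsigned int cpu)
{
	return 0;	/* this sketch never has work pending */
}

static void example_thread_fn(unsigned int cpu)
{
	/* the per-cpu work would happen here */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_tasks,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "example/%u",
};

/* in some init path: smpboot_register_percpu_thread(&example_threads); */
#endif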
| gpl-2.0 |
superac11/x500_letv_kernel_3.10_K11 | fs/fscache/object.c | 2239 | 28516 | /* FS-Cache object state machine handler
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* See Documentation/filesystems/caching/object.txt for a description of the
* object state machine and the in-kernel representations.
*/
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"
const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
[FSCACHE_OBJECT_INIT] = "OBJECT_INIT",
[FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP",
[FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
[FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE",
[FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE",
[FSCACHE_OBJECT_INVALIDATING] = "OBJECT_INVALIDATING",
[FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING",
[FSCACHE_OBJECT_DYING] = "OBJECT_DYING",
[FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING",
[FSCACHE_OBJECT_ABORT_INIT] = "OBJECT_ABORT_INIT",
[FSCACHE_OBJECT_RELEASING] = "OBJECT_RELEASING",
[FSCACHE_OBJECT_RECYCLING] = "OBJECT_RECYCLING",
[FSCACHE_OBJECT_WITHDRAWING] = "OBJECT_WITHDRAWING",
[FSCACHE_OBJECT_DEAD] = "OBJECT_DEAD",
};
EXPORT_SYMBOL(fscache_object_states);
const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
[FSCACHE_OBJECT_INIT] = "INIT",
[FSCACHE_OBJECT_LOOKING_UP] = "LOOK",
[FSCACHE_OBJECT_CREATING] = "CRTN",
[FSCACHE_OBJECT_AVAILABLE] = "AVBL",
[FSCACHE_OBJECT_ACTIVE] = "ACTV",
[FSCACHE_OBJECT_INVALIDATING] = "INVL",
[FSCACHE_OBJECT_UPDATING] = "UPDT",
[FSCACHE_OBJECT_DYING] = "DYNG",
[FSCACHE_OBJECT_LC_DYING] = "LCDY",
[FSCACHE_OBJECT_ABORT_INIT] = "ABTI",
[FSCACHE_OBJECT_RELEASING] = "RELS",
[FSCACHE_OBJECT_RECYCLING] = "RCYC",
[FSCACHE_OBJECT_WITHDRAWING] = "WTHD",
[FSCACHE_OBJECT_DEAD] = "DEAD",
};
static int fscache_get_object(struct fscache_object *);
static void fscache_put_object(struct fscache_object *);
static void fscache_initialise_object(struct fscache_object *);
static void fscache_lookup_object(struct fscache_object *);
static void fscache_object_available(struct fscache_object *);
static void fscache_invalidate_object(struct fscache_object *);
static void fscache_release_object(struct fscache_object *);
static void fscache_withdraw_object(struct fscache_object *);
static void fscache_enqueue_dependents(struct fscache_object *);
static void fscache_dequeue_object(struct fscache_object *);
/*
* we need to notify the parent when an op that we had outstanding upon it
* completes
*/
static inline void fscache_done_parent_op(struct fscache_object *object)
{
struct fscache_object *parent = object->parent;
_enter("OBJ%x {OBJ%x,%x}",
object->debug_id, parent->debug_id, parent->n_ops);
spin_lock_nested(&parent->lock, 1);
parent->n_ops--;
parent->n_obj_ops--;
if (parent->n_ops == 0)
fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
spin_unlock(&parent->lock);
}
/*
* Notify netfs of invalidation completion.
*/
static inline void fscache_invalidation_complete(struct fscache_cookie *cookie)
{
if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
}
/*
* process events that have been sent to an object's state machine
* - initiates parent lookup
* - does object lookup
* - does object creation
* - does object recycling and retirement
* - does object withdrawal
*/
static void fscache_object_state_machine(struct fscache_object *object)
{
enum fscache_object_state new_state;
struct fscache_cookie *cookie;
int event;
ASSERT(object != NULL);
_enter("{OBJ%x,%s,%lx}",
object->debug_id, fscache_object_states[object->state],
object->events);
switch (object->state) {
/* wait for the parent object to become ready */
case FSCACHE_OBJECT_INIT:
object->event_mask =
FSCACHE_OBJECT_EVENTS_MASK &
~(1 << FSCACHE_OBJECT_EV_CLEARED);
fscache_initialise_object(object);
goto done;
/* look up the object metadata on disk */
case FSCACHE_OBJECT_LOOKING_UP:
fscache_lookup_object(object);
goto lookup_transit;
/* create the object metadata on disk */
case FSCACHE_OBJECT_CREATING:
fscache_lookup_object(object);
goto lookup_transit;
/* handle an object becoming available; start pending
* operations and queue dependent operations for processing */
case FSCACHE_OBJECT_AVAILABLE:
fscache_object_available(object);
goto active_transit;
/* normal running state */
case FSCACHE_OBJECT_ACTIVE:
goto active_transit;
/* Invalidate an object on disk */
case FSCACHE_OBJECT_INVALIDATING:
clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
fscache_stat(&fscache_n_invalidates_run);
fscache_stat(&fscache_n_cop_invalidate_object);
fscache_invalidate_object(object);
fscache_stat_d(&fscache_n_cop_invalidate_object);
fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
goto active_transit;
/* update the object metadata on disk */
case FSCACHE_OBJECT_UPDATING:
clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
fscache_stat(&fscache_n_updates_run);
fscache_stat(&fscache_n_cop_update_object);
object->cache->ops->update_object(object);
fscache_stat_d(&fscache_n_cop_update_object);
goto active_transit;
/* handle an object dying during lookup or creation */
case FSCACHE_OBJECT_LC_DYING:
object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
fscache_stat(&fscache_n_cop_lookup_complete);
object->cache->ops->lookup_complete(object);
fscache_stat_d(&fscache_n_cop_lookup_complete);
spin_lock(&object->lock);
object->state = FSCACHE_OBJECT_DYING;
cookie = object->cookie;
if (cookie) {
if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
&cookie->flags))
wake_up_bit(&cookie->flags,
FSCACHE_COOKIE_LOOKING_UP);
if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
&cookie->flags))
wake_up_bit(&cookie->flags,
FSCACHE_COOKIE_CREATING);
}
spin_unlock(&object->lock);
fscache_done_parent_op(object);
/* wait for completion of all active operations on this object
* and the death of all child objects of this object */
case FSCACHE_OBJECT_DYING:
dying:
clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events);
spin_lock(&object->lock);
_debug("dying OBJ%x {%d,%d}",
object->debug_id, object->n_ops, object->n_children);
if (object->n_ops == 0 && object->n_children == 0) {
object->event_mask &=
~(1 << FSCACHE_OBJECT_EV_CLEARED);
object->event_mask |=
(1 << FSCACHE_OBJECT_EV_WITHDRAW) |
(1 << FSCACHE_OBJECT_EV_RETIRE) |
(1 << FSCACHE_OBJECT_EV_RELEASE) |
(1 << FSCACHE_OBJECT_EV_ERROR);
} else {
object->event_mask &=
~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
(1 << FSCACHE_OBJECT_EV_RETIRE) |
(1 << FSCACHE_OBJECT_EV_RELEASE) |
(1 << FSCACHE_OBJECT_EV_ERROR));
object->event_mask |=
1 << FSCACHE_OBJECT_EV_CLEARED;
}
spin_unlock(&object->lock);
fscache_enqueue_dependents(object);
fscache_start_operations(object);
goto terminal_transit;
/* handle an abort during initialisation */
case FSCACHE_OBJECT_ABORT_INIT:
_debug("handle abort init %lx", object->events);
object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
spin_lock(&object->lock);
fscache_dequeue_object(object);
object->state = FSCACHE_OBJECT_DYING;
if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
&object->cookie->flags))
wake_up_bit(&object->cookie->flags,
FSCACHE_COOKIE_CREATING);
spin_unlock(&object->lock);
goto dying;
/* handle the netfs releasing an object and possibly marking it
* obsolete too */
case FSCACHE_OBJECT_RELEASING:
case FSCACHE_OBJECT_RECYCLING:
object->event_mask &=
~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
(1 << FSCACHE_OBJECT_EV_RETIRE) |
(1 << FSCACHE_OBJECT_EV_RELEASE) |
(1 << FSCACHE_OBJECT_EV_ERROR));
fscache_release_object(object);
spin_lock(&object->lock);
object->state = FSCACHE_OBJECT_DEAD;
spin_unlock(&object->lock);
fscache_stat(&fscache_n_object_dead);
goto terminal_transit;
/* handle the parent cache of this object being withdrawn from
* active service */
case FSCACHE_OBJECT_WITHDRAWING:
object->event_mask &=
~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
(1 << FSCACHE_OBJECT_EV_RETIRE) |
(1 << FSCACHE_OBJECT_EV_RELEASE) |
(1 << FSCACHE_OBJECT_EV_ERROR));
fscache_withdraw_object(object);
spin_lock(&object->lock);
object->state = FSCACHE_OBJECT_DEAD;
spin_unlock(&object->lock);
fscache_stat(&fscache_n_object_dead);
goto terminal_transit;
/* complain about the object being woken up once it is
* deceased */
case FSCACHE_OBJECT_DEAD:
printk(KERN_ERR "FS-Cache:"
" Unexpected event in dead state %lx\n",
object->events & object->event_mask);
BUG();
default:
printk(KERN_ERR "FS-Cache: Unknown object state %u\n",
object->state);
BUG();
}
/* determine the transition from a lookup state */
lookup_transit:
event = fls(object->events & object->event_mask) - 1;
switch (event) {
case FSCACHE_OBJECT_EV_WITHDRAW:
case FSCACHE_OBJECT_EV_RETIRE:
case FSCACHE_OBJECT_EV_RELEASE:
case FSCACHE_OBJECT_EV_ERROR:
new_state = FSCACHE_OBJECT_LC_DYING;
goto change_state;
case FSCACHE_OBJECT_EV_INVALIDATE:
new_state = FSCACHE_OBJECT_INVALIDATING;
goto change_state;
case FSCACHE_OBJECT_EV_REQUEUE:
goto done;
case -1:
goto done; /* sleep until event */
default:
goto unsupported_event;
}
/* determine the transition from an active state */
active_transit:
event = fls(object->events & object->event_mask) - 1;
switch (event) {
case FSCACHE_OBJECT_EV_WITHDRAW:
case FSCACHE_OBJECT_EV_RETIRE:
case FSCACHE_OBJECT_EV_RELEASE:
case FSCACHE_OBJECT_EV_ERROR:
new_state = FSCACHE_OBJECT_DYING;
goto change_state;
case FSCACHE_OBJECT_EV_INVALIDATE:
new_state = FSCACHE_OBJECT_INVALIDATING;
goto change_state;
case FSCACHE_OBJECT_EV_UPDATE:
new_state = FSCACHE_OBJECT_UPDATING;
goto change_state;
case -1:
new_state = FSCACHE_OBJECT_ACTIVE;
goto change_state; /* sleep until event */
default:
goto unsupported_event;
}
/* determine the transition from a terminal state */
terminal_transit:
event = fls(object->events & object->event_mask) - 1;
switch (event) {
case FSCACHE_OBJECT_EV_WITHDRAW:
new_state = FSCACHE_OBJECT_WITHDRAWING;
goto change_state;
case FSCACHE_OBJECT_EV_RETIRE:
new_state = FSCACHE_OBJECT_RECYCLING;
goto change_state;
case FSCACHE_OBJECT_EV_RELEASE:
new_state = FSCACHE_OBJECT_RELEASING;
goto change_state;
case FSCACHE_OBJECT_EV_ERROR:
new_state = FSCACHE_OBJECT_WITHDRAWING;
goto change_state;
case FSCACHE_OBJECT_EV_CLEARED:
new_state = FSCACHE_OBJECT_DYING;
goto change_state;
case -1:
goto done; /* sleep until event */
default:
goto unsupported_event;
}
change_state:
spin_lock(&object->lock);
object->state = new_state;
spin_unlock(&object->lock);
done:
_leave(" [->%s]", fscache_object_states[object->state]);
return;
unsupported_event:
printk(KERN_ERR "FS-Cache:"
" Unsupported event %d [%lx/%lx] in state %s\n",
event, object->events, object->event_mask,
fscache_object_states[object->state]);
BUG();
}
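/*
 * Note (sketch, not in the original): the *_transit labels above use
 * fls() so that the highest-numbered pending, unmasked event wins,
 * and -1 falls out naturally when nothing is pending. A
 * self-contained model of that dispatch:
 */
#if 0	/* documentation only */
static int example_next_event(unsigned long events, unsigned long mask)
{
	return fls(events & mask) - 1;	/* -1 == sleep until an event */
}
#endif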
/*
* execute an object
*/
void fscache_object_work_func(struct work_struct *work)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
unsigned long start;
_enter("{OBJ%x}", object->debug_id);
start = jiffies;
fscache_object_state_machine(object);
fscache_hist(fscache_objs_histogram, start);
if (object->events & object->event_mask)
fscache_enqueue_object(object);
clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
fscache_put_object(object);
}
EXPORT_SYMBOL(fscache_object_work_func);
/*
* initialise an object
* - check the specified object's parent to see if we can make use of it
* immediately to do a creation
* - we may need to start the process of creating a parent and we need to wait
* for the parent's lookup and creation to complete if it's not there yet
* - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
* leaf-most cookies of the object and all its children
*/
static void fscache_initialise_object(struct fscache_object *object)
{
struct fscache_object *parent;
_enter("");
ASSERT(object->cookie != NULL);
ASSERT(object->cookie->parent != NULL);
if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) |
(1 << FSCACHE_OBJECT_EV_RELEASE) |
(1 << FSCACHE_OBJECT_EV_RETIRE) |
(1 << FSCACHE_OBJECT_EV_WITHDRAW))) {
_debug("abort init %lx", object->events);
spin_lock(&object->lock);
object->state = FSCACHE_OBJECT_ABORT_INIT;
spin_unlock(&object->lock);
return;
}
spin_lock(&object->cookie->lock);
spin_lock_nested(&object->cookie->parent->lock, 1);
parent = object->parent;
if (!parent) {
_debug("no parent");
set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
} else {
spin_lock(&object->lock);
spin_lock_nested(&parent->lock, 1);
_debug("parent %s", fscache_object_states[parent->state]);
if (parent->state >= FSCACHE_OBJECT_DYING) {
_debug("bad parent");
set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
} else if (parent->state < FSCACHE_OBJECT_AVAILABLE) {
_debug("wait");
/* we may get woken up in this state by child objects
* binding on to us, so we need to make sure we don't
* add ourselves to the list multiple times */
if (list_empty(&object->dep_link)) {
fscache_stat(&fscache_n_cop_grab_object);
object->cache->ops->grab_object(object);
fscache_stat_d(&fscache_n_cop_grab_object);
list_add(&object->dep_link,
&parent->dependents);
/* fscache_acquire_non_index_cookie() uses this
* to wake the chain up */
if (parent->state == FSCACHE_OBJECT_INIT)
fscache_enqueue_object(parent);
}
} else {
_debug("go");
parent->n_ops++;
parent->n_obj_ops++;
object->lookup_jif = jiffies;
object->state = FSCACHE_OBJECT_LOOKING_UP;
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
}
spin_unlock(&parent->lock);
spin_unlock(&object->lock);
}
spin_unlock(&object->cookie->parent->lock);
spin_unlock(&object->cookie->lock);
_leave("");
}
/*
* look an object up in the cache from which it was allocated
* - we hold an "access lock" on the parent object, so the parent object cannot
* be withdrawn by either party till we've finished
* - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
* leaf-most cookies of the object and all its children
*/
static void fscache_lookup_object(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
struct fscache_object *parent;
int ret;
_enter("");
parent = object->parent;
ASSERT(parent != NULL);
ASSERTCMP(parent->n_ops, >, 0);
ASSERTCMP(parent->n_obj_ops, >, 0);
/* make sure the parent is still available */
ASSERTCMP(parent->state, >=, FSCACHE_OBJECT_AVAILABLE);
if (parent->state >= FSCACHE_OBJECT_DYING ||
test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
_debug("unavailable");
set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
_leave("");
return;
}
_debug("LOOKUP \"%s/%s\" in \"%s\"",
parent->cookie->def->name, cookie->def->name,
object->cache->tag->name);
fscache_stat(&fscache_n_object_lookups);
fscache_stat(&fscache_n_cop_lookup_object);
ret = object->cache->ops->lookup_object(object);
fscache_stat_d(&fscache_n_cop_lookup_object);
if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
if (ret == -ETIMEDOUT) {
/* probably stuck behind another object, so move this one to
* the back of the queue */
fscache_stat(&fscache_n_object_lookups_timed_out);
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
}
_leave("");
}
/**
* fscache_object_lookup_negative - Note negative cookie lookup
* @object: Object pointing to cookie to mark
*
* Note negative lookup, permitting those waiting to read data from an already
* existing backing object to continue as there's no data for them to read.
*/
void fscache_object_lookup_negative(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
_enter("{OBJ%x,%s}",
object->debug_id, fscache_object_states[object->state]);
spin_lock(&object->lock);
if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
fscache_stat(&fscache_n_object_lookups_negative);
/* transit here to allow write requests to begin stacking up
* and read requests to begin returning ENODATA */
object->state = FSCACHE_OBJECT_CREATING;
spin_unlock(&object->lock);
set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags);
set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
_debug("wake up lookup %p", &cookie->flags);
smp_mb__before_clear_bit();
clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
smp_mb__after_clear_bit();
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
} else {
ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
spin_unlock(&object->lock);
}
_leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);
/**
* fscache_obtained_object - Note successful object lookup or creation
* @object: Object pointing to cookie to mark
*
* Note successful lookup and/or creation, permitting those waiting to write
* data to a backing object to continue.
*
* Note that after calling this, an object's cookie may be relinquished by the
* netfs, and so must be accessed with object lock held.
*/
void fscache_obtained_object(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
_enter("{OBJ%x,%s}",
object->debug_id, fscache_object_states[object->state]);
/* if we were still looking up, then we must have a positive lookup
* result, in which case there may be data available */
spin_lock(&object->lock);
if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
fscache_stat(&fscache_n_object_lookups_positive);
clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
object->state = FSCACHE_OBJECT_AVAILABLE;
spin_unlock(&object->lock);
smp_mb__before_clear_bit();
clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
smp_mb__after_clear_bit();
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
} else {
ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
fscache_stat(&fscache_n_object_created);
object->state = FSCACHE_OBJECT_AVAILABLE;
spin_unlock(&object->lock);
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
smp_wmb();
}
if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags))
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING);
_leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
/*
* handle an object that has just become available
*/
static void fscache_object_available(struct fscache_object *object)
{
_enter("{OBJ%x}", object->debug_id);
spin_lock(&object->lock);
if (object->cookie &&
test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
fscache_done_parent_op(object);
if (object->n_in_progress == 0) {
if (object->n_ops > 0) {
ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
fscache_start_operations(object);
} else {
ASSERT(list_empty(&object->pending_ops));
}
}
spin_unlock(&object->lock);
fscache_stat(&fscache_n_cop_lookup_complete);
object->cache->ops->lookup_complete(object);
fscache_stat_d(&fscache_n_cop_lookup_complete);
fscache_enqueue_dependents(object);
fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
fscache_stat(&fscache_n_object_avail);
_leave("");
}
/*
* drop an object's attachments
*/
static void fscache_drop_object(struct fscache_object *object)
{
struct fscache_object *parent = object->parent;
struct fscache_cache *cache = object->cache;
_enter("{OBJ%x,%d}", object->debug_id, object->n_children);
ASSERTCMP(object->cookie, ==, NULL);
ASSERT(hlist_unhashed(&object->cookie_link));
spin_lock(&cache->object_list_lock);
list_del_init(&object->cache_link);
spin_unlock(&cache->object_list_lock);
fscache_stat(&fscache_n_cop_drop_object);
cache->ops->drop_object(object);
fscache_stat_d(&fscache_n_cop_drop_object);
if (parent) {
_debug("release parent OBJ%x {%d}",
parent->debug_id, parent->n_children);
spin_lock(&parent->lock);
parent->n_children--;
if (parent->n_children == 0)
fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
spin_unlock(&parent->lock);
object->parent = NULL;
}
/* this just shifts the object release to the work processor */
fscache_put_object(object);
_leave("");
}
/*
* release or recycle an object that the netfs has discarded
*/
static void fscache_release_object(struct fscache_object *object)
{
_enter("");
fscache_drop_object(object);
}
/*
* withdraw an object from active service
*/
static void fscache_withdraw_object(struct fscache_object *object)
{
struct fscache_cookie *cookie;
bool detached;
_enter("");
spin_lock(&object->lock);
cookie = object->cookie;
if (cookie) {
/* need to get the cookie lock before the object lock, starting
* from the object pointer */
atomic_inc(&cookie->usage);
spin_unlock(&object->lock);
detached = false;
spin_lock(&cookie->lock);
spin_lock(&object->lock);
if (object->cookie == cookie) {
hlist_del_init(&object->cookie_link);
object->cookie = NULL;
fscache_invalidation_complete(cookie);
detached = true;
}
spin_unlock(&cookie->lock);
fscache_cookie_put(cookie);
if (detached)
fscache_cookie_put(cookie);
}
spin_unlock(&object->lock);
fscache_drop_object(object);
}
/*
* withdraw an object from active service at the behest of the cache
* - we need to break the links to a cached object cookie
* - called under two situations:
* (1) recycler decides to reclaim an in-use object
* (2) a cache is unmounted
* - have to take care as the cookie may be relinquished by the netfs
* simultaneously
* - the object is pinned by the caller holding a refcount on it
*/
void fscache_withdrawing_object(struct fscache_cache *cache,
struct fscache_object *object)
{
bool enqueue = false;
_enter(",OBJ%x", object->debug_id);
spin_lock(&object->lock);
if (object->state < FSCACHE_OBJECT_WITHDRAWING) {
object->state = FSCACHE_OBJECT_WITHDRAWING;
enqueue = true;
}
spin_unlock(&object->lock);
if (enqueue)
fscache_enqueue_object(object);
_leave("");
}
/*
* get a ref on an object
*/
static int fscache_get_object(struct fscache_object *object)
{
int ret;
fscache_stat(&fscache_n_cop_grab_object);
ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
fscache_stat_d(&fscache_n_cop_grab_object);
return ret;
}
/*
* discard a ref on a work item
*/
static void fscache_put_object(struct fscache_object *object)
{
fscache_stat(&fscache_n_cop_put_object);
object->cache->ops->put_object(object);
fscache_stat_d(&fscache_n_cop_put_object);
}
/*
* enqueue an object for metadata-type processing
*/
void fscache_enqueue_object(struct fscache_object *object)
{
_enter("{OBJ%x}", object->debug_id);
if (fscache_get_object(object) >= 0) {
wait_queue_head_t *cong_wq =
&get_cpu_var(fscache_object_cong_wait);
if (queue_work(fscache_object_wq, &object->work)) {
if (fscache_object_congested())
wake_up(cong_wq);
} else
fscache_put_object(object);
put_cpu_var(fscache_object_cong_wait);
}
}
/**
* fscache_object_sleep_till_congested - Sleep until object wq is congested
* @timeoutp: Scheduler sleep timeout
*
* Allow an object handler to sleep until the object workqueue is congested.
*
* The caller must set up a wake up event before calling this and must have set
* the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
* condition before calling this function, as no test is made here.
*
* %true is returned if the object wq is congested, %false otherwise.
*/
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait);
DEFINE_WAIT(wait);
if (fscache_object_congested())
return true;
add_wait_queue_exclusive(cong_wq, &wait);
if (!fscache_object_congested())
*timeoutp = schedule_timeout(*timeoutp);
finish_wait(cong_wq, &wait);
return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
/*
* enqueue the dependents of an object for metadata-type processing
* - the caller must hold the object's lock
* - this may cause an already locked object to wind up being processed again
*/
static void fscache_enqueue_dependents(struct fscache_object *object)
{
struct fscache_object *dep;
_enter("{OBJ%x}", object->debug_id);
if (list_empty(&object->dependents))
return;
spin_lock(&object->lock);
while (!list_empty(&object->dependents)) {
dep = list_entry(object->dependents.next,
struct fscache_object, dep_link);
list_del_init(&dep->dep_link);
/* sort onto appropriate lists */
fscache_enqueue_object(dep);
fscache_put_object(dep);
if (!list_empty(&object->dependents))
cond_resched_lock(&object->lock);
}
spin_unlock(&object->lock);
}
/*
* remove an object from whatever queue it's waiting on
* - the caller must hold object->lock
*/
void fscache_dequeue_object(struct fscache_object *object)
{
_enter("{OBJ%x}", object->debug_id);
if (!list_empty(&object->dep_link)) {
spin_lock(&object->parent->lock);
list_del_init(&object->dep_link);
spin_unlock(&object->parent->lock);
}
_leave("");
}
/**
* fscache_check_aux - Ask the netfs whether an object on disk is still valid
* @object: The object to ask about
* @data: The auxiliary data for the object
* @datalen: The size of the auxiliary data
*
* This function consults the netfs about the coherency state of an object
*/
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data, uint16_t datalen)
{
enum fscache_checkaux result;
if (!object->cookie->def->check_aux) {
fscache_stat(&fscache_n_checkaux_none);
return FSCACHE_CHECKAUX_OKAY;
}
result = object->cookie->def->check_aux(object->cookie->netfs_data,
data, datalen);
switch (result) {
/* entry okay as is */
case FSCACHE_CHECKAUX_OKAY:
fscache_stat(&fscache_n_checkaux_okay);
break;
/* entry requires update */
case FSCACHE_CHECKAUX_NEEDS_UPDATE:
fscache_stat(&fscache_n_checkaux_update);
break;
/* entry requires deletion */
case FSCACHE_CHECKAUX_OBSOLETE:
fscache_stat(&fscache_n_checkaux_obsolete);
break;
default:
BUG();
}
return result;
}
EXPORT_SYMBOL(fscache_check_aux);
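/*
 * Sketch (hypothetical netfs code, not part of FS-Cache itself): a
 * minimal check_aux callback comparing stored auxiliary data against
 * the netfs's current view, assuming the netfs records a single
 * modification-time stamp; "struct example_inode" and its mtime field
 * are invented for illustration:
 */
#if 0	/* documentation only */
static enum fscache_checkaux example_check_aux(void *cookie_netfs_data,
					       const void *data,
					       uint16_t datalen)
{
	struct example_inode *ei = cookie_netfs_data;

	if (datalen != sizeof(ei->mtime))
		return FSCACHE_CHECKAUX_OBSOLETE;
	if (memcmp(data, &ei->mtime, sizeof(ei->mtime)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;
	return FSCACHE_CHECKAUX_OKAY;
}
#endif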
/*
* Asynchronously invalidate an object.
*/
static void fscache_invalidate_object(struct fscache_object *object)
{
struct fscache_operation *op;
struct fscache_cookie *cookie = object->cookie;
_enter("{OBJ%x}", object->debug_id);
/* Reject any new read/write ops and abort any that are pending. */
fscache_invalidate_writes(cookie);
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_cancel_all_ops(object);
/* Now we have to wait for in-progress reads and writes */
op = kzalloc(sizeof(*op), GFP_KERNEL);
if (!op) {
fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
_leave(" [ENOMEM]");
return;
}
fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
spin_lock(&cookie->lock);
if (fscache_submit_exclusive_op(object, op) < 0)
goto submit_op_failed;
spin_unlock(&cookie->lock);
fscache_put_operation(op);
/* Once we've completed the invalidation, we know there will be no data
* stored in the cache and thus we can reinstate the data-check-skip
* optimisation.
*/
set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
/* We can allow read and write requests to come in once again. They'll
* queue up behind our exclusive invalidation operation.
*/
fscache_invalidation_complete(cookie);
_leave("");
return;
submit_op_failed:
spin_unlock(&cookie->lock);
kfree(op);
fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
_leave(" [EIO]");
}
| gpl-2.0 |
Shabbypenguin/Kettle_Corn_Kernel | arch/mips/sgi-ip22/ip22-mc.c | 2751 | 6848 | /*
* ip22-mc.c: Routines for manipulating SGI Memory Controller.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu) - Indigo2 changes
* Copyright (C) 2003 Ladislav Michl (ladis@linux-mips.org)
* Copyright (C) 2004 Peter Fuerst (pf@net.alphadv.de) - IP28
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/sgialib.h>
#include <asm/sgi/mc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
struct sgimc_regs *sgimc;
EXPORT_SYMBOL(sgimc);
static inline unsigned long get_bank_addr(unsigned int memconfig)
{
return ((memconfig & SGIMC_MCONFIG_BASEADDR) <<
((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22));
}
static inline unsigned long get_bank_size(unsigned int memconfig)
{
return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) <<
((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14);
}
static inline unsigned int get_bank_config(int bank)
{
unsigned int res = bank > 1 ? sgimc->mconfig1 : sgimc->mconfig0;
return bank % 2 ? res & 0xffff : res >> 16;
}
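/*
 * Sketch (illustration only): the helpers above pair a 24/16-bit
 * shift on rev >= 5 memory controllers with a 22/14-bit shift on
 * older ones. Duplicating the rev >= 5 case side by side makes the
 * pairing visible; this is documentation, not used by the driver:
 */
#if 0	/* documentation only */
static void example_decode_bank_rev5(unsigned int memconfig,
				     unsigned long *addr,
				     unsigned long *size)
{
	*addr = (unsigned long)(memconfig & SGIMC_MCONFIG_BASEADDR) << 24;
	*size = ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) << 16;
}
#endif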
struct mem {
unsigned long addr;
unsigned long size;
};
/*
* Detect installed memory, do some sanity checks and notify the kernel about it
*/
static void __init probe_memory(void)
{
int i, j, found, cnt = 0;
struct mem bank[4];
struct mem space[2] = {{SGIMC_SEG0_BADDR, 0}, {SGIMC_SEG1_BADDR, 0}};
printk(KERN_INFO "MC: Probing memory configuration:\n");
for (i = 0; i < ARRAY_SIZE(bank); i++) {
unsigned int tmp = get_bank_config(i);
if (!(tmp & SGIMC_MCONFIG_BVALID))
continue;
bank[cnt].size = get_bank_size(tmp);
bank[cnt].addr = get_bank_addr(tmp);
printk(KERN_INFO " bank%d: %3ldM @ %08lx\n",
i, bank[cnt].size / 1024 / 1024, bank[cnt].addr);
cnt++;
}
/* And you thought bubble sort was a dead algorithm... */
do {
unsigned long addr, size;
found = 0;
for (i = 1; i < cnt; i++)
if (bank[i-1].addr > bank[i].addr) {
addr = bank[i].addr;
size = bank[i].size;
bank[i].addr = bank[i-1].addr;
bank[i].size = bank[i-1].size;
bank[i-1].addr = addr;
bank[i-1].size = size;
found = 1;
}
} while (found);
/* Figure out how memory banks are mapped into spaces */
for (i = 0; i < cnt; i++) {
found = 0;
for (j = 0; j < ARRAY_SIZE(space) && !found; j++)
if (space[j].addr + space[j].size == bank[i].addr) {
space[j].size += bank[i].size;
found = 1;
}
/* There is either a hole or overlapping memory */
if (!found)
printk(KERN_CRIT "MC: Memory configuration mismatch "
"(%08lx), expect Bus Error soon\n",
bank[i].addr);
}
for (i = 0; i < ARRAY_SIZE(space); i++)
if (space[i].size)
add_memory_region(space[i].addr, space[i].size,
BOOT_MEM_RAM);
}
void __init sgimc_init(void)
{
u32 tmp;
/* ioremap can't fail */
sgimc = (struct sgimc_regs *)
ioremap(SGIMC_BASE, sizeof(struct sgimc_regs));
printk(KERN_INFO "MC: SGI memory controller Revision %d\n",
(int) sgimc->systemid & SGIMC_SYSID_MASKREV);
/* Place the MC into a known state. This must be done before
* interrupts are first enabled etc.
*/
/* Step 0: Make sure we turn off the watchdog in case it's
* still running (which might be the case after a
* soft reboot).
*/
tmp = sgimc->cpuctrl0;
tmp &= ~SGIMC_CCTRL0_WDOG;
sgimc->cpuctrl0 = tmp;
/* Step 1: The CPU/GIO error status registers will not latch
* up a new error status until the register has been
* cleared by the cpu. These status registers are
* cleared by writing any value to them.
*/
sgimc->cstat = sgimc->gstat = 0;
/* Step 2: Enable all parity checking in cpu control register
* zero.
*/
/* don't touch parity settings for IP28 */
#ifndef CONFIG_SGI_IP28
tmp = sgimc->cpuctrl0;
tmp |= (SGIMC_CCTRL0_EPERRGIO | SGIMC_CCTRL0_EPERRMEM |
SGIMC_CCTRL0_R4KNOCHKPARR);
#endif
sgimc->cpuctrl0 = tmp;
/* Step 3: Setup the MC write buffer depth, this is controlled
* in cpu control register 1 in the lower 4 bits.
*/
tmp = sgimc->cpuctrl1;
tmp &= ~0xf;
tmp |= 0xd;
sgimc->cpuctrl1 = tmp;
/* Step 4: Initialize the RPSS divider register to run as fast
* as it can correctly operate. The register is laid
* out as follows:
*
* ----------------------------------------
* | RESERVED | INCREMENT | DIVIDER |
* ----------------------------------------
* 31 16 15 8 7 0
*
* DIVIDER determines how often a 'tick' happens,
* INCREMENT determines by how much the RPSS increment
* register's value increases at each 'tick'. Thus,
* for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
*/
sgimc->divider = 0x101;
/* Step 5: Initialize GIO64 arbitrator configuration register.
*
* NOTE: HPC init code in sgihpc_init() must run before us because
* we need to know Guiness vs. FullHouse and the board
* revision on this machine. You have been warned.
*/
/* First the basic invariants across all GIO64 implementations. */
tmp = SGIMC_GIOPAR_HPC64; /* All 1st HPC's interface at 64bits */
tmp |= SGIMC_GIOPAR_ONEBUS; /* Only one physical GIO bus exists */
if (ip22_is_fullhouse()) {
/* Fullhouse specific settings. */
if (SGIOC_SYSID_BOARDREV(sgioc->sysid) < 2) {
tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC at 64bits */
tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp0 pipelines */
tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
tmp |= SGIMC_GIOPAR_RTIMEEXP0; /* exp0 is realtime */
} else {
tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC 64bits */
tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp[01] pipelined */
tmp |= SGIMC_GIOPAR_PLINEEXP1;
tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
tmp |= SGIMC_GIOPAR_GFX64; /* GFX at 64 bits */
}
} else {
/* Guiness specific settings. */
tmp |= SGIMC_GIOPAR_EISA64; /* MC talks to EISA at 64bits */
tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
}
sgimc->giopar = tmp; /* poof */
probe_memory();
}
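/*
 * Note (sketch, not in the original): per the layout comment in
 * sgimc_init(), the RPSS divider register packs its fields as
 * (INCREMENT << 8) | DIVIDER, so the 0x101 written there means
 * "increment by 1 on every tick". A hypothetical encoder:
 */
#if 0	/* documentation only */
static u32 example_rpss_config(u8 increment, u8 divider)
{
	return ((u32)increment << 8) | divider;	/* (1, 1) -> 0x101 */
}
#endif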
void __init prom_meminit(void) {}
void __init prom_free_prom_memory(void)
{
#ifdef CONFIG_SGI_IP28
u32 mconfig1;
unsigned long flags;
static DEFINE_SPINLOCK(lock);	/* must be initialised before use */
/*
* because ARCS accesses memory uncached, we wait until ARCS
* is no longer needed before we switch from slow to
* normal mode
*/
spin_lock_irqsave(&lock, flags);
mconfig1 = sgimc->mconfig1;
/* map ECC register */
sgimc->mconfig1 = (mconfig1 & 0xffff0000) | 0x2060;
iob();
/* switch to normal mode */
*(unsigned long *)PHYS_TO_XKSEG_UNCACHED(0x60000000) = 0;
iob();
/* reduce WR_COL */
sgimc->cmacc = (sgimc->cmacc & ~0xf) | 4;
iob();
/* restore old config */
sgimc->mconfig1 = mconfig1;
iob();
spin_unlock_irqrestore(&lock, flags);
#endif
}
| gpl-2.0 |
MikePach/stock_kernel_lge_l2s | arch/arm/kernel/crunch.c | 4543 | 2112 | /*
* arch/arm/kernel/crunch.c
* Cirrus MaverickCrunch context switching and handling
*
* Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/ep93xx-regs.h>
#include <asm/thread_notify.h>
struct crunch_state *crunch_owner;
void crunch_task_release(struct thread_info *thread)
{
local_irq_disable();
if (crunch_owner == &thread->crunchstate)
crunch_owner = NULL;
local_irq_enable();
}
static int crunch_enabled(u32 devcfg)
{
return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
}
static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
{
struct thread_info *thread = (struct thread_info *)t;
struct crunch_state *crunch_state;
u32 devcfg;
crunch_state = &thread->crunchstate;
switch (cmd) {
case THREAD_NOTIFY_FLUSH:
memset(crunch_state, 0, sizeof(*crunch_state));
/*
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
case THREAD_NOTIFY_EXIT:
crunch_task_release(thread);
break;
case THREAD_NOTIFY_SWITCH:
devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
/*
* We don't use ep93xx_syscon_swlocked_write() here
* because we are on the context switch path and
* preemption is already disabled.
*/
devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
}
break;
}
return NOTIFY_DONE;
}
static struct notifier_block crunch_notifier_block = {
.notifier_call = crunch_do,
};
static int __init crunch_init(void)
{
thread_register_notifier(&crunch_notifier_block);
elf_hwcap |= HWCAP_CRUNCH;
return 0;
}
late_initcall(crunch_init);
| gpl-2.0 |
12019/old_samsung-lt02wifi-kernel | drivers/watchdog/iTCO_vendor_support.c | 4799 | 11246 | /*
* intel TCO vendor specific watchdog driver support
*
* (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*/
/*
* Includes, defines, variables, module parameters, ...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Module and version information */
#define DRV_NAME "iTCO_vendor_support"
#define DRV_VERSION "1.04"
/* Includes */
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/ioport.h> /* For io-port access */
#include <linux/io.h> /* For inb/outb/... */
#include "iTCO_vendor.h"
/* iTCO defines */
#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
#define TCOBASE (acpibase + 0x60) /* TCO base address */
#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD 1
/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
#define SUPERMICRO_NEW_BOARD 2
/* Broken BIOS */
#define BROKEN_BIOS 911
static int vendorsupport;
module_param(vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
"0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, "
"911=Broken SMI BIOS");
/*
* Vendor Specific Support
*/
/*
* Vendor Support: 1
* Board: Super Micro Computer Inc. 370SSE+-OEM1/P3TSSE
* iTCO chipset: ICH2
*
* Code contributed by: R. Seretny <lkpatches@paypc.com>
* Documentation obtained by R. Seretny from SuperMicro Technical Support
*
* To enable Watchdog function:
* BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes
* This setting enables SMI to clear the watchdog expired flag.
 * If the BIOS or CPU fails, which may cause the SMI to hang, the
 * system will reboot. When an application starts to use the watchdog
 * function, it has to take over control from the SMI.
*
* For P3TSSE, J36 jumper needs to be removed to enable the Watchdog
* function.
*
* Note: The system will reboot when Expire Flag is set TWICE.
* So, if the watchdog timer is 20 seconds, then the maximum hang
* time is about 40 seconds, and the minimum hang time is about
* 20.6 seconds.
*/
static void supermicro_old_pre_start(unsigned long acpibase)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN); /* Needed to activate watchdog */
}
static void supermicro_old_pre_stop(unsigned long acpibase)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
val32 = inl(SMI_EN);
val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
outl(val32, SMI_EN); /* Needed to deactivate watchdog */
}
/*
* Vendor Support: 2
* Board: Super Micro Computer Inc. P4SBx, P4DPx
* iTCO chipset: ICH4
*
* Code contributed by: R. Seretny <lkpatches@paypc.com>
* Documentation obtained by R. Seretny from SuperMicro Technical Support
*
* To enable Watchdog function:
* 1. BIOS
* For P4SBx:
* BIOS setup -> Advanced -> Integrated Peripherals -> Watch Dog Feature
* For P4DPx:
* BIOS setup -> Advanced -> I/O Device Configuration -> Watch Dog
* This setting enables or disables Watchdog function. When enabled, the
* default watchdog timer is set to be 5 minutes (about 4m35s). It is
* enough to load and run the OS. The application (service or driver) has
* to take over the control once OS is running up and before watchdog
* expires.
*
* 2. JUMPER
* For P4SBx: JP39
* For P4DPx: JP37
 * This jumper is used for safety. Closed is enabled. This jumper
 * prevents the user from enabling the watchdog in the BIOS by accident.
*
* To enable Watch Dog function, both BIOS and JUMPER must be enabled.
*
* The documentation lists motherboards P4SBx and P4DPx series as of
* 20-March-2002. However, this code works flawlessly with much newer
* motherboards, such as my X6DHR-8G2 (SuperServer 6014H-82).
*
* The original iTCO driver as written does not actually reset the
* watchdog timer on these machines, as a result they reboot after five
* minutes.
*
* NOTE: You may leave the Watchdog function disabled in the SuperMicro
* BIOS to avoid a "boot-race"... This driver will enable watchdog
* functionality even if it's disabled in the BIOS once the /dev/watchdog
* file is opened.
*/
/* I/O Port's */
#define SM_REGINDEX 0x2e /* SuperMicro ICH4+ Register Index */
#define SM_DATAIO 0x2f /* SuperMicro ICH4+ Register Data I/O */
/* Control Register's */
#define SM_CTLPAGESW 0x07 /* SuperMicro ICH4+ Control Page Switch */
#define SM_CTLPAGE 0x08 /* SuperMicro ICH4+ Control Page Num */
#define SM_WATCHENABLE 0x30 /* Watchdog enable: Bit 0: 0=off, 1=on */
#define SM_WATCHPAGE 0x87 /* Watchdog unlock control page */
#define SM_ENDWATCH 0xAA /* Watchdog lock control page */
#define SM_COUNTMODE 0xf5 /* Watchdog count mode select */
					/* (Bit 3: 0 = seconds, 1 = minutes) */
#define SM_WATCHTIMER 0xf6 /* 8-bits, Watchdog timer counter (RW) */
#define SM_RESETCONTROL 0xf7 /* Watchdog reset control */
/* Bit 6: timer is reset by kbd interrupt */
/* Bit 7: timer is reset by mouse interrupt */
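/*
 * Access-pattern sketch (it mirrors what the functions below
 * open-code; the driver defines no such helper itself): the chip is
 * programmed through the classic SuperI/O index/data pair. Write a
 * register number to SM_REGINDEX, then move its data through
 * SM_DATAIO:
 *
 *	static u8 sm_readb(u8 reg)
 *	{
 *		outb(reg, SM_REGINDEX);
 *		return inb(SM_DATAIO);
 *	}
 */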
static void supermicro_new_unlock_watchdog(void)
{
/* Write 0x87 to port 0x2e twice */
outb(SM_WATCHPAGE, SM_REGINDEX);
outb(SM_WATCHPAGE, SM_REGINDEX);
/* Switch to watchdog control page */
outb(SM_CTLPAGESW, SM_REGINDEX);
outb(SM_CTLPAGE, SM_DATAIO);
}
static void supermicro_new_lock_watchdog(void)
{
outb(SM_ENDWATCH, SM_REGINDEX);
}
static void supermicro_new_pre_start(unsigned int heartbeat)
{
unsigned int val;
supermicro_new_unlock_watchdog();
/* Watchdog timer setting needs to be in seconds*/
outb(SM_COUNTMODE, SM_REGINDEX);
val = inb(SM_DATAIO);
val &= 0xF7;
outb(val, SM_DATAIO);
/* Write heartbeat interval to WDOG */
outb(SM_WATCHTIMER, SM_REGINDEX);
outb((heartbeat & 255), SM_DATAIO);
/* Make sure keyboard/mouse interrupts don't interfere */
outb(SM_RESETCONTROL, SM_REGINDEX);
val = inb(SM_DATAIO);
val &= 0x3f;
outb(val, SM_DATAIO);
/* enable watchdog by setting bit 0 of Watchdog Enable to 1 */
outb(SM_WATCHENABLE, SM_REGINDEX);
val = inb(SM_DATAIO);
val |= 0x01;
outb(val, SM_DATAIO);
supermicro_new_lock_watchdog();
}
static void supermicro_new_pre_stop(void)
{
unsigned int val;
supermicro_new_unlock_watchdog();
/* disable watchdog by setting bit 0 of Watchdog Enable to 0 */
outb(SM_WATCHENABLE, SM_REGINDEX);
val = inb(SM_DATAIO);
val &= 0xFE;
outb(val, SM_DATAIO);
supermicro_new_lock_watchdog();
}
static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
{
supermicro_new_unlock_watchdog();
	/* reset watchdog timeout to heartbeat value */
outb(SM_WATCHTIMER, SM_REGINDEX);
outb((heartbeat & 255), SM_DATAIO);
supermicro_new_lock_watchdog();
}
/*
* Vendor Support: 911
* Board: Some Intel ICHx based motherboards
* iTCO chipset: ICH7+
*
* Some Intel motherboards have a broken BIOS implementation: i.e.
 * the SMI handler clears the TIMEOUT bit in the TCO1_STS register
 * and does not reload the timer. Thus the TCO watchdog does not reboot
* the system.
*
* These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after
* debugging: the SMI handler is quite simple - it tests value in
* TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set
* the handler goes into an infinite loop, apparently to allow the
* second timeout and reboot. Otherwise it simply clears TIMEOUT bit
* in TCO1_STS and that's it.
* So the logic seems to be reversed, because it is hard to see how
* TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set
* (other than a transitional effect).
*
 * The only fix found to get the motherboard(s) to reboot is to set
 * the GBL_SMI_EN bit to 0. This is a dirty hack that bypasses the
* broken code by disabling Global SMI.
*
* WARNING: globally disabling SMI could possibly lead to dramatic
* problems, especially on laptops! I.e. various ACPI things where
* SMI is used for communication between OS and firmware.
*
* Don't use this fix if you don't need to!!!
*/
static void broken_bios_start(unsigned long acpibase)
{
unsigned long val32;
val32 = inl(SMI_EN);
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
val32 &= 0xffffdffe;
outl(val32, SMI_EN);
}
static void broken_bios_stop(unsigned long acpibase)
{
unsigned long val32;
val32 = inl(SMI_EN);
/* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
val32 |= 0x00002001;
outl(val32, SMI_EN);
}
/*
* Generic Support Functions
*/
void iTCO_vendor_pre_start(unsigned long acpibase,
unsigned int heartbeat)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_start(acpibase);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_start(heartbeat);
break;
case BROKEN_BIOS:
broken_bios_start(acpibase);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
void iTCO_vendor_pre_stop(unsigned long acpibase)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_stop(acpibase);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_stop();
break;
case BROKEN_BIOS:
broken_bios_stop(acpibase);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);
void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_keepalive);
void iTCO_vendor_pre_set_heartbeat(unsigned int heartbeat)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_set_heartbeat(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat);
int iTCO_vendor_check_noreboot_on(void)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
return 0;
default:
return 1;
}
}
EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
static int __init iTCO_vendor_init_module(void)
{
pr_info("vendor-support=%d\n", vendorsupport);
return 0;
}
static void __exit iTCO_vendor_exit_module(void)
{
pr_info("Module Unloaded\n");
}
module_init(iTCO_vendor_init_module);
module_exit(iTCO_vendor_exit_module);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>, "
"R. Seretny <lkpatches@paypc.com>");
MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Thecrazyskull/kernel_msm | drivers/macintosh/windfarm_smu_controls.c | 4799 | 8267 | /*
* Windfarm PowerMac thermal control. SMU based controls
*
* (c) Copyright 2005 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Released under the term of the GNU GPL v2.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/completion.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/smu.h>
#include "windfarm.h"
#define VERSION "0.4"
#undef DEBUG
#ifdef DEBUG
#define DBG(args...) printk(args)
#else
#define DBG(args...) do { } while(0)
#endif
static int smu_supports_new_fans_ops = 1;
/*
* SMU fans control object
*/
static LIST_HEAD(smu_fans);
struct smu_fan_control {
struct list_head link;
int fan_type; /* 0 = rpm, 1 = pwm */
u32 reg; /* index in SMU */
s32 value; /* current value */
s32 min, max; /* min/max values */
struct wf_control ctrl;
};
#define to_smu_fan(c) container_of(c, struct smu_fan_control, ctrl)
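/*
 * Wire-format sketch, derived from the code in smu_set_fan() below
 * rather than from SMU documentation: setting fan id 2 to 1200 RPM
 * with the "new" command sends the four bytes
 * { 0x30, 0x02, <1200 as a u16> }, while the "old" command selects
 * fans with a bitmask in buffer[1] (1 << 2 here) and stores each
 * value at a per-fan offset, so the u16 lands at bytes 6-7 of a
 * 14-byte argument block.
 */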
static int smu_set_fan(int pwm, u8 id, u16 value)
{
struct smu_cmd cmd;
u8 buffer[16];
DECLARE_COMPLETION_ONSTACK(comp);
int rc;
/* Fill SMU command structure */
cmd.cmd = SMU_CMD_FAN_COMMAND;
	/* The SMU has an "old" and a "new" way of setting the fan speed.
	 * Unfortunately, I found no reliable way to know which one works
	 * on a given machine model. After some investigation it appears
	 * that MacOS X just tries the new one, and if it fails falls back
	 * to the old one ... Ugh.
*/
retry:
if (smu_supports_new_fans_ops) {
buffer[0] = 0x30;
buffer[1] = id;
*((u16 *)(&buffer[2])) = value;
cmd.data_len = 4;
} else {
if (id > 7)
return -EINVAL;
/* Fill argument buffer */
memset(buffer, 0, 16);
buffer[0] = pwm ? 0x10 : 0x00;
buffer[1] = 0x01 << id;
*((u16 *)&buffer[2 + id * 2]) = value;
cmd.data_len = 14;
}
cmd.reply_len = 16;
cmd.data_buf = cmd.reply_buf = buffer;
cmd.status = 0;
cmd.done = smu_done_complete;
cmd.misc = ∁
rc = smu_queue_cmd(&cmd);
if (rc)
return rc;
wait_for_completion(&comp);
	/* Handle fallback (see comment above) */
if (cmd.status != 0 && smu_supports_new_fans_ops) {
printk(KERN_WARNING "windfarm: SMU failed new fan command "
"falling back to old method\n");
smu_supports_new_fans_ops = 0;
goto retry;
}
return cmd.status;
}
static void smu_fan_release(struct wf_control *ct)
{
struct smu_fan_control *fct = to_smu_fan(ct);
kfree(fct);
}
static int smu_fan_set(struct wf_control *ct, s32 value)
{
struct smu_fan_control *fct = to_smu_fan(ct);
if (value < fct->min)
value = fct->min;
if (value > fct->max)
value = fct->max;
fct->value = value;
return smu_set_fan(fct->fan_type, fct->reg, value);
}
static int smu_fan_get(struct wf_control *ct, s32 *value)
{
struct smu_fan_control *fct = to_smu_fan(ct);
*value = fct->value; /* todo: read from SMU */
return 0;
}
static s32 smu_fan_min(struct wf_control *ct)
{
struct smu_fan_control *fct = to_smu_fan(ct);
return fct->min;
}
static s32 smu_fan_max(struct wf_control *ct)
{
struct smu_fan_control *fct = to_smu_fan(ct);
return fct->max;
}
static struct wf_control_ops smu_fan_ops = {
.set_value = smu_fan_set,
.get_value = smu_fan_get,
.get_min = smu_fan_min,
.get_max = smu_fan_max,
.release = smu_fan_release,
.owner = THIS_MODULE,
};
static struct smu_fan_control *smu_fan_create(struct device_node *node,
int pwm_fan)
{
struct smu_fan_control *fct;
const s32 *v;
const u32 *reg;
const char *l;
fct = kmalloc(sizeof(struct smu_fan_control), GFP_KERNEL);
if (fct == NULL)
return NULL;
fct->ctrl.ops = &smu_fan_ops;
l = of_get_property(node, "location", NULL);
if (l == NULL)
goto fail;
fct->fan_type = pwm_fan;
fct->ctrl.type = pwm_fan ? WF_CONTROL_PWM_FAN : WF_CONTROL_RPM_FAN;
sysfs_attr_init(&fct->ctrl.attr.attr);
/* We use the name & location here the same way we do for SMU sensors,
* see the comment in windfarm_smu_sensors.c. The locations are a bit
* less consistent here between the iMac and the desktop models, but
* that is good enough for our needs for now at least.
*
* One problem though is that Apple seem to be inconsistent with case
* and the kernel doesn't have strcasecmp =P
*/
fct->ctrl.name = NULL;
/* Names used on desktop models */
if (!strcmp(l, "Rear Fan 0") || !strcmp(l, "Rear Fan") ||
!strcmp(l, "Rear fan 0") || !strcmp(l, "Rear fan") ||
!strcmp(l, "CPU A EXHAUST"))
fct->ctrl.name = "cpu-rear-fan-0";
else if (!strcmp(l, "Rear Fan 1") || !strcmp(l, "Rear fan 1") ||
!strcmp(l, "CPU B EXHAUST"))
fct->ctrl.name = "cpu-rear-fan-1";
else if (!strcmp(l, "Front Fan 0") || !strcmp(l, "Front Fan") ||
!strcmp(l, "Front fan 0") || !strcmp(l, "Front fan") ||
!strcmp(l, "CPU A INTAKE"))
fct->ctrl.name = "cpu-front-fan-0";
else if (!strcmp(l, "Front Fan 1") || !strcmp(l, "Front fan 1") ||
!strcmp(l, "CPU B INTAKE"))
fct->ctrl.name = "cpu-front-fan-1";
else if (!strcmp(l, "CPU A PUMP"))
fct->ctrl.name = "cpu-pump-0";
else if (!strcmp(l, "CPU B PUMP"))
fct->ctrl.name = "cpu-pump-1";
else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") ||
!strcmp(l, "EXPANSION SLOTS INTAKE"))
fct->ctrl.name = "slots-fan";
else if (!strcmp(l, "Drive Bay") || !strcmp(l, "Drive bay") ||
!strcmp(l, "DRIVE BAY A INTAKE"))
fct->ctrl.name = "drive-bay-fan";
else if (!strcmp(l, "BACKSIDE"))
fct->ctrl.name = "backside-fan";
/* Names used on iMac models */
if (!strcmp(l, "System Fan") || !strcmp(l, "System fan"))
fct->ctrl.name = "system-fan";
else if (!strcmp(l, "CPU Fan") || !strcmp(l, "CPU fan"))
fct->ctrl.name = "cpu-fan";
else if (!strcmp(l, "Hard Drive") || !strcmp(l, "Hard drive"))
fct->ctrl.name = "drive-bay-fan";
else if (!strcmp(l, "HDD Fan")) /* seen on iMac G5 iSight */
fct->ctrl.name = "hard-drive-fan";
else if (!strcmp(l, "ODD Fan")) /* same */
fct->ctrl.name = "optical-drive-fan";
/* Unrecognized fan, bail out */
if (fct->ctrl.name == NULL)
goto fail;
/* Get min & max values*/
v = of_get_property(node, "min-value", NULL);
if (v == NULL)
goto fail;
fct->min = *v;
v = of_get_property(node, "max-value", NULL);
if (v == NULL)
goto fail;
fct->max = *v;
/* Get "reg" value */
reg = of_get_property(node, "reg", NULL);
if (reg == NULL)
goto fail;
fct->reg = *reg;
if (wf_register_control(&fct->ctrl))
goto fail;
return fct;
fail:
kfree(fct);
return NULL;
}
static int __init smu_controls_init(void)
{
struct device_node *smu, *fans, *fan;
if (!smu_present())
return -ENODEV;
smu = of_find_node_by_type(NULL, "smu");
if (smu == NULL)
return -ENODEV;
/* Look for RPM fans */
for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;)
if (!strcmp(fans->name, "rpm-fans") ||
of_device_is_compatible(fans, "smu-rpm-fans"))
break;
for (fan = NULL;
fans && (fan = of_get_next_child(fans, fan)) != NULL;) {
struct smu_fan_control *fct;
fct = smu_fan_create(fan, 0);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
"RPM fan %s\n", fan->name);
continue;
}
list_add(&fct->link, &smu_fans);
}
of_node_put(fans);
/* Look for PWM fans */
for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;)
if (!strcmp(fans->name, "pwm-fans"))
break;
for (fan = NULL;
fans && (fan = of_get_next_child(fans, fan)) != NULL;) {
struct smu_fan_control *fct;
fct = smu_fan_create(fan, 1);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
"PWM fan %s\n", fan->name);
continue;
}
list_add(&fct->link, &smu_fans);
}
of_node_put(fans);
of_node_put(smu);
return 0;
}
static void __exit smu_controls_exit(void)
{
struct smu_fan_control *fct;
while (!list_empty(&smu_fans)) {
fct = list_entry(smu_fans.next, struct smu_fan_control, link);
list_del(&fct->link);
wf_unregister_control(&fct->ctrl);
}
}
module_init(smu_controls_init);
module_exit(smu_controls_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("SMU control objects for PowerMacs thermal control");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shepherd44/kernel_3.14.4 | sound/pci/emu10k1/timer.c | 4799 | 2750 | /*
* Copyright (c) by Lee Revell <rlrevell@joe-job.com>
* Clemens Ladisch <clemens@ladisch.de>
* Routines for control of EMU10K1 chips
*
* BUGS:
* --
*
* TODO:
* --
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
static int snd_emu10k1_timer_start(struct snd_timer *timer)
{
struct snd_emu10k1 *emu;
unsigned long flags;
unsigned int delay;
emu = snd_timer_chip(timer);
delay = timer->sticks - 1;
	if (delay < 5) /* minimum time is 5 ticks */
delay = 5;
spin_lock_irqsave(&emu->reg_lock, flags);
snd_emu10k1_intr_enable(emu, INTE_INTERVALTIMERENB);
outw(delay & TIMER_RATE_MASK, emu->port + TIMER);
spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
static int snd_emu10k1_timer_stop(struct snd_timer *timer)
{
struct snd_emu10k1 *emu;
unsigned long flags;
emu = snd_timer_chip(timer);
spin_lock_irqsave(&emu->reg_lock, flags);
snd_emu10k1_intr_disable(emu, INTE_INTERVALTIMERENB);
spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
static int snd_emu10k1_timer_precise_resolution(struct snd_timer *timer,
unsigned long *num, unsigned long *den)
{
*num = 1;
*den = 48000;
return 0;
}
static struct snd_timer_hardware snd_emu10k1_timer_hw = {
.flags = SNDRV_TIMER_HW_AUTO,
.resolution = 20833, /* 1 sample @ 48KHZ = 20.833...us */
.ticks = 1024,
.start = snd_emu10k1_timer_start,
.stop = snd_emu10k1_timer_stop,
.precise_resolution = snd_emu10k1_timer_precise_resolution,
};
int snd_emu10k1_timer(struct snd_emu10k1 *emu, int device)
{
struct snd_timer *timer = NULL;
struct snd_timer_id tid;
int err;
tid.dev_class = SNDRV_TIMER_CLASS_CARD;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = emu->card->number;
tid.device = device;
tid.subdevice = 0;
if ((err = snd_timer_new(emu->card, "EMU10K1", &tid, &timer)) >= 0) {
strcpy(timer->name, "EMU10K1 timer");
timer->private_data = emu;
timer->hw = snd_emu10k1_timer_hw;
}
emu->timer = timer;
return err;
}
| gpl-2.0 |
jfdsmabalot/kernel_lge_g2 | drivers/char/lp.c | 5055 | 26820 | /*
* Generic parallel printer driver
*
* Copyright (C) 1992 by Jim Weigand and Linus Torvalds
* Copyright (C) 1992,1993 by Michael K. Johnson
* - Thanks much to Gunter Windau for pointing out to me where the error
* checking ought to be.
* Copyright (C) 1993 by Nigel Gamble (added interrupt code)
* Copyright (C) 1994 by Alan Cox (Modularised it)
* LPCAREFUL, LPABORT, LPGETSTATUS added by Chris Metcalf, metcalf@lcs.mit.edu
* Statistics and support for slow printers by Rob Janssen, rob@knoware.nl
* "lp=" command line parameters added by Grant Guenther, grant@torque.net
* lp_read (Status readback) support added by Carsten Gross,
* carsten@sol.wohnheim.uni-ulm.de
* Support for parport by Philip Blundell <philb@gnu.org>
* Parport sharing hacking by Andrea Arcangeli
* Fixed kernel_(to/from)_user memory copy to check for errors
* by Riccardo Facchetti <fizban@tin.it>
* 22-JAN-1998 Added support for devfs Richard Gooch <rgooch@atnf.csiro.au>
 * Redesigned interrupt handling to handle printers with a buggy handshake
 * by Andrea Arcangeli, 11 May 1998
 * Fully efficient handling of printers with a buggy irq handshake (now I have
 * understood the meaning of the strange handshake). This is done by sending new
 * characters if the interrupt has just happened, even if the printer claims to
 * still be BUSY. This is needed at least with the Epson Stylus Color. To enable
* the new TRUST_IRQ mode read the `LP OPTIMIZATION' section below...
* Fixed the irq on the rising edge of the strobe case.
 * Obsoleted the CAREFUL flag since a printer that doesn't work with
 * CAREFUL will block a bit later in lp_check_status().
* Andrea Arcangeli, 15 Oct 1998
 * Obsoleted and removed all the low-level stuff implemented in the last
 * month in favour of the IEEE1284 functions (which handle the _new_
 * compatibility mode fine).
*/
/* This driver should, in theory, work with any parallel port that has an
* appropriate low-level driver; all I/O is done through the parport
* abstraction layer.
*
* If this driver is built into the kernel, you can configure it using the
* kernel command-line. For example:
*
* lp=parport1,none,parport2 (bind lp0 to parport1, disable lp1 and
* bind lp2 to parport2)
*
* lp=auto (assign lp devices to all ports that
* have printers attached, as determined
* by the IEEE-1284 autoprobe)
*
* lp=reset (reset the printer during
* initialisation)
*
* lp=off (disable the printer driver entirely)
*
* If the driver is loaded as a module, similar functionality is available
* using module parameters. The equivalent of the above commands would be:
*
* # insmod lp.o parport=1,none,2
*
* # insmod lp.o parport=auto
*
* # insmod lp.o reset=1
*/
/* COMPATIBILITY WITH OLD KERNELS
*
* Under Linux 2.0 and previous versions, lp devices were bound to ports at
* particular I/O addresses, as follows:
*
* lp0 0x3bc
* lp1 0x378
* lp2 0x278
*
* The new driver, by default, binds lp devices to parport devices as it
* finds them. This means that if you only have one port, it will be bound
* to lp0 regardless of its I/O address. If you need the old behaviour, you
* can force it using the parameters described above.
*/
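/* For example (a sketch only; the parportN numbering depends on the
 * probe order on your machine): if the ports were detected as
 * parport0=0x378, parport1=0x278 and parport2=0x3bc, the old fixed
 * binding could be restored with:
 *
 *	lp=parport2,parport0,parport1
 */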
/*
 * The new interrupt handling code takes care of the buggy handshake
 * of some HP and Epson printers:
* ___
* ACK _______________ ___________
* |__|
* ____
* BUSY _________ _______
* |____________|
*
* I discovered this using the printer scanner that you can find at:
*
* ftp://e-mind.com/pub/linux/pscan/
*
* 11 May 98, Andrea Arcangeli
*
 * My printer scanner, run on an Epson Stylus Color, shows that such a
 * printer generates the irq on the _rising_ edge of the STROBE. Now lp
 * handles this case fine too.
*
* 15 Oct 1998, Andrea Arcangeli
*
* The so called `buggy' handshake is really the well documented
* compatibility mode IEEE1284 handshake. They changed the well known
 * Centronics handshake, acking in the middle of busy, expecting not to
 * break drivers or legacy applications, while they broke Linux lp
 * until I fixed it by reverse engineering the protocol by hand some
 * months ago...
*
* 14 Dec 1998, Andrea Arcangeli
*
* Copyright (C) 2000 by Tim Waugh (added LPSETTIMEOUT ioctl)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/console.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/compat.h>
#include <linux/parport.h>
#undef LP_STATS
#include <linux/lp.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/* if you have more than 8 printers, remember to increase LP_NO */
#define LP_NO 8
static DEFINE_MUTEX(lp_mutex);
static struct lp_struct lp_table[LP_NO];
static unsigned int lp_count = 0;
static struct class *lp_class;
#ifdef CONFIG_LP_CONSOLE
static struct parport *console_registered;
#endif /* CONFIG_LP_CONSOLE */
#undef LP_DEBUG
/* Bits used to manage claiming the parport device */
#define LP_PREEMPT_REQUEST 1
#define LP_PARPORT_CLAIMED 2
/* --- low-level port access ----------------------------------- */
#define r_dtr(x) (parport_read_data(lp_table[(x)].dev->port))
#define r_str(x) (parport_read_status(lp_table[(x)].dev->port))
#define w_ctr(x,y) do { parport_write_control(lp_table[(x)].dev->port, (y)); } while (0)
#define w_dtr(x,y) do { parport_write_data(lp_table[(x)].dev->port, (y)); } while (0)
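/* For instance, a quick out-of-paper test on lp0 reduces to
 * (r_str(0) & LP_POUTPA); see lp_check_status() below for the full
 * status decoding. */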
/* Claim the parport or block trying unless we've already claimed it */
static void lp_claim_parport_or_block(struct lp_struct *this_lp)
{
if (!test_and_set_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
parport_claim_or_block (this_lp->dev);
}
}
/* Claim the parport or block trying unless we've already claimed it */
static void lp_release_parport(struct lp_struct *this_lp)
{
if (test_and_clear_bit(LP_PARPORT_CLAIMED, &this_lp->bits)) {
parport_release (this_lp->dev);
}
}
static int lp_preempt(void *handle)
{
struct lp_struct *this_lp = (struct lp_struct *)handle;
set_bit(LP_PREEMPT_REQUEST, &this_lp->bits);
return (1);
}
/*
* Try to negotiate to a new mode; if unsuccessful negotiate to
* compatibility mode. Return the mode we ended up in.
*/
static int lp_negotiate(struct parport * port, int mode)
{
if (parport_negotiate (port, mode) != 0) {
mode = IEEE1284_MODE_COMPAT;
parport_negotiate (port, mode);
}
return (mode);
}
static int lp_reset(int minor)
{
int retval;
lp_claim_parport_or_block (&lp_table[minor]);
w_ctr(minor, LP_PSELECP);
udelay (LP_DELAY);
w_ctr(minor, LP_PSELECP | LP_PINITP);
retval = r_str(minor);
lp_release_parport (&lp_table[minor]);
return retval;
}
static void lp_error (int minor)
{
DEFINE_WAIT(wait);
int polling;
if (LP_F(minor) & LP_ABORT)
return;
polling = lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE;
if (polling) lp_release_parport (&lp_table[minor]);
prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
schedule_timeout(LP_TIMEOUT_POLLED);
finish_wait(&lp_table[minor].waitq, &wait);
if (polling) lp_claim_parport_or_block (&lp_table[minor]);
else parport_yield_blocking (lp_table[minor].dev);
}
static int lp_check_status(int minor)
{
int error = 0;
unsigned int last = lp_table[minor].last_error;
unsigned char status = r_str(minor);
if ((status & LP_PERRORP) && !(LP_F(minor) & LP_CAREFUL))
/* No error. */
last = 0;
else if ((status & LP_POUTPA)) {
if (last != LP_POUTPA) {
last = LP_POUTPA;
printk(KERN_INFO "lp%d out of paper\n", minor);
}
error = -ENOSPC;
} else if (!(status & LP_PSELECD)) {
if (last != LP_PSELECD) {
last = LP_PSELECD;
printk(KERN_INFO "lp%d off-line\n", minor);
}
error = -EIO;
} else if (!(status & LP_PERRORP)) {
if (last != LP_PERRORP) {
last = LP_PERRORP;
printk(KERN_INFO "lp%d on fire\n", minor);
}
error = -EIO;
} else {
last = 0; /* Come here if LP_CAREFUL is set and no
errors are reported. */
}
lp_table[minor].last_error = last;
if (last != 0)
lp_error(minor);
return error;
}
static int lp_wait_ready(int minor, int nonblock)
{
int error = 0;
/* If we're not in compatibility mode, we're ready now! */
if (lp_table[minor].current_mode != IEEE1284_MODE_COMPAT) {
return (0);
}
do {
error = lp_check_status (minor);
if (error && (nonblock || (LP_F(minor) & LP_ABORT)))
break;
if (signal_pending (current)) {
error = -EINTR;
break;
}
} while (error);
return error;
}
static ssize_t lp_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
struct parport *port = lp_table[minor].dev->port;
char *kbuf = lp_table[minor].lp_buffer;
ssize_t retv = 0;
ssize_t written;
size_t copy_size = count;
int nonblock = ((file->f_flags & O_NONBLOCK) ||
(LP_F(minor) & LP_ABORT));
#ifdef LP_STATS
if (time_after(jiffies, lp_table[minor].lastcall + LP_TIME(minor)))
lp_table[minor].runchars = 0;
lp_table[minor].lastcall = jiffies;
#endif
/* Need to copy the data from user-space. */
if (copy_size > LP_BUFFER_SIZE)
copy_size = LP_BUFFER_SIZE;
if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
if (copy_from_user (kbuf, buf, copy_size)) {
retv = -EFAULT;
goto out_unlock;
}
/* Claim Parport or sleep until it becomes available
*/
lp_claim_parport_or_block (&lp_table[minor]);
/* Go to the proper mode. */
lp_table[minor].current_mode = lp_negotiate (port,
lp_table[minor].best_mode);
parport_set_timeout (lp_table[minor].dev,
(nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
: lp_table[minor].timeout));
if ((retv = lp_wait_ready (minor, nonblock)) == 0)
do {
/* Write the data. */
written = parport_write (port, kbuf, copy_size);
if (written > 0) {
copy_size -= written;
count -= written;
buf += written;
retv += written;
}
if (signal_pending (current)) {
if (retv == 0)
retv = -EINTR;
break;
}
if (copy_size > 0) {
/* incomplete write -> check error ! */
int error;
parport_negotiate (lp_table[minor].dev->port,
IEEE1284_MODE_COMPAT);
lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
error = lp_wait_ready (minor, nonblock);
if (error) {
if (retv == 0)
retv = error;
break;
} else if (nonblock) {
if (retv == 0)
retv = -EAGAIN;
break;
}
parport_yield_blocking (lp_table[minor].dev);
lp_table[minor].current_mode
= lp_negotiate (port,
lp_table[minor].best_mode);
} else if (need_resched())
schedule ();
if (count) {
copy_size = count;
if (copy_size > LP_BUFFER_SIZE)
copy_size = LP_BUFFER_SIZE;
if (copy_from_user(kbuf, buf, copy_size)) {
if (retv == 0)
retv = -EFAULT;
break;
}
}
} while (count > 0);
if (test_and_clear_bit(LP_PREEMPT_REQUEST,
&lp_table[minor].bits)) {
printk(KERN_INFO "lp%d releasing parport\n", minor);
parport_negotiate (lp_table[minor].dev->port,
IEEE1284_MODE_COMPAT);
lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
lp_release_parport (&lp_table[minor]);
}
out_unlock:
mutex_unlock(&lp_table[minor].port_mutex);
return retv;
}
#ifdef CONFIG_PARPORT_1284
/* Status readback conforming to ieee1284 */
static ssize_t lp_read(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
DEFINE_WAIT(wait);
unsigned int minor=iminor(file->f_path.dentry->d_inode);
struct parport *port = lp_table[minor].dev->port;
ssize_t retval = 0;
char *kbuf = lp_table[minor].lp_buffer;
int nonblock = ((file->f_flags & O_NONBLOCK) ||
(LP_F(minor) & LP_ABORT));
if (count > LP_BUFFER_SIZE)
count = LP_BUFFER_SIZE;
if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
lp_claim_parport_or_block (&lp_table[minor]);
parport_set_timeout (lp_table[minor].dev,
(nonblock ? PARPORT_INACTIVITY_O_NONBLOCK
: lp_table[minor].timeout));
parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
if (parport_negotiate (lp_table[minor].dev->port,
IEEE1284_MODE_NIBBLE)) {
retval = -EIO;
goto out;
}
while (retval == 0) {
retval = parport_read (port, kbuf, count);
if (retval > 0)
break;
if (nonblock) {
retval = -EAGAIN;
break;
}
/* Wait for data. */
if (lp_table[minor].dev->port->irq == PARPORT_IRQ_NONE) {
parport_negotiate (lp_table[minor].dev->port,
IEEE1284_MODE_COMPAT);
lp_error (minor);
if (parport_negotiate (lp_table[minor].dev->port,
IEEE1284_MODE_NIBBLE)) {
retval = -EIO;
goto out;
}
} else {
prepare_to_wait(&lp_table[minor].waitq, &wait, TASK_INTERRUPTIBLE);
schedule_timeout(LP_TIMEOUT_POLLED);
finish_wait(&lp_table[minor].waitq, &wait);
}
if (signal_pending (current)) {
retval = -ERESTARTSYS;
break;
}
cond_resched ();
}
parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
out:
lp_release_parport (&lp_table[minor]);
if (retval > 0 && copy_to_user (buf, kbuf, retval))
retval = -EFAULT;
mutex_unlock(&lp_table[minor].port_mutex);
return retval;
}
#endif /* IEEE 1284 support */
static int lp_open(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
int ret = 0;
mutex_lock(&lp_mutex);
if (minor >= LP_NO) {
ret = -ENXIO;
goto out;
}
if ((LP_F(minor) & LP_EXIST) == 0) {
ret = -ENXIO;
goto out;
}
if (test_and_set_bit(LP_BUSY_BIT_POS, &LP_F(minor))) {
ret = -EBUSY;
goto out;
}
/* If ABORTOPEN is set and the printer is offline or out of paper,
we may still want to open it to perform ioctl()s. Therefore we
have commandeered O_NONBLOCK, even though it is being used in
a non-standard manner. This is strictly a Linux hack, and
should most likely only ever be used by the tunelp application. */
if ((LP_F(minor) & LP_ABORTOPEN) && !(file->f_flags & O_NONBLOCK)) {
int status;
lp_claim_parport_or_block (&lp_table[minor]);
status = r_str(minor);
lp_release_parport (&lp_table[minor]);
if (status & LP_POUTPA) {
printk(KERN_INFO "lp%d out of paper\n", minor);
LP_F(minor) &= ~LP_BUSY;
ret = -ENOSPC;
goto out;
} else if (!(status & LP_PSELECD)) {
printk(KERN_INFO "lp%d off-line\n", minor);
LP_F(minor) &= ~LP_BUSY;
ret = -EIO;
goto out;
} else if (!(status & LP_PERRORP)) {
printk(KERN_ERR "lp%d printer error\n", minor);
LP_F(minor) &= ~LP_BUSY;
ret = -EIO;
goto out;
}
}
lp_table[minor].lp_buffer = kmalloc(LP_BUFFER_SIZE, GFP_KERNEL);
if (!lp_table[minor].lp_buffer) {
LP_F(minor) &= ~LP_BUSY;
ret = -ENOMEM;
goto out;
}
/* Determine if the peripheral supports ECP mode */
lp_claim_parport_or_block (&lp_table[minor]);
if ( (lp_table[minor].dev->port->modes & PARPORT_MODE_ECP) &&
!parport_negotiate (lp_table[minor].dev->port,
IEEE1284_MODE_ECP)) {
printk (KERN_INFO "lp%d: ECP mode\n", minor);
lp_table[minor].best_mode = IEEE1284_MODE_ECP;
} else {
lp_table[minor].best_mode = IEEE1284_MODE_COMPAT;
}
/* Leave peripheral in compatibility mode */
parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
lp_release_parport (&lp_table[minor]);
lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
out:
mutex_unlock(&lp_mutex);
return ret;
}
static int lp_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
lp_claim_parport_or_block (&lp_table[minor]);
parport_negotiate (lp_table[minor].dev->port, IEEE1284_MODE_COMPAT);
lp_table[minor].current_mode = IEEE1284_MODE_COMPAT;
lp_release_parport (&lp_table[minor]);
kfree(lp_table[minor].lp_buffer);
lp_table[minor].lp_buffer = NULL;
LP_F(minor) &= ~LP_BUSY;
return 0;
}
static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
unsigned long arg, void __user *argp)
{
int status;
int retval = 0;
#ifdef LP_DEBUG
printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
#endif
if (minor >= LP_NO)
return -ENODEV;
if ((LP_F(minor) & LP_EXIST) == 0)
return -ENODEV;
switch ( cmd ) {
case LPTIME:
LP_TIME(minor) = arg * HZ/100;
break;
case LPCHAR:
LP_CHAR(minor) = arg;
break;
case LPABORT:
if (arg)
LP_F(minor) |= LP_ABORT;
else
LP_F(minor) &= ~LP_ABORT;
break;
case LPABORTOPEN:
if (arg)
LP_F(minor) |= LP_ABORTOPEN;
else
LP_F(minor) &= ~LP_ABORTOPEN;
break;
case LPCAREFUL:
if (arg)
LP_F(minor) |= LP_CAREFUL;
else
LP_F(minor) &= ~LP_CAREFUL;
break;
case LPWAIT:
LP_WAIT(minor) = arg;
break;
case LPSETIRQ:
return -EINVAL;
break;
case LPGETIRQ:
if (copy_to_user(argp, &LP_IRQ(minor),
sizeof(int)))
return -EFAULT;
break;
case LPGETSTATUS:
lp_claim_parport_or_block (&lp_table[minor]);
status = r_str(minor);
lp_release_parport (&lp_table[minor]);
if (copy_to_user(argp, &status, sizeof(int)))
return -EFAULT;
break;
case LPRESET:
lp_reset(minor);
break;
#ifdef LP_STATS
case LPGETSTATS:
if (copy_to_user(argp, &LP_STAT(minor),
sizeof(struct lp_stats)))
return -EFAULT;
if (capable(CAP_SYS_ADMIN))
memset(&LP_STAT(minor), 0,
sizeof(struct lp_stats));
break;
#endif
case LPGETFLAGS:
status = LP_F(minor);
if (copy_to_user(argp, &status, sizeof(int)))
return -EFAULT;
break;
default:
retval = -EINVAL;
}
return retval;
}
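/*
 * Convert the user-supplied struct timeval to jiffies, rounding the
 * microsecond part up. Worked example (assuming HZ=100, i.e. 10000us
 * per jiffy): { tv_sec = 1, tv_usec = 500000 } yields
 * DIV_ROUND_UP(500000, 10000) + 1 * HZ = 50 + 100 = 150 jiffies.
 */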
static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
{
long to_jiffies;
/* Convert to jiffies, place in lp_table */
if ((par_timeout->tv_sec < 0) ||
(par_timeout->tv_usec < 0)) {
return -EINVAL;
}
to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ);
to_jiffies += par_timeout->tv_sec * (long) HZ;
if (to_jiffies <= 0) {
return -EINVAL;
}
lp_table[minor].timeout = to_jiffies;
return 0;
}
static long lp_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned int minor;
struct timeval par_timeout;
int ret;
minor = iminor(file->f_path.dentry->d_inode);
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
if (copy_from_user(&par_timeout, (void __user *)arg,
sizeof (struct timeval))) {
ret = -EFAULT;
break;
}
ret = lp_set_timeout(minor, &par_timeout);
break;
default:
ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
break;
}
mutex_unlock(&lp_mutex);
return ret;
}
#ifdef CONFIG_COMPAT
static long lp_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
unsigned int minor;
struct timeval par_timeout;
int ret;
minor = iminor(file->f_path.dentry->d_inode);
mutex_lock(&lp_mutex);
switch (cmd) {
case LPSETTIMEOUT:
if (compat_get_timeval(&par_timeout, compat_ptr(arg))) {
ret = -EFAULT;
break;
}
ret = lp_set_timeout(minor, &par_timeout);
break;
#ifdef LP_STATS
case LPGETSTATS:
/* FIXME: add an implementation if you set LP_STATS */
ret = -EINVAL;
break;
#endif
default:
ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg));
break;
}
mutex_unlock(&lp_mutex);
return ret;
}
#endif
static const struct file_operations lp_fops = {
.owner = THIS_MODULE,
.write = lp_write,
.unlocked_ioctl = lp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lp_compat_ioctl,
#endif
.open = lp_open,
.release = lp_release,
#ifdef CONFIG_PARPORT_1284
.read = lp_read,
#endif
.llseek = noop_llseek,
};
/* --- support for console on the line printer ----------------- */
#ifdef CONFIG_LP_CONSOLE
#define CONSOLE_LP 0
/* If the printer is out of paper, we can either lose the messages or
* stall until the printer is happy again. Define CONSOLE_LP_STRICT
* non-zero to get the latter behaviour. */
#define CONSOLE_LP_STRICT 1
/* The console must be locked when we get here. */
static void lp_console_write (struct console *co, const char *s,
unsigned count)
{
struct pardevice *dev = lp_table[CONSOLE_LP].dev;
struct parport *port = dev->port;
ssize_t written;
if (parport_claim (dev))
/* Nothing we can do. */
return;
parport_set_timeout (dev, 0);
/* Go to compatibility mode. */
parport_negotiate (port, IEEE1284_MODE_COMPAT);
do {
/* Write the data, converting LF->CRLF as we go. */
ssize_t canwrite = count;
char *lf = memchr (s, '\n', count);
if (lf)
canwrite = lf - s;
if (canwrite > 0) {
written = parport_write (port, s, canwrite);
if (written <= 0)
continue;
s += written;
count -= written;
canwrite -= written;
}
if (lf && canwrite <= 0) {
const char *crlf = "\r\n";
int i = 2;
/* Dodge the original '\n', and put '\r\n' instead. */
s++;
count--;
do {
written = parport_write (port, crlf, i);
if (written > 0)
i -= written, crlf += written;
} while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
}
} while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
parport_release (dev);
}
static struct console lpcons = {
.name = "lp",
.write = lp_console_write,
.flags = CON_PRINTBUFFER,
};
#endif /* console on line printer */
/* --- initialisation code ------------------------------------- */
static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
static char *parport[LP_NO];
static bool reset;
module_param_array(parport, charp, NULL, 0);
module_param(reset, bool, 0);
#ifndef MODULE
static int __init lp_setup (char *str)
{
static int parport_ptr;
int x;
if (get_option(&str, &x)) {
if (x == 0) {
/* disable driver on "lp=" or "lp=0" */
parport_nr[0] = LP_PARPORT_OFF;
} else {
printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", x);
return 0;
}
} else if (!strncmp(str, "parport", 7)) {
int n = simple_strtoul(str+7, NULL, 10);
if (parport_ptr < LP_NO)
parport_nr[parport_ptr++] = n;
else
printk(KERN_INFO "lp: too many ports, %s ignored.\n",
str);
} else if (!strcmp(str, "auto")) {
parport_nr[0] = LP_PARPORT_AUTO;
} else if (!strcmp(str, "none")) {
parport_nr[parport_ptr++] = LP_PARPORT_NONE;
} else if (!strcmp(str, "reset")) {
reset = 1;
}
return 1;
}
#endif
static int lp_register(int nr, struct parport *port)
{
lp_table[nr].dev = parport_register_device(port, "lp",
lp_preempt, NULL, NULL, 0,
(void *) &lp_table[nr]);
if (lp_table[nr].dev == NULL)
return 1;
lp_table[nr].flags |= LP_EXIST;
if (reset)
lp_reset(nr);
device_create(lp_class, port->dev, MKDEV(LP_MAJOR, nr), NULL,
"lp%d", nr);
printk(KERN_INFO "lp%d: using %s (%s).\n", nr, port->name,
(port->irq == PARPORT_IRQ_NONE)?"polling":"interrupt-driven");
#ifdef CONFIG_LP_CONSOLE
if (!nr) {
if (port->modes & PARPORT_MODE_SAFEININT) {
register_console(&lpcons);
console_registered = port;
printk (KERN_INFO "lp%d: console ready\n", CONSOLE_LP);
} else
printk (KERN_ERR "lp%d: cannot run console on %s\n",
CONSOLE_LP, port->name);
}
#endif
return 0;
}
static void lp_attach (struct parport *port)
{
unsigned int i;
switch (parport_nr[0]) {
case LP_PARPORT_UNSPEC:
case LP_PARPORT_AUTO:
if (parport_nr[0] == LP_PARPORT_AUTO &&
port->probe_info[0].class != PARPORT_CLASS_PRINTER)
return;
if (lp_count == LP_NO) {
printk(KERN_INFO "lp: ignoring parallel port (max. %d)\n",LP_NO);
return;
}
if (!lp_register(lp_count, port))
lp_count++;
break;
default:
for (i = 0; i < LP_NO; i++) {
if (port->number == parport_nr[i]) {
if (!lp_register(i, port))
lp_count++;
break;
}
}
break;
}
}
static void lp_detach (struct parport *port)
{
/* Write this some day. */
#ifdef CONFIG_LP_CONSOLE
if (console_registered == port) {
unregister_console(&lpcons);
console_registered = NULL;
}
#endif /* CONFIG_LP_CONSOLE */
}
static struct parport_driver lp_driver = {
.name = "lp",
.attach = lp_attach,
.detach = lp_detach,
};
static int __init lp_init (void)
{
int i, err = 0;
if (parport_nr[0] == LP_PARPORT_OFF)
return 0;
for (i = 0; i < LP_NO; i++) {
lp_table[i].dev = NULL;
lp_table[i].flags = 0;
lp_table[i].chars = LP_INIT_CHAR;
lp_table[i].time = LP_INIT_TIME;
lp_table[i].wait = LP_INIT_WAIT;
lp_table[i].lp_buffer = NULL;
#ifdef LP_STATS
lp_table[i].lastcall = 0;
lp_table[i].runchars = 0;
memset (&lp_table[i].stats, 0, sizeof (struct lp_stats));
#endif
lp_table[i].last_error = 0;
init_waitqueue_head (&lp_table[i].waitq);
init_waitqueue_head (&lp_table[i].dataq);
mutex_init(&lp_table[i].port_mutex);
lp_table[i].timeout = 10 * HZ;
}
if (register_chrdev (LP_MAJOR, "lp", &lp_fops)) {
printk (KERN_ERR "lp: unable to get major %d\n", LP_MAJOR);
return -EIO;
}
lp_class = class_create(THIS_MODULE, "printer");
if (IS_ERR(lp_class)) {
err = PTR_ERR(lp_class);
goto out_reg;
}
if (parport_register_driver (&lp_driver)) {
printk (KERN_ERR "lp: unable to register with parport\n");
err = -EIO;
goto out_class;
}
if (!lp_count) {
printk (KERN_INFO "lp: driver loaded but no devices found\n");
#ifndef CONFIG_PARPORT_1284
if (parport_nr[0] == LP_PARPORT_AUTO)
printk (KERN_INFO "lp: (is IEEE 1284 support enabled?)\n");
#endif
}
return 0;
out_class:
class_destroy(lp_class);
out_reg:
unregister_chrdev(LP_MAJOR, "lp");
return err;
}
static int __init lp_init_module (void)
{
if (parport[0]) {
/* The user gave some parameters. Let's see what they were. */
if (!strncmp(parport[0], "auto", 4))
parport_nr[0] = LP_PARPORT_AUTO;
else {
int n;
for (n = 0; n < LP_NO && parport[n]; n++) {
if (!strncmp(parport[n], "none", 4))
parport_nr[n] = LP_PARPORT_NONE;
else {
char *ep;
unsigned long r = simple_strtoul(parport[n], &ep, 0);
if (ep != parport[n])
parport_nr[n] = r;
else {
printk(KERN_ERR "lp: bad port specifier `%s'\n", parport[n]);
return -ENODEV;
}
}
}
}
}
return lp_init();
}
static void lp_cleanup_module (void)
{
unsigned int offset;
parport_unregister_driver (&lp_driver);
#ifdef CONFIG_LP_CONSOLE
unregister_console (&lpcons);
#endif
unregister_chrdev(LP_MAJOR, "lp");
for (offset = 0; offset < LP_NO; offset++) {
if (lp_table[offset].dev == NULL)
continue;
parport_unregister_device(lp_table[offset].dev);
device_destroy(lp_class, MKDEV(LP_MAJOR, offset));
}
class_destroy(lp_class);
}
__setup("lp=", lp_setup);
module_init(lp_init_module);
module_exit(lp_cleanup_module);
MODULE_ALIAS_CHARDEV_MAJOR(LP_MAJOR);
MODULE_LICENSE("GPL");
| gpl-2.0 |
rickyzhang82/odroid-linux | arch/arm/mach-ux500/board-mop500-stuib.c | 7871 | 4759 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/stmpe.h>
#include <linux/input/bu21013.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/input/matrix_keypad.h>
#include <asm/mach-types.h>
#include "board-mop500.h"
/* STMPE/SKE keypads use this key layout */
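/*
 * Each KEY(row, col, keycode) entry (the macro comes from
 * <linux/input/matrix_keypad.h>) packs a matrix position and a
 * keycode into a single word; e.g. KEY(2, 5, KEY_END) means the
 * switch at row 2, column 5 reports the "End" key.
 */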
static const unsigned int mop500_keymap[] = {
KEY(2, 5, KEY_END),
KEY(4, 1, KEY_POWER),
KEY(3, 5, KEY_VOLUMEDOWN),
KEY(1, 3, KEY_3),
KEY(5, 2, KEY_RIGHT),
KEY(5, 0, KEY_9),
KEY(0, 5, KEY_MENU),
KEY(7, 6, KEY_ENTER),
KEY(4, 5, KEY_0),
KEY(6, 7, KEY_2),
KEY(3, 4, KEY_UP),
KEY(3, 3, KEY_DOWN),
KEY(6, 4, KEY_SEND),
KEY(6, 2, KEY_BACK),
KEY(4, 2, KEY_VOLUMEUP),
KEY(5, 5, KEY_1),
KEY(4, 3, KEY_LEFT),
KEY(3, 2, KEY_7),
};
static const struct matrix_keymap_data mop500_keymap_data = {
.keymap = mop500_keymap,
.keymap_size = ARRAY_SIZE(mop500_keymap),
};
/*
* STMPE1601
*/
static struct stmpe_keypad_platform_data stmpe1601_keypad_data = {
.debounce_ms = 64,
.scan_count = 8,
.no_autorepeat = true,
.keymap_data = &mop500_keymap_data,
};
static struct stmpe_platform_data stmpe1601_data = {
.id = 1,
.blocks = STMPE_BLOCK_KEYPAD,
.irq_trigger = IRQF_TRIGGER_FALLING,
.irq_base = MOP500_STMPE1601_IRQ(0),
.keypad = &stmpe1601_keypad_data,
.autosleep = true,
.autosleep_timeout = 1024,
};
static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
{
I2C_BOARD_INFO("stmpe1601", 0x40),
.irq = NOMADIK_GPIO_TO_IRQ(218),
.platform_data = &stmpe1601_data,
.flags = I2C_CLIENT_WAKE,
},
};
/*
* BU21013 ROHM touchscreen interface on the STUIBs
*/
/* tracks number of bu21013 devices being enabled */
static int bu21013_devices;
#define TOUCH_GPIO_PIN 84
#define TOUCH_XMAX 384
#define TOUCH_YMAX 704
#define PRCMU_CLOCK_OCR 0x1CC
#define TSC_EXT_CLOCK_9_6MHZ 0x840000
/**
* bu21013_gpio_board_init : configures the touch panel.
* @reset_pin: reset pin number
 * This function can be used to configure
 * the voltage and reset the touch panel controller.
*/
static int bu21013_gpio_board_init(int reset_pin)
{
int retval = 0;
bu21013_devices++;
if (bu21013_devices == 1) {
retval = gpio_request(reset_pin, "touchp_reset");
if (retval) {
printk(KERN_ERR "Unable to request gpio reset_pin");
return retval;
}
retval = gpio_direction_output(reset_pin, 1);
if (retval < 0) {
printk(KERN_ERR "%s: gpio direction failed\n",
__func__);
return retval;
}
}
return retval;
}
/**
* bu21013_gpio_board_exit : deconfigures the touch panel controller
* @reset_pin: reset pin number
 * This function can be used to deconfigure the chip select
 * for the touch panel controller.
*/
static int bu21013_gpio_board_exit(int reset_pin)
{
int retval = 0;
if (bu21013_devices == 1) {
retval = gpio_direction_output(reset_pin, 0);
if (retval < 0) {
printk(KERN_ERR "%s: gpio direction failed\n",
__func__);
return retval;
}
gpio_set_value(reset_pin, 0);
}
bu21013_devices--;
return retval;
}
/**
* bu21013_read_pin_val : get the interrupt pin value
 * This function can be used to get the interrupt pin value for the
 * touch panel controller.
*/
static int bu21013_read_pin_val(void)
{
return gpio_get_value(TOUCH_GPIO_PIN);
}
static struct bu21013_platform_device tsc_plat_device = {
.cs_en = bu21013_gpio_board_init,
.cs_dis = bu21013_gpio_board_exit,
.irq_read_val = bu21013_read_pin_val,
.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
.ext_clk = false,
.x_flip = false,
.y_flip = true,
};
static struct bu21013_platform_device tsc_plat2_device = {
.cs_en = bu21013_gpio_board_init,
.cs_dis = bu21013_gpio_board_exit,
.irq_read_val = bu21013_read_pin_val,
.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
.touch_x_max = TOUCH_XMAX,
.touch_y_max = TOUCH_YMAX,
.ext_clk = false,
.x_flip = false,
.y_flip = true,
};
static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
{
I2C_BOARD_INFO("bu21013_tp", 0x5C),
.platform_data = &tsc_plat_device,
},
{
I2C_BOARD_INFO("bu21013_tp", 0x5D),
.platform_data = &tsc_plat2_device,
},
};
void __init mop500_stuib_init(void)
{
if (machine_is_hrefv60()) {
tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
tsc_plat2_device.cs_pin = HREFV60_TOUCH_RST_GPIO;
} else {
tsc_plat_device.cs_pin = GPIO_BU21013_CS;
tsc_plat2_device.cs_pin = GPIO_BU21013_CS;
}
mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
ARRAY_SIZE(mop500_i2c0_devices_stuib));
mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib,
ARRAY_SIZE(u8500_i2c3_devices_stuib));
}
| gpl-2.0 |
glewarne/LG_G2-D802_StockMOD_Kernel | net/netfilter/nf_conntrack_snmp.c | 8383 | 2225 | /*
* SNMP service broadcast connection tracking helper
*
* (c) 2011 Jiri Olsa <jolsa@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/in.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#define SNMP_PORT 161
MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFCT_HELPER("snmp");
static unsigned int timeout __read_mostly = 30;
module_param(timeout, uint, S_IRUSR);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
int (*nf_nat_snmp_hook)(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
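/*
 * When the matching NAT helper module (nf_nat_snmp_basic in the usual
 * in-tree pairing) is loaded, it points nf_nat_snmp_hook at its
 * packet-mangling routine; snmp_conntrack_help() below fetches it via
 * rcu_dereference() so the NAT module can be loaded and unloaded
 * safely.
 */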
static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
typeof(nf_nat_snmp_hook) nf_nat_snmp;
nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
return nf_nat_snmp(skb, protoff, ct, ctinfo);
return NF_ACCEPT;
}
static struct nf_conntrack_expect_policy exp_policy = {
.max_expected = 1,
};
static struct nf_conntrack_helper helper __read_mostly = {
.name = "snmp",
.tuple.src.l3num = NFPROTO_IPV4,
.tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
.help = snmp_conntrack_help,
.expect_policy = &exp_policy,
};
static int __init nf_conntrack_snmp_init(void)
{
exp_policy.timeout = timeout;
return nf_conntrack_helper_register(&helper);
}
static void __exit nf_conntrack_snmp_fini(void)
{
nf_conntrack_helper_unregister(&helper);
}
module_init(nf_conntrack_snmp_init);
module_exit(nf_conntrack_snmp_fini);
| gpl-2.0 |
e9wifi-dev/android_kernel_lge_e9wifi | arch/cris/kernel/crisksyms.c | 9407 | 1679 | #include <linux/module.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/fasttimer.h>
extern unsigned long get_cmos_time(void);
extern void __Udiv(void);
extern void __Umod(void);
extern void __Div(void);
extern void __Mod(void);
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __lshrdi3(void);
extern void __negdi2(void);
extern void iounmap(volatile void * __iomem);
/* Platform dependent support */
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(loops_per_usec);
/* Math functions */
EXPORT_SYMBOL(__Udiv);
EXPORT_SYMBOL(__Umod);
EXPORT_SYMBOL(__Div);
EXPORT_SYMBOL(__Mod);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__negdi2);
/* Memory functions */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
/* Userspace access functions */
EXPORT_SYMBOL(__copy_user_zeroing);
EXPORT_SYMBOL(__copy_user);
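/*
 * memcpy/memset may be provided as macros or compiler builtins on
 * this architecture; undefine them so the real functions can be
 * redeclared and exported below.
 */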
#undef memcpy
#undef memset
extern void * memset(void *, int, __kernel_size_t);
extern void * memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
#ifdef CONFIG_ETRAX_FAST_TIMER
/* Fast timer functions */
EXPORT_SYMBOL(fast_timer_list);
EXPORT_SYMBOL(start_one_shot_timer);
EXPORT_SYMBOL(del_fast_timer);
EXPORT_SYMBOL(schedule_usleep);
#endif
EXPORT_SYMBOL(csum_partial);
| gpl-2.0 |
spacecaker/android_kernel_acer_swing_cm10.1_MDP4 | drivers/infiniband/hw/ehca/ehca_mcast.c | 13503 | 4589 | /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* mcast functions
*
* Authors: Khadija Souissi <souissik@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/err.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#define MAX_MC_LID 0xFFFE
#define MIN_MC_LID 0xC000 /* Multicast limits */
#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
#define EHCA_VALID_MULTICAST_LID(lid) \
(((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
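/*
 * Example: a multicast GID such as ff12:401b::1 (raw[0] == 0xff) and
 * LID 0xc001 pass these checks, while a unicast LID such as 0x0004 is
 * rejected.
 */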
int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
ib_device);
union ib_gid my_gid;
u64 subnet_prefix, interface_id, h_ret;
if (ibqp->qp_type != IB_QPT_UD) {
ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
return -EINVAL;
}
if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
ehca_err(ibqp->device, "invalid mulitcast gid");
return -EINVAL;
} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
return -EINVAL;
}
memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
my_qp->galpas.kernel,
lid, subnet_prefix, interface_id);
if (h_ret != H_SUCCESS)
ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
"h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
struct ehca_shca *shca = container_of(ibqp->pd->device,
struct ehca_shca, ib_device);
union ib_gid my_gid;
u64 subnet_prefix, interface_id, h_ret;
if (ibqp->qp_type != IB_QPT_UD) {
ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
return -EINVAL;
}
if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
ehca_err(ibqp->device, "invalid mulitcast gid");
return -EINVAL;
} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
return -EINVAL;
}
memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
my_qp->galpas.kernel,
lid, subnet_prefix, interface_id);
if (h_ret != H_SUCCESS)
ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
"h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
| gpl-2.0 |
aldanopolis/android_kernel_motorola_msm8226 | net/wimax/debugfs.c | 14527 | 2283 | /*
* Linux WiMAX
* Debugfs support
*
*
* Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/debugfs.h>
#include <linux/wimax.h>
#include "wimax-internal.h"
#define D_SUBMODULE debugfs
#include "debug-levels.h"
#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \
if (result < 0) \
goto error; \
} while (0)
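/*
 * Hedged illustration, not in the original file: a single expansion of
 * the helper macro above. __debugfs_register("wimax_dl_", stack, dentry)
 * expands (roughly) to:
 *
 * result = d_level_register_debugfs("wimax_dl_", stack, dentry);
 * if (result < 0)
 * goto error;
 */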
int wimax_debugfs_add(struct wimax_dev *wimax_dev)
{
int result;
struct net_device *net_dev = wimax_dev->net_dev;
struct device *dev = net_dev->dev.parent;
struct dentry *dentry;
char buf[128];
snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
dentry = debugfs_create_dir(buf, NULL);
result = PTR_ERR(dentry);
if (IS_ERR(dentry)) {
if (result == -ENODEV)
result = 0; /* No debugfs support */
else
dev_err(dev, "Can't create debugfs dentry: %d\n",
result);
goto out;
}
wimax_dev->debugfs_dentry = dentry;
__debugfs_register("wimax_dl_", debugfs, dentry);
__debugfs_register("wimax_dl_", id_table, dentry);
__debugfs_register("wimax_dl_", op_msg, dentry);
__debugfs_register("wimax_dl_", op_reset, dentry);
__debugfs_register("wimax_dl_", op_rfkill, dentry);
__debugfs_register("wimax_dl_", op_state_get, dentry);
__debugfs_register("wimax_dl_", stack, dentry);
result = 0;
out:
return result;
error:
debugfs_remove_recursive(wimax_dev->debugfs_dentry);
return result;
}
void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
{
debugfs_remove_recursive(wimax_dev->debugfs_dentry);
}
| gpl-2.0 |
Raylingogogo/ij3c-openwrt | target/linux/generic/files/drivers/net/phy/rtl8367.c | 448 | 59026 | /*
* Platform driver for the Realtek RTL8367R/M ethernet switches
*
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/rtl8367.h>
#include "rtl8366_smi.h"
#define RTL8367_RESET_DELAY 1000 /* msecs */
#define RTL8367_PHY_ADDR_MAX 8
#define RTL8367_PHY_REG_MAX 31
#define RTL8367_VID_MASK 0xffff
#define RTL8367_FID_MASK 0xfff
#define RTL8367_UNTAG_MASK 0xffff
#define RTL8367_MEMBER_MASK 0xffff
#define RTL8367_PORT_CFG_REG(_p) (0x000e + 0x20 * (_p))
#define RTL8367_PORT_CFG_EGRESS_MODE_SHIFT 4
#define RTL8367_PORT_CFG_EGRESS_MODE_MASK 0x3
#define RTL8367_PORT_CFG_EGRESS_MODE_ORIGINAL 0
#define RTL8367_PORT_CFG_EGRESS_MODE_KEEP 1
#define RTL8367_PORT_CFG_EGRESS_MODE_PRI 2
#define RTL8367_PORT_CFG_EGRESS_MODE_REAL 3
#define RTL8367_BYPASS_LINE_RATE_REG 0x03f7
#define RTL8367_TA_CTRL_REG 0x0500
#define RTL8367_TA_CTRL_STATUS BIT(12)
#define RTL8367_TA_CTRL_METHOD BIT(5)
#define RTL8367_TA_CTRL_CMD_SHIFT 4
#define RTL8367_TA_CTRL_CMD_READ 0
#define RTL8367_TA_CTRL_CMD_WRITE 1
#define RTL8367_TA_CTRL_TABLE_SHIFT 0
#define RTL8367_TA_CTRL_TABLE_ACLRULE 1
#define RTL8367_TA_CTRL_TABLE_ACLACT 2
#define RTL8367_TA_CTRL_TABLE_CVLAN 3
#define RTL8367_TA_CTRL_TABLE_L2 4
#define RTL8367_TA_CTRL_CVLAN_READ \
((RTL8367_TA_CTRL_CMD_READ << RTL8367_TA_CTRL_CMD_SHIFT) | \
RTL8367_TA_CTRL_TABLE_CVLAN)
#define RTL8367_TA_CTRL_CVLAN_WRITE \
((RTL8367_TA_CTRL_CMD_WRITE << RTL8367_TA_CTRL_CMD_SHIFT) | \
RTL8367_TA_CTRL_TABLE_CVLAN)
#define RTL8367_TA_ADDR_REG 0x0501
#define RTL8367_TA_ADDR_MASK 0x3fff
#define RTL8367_TA_DATA_REG(_x) (0x0503 + (_x))
#define RTL8367_TA_VLAN_DATA_SIZE 4
#define RTL8367_TA_VLAN_VID_MASK RTL8367_VID_MASK
#define RTL8367_TA_VLAN_MEMBER_SHIFT 0
#define RTL8367_TA_VLAN_MEMBER_MASK RTL8367_MEMBER_MASK
#define RTL8367_TA_VLAN_FID_SHIFT 0
#define RTL8367_TA_VLAN_FID_MASK RTL8367_FID_MASK
#define RTL8367_TA_VLAN_UNTAG1_SHIFT 14
#define RTL8367_TA_VLAN_UNTAG1_MASK 0x3
#define RTL8367_TA_VLAN_UNTAG2_SHIFT 0
#define RTL8367_TA_VLAN_UNTAG2_MASK 0x3fff
#define RTL8367_VLAN_PVID_CTRL_REG(_p) (0x0700 + (_p) / 2)
#define RTL8367_VLAN_PVID_CTRL_MASK 0x1f
#define RTL8367_VLAN_PVID_CTRL_SHIFT(_p) (8 * ((_p) % 2))
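/*
 * Worked example (added annotation, not in the original): for port 3
 * the PVID index lives in register 0x0700 + 3/2 = 0x0701, at bit
 * offset 8 * (3 % 2) = 8, i.e. bits 8..12 under the 0x1f mask.
 */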
#define RTL8367_VLAN_MC_BASE(_x) (0x0728 + (_x) * 4)
#define RTL8367_VLAN_MC_DATA_SIZE 4
#define RTL8367_VLAN_MC_MEMBER_SHIFT 0
#define RTL8367_VLAN_MC_MEMBER_MASK RTL8367_MEMBER_MASK
#define RTL8367_VLAN_MC_FID_SHIFT 0
#define RTL8367_VLAN_MC_FID_MASK RTL8367_FID_MASK
#define RTL8367_VLAN_MC_EVID_SHIFT 0
#define RTL8367_VLAN_MC_EVID_MASK RTL8367_VID_MASK
#define RTL8367_VLAN_CTRL_REG 0x07a8
#define RTL8367_VLAN_CTRL_ENABLE BIT(0)
#define RTL8367_VLAN_INGRESS_REG 0x07a9
#define RTL8367_PORT_ISOLATION_REG(_p) (0x08a2 + (_p))
#define RTL8367_MIB_COUNTER_REG(_x) (0x1000 + (_x))
#define RTL8367_MIB_ADDRESS_REG 0x1004
#define RTL8367_MIB_CTRL_REG(_x) (0x1005 + (_x))
#define RTL8367_MIB_CTRL_GLOBAL_RESET_MASK BIT(11)
#define RTL8367_MIB_CTRL_QM_RESET_MASK BIT(10)
#define RTL8367_MIB_CTRL_PORT_RESET_MASK(_p) BIT(2 + (_p))
#define RTL8367_MIB_CTRL_RESET_MASK BIT(1)
#define RTL8367_MIB_CTRL_BUSY_MASK BIT(0)
#define RTL8367_MIB_COUNT 36
#define RTL8367_MIB_COUNTER_PORT_OFFSET 0x0050
#define RTL8367_SWC0_REG 0x1200
#define RTL8367_SWC0_MAX_LENGTH_SHIFT 13
#define RTL8367_SWC0_MAX_LENGTH(_x) ((_x) << 13)
#define RTL8367_SWC0_MAX_LENGTH_MASK RTL8367_SWC0_MAX_LENGTH(0x3)
#define RTL8367_SWC0_MAX_LENGTH_1522 RTL8367_SWC0_MAX_LENGTH(0)
#define RTL8367_SWC0_MAX_LENGTH_1536 RTL8367_SWC0_MAX_LENGTH(1)
#define RTL8367_SWC0_MAX_LENGTH_1552 RTL8367_SWC0_MAX_LENGTH(2)
#define RTL8367_SWC0_MAX_LENGTH_16000 RTL8367_SWC0_MAX_LENGTH(3)
#define RTL8367_CHIP_NUMBER_REG 0x1300
#define RTL8367_CHIP_VER_REG 0x1301
#define RTL8367_CHIP_VER_RLVID_SHIFT 12
#define RTL8367_CHIP_VER_RLVID_MASK 0xf
#define RTL8367_CHIP_VER_MCID_SHIFT 8
#define RTL8367_CHIP_VER_MCID_MASK 0xf
#define RTL8367_CHIP_VER_BOID_SHIFT 4
#define RTL8367_CHIP_VER_BOID_MASK 0xf
#define RTL8367_CHIP_MODE_REG 0x1302
#define RTL8367_CHIP_MODE_MASK 0x7
#define RTL8367_CHIP_DEBUG0_REG 0x1303
#define RTL8367_CHIP_DEBUG0_DUMMY0(_x) BIT(8 + (_x))
#define RTL8367_CHIP_DEBUG1_REG 0x1304
#define RTL8367_DIS_REG 0x1305
#define RTL8367_DIS_SKIP_MII_RXER(_x) BIT(12 + (_x))
#define RTL8367_DIS_RGMII_SHIFT(_x) (4 * (_x))
#define RTL8367_DIS_RGMII_MASK 0x7
#define RTL8367_EXT_RGMXF_REG(_x) (0x1306 + (_x))
#define RTL8367_EXT_RGMXF_DUMMY0_SHIFT 5
#define RTL8367_EXT_RGMXF_DUMMY0_MASK 0x7ff
#define RTL8367_EXT_RGMXF_TXDELAY_SHIFT 3
#define RTL8367_EXT_RGMXF_TXDELAY_MASK 1
#define RTL8367_EXT_RGMXF_RXDELAY_MASK 0x7
#define RTL8367_DI_FORCE_REG(_x) (0x1310 + (_x))
#define RTL8367_DI_FORCE_MODE BIT(12)
#define RTL8367_DI_FORCE_NWAY BIT(7)
#define RTL8367_DI_FORCE_TXPAUSE BIT(6)
#define RTL8367_DI_FORCE_RXPAUSE BIT(5)
#define RTL8367_DI_FORCE_LINK BIT(4)
#define RTL8367_DI_FORCE_DUPLEX BIT(2)
#define RTL8367_DI_FORCE_SPEED_MASK 3
#define RTL8367_DI_FORCE_SPEED_10 0
#define RTL8367_DI_FORCE_SPEED_100 1
#define RTL8367_DI_FORCE_SPEED_1000 2
#define RTL8367_MAC_FORCE_REG(_x) (0x1312 + (_x))
#define RTL8367_CHIP_RESET_REG 0x1322
#define RTL8367_CHIP_RESET_SW BIT(1)
#define RTL8367_CHIP_RESET_HW BIT(0)
#define RTL8367_PORT_STATUS_REG(_p) (0x1352 + (_p))
#define RTL8367_PORT_STATUS_NWAY BIT(7)
#define RTL8367_PORT_STATUS_TXPAUSE BIT(6)
#define RTL8367_PORT_STATUS_RXPAUSE BIT(5)
#define RTL8367_PORT_STATUS_LINK BIT(4)
#define RTL8367_PORT_STATUS_DUPLEX BIT(2)
#define RTL8367_PORT_STATUS_SPEED_MASK 0x0003
#define RTL8367_PORT_STATUS_SPEED_10 0
#define RTL8367_PORT_STATUS_SPEED_100 1
#define RTL8367_PORT_STATUS_SPEED_1000 2
#define RTL8367_RTL_NO_REG 0x13c0
#define RTL8367_RTL_NO_8367R 0x3670
#define RTL8367_RTL_NO_8367M 0x3671
#define RTL8367_RTL_VER_REG 0x13c1
#define RTL8367_RTL_VER_MASK 0xf
#define RTL8367_RTL_MAGIC_ID_REG 0x13c2
#define RTL8367_RTL_MAGIC_ID_VAL 0x0249
#define RTL8367_LED_SYS_CONFIG_REG 0x1b00
#define RTL8367_LED_MODE_REG 0x1b02
#define RTL8367_LED_MODE_RATE_M 0x7
#define RTL8367_LED_MODE_RATE_S 1
#define RTL8367_LED_CONFIG_REG 0x1b03
#define RTL8367_LED_CONFIG_DATA_S 12
#define RTL8367_LED_CONFIG_DATA_M 0x3
#define RTL8367_LED_CONFIG_SEL BIT(14)
#define RTL8367_LED_CONFIG_LED_CFG_M 0xf
#define RTL8367_PARA_LED_IO_EN1_REG 0x1b24
#define RTL8367_PARA_LED_IO_EN2_REG 0x1b25
#define RTL8367_PARA_LED_IO_EN_PMASK 0xff
#define RTL8367_IA_CTRL_REG 0x1f00
#define RTL8367_IA_CTRL_RW(_x) ((_x) << 1)
#define RTL8367_IA_CTRL_RW_READ RTL8367_IA_CTRL_RW(0)
#define RTL8367_IA_CTRL_RW_WRITE RTL8367_IA_CTRL_RW(1)
#define RTL8367_IA_CTRL_CMD_MASK BIT(0)
#define RTL8367_IA_STATUS_REG 0x1f01
#define RTL8367_IA_STATUS_PHY_BUSY BIT(2)
#define RTL8367_IA_STATUS_SDS_BUSY BIT(1)
#define RTL8367_IA_STATUS_MDX_BUSY BIT(0)
#define RTL8367_IA_ADDRESS_REG 0x1f02
#define RTL8367_IA_WRITE_DATA_REG 0x1f03
#define RTL8367_IA_READ_DATA_REG 0x1f04
#define RTL8367_INTERNAL_PHY_REG(_a, _r) (0x2000 + 32 * (_a) + (_r))
#define RTL8367_CPU_PORT_NUM 9
#define RTL8367_NUM_PORTS 10
#define RTL8367_NUM_VLANS 32
#define RTL8367_NUM_LEDGROUPS 4
#define RTL8367_NUM_VIDS 4096
#define RTL8367_PRIORITYMAX 7
#define RTL8367_FIDMAX 7
#define RTL8367_PORT_0 BIT(0)
#define RTL8367_PORT_1 BIT(1)
#define RTL8367_PORT_2 BIT(2)
#define RTL8367_PORT_3 BIT(3)
#define RTL8367_PORT_4 BIT(4)
#define RTL8367_PORT_5 BIT(5)
#define RTL8367_PORT_6 BIT(6)
#define RTL8367_PORT_7 BIT(7)
#define RTL8367_PORT_E1 BIT(8) /* external port 1 */
#define RTL8367_PORT_E0 BIT(9) /* external port 0 */
#define RTL8367_PORTS_ALL \
(RTL8367_PORT_0 | RTL8367_PORT_1 | RTL8367_PORT_2 | \
RTL8367_PORT_3 | RTL8367_PORT_4 | RTL8367_PORT_5 | \
RTL8367_PORT_6 | RTL8367_PORT_7 | RTL8367_PORT_E1 | \
RTL8367_PORT_E0)
#define RTL8367_PORTS_ALL_BUT_CPU \
(RTL8367_PORT_0 | RTL8367_PORT_1 | RTL8367_PORT_2 | \
RTL8367_PORT_3 | RTL8367_PORT_4 | RTL8367_PORT_5 | \
RTL8367_PORT_6 | RTL8367_PORT_7 | RTL8367_PORT_E1)
struct rtl8367_initval {
u16 reg;
u16 val;
};
static struct rtl8366_mib_counter rtl8367_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 2, "Dot3StatsFCSErrors" },
{ 0, 6, 2, "Dot3StatsSymbolErrors" },
{ 0, 8, 2, "Dot3InPauseFrames" },
{ 0, 10, 2, "Dot3ControlInUnknownOpcodes" },
{ 0, 12, 2, "EtherStatsFragments" },
{ 0, 14, 2, "EtherStatsJabbers" },
{ 0, 16, 2, "IfInUcastPkts" },
{ 0, 18, 2, "EtherStatsDropEvents" },
{ 0, 20, 4, "EtherStatsOctets" },
{ 0, 24, 2, "EtherStatsUnderSizePkts" },
{ 0, 26, 2, "EtherOversizeStats" },
{ 0, 28, 2, "EtherStatsPkts64Octets" },
{ 0, 30, 2, "EtherStatsPkts65to127Octets" },
{ 0, 32, 2, "EtherStatsPkts128to255Octets" },
{ 0, 34, 2, "EtherStatsPkts256to511Octets" },
{ 0, 36, 2, "EtherStatsPkts512to1023Octets" },
{ 0, 38, 2, "EtherStatsPkts1024to1518Octets" },
{ 0, 40, 2, "EtherStatsMulticastPkts" },
{ 0, 42, 2, "EtherStatsBroadcastPkts" },
{ 0, 44, 4, "IfOutOctets" },
{ 0, 48, 2, "Dot3StatsSingleCollisionFrames" },
{ 0, 50, 2, "Dot3StatMultipleCollisionFrames" },
{ 0, 52, 2, "Dot3sDeferredTransmissions" },
{ 0, 54, 2, "Dot3StatsLateCollisions" },
{ 0, 56, 2, "EtherStatsCollisions" },
{ 0, 58, 2, "Dot3StatsExcessiveCollisions" },
{ 0, 60, 2, "Dot3OutPauseFrames" },
{ 0, 62, 2, "Dot1dBasePortDelayExceededDiscards" },
{ 0, 64, 2, "Dot1dTpPortInDiscards" },
{ 0, 66, 2, "IfOutUcastPkts" },
{ 0, 68, 2, "IfOutMulticastPkts" },
{ 0, 70, 2, "IfOutBroadcastPkts" },
{ 0, 72, 2, "OutOampduPkts" },
{ 0, 74, 2, "InOampduPkts" },
{ 0, 76, 2, "PktgenPkts" },
};
#define REG_RD(_smi, _reg, _val) \
do { \
err = rtl8366_smi_read_reg(_smi, _reg, _val); \
if (err) \
return err; \
} while (0)
#define REG_WR(_smi, _reg, _val) \
do { \
err = rtl8366_smi_write_reg(_smi, _reg, _val); \
if (err) \
return err; \
} while (0)
#define REG_RMW(_smi, _reg, _mask, _val) \
do { \
err = rtl8366_smi_rmwr(_smi, _reg, _mask, _val); \
if (err) \
return err; \
} while (0)
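/*
 * Added note, not in the original: the three helpers above expand to
 * statements that use a local 'int err' and return from the *calling*
 * function on failure, which is why nearly every function below
 * declares an err variable that is never read directly.
 */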
static const struct rtl8367_initval rtl8367_initvals_0_0[] = {
{0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0000}, {0x2215, 0x1006},
{0x221f, 0x0005}, {0x2200, 0x00c6}, {0x221f, 0x0007}, {0x221e, 0x0048},
{0x2215, 0x6412}, {0x2216, 0x6412}, {0x2217, 0x6412}, {0x2218, 0x6412},
{0x2219, 0x6412}, {0x221A, 0x6412}, {0x221f, 0x0001}, {0x220c, 0xdbf0},
{0x2209, 0x2576}, {0x2207, 0x287E}, {0x220A, 0x68E5}, {0x221D, 0x3DA4},
{0x221C, 0xE7F7}, {0x2214, 0x7F52}, {0x2218, 0x7FCE}, {0x2208, 0x04B7},
{0x2206, 0x4072}, {0x2210, 0xF05E}, {0x221B, 0xB414}, {0x221F, 0x0003},
{0x221A, 0x06A6}, {0x2210, 0xF05E}, {0x2213, 0x06EB}, {0x2212, 0xF4D2},
{0x220E, 0xE120}, {0x2200, 0x7C00}, {0x2202, 0x5FD0}, {0x220D, 0x0207},
{0x221f, 0x0002}, {0x2205, 0x0978}, {0x2202, 0x8C01}, {0x2207, 0x3620},
{0x221C, 0x0001}, {0x2203, 0x0420}, {0x2204, 0x80C8}, {0x133e, 0x0ede},
{0x221f, 0x0002}, {0x220c, 0x0073}, {0x220d, 0xEB65}, {0x220e, 0x51d1},
{0x220f, 0x5dcb}, {0x2210, 0x3044}, {0x2211, 0x1800}, {0x2212, 0x7E00},
{0x2213, 0x0000}, {0x133f, 0x0010}, {0x133e, 0x0ffe}, {0x207f, 0x0002},
{0x2074, 0x3D22}, {0x2075, 0x2000}, {0x2076, 0x6040}, {0x2077, 0x0000},
{0x2078, 0x0f0a}, {0x2079, 0x50AB}, {0x207a, 0x0000}, {0x207b, 0x0f0f},
{0x205f, 0x0002}, {0x2054, 0xFF00}, {0x2055, 0x000A}, {0x2056, 0x000A},
{0x2057, 0x0005}, {0x2058, 0x0005}, {0x2059, 0x0000}, {0x205A, 0x0005},
{0x205B, 0x0005}, {0x205C, 0x0005}, {0x209f, 0x0002}, {0x2094, 0x00AA},
{0x2095, 0x00AA}, {0x2096, 0x00AA}, {0x2097, 0x00AA}, {0x2098, 0x0055},
{0x2099, 0x00AA}, {0x209A, 0x00AA}, {0x209B, 0x00AA}, {0x1363, 0x8354},
{0x1270, 0x3333}, {0x1271, 0x3333}, {0x1272, 0x3333}, {0x1330, 0x00DB},
{0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x121d, 0x1006}, {0x121e, 0x03e8},
{0x121f, 0x02b3}, {0x1220, 0x028f}, {0x1221, 0x029b}, {0x1222, 0x0277},
{0x1223, 0x02b3}, {0x1224, 0x028f}, {0x1225, 0x029b}, {0x1226, 0x0277},
{0x1227, 0x00c0}, {0x1228, 0x00b4}, {0x122f, 0x00c0}, {0x1230, 0x00b4},
{0x1229, 0x0020}, {0x122a, 0x000c}, {0x1231, 0x0030}, {0x1232, 0x0024},
{0x0219, 0x0032}, {0x0200, 0x03e8}, {0x0201, 0x03e8}, {0x0202, 0x03e8},
{0x0203, 0x03e8}, {0x0204, 0x03e8}, {0x0205, 0x03e8}, {0x0206, 0x03e8},
{0x0207, 0x03e8}, {0x0218, 0x0032}, {0x0208, 0x029b}, {0x0209, 0x029b},
{0x020a, 0x029b}, {0x020b, 0x029b}, {0x020c, 0x029b}, {0x020d, 0x029b},
{0x020e, 0x029b}, {0x020f, 0x029b}, {0x0210, 0x029b}, {0x0211, 0x029b},
{0x0212, 0x029b}, {0x0213, 0x029b}, {0x0214, 0x029b}, {0x0215, 0x029b},
{0x0216, 0x029b}, {0x0217, 0x029b}, {0x0900, 0x0000}, {0x0901, 0x0000},
{0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087b, 0x0000},
{0x087c, 0xff00}, {0x087d, 0x0000}, {0x087e, 0x0000}, {0x0801, 0x0100},
{0x0802, 0x0100}, {0x1700, 0x014C}, {0x0301, 0x00FF}, {0x12AA, 0x0096},
{0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0005}, {0x2200, 0x00C4},
{0x221f, 0x0000}, {0x2210, 0x05EF}, {0x2204, 0x05E1}, {0x2200, 0x1340},
{0x133f, 0x0010}, {0x20A0, 0x1940}, {0x20C0, 0x1940}, {0x20E0, 0x1940},
};
static const struct rtl8367_initval rtl8367_initvals_0_1[] = {
{0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0000}, {0x2215, 0x1006},
{0x221f, 0x0005}, {0x2200, 0x00c6}, {0x221f, 0x0007}, {0x221e, 0x0048},
{0x2215, 0x6412}, {0x2216, 0x6412}, {0x2217, 0x6412}, {0x2218, 0x6412},
{0x2219, 0x6412}, {0x221A, 0x6412}, {0x221f, 0x0001}, {0x220c, 0xdbf0},
{0x2209, 0x2576}, {0x2207, 0x287E}, {0x220A, 0x68E5}, {0x221D, 0x3DA4},
{0x221C, 0xE7F7}, {0x2214, 0x7F52}, {0x2218, 0x7FCE}, {0x2208, 0x04B7},
{0x2206, 0x4072}, {0x2210, 0xF05E}, {0x221B, 0xB414}, {0x221F, 0x0003},
{0x221A, 0x06A6}, {0x2210, 0xF05E}, {0x2213, 0x06EB}, {0x2212, 0xF4D2},
{0x220E, 0xE120}, {0x2200, 0x7C00}, {0x2202, 0x5FD0}, {0x220D, 0x0207},
{0x221f, 0x0002}, {0x2205, 0x0978}, {0x2202, 0x8C01}, {0x2207, 0x3620},
{0x221C, 0x0001}, {0x2203, 0x0420}, {0x2204, 0x80C8}, {0x133e, 0x0ede},
{0x221f, 0x0002}, {0x220c, 0x0073}, {0x220d, 0xEB65}, {0x220e, 0x51d1},
{0x220f, 0x5dcb}, {0x2210, 0x3044}, {0x2211, 0x1800}, {0x2212, 0x7E00},
{0x2213, 0x0000}, {0x133f, 0x0010}, {0x133e, 0x0ffe}, {0x207f, 0x0002},
{0x2074, 0x3D22}, {0x2075, 0x2000}, {0x2076, 0x6040}, {0x2077, 0x0000},
{0x2078, 0x0f0a}, {0x2079, 0x50AB}, {0x207a, 0x0000}, {0x207b, 0x0f0f},
{0x205f, 0x0002}, {0x2054, 0xFF00}, {0x2055, 0x000A}, {0x2056, 0x000A},
{0x2057, 0x0005}, {0x2058, 0x0005}, {0x2059, 0x0000}, {0x205A, 0x0005},
{0x205B, 0x0005}, {0x205C, 0x0005}, {0x209f, 0x0002}, {0x2094, 0x00AA},
{0x2095, 0x00AA}, {0x2096, 0x00AA}, {0x2097, 0x00AA}, {0x2098, 0x0055},
{0x2099, 0x00AA}, {0x209A, 0x00AA}, {0x209B, 0x00AA}, {0x1363, 0x8354},
{0x1270, 0x3333}, {0x1271, 0x3333}, {0x1272, 0x3333}, {0x1330, 0x00DB},
{0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x121d, 0x1b06}, {0x121e, 0x07f0},
{0x121f, 0x0438}, {0x1220, 0x040f}, {0x1221, 0x040f}, {0x1222, 0x03eb},
{0x1223, 0x0438}, {0x1224, 0x040f}, {0x1225, 0x040f}, {0x1226, 0x03eb},
{0x1227, 0x0144}, {0x1228, 0x0138}, {0x122f, 0x0144}, {0x1230, 0x0138},
{0x1229, 0x0020}, {0x122a, 0x000c}, {0x1231, 0x0030}, {0x1232, 0x0024},
{0x0219, 0x0032}, {0x0200, 0x07d0}, {0x0201, 0x07d0}, {0x0202, 0x07d0},
{0x0203, 0x07d0}, {0x0204, 0x07d0}, {0x0205, 0x07d0}, {0x0206, 0x07d0},
{0x0207, 0x07d0}, {0x0218, 0x0032}, {0x0208, 0x0190}, {0x0209, 0x0190},
{0x020a, 0x0190}, {0x020b, 0x0190}, {0x020c, 0x0190}, {0x020d, 0x0190},
{0x020e, 0x0190}, {0x020f, 0x0190}, {0x0210, 0x0190}, {0x0211, 0x0190},
{0x0212, 0x0190}, {0x0213, 0x0190}, {0x0214, 0x0190}, {0x0215, 0x0190},
{0x0216, 0x0190}, {0x0217, 0x0190}, {0x0900, 0x0000}, {0x0901, 0x0000},
{0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087b, 0x0000},
{0x087c, 0xff00}, {0x087d, 0x0000}, {0x087e, 0x0000}, {0x0801, 0x0100},
{0x0802, 0x0100}, {0x1700, 0x0125}, {0x0301, 0x00FF}, {0x12AA, 0x0096},
{0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0005}, {0x2200, 0x00C4},
{0x221f, 0x0000}, {0x2210, 0x05EF}, {0x2204, 0x05E1}, {0x2200, 0x1340},
{0x133f, 0x0010},
};
static const struct rtl8367_initval rtl8367_initvals_1_0[] = {
{0x1B24, 0x0000}, {0x1B25, 0x0000}, {0x1B26, 0x0000}, {0x1B27, 0x0000},
{0x207F, 0x0002}, {0x2079, 0x0200}, {0x207F, 0x0000}, {0x133F, 0x0030},
{0x133E, 0x000E}, {0x221F, 0x0005}, {0x2201, 0x0700}, {0x2205, 0x8B82},
{0x2206, 0x05CB}, {0x221F, 0x0002}, {0x2204, 0x80C2}, {0x2205, 0x0938},
{0x221F, 0x0003}, {0x2212, 0xC4D2}, {0x220D, 0x0207}, {0x221F, 0x0001},
{0x2207, 0x267E}, {0x221C, 0xE5F7}, {0x221B, 0x0424}, {0x221F, 0x0007},
{0x221E, 0x0040}, {0x2218, 0x0000}, {0x221F, 0x0007}, {0x221E, 0x002C},
{0x2218, 0x008B}, {0x221F, 0x0005}, {0x2205, 0xFFF6}, {0x2206, 0x0080},
{0x2205, 0x8000}, {0x2206, 0xF8E0}, {0x2206, 0xE000}, {0x2206, 0xE1E0},
{0x2206, 0x01AC}, {0x2206, 0x2408}, {0x2206, 0xE08B}, {0x2206, 0x84F7},
{0x2206, 0x20E4}, {0x2206, 0x8B84}, {0x2206, 0xFC05}, {0x2206, 0xF8FA},
{0x2206, 0xEF69}, {0x2206, 0xE08B}, {0x2206, 0x86AC}, {0x2206, 0x201A},
{0x2206, 0xBF80}, {0x2206, 0x59D0}, {0x2206, 0x2402}, {0x2206, 0x803D},
{0x2206, 0xE0E0}, {0x2206, 0xE4E1}, {0x2206, 0xE0E5}, {0x2206, 0x5806},
{0x2206, 0x68C0}, {0x2206, 0xD1D2}, {0x2206, 0xE4E0}, {0x2206, 0xE4E5},
{0x2206, 0xE0E5}, {0x2206, 0xEF96}, {0x2206, 0xFEFC}, {0x2206, 0x05FB},
{0x2206, 0x0BFB}, {0x2206, 0x58FF}, {0x2206, 0x9E11}, {0x2206, 0x06F0},
{0x2206, 0x0C81}, {0x2206, 0x8AE0}, {0x2206, 0x0019}, {0x2206, 0x1B89},
{0x2206, 0xCFEB}, {0x2206, 0x19EB}, {0x2206, 0x19B0}, {0x2206, 0xEFFF},
{0x2206, 0x0BFF}, {0x2206, 0x0425}, {0x2206, 0x0807}, {0x2206, 0x2640},
{0x2206, 0x7227}, {0x2206, 0x267E}, {0x2206, 0x2804}, {0x2206, 0xB729},
{0x2206, 0x2576}, {0x2206, 0x2A68}, {0x2206, 0xE52B}, {0x2206, 0xAD00},
{0x2206, 0x2CDB}, {0x2206, 0xF02D}, {0x2206, 0x67BB}, {0x2206, 0x2E7B},
{0x2206, 0x0F2F}, {0x2206, 0x7365}, {0x2206, 0x31AC}, {0x2206, 0xCC32},
{0x2206, 0x2300}, {0x2206, 0x332D}, {0x2206, 0x1734}, {0x2206, 0x7F52},
{0x2206, 0x3510}, {0x2206, 0x0036}, {0x2206, 0x0600}, {0x2206, 0x370C},
{0x2206, 0xC038}, {0x2206, 0x7FCE}, {0x2206, 0x3CE5}, {0x2206, 0xF73D},
{0x2206, 0x3DA4}, {0x2206, 0x6530}, {0x2206, 0x3E67}, {0x2206, 0x0053},
{0x2206, 0x69D2}, {0x2206, 0x0F6A}, {0x2206, 0x012C}, {0x2206, 0x6C2B},
{0x2206, 0x136E}, {0x2206, 0xE100}, {0x2206, 0x6F12}, {0x2206, 0xF771},
{0x2206, 0x006B}, {0x2206, 0x7306}, {0x2206, 0xEB74}, {0x2206, 0x94C7},
{0x2206, 0x7698}, {0x2206, 0x0A77}, {0x2206, 0x5000}, {0x2206, 0x788A},
{0x2206, 0x1579}, {0x2206, 0x7F6F}, {0x2206, 0x7A06}, {0x2206, 0xA600},
{0x2205, 0x8B90}, {0x2206, 0x8000}, {0x2205, 0x8B92}, {0x2206, 0x8000},
{0x2205, 0x8B94}, {0x2206, 0x8014}, {0x2208, 0xFFFA}, {0x2202, 0x3C65},
{0x2205, 0xFFF6}, {0x2206, 0x00F7}, {0x221F, 0x0000}, {0x221F, 0x0007},
{0x221E, 0x0042}, {0x2218, 0x0000}, {0x221E, 0x002D}, {0x2218, 0xF010},
{0x221E, 0x0020}, {0x2215, 0x0000}, {0x221E, 0x0023}, {0x2216, 0x8000},
{0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE}, {0x1362, 0x0115},
{0x1363, 0x0002}, {0x1363, 0x0000}, {0x1306, 0x000C}, {0x1307, 0x000C},
{0x1303, 0x0067}, {0x1304, 0x4444}, {0x1203, 0xFF00}, {0x1200, 0x7FC4},
{0x121D, 0x7D16}, {0x121E, 0x03E8}, {0x121F, 0x024E}, {0x1220, 0x0230},
{0x1221, 0x0244}, {0x1222, 0x0226}, {0x1223, 0x024E}, {0x1224, 0x0230},
{0x1225, 0x0244}, {0x1226, 0x0226}, {0x1227, 0x00C0}, {0x1228, 0x00B4},
{0x122F, 0x00C0}, {0x1230, 0x00B4}, {0x0208, 0x03E8}, {0x0209, 0x03E8},
{0x020A, 0x03E8}, {0x020B, 0x03E8}, {0x020C, 0x03E8}, {0x020D, 0x03E8},
{0x020E, 0x03E8}, {0x020F, 0x03E8}, {0x0210, 0x03E8}, {0x0211, 0x03E8},
{0x0212, 0x03E8}, {0x0213, 0x03E8}, {0x0214, 0x03E8}, {0x0215, 0x03E8},
{0x0216, 0x03E8}, {0x0217, 0x03E8}, {0x0900, 0x0000}, {0x0901, 0x0000},
{0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210}, {0x087B, 0x0000},
{0x087C, 0xFF00}, {0x087D, 0x0000}, {0x087E, 0x0000}, {0x0801, 0x0100},
{0x0802, 0x0100}, {0x0A20, 0x2040}, {0x0A21, 0x2040}, {0x0A22, 0x2040},
{0x0A23, 0x2040}, {0x0A24, 0x2040}, {0x0A28, 0x2040}, {0x0A29, 0x2040},
{0x133F, 0x0030}, {0x133E, 0x000E}, {0x221F, 0x0000}, {0x2200, 0x1340},
{0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE}, {0x20A0, 0x1940},
{0x20C0, 0x1940}, {0x20E0, 0x1940}, {0x130c, 0x0050},
};
static const struct rtl8367_initval rtl8367_initvals_1_1[] = {
{0x1B24, 0x0000}, {0x1B25, 0x0000}, {0x1B26, 0x0000}, {0x1B27, 0x0000},
{0x207F, 0x0002}, {0x2079, 0x0200}, {0x207F, 0x0000}, {0x133F, 0x0030},
{0x133E, 0x000E}, {0x221F, 0x0005}, {0x2201, 0x0700}, {0x2205, 0x8B82},
{0x2206, 0x05CB}, {0x221F, 0x0002}, {0x2204, 0x80C2}, {0x2205, 0x0938},
{0x221F, 0x0003}, {0x2212, 0xC4D2}, {0x220D, 0x0207}, {0x221F, 0x0001},
{0x2207, 0x267E}, {0x221C, 0xE5F7}, {0x221B, 0x0424}, {0x221F, 0x0007},
{0x221E, 0x0040}, {0x2218, 0x0000}, {0x221F, 0x0007}, {0x221E, 0x002C},
{0x2218, 0x008B}, {0x221F, 0x0005}, {0x2205, 0xFFF6}, {0x2206, 0x0080},
{0x2205, 0x8000}, {0x2206, 0xF8E0}, {0x2206, 0xE000}, {0x2206, 0xE1E0},
{0x2206, 0x01AC}, {0x2206, 0x2408}, {0x2206, 0xE08B}, {0x2206, 0x84F7},
{0x2206, 0x20E4}, {0x2206, 0x8B84}, {0x2206, 0xFC05}, {0x2206, 0xF8FA},
{0x2206, 0xEF69}, {0x2206, 0xE08B}, {0x2206, 0x86AC}, {0x2206, 0x201A},
{0x2206, 0xBF80}, {0x2206, 0x59D0}, {0x2206, 0x2402}, {0x2206, 0x803D},
{0x2206, 0xE0E0}, {0x2206, 0xE4E1}, {0x2206, 0xE0E5}, {0x2206, 0x5806},
{0x2206, 0x68C0}, {0x2206, 0xD1D2}, {0x2206, 0xE4E0}, {0x2206, 0xE4E5},
{0x2206, 0xE0E5}, {0x2206, 0xEF96}, {0x2206, 0xFEFC}, {0x2206, 0x05FB},
{0x2206, 0x0BFB}, {0x2206, 0x58FF}, {0x2206, 0x9E11}, {0x2206, 0x06F0},
{0x2206, 0x0C81}, {0x2206, 0x8AE0}, {0x2206, 0x0019}, {0x2206, 0x1B89},
{0x2206, 0xCFEB}, {0x2206, 0x19EB}, {0x2206, 0x19B0}, {0x2206, 0xEFFF},
{0x2206, 0x0BFF}, {0x2206, 0x0425}, {0x2206, 0x0807}, {0x2206, 0x2640},
{0x2206, 0x7227}, {0x2206, 0x267E}, {0x2206, 0x2804}, {0x2206, 0xB729},
{0x2206, 0x2576}, {0x2206, 0x2A68}, {0x2206, 0xE52B}, {0x2206, 0xAD00},
{0x2206, 0x2CDB}, {0x2206, 0xF02D}, {0x2206, 0x67BB}, {0x2206, 0x2E7B},
{0x2206, 0x0F2F}, {0x2206, 0x7365}, {0x2206, 0x31AC}, {0x2206, 0xCC32},
{0x2206, 0x2300}, {0x2206, 0x332D}, {0x2206, 0x1734}, {0x2206, 0x7F52},
{0x2206, 0x3510}, {0x2206, 0x0036}, {0x2206, 0x0600}, {0x2206, 0x370C},
{0x2206, 0xC038}, {0x2206, 0x7FCE}, {0x2206, 0x3CE5}, {0x2206, 0xF73D},
{0x2206, 0x3DA4}, {0x2206, 0x6530}, {0x2206, 0x3E67}, {0x2206, 0x0053},
{0x2206, 0x69D2}, {0x2206, 0x0F6A}, {0x2206, 0x012C}, {0x2206, 0x6C2B},
{0x2206, 0x136E}, {0x2206, 0xE100}, {0x2206, 0x6F12}, {0x2206, 0xF771},
{0x2206, 0x006B}, {0x2206, 0x7306}, {0x2206, 0xEB74}, {0x2206, 0x94C7},
{0x2206, 0x7698}, {0x2206, 0x0A77}, {0x2206, 0x5000}, {0x2206, 0x788A},
{0x2206, 0x1579}, {0x2206, 0x7F6F}, {0x2206, 0x7A06}, {0x2206, 0xA600},
{0x2205, 0x8B90}, {0x2206, 0x8000}, {0x2205, 0x8B92}, {0x2206, 0x8000},
{0x2205, 0x8B94}, {0x2206, 0x8014}, {0x2208, 0xFFFA}, {0x2202, 0x3C65},
{0x2205, 0xFFF6}, {0x2206, 0x00F7}, {0x221F, 0x0000}, {0x221F, 0x0007},
{0x221E, 0x0042}, {0x2218, 0x0000}, {0x221E, 0x002D}, {0x2218, 0xF010},
{0x221E, 0x0020}, {0x2215, 0x0000}, {0x221E, 0x0023}, {0x2216, 0x8000},
{0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE}, {0x1362, 0x0115},
{0x1363, 0x0002}, {0x1363, 0x0000}, {0x1306, 0x000C}, {0x1307, 0x000C},
{0x1303, 0x0067}, {0x1304, 0x4444}, {0x1203, 0xFF00}, {0x1200, 0x7FC4},
{0x0900, 0x0000}, {0x0901, 0x0000}, {0x0902, 0x0000}, {0x0903, 0x0000},
{0x0865, 0x3210}, {0x087B, 0x0000}, {0x087C, 0xFF00}, {0x087D, 0x0000},
{0x087E, 0x0000}, {0x0801, 0x0100}, {0x0802, 0x0100}, {0x0A20, 0x2040},
{0x0A21, 0x2040}, {0x0A22, 0x2040}, {0x0A23, 0x2040}, {0x0A24, 0x2040},
{0x0A25, 0x2040}, {0x0A26, 0x2040}, {0x0A27, 0x2040}, {0x0A28, 0x2040},
{0x0A29, 0x2040}, {0x133F, 0x0030}, {0x133E, 0x000E}, {0x221F, 0x0000},
{0x2200, 0x1340}, {0x221F, 0x0000}, {0x133F, 0x0010}, {0x133E, 0x0FFE},
{0x1B03, 0x0876},
};
static const struct rtl8367_initval rtl8367_initvals_2_0[] = {
{0x1b24, 0x0000}, {0x1b25, 0x0000}, {0x1b26, 0x0000}, {0x1b27, 0x0000},
{0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0007}, {0x221e, 0x0048},
{0x2219, 0x4012}, {0x221f, 0x0003}, {0x2201, 0x3554}, {0x2202, 0x63e8},
{0x2203, 0x99c2}, {0x2204, 0x0113}, {0x2205, 0x303e}, {0x220d, 0x0207},
{0x220e, 0xe100}, {0x221f, 0x0007}, {0x221e, 0x0040}, {0x2218, 0x0000},
{0x221f, 0x0007}, {0x221e, 0x002c}, {0x2218, 0x008b}, {0x221f, 0x0005},
{0x2205, 0xfff6}, {0x2206, 0x0080}, {0x221f, 0x0005}, {0x2205, 0x8000},
{0x2206, 0x0280}, {0x2206, 0x2bf7}, {0x2206, 0x00e0}, {0x2206, 0xfff7},
{0x2206, 0xa080}, {0x2206, 0x02ae}, {0x2206, 0xf602}, {0x2206, 0x804e},
{0x2206, 0x0201}, {0x2206, 0x5002}, {0x2206, 0x0163}, {0x2206, 0x0201},
{0x2206, 0x79e0}, {0x2206, 0x8b8c}, {0x2206, 0xe18b}, {0x2206, 0x8d1e},
{0x2206, 0x01e1}, {0x2206, 0x8b8e}, {0x2206, 0x1e01}, {0x2206, 0xa000},
{0x2206, 0xe4ae}, {0x2206, 0xd8bf}, {0x2206, 0x8b88}, {0x2206, 0xec00},
{0x2206, 0x19a9}, {0x2206, 0x8b90}, {0x2206, 0xf9ee}, {0x2206, 0xfff6},
{0x2206, 0x00ee}, {0x2206, 0xfff7}, {0x2206, 0xfce0}, {0x2206, 0xe140},
{0x2206, 0xe1e1}, {0x2206, 0x41f7}, {0x2206, 0x2ff6}, {0x2206, 0x28e4},
{0x2206, 0xe140}, {0x2206, 0xe5e1}, {0x2206, 0x4104}, {0x2206, 0xf8fa},
{0x2206, 0xef69}, {0x2206, 0xe08b}, {0x2206, 0x86ac}, {0x2206, 0x201a},
{0x2206, 0xbf80}, {0x2206, 0x77d0}, {0x2206, 0x6c02}, {0x2206, 0x2978},
{0x2206, 0xe0e0}, {0x2206, 0xe4e1}, {0x2206, 0xe0e5}, {0x2206, 0x5806},
{0x2206, 0x68c0}, {0x2206, 0xd1d2}, {0x2206, 0xe4e0}, {0x2206, 0xe4e5},
{0x2206, 0xe0e5}, {0x2206, 0xef96}, {0x2206, 0xfefc}, {0x2206, 0x0425},
{0x2206, 0x0807}, {0x2206, 0x2640}, {0x2206, 0x7227}, {0x2206, 0x267e},
{0x2206, 0x2804}, {0x2206, 0xb729}, {0x2206, 0x2576}, {0x2206, 0x2a68},
{0x2206, 0xe52b}, {0x2206, 0xad00}, {0x2206, 0x2cdb}, {0x2206, 0xf02d},
{0x2206, 0x67bb}, {0x2206, 0x2e7b}, {0x2206, 0x0f2f}, {0x2206, 0x7365},
{0x2206, 0x31ac}, {0x2206, 0xcc32}, {0x2206, 0x2300}, {0x2206, 0x332d},
{0x2206, 0x1734}, {0x2206, 0x7f52}, {0x2206, 0x3510}, {0x2206, 0x0036},
{0x2206, 0x0600}, {0x2206, 0x370c}, {0x2206, 0xc038}, {0x2206, 0x7fce},
{0x2206, 0x3ce5}, {0x2206, 0xf73d}, {0x2206, 0x3da4}, {0x2206, 0x6530},
{0x2206, 0x3e67}, {0x2206, 0x0053}, {0x2206, 0x69d2}, {0x2206, 0x0f6a},
{0x2206, 0x012c}, {0x2206, 0x6c2b}, {0x2206, 0x136e}, {0x2206, 0xe100},
{0x2206, 0x6f12}, {0x2206, 0xf771}, {0x2206, 0x006b}, {0x2206, 0x7306},
{0x2206, 0xeb74}, {0x2206, 0x94c7}, {0x2206, 0x7698}, {0x2206, 0x0a77},
{0x2206, 0x5000}, {0x2206, 0x788a}, {0x2206, 0x1579}, {0x2206, 0x7f6f},
{0x2206, 0x7a06}, {0x2206, 0xa600}, {0x2201, 0x0701}, {0x2200, 0x0405},
{0x221f, 0x0000}, {0x2200, 0x1340}, {0x221f, 0x0000}, {0x133f, 0x0010},
{0x133e, 0x0ffe}, {0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x121d, 0x7D16},
{0x121e, 0x03e8}, {0x121f, 0x024e}, {0x1220, 0x0230}, {0x1221, 0x0244},
{0x1222, 0x0226}, {0x1223, 0x024e}, {0x1224, 0x0230}, {0x1225, 0x0244},
{0x1226, 0x0226}, {0x1227, 0x00c0}, {0x1228, 0x00b4}, {0x122f, 0x00c0},
{0x1230, 0x00b4}, {0x0208, 0x03e8}, {0x0209, 0x03e8}, {0x020a, 0x03e8},
{0x020b, 0x03e8}, {0x020c, 0x03e8}, {0x020d, 0x03e8}, {0x020e, 0x03e8},
{0x020f, 0x03e8}, {0x0210, 0x03e8}, {0x0211, 0x03e8}, {0x0212, 0x03e8},
{0x0213, 0x03e8}, {0x0214, 0x03e8}, {0x0215, 0x03e8}, {0x0216, 0x03e8},
{0x0217, 0x03e8}, {0x0900, 0x0000}, {0x0901, 0x0000}, {0x0902, 0x0000},
{0x0903, 0x0000}, {0x0865, 0x3210}, {0x087b, 0x0000}, {0x087c, 0xff00},
{0x087d, 0x0000}, {0x087e, 0x0000}, {0x0801, 0x0100}, {0x0802, 0x0100},
{0x0A20, 0x2040}, {0x0A21, 0x2040}, {0x0A22, 0x2040}, {0x0A23, 0x2040},
{0x0A24, 0x2040}, {0x0A28, 0x2040}, {0x0A29, 0x2040}, {0x20A0, 0x1940},
{0x20C0, 0x1940}, {0x20E0, 0x1940}, {0x130c, 0x0050},
};
static const struct rtl8367_initval rtl8367_initvals_2_1[] = {
{0x1b24, 0x0000}, {0x1b25, 0x0000}, {0x1b26, 0x0000}, {0x1b27, 0x0000},
{0x133f, 0x0030}, {0x133e, 0x000e}, {0x221f, 0x0007}, {0x221e, 0x0048},
{0x2219, 0x4012}, {0x221f, 0x0003}, {0x2201, 0x3554}, {0x2202, 0x63e8},
{0x2203, 0x99c2}, {0x2204, 0x0113}, {0x2205, 0x303e}, {0x220d, 0x0207},
{0x220e, 0xe100}, {0x221f, 0x0007}, {0x221e, 0x0040}, {0x2218, 0x0000},
{0x221f, 0x0007}, {0x221e, 0x002c}, {0x2218, 0x008b}, {0x221f, 0x0005},
{0x2205, 0xfff6}, {0x2206, 0x0080}, {0x221f, 0x0005}, {0x2205, 0x8000},
{0x2206, 0x0280}, {0x2206, 0x2bf7}, {0x2206, 0x00e0}, {0x2206, 0xfff7},
{0x2206, 0xa080}, {0x2206, 0x02ae}, {0x2206, 0xf602}, {0x2206, 0x804e},
{0x2206, 0x0201}, {0x2206, 0x5002}, {0x2206, 0x0163}, {0x2206, 0x0201},
{0x2206, 0x79e0}, {0x2206, 0x8b8c}, {0x2206, 0xe18b}, {0x2206, 0x8d1e},
{0x2206, 0x01e1}, {0x2206, 0x8b8e}, {0x2206, 0x1e01}, {0x2206, 0xa000},
{0x2206, 0xe4ae}, {0x2206, 0xd8bf}, {0x2206, 0x8b88}, {0x2206, 0xec00},
{0x2206, 0x19a9}, {0x2206, 0x8b90}, {0x2206, 0xf9ee}, {0x2206, 0xfff6},
{0x2206, 0x00ee}, {0x2206, 0xfff7}, {0x2206, 0xfce0}, {0x2206, 0xe140},
{0x2206, 0xe1e1}, {0x2206, 0x41f7}, {0x2206, 0x2ff6}, {0x2206, 0x28e4},
{0x2206, 0xe140}, {0x2206, 0xe5e1}, {0x2206, 0x4104}, {0x2206, 0xf8fa},
{0x2206, 0xef69}, {0x2206, 0xe08b}, {0x2206, 0x86ac}, {0x2206, 0x201a},
{0x2206, 0xbf80}, {0x2206, 0x77d0}, {0x2206, 0x6c02}, {0x2206, 0x2978},
{0x2206, 0xe0e0}, {0x2206, 0xe4e1}, {0x2206, 0xe0e5}, {0x2206, 0x5806},
{0x2206, 0x68c0}, {0x2206, 0xd1d2}, {0x2206, 0xe4e0}, {0x2206, 0xe4e5},
{0x2206, 0xe0e5}, {0x2206, 0xef96}, {0x2206, 0xfefc}, {0x2206, 0x0425},
{0x2206, 0x0807}, {0x2206, 0x2640}, {0x2206, 0x7227}, {0x2206, 0x267e},
{0x2206, 0x2804}, {0x2206, 0xb729}, {0x2206, 0x2576}, {0x2206, 0x2a68},
{0x2206, 0xe52b}, {0x2206, 0xad00}, {0x2206, 0x2cdb}, {0x2206, 0xf02d},
{0x2206, 0x67bb}, {0x2206, 0x2e7b}, {0x2206, 0x0f2f}, {0x2206, 0x7365},
{0x2206, 0x31ac}, {0x2206, 0xcc32}, {0x2206, 0x2300}, {0x2206, 0x332d},
{0x2206, 0x1734}, {0x2206, 0x7f52}, {0x2206, 0x3510}, {0x2206, 0x0036},
{0x2206, 0x0600}, {0x2206, 0x370c}, {0x2206, 0xc038}, {0x2206, 0x7fce},
{0x2206, 0x3ce5}, {0x2206, 0xf73d}, {0x2206, 0x3da4}, {0x2206, 0x6530},
{0x2206, 0x3e67}, {0x2206, 0x0053}, {0x2206, 0x69d2}, {0x2206, 0x0f6a},
{0x2206, 0x012c}, {0x2206, 0x6c2b}, {0x2206, 0x136e}, {0x2206, 0xe100},
{0x2206, 0x6f12}, {0x2206, 0xf771}, {0x2206, 0x006b}, {0x2206, 0x7306},
{0x2206, 0xeb74}, {0x2206, 0x94c7}, {0x2206, 0x7698}, {0x2206, 0x0a77},
{0x2206, 0x5000}, {0x2206, 0x788a}, {0x2206, 0x1579}, {0x2206, 0x7f6f},
{0x2206, 0x7a06}, {0x2206, 0xa600}, {0x2201, 0x0701}, {0x2200, 0x0405},
{0x221f, 0x0000}, {0x2200, 0x1340}, {0x221f, 0x0000}, {0x133f, 0x0010},
{0x133e, 0x0ffe}, {0x1203, 0xff00}, {0x1200, 0x7fc4}, {0x0900, 0x0000},
{0x0901, 0x0000}, {0x0902, 0x0000}, {0x0903, 0x0000}, {0x0865, 0x3210},
{0x087b, 0x0000}, {0x087c, 0xff00}, {0x087d, 0x0000}, {0x087e, 0x0000},
{0x0801, 0x0100}, {0x0802, 0x0100}, {0x0A20, 0x2040}, {0x0A21, 0x2040},
{0x0A22, 0x2040}, {0x0A23, 0x2040}, {0x0A24, 0x2040}, {0x0A25, 0x2040},
{0x0A26, 0x2040}, {0x0A27, 0x2040}, {0x0A28, 0x2040}, {0x0A29, 0x2040},
{0x130c, 0x0050},
};
static int rtl8367_write_initvals(struct rtl8366_smi *smi,
const struct rtl8367_initval *initvals,
int count)
{
int err;
int i;
for (i = 0; i < count; i++)
REG_WR(smi, initvals[i].reg, initvals[i].val);
return 0;
}
static int rtl8367_read_phy_reg(struct rtl8366_smi *smi,
u32 phy_addr, u32 phy_reg, u32 *val)
{
int timeout;
u32 data;
int err;
if (phy_addr > RTL8367_PHY_ADDR_MAX)
return -EINVAL;
if (phy_reg > RTL8367_PHY_REG_MAX)
return -EINVAL;
REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
if (data & RTL8367_IA_STATUS_PHY_BUSY)
return -ETIMEDOUT;
/* prepare address */
REG_WR(smi, RTL8367_IA_ADDRESS_REG,
RTL8367_INTERNAL_PHY_REG(phy_addr, phy_reg));
/* send read command */
REG_WR(smi, RTL8367_IA_CTRL_REG,
RTL8367_IA_CTRL_CMD_MASK | RTL8367_IA_CTRL_RW_READ);
timeout = 5;
do {
REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
if ((data & RTL8367_IA_STATUS_PHY_BUSY) == 0)
break;
if (!timeout--) {
dev_err(smi->parent, "phy read timed out\n");
return -ETIMEDOUT;
}
udelay(1);
} while (1);
/* read data */
REG_RD(smi, RTL8367_IA_READ_DATA_REG, val);
dev_dbg(smi->parent, "phy_read: addr:%02x, reg:%02x, val:%04x\n",
phy_addr, phy_reg, *val);
return 0;
}
static int rtl8367_write_phy_reg(struct rtl8366_smi *smi,
u32 phy_addr, u32 phy_reg, u32 val)
{
int timeout;
u32 data;
int err;
dev_dbg(smi->parent, "phy_write: addr:%02x, reg:%02x, val:%04x\n",
phy_addr, phy_reg, val);
if (phy_addr > RTL8367_PHY_ADDR_MAX)
return -EINVAL;
if (phy_reg > RTL8367_PHY_REG_MAX)
return -EINVAL;
REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
if (data & RTL8367_IA_STATUS_PHY_BUSY)
return -ETIMEDOUT;
/* prepare data */
REG_WR(smi, RTL8367_IA_WRITE_DATA_REG, val);
/* prepare address */
REG_WR(smi, RTL8367_IA_ADDRESS_REG,
RTL8367_INTERNAL_PHY_REG(phy_addr, phy_reg));
/* send write command */
REG_WR(smi, RTL8367_IA_CTRL_REG,
RTL8367_IA_CTRL_CMD_MASK | RTL8367_IA_CTRL_RW_WRITE);
timeout = 5;
do {
REG_RD(smi, RTL8367_IA_STATUS_REG, &data);
if ((data & RTL8367_IA_STATUS_PHY_BUSY) == 0)
break;
if (!timeout--) {
dev_err(smi->parent, "phy write timed out\n");
return -ETIMEDOUT;
}
udelay(1);
} while (1);
return 0;
}
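/*
 * Usage sketch, illustrative only (it mirrors rtl8367_init_regs()
 * below): writing 5 to PHY register 31 selects PHY page 5, after
 * which page-local registers can be accessed:
 *
 * err = rtl8367_write_phy_reg(smi, 0, 31, 5);
 * if (!err)
 * err = rtl8367_write_phy_reg(smi, 0, 5, 0x3ffe);
 */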
static int rtl8367_init_regs0(struct rtl8366_smi *smi, unsigned mode)
{
const struct rtl8367_initval *initvals;
int count;
int err;
switch (mode) {
case 0:
initvals = rtl8367_initvals_0_0;
count = ARRAY_SIZE(rtl8367_initvals_0_0);
break;
case 1:
case 2:
initvals = rtl8367_initvals_0_1;
count = ARRAY_SIZE(rtl8367_initvals_0_1);
break;
default:
dev_err(smi->parent, "%s: unknow mode %u\n", __func__, mode);
return -ENODEV;
}
err = rtl8367_write_initvals(smi, initvals, count);
if (err)
return err;
/* TODO: complete this */
return 0;
}
static int rtl8367_init_regs1(struct rtl8366_smi *smi, unsigned mode)
{
const struct rtl8367_initval *initvals;
int count;
switch (mode) {
case 0:
initvals = rtl8367_initvals_1_0;
count = ARRAY_SIZE(rtl8367_initvals_1_0);
break;
case 1:
case 2:
initvals = rtl8367_initvals_1_1;
count = ARRAY_SIZE(rtl8367_initvals_1_1);
break;
default:
dev_err(smi->parent, "%s: unknow mode %u\n", __func__, mode);
return -ENODEV;
}
return rtl8367_write_initvals(smi, initvals, count);
}
static int rtl8367_init_regs2(struct rtl8366_smi *smi, unsigned mode)
{
const struct rtl8367_initval *initvals;
int count;
switch (mode) {
case 0:
initvals = rtl8367_initvals_2_0;
count = ARRAY_SIZE(rtl8367_initvals_2_0);
break;
case 1:
case 2:
initvals = rtl8367_initvals_2_1;
count = ARRAY_SIZE(rtl8367_initvals_2_1);
break;
default:
dev_err(smi->parent, "%s: unknow mode %u\n", __func__, mode);
return -ENODEV;
}
return rtl8367_write_initvals(smi, initvals, count);
}
static int rtl8367_init_regs(struct rtl8366_smi *smi)
{
u32 data;
u32 rlvid;
u32 mode;
int err;
REG_WR(smi, RTL8367_RTL_MAGIC_ID_REG, RTL8367_RTL_MAGIC_ID_VAL);
REG_RD(smi, RTL8367_CHIP_VER_REG, &data);
rlvid = (data >> RTL8367_CHIP_VER_RLVID_SHIFT) &
RTL8367_CHIP_VER_RLVID_MASK;
REG_RD(smi, RTL8367_CHIP_MODE_REG, &data);
mode = data & RTL8367_CHIP_MODE_MASK;
switch (rlvid) {
case 0:
err = rtl8367_init_regs0(smi, mode);
break;
case 1:
err = rtl8367_write_phy_reg(smi, 0, 31, 5);
if (err)
break;
err = rtl8367_write_phy_reg(smi, 0, 5, 0x3ffe);
if (err)
break;
err = rtl8367_read_phy_reg(smi, 0, 6, &data);
if (err)
break;
if (data == 0x94eb) {
err = rtl8367_init_regs1(smi, mode);
} else if (data == 0x2104) {
err = rtl8367_init_regs2(smi, mode);
} else {
dev_err(smi->parent, "unknow phy data %04x\n", data);
return -ENODEV;
}
break;
default:
dev_err(smi->parent, "unknow rlvid %u\n", rlvid);
err = -ENODEV;
break;
}
return err;
}
static int rtl8367_reset_chip(struct rtl8366_smi *smi)
{
int timeout = 10;
int err;
u32 data;
REG_WR(smi, RTL8367_CHIP_RESET_REG, RTL8367_CHIP_RESET_HW);
msleep(RTL8367_RESET_DELAY);
do {
REG_RD(smi, RTL8367_CHIP_RESET_REG, &data);
if (!(data & RTL8367_CHIP_RESET_HW))
break;
msleep(1);
} while (--timeout);
if (!timeout) {
dev_err(smi->parent, "chip reset timed out\n");
return -ETIMEDOUT;
}
return 0;
}
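/*
 * Added note: RTL8367_CHIP_RESET_HW is self-clearing. After the
 * initial 1000 ms delay the loop above polls up to ten more times,
 * 1 ms apart, before declaring a timeout.
 */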
static int rtl8367_extif_set_mode(struct rtl8366_smi *smi, int id,
enum rtl8367_extif_mode mode)
{
int err;
/* set port mode */
switch (mode) {
case RTL8367_EXTIF_MODE_RGMII:
case RTL8367_EXTIF_MODE_RGMII_33V:
REG_WR(smi, RTL8367_CHIP_DEBUG0_REG, 0x0367);
REG_WR(smi, RTL8367_CHIP_DEBUG1_REG, 0x7777);
break;
case RTL8367_EXTIF_MODE_TMII_MAC:
case RTL8367_EXTIF_MODE_TMII_PHY:
REG_RMW(smi, RTL8367_BYPASS_LINE_RATE_REG,
BIT((id + 1) % 2), BIT((id + 1) % 2));
break;
case RTL8367_EXTIF_MODE_GMII:
REG_RMW(smi, RTL8367_CHIP_DEBUG0_REG,
RTL8367_CHIP_DEBUG0_DUMMY0(id),
RTL8367_CHIP_DEBUG0_DUMMY0(id));
REG_RMW(smi, RTL8367_EXT_RGMXF_REG(id), BIT(6), BIT(6));
break;
case RTL8367_EXTIF_MODE_MII_MAC:
case RTL8367_EXTIF_MODE_MII_PHY:
case RTL8367_EXTIF_MODE_DISABLED:
REG_RMW(smi, RTL8367_BYPASS_LINE_RATE_REG,
BIT((id + 1) % 2), 0);
REG_RMW(smi, RTL8367_EXT_RGMXF_REG(id), BIT(6), 0);
break;
default:
dev_err(smi->parent,
"invalid mode for external interface %d\n", id);
return -EINVAL;
}
REG_RMW(smi, RTL8367_DIS_REG,
RTL8367_DIS_RGMII_MASK << RTL8367_DIS_RGMII_SHIFT(id),
mode << RTL8367_DIS_RGMII_SHIFT(id));
return 0;
}
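/*
 * Added note: the BIT((id + 1) % 2) accesses in the TMII and
 * MII/disabled cases above touch the bypass line-rate register with
 * the bit indices swapped: external interface 0 maps to bit 1 and
 * interface 1 maps to bit 0.
 */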
static int rtl8367_extif_set_force(struct rtl8366_smi *smi, int id,
struct rtl8367_port_ability *pa)
{
u32 mask;
u32 val;
int err;
mask = (RTL8367_DI_FORCE_MODE |
RTL8367_DI_FORCE_NWAY |
RTL8367_DI_FORCE_TXPAUSE |
RTL8367_DI_FORCE_RXPAUSE |
RTL8367_DI_FORCE_LINK |
RTL8367_DI_FORCE_DUPLEX |
RTL8367_DI_FORCE_SPEED_MASK);
val = pa->speed;
val |= pa->force_mode ? RTL8367_DI_FORCE_MODE : 0;
val |= pa->nway ? RTL8367_DI_FORCE_NWAY : 0;
val |= pa->txpause ? RTL8367_DI_FORCE_TXPAUSE : 0;
val |= pa->rxpause ? RTL8367_DI_FORCE_RXPAUSE : 0;
val |= pa->link ? RTL8367_DI_FORCE_LINK : 0;
val |= pa->duplex ? RTL8367_DI_FORCE_DUPLEX : 0;
REG_RMW(smi, RTL8367_DI_FORCE_REG(id), mask, val);
return 0;
}
static int rtl8367_extif_set_rgmii_delay(struct rtl8366_smi *smi, int id,
unsigned txdelay, unsigned rxdelay)
{
u32 mask;
u32 val;
int err;
mask = (RTL8367_EXT_RGMXF_RXDELAY_MASK |
(RTL8367_EXT_RGMXF_TXDELAY_MASK <<
RTL8367_EXT_RGMXF_TXDELAY_SHIFT));
val = rxdelay;
val |= txdelay << RTL8367_EXT_RGMXF_TXDELAY_SHIFT;
REG_RMW(smi, RTL8367_EXT_RGMXF_REG(id), mask, val);
return 0;
}
static int rtl8367_extif_init(struct rtl8366_smi *smi, int id,
struct rtl8367_extif_config *cfg)
{
enum rtl8367_extif_mode mode;
int err;
mode = (cfg) ? cfg->mode : RTL8367_EXTIF_MODE_DISABLED;
err = rtl8367_extif_set_mode(smi, id, mode);
if (err)
return err;
if (mode != RTL8367_EXTIF_MODE_DISABLED) {
err = rtl8367_extif_set_force(smi, id, &cfg->ability);
if (err)
return err;
err = rtl8367_extif_set_rgmii_delay(smi, id, cfg->txdelay,
cfg->rxdelay);
if (err)
return err;
}
return 0;
}
static int rtl8367_led_group_set_ports(struct rtl8366_smi *smi,
unsigned int group, u16 port_mask)
{
u32 reg;
u32 s;
int err;
port_mask &= RTL8367_PARA_LED_IO_EN_PMASK;
s = (group % 2) * 8;
reg = RTL8367_PARA_LED_IO_EN1_REG + (group / 2);
REG_RMW(smi, reg, (RTL8367_PARA_LED_IO_EN_PMASK << s), port_mask << s);
return 0;
}
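/*
 * Worked example (added annotation): LED groups 0 and 1 share register
 * 0x1b24 at shifts 0 and 8; groups 2 and 3 share 0x1b25, again at
 * shifts 0 and 8.
 */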
static int rtl8367_led_group_set_mode(struct rtl8366_smi *smi,
unsigned int mode)
{
u16 mask;
u16 set;
int err;
mode &= RTL8367_LED_CONFIG_DATA_M;
mask = (RTL8367_LED_CONFIG_DATA_M << RTL8367_LED_CONFIG_DATA_S) |
RTL8367_LED_CONFIG_SEL;
set = (mode << RTL8367_LED_CONFIG_DATA_S) | RTL8367_LED_CONFIG_SEL;
REG_RMW(smi, RTL8367_LED_CONFIG_REG, mask, set);
return 0;
}
static int rtl8367_led_group_set_config(struct rtl8366_smi *smi,
unsigned int led, unsigned int cfg)
{
u16 mask;
u16 set;
int err;
mask = (RTL8367_LED_CONFIG_LED_CFG_M << (led * 4)) |
RTL8367_LED_CONFIG_SEL;
set = (cfg & RTL8367_LED_CONFIG_LED_CFG_M) << (led * 4);
REG_RMW(smi, RTL8367_LED_CONFIG_REG, mask, set);
return 0;
}
static int rtl8367_led_op_select_parallel(struct rtl8366_smi *smi)
{
int err;
REG_WR(smi, RTL8367_LED_SYS_CONFIG_REG, 0x1472);
return 0;
}
static int rtl8367_led_blinkrate_set(struct rtl8366_smi *smi, unsigned int rate)
{
u16 mask;
u16 set;
int err;
mask = RTL8367_LED_MODE_RATE_M << RTL8367_LED_MODE_RATE_S;
set = (rate & RTL8367_LED_MODE_RATE_M) << RTL8367_LED_MODE_RATE_S;
REG_RMW(smi, RTL8367_LED_MODE_REG, mask, set);
return 0;
}
#ifdef CONFIG_OF
static int rtl8367_extif_init_of(struct rtl8366_smi *smi, int id,
const char *name)
{
struct rtl8367_extif_config *cfg;
const __be32 *prop;
int size;
int err;
prop = of_get_property(smi->parent->of_node, name, &size);
if (!prop)
return rtl8367_extif_init(smi, id, NULL);
if (size != (9 * sizeof(*prop))) {
dev_err(smi->parent, "%s property is invalid\n", name);
return -EINVAL;
}
cfg = kzalloc(sizeof(struct rtl8367_extif_config), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
cfg->txdelay = be32_to_cpup(prop++);
cfg->rxdelay = be32_to_cpup(prop++);
cfg->mode = be32_to_cpup(prop++);
cfg->ability.force_mode = be32_to_cpup(prop++);
cfg->ability.txpause = be32_to_cpup(prop++);
cfg->ability.rxpause = be32_to_cpup(prop++);
cfg->ability.link = be32_to_cpup(prop++);
cfg->ability.duplex = be32_to_cpup(prop++);
cfg->ability.speed = be32_to_cpup(prop++);
err = rtl8367_extif_init(smi, id, cfg);
kfree(cfg);
return err;
}
#else
static int rtl8367_extif_init_of(struct rtl8366_smi *smi, int id,
const char *name)
{
return -EINVAL;
}
#endif
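/*
 * Added note: the "realtek,extif0"/"realtek,extif1" device-tree
 * properties parsed above are expected to carry exactly nine u32
 * cells, in this order: txdelay, rxdelay, mode, force_mode, txpause,
 * rxpause, link, duplex, speed.
 */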
static int rtl8367_setup(struct rtl8366_smi *smi)
{
struct rtl8367_platform_data *pdata;
int err;
int i;
pdata = smi->parent->platform_data;
err = rtl8367_init_regs(smi);
if (err)
return err;
/* initialize external interfaces */
if (smi->parent->of_node) {
err = rtl8367_extif_init_of(smi, 0, "realtek,extif0");
if (err)
return err;
err = rtl8367_extif_init_of(smi, 1, "realtek,extif1");
if (err)
return err;
} else {
err = rtl8367_extif_init(smi, 0, pdata->extif0_cfg);
if (err)
return err;
err = rtl8367_extif_init(smi, 1, pdata->extif1_cfg);
if (err)
return err;
}
/* set maximum packet length to 1536 bytes */
REG_RMW(smi, RTL8367_SWC0_REG, RTL8367_SWC0_MAX_LENGTH_MASK,
RTL8367_SWC0_MAX_LENGTH_1536);
/*
 * discard VLAN-tagged packets if the port is not a member of
 * the VLAN with which the packet is associated.
 */
REG_WR(smi, RTL8367_VLAN_INGRESS_REG, RTL8367_PORTS_ALL);
/*
* Setup egress tag mode for each port.
*/
for (i = 0; i < RTL8367_NUM_PORTS; i++)
REG_RMW(smi,
RTL8367_PORT_CFG_REG(i),
RTL8367_PORT_CFG_EGRESS_MODE_MASK <<
RTL8367_PORT_CFG_EGRESS_MODE_SHIFT,
RTL8367_PORT_CFG_EGRESS_MODE_ORIGINAL <<
RTL8367_PORT_CFG_EGRESS_MODE_SHIFT);
/* setup LEDs */
err = rtl8367_led_group_set_ports(smi, 0, RTL8367_PORTS_ALL);
if (err)
return err;
err = rtl8367_led_group_set_mode(smi, 0);
if (err)
return err;
err = rtl8367_led_op_select_parallel(smi);
if (err)
return err;
err = rtl8367_led_blinkrate_set(smi, 1);
if (err)
return err;
err = rtl8367_led_group_set_config(smi, 0, 2);
if (err)
return err;
return 0;
}
static int rtl8367_get_mib_counter(struct rtl8366_smi *smi, int counter,
int port, unsigned long long *val)
{
struct rtl8366_mib_counter *mib;
int offset;
int i;
int err;
u32 addr, data;
u64 mibvalue;
if (port >= RTL8367_NUM_PORTS || counter >= RTL8367_MIB_COUNT)
return -EINVAL;
mib = &rtl8367_mib_counters[counter];
addr = RTL8367_MIB_COUNTER_PORT_OFFSET * port + mib->offset;
/*
 * Write the counter address first; the ASIC then latches the
 * 64-bit counter value, ready to be retrieved.
 */
REG_WR(smi, RTL8367_MIB_ADDRESS_REG, addr >> 2);
/* read MIB control register */
REG_RD(smi, RTL8367_MIB_CTRL_REG(0), &data);
if (data & RTL8367_MIB_CTRL_BUSY_MASK)
return -EBUSY;
if (data & RTL8367_MIB_CTRL_RESET_MASK)
return -EIO;
if (mib->length == 4)
offset = 3;
else
offset = (mib->offset + 1) % 4;
mibvalue = 0;
for (i = 0; i < mib->length; i++) {
REG_RD(smi, RTL8367_MIB_COUNTER_REG(offset - i), &data);
mibvalue = (mibvalue << 16) | (data & 0xFFFF);
}
*val = mibvalue;
return 0;
}
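/*
 * Worked example (added annotation, not in the original): reading
 * "IfInOctets" (offset 0, length 4) on port 2 writes address
 * (0x0050 * 2 + 0) >> 2 = 0x28 to the MIB address register, then
 * fetches four 16-bit words from counter registers 3 down to 0,
 * most significant word first.
 */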
static int rtl8367_get_vlan_4k(struct rtl8366_smi *smi, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[RTL8367_TA_VLAN_DATA_SIZE];
int err;
int i;
memset(vlan4k, '\0', sizeof(struct rtl8366_vlan_4k));
if (vid >= RTL8367_NUM_VIDS)
return -EINVAL;
/* write VID */
REG_WR(smi, RTL8367_TA_ADDR_REG, vid);
/* write table access control word */
REG_WR(smi, RTL8367_TA_CTRL_REG, RTL8367_TA_CTRL_CVLAN_READ);
for (i = 0; i < ARRAY_SIZE(data); i++)
REG_RD(smi, RTL8367_TA_DATA_REG(i), &data[i]);
vlan4k->vid = vid;
vlan4k->member = (data[0] >> RTL8367_TA_VLAN_MEMBER_SHIFT) &
RTL8367_TA_VLAN_MEMBER_MASK;
vlan4k->fid = (data[1] >> RTL8367_TA_VLAN_FID_SHIFT) &
RTL8367_TA_VLAN_FID_MASK;
vlan4k->untag = (data[2] >> RTL8367_TA_VLAN_UNTAG1_SHIFT) &
RTL8367_TA_VLAN_UNTAG1_MASK;
vlan4k->untag |= ((data[3] >> RTL8367_TA_VLAN_UNTAG2_SHIFT) &
RTL8367_TA_VLAN_UNTAG2_MASK) << 2;
return 0;
}
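/*
 * Added note: the 16-bit untag map is split across the table words --
 * bits 1:0 come from data[2] bits 15:14 and bits 15:2 from data[3]
 * bits 13:0; rtl8367_set_vlan_4k() below performs the inverse packing.
 */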
static int rtl8367_set_vlan_4k(struct rtl8366_smi *smi,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[RTL8367_TA_VLAN_DATA_SIZE];
int err;
int i;
if (vlan4k->vid >= RTL8367_NUM_VIDS ||
vlan4k->member > RTL8367_TA_VLAN_MEMBER_MASK ||
vlan4k->untag > RTL8367_UNTAG_MASK ||
vlan4k->fid > RTL8367_FIDMAX)
return -EINVAL;
data[0] = (vlan4k->member & RTL8367_TA_VLAN_MEMBER_MASK) <<
RTL8367_TA_VLAN_MEMBER_SHIFT;
data[1] = (vlan4k->fid & RTL8367_TA_VLAN_FID_MASK) <<
RTL8367_TA_VLAN_FID_SHIFT;
data[2] = (vlan4k->untag & RTL8367_TA_VLAN_UNTAG1_MASK) <<
RTL8367_TA_VLAN_UNTAG1_SHIFT;
data[3] = ((vlan4k->untag >> 2) & RTL8367_TA_VLAN_UNTAG2_MASK) <<
RTL8367_TA_VLAN_UNTAG2_SHIFT;
for (i = 0; i < ARRAY_SIZE(data); i++)
REG_WR(smi, RTL8367_TA_DATA_REG(i), data[i]);
/* write VID */
REG_WR(smi, RTL8367_TA_ADDR_REG,
vlan4k->vid & RTL8367_TA_VLAN_VID_MASK);
/* write table access control word */
REG_WR(smi, RTL8367_TA_CTRL_REG, RTL8367_TA_CTRL_CVLAN_WRITE);
return 0;
}
static int rtl8367_get_vlan_mc(struct rtl8366_smi *smi, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[RTL8367_VLAN_MC_DATA_SIZE];
int err;
int i;
memset(vlanmc, '\0', sizeof(struct rtl8366_vlan_mc));
if (index >= RTL8367_NUM_VLANS)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(data); i++)
REG_RD(smi, RTL8367_VLAN_MC_BASE(index) + i, &data[i]);
vlanmc->member = (data[0] >> RTL8367_VLAN_MC_MEMBER_SHIFT) &
RTL8367_VLAN_MC_MEMBER_MASK;
vlanmc->fid = (data[1] >> RTL8367_VLAN_MC_FID_SHIFT) &
RTL8367_VLAN_MC_FID_MASK;
vlanmc->vid = (data[3] >> RTL8367_VLAN_MC_EVID_SHIFT) &
RTL8367_VLAN_MC_EVID_MASK;
return 0;
}
static int rtl8367_set_vlan_mc(struct rtl8366_smi *smi, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[RTL8367_VLAN_MC_DATA_SIZE];
int err;
int i;
if (index >= RTL8367_NUM_VLANS ||
vlanmc->vid >= RTL8367_NUM_VIDS ||
vlanmc->priority > RTL8367_PRIORITYMAX ||
vlanmc->member > RTL8367_VLAN_MC_MEMBER_MASK ||
vlanmc->untag > RTL8367_UNTAG_MASK ||
vlanmc->fid > RTL8367_FIDMAX)
return -EINVAL;
data[0] = (vlanmc->member & RTL8367_VLAN_MC_MEMBER_MASK) <<
RTL8367_VLAN_MC_MEMBER_SHIFT;
data[1] = (vlanmc->fid & RTL8367_VLAN_MC_FID_MASK) <<
RTL8367_VLAN_MC_FID_SHIFT;
data[2] = 0;
data[3] = (vlanmc->vid & RTL8367_VLAN_MC_EVID_MASK) <<
RTL8367_VLAN_MC_EVID_SHIFT;
for (i = 0; i < ARRAY_SIZE(data); i++)
REG_WR(smi, RTL8367_VLAN_MC_BASE(index) + i, data[i]);
return 0;
}
static int rtl8367_get_mc_index(struct rtl8366_smi *smi, int port, int *val)
{
u32 data;
int err;
if (port >= RTL8367_NUM_PORTS)
return -EINVAL;
REG_RD(smi, RTL8367_VLAN_PVID_CTRL_REG(port), &data);
*val = (data >> RTL8367_VLAN_PVID_CTRL_SHIFT(port)) &
RTL8367_VLAN_PVID_CTRL_MASK;
return 0;
}
static int rtl8367_set_mc_index(struct rtl8366_smi *smi, int port, int index)
{
if (port >= RTL8367_NUM_PORTS || index >= RTL8367_NUM_VLANS)
return -EINVAL;
return rtl8366_smi_rmwr(smi, RTL8367_VLAN_PVID_CTRL_REG(port),
RTL8367_VLAN_PVID_CTRL_MASK <<
RTL8367_VLAN_PVID_CTRL_SHIFT(port),
(index & RTL8367_VLAN_PVID_CTRL_MASK) <<
RTL8367_VLAN_PVID_CTRL_SHIFT(port));
}
static int rtl8367_enable_vlan(struct rtl8366_smi *smi, int enable)
{
return rtl8366_smi_rmwr(smi, RTL8367_VLAN_CTRL_REG,
RTL8367_VLAN_CTRL_ENABLE,
(enable) ? RTL8367_VLAN_CTRL_ENABLE : 0);
}
static int rtl8367_enable_vlan4k(struct rtl8366_smi *smi, int enable)
{
return 0;
}
static int rtl8367_is_vlan_valid(struct rtl8366_smi *smi, unsigned vlan)
{
unsigned max = RTL8367_NUM_VLANS;
if (smi->vlan4k_enabled)
max = RTL8367_NUM_VIDS - 1;
if (vlan == 0 || vlan >= max)
return 0;
return 1;
}
static int rtl8367_enable_port(struct rtl8366_smi *smi, int port, int enable)
{
int err;
REG_WR(smi, RTL8367_PORT_ISOLATION_REG(port),
(enable) ? RTL8367_PORTS_ALL : 0);
return 0;
}
static int rtl8367_sw_reset_mibs(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
return rtl8366_smi_rmwr(smi, RTL8367_MIB_CTRL_REG(0), 0,
RTL8367_MIB_CTRL_GLOBAL_RESET_MASK);
}
static int rtl8367_sw_get_port_link(struct switch_dev *dev,
int port,
struct switch_port_link *link)
{
struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
u32 data = 0;
u32 speed;
if (port >= RTL8367_NUM_PORTS)
return -EINVAL;
rtl8366_smi_read_reg(smi, RTL8367_PORT_STATUS_REG(port), &data);
link->link = !!(data & RTL8367_PORT_STATUS_LINK);
if (!link->link)
return 0;
link->duplex = !!(data & RTL8367_PORT_STATUS_DUPLEX);
link->rx_flow = !!(data & RTL8367_PORT_STATUS_RXPAUSE);
link->tx_flow = !!(data & RTL8367_PORT_STATUS_TXPAUSE);
link->aneg = !!(data & RTL8367_PORT_STATUS_NWAY);
speed = (data & RTL8367_PORT_STATUS_SPEED_MASK);
switch (speed) {
case 0:
link->speed = SWITCH_PORT_SPEED_10;
break;
case 1:
link->speed = SWITCH_PORT_SPEED_100;
break;
case 2:
link->speed = SWITCH_PORT_SPEED_1000;
break;
default:
link->speed = SWITCH_PORT_SPEED_UNKNOWN;
break;
}
return 0;
}
static int rtl8367_sw_get_max_length(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
u32 data;
rtl8366_smi_read_reg(smi, RTL8367_SWC0_REG, &data);
val->value.i = (data & RTL8367_SWC0_MAX_LENGTH_MASK) >>
RTL8367_SWC0_MAX_LENGTH_SHIFT;
return 0;
}
static int rtl8367_sw_set_max_length(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
u32 max_len;
switch (val->value.i) {
case 0:
max_len = RTL8367_SWC0_MAX_LENGTH_1522;
break;
case 1:
max_len = RTL8367_SWC0_MAX_LENGTH_1536;
break;
case 2:
max_len = RTL8367_SWC0_MAX_LENGTH_1552;
break;
case 3:
max_len = RTL8367_SWC0_MAX_LENGTH_16000;
break;
default:
return -EINVAL;
}
return rtl8366_smi_rmwr(smi, RTL8367_SWC0_REG,
RTL8367_SWC0_MAX_LENGTH_MASK, max_len);
}
static int rtl8367_sw_reset_port_mibs(struct switch_dev *dev,
const struct switch_attr *attr,
struct switch_val *val)
{
struct rtl8366_smi *smi = sw_to_rtl8366_smi(dev);
int port;
port = val->port_vlan;
if (port >= RTL8367_NUM_PORTS)
return -EINVAL;
return rtl8366_smi_rmwr(smi, RTL8367_MIB_CTRL_REG(port / 8), 0,
RTL8367_MIB_CTRL_PORT_RESET_MASK(port % 8));
}
static struct switch_attr rtl8367_globals[] = {
{
.type = SWITCH_TYPE_INT,
.name = "enable_vlan",
.description = "Enable VLAN mode",
.set = rtl8366_sw_set_vlan_enable,
.get = rtl8366_sw_get_vlan_enable,
.max = 1,
.ofs = 1
}, {
.type = SWITCH_TYPE_INT,
.name = "enable_vlan4k",
.description = "Enable VLAN 4K mode",
.set = rtl8366_sw_set_vlan_enable,
.get = rtl8366_sw_get_vlan_enable,
.max = 1,
.ofs = 2
}, {
.type = SWITCH_TYPE_NOVAL,
.name = "reset_mibs",
.description = "Reset all MIB counters",
.set = rtl8367_sw_reset_mibs,
}, {
.type = SWITCH_TYPE_INT,
.name = "max_length",
.description = "Get/Set the maximum length of valid packets"
"(0:1522, 1:1536, 2:1552, 3:16000)",
.set = rtl8367_sw_set_max_length,
.get = rtl8367_sw_get_max_length,
.max = 3,
}
};
static struct switch_attr rtl8367_port[] = {
{
.type = SWITCH_TYPE_NOVAL,
.name = "reset_mib",
.description = "Reset single port MIB counters",
.set = rtl8367_sw_reset_port_mibs,
}, {
.type = SWITCH_TYPE_STRING,
.name = "mib",
.description = "Get MIB counters for port",
.max = 33,
.set = NULL,
.get = rtl8366_sw_get_port_mib,
},
};
static struct switch_attr rtl8367_vlan[] = {
{
.type = SWITCH_TYPE_STRING,
.name = "info",
.description = "Get vlan information",
.max = 1,
.set = NULL,
.get = rtl8366_sw_get_vlan_info,
}, {
.type = SWITCH_TYPE_INT,
.name = "fid",
.description = "Get/Set vlan FID",
.max = RTL8367_FIDMAX,
.set = rtl8366_sw_set_vlan_fid,
.get = rtl8366_sw_get_vlan_fid,
},
};
static const struct switch_dev_ops rtl8367_sw_ops = {
.attr_global = {
.attr = rtl8367_globals,
.n_attr = ARRAY_SIZE(rtl8367_globals),
},
.attr_port = {
.attr = rtl8367_port,
.n_attr = ARRAY_SIZE(rtl8367_port),
},
.attr_vlan = {
.attr = rtl8367_vlan,
.n_attr = ARRAY_SIZE(rtl8367_vlan),
},
.get_vlan_ports = rtl8366_sw_get_vlan_ports,
.set_vlan_ports = rtl8366_sw_set_vlan_ports,
.get_port_pvid = rtl8366_sw_get_port_pvid,
.set_port_pvid = rtl8366_sw_set_port_pvid,
.reset_switch = rtl8366_sw_reset_switch,
.get_port_link = rtl8367_sw_get_port_link,
};
static int rtl8367_switch_init(struct rtl8366_smi *smi)
{
struct switch_dev *dev = &smi->sw_dev;
int err;
dev->name = "RTL8367";
dev->cpu_port = RTL8367_CPU_PORT_NUM;
dev->ports = RTL8367_NUM_PORTS;
dev->vlans = RTL8367_NUM_VIDS;
dev->ops = &rtl8367_sw_ops;
dev->alias = dev_name(smi->parent);
err = register_switch(dev, NULL);
if (err)
dev_err(smi->parent, "switch registration failed\n");
return err;
}
static void rtl8367_switch_cleanup(struct rtl8366_smi *smi)
{
unregister_switch(&smi->sw_dev);
}
static int rtl8367_mii_read(struct mii_bus *bus, int addr, int reg)
{
struct rtl8366_smi *smi = bus->priv;
u32 val = 0;
int err;
err = rtl8367_read_phy_reg(smi, addr, reg, &val);
if (err)
return 0xffff;
return val;
}
static int rtl8367_mii_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct rtl8366_smi *smi = bus->priv;
u32 t;
int err;
err = rtl8367_write_phy_reg(smi, addr, reg, val);
if (err)
return err;
/* flush write */
(void) rtl8367_read_phy_reg(smi, addr, reg, &t);
return err;
}
static int rtl8367_detect(struct rtl8366_smi *smi)
{
u32 rtl_no = 0;
u32 rtl_ver = 0;
char *chip_name;
int ret;
ret = rtl8366_smi_read_reg(smi, RTL8367_RTL_NO_REG, &rtl_no);
if (ret) {
dev_err(smi->parent, "unable to read chip number\n");
return ret;
}
switch (rtl_no) {
case RTL8367_RTL_NO_8367R:
chip_name = "8367R";
break;
case RTL8367_RTL_NO_8367M:
chip_name = "8367M";
break;
default:
dev_err(smi->parent, "unknown chip number (%04x)\n", rtl_no);
return -ENODEV;
}
ret = rtl8366_smi_read_reg(smi, RTL8367_RTL_VER_REG, &rtl_ver);
if (ret) {
dev_err(smi->parent, "unable to read chip version\n");
return ret;
}
dev_info(smi->parent, "RTL%s ver. %u chip found\n",
chip_name, rtl_ver & RTL8367_RTL_VER_MASK);
return 0;
}
static struct rtl8366_smi_ops rtl8367_smi_ops = {
.detect = rtl8367_detect,
.reset_chip = rtl8367_reset_chip,
.setup = rtl8367_setup,
.mii_read = rtl8367_mii_read,
.mii_write = rtl8367_mii_write,
.get_vlan_mc = rtl8367_get_vlan_mc,
.set_vlan_mc = rtl8367_set_vlan_mc,
.get_vlan_4k = rtl8367_get_vlan_4k,
.set_vlan_4k = rtl8367_set_vlan_4k,
.get_mc_index = rtl8367_get_mc_index,
.set_mc_index = rtl8367_set_mc_index,
.get_mib_counter = rtl8367_get_mib_counter,
.is_vlan_valid = rtl8367_is_vlan_valid,
.enable_vlan = rtl8367_enable_vlan,
.enable_vlan4k = rtl8367_enable_vlan4k,
.enable_port = rtl8367_enable_port,
};
static int rtl8367_probe(struct platform_device *pdev)
{
struct rtl8366_smi *smi;
int err;
smi = rtl8366_smi_probe(pdev);
if (!smi)
return -ENODEV;
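	/*
	 * SMI bus parameters for the RTL8367: the per-transition clock
	 * delay and the chip's SMI read/write opcodes, as consumed by the
	 * rtl8366_smi core.
	 */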
smi->clk_delay = 1500;
smi->cmd_read = 0xb9;
smi->cmd_write = 0xb8;
smi->ops = &rtl8367_smi_ops;
smi->cpu_port = RTL8367_CPU_PORT_NUM;
smi->num_ports = RTL8367_NUM_PORTS;
smi->num_vlan_mc = RTL8367_NUM_VLANS;
smi->mib_counters = rtl8367_mib_counters;
smi->num_mib_counters = ARRAY_SIZE(rtl8367_mib_counters);
err = rtl8366_smi_init(smi);
if (err)
goto err_free_smi;
platform_set_drvdata(pdev, smi);
err = rtl8367_switch_init(smi);
if (err)
goto err_clear_drvdata;
return 0;
err_clear_drvdata:
platform_set_drvdata(pdev, NULL);
rtl8366_smi_cleanup(smi);
err_free_smi:
kfree(smi);
return err;
}
static int rtl8367_remove(struct platform_device *pdev)
{
struct rtl8366_smi *smi = platform_get_drvdata(pdev);
if (smi) {
rtl8367_switch_cleanup(smi);
platform_set_drvdata(pdev, NULL);
rtl8366_smi_cleanup(smi);
kfree(smi);
}
return 0;
}
static void rtl8367_shutdown(struct platform_device *pdev)
{
struct rtl8366_smi *smi = platform_get_drvdata(pdev);
if (smi)
rtl8367_reset_chip(smi);
}
#ifdef CONFIG_OF
static const struct of_device_id rtl8367_match[] = {
{ .compatible = "realtek,rtl8367" },
{},
};
MODULE_DEVICE_TABLE(of, rtl8367_match);
#endif
static struct platform_driver rtl8367_driver = {
.driver = {
.name = RTL8367_DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = of_match_ptr(rtl8367_match),
#endif
},
.probe = rtl8367_probe,
.remove = rtl8367_remove,
.shutdown = rtl8367_shutdown,
};
static int __init rtl8367_module_init(void)
{
return platform_driver_register(&rtl8367_driver);
}
module_init(rtl8367_module_init);
static void __exit rtl8367_module_exit(void)
{
platform_driver_unregister(&rtl8367_driver);
}
module_exit(rtl8367_module_exit);
MODULE_DESCRIPTION(RTL8367_DRIVER_DESC);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" RTL8367_DRIVER_NAME);
| gpl-2.0 |
fabianbergmark/linux-sctp | drivers/misc/mic/cosm/cosm_scif_server.c | 448 | 12775 | /*
* Intel MIC Platform Software Stack (MPSS)
*
* Copyright(c) 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Intel MIC Coprocessor State Management (COSM) Driver
*
*/
#include <linux/kthread.h>
#include "cosm_main.h"
/*
* The COSM driver uses SCIF to communicate between the management node and the
 * MIC cards. SCIF is used to (a) send a shutdown command to the card, (b)
* receive a shutdown status back from the card upon completion of shutdown and
* (c) receive periodic heartbeat messages from the card used to deduce if the
* card has crashed.
*
* A COSM server consisting of a SCIF listening endpoint waits for incoming
* connections from the card. Upon acceptance of the connection, a separate
* work-item is scheduled to handle SCIF message processing for that card. The
* life-time of this work-item is therefore the time from which the connection
* from a card is accepted to the time at which the connection is closed. A new
* work-item starts each time the card boots and is alive till the card (a)
* shuts down (b) is reset (c) crashes (d) cosm_client driver on the card is
* unloaded.
*
* From the point of view of COSM interactions with SCIF during card
* shutdown, reset and crash are as follows:
*
* Card shutdown
* -------------
* 1. COSM client on the card invokes orderly_poweroff() in response to SHUTDOWN
* message from the host.
* 2. Card driver shutdown callback invokes scif_unregister_device(..) resulting
* in scif_remove(..) getting called on the card
* 3. scif_remove -> scif_stop -> scif_handle_remove_node ->
* scif_peer_unregister_device -> device_unregister for the host peer device
* 4. During device_unregister remove(..) method of cosm_client is invoked which
* closes the COSM SCIF endpoint on the card. This results in a SCIF_DISCNCT
* message being sent to host SCIF. SCIF_DISCNCT message processing on the
* host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes
* up the host COSM thread blocked in scif_poll(..) resulting in
* scif_poll(..) returning POLLHUP.
* 5. On the card, scif_peer_release_dev is next called which results in an
* SCIF_EXIT message being sent to the host and after receiving the
* SCIF_EXIT_ACK from the host the peer device teardown on the card is
* complete.
* 6. As part of the SCIF_EXIT message processing on the host, host sends a
* SCIF_REMOVE_NODE to itself corresponding to the card being removed. This
* starts a similar SCIF peer device teardown sequence on the host
* corresponding to the card being shut down.
*
* Card reset
* ----------
* The case of interest here is when the card has not been previously shut down
* since most of the steps below are skipped in that case:
* 1. cosm_stop(..) invokes hw_ops->stop(..) method of the base PCIe driver
* which unregisters the SCIF HW device resulting in scif_remove(..) being
* called on the host.
* 2. scif_remove(..) calls scif_disconnect_node(..) which results in a
* SCIF_EXIT message being sent to the card.
* 3. The card executes scif_stop() as part of SCIF_EXIT message
* processing. This results in the COSM endpoint on the card being closed and
* the SCIF host peer device on the card getting unregistered similar to
* steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the
* host returns POLLHUP as a result.
* 4. On the host, card peer device unregister and SCIF HW remove(..) also
* subsequently complete.
*
* Card crash
* ----------
 * If a reset is issued after the card has crashed, there is no SCIF_DISCNCT
* message from the card which would result in scif_poll(..) returning
* POLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE
* message to itself resulting in the card SCIF peer device being unregistered,
* this results in a scif_peer_release_dev -> scif_cleanup_scifdev->
* scif_invalidate_ep call sequence which sets the endpoint state to
* DISCONNECTED and results in scif_poll(..) returning POLLHUP.
*/
#define COSM_SCIF_BACKLOG 16
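/*
 * A card is treated as unresponsive if no heartbeat arrives within its
 * send interval plus a 10 second grace delta.
 */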
#define COSM_HEARTBEAT_CHECK_DELTA_SEC 10
#define COSM_HEARTBEAT_TIMEOUT_SEC \
(COSM_HEARTBEAT_SEND_SEC + COSM_HEARTBEAT_CHECK_DELTA_SEC)
#define COSM_HEARTBEAT_TIMEOUT_MSEC (COSM_HEARTBEAT_TIMEOUT_SEC * MSEC_PER_SEC)
static struct task_struct *server_thread;
static scif_epd_t listen_epd;
/* Publish MIC card's shutdown status to user space MIC daemon */
static void cosm_update_mic_status(struct cosm_device *cdev)
{
if (cdev->shutdown_status_int != MIC_NOP) {
cosm_set_shutdown_status(cdev, cdev->shutdown_status_int);
cdev->shutdown_status_int = MIC_NOP;
}
}
/* Store MIC card's shutdown status internally when it is received */
static void cosm_shutdown_status_int(struct cosm_device *cdev,
enum mic_status shutdown_status)
{
switch (shutdown_status) {
case MIC_HALTED:
case MIC_POWER_OFF:
case MIC_RESTART:
case MIC_CRASHED:
break;
default:
dev_err(&cdev->dev, "%s %d Unexpected shutdown_status %d\n",
__func__, __LINE__, shutdown_status);
return;
	}
cdev->shutdown_status_int = shutdown_status;
cdev->heartbeat_watchdog_enable = false;
if (cdev->state != MIC_SHUTTING_DOWN)
cosm_set_state(cdev, MIC_SHUTTING_DOWN);
}
/* Non-blocking recv. Read and process all available messages */
static void cosm_scif_recv(struct cosm_device *cdev)
{
struct cosm_msg msg;
int rc;
while (1) {
rc = scif_recv(cdev->epd, &msg, sizeof(msg), 0);
if (!rc) {
break;
} else if (rc < 0) {
dev_dbg(&cdev->dev, "%s: %d rc %d\n",
__func__, __LINE__, rc);
break;
}
dev_dbg(&cdev->dev, "%s: %d rc %d id 0x%llx\n",
__func__, __LINE__, rc, msg.id);
switch (msg.id) {
case COSM_MSG_SHUTDOWN_STATUS:
cosm_shutdown_status_int(cdev, msg.shutdown_status);
break;
case COSM_MSG_HEARTBEAT:
/* Nothing to do, heartbeat only unblocks scif_poll */
break;
default:
dev_err(&cdev->dev, "%s: %d unknown msg.id %lld\n",
__func__, __LINE__, msg.id);
break;
}
}
}
/* Publish crashed status for this MIC card */
static void cosm_set_crashed(struct cosm_device *cdev)
{
dev_err(&cdev->dev, "node alive timeout\n");
cosm_shutdown_status_int(cdev, MIC_CRASHED);
cosm_update_mic_status(cdev);
}
/* Send host time to the MIC card to sync system time between host and MIC */
static void cosm_send_time(struct cosm_device *cdev)
{
struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME };
int rc;
getnstimeofday64(&msg.timespec);
rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
if (rc < 0)
dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
__func__, __LINE__, rc);
}
/*
* Close this cosm_device's endpoint after its peer endpoint on the card has
* been closed. In all cases except MIC card crash POLLHUP on the host is
* triggered by the client's endpoint being closed.
*/
static void cosm_scif_close(struct cosm_device *cdev)
{
/*
* Because SHUTDOWN_STATUS message is sent by the MIC cards in the
* reboot notifier when shutdown is still not complete, we notify mpssd
* to reset the card when SCIF endpoint is closed.
*/
cosm_update_mic_status(cdev);
scif_close(cdev->epd);
cdev->epd = NULL;
dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
}
/*
* Set card state to ONLINE when a new SCIF connection from a MIC card is
* received. Normally the state is BOOTING when the connection comes in, but can
* be ONLINE if cosm_client driver on the card was unloaded and then reloaded.
*/
static int cosm_set_online(struct cosm_device *cdev)
{
int rc = 0;
	if (cdev->state == MIC_BOOTING || cdev->state == MIC_ONLINE) {
cdev->heartbeat_watchdog_enable = cdev->sysfs_heartbeat_enable;
cdev->epd = cdev->newepd;
if (cdev->state == MIC_BOOTING)
cosm_set_state(cdev, MIC_ONLINE);
cosm_send_time(cdev);
dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
} else {
dev_warn(&cdev->dev, "%s %d not going online in state: %s\n",
__func__, __LINE__, cosm_state_string[cdev->state]);
rc = -EINVAL;
}
/* Drop reference acquired by bus_find_device in the server thread */
put_device(&cdev->dev);
return rc;
}
/*
* Work function for handling work for a SCIF connection from a particular MIC
* card. It first sets the card state to ONLINE and then calls scif_poll to
* block on activity such as incoming messages on the SCIF endpoint. When the
* endpoint is closed, the work function exits, completing its life cycle, from
* MIC card boot to card shutdown/reset/crash.
*/
void cosm_scif_work(struct work_struct *work)
{
struct cosm_device *cdev = container_of(work, struct cosm_device,
scif_work);
struct scif_pollepd pollepd;
int rc;
mutex_lock(&cdev->cosm_mutex);
if (cosm_set_online(cdev))
goto exit;
while (1) {
pollepd.epd = cdev->epd;
pollepd.events = POLLIN;
/* Drop the mutex before blocking in scif_poll(..) */
mutex_unlock(&cdev->cosm_mutex);
/* poll(..) with timeout on our endpoint */
rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_TIMEOUT_MSEC);
mutex_lock(&cdev->cosm_mutex);
if (rc < 0) {
dev_err(&cdev->dev, "%s %d scif_poll rc %d\n",
__func__, __LINE__, rc);
continue;
}
/* There is a message from the card */
if (pollepd.revents & POLLIN)
cosm_scif_recv(cdev);
/* The peer endpoint is closed or this endpoint disconnected */
if (pollepd.revents & POLLHUP) {
cosm_scif_close(cdev);
break;
}
/* Did we timeout from poll? */
if (!rc && cdev->heartbeat_watchdog_enable)
cosm_set_crashed(cdev);
}
exit:
dev_dbg(&cdev->dev, "%s %d exiting\n", __func__, __LINE__);
mutex_unlock(&cdev->cosm_mutex);
}
/*
* COSM SCIF server thread function. Accepts incoming SCIF connections from MIC
* cards, finds the correct cosm_device to associate that connection with and
* schedules individual work items for each MIC card.
*/
static int cosm_scif_server(void *unused)
{
struct cosm_device *cdev;
scif_epd_t newepd;
struct scif_port_id port_id;
int rc;
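	/*
	 * cosm_scif_exit() delivers SIGKILL to break this thread out of
	 * the blocking scif_accept() below.
	 */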
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
rc = scif_accept(listen_epd, &port_id, &newepd,
SCIF_ACCEPT_SYNC);
if (rc < 0) {
			if (rc != -ERESTARTSYS)
pr_err("%s %d rc %d\n", __func__, __LINE__, rc);
continue;
}
/*
* Associate the incoming connection with a particular
* cosm_device, COSM device ID == SCIF node ID - 1
*/
cdev = cosm_find_cdev_by_id(port_id.node - 1);
if (!cdev)
continue;
cdev->newepd = newepd;
schedule_work(&cdev->scif_work);
}
pr_debug("%s %d Server thread stopped\n", __func__, __LINE__);
return 0;
}
static int cosm_scif_listen(void)
{
int rc;
listen_epd = scif_open();
if (!listen_epd) {
pr_err("%s %d scif_open failed\n", __func__, __LINE__);
return -ENOMEM;
}
rc = scif_bind(listen_epd, SCIF_COSM_LISTEN_PORT);
if (rc < 0) {
pr_err("%s %d scif_bind failed rc %d\n",
__func__, __LINE__, rc);
goto err;
}
rc = scif_listen(listen_epd, COSM_SCIF_BACKLOG);
if (rc < 0) {
pr_err("%s %d scif_listen rc %d\n", __func__, __LINE__, rc);
goto err;
}
pr_debug("%s %d listen_epd set up\n", __func__, __LINE__);
return 0;
err:
scif_close(listen_epd);
listen_epd = NULL;
return rc;
}
static void cosm_scif_listen_exit(void)
{
pr_debug("%s %d closing listen_epd\n", __func__, __LINE__);
if (listen_epd) {
scif_close(listen_epd);
listen_epd = NULL;
}
}
/*
* Create a listening SCIF endpoint and a server kthread which accepts incoming
* SCIF connections from MIC cards
*/
int cosm_scif_init(void)
{
int rc = cosm_scif_listen();
if (rc) {
pr_err("%s %d cosm_scif_listen rc %d\n",
__func__, __LINE__, rc);
goto err;
}
server_thread = kthread_run(cosm_scif_server, NULL, "cosm_server");
if (IS_ERR(server_thread)) {
rc = PTR_ERR(server_thread);
pr_err("%s %d kthread_run rc %d\n", __func__, __LINE__, rc);
goto listen_exit;
}
return 0;
listen_exit:
cosm_scif_listen_exit();
err:
return rc;
}
/* Stop the running server thread and close the listening SCIF endpoint */
void cosm_scif_exit(void)
{
int rc;
if (!IS_ERR_OR_NULL(server_thread)) {
rc = send_sig(SIGKILL, server_thread, 0);
if (rc) {
pr_err("%s %d send_sig rc %d\n",
__func__, __LINE__, rc);
return;
}
kthread_stop(server_thread);
}
cosm_scif_listen_exit();
}
| gpl-2.0 |
mk01/linux-fslc | tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c | 448 | 3412 | /*
 * Userspace test harness for load_unaligned_zeropad. Creates two
 * pages, uses mprotect to prevent access to the second page, and
 * installs a SEGV handler that walks the exception tables and runs
 * the fixup routine.
 *
 * The results are compared against a normal load that is
 * performed while access to the second page is enabled via mprotect.
*
* Copyright (C) 2014 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <signal.h>
#include <unistd.h>
#include <sys/mman.h>
#define FIXUP_SECTION ".ex_fixup"
static inline unsigned long __fls(unsigned long x);
#include "word-at-a-time.h"
#include "utils.h"
static inline unsigned long __fls(unsigned long x)
{
int lz;
asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	/* Bit index of the most-significant set bit: BITS_PER_LONG - 1 - clz. */
	return sizeof(unsigned long) * 8 - 1 - lz;
}
static int page_size;
static char *mem_region;
static int protect_region(void)
{
if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {
perror("mprotect");
return 1;
}
return 0;
}
static int unprotect_region(void)
{
if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) {
perror("mprotect");
return 1;
}
return 0;
}
extern char __start___ex_table[];
extern char __stop___ex_table[];
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
#elif defined(__powerpc__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
#else
#error implement UCONTEXT_NIA
#endif
static int segv_error;
static void segv_handler(int signr, siginfo_t *info, void *ptr)
{
ucontext_t *uc = (ucontext_t *)ptr;
unsigned long addr = (unsigned long)info->si_addr;
unsigned long *ip = &UCONTEXT_NIA(uc);
unsigned long *ex_p = (unsigned long *)__start___ex_table;
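	/*
	 * The exception table is a flat array of (faulting instruction
	 * address, fixup address) pairs; redirect the NIA to the fixup
	 * when the faulting instruction matches.
	 */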
while (ex_p < (unsigned long *)__stop___ex_table) {
unsigned long insn, fixup;
insn = *ex_p++;
fixup = *ex_p++;
if (insn == *ip) {
*ip = fixup;
return;
}
}
printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr);
segv_error++;
}
static void setup_segv_handler(void)
{
struct sigaction action;
memset(&action, 0, sizeof(action));
action.sa_sigaction = segv_handler;
action.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &action, NULL);
}
static int do_one_test(char *p, int page_offset)
{
unsigned long should;
unsigned long got;
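	/*
	 * Read the reference value while the second page is accessible,
	 * then re-protect it so that load_unaligned_zeropad() must take
	 * the exception fixup path when the load crosses into it.
	 */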
FAIL_IF(unprotect_region());
should = *(unsigned long *)p;
FAIL_IF(protect_region());
got = load_unaligned_zeropad(p);
if (should != got)
printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should);
return 0;
}
static int test_body(void)
{
unsigned long i;
page_size = getpagesize();
mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
FAIL_IF(mem_region == MAP_FAILED);
for (i = 0; i < page_size; i++)
mem_region[i] = i;
memset(mem_region+page_size, 0, page_size);
setup_segv_handler();
for (i = 0; i < page_size; i++)
FAIL_IF(do_one_test(mem_region+i, i));
FAIL_IF(segv_error);
return 0;
}
int main(void)
{
return test_harness(test_body, "load_unaligned_zeropad");
}
| gpl-2.0 |
GalaticStryder/kernel_leeco_msm8996 | drivers/clocksource/sh_tmu.c | 448 | 15380 | /*
* SuperH Timer Support - TMU
*
* Copyright (C) 2009 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
enum sh_tmu_model {
SH_TMU,
SH_TMU_SH3,
};
struct sh_tmu_device;
struct sh_tmu_channel {
struct sh_tmu_device *tmu;
unsigned int index;
void __iomem *base;
int irq;
unsigned long rate;
unsigned long periodic;
struct clock_event_device ced;
struct clocksource cs;
bool cs_enabled;
unsigned int enable_count;
};
struct sh_tmu_device {
struct platform_device *pdev;
void __iomem *mapbase;
struct clk *clk;
enum sh_tmu_model model;
raw_spinlock_t lock; /* Protect the shared start/stop register */
struct sh_tmu_channel *channels;
unsigned int num_channels;
bool has_clockevent;
bool has_clocksource;
};
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */
#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
unsigned long offs;
if (reg_nr == TSTR) {
switch (ch->tmu->model) {
case SH_TMU_SH3:
return ioread8(ch->tmu->mapbase + 2);
case SH_TMU:
return ioread8(ch->tmu->mapbase + 4);
}
}
offs = reg_nr << 2;
if (reg_nr == TCR)
return ioread16(ch->base + offs);
else
return ioread32(ch->base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
unsigned long value)
{
unsigned long offs;
if (reg_nr == TSTR) {
switch (ch->tmu->model) {
case SH_TMU_SH3:
return iowrite8(value, ch->tmu->mapbase + 2);
case SH_TMU:
return iowrite8(value, ch->tmu->mapbase + 4);
}
}
offs = reg_nr << 2;
if (reg_nr == TCR)
iowrite16(value, ch->base + offs);
else
iowrite32(value, ch->base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->tmu->lock, flags);
value = sh_tmu_read(ch, TSTR);
if (start)
value |= 1 << ch->index;
else
value &= ~(1 << ch->index);
sh_tmu_write(ch, TSTR, value);
raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
int ret;
/* enable clock */
ret = clk_enable(ch->tmu->clk);
if (ret) {
dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
ch->index);
return ret;
}
/* make sure channel is disabled */
sh_tmu_start_stop_ch(ch, 0);
/* maximum timeout */
sh_tmu_write(ch, TCOR, 0xffffffff);
sh_tmu_write(ch, TCNT, 0xffffffff);
/* configure channel to parent clock / 4, irq off */
ch->rate = clk_get_rate(ch->tmu->clk) / 4;
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
/* enable channel */
sh_tmu_start_stop_ch(ch, 1);
return 0;
}
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
if (ch->enable_count++ > 0)
return 0;
pm_runtime_get_sync(&ch->tmu->pdev->dev);
dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
return __sh_tmu_enable(ch);
}
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
/* disable channel */
sh_tmu_start_stop_ch(ch, 0);
/* disable interrupts in TMU block */
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
/* stop clock */
clk_disable(ch->tmu->clk);
}
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
if (WARN_ON(ch->enable_count == 0))
return;
if (--ch->enable_count > 0)
return;
__sh_tmu_disable(ch);
dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
pm_runtime_put(&ch->tmu->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
int periodic)
{
/* stop timer */
sh_tmu_start_stop_ch(ch, 0);
/* acknowledge interrupt */
sh_tmu_read(ch, TCR);
/* enable interrupt */
sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
/* reload delta value in case of periodic timer */
if (periodic)
sh_tmu_write(ch, TCOR, delta);
else
sh_tmu_write(ch, TCOR, 0xffffffff);
sh_tmu_write(ch, TCNT, delta);
/* start timer */
sh_tmu_start_stop_ch(ch, 1);
}
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
struct sh_tmu_channel *ch = dev_id;
/* disable or acknowledge interrupt */
if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
else
sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
/* notify clockevent layer */
ch->ced.event_handler(&ch->ced);
return IRQ_HANDLED;
}
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
return container_of(cs, struct sh_tmu_channel, cs);
}
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
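	/* TCNT counts down; invert it so the clocksource increases. */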
return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
int ret;
if (WARN_ON(ch->cs_enabled))
return 0;
ret = sh_tmu_enable(ch);
if (!ret) {
__clocksource_updatefreq_hz(cs, ch->rate);
ch->cs_enabled = true;
}
return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
if (WARN_ON(!ch->cs_enabled))
return;
sh_tmu_disable(ch);
ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
if (!ch->cs_enabled)
return;
if (--ch->enable_count == 0) {
__sh_tmu_disable(ch);
pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
if (!ch->cs_enabled)
return;
if (ch->enable_count++ == 0) {
pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
__sh_tmu_enable(ch);
}
}
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
const char *name)
{
struct clocksource *cs = &ch->cs;
cs->name = name;
cs->rating = 200;
cs->read = sh_tmu_clocksource_read;
cs->enable = sh_tmu_clocksource_enable;
cs->disable = sh_tmu_clocksource_disable;
cs->suspend = sh_tmu_clocksource_suspend;
cs->resume = sh_tmu_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(32);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
ch->index);
/* Register with dummy 1 Hz value, gets updated in ->enable() */
clocksource_register_hz(cs, 1);
return 0;
}
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
return container_of(ced, struct sh_tmu_channel, ced);
}
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
struct clock_event_device *ced = &ch->ced;
sh_tmu_enable(ch);
clockevents_config(ced, ch->rate);
if (periodic) {
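		/* Ticks per jiffy, rounded to the nearest integer. */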
ch->periodic = (ch->rate + HZ/2) / HZ;
sh_tmu_set_next(ch, ch->periodic, 1);
}
}
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
struct clock_event_device *ced)
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
int disabled = 0;
/* deal with old setting first */
switch (ced->mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
sh_tmu_disable(ch);
disabled = 1;
break;
default:
break;
}
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
dev_info(&ch->tmu->pdev->dev,
"ch%u: used for periodic clock events\n", ch->index);
sh_tmu_clock_event_start(ch, 1);
break;
case CLOCK_EVT_MODE_ONESHOT:
dev_info(&ch->tmu->pdev->dev,
"ch%u: used for oneshot clock events\n", ch->index);
sh_tmu_clock_event_start(ch, 0);
break;
case CLOCK_EVT_MODE_UNUSED:
if (!disabled)
sh_tmu_disable(ch);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
default:
break;
}
}
static int sh_tmu_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
/* program new delta value */
sh_tmu_set_next(ch, delta, 0);
return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
const char *name)
{
struct clock_event_device *ced = &ch->ced;
int ret;
ced->name = name;
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
ced->cpumask = cpumask_of(0);
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
ced->suspend = sh_tmu_clock_event_suspend;
ced->resume = sh_tmu_clock_event_resume;
dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
ch->index);
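	/*
	 * Register with a dummy 1 Hz frequency and a minimum delta of
	 * 0x300 ticks; the real rate is configured when the channel is
	 * started.
	 */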
clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
ret = request_irq(ch->irq, sh_tmu_interrupt,
IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
dev_name(&ch->tmu->pdev->dev), ch);
if (ret) {
dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
ch->index, ch->irq);
return;
}
}
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
bool clockevent, bool clocksource)
{
if (clockevent) {
ch->tmu->has_clockevent = true;
sh_tmu_register_clockevent(ch, name);
} else if (clocksource) {
ch->tmu->has_clocksource = true;
sh_tmu_register_clocksource(ch, name);
}
return 0;
}
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
bool clockevent, bool clocksource,
struct sh_tmu_device *tmu)
{
/* Skip unused channels. */
if (!clockevent && !clocksource)
return 0;
ch->tmu = tmu;
ch->index = index;
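	/*
	 * Channel register blocks are 12 bytes apart and start after the
	 * shared start/stop register: offset 4 on SH3 parts, 8 otherwise.
	 */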
if (tmu->model == SH_TMU_SH3)
ch->base = tmu->mapbase + 4 + ch->index * 12;
else
ch->base = tmu->mapbase + 8 + ch->index * 12;
ch->irq = platform_get_irq(tmu->pdev, index);
if (ch->irq < 0) {
dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
ch->index);
return ch->irq;
}
ch->cs_enabled = false;
ch->enable_count = 0;
return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
clockevent, clocksource);
}
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
struct resource *res;
res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
return -ENXIO;
}
tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
if (tmu->mapbase == NULL)
return -ENXIO;
return 0;
}
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
struct device_node *np = tmu->pdev->dev.of_node;
tmu->model = SH_TMU;
tmu->num_channels = 3;
of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
if (tmu->num_channels != 2 && tmu->num_channels != 3) {
dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
tmu->num_channels);
return -EINVAL;
}
return 0;
}
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
unsigned int i;
int ret;
tmu->pdev = pdev;
raw_spin_lock_init(&tmu->lock);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
ret = sh_tmu_parse_dt(tmu);
if (ret < 0)
return ret;
} else if (pdev->dev.platform_data) {
const struct platform_device_id *id = pdev->id_entry;
struct sh_timer_config *cfg = pdev->dev.platform_data;
tmu->model = id->driver_data;
tmu->num_channels = hweight8(cfg->channels_mask);
} else {
dev_err(&tmu->pdev->dev, "missing platform data\n");
return -ENXIO;
}
/* Get hold of clock. */
tmu->clk = clk_get(&tmu->pdev->dev, "fck");
if (IS_ERR(tmu->clk)) {
dev_err(&tmu->pdev->dev, "cannot get clock\n");
return PTR_ERR(tmu->clk);
}
ret = clk_prepare(tmu->clk);
if (ret < 0)
goto err_clk_put;
/* Map the memory resource. */
ret = sh_tmu_map_memory(tmu);
if (ret < 0) {
dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
goto err_clk_unprepare;
}
/* Allocate and setup the channels. */
tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
GFP_KERNEL);
if (tmu->channels == NULL) {
ret = -ENOMEM;
goto err_unmap;
}
/*
* Use the first channel as a clock event device and the second channel
* as a clock source.
*/
for (i = 0; i < tmu->num_channels; ++i) {
ret = sh_tmu_channel_setup(&tmu->channels[i], i,
i == 0, i == 1, tmu);
if (ret < 0)
goto err_unmap;
}
platform_set_drvdata(pdev, tmu);
return 0;
err_unmap:
kfree(tmu->channels);
iounmap(tmu->mapbase);
err_clk_unprepare:
clk_unprepare(tmu->clk);
err_clk_put:
clk_put(tmu->clk);
return ret;
}
static int sh_tmu_probe(struct platform_device *pdev)
{
struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
int ret;
if (!is_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
if (tmu) {
dev_info(&pdev->dev, "kept as earlytimer\n");
goto out;
}
tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
if (tmu == NULL)
return -ENOMEM;
ret = sh_tmu_setup(tmu, pdev);
if (ret) {
kfree(tmu);
pm_runtime_idle(&pdev->dev);
return ret;
}
if (is_early_platform_device(pdev))
return 0;
out:
if (tmu->has_clockevent || tmu->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
else
pm_runtime_idle(&pdev->dev);
return 0;
}
static int sh_tmu_remove(struct platform_device *pdev)
{
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static const struct platform_device_id sh_tmu_id_table[] = {
{ "sh-tmu", SH_TMU },
{ "sh-tmu-sh3", SH_TMU_SH3 },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
{ .compatible = "renesas,tmu" },
{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
.remove = sh_tmu_remove,
.driver = {
.name = "sh_tmu",
.of_match_table = of_match_ptr(sh_tmu_of_table),
},
.id_table = sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
return platform_driver_register(&sh_tmu_device_driver);
}
static void __exit sh_tmu_exit(void)
{
platform_driver_unregister(&sh_tmu_device_driver);
}
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
paulalesius/kernel-3.10.20-lenovo-tablet | arch/arm/mach-imx/platsmp.c | 1984 | 2671 | /*
* Copyright 2011 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/smp_scu.h>
#include <asm/mach/map.h>
#include "common.h"
#include "hardware.h"
#define SCU_STANDBY_ENABLE (1 << 5)
u32 g_diag_reg;
static void __iomem *scu_base;
static struct map_desc scu_io_desc __initdata = {
/* .virtual and .pfn are run-time assigned */
.length = SZ_4K,
.type = MT_DEVICE,
};
void __init imx_scu_map_io(void)
{
unsigned long base;
/* Get SCU base */
asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (base));
scu_io_desc.virtual = IMX_IO_P2V(base);
scu_io_desc.pfn = __phys_to_pfn(base);
iotable_init(&scu_io_desc, 1);
scu_base = IMX_IO_ADDRESS(base);
}
void imx_scu_standby_enable(void)
{
u32 val = readl_relaxed(scu_base);
val |= SCU_STANDBY_ENABLE;
writel_relaxed(val, scu_base);
}
static int __cpuinit imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
imx_set_cpu_jump(cpu, v7_secondary_startup);
imx_enable_cpu(cpu, true);
return 0;
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
static void __init imx_smp_init_cpus(void)
{
int i, ncores;
ncores = scu_get_core_count(scu_base);
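	/* CPUs beyond what the SCU reports can never become present. */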
for (i = ncores; i < NR_CPUS; i++)
set_cpu_possible(i, false);
}
void imx_smp_prepare(void)
{
scu_enable(scu_base);
}
static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
{
imx_smp_prepare();
/*
* The diagnostic register holds the errata bits. Mostly bootloader
* does not bring up secondary cores, so that when errata bits are set
* in bootloader, they are set only for boot cpu. But on a SMP
* configuration, it should be equally done on every single core.
* Read the register from boot cpu here, and will replicate it into
* secondary cores when booting them.
*/
asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
__cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg));
outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1));
}
struct smp_operations imx_smp_ops __initdata = {
.smp_init_cpus = imx_smp_init_cpus,
.smp_prepare_cpus = imx_smp_prepare_cpus,
.smp_boot_secondary = imx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = imx_cpu_die,
.cpu_kill = imx_cpu_kill,
#endif
};
| gpl-2.0 |
moresushant48/android_kernel_cyanogen_msm8916 | drivers/net/can/sja1000/ems_pci.c | 2240 | 10101 | /*
* Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
* Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/io.h>
#include "sja1000.h"
#define DRV_NAME "ems_pci"
MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card");
MODULE_LICENSE("GPL v2");
#define EMS_PCI_V1_MAX_CHAN 2
#define EMS_PCI_V2_MAX_CHAN 4
#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN
struct ems_pci_card {
int version;
int channels;
struct pci_dev *pci_dev;
struct net_device *net_dev[EMS_PCI_MAX_CHAN];
void __iomem *conf_addr;
void __iomem *base_addr;
};
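/* The SJA1000 cores are clocked at half of the 16 MHz board oscillator. */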
#define EMS_PCI_CAN_CLOCK (16000000 / 2)
/*
* Register definitions and descriptions are from LinCAN 0.3.3.
*
* PSB4610 PITA-2 bridge control registers
*/
#define PITA2_ICR 0x00 /* Interrupt Control Register */
#define PITA2_ICR_INT0 0x00000002 /* [RC] INT0 Active/Clear */
#define PITA2_ICR_INT0_EN 0x00020000 /* [RW] Enable INT0 */
#define PITA2_MISC 0x1c /* Miscellaneous Register */
#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
/*
* Register definitions for the PLX 9030
*/
#define PLX_ICSR 0x4c /* Interrupt Control/Status register */
#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
#define PLX_ICSR_PCIINT_ENA 0x0040 /* PCI Interrupt Enable */
#define PLX_ICSR_LINTI1_CLR 0x0400 /* Local Edge Triggerable Interrupt Clear */
#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
PLX_ICSR_LINTI1_CLR)
/*
 * The board configuration is probably as follows:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
* Setting the OCR register to 0xDA is a good idea.
* This means normal output mode, push-pull and the correct polarity.
*/
#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
/*
* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one CLKOUT output.
*/
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
#define EMS_PCI_V1_BASE_BAR 1
#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
#define EMS_PCI_V2_BASE_BAR 2
#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
/* CPC-PCI v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
/*
* Helper to read internal registers from card logic (not CAN)
*/
static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
{
return readb(card->base_addr + (port * 4));
}
static u8 ems_pci_v1_read_reg(const struct sja1000_priv *priv, int port)
{
return readb(priv->reg_base + (port * 4));
}
static void ems_pci_v1_write_reg(const struct sja1000_priv *priv,
int port, u8 val)
{
writeb(val, priv->reg_base + (port * 4));
}
static void ems_pci_v1_post_irq(const struct sja1000_priv *priv)
{
struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
}
static u8 ems_pci_v2_read_reg(const struct sja1000_priv *priv, int port)
{
return readb(priv->reg_base + port);
}
static void ems_pci_v2_write_reg(const struct sja1000_priv *priv,
int port, u8 val)
{
writeb(val, priv->reg_base + port);
}
static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
{
struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
/*
* Check if a CAN controller is present at the specified location
 * by trying to switch them into PeliCAN mode
*/
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
{
unsigned char res;
/* Make sure SJA1000 is in reset mode */
priv->write_reg(priv, SJA1000_MOD, 1);
priv->write_reg(priv, SJA1000_CDR, CDR_PELICAN);
/* read reset-values */
res = priv->read_reg(priv, SJA1000_CDR);
if (res == CDR_PELICAN)
return 1;
return 0;
}
static void ems_pci_del_card(struct pci_dev *pdev)
{
struct ems_pci_card *card = pci_get_drvdata(pdev);
struct net_device *dev;
int i = 0;
for (i = 0; i < card->channels; i++) {
dev = card->net_dev[i];
if (!dev)
continue;
dev_info(&pdev->dev, "Removing %s.\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
}
if (card->base_addr != NULL)
pci_iounmap(card->pci_dev, card->base_addr);
if (card->conf_addr != NULL)
pci_iounmap(card->pci_dev, card->conf_addr);
kfree(card);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static void ems_pci_card_reset(struct ems_pci_card *card)
{
/* Request board reset */
writeb(0, card->base_addr);
}
/*
* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pci_add_card(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
int max_chan, conf_size, base_bar;
int err, i;
/* Enabling PCI device */
if (pci_enable_device(pdev) < 0) {
dev_err(&pdev->dev, "Enabling PCI device failed\n");
return -ENODEV;
}
/* Allocating card structures to hold addresses, ... */
card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
if (card == NULL) {
pci_disable_device(pdev);
return -ENOMEM;
}
pci_set_drvdata(pdev, card);
card->pci_dev = pdev;
card->channels = 0;
if (pdev->vendor == PCI_VENDOR_ID_PLX) {
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
card->conf_addr = pci_iomap(pdev, 0, conf_size);
if (card->conf_addr == NULL) {
err = -ENOMEM;
goto failure_cleanup;
}
card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
if (card->base_addr == NULL) {
err = -ENOMEM;
goto failure_cleanup;
}
if (card->version == 1) {
/* Configure PITA-2 parallel interface (enable MUX) */
writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC);
/* Check for unique EMS CAN signature */
if (ems_pci_v1_readb(card, 0) != 0x55 ||
ems_pci_v1_readb(card, 1) != 0xAA ||
ems_pci_v1_readb(card, 2) != 0x01 ||
ems_pci_v1_readb(card, 3) != 0xCB ||
ems_pci_v1_readb(card, 4) != 0x11) {
dev_err(&pdev->dev,
"Not EMS Dr. Thomas Wuensche interface\n");
err = -ENODEV;
goto failure_cleanup;
}
}
ems_pci_card_reset(card);
/* Detect available channels */
for (i = 0; i < max_chan; i++) {
dev = alloc_sja1000dev(0);
if (dev == NULL) {
err = -ENOMEM;
goto failure_cleanup;
}
card->net_dev[i] = dev;
priv = netdev_priv(dev);
priv->priv = card;
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
+ (i * EMS_PCI_CAN_CTRL_SIZE);
if (card->version == 1) {
priv->read_reg = ems_pci_v1_read_reg;
priv->write_reg = ems_pci_v1_write_reg;
priv->post_irq = ems_pci_v1_post_irq;
} else {
priv->read_reg = ems_pci_v2_read_reg;
priv->write_reg = ems_pci_v2_write_reg;
priv->post_irq = ems_pci_v2_post_irq;
}
/* Check if channel is present */
if (ems_pci_check_chan(priv)) {
priv->can.clock.freq = EMS_PCI_CAN_CLOCK;
priv->ocr = EMS_PCI_OCR;
priv->cdr = EMS_PCI_CDR;
SET_NETDEV_DEV(dev, &pdev->dev);
if (card->version == 1)
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
else
/* enable IRQ in PLX 9030 */
writel(PLX_ICSR_ENA_CLR,
card->conf_addr + PLX_ICSR);
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
dev_err(&pdev->dev, "Registering device failed "
"(err=%d)\n", err);
free_sja1000dev(dev);
goto failure_cleanup;
}
card->channels++;
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
i + 1, priv->reg_base, dev->irq);
} else {
free_sja1000dev(dev);
}
}
return 0;
failure_cleanup:
dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
ems_pci_del_card(pdev);
return err;
}
static struct pci_driver ems_pci_driver = {
.name = DRV_NAME,
.id_table = ems_pci_tbl,
.probe = ems_pci_add_card,
.remove = ems_pci_del_card,
};
module_pci_driver(ems_pci_driver);
| gpl-2.0 |
XileForce/Vindicator-S6-Unified | drivers/ata/pata_atiixp.c | 2752 | 8723 | /*
* pata_atiixp.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
* (C) 2009-2010 Bartlomiej Zolnierkiewicz
*
* Based on
*
* linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
*
* Copyright (C) 2003 ATI Inc. <hyu@ati.com>
* Copyright (C) 2004 Bartlomiej Zolnierkiewicz
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"
enum {
ATIIXP_IDE_PIO_TIMING = 0x40,
ATIIXP_IDE_MWDMA_TIMING = 0x44,
ATIIXP_IDE_PIO_CONTROL = 0x48,
ATIIXP_IDE_PIO_MODE = 0x4a,
ATIIXP_IDE_UDMA_CONTROL = 0x54,
ATIIXP_IDE_UDMA_MODE = 0x56
};
static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
{
/* Board has onboard PATA<->SATA converters */
.ident = "MSI E350DM-E33",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
},
},
{ }
};
static int atiixp_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 udma;
if (dmi_check_system(attixp_cable_override_dmi_table))
return ATA_CBL_PATA40_SHORT;
	/* Hack taken from drivers/ide/pci. Really we want to do raw cable
	   detection here, not follow the BIOS mode guess. */
pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
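/* Serializes timing register updates shared by both channels and drives. */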
static DEFINE_SPINLOCK(atiixp_lock);
/**
* atiixp_prereset - perform reset handling
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Reset sequence checking enable bits to see which ports are
* active.
*/
static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits atiixp_enable_bits[] = {
{ 0x48, 1, 0x01, 0x00 },
{ 0x48, 1, 0x08, 0x00 }
};
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
return -ENOENT;
return ata_sff_prereset(link, deadline);
}
/**
* atiixp_set_pio_timing - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called by both the pio and dma setup functions to set the controller
* timings for PIO transfers. We must load both the mode number and
* timing values into the controller.
*/
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
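	/*
	 * Per-mode timing bytes for PIO0-4; each packs the command active
	 * and recovery clocks (encoding carried over from the old atiixp
	 * IDE driver this is based on).
	 */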
static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u32 pio_timing_data;
u16 pio_mode_data;
pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
pio_mode_data &= ~(0x7 << (4 * dn));
pio_mode_data |= pio << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
pio_timing_data &= ~(0xFF << timing_shift);
pio_timing_data |= (pio_timings[pio] << timing_shift);
pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
/**
* atiixp_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. We use a shared helper for this
* as the DMA setup must also adjust the PIO timing information.
*/
static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned long flags;
spin_lock_irqsave(&atiixp_lock, flags);
atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
* atiixp_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup. We use timing tables for most
* modes but must tune an appropriate PIO mode to match.
*/
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static u8 mwdma_timings[3] = { 0x77, 0x21, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dma = adev->dma_mode;
int dn = 2 * ap->port_no + adev->devno;
int wanted_pio;
unsigned long flags;
spin_lock_irqsave(&atiixp_lock, flags);
if (adev->dma_mode >= XFER_UDMA_0) {
u16 udma_mode_data;
dma -= XFER_UDMA_0;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
udma_mode_data &= ~(0x7 << (4 * dn));
udma_mode_data |= dma << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
} else {
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u32 mwdma_timing_data;
dma -= XFER_MW_DMA_0;
pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
&mwdma_timing_data);
mwdma_timing_data &= ~(0xFF << timing_shift);
mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
mwdma_timing_data);
}
/*
* We must now look at the PIO mode situation. We may need to
* adjust the PIO mode to keep the timings acceptable
*/
if (adev->dma_mode >= XFER_MW_DMA_2)
wanted_pio = 4;
else if (adev->dma_mode == XFER_MW_DMA_1)
wanted_pio = 3;
else if (adev->dma_mode == XFER_MW_DMA_0)
wanted_pio = 0;
else BUG();
if (adev->pio_mode != wanted_pio)
atiixp_set_pio_timing(ap, adev, wanted_pio);
spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
* atiixp_bmdma_start - DMA start callback
* @qc: Command in progress
*
* When DMA begins we need to ensure that the UDMA control
* register for the channel is correctly set.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + adev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
if (ata_using_udma(adev))
tmp16 |= (1 << dn);
else
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_start(qc);
}
/**
* atiixp_dma_stop - DMA stop callback
* @qc: Command in progress
*
* DMA has completed. Clear the UDMA flag as the next operations will
* be PIO ones not UDMA data transfer.
*
* Note: The host lock held by the libata layer protects
* us from two channels both trying to set DMA bits at once
*/
static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + qc->dev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_stop(qc);
}
static struct scsi_host_template atiixp_sht = {
ATA_BMDMA_SHT(DRV_NAME),
.sg_tablesize = LIBATA_DUMB_MAX_PRD,
};
static struct ata_port_operations atiixp_port_ops = {
.inherits = &ata_bmdma_port_ops,
.qc_prep = ata_bmdma_dumb_qc_prep,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
};
static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
.udma_mask = ATA_UDMA5,
.port_ops = &atiixp_port_ops
};
const struct ata_port_info *ppi[] = { &info, &info };
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },
{ },
};
static struct pci_driver atiixp_pci_driver = {
.name = DRV_NAME,
.id_table = atiixp,
.probe = atiixp_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.resume = ata_pci_device_resume,
.suspend = ata_pci_device_suspend,
#endif
};
module_pci_driver(atiixp_pci_driver);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
ML-Design/android-2.6.35 | drivers/gpu/drm/drm_dma.c | 4032 | 4105 | /**
* \file drm_dma.c
* DMA IOCTL and function support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
/**
* Initialize the DMA data.
*
* \param dev DRM device.
* \return zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
int drm_dma_setup(struct drm_device *dev)
{
int i;
dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
return 0;
}
/**
* Cleanup the DMA resources.
*
* \param dev DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
void drm_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i, j;
if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
drm_pci_free(dev, dma->bufs[i].seglist[j]);
}
}
kfree(dma->bufs[i].seglist);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
kfree(dma->bufs[i].buflist[j].dev_private);
}
kfree(dma->bufs[i].buflist);
}
}
kfree(dma->buflist);
kfree(dma->pagelist);
kfree(dev->dma);
dev->dma = NULL;
}
/**
* Free a buffer.
*
* \param dev DRM device.
* \param buf buffer to free.
*
* Resets the fields of \p buf.
*/
void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
&& waitqueue_active(&buf->dma_wait)) {
wake_up_interruptible(&buf->dma_wait);
}
}
/**
* Reclaim the buffers.
*
* \param file_priv DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
void drm_core_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);
| gpl-2.0 |
makarandk/linux | arch/mips/emma/markeins/irq.c | 4544 | 8151 | /*
* Copyright (C) NEC Electronics Corporation 2004-2006
*
* This file is based on the arch/mips/ddb5xxx/ddb5477/irq.c
*
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/emma/emma2rh.h>
static void emma2rh_irq_enable(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_IRQ_BASE;
u32 reg_value, reg_bitmask, reg_index;
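/* Enables are banked 32 irqs per register: irq / 32 picks one of the
 * equally spaced EMMA2RH_BHIF_INT_EN_0/1/2 registers (the address
 * arithmetic below relies on that spacing) and irq % 32 the bit. */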
reg_index = EMMA2RH_BHIF_INT_EN_0 +
(EMMA2RH_BHIF_INT_EN_1 - EMMA2RH_BHIF_INT_EN_0) * (irq / 32);
reg_value = emma2rh_in32(reg_index);
reg_bitmask = 0x1 << (irq % 32);
emma2rh_out32(reg_index, reg_value | reg_bitmask);
}
static void emma2rh_irq_disable(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_IRQ_BASE;
u32 reg_value, reg_bitmask, reg_index;
reg_index = EMMA2RH_BHIF_INT_EN_0 +
(EMMA2RH_BHIF_INT_EN_1 - EMMA2RH_BHIF_INT_EN_0) * (irq / 32);
reg_value = emma2rh_in32(reg_index);
reg_bitmask = 0x1 << (irq % 32);
emma2rh_out32(reg_index, reg_value & ~reg_bitmask);
}
struct irq_chip emma2rh_irq_controller = {
.name = "emma2rh_irq",
.irq_mask = emma2rh_irq_disable,
.irq_unmask = emma2rh_irq_enable,
};
void emma2rh_irq_init(void)
{
u32 i;
for (i = 0; i < NUM_EMMA2RH_IRQ; i++)
irq_set_chip_and_handler_name(EMMA2RH_IRQ_BASE + i,
&emma2rh_irq_controller,
handle_level_irq, "level");
}
static void emma2rh_sw_irq_enable(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_SW_IRQ_BASE;
u32 reg;
reg = emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN);
reg |= 1 << irq;
emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, reg);
}
static void emma2rh_sw_irq_disable(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_SW_IRQ_BASE;
u32 reg;
reg = emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN);
reg &= ~(1 << irq);
emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, reg);
}
struct irq_chip emma2rh_sw_irq_controller = {
.name = "emma2rh_sw_irq",
.irq_mask = emma2rh_sw_irq_disable,
.irq_unmask = emma2rh_sw_irq_enable,
};
void emma2rh_sw_irq_init(void)
{
u32 i;
for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++)
irq_set_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i,
&emma2rh_sw_irq_controller,
handle_level_irq, "level");
}
static void emma2rh_gpio_irq_enable(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
u32 reg;
reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
reg |= 1 << irq;
emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg);
}
static void emma2rh_gpio_irq_disable(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
u32 reg;
reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
reg &= ~(1 << irq);
emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg);
}
static void emma2rh_gpio_irq_ack(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~(1 << irq));
}
static void emma2rh_gpio_irq_mask_ack(struct irq_data *d)
{
unsigned int irq = d->irq - EMMA2RH_GPIO_IRQ_BASE;
u32 reg;
emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~(1 << irq));
reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
reg &= ~(1 << irq);
emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg);
}
struct irq_chip emma2rh_gpio_irq_controller = {
.name = "emma2rh_gpio_irq",
.irq_ack = emma2rh_gpio_irq_ack,
.irq_mask = emma2rh_gpio_irq_disable,
.irq_mask_ack = emma2rh_gpio_irq_mask_ack,
.irq_unmask = emma2rh_gpio_irq_enable,
};
void emma2rh_gpio_irq_init(void)
{
u32 i;
for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++)
irq_set_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i,
&emma2rh_gpio_irq_controller,
handle_edge_irq, "edge");
}
static struct irqaction irq_cascade = {
.handler = no_action,
.flags = IRQF_NO_THREAD,
.name = "cascade",
.dev_id = NULL,
.next = NULL,
};
/*
* the first level int-handler will jump here if it is an emma2rh irq
*/
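/*
 * A sketch of the scan order below: bank 0 (irqs 0-31, including the
 * optional S/W cascade), then bank 1 (irqs 32-63, including the
 * optional GPIO cascade), then bank 2 (irqs 64-95); the first pending
 * and enabled source found is dispatched and the handler returns.
 */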
void emma2rh_irq_dispatch(void)
{
u32 intStatus;
u32 bitmask;
u32 i;
intStatus = emma2rh_in32(EMMA2RH_BHIF_INT_ST_0) &
emma2rh_in32(EMMA2RH_BHIF_INT_EN_0);
#ifdef EMMA2RH_SW_CASCADE
if (intStatus & (1UL << EMMA2RH_SW_CASCADE)) {
u32 swIntStatus;
swIntStatus = emma2rh_in32(EMMA2RH_BHIF_SW_INT)
& emma2rh_in32(EMMA2RH_BHIF_SW_INT_EN);
for (i = 0, bitmask = 1; i < 32; i++, bitmask <<= 1) {
if (swIntStatus & bitmask) {
do_IRQ(EMMA2RH_SW_IRQ_BASE + i);
return;
}
}
}
/* Skip S/W interrupt */
intStatus &= ~(1UL << EMMA2RH_SW_CASCADE);
#endif
for (i = 0, bitmask = 1; i < 32; i++, bitmask <<= 1) {
if (intStatus & bitmask) {
do_IRQ(EMMA2RH_IRQ_BASE + i);
return;
}
}
intStatus = emma2rh_in32(EMMA2RH_BHIF_INT_ST_1) &
emma2rh_in32(EMMA2RH_BHIF_INT_EN_1);
#ifdef EMMA2RH_GPIO_CASCADE
if (intStatus & (1UL << (EMMA2RH_GPIO_CASCADE % 32))) {
u32 gpioIntStatus;
gpioIntStatus = emma2rh_in32(EMMA2RH_GPIO_INT_ST)
& emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
for (i = 0, bitmask = 1; i < 32; i++, bitmask <<= 1) {
if (gpioIntStatus & bitmask) {
do_IRQ(EMMA2RH_GPIO_IRQ_BASE + i);
return;
}
}
}
/* Skip GPIO interrupt */
intStatus &= ~(1UL << (EMMA2RH_GPIO_CASCADE % 32));
#endif
for (i = 32, bitmask = 1; i < 64; i++, bitmask <<= 1) {
if (intStatus & bitmask) {
do_IRQ(EMMA2RH_IRQ_BASE + i);
return;
}
}
intStatus = emma2rh_in32(EMMA2RH_BHIF_INT_ST_2) &
emma2rh_in32(EMMA2RH_BHIF_INT_EN_2);
for (i = 64, bitmask = 1; i < 96; i++, bitmask <<= 1) {
if (intStatus & bitmask) {
do_IRQ(EMMA2RH_IRQ_BASE + i);
return;
}
}
}
void __init arch_init_irq(void)
{
u32 reg;
/* by default, interrupts are disabled. */
emma2rh_out32(EMMA2RH_BHIF_INT_EN_0, 0);
emma2rh_out32(EMMA2RH_BHIF_INT_EN_1, 0);
emma2rh_out32(EMMA2RH_BHIF_INT_EN_2, 0);
emma2rh_out32(EMMA2RH_BHIF_INT1_EN_0, 0);
emma2rh_out32(EMMA2RH_BHIF_INT1_EN_1, 0);
emma2rh_out32(EMMA2RH_BHIF_INT1_EN_2, 0);
emma2rh_out32(EMMA2RH_BHIF_SW_INT_EN, 0);
clear_c0_status(0xff00);
set_c0_status(0x0400);
#define GPIO_PCI (0xf<<15)
/* setup GPIO interrupt for PCI interface */
/* direction input */
reg = emma2rh_in32(EMMA2RH_GPIO_DIR);
emma2rh_out32(EMMA2RH_GPIO_DIR, reg & ~GPIO_PCI);
/* disable interrupt */
reg = emma2rh_in32(EMMA2RH_GPIO_INT_MASK);
emma2rh_out32(EMMA2RH_GPIO_INT_MASK, reg & ~GPIO_PCI);
/* level triggered */
reg = emma2rh_in32(EMMA2RH_GPIO_INT_MODE);
emma2rh_out32(EMMA2RH_GPIO_INT_MODE, reg | GPIO_PCI);
reg = emma2rh_in32(EMMA2RH_GPIO_INT_CND_A);
emma2rh_out32(EMMA2RH_GPIO_INT_CND_A, reg & (~GPIO_PCI));
/* interrupt clear */
emma2rh_out32(EMMA2RH_GPIO_INT_ST, ~GPIO_PCI);
/* init all controllers */
emma2rh_irq_init();
emma2rh_sw_irq_init();
emma2rh_gpio_irq_init();
mips_cpu_irq_init();
/* setup cascade interrupts */
setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade);
setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_GPIO_CASCADE, &irq_cascade);
setup_irq(MIPS_CPU_IRQ_BASE + 2, &irq_cascade);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & STATUSF_IP2)
emma2rh_irq_dispatch();
else if (pending & STATUSF_IP1)
do_IRQ(MIPS_CPU_IRQ_BASE + 1);
else if (pending & STATUSF_IP0)
do_IRQ(MIPS_CPU_IRQ_BASE + 0);
else
spurious_interrupt();
}
| gpl-2.0 |
andytimes/kernel-msm8660 | drivers/tty/serial/crisv10.c | 4800 | 128985 | /*
* Serial port driver for the ETRAX 100LX chip
*
* Copyright (C) 1998-2007 Axis Communications AB
*
* Many, many authors. Based once upon a time on serial.c for 16x50.
*
*/
static char *serial_version = "$Revision: 1.25 $";
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <arch/svinto.h>
#include <arch/system.h>
/* non-arch dependent serial structures are in linux/serial.h */
#include <linux/serial.h>
/* while we keep our own stuff (struct e100_serial) in a local .h file */
#include "crisv10.h"
#include <asm/fasttimer.h>
#include <arch/io_interface_mux.h>
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
#ifndef CONFIG_ETRAX_FAST_TIMER
#error "Enable FAST_TIMER to use SERIAL_FAST_TIMER"
#endif
#endif
#if defined(CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS) && \
(CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS == 0)
#error "RX_TIMEOUT_TICKS == 0 not allowed, use 1"
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PA) && defined(CONFIG_ETRAX_RS485_ON_PORT_G)
#error "Disable either CONFIG_ETRAX_RS485_ON_PA or CONFIG_ETRAX_RS485_ON_PORT_G"
#endif
/*
* All of the compatibility code so we can compile serial.c against
* older kernels is hidden in serial_compat.h
*/
#if defined(LOCAL_HEADERS)
#include "serial_compat.h"
#endif
struct tty_driver *serial_driver;
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
//#define SERIAL_DEBUG_INTR
//#define SERIAL_DEBUG_OPEN
//#define SERIAL_DEBUG_FLOW
//#define SERIAL_DEBUG_DATA
//#define SERIAL_DEBUG_THROTTLE
//#define SERIAL_DEBUG_IO /* Debug for Extra control and status pins */
//#define SERIAL_DEBUG_LINE 0 /* What serport we want to debug */
/* Enable this to use serial interrupts to handle when you
expect the first received event on the serial port to
be an error, break or similar. Used to be able to flash IRMA
from eLinux */
#define SERIAL_HANDLE_EARLY_ERRORS
/* Currently 16 descriptors x 128 bytes = 2048 bytes */
#define SERIAL_DESCR_BUF_SIZE 256
#define SERIAL_PRESCALE_BASE 3125000 /* 3.125MHz */
#define DEF_BAUD_BASE SERIAL_PRESCALE_BASE
/* We don't want to load the system with massive fast timer interrupts
* at high baud rates, so limit it to 250 us (4 kHz) */
#define MIN_FLUSH_TIME_USEC 250
/* Add an x here to log a lot of timer stuff */
#define TIMERD(x)
/* Debug details of interrupt handling */
#define DINTR1(x) /* irq on/off, errors */
#define DINTR2(x) /* tx and rx */
/* Debug flip buffer stuff */
#define DFLIP(x)
/* Debug flow control and overview of data flow */
#define DFLOW(x)
#define DBAUD(x)
#define DLOG_INT_TRIG(x)
//#define DEBUG_LOG_INCLUDED
#ifndef DEBUG_LOG_INCLUDED
#define DEBUG_LOG(line, string, value)
#else
struct debug_log_info
{
unsigned long time;
unsigned long timer_data;
// int line;
const char *string;
int value;
};
#define DEBUG_LOG_SIZE 4096
struct debug_log_info debug_log[DEBUG_LOG_SIZE];
int debug_log_pos = 0;
#define DEBUG_LOG(_line, _string, _value) do { \
if ((_line) == SERIAL_DEBUG_LINE) {\
debug_log_func(_line, _string, _value); \
}\
}while(0)
void debug_log_func(int line, const char *string, int value)
{
if (debug_log_pos < DEBUG_LOG_SIZE) {
debug_log[debug_log_pos].time = jiffies;
debug_log[debug_log_pos].timer_data = *R_TIMER_DATA;
// debug_log[debug_log_pos].line = line;
debug_log[debug_log_pos].string = string;
debug_log[debug_log_pos].value = value;
debug_log_pos++;
}
/*printk(string, value);*/
}
#endif
#ifndef CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS
/* Default number of timer ticks before flushing rx fifo
* When using "little data, low latency applications: use 0
* When using "much data applications (PPP)" use ~5
*/
#define CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS 5
#endif
unsigned long timer_data_to_ns(unsigned long timer_data);
static void change_speed(struct e100_serial *info);
static void rs_throttle(struct tty_struct * tty);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
static int rs_write(struct tty_struct *tty,
const unsigned char *buf, int count);
#ifdef CONFIG_ETRAX_RS485
static int e100_write_rs485(struct tty_struct *tty,
const unsigned char *buf, int count);
#endif
static int get_lsr_info(struct e100_serial *info, unsigned int *value);
#define DEF_BAUD 115200 /* 115.2 kbit/s */
#define STD_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define DEF_RX 0x20 /* or SERIAL_CTRL_W >> 8 */
/* Default value of tx_ctrl register: has txd(bit 7)=1 (idle) as default */
#define DEF_TX 0x80 /* or SERIAL_CTRL_B */
/* offsets from R_SERIALx_CTRL */
#define REG_DATA 0
#define REG_DATA_STATUS32 0 /* this is the 32 bit register R_SERIALx_READ */
#define REG_TR_DATA 0
#define REG_STATUS 1
#define REG_TR_CTRL 1
#define REG_REC_CTRL 2
#define REG_BAUD 3
#define REG_XOFF 4 /* this is a 32 bit register */
/* The bitfields are the same for all serial ports */
#define SER_RXD_MASK IO_MASK(R_SERIAL0_STATUS, rxd)
#define SER_DATA_AVAIL_MASK IO_MASK(R_SERIAL0_STATUS, data_avail)
#define SER_FRAMING_ERR_MASK IO_MASK(R_SERIAL0_STATUS, framing_err)
#define SER_PAR_ERR_MASK IO_MASK(R_SERIAL0_STATUS, par_err)
#define SER_OVERRUN_MASK IO_MASK(R_SERIAL0_STATUS, overrun)
#define SER_ERROR_MASK (SER_OVERRUN_MASK | SER_PAR_ERR_MASK | SER_FRAMING_ERR_MASK)
/* Values for info->errorcode */
#define ERRCODE_SET_BREAK (TTY_BREAK)
#define ERRCODE_INSERT 0x100
#define ERRCODE_INSERT_BREAK (ERRCODE_INSERT | TTY_BREAK)
#define FORCE_EOP(info) *R_SET_EOP = 1U << info->iseteop;
/*
* General note regarding the use of IO_* macros in this file:
*
* We will use the bits defined for DMA channel 6 when using various
* IO_* macros (e.g. IO_STATE, IO_MASK, IO_EXTRACT) and _assume_ they are
* the same for all channels (which of course they are).
*
* We will also use the bits defined for serial port 0 when writing commands
* to the different ports, as these bits too are the same for all ports.
*/
/* Mask for the irqs possibly enabled in R_IRQ_MASK1_RD etc. */
static const unsigned long e100_ser_int_mask = 0
#ifdef CONFIG_ETRAX_SERIAL_PORT0
| IO_MASK(R_IRQ_MASK1_RD, ser0_data) | IO_MASK(R_IRQ_MASK1_RD, ser0_ready)
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT1
| IO_MASK(R_IRQ_MASK1_RD, ser1_data) | IO_MASK(R_IRQ_MASK1_RD, ser1_ready)
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT2
| IO_MASK(R_IRQ_MASK1_RD, ser2_data) | IO_MASK(R_IRQ_MASK1_RD, ser2_ready)
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT3
| IO_MASK(R_IRQ_MASK1_RD, ser3_data) | IO_MASK(R_IRQ_MASK1_RD, ser3_ready)
#endif
;
unsigned long r_alt_ser_baudrate_shadow = 0;
/* this is the data for the four serial ports in the etrax100 */
/* DMA2(ser2), DMA4(ser3), DMA6(ser0) or DMA8(ser1) */
/* R_DMA_CHx_CLR_INTR, R_DMA_CHx_FIRST, R_DMA_CHx_CMD */
static struct e100_serial rs_table[] = {
{ .baud = DEF_BAUD,
.ioport = (unsigned char *)R_SERIAL0_CTRL,
.irq = 1U << 12, /* uses DMA 6 and 7 */
.oclrintradr = R_DMA_CH6_CLR_INTR,
.ofirstadr = R_DMA_CH6_FIRST,
.ocmdadr = R_DMA_CH6_CMD,
.ostatusadr = R_DMA_CH6_STATUS,
.iclrintradr = R_DMA_CH7_CLR_INTR,
.ifirstadr = R_DMA_CH7_FIRST,
.icmdadr = R_DMA_CH7_CMD,
.idescradr = R_DMA_CH7_DESCR,
.flags = STD_FLAGS,
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 2,
.dma_owner = dma_ser0,
.io_if = if_serial_0,
#ifdef CONFIG_ETRAX_SERIAL_PORT0
.enabled = 1,
#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
.dma_out_enabled = 1,
.dma_out_nbr = SER0_TX_DMA_NBR,
.dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
.dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 0 dma tr",
#else
.dma_out_enabled = 0,
.dma_out_nbr = UINT_MAX,
.dma_out_irq_nbr = 0,
.dma_out_irq_flags = 0,
.dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
.dma_in_enabled = 1,
.dma_in_nbr = SER0_RX_DMA_NBR,
.dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
.dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 0 dma rec",
#else
.dma_in_enabled = 0,
.dma_in_nbr = UINT_MAX,
.dma_in_irq_nbr = 0,
.dma_in_irq_flags = 0,
.dma_in_irq_description = NULL,
#endif
#else
.enabled = 0,
.io_if_description = NULL,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
}, /* ttyS0 */
#ifndef CONFIG_SVINTO_SIM
{ .baud = DEF_BAUD,
.ioport = (unsigned char *)R_SERIAL1_CTRL,
.irq = 1U << 16, /* uses DMA 8 and 9 */
.oclrintradr = R_DMA_CH8_CLR_INTR,
.ofirstadr = R_DMA_CH8_FIRST,
.ocmdadr = R_DMA_CH8_CMD,
.ostatusadr = R_DMA_CH8_STATUS,
.iclrintradr = R_DMA_CH9_CLR_INTR,
.ifirstadr = R_DMA_CH9_FIRST,
.icmdadr = R_DMA_CH9_CMD,
.idescradr = R_DMA_CH9_DESCR,
.flags = STD_FLAGS,
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 3,
.dma_owner = dma_ser1,
.io_if = if_serial_1,
#ifdef CONFIG_ETRAX_SERIAL_PORT1
.enabled = 1,
.io_if_description = "ser1",
#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
.dma_out_enabled = 1,
.dma_out_nbr = SER1_TX_DMA_NBR,
.dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
.dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 1 dma tr",
#else
.dma_out_enabled = 0,
.dma_out_nbr = UINT_MAX,
.dma_out_irq_nbr = 0,
.dma_out_irq_flags = 0,
.dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
.dma_in_enabled = 1,
.dma_in_nbr = SER1_RX_DMA_NBR,
.dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
.dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 1 dma rec",
#else
.dma_in_enabled = 0,
.dma_in_nbr = UINT_MAX,
.dma_in_irq_nbr = 0,
.dma_in_irq_flags = 0,
.dma_in_irq_description = NULL,
#endif
#else
.enabled = 0,
.io_if_description = NULL,
.dma_in_irq_nbr = 0,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
}, /* ttyS1 */
{ .baud = DEF_BAUD,
.ioport = (unsigned char *)R_SERIAL2_CTRL,
.irq = 1U << 4, /* uses DMA 2 and 3 */
.oclrintradr = R_DMA_CH2_CLR_INTR,
.ofirstadr = R_DMA_CH2_FIRST,
.ocmdadr = R_DMA_CH2_CMD,
.ostatusadr = R_DMA_CH2_STATUS,
.iclrintradr = R_DMA_CH3_CLR_INTR,
.ifirstadr = R_DMA_CH3_FIRST,
.icmdadr = R_DMA_CH3_CMD,
.idescradr = R_DMA_CH3_DESCR,
.flags = STD_FLAGS,
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 0,
.dma_owner = dma_ser2,
.io_if = if_serial_2,
#ifdef CONFIG_ETRAX_SERIAL_PORT2
.enabled = 1,
.io_if_description = "ser2",
#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
.dma_out_enabled = 1,
.dma_out_nbr = SER2_TX_DMA_NBR,
.dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
.dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 2 dma tr",
#else
.dma_out_enabled = 0,
.dma_out_nbr = UINT_MAX,
.dma_out_irq_nbr = 0,
.dma_out_irq_flags = 0,
.dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
.dma_in_enabled = 1,
.dma_in_nbr = SER2_RX_DMA_NBR,
.dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
.dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 2 dma rec",
#else
.dma_in_enabled = 0,
.dma_in_nbr = UINT_MAX,
.dma_in_irq_nbr = 0,
.dma_in_irq_flags = 0,
.dma_in_irq_description = NULL,
#endif
#else
.enabled = 0,
.io_if_description = NULL,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
}, /* ttyS2 */
{ .baud = DEF_BAUD,
.ioport = (unsigned char *)R_SERIAL3_CTRL,
.irq = 1U << 8, /* uses DMA 4 and 5 */
.oclrintradr = R_DMA_CH4_CLR_INTR,
.ofirstadr = R_DMA_CH4_FIRST,
.ocmdadr = R_DMA_CH4_CMD,
.ostatusadr = R_DMA_CH4_STATUS,
.iclrintradr = R_DMA_CH5_CLR_INTR,
.ifirstadr = R_DMA_CH5_FIRST,
.icmdadr = R_DMA_CH5_CMD,
.idescradr = R_DMA_CH5_DESCR,
.flags = STD_FLAGS,
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 1,
.dma_owner = dma_ser3,
.io_if = if_serial_3,
#ifdef CONFIG_ETRAX_SERIAL_PORT3
.enabled = 1,
.io_if_description = "ser3",
#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
.dma_out_enabled = 1,
.dma_out_nbr = SER3_TX_DMA_NBR,
.dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
.dma_out_irq_flags = 0,
.dma_out_irq_description = "serial 3 dma tr",
#else
.dma_out_enabled = 0,
.dma_out_nbr = UINT_MAX,
.dma_out_irq_nbr = 0,
.dma_out_irq_flags = 0,
.dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
.dma_in_enabled = 1,
.dma_in_nbr = SER3_RX_DMA_NBR,
.dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
.dma_in_irq_flags = 0,
.dma_in_irq_description = "serial 3 dma rec",
#else
.dma_in_enabled = 0,
.dma_in_nbr = UINT_MAX,
.dma_in_irq_nbr = 0,
.dma_in_irq_flags = 0,
.dma_in_irq_description = NULL
#endif
#else
.enabled = 0,
.io_if_description = NULL,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
} /* ttyS3 */
#endif
};
#define NR_PORTS (sizeof(rs_table)/sizeof(struct e100_serial))
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
static struct fast_timer fast_timers[NR_PORTS];
#endif
#ifdef CONFIG_ETRAX_SERIAL_PROC_ENTRY
#define PROCSTAT(x) x
struct ser_statistics_type {
int overrun_cnt;
int early_errors_cnt;
int ser_ints_ok_cnt;
int errors_cnt;
unsigned long int processing_flip;
unsigned long processing_flip_still_room;
unsigned long int timeout_flush_cnt;
int rx_dma_ints;
int tx_dma_ints;
int rx_tot;
int tx_tot;
};
static struct ser_statistics_type ser_stat[NR_PORTS];
#else
#define PROCSTAT(x)
#endif /* CONFIG_ETRAX_SERIAL_PROC_ENTRY */
/* RS-485 */
#if defined(CONFIG_ETRAX_RS485)
#ifdef CONFIG_ETRAX_FAST_TIMER
static struct fast_timer fast_timers_rs485[NR_PORTS];
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PA)
static int rs485_pa_bit = CONFIG_ETRAX_RS485_ON_PA_BIT;
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
static int rs485_port_g_bit = CONFIG_ETRAX_RS485_ON_PORT_G_BIT;
#endif
#endif
/* Info and macros needed for each port's extra control/status signals. */
#define E100_STRUCT_PORT(line, pinname) \
((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
(R_PORT_PA_DATA): ( \
(CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
(R_PORT_PB_DATA):&dummy_ser[line]))
#define E100_STRUCT_SHADOW(line, pinname) \
((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
(&port_pa_data_shadow): ( \
(CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
(&port_pb_data_shadow):&dummy_ser[line]))
#define E100_STRUCT_MASK(line, pinname) \
((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
(1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT): ( \
(CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
(1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT):DUMMY_##pinname##_MASK))
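/* Example expansion with hypothetical config values: if
 * CONFIG_ETRAX_SER0_DTR_ON_PA_BIT is -1 and
 * CONFIG_ETRAX_SER0_DTR_ON_PB_BIT is 4, then E100_STRUCT_PORT(0,DTR)
 * picks R_PORT_PB_DATA and E100_STRUCT_MASK(0,DTR) becomes (1<<4). */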
#define DUMMY_DTR_MASK 1
#define DUMMY_RI_MASK 2
#define DUMMY_DSR_MASK 4
#define DUMMY_CD_MASK 8
static unsigned char dummy_ser[NR_PORTS] = {0xFF, 0xFF, 0xFF,0xFF};
/* If not all status pins are used or disabled, use mixed mode */
#ifdef CONFIG_ETRAX_SERIAL_PORT0
#define SER0_PA_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PA_BIT+CONFIG_ETRAX_SER0_RI_ON_PA_BIT+CONFIG_ETRAX_SER0_DSR_ON_PA_BIT+CONFIG_ETRAX_SER0_CD_ON_PA_BIT)
#if SER0_PA_BITSUM != -4
# if CONFIG_ETRAX_SER0_DTR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER0_RI_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER0_DSR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER0_CD_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#define SER0_PB_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PB_BIT+CONFIG_ETRAX_SER0_RI_ON_PB_BIT+CONFIG_ETRAX_SER0_DSR_ON_PB_BIT+CONFIG_ETRAX_SER0_CD_ON_PB_BIT)
#if SER0_PB_BITSUM != -4
# if CONFIG_ETRAX_SER0_DTR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER0_RI_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER0_DSR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER0_CD_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#endif /* PORT0 */
#ifdef CONFIG_ETRAX_SERIAL_PORT1
#define SER1_PA_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PA_BIT+CONFIG_ETRAX_SER1_RI_ON_PA_BIT+CONFIG_ETRAX_SER1_DSR_ON_PA_BIT+CONFIG_ETRAX_SER1_CD_ON_PA_BIT)
#if SER1_PA_BITSUM != -4
# if CONFIG_ETRAX_SER1_DTR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER1_RI_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER1_DSR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER1_CD_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#define SER1_PB_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PB_BIT+CONFIG_ETRAX_SER1_RI_ON_PB_BIT+CONFIG_ETRAX_SER1_DSR_ON_PB_BIT+CONFIG_ETRAX_SER1_CD_ON_PB_BIT)
#if SER1_PB_BITSUM != -4
# if CONFIG_ETRAX_SER1_DTR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER1_RI_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER1_DSR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER1_CD_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#endif /* PORT1 */
#ifdef CONFIG_ETRAX_SERIAL_PORT2
#define SER2_PA_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PA_BIT+CONFIG_ETRAX_SER2_RI_ON_PA_BIT+CONFIG_ETRAX_SER2_DSR_ON_PA_BIT+CONFIG_ETRAX_SER2_CD_ON_PA_BIT)
#if SER2_PA_BITSUM != -4
# if CONFIG_ETRAX_SER2_DTR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER2_RI_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER2_DSR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER2_CD_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#define SER2_PB_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PB_BIT+CONFIG_ETRAX_SER2_RI_ON_PB_BIT+CONFIG_ETRAX_SER2_DSR_ON_PB_BIT+CONFIG_ETRAX_SER2_CD_ON_PB_BIT)
#if SER2_PB_BITSUM != -4
# if CONFIG_ETRAX_SER2_DTR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER2_RI_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER2_DSR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER2_CD_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#endif /* PORT2 */
#ifdef CONFIG_ETRAX_SERIAL_PORT3
#define SER3_PA_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PA_BIT+CONFIG_ETRAX_SER3_RI_ON_PA_BIT+CONFIG_ETRAX_SER3_DSR_ON_PA_BIT+CONFIG_ETRAX_SER3_CD_ON_PA_BIT)
#if SER3_PA_BITSUM != -4
# if CONFIG_ETRAX_SER3_DTR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER3_RI_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER3_DSR_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER3_CD_ON_PA_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#define SER3_PB_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PB_BIT+CONFIG_ETRAX_SER3_RI_ON_PB_BIT+CONFIG_ETRAX_SER3_DSR_ON_PB_BIT+CONFIG_ETRAX_SER3_CD_ON_PB_BIT)
#if SER3_PB_BITSUM != -4
# if CONFIG_ETRAX_SER3_DTR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER3_RI_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER3_DSR_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
# if CONFIG_ETRAX_SER3_CD_ON_PB_BIT == -1
# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
# endif
# endif
#endif
#endif /* PORT3 */
#if defined(CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED) || \
defined(CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED) || \
defined(CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED) || \
defined(CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED)
#define CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
#endif
#ifdef CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
/* The pins can be mixed on PA and PB */
#define CONTROL_PINS_PORT_NOT_USED(line) \
&dummy_ser[line], &dummy_ser[line], \
&dummy_ser[line], &dummy_ser[line], \
&dummy_ser[line], &dummy_ser[line], \
&dummy_ser[line], &dummy_ser[line], \
DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
struct control_pins
{
volatile unsigned char *dtr_port;
unsigned char *dtr_shadow;
volatile unsigned char *ri_port;
unsigned char *ri_shadow;
volatile unsigned char *dsr_port;
unsigned char *dsr_shadow;
volatile unsigned char *cd_port;
unsigned char *cd_shadow;
unsigned char dtr_mask;
unsigned char ri_mask;
unsigned char dsr_mask;
unsigned char cd_mask;
};
static const struct control_pins e100_modem_pins[NR_PORTS] =
{
/* Ser 0 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT0
E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
E100_STRUCT_PORT(0,RI), E100_STRUCT_SHADOW(0,RI),
E100_STRUCT_PORT(0,DSR), E100_STRUCT_SHADOW(0,DSR),
E100_STRUCT_PORT(0,CD), E100_STRUCT_SHADOW(0,CD),
E100_STRUCT_MASK(0,DTR),
E100_STRUCT_MASK(0,RI),
E100_STRUCT_MASK(0,DSR),
E100_STRUCT_MASK(0,CD)
#else
CONTROL_PINS_PORT_NOT_USED(0)
#endif
},
/* Ser 1 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT1
E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
E100_STRUCT_PORT(1,RI), E100_STRUCT_SHADOW(1,RI),
E100_STRUCT_PORT(1,DSR), E100_STRUCT_SHADOW(1,DSR),
E100_STRUCT_PORT(1,CD), E100_STRUCT_SHADOW(1,CD),
E100_STRUCT_MASK(1,DTR),
E100_STRUCT_MASK(1,RI),
E100_STRUCT_MASK(1,DSR),
E100_STRUCT_MASK(1,CD)
#else
CONTROL_PINS_PORT_NOT_USED(1)
#endif
},
/* Ser 2 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT2
E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
E100_STRUCT_PORT(2,RI), E100_STRUCT_SHADOW(2,RI),
E100_STRUCT_PORT(2,DSR), E100_STRUCT_SHADOW(2,DSR),
E100_STRUCT_PORT(2,CD), E100_STRUCT_SHADOW(2,CD),
E100_STRUCT_MASK(2,DTR),
E100_STRUCT_MASK(2,RI),
E100_STRUCT_MASK(2,DSR),
E100_STRUCT_MASK(2,CD)
#else
CONTROL_PINS_PORT_NOT_USED(2)
#endif
},
/* Ser 3 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT3
E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
E100_STRUCT_PORT(3,RI), E100_STRUCT_SHADOW(3,RI),
E100_STRUCT_PORT(3,DSR), E100_STRUCT_SHADOW(3,DSR),
E100_STRUCT_PORT(3,CD), E100_STRUCT_SHADOW(3,CD),
E100_STRUCT_MASK(3,DTR),
E100_STRUCT_MASK(3,RI),
E100_STRUCT_MASK(3,DSR),
E100_STRUCT_MASK(3,CD)
#else
CONTROL_PINS_PORT_NOT_USED(3)
#endif
}
};
#else /* CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
/* All pins are on either PA or PB for each serial port */
#define CONTROL_PINS_PORT_NOT_USED(line) \
&dummy_ser[line], &dummy_ser[line], \
DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
struct control_pins
{
volatile unsigned char *port;
unsigned char *shadow;
unsigned char dtr_mask;
unsigned char ri_mask;
unsigned char dsr_mask;
unsigned char cd_mask;
};
#define dtr_port port
#define dtr_shadow shadow
#define ri_port port
#define ri_shadow shadow
#define dsr_port port
#define dsr_shadow shadow
#define cd_port port
#define cd_shadow shadow
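/* With all four signals on a single port, the defines above collapse
 * the per-signal dtr/ri/dsr/cd accessors onto the one port/shadow
 * pair, so the E100_*_GET macros below work unchanged in both build
 * configurations. */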
static const struct control_pins e100_modem_pins[NR_PORTS] =
{
/* Ser 0 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT0
E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
E100_STRUCT_MASK(0,DTR),
E100_STRUCT_MASK(0,RI),
E100_STRUCT_MASK(0,DSR),
E100_STRUCT_MASK(0,CD)
#else
CONTROL_PINS_PORT_NOT_USED(0)
#endif
},
/* Ser 1 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT1
E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
E100_STRUCT_MASK(1,DTR),
E100_STRUCT_MASK(1,RI),
E100_STRUCT_MASK(1,DSR),
E100_STRUCT_MASK(1,CD)
#else
CONTROL_PINS_PORT_NOT_USED(1)
#endif
},
/* Ser 2 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT2
E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
E100_STRUCT_MASK(2,DTR),
E100_STRUCT_MASK(2,RI),
E100_STRUCT_MASK(2,DSR),
E100_STRUCT_MASK(2,CD)
#else
CONTROL_PINS_PORT_NOT_USED(2)
#endif
},
/* Ser 3 */
{
#ifdef CONFIG_ETRAX_SERIAL_PORT3
E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
E100_STRUCT_MASK(3,DTR),
E100_STRUCT_MASK(3,RI),
E100_STRUCT_MASK(3,DSR),
E100_STRUCT_MASK(3,CD)
#else
CONTROL_PINS_PORT_NOT_USED(3)
#endif
}
};
#endif /* !CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
#define E100_RTS_MASK 0x20
#define E100_CTS_MASK 0x40
/* All serial port signals are active low:
* active = 0 -> 3.3V to RS-232 driver -> -12V on RS-232 level
* inactive = 1 -> 0V to RS-232 driver -> +12V on RS-232 level
*
* These macros return the pin value: 0 = 0V, >= 1 = 3.3V on the ETRAX chip
*/
/* Output */
#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK)
/* Input */
#define E100_CTS_GET(info) ((info)->ioport[REG_STATUS] & E100_CTS_MASK)
/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */
/* Is an output */
#define E100_DTR_GET(info) ((*e100_modem_pins[(info)->line].dtr_shadow) & e100_modem_pins[(info)->line].dtr_mask)
/* Normally inputs */
#define E100_RI_GET(info) ((*e100_modem_pins[(info)->line].ri_port) & e100_modem_pins[(info)->line].ri_mask)
#define E100_CD_GET(info) ((*e100_modem_pins[(info)->line].cd_port) & e100_modem_pins[(info)->line].cd_mask)
/* Input */
#define E100_DSR_GET(info) ((*e100_modem_pins[(info)->line].dsr_port) & e100_modem_pins[(info)->line].dsr_mask)
/*
* tmp_buf is used as a temporary buffer by serial_write. We need to
* lock it in case the memcpy_fromfs blocks while swapping in a page,
* and some other program tries to do a serial write at the same time.
* Since the lock will only come under contention when the system is
* swapping and available memory is low, it makes sense to share one
* buffer across all the serial ports, since it significantly saves
* memory if large numbers of serial ports are open.
*/
static unsigned char *tmp_buf;
static DEFINE_MUTEX(tmp_buf_mutex);
/* Calculate the character time depending on baud rate, number of bits, etc. */
static void update_char_time(struct e100_serial * info)
{
tcflag_t cflags = info->port.tty->termios->c_cflag;
int bits;
/* calc. number of bits / data byte */
/* databits + startbit and 1 stopbit */
if ((cflags & CSIZE) == CS7)
bits = 9;
else
bits = 10;
if (cflags & CSTOPB) /* 2 stopbits ? */
bits++;
if (cflags & PARENB) /* parity bit ? */
bits++;
/* calc timeout */
info->char_time_usec = ((bits * 1000000) / info->baud) + 1;
info->flush_time_usec = 4*info->char_time_usec;
if (info->flush_time_usec < MIN_FLUSH_TIME_USEC)
info->flush_time_usec = MIN_FLUSH_TIME_USEC;
}
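/* Worked example: 8N1 at 115200 baud gives bits = 10, so
 * char_time_usec = 10 * 1000000 / 115200 + 1 = 87 us and
 * flush_time_usec = 4 * 87 = 348 us, which is above
 * MIN_FLUSH_TIME_USEC and is therefore used as-is. */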
/*
* This function maps from the Bxxxx defines in asm/termbits.h into real
* baud rates.
*/
static int
cflag_to_baud(unsigned int cflag)
{
static int baud_table[] = {
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
4800, 9600, 19200, 38400 };
static int ext_baud_table[] = {
0, 57600, 115200, 230400, 460800, 921600, 1843200, 6250000,
0, 0, 0, 0, 0, 0, 0, 0 };
if (cflag & CBAUDEX)
return ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
else
return baud_table[cflag & CBAUD];
}
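/* E.g., assuming the usual asm/termbits.h encoding where B115200 is
 * CBAUDEX | 2, the lookup above lands in ext_baud_table[2] and
 * returns 115200. */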
/* and this maps to an etrax100 hardware baud constant */
static unsigned char
cflag_to_etrax_baud(unsigned int cflag)
{
char retval;
static char baud_table[] = {
-1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, 4, 5, 6, 7 };
static char ext_baud_table[] = {
-1, 8, 9, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, -1, -1 };
if (cflag & CBAUDEX)
retval = ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
else
retval = baud_table[cflag & CBAUD];
if (retval < 0) {
printk(KERN_WARNING "serdriver tried setting invalid baud rate, flags %x.\n", cflag);
retval = 5; /* choose default 9600 instead */
}
return retval | (retval << 4); /* choose same for both TX and RX */
}
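/* Continuing the example above: B115200 indexes ext_baud_table[2] = 9,
 * so the returned hardware constant is 9 | (9 << 4) = 0x99, i.e. the
 * same rate for both receiver and transmitter. */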
/* Various static support functions */
/* Functions to set or clear DTR/RTS on the requested line */
/* It is complicated by the fact that RTS is a serial port register, while
* DTR might not be implemented in the HW at all, and if it is, it can be on
* any general port.
*/
static inline void
e100_dtr(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
unsigned char mask = e100_modem_pins[info->line].dtr_mask;
#ifdef SERIAL_DEBUG_IO
printk("ser%i dtr %i mask: 0x%02X\n", info->line, set, mask);
printk("ser%i shadow before 0x%02X get: %i\n",
info->line, *e100_modem_pins[info->line].dtr_shadow,
E100_DTR_GET(info));
#endif
/* DTR is active low */
{
unsigned long flags;
local_irq_save(flags);
*e100_modem_pins[info->line].dtr_shadow &= ~mask;
*e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
*e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
local_irq_restore(flags);
}
#ifdef SERIAL_DEBUG_IO
printk("ser%i shadow after 0x%02X get: %i\n",
info->line, *e100_modem_pins[info->line].dtr_shadow,
E100_DTR_GET(info));
#endif
#endif
}
/* set = 0 means 3.3V on the pin; bit value: 0 = active = 0V,
 *                                           1 = inactive = 3.3V
 */
static inline void
e100_rts(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
unsigned long flags;
local_irq_save(flags);
info->rx_ctrl &= ~E100_RTS_MASK;
info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
info->ioport[REG_REC_CTRL] = info->rx_ctrl;
local_irq_restore(flags);
#ifdef SERIAL_DEBUG_IO
printk("ser%i rts %i\n", info->line, set);
#endif
#endif
}
/* If this behaves as a modem, RI and CD are outputs */
static inline void
e100_ri_out(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
/* RI is active low */
{
unsigned char mask = e100_modem_pins[info->line].ri_mask;
unsigned long flags;
local_irq_save(flags);
*e100_modem_pins[info->line].ri_shadow &= ~mask;
*e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
*e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
local_irq_restore(flags);
}
#endif
}
static inline void
e100_cd_out(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
/* CD is active low */
{
unsigned char mask = e100_modem_pins[info->line].cd_mask;
unsigned long flags;
local_irq_save(flags);
*e100_modem_pins[info->line].cd_shadow &= ~mask;
*e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
*e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
local_irq_restore(flags);
}
#endif
}
static inline void
e100_disable_rx(struct e100_serial *info)
{
#ifndef CONFIG_SVINTO_SIM
/* disable the receiver */
info->ioport[REG_REC_CTRL] =
(info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
#endif
}
static inline void
e100_enable_rx(struct e100_serial *info)
{
#ifndef CONFIG_SVINTO_SIM
/* enable the receiver */
info->ioport[REG_REC_CTRL] =
(info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
#endif
}
/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
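/* info->irq holds the tx DMA descriptor-interrupt mask bit (e.g.
 * 1 << 12 for ser0/DMA6), so shifting it left by 2 and 3 appears to
 * select the rx DMA channel's descr and eop bits in R_IRQ_MASK2. */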
static inline void
e100_disable_rxdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("rxdma_irq(%d): 0\n",info->line);
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ disable_rxdma_irq %i\n", info->line));
*R_IRQ_MASK2_CLR = (info->irq << 2) | (info->irq << 3);
}
static inline void
e100_enable_rxdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("rxdma_irq(%d): 1\n",info->line);
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ enable_rxdma_irq %i\n", info->line));
*R_IRQ_MASK2_SET = (info->irq << 2) | (info->irq << 3);
}
/* the tx DMA uses only dma_descr interrupt */
static void e100_disable_txdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("txdma_irq(%d): 0\n",info->line);
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ disable_txdma_irq %i\n", info->line));
*R_IRQ_MASK2_CLR = info->irq;
}
static void e100_enable_txdma_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("txdma_irq(%d): 1\n",info->line);
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ enable_txdma_irq %i\n", info->line));
*R_IRQ_MASK2_SET = info->irq;
}
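/* The R_GEN_CONFIG mux register is maintained through genconfig_shadow
 * in the four helpers below: edit the shadow copy under
 * local_irq_save() and write the whole word back, presumably because
 * the hardware register cannot be read back. */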
static void e100_disable_txdma_channel(struct e100_serial *info)
{
unsigned long flags;
/* Disable output DMA channel for the serial port in question
* ( set to something other than serialX)
*/
local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
if (info->line == 0) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
IO_STATE(R_GEN_CONFIG, dma6, serial0)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
}
} else if (info->line == 1) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma8)) ==
IO_STATE(R_GEN_CONFIG, dma8, serial1)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
}
} else if (info->line == 2) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma2)) ==
IO_STATE(R_GEN_CONFIG, dma2, serial2)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
}
} else if (info->line == 3) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma4)) ==
IO_STATE(R_GEN_CONFIG, dma4, serial3)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
}
}
*R_GEN_CONFIG = genconfig_shadow;
local_irq_restore(flags);
}
static void e100_enable_txdma_channel(struct e100_serial *info)
{
unsigned long flags;
local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
/* Enable output DMA channel for the serial port in question */
if (info->line == 0) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, serial0);
} else if (info->line == 1) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, serial1);
} else if (info->line == 2) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, serial2);
} else if (info->line == 3) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
}
*R_GEN_CONFIG = genconfig_shadow;
local_irq_restore(flags);
}
static void e100_disable_rxdma_channel(struct e100_serial *info)
{
unsigned long flags;
/* Disable input DMA channel for the serial port in question
* ( set to something other than serialX)
*/
local_irq_save(flags);
if (info->line == 0) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, unused);
}
} else if (info->line == 1) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma9)) ==
IO_STATE(R_GEN_CONFIG, dma9, serial1)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, usb);
}
} else if (info->line == 2) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma3)) ==
IO_STATE(R_GEN_CONFIG, dma3, serial2)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
}
} else if (info->line == 3) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma5)) ==
IO_STATE(R_GEN_CONFIG, dma5, serial3)) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
}
}
*R_GEN_CONFIG = genconfig_shadow;
local_irq_restore(flags);
}
static void e100_enable_rxdma_channel(struct e100_serial *info)
{
unsigned long flags;
local_irq_save(flags);
/* Enable input DMA channel for the serial port in question */
if (info->line == 0) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, serial0);
} else if (info->line == 1) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, serial1);
} else if (info->line == 2) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, serial2);
} else if (info->line == 3) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
}
*R_GEN_CONFIG = genconfig_shadow;
local_irq_restore(flags);
}
#ifdef SERIAL_HANDLE_EARLY_ERRORS
/* in order to detect and fix errors on the first byte
we have to use the serial interrupts as well. */
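/* The (8 + 2*line) shifts in the helpers below encode the R_IRQ_MASK1
 * layout this driver relies on: data-available interrupts at bits
 * 8/10/12/14 and the matching tx-ready interrupts at bits 9/11/13/15
 * (cf. e100_ser_int_mask above). */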
static inline void
e100_disable_serial_data_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("ser_irq(%d): 0\n",info->line);
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ disable data_irq %i\n", info->line));
*R_IRQ_MASK1_CLR = (1U << (8+2*info->line));
}
static inline void
e100_enable_serial_data_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("ser_irq(%d): 1\n",info->line);
printk("**** %d = %d\n",
(8+2*info->line),
(1U << (8+2*info->line)));
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ enable data_irq %i\n", info->line));
*R_IRQ_MASK1_SET = (1U << (8+2*info->line));
}
#endif
static inline void
e100_disable_serial_tx_ready_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("ser_tx_irq(%d): 0\n",info->line);
#endif
DINTR1(DEBUG_LOG(info->line,"IRQ disable ready_irq %i\n", info->line));
*R_IRQ_MASK1_CLR = (1U << (8+1+2*info->line));
}
static inline void
e100_enable_serial_tx_ready_irq(struct e100_serial *info)
{
#ifdef SERIAL_DEBUG_INTR
printk("ser_tx_irq(%d): 1\n",info->line);
printk("**** %d = %d\n",
(8+1+2*info->line),
(1U << (8+1+2*info->line)));
#endif
DINTR2(DEBUG_LOG(info->line,"IRQ enable ready_irq %i\n", info->line));
*R_IRQ_MASK1_SET = (1U << (8+1+2*info->line));
}
static inline void e100_enable_rx_irq(struct e100_serial *info)
{
if (info->uses_dma_in)
e100_enable_rxdma_irq(info);
else
e100_enable_serial_data_irq(info);
}
static inline void e100_disable_rx_irq(struct e100_serial *info)
{
if (info->uses_dma_in)
e100_disable_rxdma_irq(info);
else
e100_disable_serial_data_irq(info);
}
#if defined(CONFIG_ETRAX_RS485)
/* Enable RS-485 mode on selected port. This is UGLY. */
static int
e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
{
struct e100_serial * info = (struct e100_serial *)tty->driver_data;
#if defined(CONFIG_ETRAX_RS485_ON_PA)
*R_PORT_PA_DATA = port_pa_data_shadow |= (1 << rs485_pa_bit);
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
rs485_port_g_bit, 1);
#endif
#if defined(CONFIG_ETRAX_RS485_LTC1387)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 1);
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
#endif
info->rs485 = *r;
/* Clamp the delay before RTS to a maximum of 1000 */
if (info->rs485.delay_rts_before_send >= 1000)
info->rs485.delay_rts_before_send = 1000;
/* printk("rts: on send = %i, after = %i, enabled = %i",
info->rs485.rts_on_send,
info->rs485.rts_after_sent,
info->rs485.enabled
);
*/
return 0;
}
static int
e100_write_rs485(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct e100_serial * info = (struct e100_serial *)tty->driver_data;
int old_value = (info->rs485.flags) & SER_RS485_ENABLED;
/* rs485 is always implicitly enabled if we're using the ioctl(),
 * but it doesn't have to be set in the serial_rs485 struct
 * (to be backward compatible with old apps).
 * So we store, set and restore it.
*/
info->rs485.flags |= SER_RS485_ENABLED;
/* rs_write now deals with RS485 if enabled */
count = rs_write(tty, buf, count);
if (!old_value)
info->rs485.flags &= ~(SER_RS485_ENABLED);
return count;
}
#ifdef CONFIG_ETRAX_FAST_TIMER
/* Timer function to toggle RTS when using FAST_TIMER */
static void rs485_toggle_rts_timer_function(unsigned long data)
{
struct e100_serial *info = (struct e100_serial *)data;
fast_timers_rs485[info->line].function = NULL;
e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
e100_enable_rx(info);
e100_enable_rx_irq(info);
#endif
}
#endif
#endif /* CONFIG_ETRAX_RS485 */
/*
* ------------------------------------------------------------
* rs_stop() and rs_start()
*
* These routines are called before setting or resetting tty->stopped.
* They enable or disable the transmitter using the XOFF registers, as necessary.
* ------------------------------------------------------------
*/
static void
rs_stop(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
if (info) {
unsigned long flags;
unsigned long xoff;
local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
CIRC_CNT(info->xmit.head,
info->xmit.tail,SERIAL_XMIT_SIZE)));
xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
STOP_CHAR(info->port.tty));
xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
if (tty->termios->c_iflag & IXON ) {
xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
}
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
local_irq_restore(flags);
}
}
static void
rs_start(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
if (info) {
unsigned long flags;
unsigned long xoff;
local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
CIRC_CNT(info->xmit.head,
info->xmit.tail,SERIAL_XMIT_SIZE)));
xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
if (tty->termios->c_iflag & IXON ) {
xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
}
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
if (!info->uses_dma_out &&
info->xmit.head != info->xmit.tail && info->xmit.buf)
e100_enable_serial_tx_ready_irq(info);
local_irq_restore(flags);
}
}
/*
* ----------------------------------------------------------------------
*
* Here starts the interrupt handling routines. All of the following
* subroutines are declared as inline and are folded into
* rs_interrupt(). They were separated out for readability's sake.
*
* Note: rs_interrupt() is a "fast" interrupt, which means that it
* runs with interrupts turned off. People who may want to modify
* rs_interrupt() should try to keep the interrupt handler as fast as
* possible. After you are done making modifications, it is not a bad
* idea to do:
*
* gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
*
* and look at the resulting assembly code in serial.s.
*
* - Ted Ts'o (tytso@mit.edu), 7-Mar-93
* -----------------------------------------------------------------------
*/
/*
* This routine is used by the interrupt handler to schedule
* processing in the software interrupt portion of the driver.
*/
static void rs_sched_event(struct e100_serial *info, int event)
{
if (info->event & (1 << event))
return;
info->event |= 1 << event;
schedule_work(&info->work);
}
/* The output DMA channel is free - use it to send as many chars as possible
* NOTES:
* We don't pay attention to info->x_char, which means if the TTY wants to
* use XON/XOFF it will set info->x_char but we won't send any X char!
*
* To implement this, we'd just start a DMA send of 1 byte pointing at a
* buffer containing the X char, and skip updating xmit. We'd also have to
* check if the last sent char was the X char when we enter this function
* the next time, to avoid updating xmit with the sent X value.
*/
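/* A minimal sketch (not compiled) of the 1-byte X-char DMA send described
 * above. The helper name and the x_char_buf bounce buffer are assumptions
 * for illustration; the descriptor fields mirror those programmed in
 * transmit_chars_dma() below.
 */
#if 0
static unsigned char x_char_buf; /* 1-byte DMA source (hypothetical) */
static void transmit_x_char_dma(struct e100_serial *info)
{
struct etrax_dma_descr *descr = &info->tr_descr;
x_char_buf = info->x_char;
descr->ctrl = d_int | d_eol | d_wait;
descr->sw_len = 1;
descr->buf = virt_to_phys(&x_char_buf);
descr->status = 0;
*info->ofirstadr = virt_to_phys(descr); /* write to R_DMAx_FIRST */
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
/* On the next tr_interrupt() we would have to detect that the descriptor
 * pointed at x_char_buf and skip the xmit.tail update, as noted above. */
}
#endif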
static void
transmit_chars_dma(struct e100_serial *info)
{
unsigned int c, sentl;
struct etrax_dma_descr *descr;
#ifdef CONFIG_SVINTO_SIM
/* This will output too little if tail is not always 0, since
* we don't reloop to send the other part. Anyway this SHOULD be a
* no-op - transmit_chars_dma would never really be called during sim
* since rs_write does not write into the xmit buffer then.
*/
if (info->xmit.tail)
printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
if (info->xmit.head != info->xmit.tail) {
SIMCOUT(info->xmit.buf + info->xmit.tail,
CIRC_CNT(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE));
info->xmit.head = info->xmit.tail; /* move back head */
info->tr_running = 0;
}
return;
#endif
/* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
*info->oclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
#ifdef SERIAL_DEBUG_INTR
if (info->line == SERIAL_DEBUG_LINE)
printk("tc\n");
#endif
if (!info->tr_running) {
/* weirdo... we shouldn't get here! */
printk(KERN_WARNING "Warning: transmit_chars_dma with !tr_running\n");
return;
}
descr = &info->tr_descr;
/* first get the amount of bytes sent during the last DMA transfer,
and update xmit accordingly */
/* if the stop bit was not set, all data has been sent */
if (!(descr->status & d_stop)) {
sentl = descr->sw_len;
} else
/* otherwise we find the amount of data sent here */
sentl = descr->hw_len;
DFLOW(DEBUG_LOG(info->line, "TX %i done\n", sentl));
/* update stats */
info->icount.tx += sentl;
/* update xmit buffer */
info->xmit.tail = (info->xmit.tail + sentl) & (SERIAL_XMIT_SIZE - 1);
/* if there are only a few chars left in the buf, wake up the blocked
write if any */
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
/* find out the largest amount of consecutive bytes we want to send now */
c = CIRC_CNT_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
/* Don't send all in one DMA transfer - divide it so we wake up
* the application before all is sent
*/
if (c >= 4*WAKEUP_CHARS)
c = c/2;
if (c <= 0) {
/* our job here is done, don't schedule any new DMA transfer */
info->tr_running = 0;
#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
if (info->rs485.flags & SER_RS485_ENABLED) {
/* Set a short timer to toggle RTS */
start_one_shot_timer(&fast_timers_rs485[info->line],
rs485_toggle_rts_timer_function,
(unsigned long)info,
info->char_time_usec*2,
"RS-485");
}
#endif /* RS485 */
return;
}
/* ok we can schedule a dma send of c chars starting at info->xmit.tail */
/* set up the descriptor correctly for output */
DFLOW(DEBUG_LOG(info->line, "TX %i\n", c));
descr->ctrl = d_int | d_eol | d_wait; /* Wait needed for tty_wait_until_sent() */
descr->sw_len = c;
descr->buf = virt_to_phys(info->xmit.buf + info->xmit.tail);
descr->status = 0;
*info->ofirstadr = virt_to_phys(descr); /* write to R_DMAx_FIRST */
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
/* DMA is now running (hopefully) */
} /* transmit_chars_dma */
static void
start_transmit(struct e100_serial *info)
{
#if 0
if (info->line == SERIAL_DEBUG_LINE)
printk("x\n");
#endif
info->tr_descr.sw_len = 0;
info->tr_descr.hw_len = 0;
info->tr_descr.status = 0;
info->tr_running = 1;
if (info->uses_dma_out)
transmit_chars_dma(info);
else
e100_enable_serial_tx_ready_irq(info);
} /* start_transmit */
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
static int serial_fast_timer_started = 0;
static int serial_fast_timer_expired = 0;
static void flush_timeout_function(unsigned long data);
#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
unsigned long timer_flags; \
local_irq_save(timer_flags); \
if (fast_timers[info->line].function == NULL) { \
serial_fast_timer_started++; \
TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
TIMERD(DEBUG_LOG(info->line, "num started: %i\n", serial_fast_timer_started)); \
start_one_shot_timer(&fast_timers[info->line], \
flush_timeout_function, \
(unsigned long)info, \
(usec), \
string); \
} \
else { \
TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
} \
local_irq_restore(timer_flags); \
}
#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
#else
#define START_FLUSH_FAST_TIMER_TIME(info, string, usec)
#define START_FLUSH_FAST_TIMER(info, string)
#endif
static struct etrax_recv_buffer *
alloc_recv_buffer(unsigned int size)
{
struct etrax_recv_buffer *buffer;
if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
return NULL;
buffer->next = NULL;
buffer->length = 0;
buffer->error = TTY_NORMAL;
return buffer;
}
static void
append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
{
unsigned long flags;
local_irq_save(flags);
if (!info->first_recv_buffer)
info->first_recv_buffer = buffer;
else
info->last_recv_buffer->next = buffer;
info->last_recv_buffer = buffer;
info->recv_cnt += buffer->length;
if (info->recv_cnt > info->max_recv_cnt)
info->max_recv_cnt = info->recv_cnt;
local_irq_restore(flags);
}
static int
add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char flag)
{
struct etrax_recv_buffer *buffer;
if (info->uses_dma_in) {
if (!(buffer = alloc_recv_buffer(4)))
return 0;
buffer->length = 1;
buffer->error = flag;
buffer->buffer[0] = data;
append_recv_buffer(info, buffer);
info->icount.rx++;
} else {
struct tty_struct *tty = info->port.tty;
tty_insert_flip_char(tty, data, flag);
info->icount.rx++;
}
return 1;
}
static unsigned int handle_descr_data(struct e100_serial *info,
struct etrax_dma_descr *descr,
unsigned int recvl)
{
struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;
if (info->recv_cnt + recvl > 65536) {
printk(KERN_WARNING
"%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
return 0;
}
buffer->length = recvl;
if (info->errorcode == ERRCODE_SET_BREAK)
buffer->error = TTY_BREAK;
info->errorcode = 0;
append_recv_buffer(info, buffer);
if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
descr->buf = virt_to_phys(buffer->buffer);
return recvl;
}
static unsigned int handle_all_descr_data(struct e100_serial *info)
{
struct etrax_dma_descr *descr;
unsigned int recvl;
unsigned int ret = 0;
while (1)
{
descr = &info->rec_descr[info->cur_rec_descr];
if (descr == phys_to_virt(*info->idescradr))
break;
if (++info->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
info->cur_rec_descr = 0;
/* find out how many bytes were read */
/* if the eop bit was not set, all data has been received */
if (!(descr->status & d_eop)) {
recvl = descr->sw_len;
} else {
/* otherwise we find the amount of data received here */
recvl = descr->hw_len;
}
/* Reset the status information */
descr->status = 0;
DFLOW( DEBUG_LOG(info->line, "RX %u\n", recvl);
if (info->port.tty->stopped) {
unsigned char *buf = phys_to_virt(descr->buf);
DEBUG_LOG(info->line, "rx 0x%02X\n", buf[0]);
DEBUG_LOG(info->line, "rx 0x%02X\n", buf[1]);
DEBUG_LOG(info->line, "rx 0x%02X\n", buf[2]);
}
);
/* update stats */
info->icount.rx += recvl;
ret += handle_descr_data(info, descr, recvl);
}
return ret;
}
static void receive_chars_dma(struct e100_serial *info)
{
struct tty_struct *tty;
unsigned char rstat;
#ifdef CONFIG_SVINTO_SIM
/* No receive in the simulator. Will probably be when the rest of
* the serial interface works, and this piece will just be removed.
*/
return;
#endif
/* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
*info->iclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
tty = info->port.tty;
if (!tty) /* Something wrong... */
return;
#ifdef SERIAL_HANDLE_EARLY_ERRORS
if (info->uses_dma_in)
e100_enable_serial_data_irq(info);
#endif
if (info->errorcode == ERRCODE_INSERT_BREAK)
add_char_and_flag(info, '\0', TTY_BREAK);
handle_all_descr_data(info);
/* Read the status register to detect errors */
rstat = info->ioport[REG_STATUS];
if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat));
}
if (rstat & SER_ERROR_MASK) {
/* If we got an error, we must reset it by reading the
* data_in field
*/
unsigned char data = info->ioport[REG_DATA];
PROCSTAT(ser_stat[info->line].errors_cnt++);
DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
((rstat & SER_ERROR_MASK) << 8) | data);
if (rstat & SER_PAR_ERR_MASK)
add_char_and_flag(info, data, TTY_PARITY);
else if (rstat & SER_OVERRUN_MASK)
add_char_and_flag(info, data, TTY_OVERRUN);
else if (rstat & SER_FRAMING_ERR_MASK)
add_char_and_flag(info, data, TTY_FRAME);
}
START_FLUSH_FAST_TIMER(info, "receive_chars");
/* Restart the receiving DMA */
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
}
static int start_recv_dma(struct e100_serial *info)
{
struct etrax_dma_descr *descr = info->rec_descr;
struct etrax_recv_buffer *buffer;
int i;
/* Set up the receiving descriptors */
for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
descr[i].ctrl = d_int;
descr[i].buf = virt_to_phys(buffer->buffer);
descr[i].sw_len = SERIAL_DESCR_BUF_SIZE;
descr[i].hw_len = 0;
descr[i].status = 0;
descr[i].next = virt_to_phys(&descr[i+1]);
}
/* Link the last descriptor to the first */
descr[i-1].next = virt_to_phys(&descr[0]);
/* Start with the first descriptor in the list */
info->cur_rec_descr = 0;
/* Start the DMA */
*info->ifirstadr = virt_to_phys(&descr[info->cur_rec_descr]);
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
/* Input DMA should be running now */
return 1;
}
static void
start_receive(struct e100_serial *info)
{
#ifdef CONFIG_SVINTO_SIM
/* No receive in the simulator. Will probably be when the rest of
* the serial interface works, and this piece will just be removed.
*/
return;
#endif
if (info->uses_dma_in) {
/* reset the input dma channel to be sure it works */
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
start_recv_dma(info);
}
}
/* the bits in the MASK2 register are laid out like this:
DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR
where I is the input channel and O is the output channel for the port.
info->irq is the bit number for the DMAO_DESCR so to check the others we
shift info->irq to the left.
*/
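/* Worked example of the layout above (bit numbers are illustrative and
 * depend on which DMA channel the port uses): if info->irq == 1 << n is
 * the DMAO_DESCR bit, then
 * DMAO_EOP   = info->irq << 1
 * DMAI_DESCR = info->irq << 2
 * DMAI_EOP   = info->irq << 3
 * which is why tr_interrupt() below tests ireg & info->irq, while
 * rec_interrupt() tests ireg & ((info->irq << 2) | (info->irq << 3)).
 */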
/* dma output channel interrupt handler
this interrupt is called from DMA2(ser2), DMA4(ser3), DMA6(ser0) or
DMA8(ser1) when they have finished a descriptor with the intr flag set.
*/
static irqreturn_t
tr_interrupt(int irq, void *dev_id)
{
struct e100_serial *info;
unsigned long ireg;
int i;
int handled = 0;
#ifdef CONFIG_SVINTO_SIM
/* No receive in the simulator. Will probably be when the rest of
* the serial interface works, and this piece will just be removed.
*/
{
const char *s = "What? tr_interrupt in simulator??\n";
SIMCOUT(s,strlen(s));
}
return IRQ_HANDLED;
#endif
/* find out the line that caused this irq and get it from rs_table */
ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
for (i = 0; i < NR_PORTS; i++) {
info = rs_table + i;
if (!info->enabled || !info->uses_dma_out)
continue;
/* check for dma_descr (don't need to check for dma_eop in output dma for serial) */
if (ireg & info->irq) {
handled = 1;
/* we can send a new dma bunch. make it so. */
DINTR2(DEBUG_LOG(info->line, "tr_interrupt %i\n", i));
/* Read jiffies_usec first,
* we want this time to be as late as possible
*/
PROCSTAT(ser_stat[info->line].tx_dma_ints++);
info->last_tx_active_usec = GET_JIFFIES_USEC();
info->last_tx_active = jiffies;
transmit_chars_dma(info);
}
/* FIXME: here we should really check for a change in the
status lines and if so call status_handle(info) */
}
return IRQ_RETVAL(handled);
} /* tr_interrupt */
/* dma input channel interrupt handler */
static irqreturn_t
rec_interrupt(int irq, void *dev_id)
{
struct e100_serial *info;
unsigned long ireg;
int i;
int handled = 0;
#ifdef CONFIG_SVINTO_SIM
/* No receive in the simulator. Will probably be when the rest of
* the serial interface works, and this piece will just be removed.
*/
{
const char *s = "What? rec_interrupt in simulator??\n";
SIMCOUT(s,strlen(s));
}
return IRQ_HANDLED;
#endif
/* find out the line that caused this irq and get it from rs_table */
ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
for (i = 0; i < NR_PORTS; i++) {
info = rs_table + i;
if (!info->enabled || !info->uses_dma_in)
continue;
/* check for both dma_eop and dma_descr for the input dma channel */
if (ireg & ((info->irq << 2) | (info->irq << 3))) {
handled = 1;
/* we have received something */
receive_chars_dma(info);
}
/* FIXME: here we should really check for a change in the
status lines and if so call status_handle(info) */
}
return IRQ_RETVAL(handled);
} /* rec_interrupt */
static int force_eop_if_needed(struct e100_serial *info)
{
/* We check data_avail bit to determine if data has
* arrived since last time
*/
unsigned char rstat = info->ioport[REG_STATUS];
/* error or datavail? */
if (rstat & SER_ERROR_MASK) {
/* Some error has occurred. If there has been valid data, an
* EOP interrupt will be made automatically. If no data, the
* normal ser_interrupt should be enabled and handle it.
* So do nothing!
*/
DEBUG_LOG(info->line, "timeout err: rstat 0x%03X\n",
rstat | (info->line << 8));
return 0;
}
if (rstat & SER_DATA_AVAIL_MASK) {
/* Ok data, no error, count it */
TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n",
rstat | (info->line << 8)));
/* Read data to clear status flags */
(void)info->ioport[REG_DATA];
info->forced_eop = 0;
START_FLUSH_FAST_TIMER(info, "magic");
return 0;
}
/* hit the timeout, force an EOP for the input
* dma channel if we haven't already
*/
if (!info->forced_eop) {
info->forced_eop = 1;
PROCSTAT(ser_stat[info->line].timeout_flush_cnt++);
TIMERD(DEBUG_LOG(info->line, "timeout EOP %i\n", info->line));
FORCE_EOP(info);
}
return 1;
}
static void flush_to_flip_buffer(struct e100_serial *info)
{
struct tty_struct *tty;
struct etrax_recv_buffer *buffer;
unsigned long flags;
local_irq_save(flags);
tty = info->port.tty;
if (!tty) {
local_irq_restore(flags);
return;
}
while ((buffer = info->first_recv_buffer) != NULL) {
unsigned int count = buffer->length;
tty_insert_flip_string(tty, buffer->buffer, count);
info->recv_cnt -= count;
if (count == buffer->length) {
info->first_recv_buffer = buffer->next;
kfree(buffer);
} else {
buffer->length -= count;
memmove(buffer->buffer, buffer->buffer + count, buffer->length);
buffer->error = TTY_NORMAL;
}
}
if (!info->first_recv_buffer)
info->last_recv_buffer = NULL;
local_irq_restore(flags);
/* This includes a check for low-latency */
tty_flip_buffer_push(tty);
}
static void check_flush_timeout(struct e100_serial *info)
{
/* Flip what we've got (if we can) */
flush_to_flip_buffer(info);
/* We might need to flip later, but not too fast
* since the system is busy processing input... */
if (info->first_recv_buffer)
START_FLUSH_FAST_TIMER_TIME(info, "flip", 2000);
/* Force eop last, since data might have come while we're processing
* and if we started the slow timer above, we won't start a fast
* below.
*/
force_eop_if_needed(info);
}
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
static void flush_timeout_function(unsigned long data)
{
struct e100_serial *info = (struct e100_serial *)data;
fast_timers[info->line].function = NULL;
serial_fast_timer_expired++;
TIMERD(DEBUG_LOG(info->line, "flush_timeout %i ", info->line));
TIMERD(DEBUG_LOG(info->line, "num expired: %i\n", serial_fast_timer_expired));
check_flush_timeout(info);
}
#else
/* dma fifo/buffer timeout handler
forces an end-of-packet for the dma input channel if no chars
have been received for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS/100 s.
*/
static struct timer_list flush_timer;
static void
timed_flush_handler(unsigned long ptr)
{
struct e100_serial *info;
int i;
#ifdef CONFIG_SVINTO_SIM
return;
#endif
for (i = 0; i < NR_PORTS; i++) {
info = rs_table + i;
if (info->uses_dma_in)
check_flush_timeout(info);
}
/* restart flush timer */
mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
}
#endif
#ifdef SERIAL_HANDLE_EARLY_ERRORS
/* If there is an error (ie break) when the DMA is running and
* there are no bytes in the fifo the DMA is stopped and we get no
* eop interrupt. Thus we have to monitor the first bytes on a DMA
* transfer, and if it is without error we can turn the serial
* interrupts off.
*/
/*
BREAK handling on ETRAX 100:
ETRAX will generate interrupt although there is no stop bit between the
characters.
Depending on how long the break sequence is, the end of the break sequence
will look different:
| indicates start/end of a character.
B= Break character (0x00) with framing error.
E= Error byte with parity error received after B characters.
F= "Faked" valid byte received immediately after B characters.
V= Valid byte
1.
B BL ___________________________ V
.._|__________|__________| |valid data |
Multiple frame errors with data == 0x00 (B),
the timing matches up "perfectly" so no extra ending char is detected.
The RXD pin is 1 in the last interrupt, in that case
we set info->errorcode = ERRCODE_INSERT_BREAK, but we can't really
know if another byte will come and this really is case 2. below
(e.g F=0xFF or 0xFE)
If RXD pin is 0 we can expect another character (see 2. below).
2.
B B E or F__________________..__ V
.._|__________|__________|______ | |valid data
"valid" or
parity error
Multiple frame errors with data == 0x00 (B),
but the trailing part of the break is interpreted as a start bit (and possibly
some 0 bits followed by a number of 1 bits and a stop bit).
Depending on parity settings etc. this last character can be either
a fake "valid" char (F) or have a parity error (E).
If the character is valid it will be put in the buffer,
we set info->errorcode = ERRCODE_SET_BREAK so the receive interrupt
will set the flags so the tty will handle it,
if it's an error byte it will not be put in the buffer
and we set info->errorcode = ERRCODE_INSERT_BREAK.
To distinguish a V byte in 1. from an F byte in 2. we keep a timestamp
of the last faulty char (B) and compare it with the current time:
if the elapsed time is less than 2*char_time_usec we will assume
it's a faked F char and not a valid char and set
info->errorcode = ERRCODE_SET_BREAK.
Flaws in the above solution:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We use the timer to distinguish an F character from a V character;
if a V character comes too close after the break we might make the wrong decision.
TODO: The break will be delayed until an F or V character is received.
*/
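/* A minimal sketch of the F-versus-V decision described above, factored
 * out as a hypothetical helper for clarity; the driver performs the same
 * computation inline in handle_ser_rx_interrupt() below.
 */
#if 0
static int char_ends_break(struct e100_serial *info,
unsigned long now, unsigned long now_usec)
{
long elapsed_usec =
(now - info->last_rx_active) * (1000000/HZ) +
now_usec - info->last_rx_active_usec;
/* Closer than two character times to the last faulty (B) char:
 * treat it as a faked F char ending the break, not a valid V char. */
return elapsed_usec < 2*info->char_time_usec;
}
#endif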
static
struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
{
unsigned long data_read;
struct tty_struct *tty = info->port.tty;
if (!tty) {
printk("!NO TTY!\n");
return info;
}
/* Read data and status at the same time */
data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
more_data:
if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) {
DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
}
DINTR2(DEBUG_LOG(info->line, "ser_rx %c\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read)));
if (data_read & ( IO_MASK(R_SERIAL0_READ, framing_err) |
IO_MASK(R_SERIAL0_READ, par_err) |
IO_MASK(R_SERIAL0_READ, overrun) )) {
/* An error */
info->last_rx_active_usec = GET_JIFFIES_USEC();
info->last_rx_active = jiffies;
DINTR1(DEBUG_LOG(info->line, "ser_rx err stat_data %04X\n", data_read));
DLOG_INT_TRIG(
if (!log_int_trig1_pos) {
log_int_trig1_pos = log_int_pos;
log_int(rdpc(), 0, 0);
}
);
if ( ((data_read & IO_MASK(R_SERIAL0_READ, data_in)) == 0) &&
(data_read & IO_MASK(R_SERIAL0_READ, framing_err)) ) {
/* Most likely a break, but we get interrupts over and
* over again.
*/
if (!info->break_detected_cnt) {
DEBUG_LOG(info->line, "#BRK start\n", 0);
}
if (data_read & IO_MASK(R_SERIAL0_READ, rxd)) {
/* The RX pin is high now, so the break
* must be over, but....
* we can't really know if we will get another
* last byte ending the break or not.
* And we don't know if the byte (if any) will
* have an error or look valid.
*/
DEBUG_LOG(info->line, "# BL BRK\n", 0);
info->errorcode = ERRCODE_INSERT_BREAK;
}
info->break_detected_cnt++;
} else {
/* The error does not look like a break, but could be
* the end of one
*/
if (info->break_detected_cnt) {
DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
info->errorcode = ERRCODE_INSERT_BREAK;
} else {
unsigned char data = IO_EXTRACT(R_SERIAL0_READ,
data_in, data_read);
char flag = TTY_NORMAL;
if (info->errorcode == ERRCODE_INSERT_BREAK) {
struct tty_struct *tty = info->port.tty;
tty_insert_flip_char(tty, 0, flag);
info->icount.rx++;
}
if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
info->icount.parity++;
flag = TTY_PARITY;
} else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
info->icount.overrun++;
flag = TTY_OVERRUN;
} else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
info->icount.frame++;
flag = TTY_FRAME;
}
tty_insert_flip_char(tty, data, flag);
info->errorcode = 0;
}
info->break_detected_cnt = 0;
}
} else if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
/* No error */
DLOG_INT_TRIG(
if (!log_int_trig1_pos) {
if (log_int_pos >= log_int_size) {
log_int_pos = 0;
}
log_int_trig0_pos = log_int_pos;
log_int(rdpc(), 0, 0);
}
);
tty_insert_flip_char(tty,
IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
TTY_NORMAL);
} else {
DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
}
info->icount.rx++;
data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read));
goto more_data;
}
tty_flip_buffer_push(info->port.tty);
return info;
}
static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
{
unsigned char rstat;
#ifdef SERIAL_DEBUG_INTR
printk("Interrupt from serport %d\n", i);
#endif
/* DEBUG_LOG(info->line, "ser_interrupt stat %03X\n", rstat | (i << 8)); */
if (!info->uses_dma_in) {
return handle_ser_rx_interrupt_no_dma(info);
}
/* DMA is used */
rstat = info->ioport[REG_STATUS];
if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
}
if (rstat & SER_ERROR_MASK) {
unsigned char data;
info->last_rx_active_usec = GET_JIFFIES_USEC();
info->last_rx_active = jiffies;
/* If we got an error, we must reset it by reading the
* data_in field
*/
data = info->ioport[REG_DATA];
DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data));
DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat));
if (!data && (rstat & SER_FRAMING_ERR_MASK)) {
/* Most likely a break, but we get interrupts over and
* over again.
*/
if (!info->break_detected_cnt) {
DEBUG_LOG(info->line, "#BRK start\n", 0);
}
if (rstat & SER_RXD_MASK) {
/* The RX pin is high now, so the break
* must be over, but....
* we can't really know if we will get another
* last byte ending the break or not.
* And we don't know if the byte (if any) will
* have an error or look valid.
*/
DEBUG_LOG(info->line, "# BL BRK\n", 0);
info->errorcode = ERRCODE_INSERT_BREAK;
}
info->break_detected_cnt++;
} else {
/* The error does not look like a break, but could be
* the end of one
*/
if (info->break_detected_cnt) {
DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
info->errorcode = ERRCODE_INSERT_BREAK;
} else {
if (info->errorcode == ERRCODE_INSERT_BREAK) {
info->icount.brk++;
add_char_and_flag(info, '\0', TTY_BREAK);
}
if (rstat & SER_PAR_ERR_MASK) {
info->icount.parity++;
add_char_and_flag(info, data, TTY_PARITY);
} else if (rstat & SER_OVERRUN_MASK) {
info->icount.overrun++;
add_char_and_flag(info, data, TTY_OVERRUN);
} else if (rstat & SER_FRAMING_ERR_MASK) {
info->icount.frame++;
add_char_and_flag(info, data, TTY_FRAME);
}
info->errorcode = 0;
}
info->break_detected_cnt = 0;
DEBUG_LOG(info->line, "#iERR s d %04X\n",
((rstat & SER_ERROR_MASK) << 8) | data);
}
PROCSTAT(ser_stat[info->line].early_errors_cnt++);
} else { /* It was a valid byte, now let the DMA do the rest */
unsigned long curr_time_u = GET_JIFFIES_USEC();
unsigned long curr_time = jiffies;
if (info->break_detected_cnt) {
/* Detect if this character is a new valid char or the
* last char in a break sequence: If LSBits are 0 and
* MSBits are high AND the time is close to the
* previous interrupt we should discard it.
*/
long elapsed_usec =
(curr_time - info->last_rx_active) * (1000000/HZ) +
curr_time_u - info->last_rx_active_usec;
if (elapsed_usec < 2*info->char_time_usec) {
DEBUG_LOG(info->line, "FBRK %i\n", info->line);
/* Report as BREAK (error) and let
* receive_chars_dma() handle it
*/
info->errorcode = ERRCODE_SET_BREAK;
} else {
DEBUG_LOG(info->line, "Not end of BRK (V)%i\n", info->line);
}
DEBUG_LOG(info->line, "num brk %i\n", info->break_detected_cnt);
}
#ifdef SERIAL_DEBUG_INTR
printk("** OK, disabling ser_interrupts\n");
#endif
e100_disable_serial_data_irq(info);
DINTR2(DEBUG_LOG(info->line, "ser_rx OK %d\n", info->line));
info->break_detected_cnt = 0;
PROCSTAT(ser_stat[info->line].ser_ints_ok_cnt++);
}
/* Restarting the DMA never hurts */
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
START_FLUSH_FAST_TIMER(info, "ser_int");
return info;
} /* handle_ser_rx_interrupt */
static void handle_ser_tx_interrupt(struct e100_serial *info)
{
unsigned long flags;
if (info->x_char) {
unsigned char rstat;
DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
local_irq_save(flags);
rstat = info->ioport[REG_STATUS];
DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
info->ioport[REG_TR_DATA] = info->x_char;
info->icount.tx++;
info->x_char = 0;
/* We must enable since it is disabled in ser_interrupt */
e100_enable_serial_tx_ready_irq(info);
local_irq_restore(flags);
return;
}
if (info->uses_dma_out) {
unsigned char rstat;
int i;
/* We only use normal tx interrupt when sending x_char */
DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
local_irq_save(flags);
rstat = info->ioport[REG_STATUS];
DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
e100_disable_serial_tx_ready_irq(info);
if (info->port.tty->stopped)
rs_stop(info->port.tty);
/* Enable the DMA channel and tell it to continue */
e100_enable_txdma_channel(info);
/* Wait 12 cycles before doing the DMA command */
for(i = 6; i > 0; i--)
nop();
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
local_irq_restore(flags);
return;
}
/* Normal char-by-char interrupt */
if (info->xmit.head == info->xmit.tail
|| info->port.tty->stopped
|| info->port.tty->hw_stopped) {
DFLOW(DEBUG_LOG(info->line, "tx_int: stopped %i\n",
info->port.tty->stopped));
e100_disable_serial_tx_ready_irq(info);
info->tr_running = 0;
return;
}
DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
/* Send a byte, rs485 timing is critical so turn off ints */
local_irq_save(flags);
info->ioport[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
info->icount.tx++;
if (info->xmit.head == info->xmit.tail) {
#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
if (info->rs485.flags & SER_RS485_ENABLED) {
/* Set a short timer to toggle RTS */
start_one_shot_timer(&fast_timers_rs485[info->line],
rs485_toggle_rts_timer_function,
(unsigned long)info,
info->char_time_usec*2,
"RS-485");
}
#endif /* RS485 */
info->last_tx_active_usec = GET_JIFFIES_USEC();
info->last_tx_active = jiffies;
e100_disable_serial_tx_ready_irq(info);
info->tr_running = 0;
DFLOW(DEBUG_LOG(info->line, "tx_int: stop2\n", 0));
} else {
/* We must enable since it is disabled in ser_interrupt */
e100_enable_serial_tx_ready_irq(info);
}
local_irq_restore(flags);
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
} /* handle_ser_tx_interrupt */
/* result of time measurements:
* RX duration 54-60 us when doing something, otherwise 6-9 us
* ser_int duration: just sending: 8-15 us normally, up to 73 us
*/
static irqreturn_t
ser_interrupt(int irq, void *dev_id)
{
static volatile int tx_started = 0;
struct e100_serial *info;
int i;
unsigned long flags;
unsigned long irq_mask1_rd;
unsigned long data_mask = (1 << (8+2*0)); /* ser0 data_avail */
int handled = 0;
static volatile unsigned long reentered_ready_mask = 0;
local_irq_save(flags);
irq_mask1_rd = *R_IRQ_MASK1_RD;
/* First handle all rx interrupts with ints disabled */
info = rs_table;
irq_mask1_rd &= e100_ser_int_mask;
for (i = 0; i < NR_PORTS; i++) {
/* Which line caused the data irq? */
if (irq_mask1_rd & data_mask) {
handled = 1;
handle_ser_rx_interrupt(info);
}
info += 1;
data_mask <<= 2;
}
/* Handle tx interrupts with interrupts enabled so we
* can take care of new data interrupts while transmitting
* We protect the tx part with the tx_started flag.
* We disable the tr_ready interrupts we are about to handle and
* unblock the serial interrupt so new serial interrupts may come.
*
* If we get a new interrupt:
* - it might be due to synchronous serial ports.
* - serial irq will be blocked by general irq handler.
* - async data will be handled above (sync will be ignored).
* - tx_started flag will prevent us from trying to send again and
* we will exit fast - no need to unblock serial irq.
* - Next (sync) serial interrupt handler will be run with
* interrupts disabled due to local_irq_restore() at end of function,
* so sync handler will not be preempted or reentered.
*/
if (!tx_started) {
unsigned long ready_mask;
tx_started = 1;
/* Only the tr_ready interrupts left */
irq_mask1_rd &= (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
while (irq_mask1_rd) {
/* Disable those we are about to handle */
*R_IRQ_MASK1_CLR = irq_mask1_rd;
/* Unblock the serial interrupt */
*R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
local_irq_enable();
ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
info = rs_table;
for (i = 0; i < NR_PORTS; i++) {
/* Which line caused the ready irq? */
if (irq_mask1_rd & ready_mask) {
handled = 1;
handle_ser_tx_interrupt(info);
}
info += 1;
ready_mask <<= 2;
}
/* handle_ser_tx_interrupt enables tr_ready interrupts */
local_irq_disable();
/* Handle reentered TX interrupt */
irq_mask1_rd = reentered_ready_mask;
reentered_ready_mask = 0;
}
local_irq_disable();
tx_started = 0;
} else {
unsigned long ready_mask;
ready_mask = irq_mask1_rd & (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
if (ready_mask) {
reentered_ready_mask |= ready_mask;
/* Disable those we are about to handle */
*R_IRQ_MASK1_CLR = ready_mask;
DFLOW(DEBUG_LOG(SERIAL_DEBUG_LINE, "ser_int reentered with TX %X\n", ready_mask));
}
}
local_irq_restore(flags);
return IRQ_RETVAL(handled);
} /* ser_interrupt */
#endif
/*
* -------------------------------------------------------------------
* Here ends the serial interrupt routines.
* -------------------------------------------------------------------
*/
/*
* This routine is used to handle the "bottom half" processing for the
* serial driver, also known as the "software interrupt" processing.
* This processing is done at the kernel interrupt level, after the
* rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
* is where time-consuming activities which can not be done in the
* interrupt driver proper are done; the interrupt driver schedules
* them using rs_sched_event(), and they get done here.
*/
static void
do_softint(struct work_struct *work)
{
struct e100_serial *info;
struct tty_struct *tty;
info = container_of(work, struct e100_serial, work);
tty = info->port.tty;
if (!tty)
return;
if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
tty_wakeup(tty);
}
static int
startup(struct e100_serial * info)
{
unsigned long flags;
unsigned long xmit_page;
int i;
xmit_page = get_zeroed_page(GFP_KERNEL);
if (!xmit_page)
return -ENOMEM;
local_irq_save(flags);
/* if it was already initialized, skip this */
if (info->flags & ASYNC_INITIALIZED) {
local_irq_restore(flags);
free_page(xmit_page);
return 0;
}
if (info->xmit.buf)
free_page(xmit_page);
else
info->xmit.buf = (unsigned char *) xmit_page;
#ifdef SERIAL_DEBUG_OPEN
printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
#endif
#ifdef CONFIG_SVINTO_SIM
/* Bits and pieces collected from below. Better to have them
in one ifdef:ed clause than to mix in a lot of ifdefs,
right? */
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
info->xmit.head = info->xmit.tail = 0;
info->first_recv_buffer = info->last_recv_buffer = NULL;
info->recv_cnt = info->max_recv_cnt = 0;
for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
info->rec_descr[i].buf = NULL;
/* No real action in the simulator, but it may set info fields
important to ioctl(). */
change_speed(info);
#else
/*
* Clear the FIFO buffers and disable them
* (they will be reenabled in change_speed())
*/
/*
* Reset the DMA channels and make sure their interrupts are cleared
*/
if (info->dma_in_enabled) {
info->uses_dma_in = 1;
e100_enable_rxdma_channel(info);
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
/* Wait until reset cycle is complete */
while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
/* Make sure the irqs are cleared */
*info->iclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
} else {
e100_disable_rxdma_channel(info);
}
if (info->dma_out_enabled) {
info->uses_dma_out = 1;
e100_enable_txdma_channel(info);
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) ==
IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
/* Make sure the irqs are cleared */
*info->oclrintradr =
IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
} else {
e100_disable_txdma_channel(info);
}
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
info->xmit.head = info->xmit.tail = 0;
info->first_recv_buffer = info->last_recv_buffer = NULL;
info->recv_cnt = info->max_recv_cnt = 0;
for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
info->rec_descr[i].buf = 0;
/*
* and set the speed and other flags of the serial port
* this will start the rx/tx as well
*/
#ifdef SERIAL_HANDLE_EARLY_ERRORS
e100_enable_serial_data_irq(info);
#endif
change_speed(info);
/* dummy read to reset any serial errors */
(void)info->ioport[REG_DATA];
/* enable the interrupts */
if (info->uses_dma_out)
e100_enable_txdma_irq(info);
e100_enable_rx_irq(info);
info->tr_running = 0; /* to be sure we don't lock up the transmitter */
/* setup the dma input descriptor and start dma */
start_receive(info);
/* for safety, make sure the descriptors last result is 0 bytes written */
info->tr_descr.sw_len = 0;
info->tr_descr.hw_len = 0;
info->tr_descr.status = 0;
/* enable RTS/DTR last */
e100_rts(info, 1);
e100_dtr(info, 1);
#endif /* CONFIG_SVINTO_SIM */
info->flags |= ASYNC_INITIALIZED;
local_irq_restore(flags);
return 0;
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void
shutdown(struct e100_serial * info)
{
unsigned long flags;
struct etrax_dma_descr *descr = info->rec_descr;
struct etrax_recv_buffer *buffer;
int i;
#ifndef CONFIG_SVINTO_SIM
/* shut down the transmitter and receiver */
DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
e100_disable_rx(info);
info->ioport[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40);
/* disable interrupts, reset dma channels */
if (info->uses_dma_in) {
e100_disable_rxdma_irq(info);
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
info->uses_dma_in = 0;
} else {
e100_disable_serial_data_irq(info);
}
if (info->uses_dma_out) {
e100_disable_txdma_irq(info);
info->tr_running = 0;
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
info->uses_dma_out = 0;
} else {
e100_disable_serial_tx_ready_irq(info);
info->tr_running = 0;
}
#endif /* CONFIG_SVINTO_SIM */
if (!(info->flags & ASYNC_INITIALIZED))
return;
#ifdef SERIAL_DEBUG_OPEN
printk("Shutting down serial port %d (irq %d)....\n", info->line,
info->irq);
#endif
local_irq_save(flags);
if (info->xmit.buf) {
free_page((unsigned long)info->xmit.buf);
info->xmit.buf = NULL;
}
for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
if (descr[i].buf) {
buffer = phys_to_virt(descr[i].buf) - sizeof *buffer;
kfree(buffer);
descr[i].buf = 0;
}
if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
/* hang up DTR and RTS if HUPCL is enabled */
e100_dtr(info, 0);
e100_rts(info, 0); /* could check CRTSCTS before doing this */
}
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
info->flags &= ~ASYNC_INITIALIZED;
local_irq_restore(flags);
}
/* change baud rate and other assorted parameters */
static void
change_speed(struct e100_serial *info)
{
unsigned int cflag;
unsigned long xoff;
unsigned long flags;
/* first some safety checks */
if (!info->port.tty || !info->port.tty->termios)
return;
if (!info->ioport)
return;
cflag = info->port.tty->termios->c_cflag;
/* possibly, the tx/rx should be disabled first to do this safely */
/* change baud-rate and write it to the hardware */
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) {
/* Special baudrate */
u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
unsigned long alt_source =
IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
/* R_ALT_SER_BAUDRATE selects the source */
DBAUD(printk("Custom baudrate: baud_base/divisor %lu/%i\n",
(unsigned long)info->baud_base, info->custom_divisor));
if (info->baud_base == SERIAL_PRESCALE_BASE) {
/* 0, 2-65535 (0=65536) */
u16 divisor = info->custom_divisor;
/* R_SERIAL_PRESCALE (upper 16 bits of R_CLOCK_PRESCALE) */
/* baudrate is 3.125MHz/custom_divisor */
alt_source =
IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, prescale) |
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, prescale);
alt_source = 0x11;
DBAUD(printk("Writing SERIAL_PRESCALE: divisor %i\n", divisor));
*R_SERIAL_PRESCALE = divisor;
info->baud = SERIAL_PRESCALE_BASE/divisor;
}
#ifdef CONFIG_ETRAX_EXTERN_PB6CLK_ENABLED
else if ((info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8 &&
info->custom_divisor == 1) ||
(info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ &&
info->custom_divisor == 8)) {
/* ext_clk selected */
alt_source =
IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, extern) |
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, extern);
DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
}
#endif
else
{
/* Bad baudbase, we don't support using timer0
* for baudrate.
*/
printk(KERN_WARNING "Bad baud_base/custom_divisor: %lu/%i\n",
(unsigned long)info->baud_base, info->custom_divisor);
}
r_alt_ser_baudrate_shadow &= ~mask;
r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
*R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
} else {
/* Normal baudrate */
/* Make sure we use normal baudrate */
u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
unsigned long alt_source =
IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
r_alt_ser_baudrate_shadow &= ~mask;
r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
#ifndef CONFIG_SVINTO_SIM
*R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
#endif /* CONFIG_SVINTO_SIM */
info->baud = cflag_to_baud(cflag);
#ifndef CONFIG_SVINTO_SIM
info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
#endif /* CONFIG_SVINTO_SIM */
}
#ifndef CONFIG_SVINTO_SIM
/* start with default settings and then fill in changes */
local_irq_save(flags);
/* 8 bit, no/even parity */
info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
IO_MASK(R_SERIAL0_REC_CTRL, rec_par));
/* 8 bit, no/even parity, 1 stop bit, no cts */
info->tx_ctrl &= ~(IO_MASK(R_SERIAL0_TR_CTRL, tr_bitnr) |
IO_MASK(R_SERIAL0_TR_CTRL, tr_par_en) |
IO_MASK(R_SERIAL0_TR_CTRL, tr_par) |
IO_MASK(R_SERIAL0_TR_CTRL, stop_bits) |
IO_MASK(R_SERIAL0_TR_CTRL, auto_cts));
if ((cflag & CSIZE) == CS7) {
/* set 7 bit mode */
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
}
if (cflag & CSTOPB) {
/* set 2 stop bit mode */
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, two_bits);
}
if (cflag & PARENB) {
/* enable parity */
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
}
if (cflag & CMSPAR) {
/* enable stick parity; PARODD means Mark, which matches ETRAX */
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, stick);
info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, stick);
}
if (cflag & PARODD) {
/* set odd parity (or Mark if CMSPAR) */
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd);
info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd);
}
if (cflag & CRTSCTS) {
/* enable automatic CTS handling */
DFLOW(DEBUG_LOG(info->line, "FLOW auto_cts enabled\n", 0));
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, active);
}
/* make sure the tx and rx are enabled */
info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable);
info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable);
/* actually write the control regs to the hardware */
info->ioport[REG_TR_CTRL] = info->tx_ctrl;
info->ioport[REG_REC_CTRL] = info->rx_ctrl;
xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty));
xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
if (info->port.tty->termios->c_iflag & IXON ) {
DFLOW(DEBUG_LOG(info->line, "FLOW XOFF enabled 0x%02X\n",
STOP_CHAR(info->port.tty)));
xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
}
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
local_irq_restore(flags);
#endif /* !CONFIG_SVINTO_SIM */
update_char_time(info);
} /* change_speed */
/* start transmitting chars NOW */
static void
rs_flush_chars(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
if (info->tr_running ||
info->xmit.head == info->xmit.tail ||
tty->stopped ||
tty->hw_stopped ||
!info->xmit.buf)
return;
#ifdef SERIAL_DEBUG_FLOW
printk("rs_flush_chars\n");
#endif
/* this protection might not exactly be necessary here */
local_irq_save(flags);
start_transmit(info);
local_irq_restore(flags);
}
static int rs_raw_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
int c, ret = 0;
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
/* first some sanity checks */
if (!tty || !info->xmit.buf || !tmp_buf)
return 0;
#ifdef SERIAL_DEBUG_DATA
if (info->line == SERIAL_DEBUG_LINE)
printk("rs_raw_write (%d), status %d\n",
count, info->ioport[REG_STATUS]);
#endif
#ifdef CONFIG_SVINTO_SIM
/* Really simple. The output is here and now. */
SIMCOUT(buf, count);
return count;
#endif
local_save_flags(flags);
DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
/* The local_irq_disable/restore_flags pairs below are needed
* because the DMA interrupt handler modifies the info->xmit values.
* the memcpy needs to be in the critical region unfortunately,
* because we need to read xmit values, memcpy, write xmit values
* in one atomic operation... this could perhaps be avoided by
* more clever design.
*/
local_irq_disable();
while (count) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0)
break;
memcpy(info->xmit.buf + info->xmit.head, buf, c);
info->xmit.head = (info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1);
buf += c;
count -= c;
ret += c;
}
local_irq_restore(flags);
/* enable transmitter if not running, unless the tty is stopped
* this does not need IRQ protection since if tr_running == 0
* the IRQ's are not running anyway for this port.
*/
DFLOW(DEBUG_LOG(info->line, "write ret %i\n", ret));
if (info->xmit.head != info->xmit.tail &&
!tty->stopped &&
!tty->hw_stopped &&
!info->tr_running) {
start_transmit(info);
}
return ret;
} /* rs_raw_write() */
static int
rs_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
#if defined(CONFIG_ETRAX_RS485)
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
if (info->rs485.flags & SER_RS485_ENABLED)
{
/* If we are in RS-485 mode, we need to toggle RTS and disable
* the receiver before initiating a DMA transfer
*/
#ifdef CONFIG_ETRAX_FAST_TIMER
/* Abort any started timer */
fast_timers_rs485[info->line].function = NULL;
del_fast_timer(&fast_timers_rs485[info->line]);
#endif
e100_rts(info, (info->rs485.flags & SER_RS485_RTS_ON_SEND));
#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
e100_disable_rx(info);
e100_enable_rx_irq(info);
#endif
if (info->rs485.delay_rts_before_send > 0)
msleep(info->rs485.delay_rts_before_send);
}
#endif /* CONFIG_ETRAX_RS485 */
count = rs_raw_write(tty, buf, count);
#if defined(CONFIG_ETRAX_RS485)
if (info->rs485.flags & SER_RS485_ENABLED)
{
unsigned int val;
/* If we are in RS-485 mode the following has to be done:
* wait until DMA is ready
* wait on transmit shift register
* toggle RTS
* enable the receiver
*/
/* Sleep until all sent */
tty_wait_until_sent(tty, 0);
#ifdef CONFIG_ETRAX_FAST_TIMER
/* Now sleep a little more so that shift register is empty */
schedule_usleep(info->char_time_usec * 2);
#endif
/* wait on transmit shift register */
do{
get_lsr_info(info, &val);
}while (!(val & TIOCSER_TEMT));
e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
e100_enable_rx(info);
e100_enable_rxdma_irq(info);
#endif
}
#endif /* CONFIG_ETRAX_RS485 */
return count;
} /* rs_write */
/* how much space is available in the xmit buffer? */
static int
rs_write_room(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
/* How many chars are in the xmit buffer?
* This does not include any chars in the transmitter FIFO.
* Use wait_until_sent for waiting for FIFO drain.
*/
static int
rs_chars_in_buffer(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
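/* Worked example of the circular-buffer arithmetic (assuming the usual
 * SERIAL_XMIT_SIZE of 4096, a power of two): with head = 10 and
 * tail = 4090, CIRC_CNT(head, tail, size) = (10 - 4090) & 4095 = 16
 * chars queued, and CIRC_SPACE() = 4095 - 16 = 4079 chars free; the
 * wrap-around is handled entirely by the power-of-two mask.
 */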
/* discard everything in the xmit buffer */
static void
rs_flush_buffer(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*
* Since we use DMA we don't check for info->x_char in transmit_chars_dma(),
* but we do it in handle_ser_tx_interrupt().
* We disable DMA channel and enable tx ready interrupt and write the
* character when possible.
*/
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
local_irq_save(flags);
if (info->uses_dma_out) {
/* Put the DMA on hold and disable the channel */
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) !=
IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, hold));
e100_disable_txdma_channel(info);
}
/* Must make sure transmitter is not stopped before we can transmit */
if (tty->stopped)
rs_start(tty);
/* Enable manual transmit interrupt and send from there */
DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
info->x_char = ch;
e100_enable_serial_tx_ready_irq(info);
local_irq_restore(flags);
}
/*
* ------------------------------------------------------------
* rs_throttle()
*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
* ------------------------------------------------------------
*/
static void
rs_throttle(struct tty_struct * tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("throttle %s: %lu....\n", tty_name(tty, buf),
(unsigned long)tty->ldisc.chars_in_buffer(tty));
#endif
DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
/* Do RTS before XOFF since XOFF might take some time */
if (tty->termios->c_cflag & CRTSCTS) {
/* Turn off RTS line */
e100_rts(info, 0);
}
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
}
static void
rs_unthrottle(struct tty_struct * tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("unthrottle %s: %lu....\n", tty_name(tty, buf),
(unsigned long)tty->ldisc.chars_in_buffer(tty));
#endif
DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n", tty->flip.count));
/* Do RTS before XOFF since XOFF might take some time */
if (tty->termios->c_cflag & CRTSCTS) {
/* Assert RTS line */
e100_rts(info, 1);
}
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
rs_send_xchar(tty, START_CHAR(tty));
}
}
/*
* ------------------------------------------------------------
* rs_ioctl() and friends
* ------------------------------------------------------------
*/
static int
get_serial_info(struct e100_serial * info,
struct serial_struct * retinfo)
{
struct serial_struct tmp;
/* this is all probably wrong, there are a lot of fields
* here that we don't have in e100_serial and maybe we
* should set them to something other than 0.
*/
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = info->type;
tmp.line = info->line;
tmp.port = (int)info->ioport;
tmp.irq = info->irq;
tmp.flags = info->flags;
tmp.baud_base = info->baud_base;
tmp.close_delay = info->close_delay;
tmp.closing_wait = info->closing_wait;
tmp.custom_divisor = info->custom_divisor;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
return 0;
}
static int
set_serial_info(struct e100_serial *info,
struct serial_struct *new_info)
{
struct serial_struct new_serial;
struct e100_serial old_info;
int retval = 0;
if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
return -EFAULT;
old_info = *info;
if (!capable(CAP_SYS_ADMIN)) {
if ((new_serial.type != info->type) ||
(new_serial.close_delay != info->close_delay) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
(info->flags & ~ASYNC_USR_MASK)))
return -EPERM;
info->flags = ((info->flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK));
goto check_and_exit;
}
if (info->count > 1)
return -EBUSY;
/*
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
info->baud_base = new_serial.baud_base;
info->flags = ((info->flags & ~ASYNC_FLAGS) |
(new_serial.flags & ASYNC_FLAGS));
info->custom_divisor = new_serial.custom_divisor;
info->type = new_serial.type;
info->close_delay = new_serial.close_delay;
info->closing_wait = new_serial.closing_wait;
info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (info->flags & ASYNC_INITIALIZED) {
change_speed(info);
} else
retval = startup(info);
return retval;
}
/*
* get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not be done when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
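/* A user-space sketch of that usage (illustrative only; fd is assumed to
 * be an open tty, TIOCSERGETLSR and TIOCSER_TEMT come from
 * <linux/serial.h>):
 */
#if 0
unsigned int lsr;
do {
if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
break;
} while (!(lsr & TIOCSER_TEMT)); /* wait until the shift reg is empty */
/* now it is safe to drop RTS and release the RS485 bus */
#endif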
static int
get_lsr_info(struct e100_serial * info, unsigned int *value)
{
unsigned int result = TIOCSER_TEMT;
#ifndef CONFIG_SVINTO_SIM
unsigned long curr_time = jiffies;
unsigned long curr_time_usec = GET_JIFFIES_USEC();
unsigned long elapsed_usec =
(curr_time - info->last_tx_active) * 1000000/HZ +
curr_time_usec - info->last_tx_active_usec;
if (info->xmit.head != info->xmit.tail ||
elapsed_usec < 2*info->char_time_usec) {
result = 0;
}
#endif
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
#ifdef SERIAL_DEBUG_IO
struct state_str
{
int state;
const char *str;
};
const struct state_str control_state_str[] = {
{TIOCM_DTR, "DTR" },
{TIOCM_RTS, "RTS"},
{TIOCM_ST, "ST?" },
{TIOCM_SR, "SR?" },
{TIOCM_CTS, "CTS" },
{TIOCM_CD, "CD" },
{TIOCM_RI, "RI" },
{TIOCM_DSR, "DSR" },
{0, NULL }
};
char *get_control_state_str(int MLines, char *s)
{
int i = 0;
s[0]='\0';
while (control_state_str[i].str != NULL) {
if (MLines & control_state_str[i].state) {
if (s[0] != '\0') {
strcat(s, ", ");
}
strcat(s, control_state_str[i].str);
}
i++;
}
return s;
}
#endif
static int
rs_break(struct tty_struct *tty, int break_state)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
if (!info->ioport)
return -EIO;
local_irq_save(flags);
if (break_state == -1) {
/* Go to manual mode and set the txd pin to 0 */
/* Clear bit 7 (txd) and 6 (tr_enable) */
info->tx_ctrl &= 0x3F;
} else {
/* Set bit 7 (txd) and 6 (tr_enable) */
info->tx_ctrl |= (0x80 | 0x40);
}
info->ioport[REG_TR_CTRL] = info->tx_ctrl;
local_irq_restore(flags);
return 0;
}
static int
rs_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
local_irq_save(flags);
if (clear & TIOCM_RTS)
e100_rts(info, 0);
if (clear & TIOCM_DTR)
e100_dtr(info, 0);
/* Handle FEMALE behaviour */
if (clear & TIOCM_RI)
e100_ri_out(info, 0);
if (clear & TIOCM_CD)
e100_cd_out(info, 0);
if (set & TIOCM_RTS)
e100_rts(info, 1);
if (set & TIOCM_DTR)
e100_dtr(info, 1);
/* Handle FEMALE behaviour */
if (set & TIOCM_RI)
e100_ri_out(info, 1);
if (set & TIOCM_CD)
e100_cd_out(info, 1);
local_irq_restore(flags);
return 0;
}
static int
rs_tiocmget(struct tty_struct *tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned int result;
unsigned long flags;
local_irq_save(flags);
result =
(!E100_RTS_GET(info) ? TIOCM_RTS : 0)
| (!E100_DTR_GET(info) ? TIOCM_DTR : 0)
| (!E100_RI_GET(info) ? TIOCM_RNG : 0)
| (!E100_DSR_GET(info) ? TIOCM_DSR : 0)
| (!E100_CD_GET(info) ? TIOCM_CAR : 0)
| (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
local_irq_restore(flags);
#ifdef SERIAL_DEBUG_IO
printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
info->line, result, result);
{
char s[100];
get_control_state_str(result, s);
printk(KERN_DEBUG "state: %s\n", s);
}
#endif
return result;
}
static int
rs_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct e100_serial * info = (struct e100_serial *)tty->driver_data;
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
(cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
}
switch (cmd) {
case TIOCGSERIAL:
return get_serial_info(info,
(struct serial_struct *) arg);
case TIOCSSERIAL:
return set_serial_info(info,
(struct serial_struct *) arg);
case TIOCSERGETLSR: /* Get line status register */
return get_lsr_info(info, (unsigned int *) arg);
case TIOCSERGSTRUCT:
if (copy_to_user((struct e100_serial *) arg,
info, sizeof(struct e100_serial)))
return -EFAULT;
return 0;
#if defined(CONFIG_ETRAX_RS485)
case TIOCSERSETRS485:
{
/* In this ioctl we still use the old structure
* rs485_control for backward compatibility
* (if we use serial_rs485, then old user-level code
* wouldn't work anymore...).
* The use of this ioctl is deprecated: use TIOCSRS485
* instead.*/
struct rs485_control rs485ctrl;
struct serial_rs485 rs485data;
printk(KERN_DEBUG "The use of this ioctl is deprecated. Use TIOCSRS485 instead\n");
if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
sizeof(rs485ctrl)))
return -EFAULT;
rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
rs485data.flags = 0;
if (rs485ctrl.enabled)
rs485data.flags |= SER_RS485_ENABLED;
else
rs485data.flags &= ~(SER_RS485_ENABLED);
if (rs485ctrl.rts_on_send)
rs485data.flags |= SER_RS485_RTS_ON_SEND;
else
rs485data.flags &= ~(SER_RS485_RTS_ON_SEND);
if (rs485ctrl.rts_after_sent)
rs485data.flags |= SER_RS485_RTS_AFTER_SEND;
else
rs485data.flags &= ~(SER_RS485_RTS_AFTER_SEND);
return e100_enable_rs485(tty, &rs485data);
}
case TIOCSRS485:
{
/* This is the new version of TIOCSRS485, with new
* data structure serial_rs485 */
struct serial_rs485 rs485data;
if (copy_from_user(&rs485data, (struct serial_rs485 *)arg,
sizeof(rs485data)))
return -EFAULT;
return e100_enable_rs485(tty, &rs485data);
}
case TIOCGRS485:
{
struct serial_rs485 *rs485data =
&(((struct e100_serial *)tty->driver_data)->rs485);
/* This is the ioctl to get RS485 data from user-space */
if (copy_to_user((struct serial_rs485 *) arg,
rs485data,
sizeof(struct serial_rs485)))
return -EFAULT;
break;
}
case TIOCSERWRRS485:
{
struct rs485_write rs485wr;
if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
sizeof(rs485wr)))
return -EFAULT;
return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
}
#endif
default:
return -ENOIOCTLCMD;
}
return 0;
}
static void
rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
change_speed(info);
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
rs_start(tty);
}
}
/*
* ------------------------------------------------------------
* rs_close()
*
* This routine is called when the serial port gets closed. First, we
* wait for the last remaining data to be sent. Then, we unlink its
* async structure from the interrupt chain if necessary, and we free
* that IRQ if nothing is left in the chain.
* ------------------------------------------------------------
*/
static void
rs_close(struct tty_struct *tty, struct file * filp)
{
struct e100_serial * info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
if (!info)
return;
/* interrupts are disabled for this entire function */
local_irq_save(flags);
if (tty_hung_up_p(filp)) {
local_irq_restore(flags);
return;
}
#ifdef SERIAL_DEBUG_OPEN
printk("[%d] rs_close ttyS%d, count = %d\n", current->pid,
info->line, info->count);
#endif
if ((tty->count == 1) && (info->count != 1)) {
/*
* Uh, oh. tty->count is 1, which means that the tty
* structure will be freed. Info->count should always
* be one in these conditions. If it's greater than
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
printk(KERN_ERR
"rs_close: bad serial port count; tty->count is 1, "
"info->count is %d\n", info->count);
info->count = 1;
}
if (--info->count < 0) {
printk(KERN_ERR "rs_close: bad serial port count for ttyS%d: %d\n",
info->line, info->count);
info->count = 0;
}
if (info->count) {
local_irq_restore(flags);
return;
}
info->flags |= ASYNC_CLOSING;
/*
* Save the termios structure, since this port may have
* separate termios for callout and dialin.
*/
if (info->flags & ASYNC_NORMAL_ACTIVE)
info->normal_termios = *tty->termios;
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
*/
tty->closing = 1;
if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, info->closing_wait);
/*
* At this point we stop accepting input. To do this, we
* disable the serial receiver and the DMA receive interrupt.
*/
#ifdef SERIAL_HANDLE_EARLY_ERRORS
e100_disable_serial_data_irq(info);
#endif
#ifndef CONFIG_SVINTO_SIM
e100_disable_rx(info);
e100_disable_rx_irq(info);
if (info->flags & ASYNC_INITIALIZED) {
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important as we have a transmit FIFO!
*/
rs_wait_until_sent(tty, HZ);
}
#endif
shutdown(info);
rs_flush_buffer(tty);
tty_ldisc_flush(tty);
tty->closing = 0;
info->event = 0;
info->port.tty = NULL;
if (info->blocked_open) {
if (info->close_delay)
schedule_timeout_interruptible(info->close_delay);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&info->close_wait);
local_irq_restore(flags);
/* port closed */
#if defined(CONFIG_ETRAX_RS485)
if (info->rs485.flags & SER_RS485_ENABLED) {
info->rs485.flags &= ~(SER_RS485_ENABLED);
#if defined(CONFIG_ETRAX_RS485_ON_PA)
*R_PORT_PA_DATA = port_pa_data_shadow &= ~(1 << rs485_pa_bit);
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
rs485_port_g_bit, 0);
#endif
#if defined(CONFIG_ETRAX_RS485_LTC1387)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 0);
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 0);
#endif
}
#endif
/*
* Release any allocated DMA irq's.
*/
if (info->dma_in_enabled) {
free_irq(info->dma_in_irq_nbr, info);
cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
info->uses_dma_in = 0;
#ifdef SERIAL_DEBUG_OPEN
printk(KERN_DEBUG "DMA irq '%s' freed\n",
info->dma_in_irq_description);
#endif
}
if (info->dma_out_enabled) {
free_irq(info->dma_out_irq_nbr, info);
cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
info->uses_dma_out = 0;
#ifdef SERIAL_DEBUG_OPEN
printk(KERN_DEBUG "DMA irq '%s' freed\n",
info->dma_out_irq_description);
#endif
}
}
/*
* rs_wait_until_sent() --- wait until the transmitter is empty
*/
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
unsigned long orig_jiffies;
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long curr_time = jiffies;
unsigned long curr_time_usec = GET_JIFFIES_USEC();
long elapsed_usec =
(curr_time - info->last_tx_active) * (1000000/HZ) +
curr_time_usec - info->last_tx_active_usec;
/*
* Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
* R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
*/
orig_jiffies = jiffies;
while (info->xmit.head != info->xmit.tail || /* More in send queue */
(*info->ostatusadr & 0x007f) || /* more in FIFO */
(elapsed_usec < 2*info->char_time_usec)) {
schedule_timeout_interruptible(1);
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
curr_time = jiffies;
curr_time_usec = GET_JIFFIES_USEC();
elapsed_usec =
(curr_time - info->last_tx_active) * (1000000/HZ) +
curr_time_usec - info->last_tx_active_usec;
}
set_current_state(TASK_RUNNING);
}
/*
* rs_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
void
rs_hangup(struct tty_struct *tty)
{
struct e100_serial * info = (struct e100_serial *)tty->driver_data;
rs_flush_buffer(tty);
shutdown(info);
info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
info->port.tty = NULL;
wake_up_interruptible(&info->open_wait);
}
/*
* ------------------------------------------------------------
* rs_open() and friends
* ------------------------------------------------------------
*/
static int
block_til_ready(struct tty_struct *tty, struct file * filp,
struct e100_serial *info)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int retval;
int do_clocal = 0, extra_count = 0;
/*
* If the device is in the middle of being closed, then block
* until it's done, and then try again.
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
if (info->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
else
return -ERESTARTSYS;
#else
return -EAGAIN;
#endif
}
/*
* If non-blocking mode is set, or the port is not enabled,
* then make the check up front and then exit.
*/
if ((filp->f_flags & O_NONBLOCK) ||
(tty->flags & (1 << TTY_IO_ERROR))) {
info->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
if (tty->termios->c_cflag & CLOCAL) {
do_clocal = 1;
}
/*
* Block waiting for the carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
* this loop, info->count is dropped by one, so that
* rs_close() knows when to free things. We restore it upon
* exit, either normal or abnormal.
*/
retval = 0;
add_wait_queue(&info->open_wait, &wait);
#ifdef SERIAL_DEBUG_OPEN
printk("block_til_ready before block: ttyS%d, count = %d\n",
info->line, info->count);
#endif
local_irq_save(flags);
if (!tty_hung_up_p(filp)) {
extra_count++;
info->count--;
}
local_irq_restore(flags);
info->blocked_open++;
while (1) {
local_irq_save(flags);
/* assert RTS and DTR */
e100_rts(info, 1);
e100_dtr(info, 1);
local_irq_restore(flags);
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
!(info->flags & ASYNC_INITIALIZED)) {
#ifdef SERIAL_DO_RESTART
if (info->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
else
retval = -ERESTARTSYS;
#else
retval = -EAGAIN;
#endif
break;
}
if (!(info->flags & ASYNC_CLOSING) && do_clocal)
/* && (do_clocal || DCD_IS_ASSERTED) */
break;
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
#ifdef SERIAL_DEBUG_OPEN
printk("block_til_ready blocking: ttyS%d, count = %d\n",
info->line, info->count);
#endif
tty_unlock();
schedule();
tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&info->open_wait, &wait);
if (extra_count)
info->count++;
info->blocked_open--;
#ifdef SERIAL_DEBUG_OPEN
printk("block_til_ready after blocking: ttyS%d, count = %d\n",
info->line, info->count);
#endif
if (retval)
return retval;
info->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
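/*
 * deinit_port() --- release the DMA channels and irqs that rs_open()
 * allocated for this port.
 */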
static void
deinit_port(struct e100_serial *info)
{
if (info->dma_out_enabled) {
cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
free_irq(info->dma_out_irq_nbr, info);
}
if (info->dma_in_enabled) {
cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
free_irq(info->dma_in_irq_nbr, info);
}
}
/*
* This routine is called whenever a serial port is opened.
* It performs the serial-specific initialization for the tty structure.
*/
static int
rs_open(struct tty_struct *tty, struct file * filp)
{
struct e100_serial *info;
int retval;
unsigned long page;
int allocated_resources = 0;
info = rs_table + tty->index;
if (!info->enabled)
return -ENODEV;
#ifdef SERIAL_DEBUG_OPEN
printk("[%d] rs_open %s, count = %d\n", current->pid, tty->name,
info->count);
#endif
info->count++;
tty->driver_data = info;
info->port.tty = tty;
tty->low_latency = !!(info->flags & ASYNC_LOW_LATENCY);
if (!tmp_buf) {
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
return -ENOMEM;
}
if (tmp_buf)
free_page(page);
else
tmp_buf = (unsigned char *) page;
}
/*
* If the port is in the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
#else
return -EAGAIN;
#endif
}
/*
* If DMA is enabled try to allocate the irq's.
*/
if (info->count == 1) {
allocated_resources = 1;
if (info->dma_in_enabled) {
if (request_irq(info->dma_in_irq_nbr,
rec_interrupt,
info->dma_in_irq_flags,
info->dma_in_irq_description,
info)) {
printk(KERN_WARNING "DMA irq '%s' busy; "
"falling back to non-DMA mode\n",
info->dma_in_irq_description);
/* Make sure we never try to use DMA in */
/* for the port again. */
info->dma_in_enabled = 0;
} else if (cris_request_dma(info->dma_in_nbr,
info->dma_in_irq_description,
DMA_VERBOSE_ON_ERROR,
info->dma_owner)) {
free_irq(info->dma_in_irq_nbr, info);
printk(KERN_WARNING "DMA '%s' busy; "
"falling back to non-DMA mode\n",
info->dma_in_irq_description);
/* Make sure we never try to use DMA in */
/* for the port again. */
info->dma_in_enabled = 0;
}
#ifdef SERIAL_DEBUG_OPEN
else
printk(KERN_DEBUG "DMA irq '%s' allocated\n",
info->dma_in_irq_description);
#endif
}
if (info->dma_out_enabled) {
if (request_irq(info->dma_out_irq_nbr,
tr_interrupt,
info->dma_out_irq_flags,
info->dma_out_irq_description,
info)) {
printk(KERN_WARNING "DMA irq '%s' busy; "
"falling back to non-DMA mode\n",
info->dma_out_irq_description);
/* Make sure we never try to use DMA out */
/* for the port again. */
info->dma_out_enabled = 0;
} else if (cris_request_dma(info->dma_out_nbr,
info->dma_out_irq_description,
DMA_VERBOSE_ON_ERROR,
info->dma_owner)) {
free_irq(info->dma_out_irq_nbr, info);
printk(KERN_WARNING "DMA '%s' busy; "
"falling back to non-DMA mode\n",
info->dma_out_irq_description);
/* Make sure we never try to use DMA out */
/* for the port again. */
info->dma_out_enabled = 0;
}
#ifdef SERIAL_DEBUG_OPEN
else
printk(KERN_DEBUG "DMA irq '%s' allocated\n",
info->dma_out_irq_description);
#endif
}
}
/*
* Start up the serial port
*/
retval = startup(info);
if (retval) {
if (allocated_resources)
deinit_port(info);
/* FIXME Decrease count info->count here too? */
return retval;
}
retval = block_til_ready(tty, filp, info);
if (retval) {
#ifdef SERIAL_DEBUG_OPEN
printk("rs_open returning after block_til_ready with %d\n",
retval);
#endif
if (allocated_resources)
deinit_port(info);
return retval;
}
if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
*tty->termios = info->normal_termios;
change_speed(info);
}
#ifdef SERIAL_DEBUG_OPEN
printk("rs_open ttyS%d successful...\n", info->line);
#endif
DLOG_INT_TRIG( log_int_pos = 0);
DFLIP( if (info->line == SERIAL_DEBUG_LINE) {
info->icount.rx = 0;
} );
return 0;
}
#ifdef CONFIG_PROC_FS
/*
* /proc fs routines....
*/
static void seq_line_info(struct seq_file *m, struct e100_serial *info)
{
unsigned long tmp;
seq_printf(m, "%d: uart:E100 port:%lX irq:%d",
info->line, (unsigned long)info->ioport, info->irq);
if (!info->ioport || (info->type == PORT_UNKNOWN)) {
seq_printf(m, "\n");
return;
}
seq_printf(m, " baud:%d", info->baud);
seq_printf(m, " tx:%lu rx:%lu",
(unsigned long)info->icount.tx,
(unsigned long)info->icount.rx);
tmp = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (tmp)
seq_printf(m, " tx_pend:%lu/%lu",
(unsigned long)tmp,
(unsigned long)SERIAL_XMIT_SIZE);
seq_printf(m, " rx_pend:%lu/%lu",
(unsigned long)info->recv_cnt,
(unsigned long)info->max_recv_cnt);
#if 1
if (info->port.tty) {
if (info->port.tty->stopped)
seq_printf(m, " stopped:%i",
(int)info->port.tty->stopped);
if (info->port.tty->hw_stopped)
seq_printf(m, " hw_stopped:%i",
(int)info->port.tty->hw_stopped);
}
{
unsigned char rstat = info->ioport[REG_STATUS];
if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect))
seq_printf(m, " xoff_detect:1");
}
#endif
if (info->icount.frame)
seq_printf(m, " fe:%lu", (unsigned long)info->icount.frame);
if (info->icount.parity)
seq_printf(m, " pe:%lu", (unsigned long)info->icount.parity);
if (info->icount.brk)
seq_printf(m, " brk:%lu", (unsigned long)info->icount.brk);
if (info->icount.overrun)
seq_printf(m, " oe:%lu", (unsigned long)info->icount.overrun);
/*
* Last thing is the RS-232 status lines
*/
if (!E100_RTS_GET(info))
seq_puts(m, "|RTS");
if (!E100_CTS_GET(info))
seq_puts(m, "|CTS");
if (!E100_DTR_GET(info))
seq_puts(m, "|DTR");
if (!E100_DSR_GET(info))
seq_puts(m, "|DSR");
if (!E100_CD_GET(info))
seq_puts(m, "|CD");
if (!E100_RI_GET(info))
seq_puts(m, "|RI");
seq_puts(m, "\n");
}
static int crisv10_proc_show(struct seq_file *m, void *v)
{
int i;
seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
for (i = 0; i < NR_PORTS; i++) {
if (!rs_table[i].enabled)
continue;
seq_line_info(m, &rs_table[i]);
}
#ifdef DEBUG_LOG_INCLUDED
for (i = 0; i < debug_log_pos; i++) {
seq_printf(m, "%-4i %lu.%lu ",
i, debug_log[i].time,
timer_data_to_ns(debug_log[i].timer_data));
seq_printf(m, debug_log[i].string, debug_log[i].value);
}
seq_printf(m, "debug_log %i/%i\n", i, DEBUG_LOG_SIZE);
debug_log_pos = 0;
#endif
return 0;
}
static int crisv10_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, crisv10_proc_show, NULL);
}
static const struct file_operations crisv10_proc_fops = {
.owner = THIS_MODULE,
.open = crisv10_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
/* Finally, routines used to initialize the serial driver. */
static void show_serial_version(void)
{
printk(KERN_INFO
"ETRAX 100LX serial-driver %s, "
"(c) 2000-2004 Axis Communications AB\r\n",
&serial_version[11]); /* "$Revision: x.yy" */
}
/* rs_init inits the driver at boot (using the module_init chain) */
static const struct tty_operations rs_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
.chars_in_buffer = rs_chars_in_buffer,
.flush_buffer = rs_flush_buffer,
.ioctl = rs_ioctl,
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.set_termios = rs_set_termios,
.stop = rs_stop,
.start = rs_start,
.hangup = rs_hangup,
.break_ctl = rs_break,
.send_xchar = rs_send_xchar,
.wait_until_sent = rs_wait_until_sent,
.tiocmget = rs_tiocmget,
.tiocmset = rs_tiocmset,
#ifdef CONFIG_PROC_FS
.proc_fops = &crisv10_proc_fops,
#endif
};
static int __init rs_init(void)
{
int i;
struct e100_serial *info;
struct tty_driver *driver = alloc_tty_driver(NR_PORTS);
if (!driver)
return -ENOMEM;
show_serial_version();
/* Setup the timed flush handler system */
#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
setup_timer(&flush_timer, timed_flush_handler, 0);
mod_timer(&flush_timer, jiffies + 5);
#endif
#if defined(CONFIG_ETRAX_RS485)
#if defined(CONFIG_ETRAX_RS485_ON_PA)
if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
rs485_pa_bit)) {
printk(KERN_ERR "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
put_tty_driver(driver);
return -EBUSY;
}
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
rs485_port_g_bit)) {
printk(KERN_ERR "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
put_tty_driver(driver);
return -EBUSY;
}
#endif
#endif
/* Initialize the tty_driver structure */
driver->driver_name = "serial";
driver->name = "ttyS";
driver->major = TTY_MAJOR;
driver->minor_start = 64;
driver->type = TTY_DRIVER_TYPE_SERIAL;
driver->subtype = SERIAL_TYPE_NORMAL;
driver->init_termios = tty_std_termios;
driver->init_termios.c_cflag =
B115200 | CS8 | CREAD | HUPCL | CLOCAL; /* is normally B9600 default... */
driver->init_termios.c_ispeed = 115200;
driver->init_termios.c_ospeed = 115200;
driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(driver, &rs_ops);
serial_driver = driver;
if (tty_register_driver(driver))
panic("Couldn't register serial driver\n");
/* do some initializing for the separate ports */
for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
if (info->enabled) {
if (cris_request_io_interface(info->io_if,
info->io_if_description)) {
printk(KERN_ERR "ETRAX100LX async serial: "
"Could not allocate IO pins for "
"%s, port %d\n",
info->io_if_description, i);
info->enabled = 0;
}
}
info->uses_dma_in = 0;
info->uses_dma_out = 0;
info->line = i;
info->port.tty = NULL;
info->type = PORT_ETRAX;
info->tr_running = 0;
info->forced_eop = 0;
info->baud_base = DEF_BAUD_BASE;
info->custom_divisor = 0;
info->flags = 0;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
info->x_char = 0;
info->event = 0;
info->count = 0;
info->blocked_open = 0;
info->normal_termios = driver->init_termios;
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
info->xmit.buf = NULL;
info->xmit.tail = info->xmit.head = 0;
info->first_recv_buffer = info->last_recv_buffer = NULL;
info->recv_cnt = info->max_recv_cnt = 0;
info->last_tx_active_usec = 0;
info->last_tx_active = 0;
#if defined(CONFIG_ETRAX_RS485)
/* Set sane defaults */
info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
info->rs485.delay_rts_before_send = 0;
info->rs485.flags &= ~(SER_RS485_ENABLED);
#endif
INIT_WORK(&info->work, do_softint);
if (info->enabled) {
printk(KERN_INFO "%s%d at %p is a builtin UART with DMA\n",
serial_driver->name, info->line, info->ioport);
}
}
#ifdef CONFIG_ETRAX_FAST_TIMER
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
memset(fast_timers, 0, sizeof(fast_timers));
#endif
#ifdef CONFIG_ETRAX_RS485
memset(fast_timers_rs485, 0, sizeof(fast_timers_rs485));
#endif
fast_timer_init();
#endif
#ifndef CONFIG_SVINTO_SIM
#ifndef CONFIG_ETRAX_KGDB
/* Not needed in simulator. May only complicate stuff. */
/* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
IRQF_SHARED, "serial ", driver))
panic("%s: Failed to request irq8", __func__);
#endif
#endif /* CONFIG_SVINTO_SIM */
return 0;
}
/* this makes sure that rs_init is called during kernel boot */
module_init(rs_init);
| gpl-2.0 |
jituijiaqiezi/linux | drivers/ide/cs5535.c | 4800 | 6315 | /*
* Copyright (C) 2004-2005 Advanced Micro Devices, Inc.
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*
* History:
* 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com>
* - Reworked tuneproc, set_drive, misc mods to prep for mainline
* - Work was sponsored by CIS (M) Sdn Bhd.
* Ported to Kernel 2.6.11 on June 26, 2005 by
* Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
* Alexander Kiausch <alex.kiausch@t-online.de>
* Originally developed by AMD for 2.4/2.6
*
* Development of this chipset driver was funded
* by the nice folks at National Semiconductor/AMD.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* Documentation:
* CS5535 documentation available from AMD
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ide.h>
#define DRV_NAME "cs5535"
#define MSR_ATAC_BASE 0x51300000
#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
#define ATAC_RESET (MSR_ATAC_BASE+0x10)
#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
#define ATAC_BM0_CMD_PRIM 0x00
#define ATAC_BM0_STS_PRIM 0x02
#define ATAC_BM0_PRD 0x04
#define CS5535_CABLE_DETECT 0x48
/* Format I PIO settings. We separate out cmd and data for safer timings */
static unsigned int cs5535_pio_cmd_timings[5] =
{ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 };
static unsigned int cs5535_pio_dta_timings[5] =
{ 0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131 };
static unsigned int cs5535_mwdma_timings[3] =
{ 0x7F0FFFF3, 0x7F035352, 0x7f024241 };
static unsigned int cs5535_udma_timings[5] =
{ 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061 };
/* Macros to check if the register is the reset value - reset value is an
invalid timing and indicates the register has not been set previously */
#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL) == 0x00009172 )
#define CS5535_BAD_DMA(timings) ( (timings & 0x000FFFFF) == 0x00077771 )
/****
* cs5535_set_speed - Configure the chipset to the new speed
* @drive: Drive to set up
* @speed: desired speed
*
* cs5535_set_speed() configures the chipset to a new speed.
*/
static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
{
u32 reg = 0, dummy;
u8 unit = drive->dn & 1;
/* Set the PIO timings */
if (speed < XFER_SW_DMA_0) {
ide_drive_t *pair = ide_get_pair_dev(drive);
u8 cmd, pioa;
cmd = pioa = speed - XFER_PIO_0;
if (pair) {
u8 piob = pair->pio_mode - XFER_PIO_0;
if (piob < cmd)
cmd = piob;
}
/* Write the speed of the current drive */
reg = (cs5535_pio_cmd_timings[cmd] << 16) |
cs5535_pio_dta_timings[pioa];
wrmsr(unit ? ATAC_CH0D1_PIO : ATAC_CH0D0_PIO, reg, 0);
/* And if necessary - change the speed of the other drive */
rdmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, dummy);
if (((reg >> 16) & cs5535_pio_cmd_timings[cmd]) !=
cs5535_pio_cmd_timings[cmd]) {
reg &= 0x0000FFFF;
reg |= cs5535_pio_cmd_timings[cmd] << 16;
wrmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, 0);
}
/* Set bit 31 of the DMA register for PIO format 1 timings */
rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA,
reg | 0x80000000UL, 0);
} else {
rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
reg &= 0x80000000UL; /* Preserve the PIO format bit */
if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_4)
reg |= cs5535_udma_timings[speed - XFER_UDMA_0];
else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0];
else
return;
wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, 0);
}
}
/**
* cs5535_set_dma_mode - set host controller for DMA mode
* @hwif: port
* @drive: drive
*
* Programs the chipset for DMA mode.
*/
static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
cs5535_set_speed(drive, drive->dma_mode);
}
/**
* cs5535_set_pio_mode - set host controller for PIO mode
* @hwif: port
* @drive: drive
*
* A callback from the upper layers for PIO-only tuning.
*/
static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
cs5535_set_speed(drive, drive->pio_mode);
}
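/**
 * cs5535_cable_detect - detect the drive cable type
 * @hwif: port
 *
 * Reads the cable detect bit from PCI config space and reports an
 * 80-wire cable when it is set, a 40-wire cable otherwise.
 */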
static u8 cs5535_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 bit;
/* if a 80 wire cable was detected */
pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}
static const struct ide_port_ops cs5535_port_ops = {
.set_pio_mode = cs5535_set_pio_mode,
.set_dma_mode = cs5535_set_dma_mode,
.cable_detect = cs5535_cable_detect,
};
static const struct ide_port_info cs5535_chipset = {
.name = DRV_NAME,
.port_ops = &cs5535_port_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
};
static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &cs5535_chipset, NULL);
}
static const struct pci_device_id cs5535_pci_tbl[] = {
{ PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), 0 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, cs5535_pci_tbl);
static struct pci_driver cs5535_pci_driver = {
.name = "CS5535_IDE",
.id_table = cs5535_pci_tbl,
.probe = cs5535_init_one,
.remove = ide_pci_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
static int __init cs5535_ide_init(void)
{
return ide_pci_register_driver(&cs5535_pci_driver);
}
static void __exit cs5535_ide_exit(void)
{
pci_unregister_driver(&cs5535_pci_driver);
}
module_init(cs5535_ide_init);
module_exit(cs5535_ide_exit);
MODULE_AUTHOR("AMD");
MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
akkufix/HTC_M7xx_kernel-4.4x | arch/x86/kernel/amd_nb.c | 5824 | 7001 | /*
* Shared support code for AMD K8 northbridges and derivatives.
* Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>
static u32 *flush_words;
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
static struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{}
};
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
{ 0x00, 0x18, 0x20 },
{ 0xff, 0x00, 0x20 },
{ 0xfe, 0x00, 0x20 },
{ }
};
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
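/*
 * next_northbridge() --- walk the global PCI device list, starting
 * after @dev, and return the next device matching one of @ids (NULL
 * when the list is exhausted).
 */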
static struct pci_dev *next_northbridge(struct pci_dev *dev,
const struct pci_device_id *ids)
{
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (!dev)
break;
} while (!pci_match_id(ids, dev));
return dev;
}
int amd_cache_northbridges(void)
{
u16 i = 0;
struct amd_northbridge *nb;
struct pci_dev *misc, *link;
if (amd_nb_num())
return 0;
misc = NULL;
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;
if (i == 0)
return 0;
nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
return -ENOMEM;
amd_northbridges.nb = nb;
amd_northbridges.num = i;
link = misc = NULL;
for (i = 0; i != amd_nb_num(); i++) {
node_to_amd_nb(i)->misc = misc =
next_northbridge(misc, amd_nb_misc_ids);
node_to_amd_nb(i)->link = link =
next_northbridge(link, amd_nb_link_ids);
}
/* some CPU families (e.g. family 0x11) do not support GART */
if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_GART;
/*
* Some CPU families support L3 Cache Index Disable. There are some
* limitations because of E382 and E388 on family 0x10.
*/
if (boot_cpu_data.x86 == 0x10 &&
boot_cpu_data.x86_model >= 0x8 &&
(boot_cpu_data.x86_model > 0x9 ||
boot_cpu_data.x86_mask >= 0x1))
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
if (boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
/* L3 cache partitioning is supported on family 0x15 */
if (boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
/*
* Ignores subdevice/subvendor but as far as I can figure out
* they're useless anyways
*/
bool __init early_is_amd_nb(u32 device)
{
const struct pci_device_id *id;
u32 vendor = device & 0xffff;
device >>= 16;
for (id = amd_nb_misc_ids; id->vendor; id++)
if (vendor == id->vendor && device == id->device)
return true;
return false;
}
struct resource *amd_get_mmconfig_range(struct resource *res)
{
u32 address;
u64 base, msr;
unsigned segn_busn_bits;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return NULL;
/* assume all cpus from fam10h have mmconfig */
if (boot_cpu_data.x86 < 0x10)
return NULL;
address = MSR_FAM10H_MMIO_CONF_BASE;
rdmsrl(address, msr);
/* mmconfig is not enabled */
if (!(msr & FAM10H_MMIO_CONF_ENABLE))
return NULL;
base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
FAM10H_MMIO_CONF_BUSRANGE_MASK;
res->flags = IORESOURCE_MEM;
res->start = base;
res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
return res;
}
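/*
 * L3 partitioning: register 0x1d4 of the northbridge link function
 * holds a 4-bit subcache enable mask per compute unit; the helpers
 * below read and rewrite the nibble for the unit owning @cpu.
 */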
int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
unsigned int mask;
int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
return 0;
pci_read_config_dword(link, 0x1d4, &mask);
cuid = cpu_data(cpu).compute_unit_id;
return (mask >> (4 * cuid)) & 0xf;
}
int amd_set_subcaches(int cpu, int mask)
{
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
unsigned int reg;
int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
return -EINVAL;
/* if necessary, collect reset state of L3 partitioning and BAN mode */
if (reset == 0) {
pci_read_config_dword(nb->link, 0x1d4, &reset);
pci_read_config_dword(nb->misc, 0x1b8, &ban);
ban &= 0x180000;
}
/* deactivate BAN mode if any subcaches are to be disabled */
if (mask != 0xf) {
pci_read_config_dword(nb->misc, 0x1b8, ®);
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
}
cuid = cpu_data(cpu).compute_unit_id;
mask <<= 4 * cuid;
mask |= (0xf ^ (1 << cuid)) << 26;
pci_write_config_dword(nb->link, 0x1d4, mask);
/* reset BAN mode if L3 partitioning returned to reset state */
pci_read_config_dword(nb->link, 0x1d4, ®);
if (reg == reset) {
pci_read_config_dword(nb->misc, 0x1b8, ®);
reg &= ~0x180000;
pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
}
return 0;
}
static int amd_cache_gart(void)
{
u16 i;
if (!amd_nb_has_feature(AMD_NB_GART))
return 0;
flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
if (!flush_words) {
amd_northbridges.flags &= ~AMD_NB_GART;
return -ENOMEM;
}
for (i = 0; i != amd_nb_num(); i++)
pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
&flush_words[i]);
return 0;
}
void amd_flush_garts(void)
{
int flushed, i;
unsigned long flags;
static DEFINE_SPINLOCK(gart_lock);
if (!amd_nb_has_feature(AMD_NB_GART))
return;
/* Avoid races between AGP and IOMMU. In theory it's not needed
but I'm not sure if the hardware won't lose flush requests
when another is pending. This whole thing is so expensive anyways
that it doesn't matter to serialize more. -AK */
spin_lock_irqsave(&gart_lock, flags);
flushed = 0;
for (i = 0; i < amd_nb_num(); i++) {
pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
flush_words[i] | 1);
flushed++;
}
for (i = 0; i < amd_nb_num(); i++) {
u32 w;
/* Make sure the hardware actually executed the flush*/
for (;;) {
pci_read_config_dword(node_to_amd_nb(i)->misc,
0x9c, &w);
if (!(w & 1))
break;
cpu_relax();
}
}
spin_unlock_irqrestore(&gart_lock, flags);
if (!flushed)
printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
static __init int init_amd_nbs(void)
{
int err = 0;
err = amd_cache_northbridges();
if (err < 0)
printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
if (amd_cache_gart() < 0)
printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
"GART support disabled.\n");
return err;
}
/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
| gpl-2.0 |
Flemmard/htc7x30-3.0 | fs/hpfs/name.c | 10944 | 3234 | /*
* linux/fs/hpfs/name.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* operations with filenames
*/
#include "hpfs_fn.h"
static inline int not_allowed_char(unsigned char c)
{
return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' ||
c=='>' || c=='?' || c=='\\' || c=='|';
}
static inline int no_dos_char(unsigned char c)
{ /* Characters that are allowed in HPFS but not in DOS */
return c=='+' || c==',' || c==';' || c=='=' || c=='[' || c==']';
}
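/*
 * Case conversion: plain ASCII (and 255) is handled inline; other
 * characters go through the volume's code page table when one is
 * loaded and are returned unchanged otherwise.
 */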
static inline unsigned char upcase(unsigned char *dir, unsigned char a)
{
if (a<128 || a==255) return a>='a' && a<='z' ? a - 0x20 : a;
if (!dir) return a;
return dir[a-128];
}
unsigned char hpfs_upcase(unsigned char *dir, unsigned char a)
{
return upcase(dir, a);
}
static inline unsigned char locase(unsigned char *dir, unsigned char a)
{
if (a<128 || a==255) return a>='A' && a<='Z' ? a + 0x20 : a;
if (!dir) return a;
return dir[a];
}
int hpfs_chk_name(const unsigned char *name, unsigned *len)
{
int i;
if (*len > 254) return -ENAMETOOLONG;
hpfs_adjust_length(name, len);
if (!*len) return -EINVAL;
for (i = 0; i < *len; i++) if (not_allowed_char(name[i])) return -EINVAL;
if (*len == 1) if (name[0] == '.') return -EINVAL;
if (*len == 2) if (name[0] == '.' && name[1] == '.') return -EINVAL;
return 0;
}
unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from,
unsigned len, int lc, int lng)
{
unsigned char *to;
int i;
if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) {
printk("HPFS: Long name flag mismatch - name ");
for (i=0; i<len; i++) printk("%c", from[i]);
printk(" misidentified as %s.\n", lng ? "short" : "long");
printk("HPFS: It's nothing serious. It could happen because of bug in OS/2.\nHPFS: Set checks=normal to disable this message.\n");
}
if (!lc) return from;
if (!(to = kmalloc(len, GFP_KERNEL))) {
printk("HPFS: can't allocate memory for name conversion buffer\n");
return from;
}
for (i = 0; i < len; i++) to[i] = locase(hpfs_sb(s)->sb_cp_table,from[i]);
return to;
}
int hpfs_compare_names(struct super_block *s,
const unsigned char *n1, unsigned l1,
const unsigned char *n2, unsigned l2, int last)
{
unsigned l = l1 < l2 ? l1 : l2;
unsigned i;
if (last) return -1;
for (i = 0; i < l; i++) {
unsigned char c1 = upcase(hpfs_sb(s)->sb_cp_table,n1[i]);
unsigned char c2 = upcase(hpfs_sb(s)->sb_cp_table,n2[i]);
if (c1 < c2) return -1;
if (c1 > c2) return 1;
}
if (l1 < l2) return -1;
if (l1 > l2) return 1;
return 0;
}
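/*
 * A name needs the HPFS long name flag if it does not fit the DOS 8.3
 * pattern or uses characters that HPFS allows but DOS does not.
 */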
int hpfs_is_name_long(const unsigned char *name, unsigned len)
{
int i,j;
for (i = 0; i < len && name[i] != '.'; i++)
if (no_dos_char(name[i])) return 1;
if (!i || i > 8) return 1;
if (i == len) return 0;
for (j = i + 1; j < len; j++)
if (name[j] == '.' || no_dos_char(name[j])) return 1;
return j - i > 4;
}
/* OS/2 clears dots and spaces at the end of file name, so we have to */
void hpfs_adjust_length(const unsigned char *name, unsigned *len)
{
if (!*len) return;
if (*len == 1 && name[0] == '.') return;
if (*len == 2 && name[0] == '.' && name[1] == '.') return;
while (*len && (name[*len - 1] == '.' || name[*len - 1] == ' '))
(*len)--;
}
| gpl-2.0 |
eoghan2t9/primou-kernel-IRONMAN | arch/blackfin/mach-bf538/dma.c | 12224 | 3039 | /*
* the simple DMA Implementation for Blackfin
*
* Copyright 2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <asm/blackfin.h>
#include <asm/dma.h>
struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
(struct dma_register *) DMA3_NEXT_DESC_PTR,
(struct dma_register *) DMA4_NEXT_DESC_PTR,
(struct dma_register *) DMA5_NEXT_DESC_PTR,
(struct dma_register *) DMA6_NEXT_DESC_PTR,
(struct dma_register *) DMA7_NEXT_DESC_PTR,
(struct dma_register *) DMA8_NEXT_DESC_PTR,
(struct dma_register *) DMA9_NEXT_DESC_PTR,
(struct dma_register *) DMA10_NEXT_DESC_PTR,
(struct dma_register *) DMA11_NEXT_DESC_PTR,
(struct dma_register *) DMA12_NEXT_DESC_PTR,
(struct dma_register *) DMA13_NEXT_DESC_PTR,
(struct dma_register *) DMA14_NEXT_DESC_PTR,
(struct dma_register *) DMA15_NEXT_DESC_PTR,
(struct dma_register *) DMA16_NEXT_DESC_PTR,
(struct dma_register *) DMA17_NEXT_DESC_PTR,
(struct dma_register *) DMA18_NEXT_DESC_PTR,
(struct dma_register *) DMA19_NEXT_DESC_PTR,
(struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
(struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
(struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
(struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
(struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
(struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
(struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
(struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
};
EXPORT_SYMBOL(dma_io_base_addr);
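/*
 * channel2irq() --- map a DMA channel number to the IRQ that services
 * its peripheral; returns -1 for channels with no interrupt mapping.
 */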
int channel2irq(unsigned int channel)
{
int ret_irq = -1;
switch (channel) {
case CH_PPI:
ret_irq = IRQ_PPI;
break;
case CH_UART0_RX:
ret_irq = IRQ_UART0_RX;
break;
case CH_UART0_TX:
ret_irq = IRQ_UART0_TX;
break;
case CH_UART1_RX:
ret_irq = IRQ_UART1_RX;
break;
case CH_UART1_TX:
ret_irq = IRQ_UART1_TX;
break;
case CH_UART2_RX:
ret_irq = IRQ_UART2_RX;
break;
case CH_UART2_TX:
ret_irq = IRQ_UART2_TX;
break;
case CH_SPORT0_RX:
ret_irq = IRQ_SPORT0_RX;
break;
case CH_SPORT0_TX:
ret_irq = IRQ_SPORT0_TX;
break;
case CH_SPORT1_RX:
ret_irq = IRQ_SPORT1_RX;
break;
case CH_SPORT1_TX:
ret_irq = IRQ_SPORT1_TX;
break;
case CH_SPORT2_RX:
ret_irq = IRQ_SPORT2_RX;
break;
case CH_SPORT2_TX:
ret_irq = IRQ_SPORT2_TX;
break;
case CH_SPORT3_RX:
ret_irq = IRQ_SPORT3_RX;
break;
case CH_SPORT3_TX:
ret_irq = IRQ_SPORT3_TX;
break;
case CH_SPI0:
ret_irq = IRQ_SPI0;
break;
case CH_SPI1:
ret_irq = IRQ_SPI1;
break;
case CH_SPI2:
ret_irq = IRQ_SPI2;
break;
case CH_MEM_STREAM0_SRC:
case CH_MEM_STREAM0_DEST:
ret_irq = IRQ_MEM0_DMA0;
break;
case CH_MEM_STREAM1_SRC:
case CH_MEM_STREAM1_DEST:
ret_irq = IRQ_MEM0_DMA1;
break;
case CH_MEM_STREAM2_SRC:
case CH_MEM_STREAM2_DEST:
ret_irq = IRQ_MEM1_DMA0;
break;
case CH_MEM_STREAM3_SRC:
case CH_MEM_STREAM3_DEST:
ret_irq = IRQ_MEM1_DMA1;
break;
}
return ret_irq;
}
| gpl-2.0 |
cattleprod/samsung-kernel-gt-i9100 | mm/kmemcheck.c | 12736 | 2910 | #include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>
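/*
 * Every tracked data page is paired with a shadow page of the same
 * order whose bytes record the initialization state of the
 * corresponding data bytes; page[i].shadow links the two together.
 */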
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
struct page *shadow;
int pages;
int i;
pages = 1 << order;
/*
* With kmemcheck enabled, we need to allocate a memory area for the
* shadow bits as well.
*/
shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
if (!shadow) {
if (printk_ratelimit())
printk(KERN_ERR "kmemcheck: failed to allocate "
"shadow bitmap\n");
return;
}
for(i = 0; i < pages; ++i)
page[i].shadow = page_address(&shadow[i]);
/*
* Mark it as non-present for the MMU so that our accesses to
* this memory will trigger a page fault and let us analyze
* the memory accesses.
*/
kmemcheck_hide_pages(page, pages);
}
void kmemcheck_free_shadow(struct page *page, int order)
{
struct page *shadow;
int pages;
int i;
if (!kmemcheck_page_is_tracked(page))
return;
pages = 1 << order;
kmemcheck_show_pages(page, pages);
shadow = virt_to_page(page[0].shadow);
for(i = 0; i < pages; ++i)
page[i].shadow = NULL;
__free_pages(shadow, order);
}
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
size_t size)
{
/*
* Has already been memset(), which initializes the shadow for us
* as well.
*/
if (gfpflags & __GFP_ZERO)
return;
/* No need to initialize the shadow of a non-tracked slab. */
if (s->flags & SLAB_NOTRACK)
return;
if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
/*
* Allow notracked objects to be allocated from
* tracked caches. Note however that these objects
* will still get page faults on access, they just
* won't ever be flagged as uninitialized. If page
* faults are not acceptable, the slab cache itself
* should be marked NOTRACK.
*/
kmemcheck_mark_initialized(object, size);
} else if (!s->ctor) {
/*
* New objects should be marked uninitialized before
* they're returned to the called.
*/
kmemcheck_mark_uninitialized(object, size);
}
}
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
/* TODO: RCU freeing is unsupported for now; hide false positives. */
if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
kmemcheck_mark_freed(object, size);
}
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
gfp_t gfpflags)
{
int pages;
if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
return;
pages = 1 << order;
/*
* NOTE: We choose to track GFP_ZERO pages too; in fact, they
* can become uninitialized by copying uninitialized memory
* into them.
*/
/* XXX: Can use zone->node for node? */
kmemcheck_alloc_shadow(page, order, gfpflags, -1);
if (gfpflags & __GFP_ZERO)
kmemcheck_mark_initialized_pages(page, pages);
else
kmemcheck_mark_uninitialized_pages(page, pages);
}
| gpl-2.0 |
savoca/h811 | crypto/cipher.c | 12992 | 3391 | /*
* Cryptographic API.
*
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = cia->cia_setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
if ((unsigned long)key & alignmask)
return setkey_unaligned(tfm, key, keylen);
return cia->cia_setkey(tfm, key, keylen);
}
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
const u8 *),
struct crypto_tfm *tfm,
u8 *dst, const u8 *src)
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
unsigned int size = crypto_tfm_alg_blocksize(tfm);
u8 buffer[size + alignmask];
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(tmp, src, size);
fn(tfm, tmp, tmp);
memcpy(dst, tmp, size);
}
static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
u8 *dst, const u8 *src)
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
return;
}
cipher->cia_encrypt(tfm, dst, src);
}
static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
u8 *dst, const u8 *src)
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
return;
}
cipher->cia_decrypt(tfm, dst, src);
}
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
struct cipher_tfm *ops = &tfm->crt_cipher;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
ops->cit_setkey = setkey;
ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
cipher_encrypt_unaligned : cipher->cia_encrypt;
ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
cipher_decrypt_unaligned : cipher->cia_decrypt;
return 0;
}
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}
| gpl-2.0 |
fschaefer/android-samsung-3.0-jb | arch/powerpc/boot/cuboot-taishan.c | 14016 | 1361 | /*
* Old U-boot compatibility for Taishan
*
* Author: Hugh Blemings <hugh@au.ibm.com>
*
* Copyright 2007 Hugh Blemings, IBM Corporation.
* Based on cuboot-ebony.c which is:
* Copyright 2007 David Gibson, IBM Corporation.
* Based on cuboot-83xx.c, which is:
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "cuboot.h"
#include "reg.h"
#include "dcr.h"
#include "4xx.h"
#define TARGET_4xx
#define TARGET_44x
#define TARGET_440GX
#include "ppcboot.h"
static bd_t bd;
BSS_STACK(4096);
static void taishan_fixups(void)
{
/* FIXME: sysclk should be derived by reading the FPGA
registers */
unsigned long sysclk = 33000000;
ibm440gx_fixup_clocks(sysclk, 6 * 1843200, 25000000);
ibm4xx_sdram_fixup_memsize();
dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
}
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
CUBOOT_INIT();
platform_ops.fixups = taishan_fixups;
fdt_init(_dtb_start);
serial_console_init();
}
| gpl-2.0 |
windxixi/kernel_ef39s_ics | drivers/gpu/msm/adreno_postmortem.c | 193 | 21527 | /* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/vmalloc.h>
#include "kgsl.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_postmortem.h"
#include "adreno_debugfs.h"
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
#include "a2xx_reg.h"
#define INVALID_RB_CMD 0xaaaaaaaa
#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
struct pm_id_name {
uint32_t id;
char name[9];
};
static const struct pm_id_name pm0_types[] = {
{REG_PA_SC_AA_CONFIG, "RPASCAAC"},
{REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"},
{REG_SCRATCH_REG2, "RSCRTRG2"},
{REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"},
{REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"},
{REG_TC_CNTL_STATUS, "RTCCNTLS"},
{REG_TP0_CHICKEN, "RTP0CHCK"},
{REG_CP_TIMESTAMP, "CP_TM_ST"},
};
static const struct pm_id_name pm3_types[] = {
{CP_COND_EXEC, "CND_EXEC"},
{CP_CONTEXT_UPDATE, "CX__UPDT"},
{CP_DRAW_INDX, "DRW_NDX_"},
{CP_DRAW_INDX_BIN, "DRW_NDXB"},
{CP_EVENT_WRITE, "EVENT_WT"},
{CP_IM_LOAD, "IN__LOAD"},
{CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
{CP_IM_STORE, "IM_STORE"},
{CP_INDIRECT_BUFFER, "IND_BUF_"},
{CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
{CP_INTERRUPT, "PM4_INTR"},
{CP_INVALIDATE_STATE, "INV_STAT"},
{CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
{CP_ME_INIT, "ME__INIT"},
{CP_NOP, "PM4__NOP"},
{CP_REG_RMW, "REG__RMW"},
{CP_REG_TO_MEM, "REG2_MEM"},
{CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
{CP_SET_CONSTANT, "ST_CONST"},
{CP_SET_PROTECTED_MODE, "ST_PRT_M"},
{CP_SET_SHADER_BASES, "ST_SHD_B"},
{CP_WAIT_FOR_IDLE, "WAIT4IDL"},
};
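/*
 * PM4 packet headers carry the packet type in bits 31:30 and the
 * payload length in bits 29:16; the helpers below extract those
 * fields and map known type0/type3 opcodes to short display names.
 */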
static uint32_t adreno_is_pm4_len(uint32_t word)
{
if (word == INVALID_RB_CMD)
return 0;
return (word >> 16) & 0x3FFF;
}
static bool adreno_is_pm4_type(uint32_t word)
{
int i;
if (word == INVALID_RB_CMD)
return 1;
if (adreno_is_pm4_len(word) > 16)
return 0;
if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return 1;
}
return 0;
}
if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return 1;
}
return 0;
}
return 0;
}
static const char *adreno_pm4_name(uint32_t word)
{
int i;
if (word == INVALID_RB_CMD)
return "--------";
if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return pm0_types[i].name;
}
return "????????";
}
if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return pm3_types[i].name;
}
return "????????";
}
return "????????";
}
static void adreno_dump_regs(struct kgsl_device *device,
const int *registers, int size)
{
int range = 0, offset = 0;
for (range = 0; range < size; range++) {
/* start and end are in dword offsets */
int start = registers[range * 2];
int end = registers[range * 2 + 1];
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
int linelen, i;
for (offset = start; offset <= end; offset += linelen) {
unsigned int regvals[32/4];
linelen = min(end+1-offset, 32/4);
for (i = 0; i < linelen; ++i)
kgsl_regread(device, offset+i, regvals+i);
hex_dump_to_buffer(regvals, linelen*4, 32, 4,
linebuf, sizeof(linebuf), 0);
KGSL_LOG_DUMP(device,
"REG: %5.5X: %s\n", offset, linebuf);
}
}
}
static void dump_ib(struct kgsl_device *device, char* buffId, uint32_t pt_base,
uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump)
{
uint8_t *base_addr = adreno_convertaddr(device, pt_base,
ib_base, ib_size*sizeof(uint32_t));
if (base_addr && dump)
print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
32, 4, base_addr, ib_size*4, 0);
else
KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d "
"offset:%5.5X%s\n",
buffId, ib_base, ib_size*4, base_offset,
base_addr ? "" : " [Invalid]");
}
#define IB_LIST_SIZE 64
struct ib_list {
int count;
uint32_t bases[IB_LIST_SIZE];
uint32_t sizes[IB_LIST_SIZE];
uint32_t offsets[IB_LIST_SIZE];
};
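/*
 * dump_ib1() --- dump the level-1 indirect buffer, then scan it for
 * indirect-buffer packets and record each unique level-2 IB (base,
 * size, offset) in *ib_list so the caller can dump those too.
 */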
static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
uint32_t base_offset,
uint32_t ib1_base, uint32_t ib1_size,
struct ib_list *ib_list, bool dump)
{
int i, j;
uint32_t value;
uint32_t *ib1_addr;
dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
ib1_size, dump);
/* fetch virtual address for given IB base */
ib1_addr = (uint32_t *)adreno_convertaddr(device, pt_base,
ib1_base, ib1_size*sizeof(uint32_t));
if (!ib1_addr)
return;
for (i = 0; i+3 < ib1_size; ) {
value = ib1_addr[i++];
if (adreno_cmd_is_ib(value)) {
uint32_t ib2_base = ib1_addr[i++];
uint32_t ib2_size = ib1_addr[i++];
/* find previous match */
for (j = 0; j < ib_list->count; ++j)
if (ib_list->sizes[j] == ib2_size
&& ib_list->bases[j] == ib2_base)
break;
if (j < ib_list->count || ib_list->count
>= IB_LIST_SIZE)
continue;
/* store match */
ib_list->sizes[ib_list->count] = ib2_size;
ib_list->bases[ib_list->count] = ib2_base;
ib_list->offsets[ib_list->count] = i<<2;
++ib_list->count;
}
}
}
static void adreno_dump_rb_buffer(const void *buf, size_t len,
char *linebuf, size_t linebuflen, int *argp)
{
const u32 *ptr4 = buf;
const int ngroups = len;
int lx = 0, j;
bool nxsp = 1;
for (j = 0; j < ngroups; j++) {
if (*argp < 0) {
lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
*argp = -*argp;
} else if (nxsp)
lx += scnprintf(linebuf + lx, linebuflen - lx, " ");
else
nxsp = 1;
if (!*argp && adreno_is_pm4_type(ptr4[j])) {
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s", adreno_pm4_name(ptr4[j]));
*argp = -(adreno_is_pm4_len(ptr4[j])+1);
} else {
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%8.8X", ptr4[j]);
if (*argp > 1)
--*argp;
else if (*argp == 1) {
*argp = 0;
nxsp = 0;
lx += scnprintf(linebuf + lx, linebuflen - lx,
"> ");
}
}
}
linebuf[lx] = '\0';
}
static bool adreno_rb_use_hex(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
return 1;
#else
return 0;
#endif
}
static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
size_t len, int start, int size)
{
const uint32_t *ptr = buf;
int i, remaining, args = 0;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
const int rowsize = 8;
len >>= 2;
remaining = len;
for (i = 0; i < len; i += rowsize) {
int linelen = min(remaining, rowsize);
remaining -= rowsize;
if (adreno_rb_use_hex())
hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
linebuf, sizeof(linebuf), 0);
else
adreno_dump_rb_buffer(ptr+i, linelen, linebuf,
sizeof(linebuf), &args);
KGSL_LOG_DUMP(device,
"RB: %4.4X:%s\n", (start+i)%size, linebuf);
}
}
static bool adreno_ib_dump_enabled(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
return false;
#else
return true;
#endif
}
struct log_field {
bool show;
const char *display;
};
static int adreno_dump_fields_line(struct kgsl_device *device,
const char *start, char *str, int slen,
const struct log_field **lines,
int num)
{
const struct log_field *l = *lines;
int sptr, count = 0;
sptr = snprintf(str, slen, "%s", start);
for ( ; num && sptr < slen; num--, l++) {
int ilen = strlen(l->display);
if (!l->show)
continue;
if (count)
ilen += strlen(" | ");
if (ilen > (slen - sptr))
break;
if (count++)
sptr += snprintf(str + sptr, slen - sptr, " | ");
sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
}
KGSL_LOG_DUMP(device, "%s\n", str);
*lines = l;
return num;
}
static void adreno_dump_fields(struct kgsl_device *device,
const char *start, const struct log_field *lines,
int num)
{
char lb[90];
const char *sstr = start;
lb[sizeof(lb) - 1] = '\0';
while (num) {
int ret = adreno_dump_fields_line(device, sstr, lb,
sizeof(lb) - 1, &lines, num);
if (ret == num)
break;
num = ret;
sstr = " ";
}
}
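/*
* Illustrative output of the two helpers above (field names taken from
* the STATUS table below, values invented): set fields are joined with
* " | " into the 90-byte scratch line, and overflow continues on a new
* line prefixed with spaces, e.g.
*
* STATUS=CMD FIFO=3  | TC busy      | RB busy
*    SQ cntx0 bsy
*/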
static int adreno_dump(struct kgsl_device *device)
{
unsigned int r1, r2, r3, rbbm_status;
unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat;
unsigned int cp_ib2_base, cp_ib2_bufsz;
unsigned int pt_base, cur_pt_base;
unsigned int cp_rb_base, rb_count;
unsigned int cp_rb_wptr, cp_rb_rptr;
unsigned int i;
int result = 0;
uint32_t *rb_copy;
const uint32_t *rb_vaddr;
int num_item = 0;
int read_idx, write_idx;
unsigned int ts_processed;
static struct ib_list ib_list;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
mb();
kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X | PM_OVERRIDE1 = %08X | "
"PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3);
kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
KGSL_LOG_DUMP(device, " INT_CNTL = %08X | INT_STATUS = %08X | "
"READ_ERROR = %08X\n", r1, r2, r3);
{
char cmdFifo[16];
struct log_field lines[] = {
{rbbm_status & 0x001F, cmdFifo},
{rbbm_status & BIT(5), "TC busy "},
{rbbm_status & BIT(8), "HIRQ pending"},
{rbbm_status & BIT(9), "CPRQ pending"},
{rbbm_status & BIT(10), "CFRQ pending"},
{rbbm_status & BIT(11), "PFRQ pending"},
{rbbm_status & BIT(12), "VGT 0DMA bsy"},
{rbbm_status & BIT(14), "RBBM WU busy"},
{rbbm_status & BIT(16), "CP NRT busy "},
{rbbm_status & BIT(18), "MH busy "},
{rbbm_status & BIT(19), "MH chncy bsy"},
{rbbm_status & BIT(21), "SX busy "},
{rbbm_status & BIT(22), "TPC busy "},
{rbbm_status & BIT(24), "SC CNTX busy"},
{rbbm_status & BIT(25), "PA busy "},
{rbbm_status & BIT(26), "VGT busy "},
{rbbm_status & BIT(27), "SQ cntx1 bsy"},
{rbbm_status & BIT(28), "SQ cntx0 bsy"},
{rbbm_status & BIT(30), "RB busy "},
{rbbm_status & BIT(31), "Grphs pp bsy"},
};
snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X ",
rbbm_status & 0xf);
adreno_dump_fields(device, " STATUS=", lines,
ARRAY_SIZE(lines));
}
kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base);
kgsl_regread(device, REG_CP_RB_CNTL, &r2);
rb_count = 2 << (r2 & (BIT(6)-1));
kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
KGSL_LOG_DUMP(device,
"CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
" | rb_count = %08X\n", cp_rb_base, r2, r3, rb_count);
{
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
if (rb->sizedwords != rb_count)
rb_count = rb->sizedwords;
}
kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
KGSL_LOG_DUMP(device,
" RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
"\n", cp_rb_rptr, cp_rb_wptr, r3);
kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base);
kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz);
KGSL_LOG_DUMP(device,
"CP_IB1: BASE = %08X | BUFSZ = %d\n", cp_ib1_base,
cp_ib1_bufsz);
kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
KGSL_LOG_DUMP(device,
"CP_IB2: BASE = %08X | BUFSZ = %d\n", cp_ib2_base,
cp_ib2_bufsz);
kgsl_regread(device, REG_CP_INT_CNTL, &r1);
kgsl_regread(device, REG_CP_INT_STATUS, &r2);
KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
kgsl_regread(device, REG_CP_ME_CNTL, &r1);
kgsl_regread(device, REG_CP_ME_STATUS, &r2);
kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
KGSL_LOG_DUMP(device,
"CP_ME: CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
"%08X\n", r1, r2, r3);
kgsl_regread(device, REG_CP_STAT, &cp_stat);
KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
{
struct log_field lns[] = {
{cp_stat & BIT(0), "WR_BSY 0"},
{cp_stat & BIT(1), "RD_RQ_BSY 1"},
{cp_stat & BIT(2), "RD_RTN_BSY 2"},
};
adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(5), "RING_BUSY 5"},
{cp_stat & BIT(6), "NDRCTS_BSY 6"},
{cp_stat & BIT(7), "NDRCT2_BSY 7"},
{cp_stat & BIT(9), "ST_BUSY 9"},
{cp_stat & BIT(10), "BUSY 10"},
};
adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(11), "RNG_Q_BSY 11"},
{cp_stat & BIT(12), "NDRCTS_Q_B12"},
{cp_stat & BIT(13), "NDRCT2_Q_B13"},
{cp_stat & BIT(16), "ST_QUEUE_B16"},
{cp_stat & BIT(17), "PFP_BUSY 17"},
};
adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(3), "RBIU_BUSY 3"},
{cp_stat & BIT(4), "RCIU_BUSY 4"},
{cp_stat & BIT(18), "MQ_RG_BSY 18"},
{cp_stat & BIT(19), "MQ_NDRS_BS19"},
{cp_stat & BIT(20), "MQ_NDR2_BS20"},
{cp_stat & BIT(21), "MIU_WC_STL21"},
{cp_stat & BIT(22), "CP_NRT_BSY22"},
{cp_stat & BIT(23), "3D_BUSY 23"},
{cp_stat & BIT(26), "ME_BUSY 26"},
{cp_stat & BIT(29), "ME_WC_BSY 29"},
{cp_stat & BIT(30), "MIU_FF EM 30"},
{cp_stat & BIT(31), "CP_BUSY 31"},
};
adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
}
#endif
kgsl_regread(device, REG_SCRATCH_REG0, &r1);
KGSL_LOG_DUMP(device, "SCRATCH_REG0 = %08X\n", r1);
kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
KGSL_LOG_DUMP(device,
"COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4"
" = %08X\n", r1, r2, r3);
kgsl_regread(device, MH_AXI_ERROR, &r1);
KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1);
kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
kgsl_regread(device, MH_MMU_CONFIG, &r2);
kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
KGSL_LOG_DUMP(device,
"MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE ="
" %08X\n", r1, r2, r3);
kgsl_regread(device, MH_MMU_MPU_END, &r1);
kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
pt_base = kgsl_mmu_get_current_ptbase(device);
KGSL_LOG_DUMP(device,
" MPU_END = %08X | VA_RANGE = %08X | PT_BASE ="
" %08X\n", r1, r2, pt_base);
cur_pt_base = pt_base;
KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", KGSL_PAGETABLE_SIZE);
kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1);
kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
KGSL_LOG_DUMP(device,
"MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
ts_processed = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
cp_rb_rptr);
if (num_item <= 0)
KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
rb_copy = vmalloc(rb_count<<2);
if (!rb_copy) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"vmalloc(%d) failed\n", rb_count << 2);
result = -ENOMEM;
goto end;
}
KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
cp_rb_base, rb_count<<2, num_item);
if (adreno_dev->ringbuffer.buffer_desc.gpuaddr != cp_rb_base)
KGSL_LOG_POSTMORTEM_WRITE(device,
"rb address mismatch, should be 0x%08x\n",
adreno_dev->ringbuffer.buffer_desc.gpuaddr);
rb_vaddr = adreno_dev->ringbuffer.buffer_desc.hostptr;
if (!rb_vaddr) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"rb has no kernel mapping!\n");
goto error_vfree;
}
read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (read_idx < 0)
read_idx += rb_count;
write_idx = (int)cp_rb_wptr + 16;
if (write_idx > rb_count)
write_idx -= rb_count;
num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16;
if (num_item > rb_count)
num_item = rb_count;
if (write_idx >= read_idx)
memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
else {
int part1_c = rb_count-read_idx;
memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
}
/* extract the latest ib commands from the buffer */
ib_list.count = 0;
i = 0;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
if (adreno_cmd_is_ib(this_cmd)) {
uint32_t ib_addr = rb_copy[read_idx++];
uint32_t ib_size = rb_copy[read_idx++];
dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
ib_size, &ib_list, 0);
for (; i < ib_list.count; ++i)
dump_ib(device, "IB2:", cur_pt_base,
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
} else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
"pagetable base: %x\n",
kgsl_mmu_get_ptname_from_ptbase(cur_pt_base),
cur_pt_base);
/* Set cur_pt_base to the new pagetable base */
cur_pt_base = rb_copy[read_idx++];
KGSL_LOG_DUMP(device, "New pagetable: %x\t"
"pagetable base: %x\n",
kgsl_mmu_get_ptname_from_ptbase(cur_pt_base),
cur_pt_base);
}
}
/* Restore cur_pt_base back to the pt_base of
the process in whose context the GPU hung */
cur_pt_base = pt_base;
read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (read_idx < 0)
read_idx += rb_count;
KGSL_LOG_DUMP(device,
"RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n",
cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
if (adreno_ib_dump_enabled()) {
for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY;
read_idx >= 0; --read_idx) {
uint32_t this_cmd = rb_copy[read_idx];
if (adreno_cmd_is_ib(this_cmd)) {
uint32_t ib_addr = rb_copy[read_idx+1];
uint32_t ib_size = rb_copy[read_idx+2];
if (ib_size && cp_ib1_base == ib_addr) {
KGSL_LOG_DUMP(device,
"IB1: base:%8.8X "
"count:%d\n", ib_addr, ib_size);
dump_ib(device, "IB1: ", cur_pt_base,
read_idx<<2, ib_addr, ib_size,
1);
}
}
}
for (i = 0; i < ib_list.count; ++i) {
uint32_t ib_size = ib_list.sizes[i];
uint32_t ib_offset = ib_list.offsets[i];
if (ib_size && cp_ib2_base == ib_list.bases[i]) {
KGSL_LOG_DUMP(device,
"IB2: base:%8.8X count:%d\n",
cp_ib2_base, ib_size);
dump_ib(device, "IB2: ", cur_pt_base, ib_offset,
ib_list.bases[i], ib_size, 1);
}
}
}
/* Dump the registers if the user asked for it */
if (adreno_is_a20x(adreno_dev))
adreno_dump_regs(device, a200_registers,
a200_registers_count);
else if (adreno_is_a22x(adreno_dev))
adreno_dump_regs(device, a220_registers,
a220_registers_count);
error_vfree:
vfree(rb_copy);
end:
return result;
}
/**
* adreno_postmortem_dump - Dump the current GPU state
* @device - A pointer to the KGSL device to dump
* @manual - A flag that indicates if this was a manually triggered
* dump (from debugfs). If zero, then this is assumed to be a
* dump automatically triggered from a hang
*/
int adreno_postmortem_dump(struct kgsl_device *device, int manual)
{
bool saved_nap;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
BUG_ON(device == NULL);
kgsl_cffdump_hang(device->id);
/* For a manual dump, make sure that the system is idle */
if (manual) {
if (device->active_cnt != 0) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->suspend_gate);
mutex_lock(&device->mutex);
}
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
}
KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
pwr->power_flags, pwr->active_pwrlevel);
KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
pwr->interval_timeout);
KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
kgsl_get_clkrate(pwr->grp_clks[0]));
KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
kgsl_get_clkrate(pwr->ebi1_clk));
/* Disable the idle timer so we don't get interrupted */
del_timer_sync(&device->idle_timer);
mutex_unlock(&device->mutex);
flush_workqueue(device->work_queue);
mutex_lock(&device->mutex);
/* Turn off napping to make sure we have the clocks' full
attention through the following process */
saved_nap = device->pwrctrl.nap_allowed;
device->pwrctrl.nap_allowed = false;
/* Force on the clocks */
kgsl_pwrctrl_wake(device);
/* Disable the irq */
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
adreno_dump(device);
/* Restore nap mode */
device->pwrctrl.nap_allowed = saved_nap;
/* On a manual trigger, turn on the interrupts and put
the clocks to sleep. They will recover themselves
on the next event. For a hang, leave things as they
are until recovery kicks in. */
if (manual) {
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
/* try to go into a sleep mode until the next event */
kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
kgsl_pwrctrl_sleep(device);
}
KGSL_DRV_ERR(device, "Dump Finished\n");
return 0;
}
| gpl-2.0 |
zhaiyu/linux-2.6 | drivers/staging/comedi/drivers/pcmad.c | 193 | 4047 | /*
* pcmad.c
* Hardware driver for Winsystems PCM-A/D12 and PCM-A/D16
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000,2001 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver: pcmad
* Description: Winsystems PCM-A/D12, PCM-A/D16
* Devices: (Winsystems) PCM-A/D12 [pcmad12]
* (Winsystems) PCM-A/D16 [pcmad16]
* Author: ds
* Status: untested
*
* This driver was written on a bet that I couldn't write a driver
* in less than 2 hours. I won the bet, but never got paid. =(
*
* Configuration options:
* [0] - I/O port base
* [1] - Analog input reference (must match jumpers)
* 0 = single-ended (16 channels)
* 1 = differential (8 channels)
* [2] - Analog input encoding (must match jumpers)
* 0 = straight binary (0-5V input range)
* 1 = two's complement (+-10V input range)
*/
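/*
* Hedged configuration sketch matching the options above, as read by
* pcmad_attach() below (device node and I/O base are illustrative):
*
* comedi_config /dev/comedi0 pcmad12 0x300,0,1
*
* i.e. base 0x300, single-ended inputs, two's complement encoding
* (+-10V range).
*/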
#include <linux/module.h>
#include "../comedidev.h"
#define PCMAD_STATUS 0
#define PCMAD_LSB 1
#define PCMAD_MSB 2
#define PCMAD_CONVERT 1
struct pcmad_board_struct {
const char *name;
unsigned int ai_maxdata;
};
static const struct pcmad_board_struct pcmad_boards[] = {
{
.name = "pcmad12",
.ai_maxdata = 0x0fff,
}, {
.name = "pcmad16",
.ai_maxdata = 0xffff,
},
};
static int pcmad_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned long context)
{
unsigned int status;
status = inb(dev->iobase + PCMAD_STATUS);
if ((status & 0x3) == 0x3)
return 0;
return -EBUSY;
}
static int pcmad_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val;
int ret;
int i;
for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + PCMAD_CONVERT);
ret = comedi_timeout(dev, s, insn, pcmad_ai_eoc, 0);
if (ret)
return ret;
val = inb(dev->iobase + PCMAD_LSB) |
(inb(dev->iobase + PCMAD_MSB) << 8);
/* data is shifted on the pcmad12, fix it */
if (s->maxdata == 0x0fff)
val >>= 4;
if (comedi_range_is_bipolar(s, range)) {
/* munge the two's complement value */
val ^= ((s->maxdata + 1) >> 1);
}
data[i] = val;
}
return insn->n;
}
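/*
* Worked example of the bipolar munge above (illustrative readings): on
* the 12-bit pcmad12, s->maxdata is 0x0fff, so the XOR mask is 0x800.
* A raw two's complement sample of 0x800 (negative full scale) becomes
* 0x000, 0x000 (zero volts) becomes 0x800 (mid-scale), and 0x7ff
* (positive full scale) becomes 0xfff - the straight binary comedi
* expects.
*/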
static int pcmad_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcmad_board_struct *board = comedi_board(dev);
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], 0x04);
if (ret)
return ret;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
return ret;
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
if (it->options[1]) {
/* 8 differential channels */
s->subdev_flags = SDF_READABLE | AREF_DIFF;
s->n_chan = 8;
} else {
/* 16 single-ended channels */
s->subdev_flags = SDF_READABLE | AREF_GROUND;
s->n_chan = 16;
}
s->len_chanlist = 1;
s->maxdata = board->ai_maxdata;
s->range_table = it->options[2] ? &range_bipolar10 : &range_unipolar5;
s->insn_read = pcmad_ai_insn_read;
return 0;
}
static struct comedi_driver pcmad_driver = {
.driver_name = "pcmad",
.module = THIS_MODULE,
.attach = pcmad_attach,
.detach = comedi_legacy_detach,
.board_name = &pcmad_boards[0].name,
.num_names = ARRAY_SIZE(pcmad_boards),
.offset = sizeof(pcmad_boards[0]),
};
module_comedi_driver(pcmad_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
iTechnoguy/zwkernel | mm/migrate.c | 449 | 46080 | /*
* Memory Migration functionality - linux/mm/migration.c
*
* Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
*
* Page migration was first developed in the context of the memory hotplug
* project. The main authors of the migration code are:
*
* IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
* Hirokazu Takahashi <taka@valinux.co.jp>
* Dave Hansen <haveblue@us.ibm.com>
* Christoph Lameter
*/
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <trace/events/kmem.h>
#include <asm/tlbflush.h>
#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>
#include "internal.h"
/*
* migrate_prep() needs to be called before we start compiling a list of pages
* to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
* undesirable, use migrate_prep_local()
*/
int migrate_prep(void)
{
/*
* Clear the LRU lists so pages can be isolated.
* Note that pages may be moved off the LRU after we have
* drained them. Those pages will fail to migrate like other
* pages that may be busy.
*/
lru_add_drain_all();
return 0;
}
/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
lru_add_drain();
return 0;
}
/*
* Add isolated pages on the list back to the LRU under page lock
* to avoid leaking evictable pages back onto unevictable list.
*/
void putback_lru_pages(struct list_head *l)
{
struct page *page;
struct page *page2;
list_for_each_entry_safe(page, page2, l, lru) {
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
putback_lru_page(page);
}
}
/*
* Put previously isolated pages back onto the appropriate lists
* from where they were once taken off for compaction/migration.
*
* This function shall be used instead of putback_lru_pages(),
* whenever the isolated pageset has been built by isolate_migratepages_range()
*/
void putback_movable_pages(struct list_head *l)
{
struct page *page;
struct page *page2;
list_for_each_entry_safe(page, page2, l, lru) {
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
if (unlikely(isolated_balloon_page(page)))
balloon_page_putback(page);
else
putback_lru_page(page);
}
}
/*
* Restore a potential migration pte to a working pte entry
*/
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
unsigned long addr, void *old)
{
struct mm_struct *mm = vma->vm_mm;
swp_entry_t entry;
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
if (unlikely(PageHuge(new))) {
ptep = huge_pte_offset(mm, addr);
if (!ptep)
goto out;
ptl = &mm->page_table_lock;
} else {
pmd = mm_find_pmd(mm, addr);
if (!pmd)
goto out;
if (pmd_trans_huge(*pmd))
goto out;
ptep = pte_offset_map(pmd, addr);
/*
* Peek to check is_swap_pte() before taking ptlock? No, we
* can race mremap's move_ptes(), which skips anon_vma lock.
*/
ptl = pte_lockptr(mm, pmd);
}
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
goto unlock;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry) ||
migration_entry_to_page(entry) != old)
goto unlock;
get_page(new);
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
if (is_write_migration_entry(entry))
pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
pte = pte_mkhuge(pte);
pte = arch_make_huge_pte(pte, vma, new, 0);
}
#endif
flush_dcache_page(new);
set_pte_at(mm, addr, ptep, pte);
if (PageHuge(new)) {
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, addr);
else
page_dup_rmap(new);
} else if (PageAnon(new))
page_add_anon_rmap(new, vma, addr);
else
page_add_file_rmap(new);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, ptep);
unlock:
pte_unmap_unlock(ptep, ptl);
out:
return SWAP_AGAIN;
}
/*
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
static void remove_migration_ptes(struct page *old, struct page *new)
{
rmap_walk(new, remove_migration_pte, old);
}
/*
* Something used the pte of a page under migration. We need to
* get to the page and wait until migration is finished.
* When we return from this function the fault will be retried.
*/
static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl)
{
pte_t pte;
swp_entry_t entry;
struct page *page;
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
goto out;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto out;
page = migration_entry_to_page(entry);
/*
* Once the radix-tree replacement step of page migration has started,
* page_count *must* be zero. And we don't want to call wait_on_page_locked()
* against a page without get_page().
* So, we use get_page_unless_zero(), here. Even failed, page fault
* will occur again.
*/
if (!get_page_unless_zero(page))
goto out;
pte_unmap_unlock(ptep, ptl);
wait_on_page_locked(page);
put_page(page);
return;
out:
pte_unmap_unlock(ptep, ptl);
}
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address)
{
spinlock_t *ptl = pte_lockptr(mm, pmd);
pte_t *ptep = pte_offset_map(pmd, address);
__migration_entry_wait(mm, ptep, ptl);
}
void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
{
spinlock_t *ptl = &(mm)->page_table_lock;
__migration_entry_wait(mm, pte, ptl);
}
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
enum migrate_mode mode)
{
struct buffer_head *bh = head;
/* Simple case, sync compaction */
if (mode != MIGRATE_ASYNC) {
do {
get_bh(bh);
lock_buffer(bh);
bh = bh->b_this_page;
} while (bh != head);
return true;
}
/* async case, we cannot block on lock_buffer so use trylock_buffer */
do {
get_bh(bh);
if (!trylock_buffer(bh)) {
/*
* We failed to lock the buffer and cannot stall in
* async migration. Release the taken locks
*/
struct buffer_head *failed_bh = bh;
put_bh(failed_bh);
bh = head;
while (bh != failed_bh) {
unlock_buffer(bh);
put_bh(bh);
bh = bh->b_this_page;
}
return false;
}
bh = bh->b_this_page;
} while (bh != head);
return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
enum migrate_mode mode)
{
return true;
}
#endif /* CONFIG_BLOCK */
/*
* Replace the page in the mapping.
*
* The number of remaining references must be:
* 1 for anonymous pages without a mapping
* 2 for pages with a mapping
* 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
*/
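/*
* Worked breakdown of the counts above, matching the expected_count
* computation below: the radix tree slot holds one reference and the
* migration caller's isolation holds another (hence 2), PagePrivate
* buffers pin a third (hence 3), and an anonymous page with no mapping
* carries only the caller's reference (hence 1).
*/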
static int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
struct buffer_head *head, enum migrate_mode mode)
{
int expected_count = 0;
void **pslot;
if (!mapping) {
/* Anonymous page without mapping */
if (page_count(page) != 1)
return -EAGAIN;
return MIGRATEPAGE_SUCCESS;
}
spin_lock_irq(&mapping->tree_lock);
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
/*
* In the async migration case of moving a page with buffers, lock the
* buffers using trylock before the mapping is moved. If the mapping
* were moved first and we then failed to lock the buffers, we could not
* move the mapping back due to the elevated page count and would have
* to block waiting on other references to be dropped.
*/
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
page_unfreeze_refs(page, expected_count);
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
/*
* Now we know that no one else is looking at the page.
*/
get_page(newpage); /* add cache reference */
if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
}
radix_tree_replace_slot(pslot, newpage);
/*
* Drop cache reference from old page by unfreezing
* to one less reference.
* We know this isn't the last reference.
*/
page_unfreeze_refs(page, expected_count - 1);
/*
* If moved to a different zone then also account
* the page for that zone. Other VM counters will be
* taken care of when we establish references to the
* new page and drop references to the old page.
*
* Note that anonymous pages are accounted for
* via NR_FILE_PAGES and NR_ANON_PAGES if they
* are mapped to swap space.
*/
__dec_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(newpage, NR_FILE_PAGES);
if (!PageSwapCache(page) && PageSwapBacked(page)) {
__dec_zone_page_state(page, NR_SHMEM);
__inc_zone_page_state(newpage, NR_SHMEM);
}
spin_unlock_irq(&mapping->tree_lock);
return MIGRATEPAGE_SUCCESS;
}
/*
* The expected number of remaining references is the same as that
* of migrate_page_move_mapping().
*/
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page)
{
int expected_count;
void **pslot;
if (!mapping) {
if (page_count(page) != 1)
return -EAGAIN;
return MIGRATEPAGE_SUCCESS;
}
spin_lock_irq(&mapping->tree_lock);
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
get_page(newpage);
radix_tree_replace_slot(pslot, newpage);
page_unfreeze_refs(page, expected_count - 1);
spin_unlock_irq(&mapping->tree_lock);
return MIGRATEPAGE_SUCCESS;
}
/*
* Copy the page to its new location
*/
void migrate_page_copy(struct page *newpage, struct page *page)
{
if (PageHuge(page) || PageTransHuge(page))
copy_huge_page(newpage, page);
else
copy_highpage(newpage, page);
if (PageError(page))
SetPageError(newpage);
if (PageReferenced(page))
SetPageReferenced(newpage);
if (PageUptodate(page))
SetPageUptodate(newpage);
if (TestClearPageActive(page)) {
VM_BUG_ON(PageUnevictable(page));
SetPageActive(newpage);
} else if (TestClearPageUnevictable(page))
SetPageUnevictable(newpage);
if (PageChecked(page))
SetPageChecked(newpage);
if (PageMappedToDisk(page))
SetPageMappedToDisk(newpage);
if (PageDirty(page)) {
clear_page_dirty_for_io(page);
/*
* Want to mark the page and the radix tree as dirty, and
* redo the accounting that clear_page_dirty_for_io undid,
* but we can't use set_page_dirty because that function
* is actually a signal that all of the page has become dirty,
* whereas only part of our page may be dirty.
*/
if (PageSwapBacked(page))
SetPageDirty(newpage);
else
__set_page_dirty_nobuffers(newpage);
}
mlock_migrate_page(newpage, page);
ksm_migrate_page(newpage, page);
/*
* Please do not reorder this without considering how mm/ksm.c's
* get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
*/
ClearPageSwapCache(page);
ClearPagePrivate(page);
set_page_private(page, 0);
/*
* If any waiters have accumulated on the new page then
* wake them up.
*/
if (PageWriteback(newpage))
end_page_writeback(newpage);
}
/************************************************************
* Migration functions
***********************************************************/
/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page)
{
return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);
/*
* Common logic to directly migrate a single page suitable for
* pages that do not use PagePrivate/PagePrivate2.
*
* Pages are locked upon entry and exit.
*/
int migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
migrate_page_copy(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
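/*
* Hedged wiring sketch (not taken from this file): an address_space
* whose pages carry no private state can typically point its aops at
* this helper directly, e.g.
*
* static const struct address_space_operations example_aops = {
* .migratepage = migrate_page,
* };
*
* where example_aops is an illustrative name, not a real symbol.
*/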
#ifdef CONFIG_BLOCK
/*
* Migration function for pages with buffers. This function can only be used
* if the underlying filesystem guarantees that no other references to "page"
* exist.
*/
int buffer_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct buffer_head *bh, *head;
int rc;
if (!page_has_buffers(page))
return migrate_page(mapping, newpage, page, mode);
head = page_buffers(page);
rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
/*
* In the async case, migrate_page_move_mapping locked the buffers
* with an IRQ-safe spinlock held. In the sync case, the buffers
* need to be locked now
*/
if (mode != MIGRATE_ASYNC)
BUG_ON(!buffer_migrate_lock_buffers(head, mode));
ClearPagePrivate(page);
set_page_private(newpage, page_private(page));
set_page_private(page, 0);
put_page(page);
get_page(newpage);
bh = head;
do {
set_bh_page(bh, newpage, bh_offset(bh));
bh = bh->b_this_page;
} while (bh != head);
SetPagePrivate(newpage);
migrate_page_copy(newpage, page);
bh = head;
do {
unlock_buffer(bh);
put_bh(bh);
bh = bh->b_this_page;
} while (bh != head);
return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
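/*
* Hedged usage note (outside this file, stated as an assumption):
* block-backed mappings whose pages carry buffer_heads wire this helper
* in instead, e.g. def_blk_aops in fs/block_dev.c sets
* .migratepage = buffer_migrate_page.
*/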
#endif
/*
* Writeback a page to clean the dirty state
*/
static int writeout(struct address_space *mapping, struct page *page)
{
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
.range_start = 0,
.range_end = LLONG_MAX,
.for_reclaim = 1
};
int rc;
if (!mapping->a_ops->writepage)
/* No write method for the address space */
return -EINVAL;
if (!clear_page_dirty_for_io(page))
/* Someone else already triggered a write */
return -EAGAIN;
/*
* A dirty page may imply that the underlying filesystem has
* the page on some queue. So the page must be clean for
* migration. Writeout may mean we lose the lock and the
* page state is no longer what we checked for earlier.
* At this point we know that the migration attempt cannot
* be successful.
*/
remove_migration_ptes(page, page);
rc = mapping->a_ops->writepage(page, &wbc);
if (rc != AOP_WRITEPAGE_ACTIVATE)
/* unlocked. Relock */
lock_page(page);
return (rc < 0) ? -EIO : -EAGAIN;
}
/*
* Default handling if a filesystem does not provide a migration function.
*/
static int fallback_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
if (PageDirty(page)) {
/* Only writeback pages in full synchronous migration */
if (mode != MIGRATE_SYNC)
return -EBUSY;
return writeout(mapping, page);
}
/*
* Buffers may be managed in a filesystem specific way.
* We must have no buffers or drop them.
*/
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
return -EAGAIN;
return migrate_page(mapping, newpage, page, mode);
}
/*
* Move a page to a newly allocated page
* The page is locked and all ptes have been successfully removed.
*
* The new page will have replaced the old page if this function
* is successful.
*
* Return value:
* < 0 - error code
* MIGRATEPAGE_SUCCESS - success
*/
static int move_to_new_page(struct page *newpage, struct page *page,
int remap_swapcache, enum migrate_mode mode)
{
struct address_space *mapping;
int rc;
/*
* Block others from accessing the page when we get around to
* establishing additional references. We are the only one
* holding a reference to the new page at this point.
*/
if (!trylock_page(newpage))
BUG();
/* Prepare mapping for the new page.*/
newpage->index = page->index;
newpage->mapping = page->mapping;
if (PageSwapBacked(page))
SetPageSwapBacked(newpage);
mapping = page_mapping(page);
if (!mapping)
rc = migrate_page(mapping, newpage, page, mode);
else if (mapping->a_ops->migratepage)
/*
* Most pages have a mapping and most filesystems provide a
* migratepage callback. Anonymous pages are part of swap
* space which also has its own migratepage callback. This
* is the most common path for page migration.
*/
rc = mapping->a_ops->migratepage(mapping,
newpage, page, mode);
else
rc = fallback_migrate_page(mapping, newpage, page, mode);
if (rc != MIGRATEPAGE_SUCCESS) {
newpage->mapping = NULL;
} else {
if (remap_swapcache)
remove_migration_ptes(page, newpage);
page->mapping = NULL;
}
unlock_page(newpage);
return rc;
}
static int __unmap_and_move(struct page *page, struct page *newpage,
int force, enum migrate_mode mode)
{
int rc = -EAGAIN;
int remap_swapcache = 1;
struct mem_cgroup *mem;
struct anon_vma *anon_vma = NULL;
if (!trylock_page(page)) {
if (!force || mode == MIGRATE_ASYNC)
goto out;
/*
* It's not safe for direct compaction to call lock_page.
* For example, during page readahead pages are added locked
* to the LRU. Later, when the IO completes the pages are
* marked uptodate and unlocked. However, the queueing
* could be merging multiple pages for one bio (e.g.
* mpage_readpages). If an allocation happens for the
* second or third page, the process can end up locking
* the same page twice and deadlocking. Rather than
* trying to be clever about what pages can be locked,
* avoid the use of lock_page for direct compaction
* altogether.
*/
if (current->flags & PF_MEMALLOC)
goto out;
lock_page(page);
}
/* charge against new page */
mem_cgroup_prepare_migration(page, newpage, &mem);
if (PageWriteback(page)) {
/*
* Only in the case of a full synchronous migration is it
* necessary to wait for PageWriteback. In the async case,
* the retry loop is too short and in the sync-light case,
* the overhead of stalling is too much
*/
if (mode != MIGRATE_SYNC) {
rc = -EBUSY;
goto uncharge;
}
if (!force)
goto uncharge;
wait_on_page_writeback(page);
}
/*
* By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
* we cannot notice that anon_vma is freed while we migrate a page.
* This get_anon_vma() delays freeing the anon_vma pointer until the
* end of migration. File cache pages are no problem because of
* page_lock(); file caches may use write_page() or lock_page() during
* migration, so only anon pages need this care here.
*/
if (PageAnon(page) && !PageKsm(page)) {
/*
* Only page_lock_anon_vma_read() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
*/
anon_vma = page_get_anon_vma(page);
if (anon_vma) {
/*
* Anon page
*/
} else if (PageSwapCache(page)) {
/*
* We cannot be sure that the anon_vma of an unmapped
* swapcache page is safe to use because we don't
* know in advance if the VMA that this page belonged
* to still exists. If the VMA and others sharing the
* data have been freed, then the anon_vma could
* already be invalid.
*
* To avoid this possibility, swapcache pages get
* migrated but are not remapped when migration
* completes
*/
remap_swapcache = 0;
} else {
goto uncharge;
}
}
if (unlikely(balloon_page_movable(page))) {
/*
* A ballooned page does not need any special attention from
* physical to virtual reverse mapping procedures.
* Skip any attempt to unmap PTEs or to remap swap cache,
* in order to avoid burning cycles at rmap level, and perform
* the page migration right away (protected by page lock).
*/
rc = balloon_page_migrate(newpage, page, mode);
goto uncharge;
}
/*
* Corner case handling:
* 1. When a new swap-cache page is read into, it is added to the LRU
* and treated as swapcache but it has no rmap yet.
* Calling try_to_unmap() against a page->mapping==NULL page will
* trigger a BUG. So handle it here.
* 2. An orphaned page (see truncate_complete_page) might have
* fs-private metadata. The page can be picked up due to memory
* offlining. Everywhere else except page reclaim, the page is
* invisible to the vm, so the page can not be migrated. So try to
* free the metadata, so the page can be freed.
*/
if (!page->mapping) {
VM_BUG_ON(PageAnon(page));
if (page_has_private(page)) {
try_to_free_buffers(page);
goto uncharge;
}
goto skip_unmap;
}
/* Establish migration ptes or remove ptes */
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
skip_unmap:
if (!page_mapped(page))
rc = move_to_new_page(newpage, page, remap_swapcache, mode);
if (rc && remap_swapcache)
remove_migration_ptes(page, page);
/* Drop an anon_vma reference if we took one */
if (anon_vma)
put_anon_vma(anon_vma);
uncharge:
mem_cgroup_end_migration(mem, page, newpage,
(rc == MIGRATEPAGE_SUCCESS ||
rc == MIGRATEPAGE_BALLOON_SUCCESS));
unlock_page(page);
out:
return rc;
}
/*
* Obtain the lock on page, remove all ptes and migrate the page
* to the newly allocated page in newpage.
*/
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
struct page *page, int force, enum migrate_mode mode)
{
int rc = 0;
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
if (!newpage)
return -ENOMEM;
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
goto out;
}
if (unlikely(PageTransHuge(page)))
if (unlikely(split_huge_page(page)))
goto out;
rc = __unmap_and_move(page, newpage, force, mode);
if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
/*
* A ballooned page has been migrated already.
* Now, it's the time to wrap-up counters,
* handle the page back to Buddy and return.
*/
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
balloon_page_free(page);
return MIGRATEPAGE_SUCCESS;
}
out:
if (rc != -EAGAIN) {
/*
* A page that has been migrated has all references
* removed and will be freed. A page that has not been
* migrated will have kept its references and be
* restored.
*/
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
putback_lru_page(page);
}
/*
* Move the new page to the LRU. If migration was not successful
* then this will free the page.
*/
putback_lru_page(newpage);
if (result) {
if (rc)
*result = rc;
else
*result = page_to_nid(newpage);
}
return rc;
}
/*
* Counterpart of unmap_and_move() for hugepage migration.
*
* This function doesn't wait for the completion of hugepage I/O
* because there is no race between I/O and migration for hugepages.
* Note that currently hugepage I/O occurs only in direct I/O
* where no lock is held and PG_writeback is irrelevant,
* and the writeback status of all subpages is counted in the reference
* count of the head page (i.e. if all subpages of a 2MB hugepage are
* under direct I/O, the reference of the head page is 512 and a bit more.)
* This means that when we try to migrate a hugepage whose subpages are
* doing direct I/O, some references remain after try_to_unmap() and
* hugepage migration fails without data corruption.
*
* There is also no race when direct I/O is issued on the page under migration,
* because then pte is replaced with migration swap entry and direct I/O code
* will wait in the page fault for migration to complete.
*/
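/*
* Worked numbers for the refcount claim above (illustrative sizes): a
* 2MB hugepage over 4KB base pages spans 2MB / 4KB = 512 subpages, and
* each subpage under direct I/O contributes a reference counted on the
* head page - hence "512 and a bit more".
*/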
static int unmap_and_move_huge_page(new_page_t get_new_page,
unsigned long private, struct page *hpage,
int force, enum migrate_mode mode)
{
int rc = 0;
int *result = NULL;
struct page *new_hpage = get_new_page(hpage, private, &result);
struct anon_vma *anon_vma = NULL;
/*
* Movability of hugepages depends on architectures and hugepage size.
* This check is necessary because some callers of hugepage migration
* like soft offline and memory hotremove don't walk through page
* tables or check whether the hugepage is pmd-based or not before
* kicking migration.
*/
if (!hugepage_migration_support(page_hstate(hpage)))
return -ENOSYS;
if (!new_hpage)
return -ENOMEM;
rc = -EAGAIN;
if (!trylock_page(hpage)) {
if (!force || mode != MIGRATE_SYNC)
goto out;
lock_page(hpage);
}
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
if (!page_mapped(hpage))
rc = move_to_new_page(new_hpage, hpage, 1, mode);
if (rc)
remove_migration_ptes(hpage, hpage);
if (anon_vma)
put_anon_vma(anon_vma);
if (!rc)
hugetlb_cgroup_migrate(hpage, new_hpage);
unlock_page(hpage);
out:
put_page(new_hpage);
if (result) {
if (rc)
*result = rc;
else
*result = page_to_nid(new_hpage);
}
return rc;
}
/*
* migrate_pages - migrate the pages specified in a list, to the free pages
* supplied as the target for the page migration
*
* @from: The list of pages to be migrated.
* @get_new_page: The function used to allocate free pages to be used
* as the target of the page migration.
* @private: Private data to be passed on to get_new_page()
* @mode: The migration mode that specifies the constraints for
* page migration, if any.
* @reason: The reason for page migration.
*
* The function returns after 10 attempts or once no pages are movable
* any more, either because the list has become empty or because no
* retryable pages remain.
* The caller should call putback_lru_pages() to return pages to the LRU
* or free list only if ret != 0.
*
* Returns the number of pages that were not migrated, or an error code.
*/
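/*
* Usage sketch grounded in this file: do_move_page_to_node_array()
* below builds an isolated list and calls
*
* err = migrate_pages(&pagelist, new_page_node,
* (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
* if (err)
* putback_lru_pages(&pagelist);
*
* matching the contract above: a non-zero return leaves pages on the
* list that the caller must put back.
*/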
int migrate_pages(struct list_head *from, new_page_t get_new_page,
unsigned long private, enum migrate_mode mode, int reason)
{
int retry = 1;
int nr_failed = 0;
int nr_succeeded = 0;
int pass = 0;
struct page *page;
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
int rc;
trace_migrate_pages_start(mode);
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
for(pass = 0; pass < 10 && retry; pass++) {
retry = 0;
list_for_each_entry_safe(page, page2, from, lru) {
cond_resched();
rc = unmap_and_move(get_new_page, private,
page, pass > 2, mode);
switch(rc) {
case -ENOMEM:
goto out;
case -EAGAIN:
retry++;
trace_migrate_retry(retry);
break;
case MIGRATEPAGE_SUCCESS:
nr_succeeded++;
break;
default:
/* Permanent failure */
nr_failed++;
break;
}
}
}
rc = nr_failed + retry;
out:
if (nr_succeeded)
count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
if (nr_failed)
count_vm_events(PGMIGRATE_FAIL, nr_failed);
trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
if (!swapwrite)
current->flags &= ~PF_SWAPWRITE;
trace_migrate_pages_end(mode);
return rc;
}
int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
unsigned long private, enum migrate_mode mode)
{
int pass, rc;
for (pass = 0; pass < 10; pass++) {
rc = unmap_and_move_huge_page(get_new_page, private,
hpage, pass > 2, mode);
switch (rc) {
case -ENOMEM:
goto out;
case -EAGAIN:
/* try again */
cond_resched();
break;
case MIGRATEPAGE_SUCCESS:
goto out;
default:
rc = -EIO;
goto out;
}
}
out:
return rc;
}
#ifdef CONFIG_NUMA
/*
* Move a list of individual pages
*/
struct page_to_node {
unsigned long addr;
struct page *page;
int node;
int status;
};
static struct page *new_page_node(struct page *p, unsigned long private,
int **result)
{
struct page_to_node *pm = (struct page_to_node *)private;
while (pm->node != MAX_NUMNODES && pm->page != p)
pm++;
if (pm->node == MAX_NUMNODES)
return NULL;
*result = &pm->status;
return alloc_pages_exact_node(pm->node,
GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
/*
* Move a set of pages as indicated in the pm array. The addr
* field must be set to the virtual address of the page to be moved
* and the node number must contain a valid target node.
* The pm array ends with node = MAX_NUMNODES.
*/
static int do_move_page_to_node_array(struct mm_struct *mm,
struct page_to_node *pm,
int migrate_all)
{
int err;
struct page_to_node *pp;
LIST_HEAD(pagelist);
down_read(&mm->mmap_sem);
/*
* Build a list of pages to migrate
*/
for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
struct vm_area_struct *vma;
struct page *page;
err = -EFAULT;
vma = find_vma(mm, pp->addr);
if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
goto set_status;
page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
err = PTR_ERR(page);
if (IS_ERR(page))
goto set_status;
err = -ENOENT;
if (!page)
goto set_status;
/* Use PageReserved to check for zero page */
if (PageReserved(page))
goto put_and_set;
pp->page = page;
err = page_to_nid(page);
if (err == pp->node)
/*
* Node already in the right place
*/
goto put_and_set;
err = -EACCES;
if (page_mapcount(page) > 1 &&
!migrate_all)
goto put_and_set;
err = isolate_lru_page(page);
if (!err) {
list_add_tail(&page->lru, &pagelist);
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
}
put_and_set:
/*
* Either remove the duplicate refcount from
* isolate_lru_page() or drop the page ref if it was
* not isolated.
*/
put_page(page);
set_status:
pp->status = err;
}
err = 0;
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node,
(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_lru_pages(&pagelist);
}
up_read(&mm->mmap_sem);
return err;
}
/*
* Migrate an array of page address onto an array of nodes and fill
* the corresponding array of status.
*/
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
unsigned long nr_pages,
const void __user * __user *pages,
const int __user *nodes,
int __user *status, int flags)
{
struct page_to_node *pm;
unsigned long chunk_nr_pages;
unsigned long chunk_start;
int err;
err = -ENOMEM;
pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
if (!pm)
goto out;
migrate_prep();
/*
* Store a chunk of page_to_node array in a page,
* but keep the last one as a marker
*/
chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
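/*
* Illustrative arithmetic (assuming a 64-bit kernel with 4K pages):
* struct page_to_node is 8 + 8 + 4 + 4 = 24 bytes, so one page holds
* 170 entries, leaving 169 for work after reserving the MAX_NUMNODES
* end marker.
*/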
for (chunk_start = 0;
chunk_start < nr_pages;
chunk_start += chunk_nr_pages) {
int j;
if (chunk_start + chunk_nr_pages > nr_pages)
chunk_nr_pages = nr_pages - chunk_start;
/* fill the chunk pm with addrs and nodes from user-space */
for (j = 0; j < chunk_nr_pages; j++) {
const void __user *p;
int node;
err = -EFAULT;
if (get_user(p, pages + j + chunk_start))
goto out_pm;
pm[j].addr = (unsigned long) p;
if (get_user(node, nodes + j + chunk_start))
goto out_pm;
err = -ENODEV;
if (node < 0 || node >= MAX_NUMNODES)
goto out_pm;
if (!node_state(node, N_MEMORY))
goto out_pm;
err = -EACCES;
if (!node_isset(node, task_nodes))
goto out_pm;
pm[j].node = node;
}
/* End marker for this chunk */
pm[chunk_nr_pages].node = MAX_NUMNODES;
/* Migrate this chunk */
err = do_move_page_to_node_array(mm, pm,
flags & MPOL_MF_MOVE_ALL);
if (err < 0)
goto out_pm;
/* Return status information */
for (j = 0; j < chunk_nr_pages; j++)
if (put_user(pm[j].status, status + j + chunk_start)) {
err = -EFAULT;
goto out_pm;
}
}
err = 0;
out_pm:
free_page((unsigned long)pm);
out:
return err;
}
/*
* Determine the nodes of an array of pages and store it in an array of status.
*/
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
const void __user **pages, int *status)
{
unsigned long i;
down_read(&mm->mmap_sem);
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
struct vm_area_struct *vma;
struct page *page;
int err = -EFAULT;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start)
goto set_status;
page = follow_page(vma, addr, 0);
err = PTR_ERR(page);
if (IS_ERR(page))
goto set_status;
err = -ENOENT;
/* Use PageReserved to check for zero page */
if (!page || PageReserved(page))
goto set_status;
err = page_to_nid(page);
set_status:
*status = err;
pages++;
status++;
}
up_read(&mm->mmap_sem);
}
/*
* Determine the nodes of a user array of pages and store it in
* a user array of status.
*/
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
const void __user * __user *pages,
int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
int chunk_status[DO_PAGES_STAT_CHUNK_NR];
while (nr_pages) {
unsigned long chunk_nr;
chunk_nr = nr_pages;
if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
chunk_nr = DO_PAGES_STAT_CHUNK_NR;
if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
break;
do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
break;
pages += chunk_nr;
status += chunk_nr;
nr_pages -= chunk_nr;
}
return nr_pages ? -EFAULT : 0;
}
/*
* Move a list of pages in the address space of the currently executing
* process.
*/
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
const void __user * __user *, pages,
const int __user *, nodes,
int __user *, status, int, flags)
{
const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
struct mm_struct *mm;
int err;
nodemask_t task_nodes;
/* Check flags */
if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
/* Find the mm_struct */
rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
rcu_read_unlock();
return -ESRCH;
}
get_task_struct(task);
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
* capabilities, superuser privileges or the same
* userid as the target process.
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
!capable(CAP_SYS_NICE)) {
rcu_read_unlock();
err = -EPERM;
goto out;
}
rcu_read_unlock();
err = security_task_movememory(task);
if (err)
goto out;
task_nodes = cpuset_mems_allowed(task);
mm = get_task_mm(task);
put_task_struct(task);
if (!mm)
return -EINVAL;
if (nodes)
err = do_pages_move(mm, task_nodes, nr_pages, pages,
nodes, status, flags);
else
err = do_pages_stat(mm, nr_pages, pages, status);
mmput(mm);
return err;
out:
put_task_struct(task);
return err;
}
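/*
* Hedged userspace sketch of the syscall wired up above (the wrapper
* comes from libnuma's <numaif.h>; the address and node are invented):
*
* void *pages[1] = { addr };
* int nodes[1] = { 1 };
* int status[1];
* long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
*
* On success status[0] holds the page's node; on per-page failure it
* holds a negative errno, mirroring do_pages_move() above.
*/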
/*
* Call migration functions in the vma_ops that may prepare
* memory in a vm for migration. Migration functions may perform
* the migration for vmas that do not have an underlying page struct.
*/
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
const nodemask_t *from, unsigned long flags)
{
struct vm_area_struct *vma;
int err = 0;
for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
if (vma->vm_ops && vma->vm_ops->migrate) {
err = vma->vm_ops->migrate(vma, to, from, flags);
if (err)
break;
}
}
return err;
}
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns true if this is a safe migration target node for misplaced NUMA
* pages. Currently it only checks the watermarks, which is crude.
*/
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
unsigned long nr_migrate_pages)
{
int z;
for (z = pgdat->nr_zones - 1; z >= 0; z--) {
struct zone *zone = pgdat->node_zones + z;
if (!populated_zone(zone))
continue;
if (!zone_reclaimable(zone))
continue;
/* Avoid waking kswapd by allocating pages_to_migrate pages. */
if (!zone_watermark_ok(zone, 0,
high_wmark_pages(zone) +
nr_migrate_pages,
0, 0))
continue;
return true;
}
return false;
}
static struct page *alloc_misplaced_dst_page(struct page *page,
unsigned long data,
int **result)
{
int nid = (int) data;
struct page *newpage;
newpage = alloc_pages_exact_node(nid,
(GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
__GFP_NOMEMALLOC | __GFP_NORETRY |
__GFP_NOWARN) &
~GFP_IOFS, 0);
if (newpage)
page_nid_xchg_last(newpage, page_nid_last(page));
return newpage;
}
/*
* page migration rate limiting control.
* Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
* window of time. Default here says do not migrate more than 1280M per second.
* If a node is rate-limited then PTE NUMA updates are also rate-limited. However
* as it is faults that reset the window, pte updates will happen unconditionally
* if there has not been a fault since @pteupdate_interval_millisecs after the
* throttle window closed.
*/
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
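/*
* Worked numbers for the defaults above: ratelimit_pages is
* 128 << (20 - PAGE_SHIFT) pages, i.e. 128MB worth of base pages
* regardless of page size, allowed per 100ms window - the "1280M per
* second" quoted in the comment above.
*/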
/* Returns true if NUMA migration is currently rate limited */
bool migrate_ratelimited(int node)
{
pg_data_t *pgdat = NODE_DATA(node);
if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
msecs_to_jiffies(pteupdate_interval_millisecs)))
return false;
if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
return false;
return true;
}
/* Returns true if the node is migrate rate-limited after the update */
bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
{
bool rate_limited = false;
/*
* Rate-limit the amount of data that is being migrated to a node.
* Optimal placement is no good if the memory bus is saturated and
* all the time is being spent migrating!
*/
spin_lock(&pgdat->numabalancing_migrate_lock);
if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
pgdat->numabalancing_migrate_nr_pages = 0;
pgdat->numabalancing_migrate_next_window = jiffies +
msecs_to_jiffies(migrate_interval_millisecs);
}
if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
rate_limited = true;
else
pgdat->numabalancing_migrate_nr_pages += nr_pages;
spin_unlock(&pgdat->numabalancing_migrate_lock);
return rate_limited;
}
int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
/* Avoid migrating to a node that is nearly full */
if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
return 0;
if (isolate_lru_page(page))
return 0;
/*
* migrate_misplaced_transhuge_page() skips page migration's usual
* check on page_count(), so we must do it here, now that the page
* has been isolated: a GUP pin, or any other pin, prevents migration.
* The expected page count is 3: 1 for the page's mapcount, 1 for the
* caller's pin, and 1 for the reference taken by isolate_lru_page().
*/
if (PageTransHuge(page) && page_count(page) != 3) {
putback_lru_page(page);
return 0;
}
page_lru = page_is_file_cache(page);
mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
hpage_nr_pages(page));
/*
* Isolating the page has taken another reference, so the
* caller's reference can be safely dropped without the page
* disappearing underneath us during migration.
*/
put_page(page);
return 1;
}
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
* the page that will be dropped by this function before returning.
*/
int migrate_misplaced_page(struct page *page, int node)
{
pg_data_t *pgdat = NODE_DATA(node);
int isolated;
int nr_remaining;
LIST_HEAD(migratepages);
/*
* Don't migrate pages that are mapped in multiple processes.
* TODO: Handle false sharing detection instead of this hammer
*/
if (page_mapcount(page) != 1)
goto out;
/*
* Rate-limit the amount of data that is being migrated to a node.
* Optimal placement is no good if the memory bus is saturated and
* all the time is being spent migrating!
*/
if (numamigrate_update_ratelimit(pgdat, 1))
goto out;
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated)
goto out;
list_add(&page->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
if (nr_remaining) {
putback_lru_pages(&migratepages);
isolated = 0;
} else
count_vm_numa_event(NUMA_PAGE_MIGRATE);
BUG_ON(!list_empty(&migratepages));
return isolated;
out:
put_page(page);
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
* Migrates a THP to a given target node. page must be locked and is unlocked
* before returning.
*/
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
struct vm_area_struct *vma,
pmd_t *pmd, pmd_t entry,
unsigned long address,
struct page *page, int node)
{
unsigned long haddr = address & HPAGE_PMD_MASK;
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
struct page *new_page = NULL;
struct mem_cgroup *memcg = NULL;
int page_lru = page_is_file_cache(page);
/*
* Don't migrate pages that are mapped in multiple processes.
* TODO: Handle false sharing detection instead of this hammer
*/
if (page_mapcount(page) != 1)
goto out_dropref;
/*
* Rate-limit the amount of data that is being migrated to a node.
* Optimal placement is no good if the memory bus is saturated and
* all the time is being spent migrating!
*/
if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
goto out_dropref;
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
if (!new_page)
goto out_fail;
page_nid_xchg_last(new_page, page_nid_last(page));
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated) {
put_page(new_page);
goto out_fail;
}
/* Prepare a page as a migration target */
__set_page_locked(new_page);
SetPageSwapBacked(new_page);
/* anon mapping, we can simply copy page->mapping to the new page: */
new_page->mapping = page->mapping;
new_page->index = page->index;
migrate_page_copy(new_page, page);
WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, entry))) {
spin_unlock(&mm->page_table_lock);
/* Reverse changes made by migrate_page_copy() */
if (TestClearPageActive(new_page))
SetPageActive(page);
if (TestClearPageUnevictable(new_page))
SetPageUnevictable(page);
mlock_migrate_page(page, new_page);
unlock_page(new_page);
put_page(new_page); /* Free it */
/* Retake the caller's reference and put the page back on the LRU */
get_page(page);
putback_lru_page(page);
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
goto out_unlock;
}
/*
* Traditional migration needs to prepare the memcg charge
* transaction early to prevent the old page from being
* uncharged when installing migration entries. Here we can
* save the potential rollback and start the charge transfer
* only when migration is already known to end successfully.
*/
mem_cgroup_prepare_migration(page, new_page, &memcg);
entry = mk_pmd(new_page, vma->vm_page_prot);
entry = pmd_mknonnuma(entry);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
pmdp_clear_flush(vma, haddr, pmd);
set_pmd_at(mm, haddr, pmd, entry);
page_add_new_anon_rmap(new_page, vma, haddr);
update_mmu_cache_pmd(vma, address, &entry);
page_remove_rmap(page);
/*
* Finish the charge transaction under the page table lock to
* prevent split_huge_page() from dividing up the charge
* before it's fully transferred to the new page.
*/
mem_cgroup_end_migration(memcg, page, new_page, true);
spin_unlock(&mm->page_table_lock);
unlock_page(new_page);
unlock_page(page);
put_page(page); /* Drop the rmap reference */
put_page(page); /* Drop the LRU isolation reference */
count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru,
-HPAGE_PMD_NR);
return isolated;
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
entry = pmd_mknonnuma(entry);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry);
out_unlock:
unlock_page(page);
put_page(page);
return 0;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_NUMA */
| gpl-2.0 |
G620s-Development/android_kernel_huawei_g620s | drivers/cpuidle/driver.c | 449 | 5568 | /*
* driver.c - driver support
*
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* Shaohua Li <shaohua.li@intel.com>
* Adam Belay <abelay@novell.com>
*
* This code is licenced under the GPL.
*/
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include "cpuidle.h"
DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
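/*
 * Runs on each target CPU via on_each_cpu_mask(); @arg carries the
 * CLOCK_EVT_NOTIFY_BROADCAST_* command to switch that CPU's broadcast
 * timer notification on or off.
 */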
static void cpuidle_setup_broadcast_timer(void *arg)
{
int cpu = smp_processor_id();
clockevents_notify((long)(arg), &cpu);
}
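/*
 * Reset the driver's refcount and, if any of its idle states stops the
 * local timer (CPUIDLE_FLAG_TIMER_STOP), enable the broadcast timer on
 * the target CPU so wakeups still arrive in deep idle.
 */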
static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)
{
int i;
drv->refcnt = 0;
for (i = drv->state_count - 1; i >= 0 ; i--) {
if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
continue;
drv->bctimer = 1;
on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
break;
}
}
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
{
if (!drv || !drv->state_count)
return -EINVAL;
if (cpuidle_disabled())
return -ENODEV;
if (__cpuidle_get_cpu_driver(cpu))
return -EBUSY;
__cpuidle_driver_init(drv, cpu);
__cpuidle_set_cpu_driver(drv, cpu);
return 0;
}
static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
{
if (drv != __cpuidle_get_cpu_driver(cpu))
return;
if (!WARN_ON(drv->refcnt > 0))
__cpuidle_set_cpu_driver(NULL, cpu);
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
}
}
#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
{
per_cpu(cpuidle_drivers, cpu) = drv;
}
static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
{
return per_cpu(cpuidle_drivers, cpu);
}
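/*
 * With CONFIG_CPU_IDLE_MULTIPLE_DRIVERS each CPU has its own driver
 * pointer; the helpers below fan one register/unregister request out to
 * every present CPU, rolling back already-registered CPUs on failure.
 */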
static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv)
{
int cpu;
for_each_present_cpu(cpu)
__cpuidle_unregister_driver(drv, cpu);
}
static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv)
{
int ret = 0;
int i, cpu;
for_each_present_cpu(cpu) {
ret = __cpuidle_register_driver(drv, cpu);
if (ret)
break;
}
if (ret)
for_each_present_cpu(i) {
if (i == cpu)
break;
__cpuidle_unregister_driver(drv, i);
}
return ret;
}
int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu)
{
int ret;
spin_lock(&cpuidle_driver_lock);
ret = __cpuidle_register_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
return ret;
}
void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu)
{
spin_lock(&cpuidle_driver_lock);
__cpuidle_unregister_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
}
/**
* cpuidle_register_driver - registers a driver
* @drv: the driver
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
int ret;
spin_lock(&cpuidle_driver_lock);
ret = __cpuidle_register_all_cpu_driver(drv);
spin_unlock(&cpuidle_driver_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
* cpuidle_unregister_driver - unregisters a driver
* @drv: the driver
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
spin_lock(&cpuidle_driver_lock);
__cpuidle_unregister_all_cpu_driver(drv);
spin_unlock(&cpuidle_driver_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
#else
static struct cpuidle_driver *cpuidle_curr_driver;
static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
{
cpuidle_curr_driver = drv;
}
static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
{
return cpuidle_curr_driver;
}
/**
* cpuidle_register_driver - registers a driver
* @drv: the driver
*/
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
int ret, cpu;
cpu = get_cpu();
spin_lock(&cpuidle_driver_lock);
ret = __cpuidle_register_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
put_cpu();
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
* cpuidle_unregister_driver - unregisters a driver
* @drv: the driver
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
int cpu;
cpu = get_cpu();
spin_lock(&cpuidle_driver_lock);
__cpuidle_unregister_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
put_cpu();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
#endif
/**
* cpuidle_get_driver - return the current driver
*/
struct cpuidle_driver *cpuidle_get_driver(void)
{
struct cpuidle_driver *drv;
int cpu;
cpu = get_cpu();
drv = __cpuidle_get_cpu_driver(cpu);
put_cpu();
return drv;
}
EXPORT_SYMBOL_GPL(cpuidle_get_driver);
/**
* cpuidle_get_cpu_driver - return the driver tied with a cpu
*/
struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
{
if (!dev)
return NULL;
return __cpuidle_get_cpu_driver(dev->cpu);
}
EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
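/**
 * cpuidle_driver_ref - take a reference on the current cpuidle driver
 *
 * Bumps the refcount under cpuidle_driver_lock so the driver cannot be
 * unregistered while it is in use; pair with cpuidle_driver_unref().
 */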
struct cpuidle_driver *cpuidle_driver_ref(void)
{
struct cpuidle_driver *drv;
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
drv->refcnt++;
spin_unlock(&cpuidle_driver_lock);
return drv;
}
void cpuidle_driver_unref(void)
{
struct cpuidle_driver *drv = cpuidle_get_driver();
spin_lock(&cpuidle_driver_lock);
if (drv && !WARN_ON(drv->refcnt <= 0))
drv->refcnt--;
spin_unlock(&cpuidle_driver_lock);
}
| gpl-2.0 |
aimaletdinow/LABS | drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 449 | 98712 | /*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include "bna.h"
#include "bfi.h"
/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
ib->coalescing_timeo = coalescing_timeo;
ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
(u32)ib->coalescing_timeo, 0);
}
/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
(rxf)->vlan_strip_pending = true; \
} while (0)
#define bna_rxf_rss_cfg_soft_reset(rxf) \
do { \
if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
BNA_RSS_F_CFG_PENDING | \
BNA_RSS_F_STATUS_PENDING); \
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
enum bna_cleanup_type cleanup);
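/*
 * bfa_fsm_state_decl() expands to the prototypes for each state's entry
 * action (bna_rxf_sm_<state>_entry) and event handler
 * (bna_rxf_sm_<state>); the definitions follow below.
 */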
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
enum bna_rxf_event);
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
call_rxf_stop_cbfn(rxf);
}
static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_START:
if (rxf->flags & BNA_RXF_F_PAUSED) {
bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
call_rxf_start_cbfn(rxf);
} else
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_STOP:
call_rxf_stop_cbfn(rxf);
break;
case RXF_E_FAIL:
/* No-op */
break;
case RXF_E_CONFIG:
call_rxf_cam_fltr_cbfn(rxf);
break;
case RXF_E_PAUSE:
rxf->flags |= BNA_RXF_F_PAUSED;
call_rxf_pause_cbfn(rxf);
break;
case RXF_E_RESUME:
rxf->flags &= ~BNA_RXF_F_PAUSED;
call_rxf_resume_cbfn(rxf);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
call_rxf_pause_cbfn(rxf);
}
static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
case RXF_E_FAIL:
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CONFIG:
call_rxf_cam_fltr_cbfn(rxf);
break;
case RXF_E_RESUME:
rxf->flags &= ~BNA_RXF_F_PAUSED;
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
if (!bna_rxf_cfg_apply(rxf)) {
/* No more pending config updates */
bfa_fsm_set_state(rxf, bna_rxf_sm_started);
}
}
static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
break;
case RXF_E_FAIL:
bna_rxf_cfg_reset(rxf);
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
call_rxf_resume_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CONFIG:
/* No-op */
break;
case RXF_E_PAUSE:
rxf->flags |= BNA_RXF_F_PAUSED;
call_rxf_start_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
break;
case RXF_E_FW_RESP:
if (!bna_rxf_cfg_apply(rxf)) {
/* No more pending config updates */
bfa_fsm_set_state(rxf, bna_rxf_sm_started);
}
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
call_rxf_resume_cbfn(rxf);
}
static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
case RXF_E_FAIL:
bna_rxf_cfg_reset(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CONFIG:
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_PAUSE:
rxf->flags |= BNA_RXF_F_PAUSED;
if (!bna_rxf_fltr_clear(rxf))
bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
else
bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}
static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
bna_rxf_cfg_reset(rxf);
call_rxf_pause_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_FW_RESP:
if (!bna_rxf_fltr_clear(rxf)) {
/* No more pending CAM entries to clear */
bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
}
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}
static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
case RXF_E_FW_RESP:
bna_rxf_cfg_reset(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
default:
bfa_sm_fault(event);
}
}
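/*
 * The bna_bfi_* helpers below marshal RxF configuration into message
 * queue commands; bfa_msgq_cmd_post() hands each request to firmware,
 * and the response re-drives the state machine via RXF_E_FW_RESP.
 */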
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
enum bfi_enet_h2i_msgs req_type)
{
struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_ucast_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
struct bfi_enet_mcast_add_req *req =
&rxf->bfi_enet_cmd.mcast_add_req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_mcast_add_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
struct bfi_enet_mcast_del_req *req =
&rxf->bfi_enet_cmd.mcast_del_req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
req->handle = htons(handle);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_mcast_del_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
req->enable = status;
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_enable_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
req->enable = status;
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_enable_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
int i;
int j;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
req->block_idx = block_idx;
for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
req->bit_mask[i] =
htonl(rxf->vlan_filter_table[j]);
else
req->bit_mask[i] = 0xFFFFFFFF;
}
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
req->enable = rxf->vlan_strip_status;
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_enable_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
req->size = htons(rxf->rit_size);
memcpy(&req->table[0], rxf->rit, rxf->rit_size);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_rit_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
int i;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
req->cfg.type = rxf->rss_cfg.hash_type;
req->cfg.mask = rxf->rss_cfg.hash_mask;
for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
req->cfg.key[i] =
htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
req->enable = rxf->rss_status;
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_enable_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Find a multicast MAC already programmed into the CAM, i.e. on the active or pending-delete list */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
struct bna_mac *mac;
struct list_head *qe;
list_for_each(qe, &rxf->mcast_active_q) {
mac = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
return mac;
}
list_for_each(qe, &rxf->mcast_pending_del_q) {
mac = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
return mac;
}
return NULL;
}
static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
struct bna_mcam_handle *mchandle;
struct list_head *qe;
list_for_each(qe, &rxf->mcast_handle_q) {
mchandle = (struct bna_mcam_handle *)qe;
if (mchandle->handle == handle)
return mchandle;
}
return NULL;
}
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
struct bna_mac *mcmac;
struct bna_mcam_handle *mchandle;
mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
mchandle = bna_rxf_mchandle_get(rxf, handle);
if (mchandle == NULL) {
mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
mchandle->handle = handle;
mchandle->refcnt = 0;
list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
}
mchandle->refcnt++;
mcmac->handle = mchandle;
}
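/*
 * Drop one reference on the multicast CAM handle attached to @mac.  When
 * the last reference goes, the handle is returned to the pool and, for
 * BNA_HARD_CLEANUP, a firmware delete request is posted (returning 1 so
 * the caller waits for the response).
 */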
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
enum bna_cleanup_type cleanup)
{
struct bna_mcam_handle *mchandle;
int ret = 0;
mchandle = mac->handle;
if (mchandle == NULL)
return ret;
mchandle->refcnt--;
if (mchandle->refcnt == 0) {
if (cleanup == BNA_HARD_CLEANUP) {
bna_bfi_mcast_del_req(rxf, mchandle->handle);
ret = 1;
}
list_del(&mchandle->qe);
bfa_q_qe_init(&mchandle->qe);
bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
}
mac->handle = NULL;
return ret;
}
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
int ret;
/* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
/* Add multicast entries */
if (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
list_add_tail(&mac->qe, &rxf->mcast_active_q);
bna_bfi_mcast_add_req(rxf, mac);
return 1;
}
return 0;
}
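/*
 * Program at most one pending VLAN filter block per call: the lowest set
 * bit in vlan_pending_bitmask selects the next block index, and a return
 * of 1 tells the state machine a firmware response is outstanding.
 */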
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
u8 vlan_pending_bitmask;
int block_idx = 0;
if (rxf->vlan_pending_bitmask) {
vlan_pending_bitmask = rxf->vlan_pending_bitmask;
while (!(vlan_pending_bitmask & 0x1)) {
block_idx++;
vlan_pending_bitmask >>= 1;
}
rxf->vlan_pending_bitmask &= ~(1 << block_idx);
bna_bfi_rx_vlan_filter_set(rxf, block_idx);
return 1;
}
return 0;
}
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
struct list_head *qe;
struct bna_mac *mac;
int ret;
/* Drain the pending-delete mcast queue */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
if (ret)
return ret;
}
/* Move active mcast entries to pending_add_q */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
bfa_q_qe_init(qe);
list_add_tail(qe, &rxf->mcast_pending_add_q);
mac = (struct bna_mac *)qe;
if (bna_rxf_mcast_del(rxf, mac, cleanup))
return 1;
}
return 0;
}
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
if (rxf->rss_pending) {
if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
bna_bfi_rit_cfg(rxf);
return 1;
}
if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
bna_bfi_rss_cfg(rxf);
return 1;
}
if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
bna_bfi_rss_enable(rxf);
return 1;
}
}
return 0;
}
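/*
 * Apply one pending configuration change at a time.  Each helper posts a
 * single firmware request and returns 1; the cfg_wait state re-enters
 * here on RXF_E_FW_RESP until every helper reports nothing left to do.
 */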
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
if (bna_rxf_ucast_cfg_apply(rxf))
return 1;
if (bna_rxf_mcast_cfg_apply(rxf))
return 1;
if (bna_rxf_promisc_cfg_apply(rxf))
return 1;
if (bna_rxf_allmulti_cfg_apply(rxf))
return 1;
if (bna_rxf_vlan_cfg_apply(rxf))
return 1;
if (bna_rxf_vlan_strip_cfg_apply(rxf))
return 1;
if (bna_rxf_rss_cfg_apply(rxf))
return 1;
return 0;
}
/* Clear CAM filters; returns 1 while firmware delete requests are outstanding */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
return 1;
return 0;
}
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
bna_rxf_vlan_cfg_soft_reset(rxf);
bna_rxf_rss_cfg_soft_reset(rxf);
}
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
struct bna_rx *rx = rxf->rx;
struct bna_rxp *rxp;
struct list_head *qe;
int offset = 0;
rxf->rit_size = rit_size;
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
rxf->rit[offset] = rxp->cq.ccb->id;
offset++;
}
}
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_rsp *rsp =
(struct bfi_enet_rsp *)msghdr;
if (rsp->error) {
/* Clear ucast from cache */
rxf->ucast_active_set = 0;
}
bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_mcast_add_req *req =
&rxf->bfi_enet_cmd.mcast_add_req;
struct bfi_enet_mcast_add_rsp *rsp =
(struct bfi_enet_mcast_add_rsp *)msghdr;
bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
ntohs(rsp->handle));
bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
static void
bna_rxf_init(struct bna_rxf *rxf,
struct bna_rx *rx,
struct bna_rx_config *q_config,
struct bna_res_info *res_info)
{
rxf->rx = rx;
INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
rxf->ucast_pending_set = 0;
rxf->ucast_active_set = 0;
INIT_LIST_HEAD(&rxf->ucast_active_q);
rxf->ucast_pending_mac = NULL;
INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
INIT_LIST_HEAD(&rxf->mcast_active_q);
INIT_LIST_HEAD(&rxf->mcast_handle_q);
if (q_config->paused)
rxf->flags |= BNA_RXF_F_PAUSED;
rxf->rit = (u8 *)
res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
bna_rit_init(rxf, q_config->num_paths);
rxf->rss_status = q_config->rss_status;
if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
rxf->rss_cfg = q_config->rss_config;
rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
}
rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
memset(rxf->vlan_filter_table, 0,
(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
rxf->vlan_strip_status = q_config->vlan_strip_status;
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
struct bna_mac *mac;
rxf->ucast_pending_set = 0;
rxf->ucast_active_set = 0;
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
}
if (rxf->ucast_pending_mac) {
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
rxf->ucast_pending_mac);
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
rxf->rxmode_pending = 0;
rxf->rxmode_pending_bitmask = 0;
if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
rxf->rss_pending = 0;
rxf->vlan_strip_pending = false;
rxf->flags = 0;
rxf->rx = NULL;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}
static void
bna_rxf_start(struct bna_rxf *rxf)
{
rxf->start_cbfn = bna_rx_cb_rxf_started;
rxf->start_cbarg = rxf->rx;
bfa_fsm_send_event(rxf, RXF_E_START);
}
static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}
static void
bna_rxf_stop(struct bna_rxf *rxf)
{
rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
rxf->stop_cbarg = rxf->rx;
bfa_fsm_send_event(rxf, RXF_E_STOP);
}
static void
bna_rxf_fail(struct bna_rxf *rxf)
{
bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_rxf *rxf = &rx->rxf;
if (rxf->ucast_pending_mac == NULL) {
rxf->ucast_pending_mac =
bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
}
memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
rxf->ucast_pending_set = 1;
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_rxf *rxf = &rx->rxf;
struct bna_mac *mac;
/* Check if already added or pending addition */
if (bna_mac_find(&rxf->mcast_active_q, addr) ||
bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
if (cbfn)
cbfn(rx->bna->bnad, rx);
return BNA_CB_SUCCESS;
}
mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, addr, ETH_ALEN);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->ucast_active_q)) {
bfa_q_deq(&rxf->ucast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
}
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
for (i = 0, mcaddr = uclist; i < count; i++) {
mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Add the new entries */
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
}
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
}
return BNA_CB_UCAST_CAM_FULL;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
for (i = 0, mcaddr = mclist; i < count; i++) {
mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Add the new entries */
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_mcast_delall(struct bna_rx *rx,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_rxf *rxf = &rx->rxf;
struct list_head *qe;
struct bna_mac *mac, *del_mac;
int need_hw_config = 0;
/* Purge all entries from pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
}
/* Schedule all entries in active_q for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
need_hw_config = 1;
}
if (need_hw_config) {
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return;
}
if (cbfn)
(*cbfn)(rx->bna->bnad, rx);
}
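/*
 * Set the filter-table bit for @vlan_id and, when VLAN filtering is
 * enabled, mark the containing block dirty so the next config pass
 * rewrites it in firmware.
 */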
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] |= bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
rxf->vlan_pending_bitmask |= (1 << group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] &= ~bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
rxf->vlan_pending_bitmask |= (1 << group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
/* Delete MAC addresses previously added */
if (!list_empty(&rxf->ucast_pending_del_q)) {
bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
return 1;
}
/* Set default unicast MAC */
if (rxf->ucast_pending_set) {
rxf->ucast_pending_set = 0;
memcpy(rxf->ucast_active_mac.addr,
rxf->ucast_pending_mac->addr, ETH_ALEN);
rxf->ucast_active_set = 1;
bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
BFI_ENET_H2I_MAC_UCAST_SET_REQ);
return 1;
}
/* Add additional MAC entries */
if (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
list_add_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
}
return 0;
}
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
struct list_head *qe;
struct bna_mac *mac;
/* Drain the pending-delete ucast queue */
while (!list_empty(&rxf->ucast_pending_del_q)) {
bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
if (cleanup == BNA_SOFT_CLEANUP)
bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
mac);
else {
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
mac);
return 1;
}
}
/* Move active ucast entries to pending_add_q */
while (!list_empty(&rxf->ucast_active_q)) {
bfa_q_deq(&rxf->ucast_active_q, &qe);
bfa_q_qe_init(qe);
list_add_tail(qe, &rxf->ucast_pending_add_q);
if (cleanup == BNA_HARD_CLEANUP) {
mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
return 1;
}
}
if (rxf->ucast_active_set) {
rxf->ucast_pending_set = 1;
rxf->ucast_active_set = 0;
if (cleanup == BNA_HARD_CLEANUP) {
bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
return 1;
}
}
return 0;
}
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
/* Enable/disable promiscuous mode */
if (is_promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move promisc configuration from pending -> active */
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active |= BNA_RXMODE_PROMISC;
bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
return 1;
} else if (is_promisc_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move promisc configuration from pending -> active */
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
bna->promisc_rid = BFI_INVALID_RID;
bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
return 1;
}
return 0;
}
static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
struct bna *bna = rxf->rx->bna;
/* Clear pending promisc mode disable */
if (is_promisc_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
bna->promisc_rid = BFI_INVALID_RID;
if (cleanup == BNA_HARD_CLEANUP) {
bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
return 1;
}
}
/* Move promisc mode config from active -> pending */
if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
if (cleanup == BNA_HARD_CLEANUP) {
bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
return 1;
}
}
return 0;
}
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
/* Enable/disable allmulti mode */
if (is_allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move allmulti configuration from pending -> active */
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
return 1;
} else if (is_allmulti_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move allmulti configuration from pending -> active */
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
return 1;
}
return 0;
}
static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
/* Clear pending allmulti mode disable */
if (is_allmulti_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
if (cleanup == BNA_HARD_CLEANUP) {
bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
return 1;
}
}
/* Move allmulti mode config from active -> pending */
if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
if (cleanup == BNA_HARD_CLEANUP) {
bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
return 1;
}
}
return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
int ret = 0;
if (is_promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask) ||
(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
/* Do nothing if pending enable or already enabled */
} else if (is_promisc_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* Turn off pending disable command */
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
} else {
/* Schedule enable */
promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
bna->promisc_rid = rxf->rx->rid;
ret = 1;
}
return ret;
}
static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
int ret = 0;
if (is_promisc_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask) ||
(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
/* Do nothing if pending disable or already disabled */
} else if (is_promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* Turn off pending enable command */
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
bna->promisc_rid = BFI_INVALID_RID;
} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
/* Schedule disable */
promisc_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
ret = 1;
}
return ret;
}
static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
int ret = 0;
if (is_allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask) ||
(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
/* Do nothing if pending enable or already enabled */
} else if (is_allmulti_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* Turn off pending disable command */
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
} else {
/* Schedule enable */
allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
ret = 1;
}
return ret;
}
static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
int ret = 0;
if (is_allmulti_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask) ||
(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
/* Do nothing if pending disable or already disabled */
} else if (is_allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* Turn off pending enable command */
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
/* Schedule disable */
allmulti_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
ret = 1;
}
return ret;
}
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
if (rxf->vlan_strip_pending) {
rxf->vlan_strip_pending = false;
bna_bfi_vlan_strip_enable(rxf);
return 1;
}
return 0;
}
/* RX */
#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
(qcfg)->num_paths : ((qcfg)->num_paths * 2))
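/* Round a byte count up to a whole number of PAGE_SIZE pages. */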
#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
#define call_rx_stop_cbfn(rx) \
do { \
if ((rx)->stop_cbfn) { \
void (*cbfn)(void *, struct bna_rx *); \
void *cbarg; \
cbfn = (rx)->stop_cbfn; \
cbarg = (rx)->stop_cbarg; \
(rx)->stop_cbfn = NULL; \
(rx)->stop_cbarg = NULL; \
cbfn(cbarg, rx); \
} \
} while (0)
#define call_rx_stall_cbfn(rx) \
do { \
if ((rx)->rx_stall_cbfn) \
(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
} while (0)
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
do { \
struct bna_dma_addr cur_q_addr = \
*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
(bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
} while (0)
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
bfa_fsm_state_decl(bna_rx, stopped,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
struct bna_rx, enum bna_rx_event);
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
call_rx_stop_cbfn(rx);
}
static void bna_rx_sm_stopped(struct bna_rx *rx,
enum bna_rx_event event)
{
switch (event) {
case RX_E_START:
bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
break;
case RX_E_STOP:
call_rx_stop_cbfn(rx);
break;
case RX_E_FAIL:
/* no-op */
break;
default:
bfa_sm_fault(event);
break;
}
}
static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
bna_bfi_rx_enet_start(rx);
}
static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
case RX_E_STOPPED:
bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
case RX_E_STARTED:
bna_rx_enet_stop(rx);
break;
default:
bfa_sm_fault(event);
break;
}
}
static void bna_rx_sm_start_wait(struct bna_rx *rx,
enum bna_rx_event event)
{
switch (event) {
case RX_E_STOP:
bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
case RX_E_STARTED:
bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
break;
default:
bfa_sm_fault(event);
break;
}
}
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
rx->rx_post_cbfn(rx->bna->bnad, rx);
bna_rxf_start(&rx->rxf);
}
static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
bna_rxf_fail(&rx->rxf);
call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
case RX_E_RXF_STARTED:
bna_rxf_stop(&rx->rxf);
break;
case RX_E_RXF_STOPPED:
bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
call_rx_stall_cbfn(rx);
bna_rx_enet_stop(rx);
break;
default:
bfa_sm_fault(event);
break;
}
}
static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
case RX_E_STOPPED:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
case RX_E_STARTED:
bna_rx_enet_stop(rx);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
int is_regular = (rx->type == BNA_RX_T_REGULAR);
/* Start IB */
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
}
bna_ethport_cb_rx_started(&rx->bna->ethport);
}
static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_STOP:
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
bna_ethport_cb_rx_stopped(&rx->bna->ethport);
bna_rxf_stop(&rx->rxf);
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_failed);
bna_ethport_cb_rx_stopped(&rx->bna->ethport);
bna_rxf_fail(&rx->rxf);
call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
default:
bfa_sm_fault(event);
break;
}
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
enum bna_rx_event event)
{
switch (event) {
case RX_E_STOP:
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_failed);
bna_rxf_fail(&rx->rxf);
call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
case RX_E_RXF_STARTED:
bfa_fsm_set_state(rx, bna_rx_sm_started);
break;
default:
bfa_sm_fault(event);
break;
}
}
static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
case RX_E_RXF_STOPPED:
/* No-op */
break;
case RX_E_CLEANUP_DONE:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
default:
bfa_sm_fault(event);
break;
}
}
static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_START:
bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
break;
case RX_E_STOP:
bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
break;
case RX_E_FAIL:
case RX_E_RXF_STARTED:
case RX_E_RXF_STOPPED:
/* No-op */
break;
case RX_E_CLEANUP_DONE:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
default:
bfa_sm_fault(event);
break;
	}
}
static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_STOP:
bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_failed);
break;
case RX_E_CLEANUP_DONE:
bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
break;
default:
bfa_sm_fault(event);
break;
}
}
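/*
 * Build and post the BFI_ENET_H2I_RX_CFG_SET_REQ describing every
 * receive path (RxQ/CQ page tables, buffer sizes, interrupt moderation)
 * to firmware as a single message.
 */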
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
struct list_head *rxp_qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
i++, rxp_qe = bfa_q_next(rxp_qe)) {
rxp = (struct bna_rxp *)rxp_qe;
GET_RXQS(rxp, q0, q1);
switch (rxp->type) {
case BNA_RXP_SLR:
case BNA_RXP_HDS:
/* Small RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
&q1->qpt);
cfg_req->q_cfg[i].qs.rx_buffer_size =
htons((u16)q1->buffer_size);
/* Fall through */
case BNA_RXP_SINGLE:
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
if (q0->multi_buffer)
			/* multi-buffer is enabled by allocating
			 * a new rx with a new set of resources;
			 * q0->buffer_size should already be
			 * initialized to the fragment size.
			 */
cfg_req->rx_cfg.multi_buffer =
BNA_STATUS_T_ENABLED;
else
q0->buffer_size =
bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
default:
BUG_ON(1);
}
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
&rxp->cq.qpt);
cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
rxp->cq.ib.ib_seg_host_addr.lsb;
cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
rxp->cq.ib.ib_seg_host_addr.msb;
cfg_req->q_cfg[i].ib.intr.msix_index =
htons((u16)rxp->cq.ib.intr_vector);
}
cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
? BNA_STATUS_T_ENABLED :
BNA_STATUS_T_DISABLED;
cfg_req->ib_cfg.coalescing_timeout =
htonl((u32)rxp->cq.ib.coalescing_timeo);
cfg_req->ib_cfg.inter_pkt_timeout =
htonl((u32)rxp->cq.ib.interpkt_timeo);
cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
switch (rxp->type) {
case BNA_RXP_SLR:
cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
break;
case BNA_RXP_HDS:
cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
break;
case BNA_RXP_SINGLE:
cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
break;
default:
BUG_ON(1);
}
cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
&req->mh);
bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_rx_enet_stop(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
/* Stop IB */
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
bna_ib_stop(rx->bna, &rxp->cq.ib);
}
bna_bfi_rx_enet_stop(rx);
}
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
if ((rx_mod->rx_free_count == 0) ||
(rx_mod->rxp_free_count == 0) ||
(rx_mod->rxq_free_count == 0))
return 0;
if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
(rx_mod->rxq_free_count < rx_cfg->num_paths))
return 0;
} else {
if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
return 0;
}
return 1;
}
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
struct list_head *qe = NULL;
bfa_q_deq(&rx_mod->rxq_free_q, &qe);
rx_mod->rxq_free_count--;
rxq = (struct bna_rxq *)qe;
bfa_q_qe_init(&rxq->qe);
return rxq;
}
static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
bfa_q_qe_init(&rxq->qe);
list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
struct list_head *qe = NULL;
struct bna_rxp *rxp = NULL;
bfa_q_deq(&rx_mod->rxp_free_q, &qe);
rx_mod->rxp_free_count--;
rxp = (struct bna_rxp *)qe;
bfa_q_qe_init(&rxp->qe);
return rxp;
}
static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
bfa_q_qe_init(&rxp->qe);
list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct list_head *qe = NULL;
struct bna_rx *rx = NULL;
if (type == BNA_RX_T_REGULAR) {
bfa_q_deq(&rx_mod->rx_free_q, &qe);
} else
bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
rx_mod->rx_free_count--;
rx = (struct bna_rx *)qe;
bfa_q_qe_init(&rx->qe);
list_add_tail(&rx->qe, &rx_mod->rx_active_q);
rx->type = type;
return rx;
}
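/*
 * Return @rx to the free list, keeping the list sorted by rid so that
 * bna_rx_get() hands out the lowest-rid object for regular Rx and the
 * highest for loopback.
 */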
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
struct list_head *prev_qe = NULL;
struct list_head *qe;
bfa_q_qe_init(&rx->qe);
list_for_each(qe, &rx_mod->rx_free_q) {
if (((struct bna_rx *)qe)->rid < rx->rid)
prev_qe = qe;
else
break;
}
if (prev_qe == NULL) {
/* This is the first entry */
bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
/* This is the last entry */
list_add_tail(&rx->qe, &rx_mod->rx_free_q);
} else {
/* Somewhere in the middle */
bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
bfa_q_prev(&rx->qe) = prev_qe;
bfa_q_next(prev_qe) = &rx->qe;
bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
}
rx_mod->rx_free_count++;
}
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
struct bna_rxq *q1)
{
switch (rxp->type) {
case BNA_RXP_SINGLE:
rxp->rxq.single.only = q0;
rxp->rxq.single.reserved = NULL;
break;
case BNA_RXP_SLR:
rxp->rxq.slr.large = q0;
rxp->rxq.slr.small = q1;
break;
case BNA_RXP_HDS:
rxp->rxq.hds.data = q0;
rxp->rxq.hds.hdr = q1;
break;
default:
break;
}
}
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
struct bna_rxp *rxp,
u32 page_count,
u32 page_size,
struct bna_mem_descr *qpt_mem,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
u8 *kva;
u64 dma;
struct bna_dma_addr bna_dma;
int i;
rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
rxq->qpt.page_count = page_count;
rxq->qpt.page_size = page_size;
rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
rxq->rcb->sw_q = page_mem->kva;
kva = page_mem->kva;
BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < rxq->qpt.page_count; i++) {
rxq->rcb->sw_qpt[i] = kva;
kva += PAGE_SIZE;
BNA_SET_DMA_ADDR(dma, &bna_dma);
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
bna_dma.lsb;
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
bna_dma.msb;
dma += PAGE_SIZE;
}
}
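/*
 * Layout produced by the QPT setup above (a sketch, assuming the
 * contiguous page_mem block seen here):
 *
 *	kv_qpt_ptr[i]  - DMA address of page i, consumed by hardware
 *	rcb->sw_qpt[i] - kernel virtual address of the same page,
 *	                 used by the driver to access descriptors
 *
 * bna_rxp_cqpt_setup() below builds the identical structure for the
 * completion queue.
 */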
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
u32 page_count,
u32 page_size,
struct bna_mem_descr *qpt_mem,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
u8 *kva;
u64 dma;
struct bna_dma_addr bna_dma;
int i;
rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
rxp->cq.qpt.page_count = page_count;
rxp->cq.qpt.page_size = page_size;
rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
rxp->cq.ccb->sw_q = page_mem->kva;
kva = page_mem->kva;
BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < rxp->cq.qpt.page_count; i++) {
rxp->cq.ccb->sw_qpt[i] = kva;
kva += PAGE_SIZE;
BNA_SET_DMA_ADDR(dma, &bna_dma);
((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
bna_dma.lsb;
((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
bna_dma.msb;
dma += PAGE_SIZE;
}
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
bfa_wc_down(&rx_mod->rx_stop_wc);
}
static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
if (rx_mod->stop_cbfn)
rx_mod->stop_cbfn(&rx_mod->bna->enet);
rx_mod->stop_cbfn = NULL;
}
static void
bna_rx_start(struct bna_rx *rx)
{
rx->rx_flags |= BNA_RX_F_ENET_STARTED;
if (rx->rx_flags & BNA_RX_F_ENABLED)
bfa_fsm_send_event(rx, RX_E_START);
}
static void
bna_rx_stop(struct bna_rx *rx)
{
rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
else {
rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
rx->stop_cbarg = &rx->bna->rx_mod;
bfa_fsm_send_event(rx, RX_E_STOP);
}
}
static void
bna_rx_fail(struct bna_rx *rx)
{
/* Indicate Enet is no longer started, and fail the rx object */
rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
if (type == BNA_RX_T_LOOPBACK)
rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
if (rx->type == type)
bna_rx_start(rx);
}
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
if (rx->type == type) {
bfa_wc_up(&rx_mod->rx_stop_wc);
bna_rx_stop(rx);
}
}
bfa_wc_wait(&rx_mod->rx_stop_wc);
}
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
bna_rx_fail(rx);
}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
struct bna_res_info *res_info)
{
int index;
struct bna_rx *rx_ptr;
struct bna_rxp *rxp_ptr;
struct bna_rxq *rxq_ptr;
rx_mod->bna = bna;
rx_mod->flags = 0;
rx_mod->rx = (struct bna_rx *)
res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
rx_mod->rxp = (struct bna_rxp *)
res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
rx_mod->rxq = (struct bna_rxq *)
res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
/* Initialize the queues */
INIT_LIST_HEAD(&rx_mod->rx_free_q);
rx_mod->rx_free_count = 0;
INIT_LIST_HEAD(&rx_mod->rxq_free_q);
rx_mod->rxq_free_count = 0;
INIT_LIST_HEAD(&rx_mod->rxp_free_q);
rx_mod->rxp_free_count = 0;
INIT_LIST_HEAD(&rx_mod->rx_active_q);
/* Build RX queues */
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rx_ptr = &rx_mod->rx[index];
bfa_q_qe_init(&rx_ptr->qe);
INIT_LIST_HEAD(&rx_ptr->rxp_q);
rx_ptr->bna = NULL;
rx_ptr->rid = index;
rx_ptr->stop_cbfn = NULL;
rx_ptr->stop_cbarg = NULL;
list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
rx_mod->rx_free_count++;
}
/* build RX-path queue */
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rxp_ptr = &rx_mod->rxp[index];
bfa_q_qe_init(&rxp_ptr->qe);
list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
/* build RXQ queue */
for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
rxq_ptr = &rx_mod->rxq[index];
bfa_q_qe_init(&rxq_ptr->qe);
list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &rx_mod->rx_free_q)
i++;
i = 0;
list_for_each(qe, &rx_mod->rxp_free_q)
i++;
i = 0;
list_for_each(qe, &rx_mod->rxq_free_q)
i++;
rx_mod->bna = NULL;
}
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
struct list_head *rxp_qe;
int i;
bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
sizeof(struct bfi_enet_rx_cfg_rsp));
rx->hw_id = cfg_rsp->hw_id;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
i++, rxp_qe = bfa_q_next(rxp_qe)) {
rxp = (struct bna_rxp *)rxp_qe;
GET_RXQS(rxp, q0, q1);
/* Setup doorbells */
rxp->cq.ccb->i_dbell->doorbell_addr =
rx->bna->pcidev.pci_bar_kva
+ ntohl(cfg_rsp->q_handles[i].i_dbell);
rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
q0->rcb->q_dbell =
rx->bna->pcidev.pci_bar_kva
+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
if (q1) {
q1->rcb->q_dbell =
rx->bna->pcidev.pci_bar_kva
+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
}
/* Initialize producer/consumer indexes */
(*rxp->cq.ccb->hw_producer_index) = 0;
rxp->cq.ccb->producer_index = 0;
q0->rcb->producer_index = q0->rcb->consumer_index = 0;
if (q1)
q1->rcb->producer_index = q1->rcb->consumer_index = 0;
}
bfa_fsm_send_event(rx, RX_E_STARTED);
}
void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
bfa_fsm_send_event(rx, RX_E_STOPPED);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
u32 cq_size, hq_size, dq_size;
u32 cpage_count, hpage_count, dpage_count;
struct bna_mem_info *mem_info;
u32 cq_depth;
u32 hq_depth;
u32 dq_depth;
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
BNA_TO_POWER_OF_2_HIGH(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
BNA_TO_POWER_OF_2_HIGH(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
} else
hpage_count = 0;
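/*
 * Net effect of the sizing above: each queue depth is first rounded up
 * with BNA_TO_POWER_OF_2_HIGH(), multiplied by the per-queue work-item
 * size, aligned to PAGE_SIZE and converted to a page count via
 * SIZE_TO_PAGES(). Header queues are sized only for non-single rxp
 * types.
 */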
res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_ccb);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_rcb);
mem_info->num = BNA_GET_RXQS(q_cfg);
res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = cpage_count * sizeof(void *);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * cpage_count;
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = dpage_count * sizeof(void *);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * dpage_count;
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = hpage_count * sizeof(void *);
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * hpage_count;
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = BFI_IBIDX_SIZE;
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = BFI_ENET_RSS_RIT_MAX;
mem_info->num = 1;
res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rx_config *rx_cfg,
const struct bna_rx_event_cbfn *rx_cbfn,
struct bna_res_info *res_info,
void *priv)
{
struct bna_rx_mod *rx_mod = &bna->rx_mod;
struct bna_rx *rx;
struct bna_rxp *rxp;
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
struct bna_mem_descr *hqunmap_mem;
struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
struct bna_mem_descr *hqpt_mem;
struct bna_mem_descr *dqpt_mem;
struct bna_mem_descr *hsqpt_mem;
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
u32 dpage_count, hpage_count;
u32 hq_idx, dq_idx, rcb_idx;
u32 cq_depth, i;
u32 page_count;
if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
PAGE_SIZE;
dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
PAGE_SIZE;
hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
PAGE_SIZE;
rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
rx->bna = bna;
rx->rx_flags = 0;
INIT_LIST_HEAD(&rx->rxp_q);
rx->stop_cbfn = NULL;
rx->stop_cbarg = NULL;
rx->priv = priv;
rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
/* Following callbacks are mandatory */
rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
switch (rx->type) {
case BNA_RX_T_REGULAR:
if (!(rx->bna->rx_mod.flags &
BNA_RX_MOD_F_ENET_LOOPBACK))
rx->rx_flags |= BNA_RX_F_ENET_STARTED;
break;
case BNA_RX_T_LOOPBACK:
if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
rx->rx_flags |= BNA_RX_F_ENET_STARTED;
break;
}
}
rx->num_paths = rx_cfg->num_paths;
for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
rxp->rx = rx;
rxp->cq.rx = rx;
q0 = bna_rxq_get(rx_mod);
if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
q1 = NULL;
else
q1 = bna_rxq_get(rx_mod);
if (1 == intr_info->num)
rxp->vector = intr_info->idl[0].vector;
else
rxp->vector = intr_info->idl[i].vector;
/* Setup IB */
rxp->cq.ib.ib_seg_host_addr.lsb =
res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
rxp->cq.ib.ib_seg_host_addr.msb =
res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
rxp->cq.ib.ib_seg_host_addr_kva =
res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
rxp->cq.ib.intr_type = intr_info->intr_type;
if (intr_info->intr_type == BNA_INTR_T_MSIX)
rxp->cq.ib.intr_vector = rxp->vector;
else
rxp->cq.ib.intr_vector = (1 << rxp->vector);
rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
bna_rxp_add_rxqs(rxp, q0, q1);
/* Setup large Q */
q0->rx = rx;
q0->rxp = rxp;
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
rcb_idx++; dq_idx++;
q0->rcb->q_depth = rx_cfg->q0_depth;
q0->q_depth = rx_cfg->q0_depth;
q0->multi_buffer = rx_cfg->q0_multi_buf;
q0->buffer_size = rx_cfg->q0_buf_size;
q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
q0->rx_packets = q0->rx_bytes = 0;
q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
if (rx->rcb_setup_cbfn)
rx->rcb_setup_cbfn(bnad, q0->rcb);
/* Setup small Q */
if (q1) {
q1->rx = rx;
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
rcb_idx++; hq_idx++;
q1->rcb->q_depth = rx_cfg->q1_depth;
q1->q_depth = rx_cfg->q1_depth;
q1->multi_buffer = BNA_STATUS_T_DISABLED;
q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
: rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
&hqpt_mem[i], &hsqpt_mem[i],
&hpage_mem[i]);
if (rx->rcb_setup_cbfn)
rx->rcb_setup_cbfn(bnad, q1->rcb);
}
/* Setup CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
cq_depth = rx_cfg->q0_depth +
((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
0 : rx_cfg->q1_depth);
/* If multi-buffer is enabled, the sum of q0_depth
 * and q1_depth need not be a power of 2.
 */
BNA_TO_POWER_OF_2_HIGH(cq_depth);
rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
if (q1) {
rxp->cq.ccb->rcb[1] = q1->rcb;
q1->rcb->ccb = rxp->cq.ccb;
}
rxp->cq.ccb->hw_producer_index =
(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
rxp->cq.ccb->rx_coalescing_timeo =
rxp->cq.ib.coalescing_timeo;
rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
rxp->cq.ccb->bnad = bna->bnad;
rxp->cq.ccb->id = i;
bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
if (rx->ccb_setup_cbfn)
rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
}
rx->hds_cfg = rx_cfg->hds_config;
bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_mod->rid_mask |= (1 << rx->rid);
return rx;
}
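/*
 * Illustrative call sequence from the bnad driver (a sketch only; the
 * actual caller lives outside this file):
 *
 *	bna_rx_res_req(&rx_cfg, res_info);
 *	... allocate each res_info[] entry ...
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
 *	if (rx)
 *		bna_rx_enable(rx);
 */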
void
bna_rx_destroy(struct bna_rx *rx)
{
struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
struct bna_rxq *q0 = NULL;
struct bna_rxq *q1 = NULL;
struct bna_rxp *rxp;
struct list_head *qe;
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
bfa_q_deq(&rx->rxp_q, &rxp);
GET_RXQS(rxp, q0, q1);
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
q0->rcb = NULL;
q0->rxp = NULL;
q0->rx = NULL;
bna_rxq_put(rx_mod, q0);
if (q1) {
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
q1->rcb = NULL;
q1->rxp = NULL;
q1->rx = NULL;
bna_rxq_put(rx_mod, q1);
}
rxp->rxq.slr.large = NULL;
rxp->rxq.slr.small = NULL;
if (rx->ccb_destroy_cbfn)
rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
rxp->cq.ccb = NULL;
rxp->rx = NULL;
bna_rxp_put(rx_mod, rxp);
}
list_for_each(qe, &rx_mod->rx_active_q) {
if (qe == &rx->qe) {
list_del(&rx->qe);
bfa_q_qe_init(&rx->qe);
break;
}
}
rx_mod->rid_mask &= ~(1 << rx->rid);
rx->bna = NULL;
rx->priv = NULL;
bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
return;
rx->rx_flags |= BNA_RX_F_ENABLED;
if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
bfa_fsm_send_event(rx, RX_E_START);
}
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_rx *))
{
if (type == BNA_SOFT_CLEANUP) {
/* h/w should not be accessed. Treat as if we're stopped */
(*cbfn)(rx->bna->bnad, rx);
} else {
rx->stop_cbfn = cbfn;
rx->stop_cbarg = rx->bna->bnad;
rx->rx_flags &= ~BNA_RX_F_ENABLED;
bfa_fsm_send_event(rx, RX_E_STOP);
}
}
void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
rxf->vlan_strip_pending = true;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
rxf->vlan_strip_pending = true;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
enum bna_rxmode bitmask,
void (*cbfn)(struct bnad *, struct bna_rx *))
{
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
/* Error checks */
if (is_promisc_enable(new_mode, bitmask)) {
/* If promisc mode is already enabled elsewhere in the system */
if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
(rx->bna->promisc_rid != rxf->rx->rid))
goto err_return;
/* If default mode is already enabled in the system */
if (rx->bna->default_mode_rid != BFI_INVALID_RID)
goto err_return;
/* Trying to enable promiscuous and default mode together */
if (is_default_enable(new_mode, bitmask))
goto err_return;
}
if (is_default_enable(new_mode, bitmask)) {
/* If default mode is already enabled elsewhere in the system */
if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
(rx->bna->default_mode_rid != rxf->rx->rid)) {
goto err_return;
}
/* If promiscuous mode is already enabled in the system */
if (rx->bna->promisc_rid != BFI_INVALID_RID)
goto err_return;
}
/* Process the commands */
if (is_promisc_enable(new_mode, bitmask)) {
if (bna_rxf_promisc_enable(rxf))
need_hw_config = 1;
} else if (is_promisc_disable(new_mode, bitmask)) {
if (bna_rxf_promisc_disable(rxf))
need_hw_config = 1;
}
if (is_allmulti_enable(new_mode, bitmask)) {
if (bna_rxf_allmulti_enable(rxf))
need_hw_config = 1;
} else if (is_allmulti_disable(new_mode, bitmask)) {
if (bna_rxf_allmulti_disable(rxf))
need_hw_config = 1;
}
/* Trigger h/w if needed */
if (need_hw_config) {
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
} else if (cbfn)
(*cbfn)(rx->bna->bnad, rx);
return BNA_CB_SUCCESS;
err_return:
return BNA_CB_FAIL;
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
struct bna_rxp *rxp;
struct list_head *qe;
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
int i, j;
for (i = 0; i < BNA_LOAD_T_MAX; i++)
for (j = 0; j < BNA_BIAS_T_MAX; j++)
bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
struct bna *bna = ccb->cq->rx->bna;
u32 load, bias;
u32 pkt_rt, small_rt, large_rt;
u8 coalescing_timeo;
if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
(ccb->pkt_rate.large_pkt_cnt == 0))
return;
/* Arrive at preconfigured coalescing timeo value based on pkt rate */
small_rt = ccb->pkt_rate.small_pkt_cnt;
large_rt = ccb->pkt_rate.large_pkt_cnt;
pkt_rt = small_rt + large_rt;
if (pkt_rt < BNA_PKT_RATE_10K)
load = BNA_LOAD_T_LOW_4;
else if (pkt_rt < BNA_PKT_RATE_20K)
load = BNA_LOAD_T_LOW_3;
else if (pkt_rt < BNA_PKT_RATE_30K)
load = BNA_LOAD_T_LOW_2;
else if (pkt_rt < BNA_PKT_RATE_40K)
load = BNA_LOAD_T_LOW_1;
else if (pkt_rt < BNA_PKT_RATE_50K)
load = BNA_LOAD_T_HIGH_1;
else if (pkt_rt < BNA_PKT_RATE_60K)
load = BNA_LOAD_T_HIGH_2;
else if (pkt_rt < BNA_PKT_RATE_80K)
load = BNA_LOAD_T_HIGH_3;
else
load = BNA_LOAD_T_HIGH_4;
if (small_rt > (large_rt << 1))
bias = 0;
else
bias = 1;
ccb->pkt_rate.small_pkt_cnt = 0;
ccb->pkt_rate.large_pkt_cnt = 0;
coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
ccb->rx_coalescing_timeo = coalescing_timeo;
/* Set it to IB */
bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{12, 12},
{6, 10},
{5, 10},
{4, 8},
{3, 6},
{3, 6},
{2, 4},
{1, 2},
};
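/*
 * Rows of bna_napi_dim_vector are indexed by the bna_load_type derived
 * in bna_rx_dim_update() above; columns by the small/large packet
 * bias. Each entry is a coalescing timeout ultimately handed to
 * bna_ib_coalescing_timeo_set().
 */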
/* TX */
#define call_tx_stop_cbfn(tx) \
do { \
if ((tx)->stop_cbfn) { \
void (*cbfn)(void *, struct bna_tx *); \
void *cbarg; \
cbfn = (tx)->stop_cbfn; \
cbarg = (tx)->stop_cbarg; \
(tx)->stop_cbfn = NULL; \
(tx)->stop_cbarg = NULL; \
cbfn(cbarg, (tx)); \
} \
} while (0)
#define call_tx_prio_change_cbfn(tx) \
do { \
if ((tx)->prio_change_cbfn) { \
void (*cbfn)(struct bnad *, struct bna_tx *); \
cbfn = (tx)->prio_change_cbfn; \
(tx)->prio_change_cbfn = NULL; \
cbfn((tx)->bna->bnad, (tx)); \
} \
} while (0)
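/*
 * Both macros above deliberately clear the callback pointer before
 * invoking it, so a callback that immediately re-arms the tx object
 * cannot be invoked twice for the same stop or priority-change event.
 */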
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
enum bna_tx_event {
TX_E_START = 1,
TX_E_STOP = 2,
TX_E_FAIL = 3,
TX_E_STARTED = 4,
TX_E_STOPPED = 5,
TX_E_PRIO_CHANGE = 6,
TX_E_CLEANUP_DONE = 7,
TX_E_BW_UPDATE = 8,
};
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
enum bna_tx_event);
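/*
 * Happy-path view of the tx FSM declared above (failure and
 * priority-change branches omitted):
 *
 *	stopped --TX_E_START--> start_wait --TX_E_STARTED--> started
 *	started --TX_E_STOP--> stop_wait --TX_E_STOPPED--> cleanup_wait
 *	cleanup_wait --TX_E_CLEANUP_DONE--> stopped
 */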
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
call_tx_stop_cbfn(tx);
}
static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_START:
bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
break;
case TX_E_STOP:
call_tx_stop_cbfn(tx);
break;
case TX_E_FAIL:
/* No-op */
break;
case TX_E_PRIO_CHANGE:
call_tx_prio_change_cbfn(tx);
break;
case TX_E_BW_UPDATE:
/* No-op */
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
bna_bfi_tx_enet_start(tx);
}
static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
break;
case TX_E_FAIL:
tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_STARTED:
if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
BNA_TX_F_BW_UPDATED);
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
} else
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
case TX_E_PRIO_CHANGE:
tx->flags |= BNA_TX_F_PRIO_CHANGED;
break;
case TX_E_BW_UPDATE:
tx->flags |= BNA_TX_F_BW_UPDATED;
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
int is_regular = (tx->type == BNA_TX_T_REGULAR);
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
txq->tcb->priority = txq->priority;
/* Start IB */
bna_ib_start(tx->bna, &txq->ib, is_regular);
}
tx->tx_resume_cbfn(tx->bna->bnad, tx);
}
static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
tx->tx_stall_cbfn(tx->bna->bnad, tx);
bna_tx_enet_stop(tx);
break;
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
tx->tx_stall_cbfn(tx->bna->bnad, tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}
static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
case TX_E_STOPPED:
bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
case TX_E_STARTED:
/**
* We are here due to start_wait -> stop_wait transition on
* TX_E_STOP event
*/
bna_tx_enet_stop(tx);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}
static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
case TX_E_CLEANUP_DONE:
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
tx->tx_stall_cbfn(tx->bna->bnad, tx);
bna_tx_enet_stop(tx);
}
static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
break;
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
case TX_E_STOPPED:
bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}
static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
break;
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
break;
case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
case TX_E_CLEANUP_DONE:
bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}
static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_START:
bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
break;
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
break;
case TX_E_FAIL:
/* No-op */
break;
case TX_E_CLEANUP_DONE:
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}
static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
break;
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
break;
case TX_E_CLEANUP_DONE:
bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
break;
case TX_E_BW_UPDATE:
/* No-op */
break;
default:
bfa_sm_fault(event);
}
}
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
struct bna_txq *txq = NULL;
struct list_head *qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
cfg_req->num_queues = tx->num_txq;
for (i = 0, qe = bfa_q_first(&tx->txq_q);
i < tx->num_txq;
i++, qe = bfa_q_next(qe)) {
txq = (struct bna_txq *)qe;
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
cfg_req->q_cfg[i].q.priority = txq->priority;
cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
txq->ib.ib_seg_host_addr.lsb;
cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
txq->ib.ib_seg_host_addr.msb;
cfg_req->q_cfg[i].ib.intr.msix_index =
htons((u16)txq->ib.intr_vector);
}
cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
cfg_req->ib_cfg.coalescing_timeout =
htonl((u32)txq->ib.coalescing_timeo);
cfg_req->ib_cfg.inter_pkt_timeout =
htonl((u32)txq->ib.interpkt_timeo);
cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
&req->mh);
bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_tx_enet_stop(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
/* Stop IB */
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_stop(tx->bna, &txq->ib);
}
bna_bfi_tx_enet_stop(tx);
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
struct bna_mem_descr *qpt_mem,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
u8 *kva;
u64 dma;
struct bna_dma_addr bna_dma;
int i;
txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
txq->qpt.kv_qpt_ptr = qpt_mem->kva;
txq->qpt.page_count = page_count;
txq->qpt.page_size = page_size;
txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
txq->tcb->sw_q = page_mem->kva;
kva = page_mem->kva;
BNA_GET_DMA_ADDR(&page_mem->dma, dma);
for (i = 0; i < page_count; i++) {
txq->tcb->sw_qpt[i] = kva;
kva += PAGE_SIZE;
BNA_SET_DMA_ADDR(dma, &bna_dma);
((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
bna_dma.lsb;
((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
bna_dma.msb;
dma += PAGE_SIZE;
}
}
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct list_head *qe = NULL;
struct bna_tx *tx = NULL;
if (list_empty(&tx_mod->tx_free_q))
return NULL;
if (type == BNA_TX_T_REGULAR) {
bfa_q_deq(&tx_mod->tx_free_q, &qe);
} else {
bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
}
tx = (struct bna_tx *)qe;
bfa_q_qe_init(&tx->qe);
tx->type = type;
return tx;
}
static void
bna_tx_free(struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
struct bna_txq *txq;
struct list_head *prev_qe;
struct list_head *qe;
while (!list_empty(&tx->txq_q)) {
bfa_q_deq(&tx->txq_q, &txq);
bfa_q_qe_init(&txq->qe);
txq->tcb = NULL;
txq->tx = NULL;
list_add_tail(&txq->qe, &tx_mod->txq_free_q);
}
list_for_each(qe, &tx_mod->tx_active_q) {
if (qe == &tx->qe) {
list_del(&tx->qe);
bfa_q_qe_init(&tx->qe);
break;
}
}
tx->bna = NULL;
tx->priv = NULL;
prev_qe = NULL;
list_for_each(qe, &tx_mod->tx_free_q) {
if (((struct bna_tx *)qe)->rid < tx->rid)
prev_qe = qe;
else {
break;
}
}
if (prev_qe == NULL) {
/* This is the first entry */
bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
/* This is the last entry */
list_add_tail(&tx->qe, &tx_mod->tx_free_q);
} else {
/* Somewhere in the middle */
bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
bfa_q_prev(&tx->qe) = prev_qe;
bfa_q_next(prev_qe) = &tx->qe;
bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
}
}
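/*
 * bna_tx_free() uses the same rid-sorted re-insertion as bna_rx_put()
 * above, so bna_tx_get() hands out the lowest free rid for regular tx
 * objects and the highest for loopback.
 */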
static void
bna_tx_start(struct bna_tx *tx)
{
tx->flags |= BNA_TX_F_ENET_STARTED;
if (tx->flags & BNA_TX_F_ENABLED)
bfa_fsm_send_event(tx, TX_E_START);
}
static void
bna_tx_stop(struct bna_tx *tx)
{
tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
tx->stop_cbarg = &tx->bna->tx_mod;
tx->flags &= ~BNA_TX_F_ENET_STARTED;
bfa_fsm_send_event(tx, TX_E_STOP);
}
static void
bna_tx_fail(struct bna_tx *tx)
{
tx->flags &= ~BNA_TX_F_ENET_STARTED;
bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
struct bna_txq *txq = NULL;
struct list_head *qe;
int i;
bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
sizeof(struct bfi_enet_tx_cfg_rsp));
tx->hw_id = cfg_rsp->hw_id;
for (i = 0, qe = bfa_q_first(&tx->txq_q);
i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
txq = (struct bna_txq *)qe;
/* Setup doorbells */
txq->tcb->i_dbell->doorbell_addr =
tx->bna->pcidev.pci_bar_kva
+ ntohl(cfg_rsp->q_handles[i].i_dbell);
txq->tcb->q_dbell =
tx->bna->pcidev.pci_bar_kva
+ ntohl(cfg_rsp->q_handles[i].q_dbell);
txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
/* Initialize producer/consumer indexes */
(*txq->tcb->hw_consumer_index) = 0;
txq->tcb->producer_index = txq->tcb->consumer_index = 0;
}
bfa_fsm_send_event(tx, TX_E_STARTED);
}
void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
bfa_fsm_send_event(tx, TX_E_STOPPED);
}
void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
struct list_head *qe;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
}
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
u32 q_size;
u32 page_count;
struct bna_mem_info *mem_info;
res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_tcb);
mem_info->num = num_txq;
q_size = txq_depth * BFI_TXQ_WI_SIZE;
q_size = ALIGN(q_size, PAGE_SIZE);
page_count = q_size >> PAGE_SHIFT;
res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = page_count * sizeof(struct bna_dma_addr);
mem_info->num = num_txq;
res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = page_count * sizeof(void *);
mem_info->num = num_txq;
res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * page_count;
mem_info->num = num_txq;
res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = BFI_IBIDX_SIZE;
mem_info->num = num_txq;
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
BNA_INTR_T_MSIX;
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_config *tx_cfg,
const struct bna_tx_event_cbfn *tx_cbfn,
struct bna_res_info *res_info, void *priv)
{
struct bna_intr_info *intr_info;
struct bna_tx_mod *tx_mod = &bna->tx_mod;
struct bna_tx *tx;
struct bna_txq *txq;
struct list_head *qe;
int page_count;
int i;
intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
PAGE_SIZE;
/**
* Get resources
*/
if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
return NULL;
/* Tx */
tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
if (!tx)
return NULL;
tx->bna = bna;
tx->priv = priv;
/* TxQs */
INIT_LIST_HEAD(&tx->txq_q);
for (i = 0; i < tx_cfg->num_txq; i++) {
if (list_empty(&tx_mod->txq_free_q))
goto err_return;
bfa_q_deq(&tx_mod->txq_free_q, &txq);
bfa_q_qe_init(&txq->qe);
list_add_tail(&txq->qe, &tx->txq_q);
txq->tx = tx;
}
/*
* Initialize
*/
/* Tx */
tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
/* Following callbacks are mandatory */
tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
list_add_tail(&tx->qe, &tx_mod->tx_active_q);
tx->num_txq = tx_cfg->num_txq;
tx->flags = 0;
if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
switch (tx->type) {
case BNA_TX_T_REGULAR:
if (!(tx->bna->tx_mod.flags &
BNA_TX_MOD_F_ENET_LOOPBACK))
tx->flags |= BNA_TX_F_ENET_STARTED;
break;
case BNA_TX_T_LOOPBACK:
if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
tx->flags |= BNA_TX_F_ENET_STARTED;
break;
}
}
/* TxQ */
i = 0;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
txq->tcb = (struct bna_tcb *)
res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
txq->tx_packets = 0;
txq->tx_bytes = 0;
/* IB */
txq->ib.ib_seg_host_addr.lsb =
res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
txq->ib.ib_seg_host_addr.msb =
res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
txq->ib.ib_seg_host_addr_kva =
res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
txq->ib.intr_type = intr_info->intr_type;
txq->ib.intr_vector = (intr_info->num == 1) ?
intr_info->idl[0].vector :
intr_info->idl[i].vector;
if (intr_info->intr_type == BNA_INTR_T_INTX)
txq->ib.intr_vector = (1 << txq->ib.intr_vector);
txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
/* TCB */
txq->tcb->q_depth = tx_cfg->txq_depth;
txq->tcb->unmap_q = (void *)
res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
txq->tcb->hw_consumer_index =
(u32 *)txq->ib.ib_seg_host_addr_kva;
txq->tcb->i_dbell = &txq->ib.door_bell;
txq->tcb->intr_type = txq->ib.intr_type;
txq->tcb->intr_vector = txq->ib.intr_vector;
txq->tcb->txq = txq;
txq->tcb->bnad = bnad;
txq->tcb->id = i;
/* QPT, SWQPT, Pages */
bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
&res_info[BNA_TX_RES_MEM_T_PAGE].
res_u.mem_info.mdl[i]);
/* Callback to bnad for setting up TCB */
if (tx->tcb_setup_cbfn)
(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
txq->priority = txq->tcb->id;
else
txq->priority = tx_mod->default_prio;
i++;
}
tx->txf_vlan_id = 0;
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
tx_mod->rid_mask |= (1 << tx->rid);
return tx;
err_return:
bna_tx_free(tx);
return NULL;
}
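/*
 * Illustrative usage, mirroring the rx path (a sketch; the real caller
 * is in the bnad code):
 *
 *	bna_tx_res_req(num_txq, txq_depth, res_info);
 *	... allocate each res_info[] entry ...
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *	if (tx)
 *		bna_tx_enable(tx);
 */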
void
bna_tx_destroy(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
if (tx->tcb_destroy_cbfn)
(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
}
tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
return;
tx->flags |= BNA_TX_F_ENABLED;
if (tx->flags & BNA_TX_F_ENET_STARTED)
bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_tx *))
{
if (type == BNA_SOFT_CLEANUP) {
(*cbfn)(tx->bna->bnad, tx);
return;
}
tx->stop_cbfn = cbfn;
tx->stop_cbarg = tx->bna->bnad;
tx->flags &= ~BNA_TX_F_ENABLED;
bfa_fsm_send_event(tx, TX_E_STOP);
}
void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
bfa_wc_down(&tx_mod->tx_stop_wc);
}
static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
if (tx_mod->stop_cbfn)
tx_mod->stop_cbfn(&tx_mod->bna->enet);
tx_mod->stop_cbfn = NULL;
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
struct bna_res_info *res_info)
{
int i;
tx_mod->bna = bna;
tx_mod->flags = 0;
tx_mod->tx = (struct bna_tx *)
res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
tx_mod->txq = (struct bna_txq *)
res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&tx_mod->tx_free_q);
INIT_LIST_HEAD(&tx_mod->tx_active_q);
INIT_LIST_HEAD(&tx_mod->txq_free_q);
for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
tx_mod->tx[i].rid = i;
bfa_q_qe_init(&tx_mod->tx[i].qe);
list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
bfa_q_qe_init(&tx_mod->txq[i].qe);
list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
}
tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
tx_mod->default_prio = 0;
tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &tx_mod->tx_free_q)
i++;
i = 0;
list_for_each(qe, &tx_mod->txq_free_q)
i++;
tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
if (type == BNA_TX_T_LOOPBACK)
tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
if (tx->type == type)
bna_tx_start(tx);
}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
if (tx->type == type) {
bfa_wc_up(&tx_mod->tx_stop_wc);
bna_tx_stop(tx);
}
}
bfa_wc_wait(&tx_mod->tx_stop_wc);
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
bna_tx_fail(tx);
}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
}
}
| gpl-2.0 |
BobZhome/android_kernel_gelato | arch/sh/boards/mach-snapgear/io.c | 1473 | 2630 | /*
* Copyright (C) 2002 David McCullough <davidm@snapgear.com>
* Copyright (C) 2001 Ian da Silva, Jeremy Siegel
* Based largely on io_se.c.
*
* I/O routine for Hitachi 7751 SolutionEngine.
*
* Initial version only to support LAN access; some
* placeholder code from io_se.c left in with the
* expectation of later SuperIO and PCMCIA access.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/addrspace.h>
#ifdef CONFIG_SH_SECUREEDGE5410
unsigned short secureedge5410_ioport;
#endif
static inline volatile __u16 *port2adr(unsigned int port)
{
maybebadio((unsigned long)port);
return (volatile __u16*)port;
}
/*
* General outline: remap really low stuff [eventually] to SuperIO,
* stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
* is mapped through the PCI IO window. Stuff with high bits (PXSEG)
* should be way beyond the window, and is used w/o translation for
* compatibility.
*/
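/*
 * In practice the dispatch below is simple: PXSEG(port) != 0 means the
 * address already carries the P-segment bits and is dereferenced
 * directly; everything else goes through port2adr(), which flags the
 * access via maybebadio() and falls back to treating the port number
 * itself as an address.
 */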
unsigned char snapgear_inb(unsigned long port)
{
if (PXSEG(port))
return *(volatile unsigned char *)port;
else
return (*port2adr(port)) & 0xff;
}
unsigned char snapgear_inb_p(unsigned long port)
{
unsigned char v;
if (PXSEG(port))
v = *(volatile unsigned char *)port;
else
v = (*port2adr(port))&0xff;
ctrl_delay();
return v;
}
unsigned short snapgear_inw(unsigned long port)
{
if (PXSEG(port))
return *(volatile unsigned short *)port;
else if (port >= 0x2000)
return *port2adr(port);
else
maybebadio(port);
return 0;
}
unsigned int snapgear_inl(unsigned long port)
{
if (PXSEG(port))
return *(volatile unsigned long *)port;
else if (port >= 0x2000)
return *port2adr(port);
else
maybebadio(port);
return 0;
}
void snapgear_outb(unsigned char value, unsigned long port)
{
if (PXSEG(port))
*(volatile unsigned char *)port = value;
else
*(port2adr(port)) = value;
}
void snapgear_outb_p(unsigned char value, unsigned long port)
{
if (PXSEG(port))
*(volatile unsigned char *)port = value;
else
*(port2adr(port)) = value;
ctrl_delay();
}
void snapgear_outw(unsigned short value, unsigned long port)
{
if (PXSEG(port))
*(volatile unsigned short *)port = value;
else if (port >= 0x2000)
*port2adr(port) = value;
else
maybebadio(port);
}
void snapgear_outl(unsigned int value, unsigned long port)
{
if (PXSEG(port))
*(volatile unsigned long *)port = value;
else
maybebadio(port);
}
void snapgear_insl(unsigned long port, void *addr, unsigned long count)
{
maybebadio(port);
}
void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
{
maybebadio(port);
}
| gpl-2.0 |
LuisCastillo98/FenomenalMod_alto45_kernel | drivers/net/usb/r8152.c | 1985 | 42253 | /*
* Copyright (c) 2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
/* Version Information */
#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
#define R8152_PHY_ID 32
#define PLA_IDR 0xc000
#define PLA_RCR 0xc010
#define PLA_RMS 0xc016
#define PLA_RXFIFO_CTRL0 0xc0a0
#define PLA_RXFIFO_CTRL1 0xc0a4
#define PLA_RXFIFO_CTRL2 0xc0a8
#define PLA_FMC 0xc0b4
#define PLA_CFG_WOL 0xc0b6
#define PLA_MAR 0xcd00
#define PAL_BDC_CR 0xd1a0
#define PLA_LEDSEL 0xdd90
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
#define PLA_MAC_PWR_CTRL 0xe0c0
#define PLA_TCR0 0xe610
#define PLA_TCR1 0xe612
#define PLA_TXFIFO_CTRL 0xe618
#define PLA_RSTTELLY 0xe800
#define PLA_CR 0xe813
#define PLA_CRWECR 0xe81c
#define PLA_CONFIG5 0xe822
#define PLA_PHY_PWR 0xe84c
#define PLA_OOB_CTRL 0xe84f
#define PLA_CPCR 0xe854
#define PLA_MISC_0 0xe858
#define PLA_MISC_1 0xe85a
#define PLA_OCP_GPHY_BASE 0xe86c
#define PLA_TELLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
#define PLA_BP_2 0xfc2c
#define PLA_BP_3 0xfc2e
#define PLA_BP_4 0xfc30
#define PLA_BP_5 0xfc32
#define PLA_BP_6 0xfc34
#define PLA_BP_7 0xfc36
#define USB_DEV_STAT 0xb808
#define USB_USB_CTRL 0xd406
#define USB_PHY_CTRL 0xd408
#define USB_TX_AGG 0xd40a
#define USB_RX_BUF_TH 0xd40c
#define USB_USB_TIMER 0xd428
#define USB_PM_CTRL_STATUS 0xd432
#define USB_TX_DMA 0xd434
#define USB_UPS_CTRL 0xd800
#define USB_BP_BA 0xfc26
#define USB_BP_0 0xfc28
#define USB_BP_1 0xfc2a
#define USB_BP_2 0xfc2c
#define USB_BP_3 0xfc2e
#define USB_BP_4 0xfc30
#define USB_BP_5 0xfc32
#define USB_BP_6 0xfc34
#define USB_BP_7 0xfc36
/* OCP Registers */
#define OCP_ALDPS_CONFIG 0x2010
#define OCP_EEE_CONFIG1 0x2080
#define OCP_EEE_CONFIG2 0x2092
#define OCP_EEE_CONFIG3 0x2094
#define OCP_EEE_AR 0xa41a
#define OCP_EEE_DATA 0xa41c
/* PLA_RCR */
#define RCR_AAP 0x00000001
#define RCR_APM 0x00000002
#define RCR_AM 0x00000004
#define RCR_AB 0x00000008
#define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB)
/* PLA_RXFIFO_CTRL0 */
#define RXFIFO_THR1_NORMAL 0x00080002
#define RXFIFO_THR1_OOB 0x01800003
/* PLA_RXFIFO_CTRL1 */
#define RXFIFO_THR2_FULL 0x00000060
#define RXFIFO_THR2_HIGH 0x00000038
#define RXFIFO_THR2_OOB 0x0000004a
/* PLA_RXFIFO_CTRL2 */
#define RXFIFO_THR3_FULL 0x00000078
#define RXFIFO_THR3_HIGH 0x00000048
#define RXFIFO_THR3_OOB 0x0000005a
/* PLA_TXFIFO_CTRL */
#define TXFIFO_THR_NORMAL 0x00400008
/* PLA_FMC */
#define FMC_FCR_MCU_EN 0x0001
/* PLA_EEEP_CR */
#define EEEP_CR_EEEP_TX 0x0002
/* PLA_TCR0 */
#define TCR0_TX_EMPTY 0x0800
#define TCR0_AUTO_FIFO 0x0080
/* PLA_TCR1 */
#define VERSION_MASK 0x7cf0
/* PLA_CR */
#define CR_RST 0x10
#define CR_RE 0x08
#define CR_TE 0x04
/* PLA_CRWECR */
#define CRWECR_NORAML 0x00
#define CRWECR_CONFIG 0xc0
/* PLA_OOB_CTRL */
#define NOW_IS_OOB 0x80
#define TXFIFO_EMPTY 0x20
#define RXFIFO_EMPTY 0x10
#define LINK_LIST_READY 0x02
#define DIS_MCU_CLROOB 0x01
#define FIFO_EMPTY (TXFIFO_EMPTY | RXFIFO_EMPTY)
/* PLA_MISC_1 */
#define RXDY_GATED_EN 0x0008
/* PLA_SFF_STS_7 */
#define RE_INIT_LL 0x8000
#define MCU_BORW_EN 0x4000
/* PLA_CPCR */
#define CPCR_RX_VLAN 0x0040
/* PLA_CFG_WOL */
#define MAGIC_EN 0x0001
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
/* PLA_CONFIG5 */
#define LAN_WAKE_EN 0x0002
/* PLA_LED_FEATURE */
#define LED_MODE_MASK 0x0700
/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN 0x0080
#define PFM_PWM_SWITCH 0x0040
/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN 0x00004000
#define MCU_CLK_RATIO 0x07010f07
#define MCU_CLK_RATIO_MASK 0x0f0f0f0f
/* PLA_GPHY_INTR_IMR */
#define GPHY_STS_MSK 0x0001
#define SPEED_DOWN_MSK 0x0002
#define SPDWN_RXDV_MSK 0x0004
#define SPDWN_LINKCHG_MSK 0x0008
/* PLA_PHYAR */
#define PHYAR_FLAG 0x80000000
/* PLA_EEE_CR */
#define EEE_RX_EN 0x0001
#define EEE_TX_EN 0x0002
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
#define STAT_SPEED_FULL 0x0001
/* USB_TX_AGG */
#define TX_AGG_MAX_THRESHOLD 0x03
/* USB_RX_BUF_TH */
#define RX_BUF_THR 0x7a120180
/* USB_TX_DMA */
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
/* USB_PM_CTRL_STATUS */
#define RWSUME_INDICATE 0x0001
/* USB_USB_CTRL */
#define RX_AGG_DISABLE 0x0010
/* OCP_ALDPS_CONFIG */
#define ENPWRSAVE 0x8000
#define ENPDNPS 0x0200
#define LINKENA 0x0100
#define DIS_SDSAVE 0x0010
/* OCP_EEE_CONFIG1 */
#define RG_TXLPI_MSK_HFDUP 0x8000
#define RG_MATCLR_EN 0x4000
#define EEE_10_CAP 0x2000
#define EEE_NWAY_EN 0x1000
#define TX_QUIET_EN 0x0200
#define RX_QUIET_EN 0x0100
#define SDRISETIME 0x0010 /* bit 4 ~ 6 */
#define RG_RXLPI_MSK_HFDUP 0x0008
#define SDFALLTIME 0x0007 /* bit 0 ~ 2 */
/* OCP_EEE_CONFIG2 */
#define RG_LPIHYS_NUM 0x7000 /* bit 12 ~ 15 */
#define RG_DACQUIET_EN 0x0400
#define RG_LDVQUIET_EN 0x0200
#define RG_CKRSEL 0x0020
#define RG_EEEPRG_EN 0x0010
/* OCP_EEE_CONFIG3 */
#define FST_SNR_EYE_R 0x1500 /* bit 7 ~ 15 */
#define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */
#define MSK_PH 0x0006 /* bit 0 ~ 3 */
/* OCP_EEE_AR */
/* bit[15:14] function */
#define FUN_ADDR 0x0000
#define FUN_DATA 0x4000
/* bit[4:0] device addr */
#define DEVICE_ADDR 0x0007
/* OCP_EEE_DATA */
#define EEE_ADDR 0x003C
#define EEE_DATA 0x0002
enum rtl_register_content {
_100bps = 0x08,
_10bps = 0x04,
LINK_STATUS = 0x02,
FULL_DUP = 0x01,
};
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
#define RTL8152_REQ_GET_REGS 0x05
#define RTL8152_REQ_SET_REGS 0x05
#define BYTE_EN_DWORD 0xff
#define BYTE_EN_WORD 0x33
#define BYTE_EN_BYTE 0x11
#define BYTE_EN_SIX_BYTES 0x3f
#define BYTE_EN_START_MASK 0x0f
#define BYTE_EN_END_MASK 0xf0
#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
#define RTL8152_TX_TIMEOUT (HZ)
/* rtl8152 flags */
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
RX_URB_FAIL,
RTL8152_SET_RX_MODE,
WORK_ENABLE
};
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define PRODUCT_ID_RTL8152 0x8152
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
struct rx_desc {
u32 opts1;
#define RX_LEN_MASK 0x7fff
u32 opts2;
u32 opts3;
u32 opts4;
u32 opts5;
u32 opts6;
};
struct tx_desc {
u32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
#define TX_LEN_MASK 0xffff
u32 opts2;
};
struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
struct net_device *netdev;
struct urb *rx_urb, *tx_urb;
struct sk_buff *tx_skb, *rx_skb;
struct delayed_work schedule;
struct mii_if_info mii;
u32 msg_enable;
u16 ocp_base;
u8 version;
u8 speed;
};
enum rtl_version {
RTL_VER_UNKNOWN = 0,
RTL_VER_01,
RTL_VER_02
};
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
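/*
 * Illustrative sketch of a CRC-based multicast hash of the kind the
 * comment above refers to (the actual filter programming is not shown
 * in this excerpt; the names below are assumptions):
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */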
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, data, size, 500);
}
static
int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, data, size, 500);
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
void *data, u16 type)
{
u16 limit = 64;
int ret = 0;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
/* both size and index must be 4-byte aligned */
if ((size & 3) || !size || (index & 3) || !data)
return -EPERM;
if ((u32)index + (u32)size > 0xffff)
return -EPERM;
while (size) {
if (size > limit) {
ret = get_registers(tp, index, type, limit, data);
if (ret < 0)
break;
index += limit;
data += limit;
size -= limit;
} else {
ret = get_registers(tp, index, type, size, data);
if (ret < 0)
break;
index += size;
data += size;
size = 0;
break;
}
}
return ret;
}
static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 size, void *data, u16 type)
{
int ret;
u16 byteen_start, byteen_end, byen;
u16 limit = 512;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return -ENODEV;
/* both size and index must be 4-byte aligned */
if ((size & 3) || !size || (index & 3) || !data)
return -EPERM;
if ((u32)index + (u32)size > 0xffff)
return -EPERM;
byteen_start = byteen & BYTE_EN_START_MASK;
byteen_end = byteen & BYTE_EN_END_MASK;
byen = byteen_start | (byteen_start << 4);
ret = set_registers(tp, index, type | byen, 4, data);
if (ret < 0)
goto error1;
index += 4;
data += 4;
size -= 4;
if (size) {
size -= 4;
while (size) {
if (size > limit) {
ret = set_registers(tp, index,
type | BYTE_EN_DWORD,
limit, data);
if (ret < 0)
goto error1;
index += limit;
data += limit;
size -= limit;
} else {
ret = set_registers(tp, index,
type | BYTE_EN_DWORD,
size, data);
if (ret < 0)
goto error1;
index += size;
data += size;
size = 0;
break;
}
}
byen = byteen_end | (byteen_end >> 4);
ret = set_registers(tp, index, type | byen, 4, data);
if (ret < 0)
goto error1;
}
error1:
return ret;
}
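/*
 * Worked example (editorial, traced through the code above): writing
 * the 6-byte MAC address with pla_ocp_write(tp, PLA_IDR,
 * BYTE_EN_SIX_BYTES, 8, data). BYTE_EN_SIX_BYTES is 0x3f, so
 * byteen_start = 0x0f and the first dword goes out with
 * byen = 0x0f | 0xf0 = 0xff (all four bytes enabled); the remaining
 * four buffer bytes are then written as the final dword with
 * byen = 0x30 | 0x03 = 0x33, i.e. bytes 0-1 only -- six bytes land in
 * the registers even though the transfer length is 8.
 */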
static inline
int pla_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
{
return generic_ocp_read(tp, index, size, data, MCU_TYPE_PLA);
}
static inline
int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
{
return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA);
}
static inline
int usb_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
{
return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB);
}
static inline
int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
{
return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB);
}
static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(data), &data);
else
usb_ocp_read(tp, index, sizeof(data), &data);
return __le32_to_cpu(data);
}
static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
{
if (type == MCU_TYPE_PLA)
pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
else
usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
}
static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
u8 shift = index & 2;
index &= ~3;
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(data), &data);
else
usb_ocp_read(tp, index, sizeof(data), &data);
data = __le32_to_cpu(data);
data >>= (shift * 8);
data &= 0xffff;
return (u16)data;
}
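/*
 * Example (editorial): ocp_read_word() with index 0x1a reads the
 * aligned dword at 0x18, shifts right by (0x1a & 2) * 8 = 16 bits and
 * masks to 16 bits -- so an unaligned word register costs exactly one
 * aligned dword read.
 */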
static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
{
u32 tmp, mask = 0xffff;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
data &= mask;
if (index & 2) {
byen <<= shift;
mask <<= (shift * 8);
data <<= (shift * 8);
index &= ~3;
}
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(tmp), &tmp);
else
usb_ocp_read(tp, index, sizeof(tmp), &tmp);
tmp = __le32_to_cpu(tmp) & ~mask;
tmp |= data;
tmp = __cpu_to_le32(tmp);
if (type == MCU_TYPE_PLA)
pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
else
usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
}
static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
u8 shift = index & 3;
index &= ~3;
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(data), &data);
else
usb_ocp_read(tp, index, sizeof(data), &data);
data = __le32_to_cpu(data);
data >>= (shift * 8);
data &= 0xff;
return (u8)data;
}
static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
{
u32 tmp, mask = 0xff;
u16 byen = BYTE_EN_BYTE;
u8 shift = index & 3;
data &= mask;
if (index & 3) {
byen <<= shift;
mask <<= (shift * 8);
data <<= (shift * 8);
index &= ~3;
}
if (type == MCU_TYPE_PLA)
pla_ocp_read(tp, index, sizeof(tmp), &tmp);
else
usb_ocp_read(tp, index, sizeof(tmp), &tmp);
tmp = __le32_to_cpu(tmp) & ~mask;
tmp |= data;
tmp = __cpu_to_le32(tmp);
if (type == MCU_TYPE_PLA)
pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
else
usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
}
static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
{
u32 ocp_data;
int i;
ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) |
(value & 0xffff);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
for (i = 20; i > 0; i--) {
udelay(25);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
if (!(ocp_data & PHYAR_FLAG))
break;
}
udelay(20);
}
static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr)
{
u32 ocp_data;
int i;
ocp_data = (reg_addr & 0x1f) << 16;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data);
for (i = 20; i > 0; i--) {
udelay(25);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR);
if (ocp_data & PHYAR_FLAG)
break;
}
udelay(20);
if (!(ocp_data & PHYAR_FLAG))
return -EAGAIN;
return (u16)(ocp_data & 0xffff);
}
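/*
 * Editorial note: PLA_PHYAR acts as an indirect MDIO window, and
 * PHYAR_FLAG doubles as a busy/ready bit -- r8152_mdio_write() sets it
 * to start a cycle and polls for it to clear, while r8152_mdio_read()
 * starts a cycle with it clear and polls for the hardware to set it,
 * at which point the low 16 bits hold the register value.
 */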
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
if (phy_id != R8152_PHY_ID)
return -EINVAL;
return r8152_mdio_read(tp, reg);
}
static
void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
{
struct r8152 *tp = netdev_priv(netdev);
if (phy_id != R8152_PHY_ID)
return;
r8152_mdio_write(tp, reg, val);
}
static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
{
u16 ocp_base, ocp_index;
ocp_base = addr & 0xf000;
if (ocp_base != tp->ocp_base) {
ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base);
tp->ocp_base = ocp_base;
}
ocp_index = (addr & 0x0fff) | 0xb000;
ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
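/*
 * Editorial note: PHY OCP registers are paged. ocp_reg_write() caches
 * the current 4K page in tp->ocp_base and only rewrites
 * PLA_OCP_GPHY_BASE when the page changes; the 12-bit offset is then
 * addressed through the 0xb000 window.
 */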
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
u8 *node_id;
node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
if (!node_id) {
netif_err(tp, probe, dev, "out of memory\n");
return;
}
if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0)
netif_notice(tp, probe, dev, "failed to read MAC address\n");
else {
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
kfree(node_id);
}
static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
{
struct r8152 *tp = netdev_priv(netdev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
return 0;
}
static int alloc_all_urbs(struct r8152 *tp)
{
tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tp->rx_urb)
return 0;
tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tp->tx_urb) {
usb_free_urb(tp->rx_urb);
return 0;
}
return 1;
}
static void free_all_urbs(struct r8152 *tp)
{
usb_free_urb(tp->rx_urb);
usb_free_urb(tp->tx_urb);
}
static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
{
return &dev->stats;
}
static void read_bulk_callback(struct urb *urb)
{
struct r8152 *tp;
unsigned pkt_len;
struct sk_buff *skb;
struct net_device *netdev;
struct net_device_stats *stats;
int status = urb->status;
int result;
struct rx_desc *rx_desc;
tp = urb->context;
if (!tp)
return;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
netdev = tp->netdev;
if (!netif_device_present(netdev))
return;
stats = rtl8152_get_stats(netdev);
switch (status) {
case 0:
break;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
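/* fall through to -ENOENT: the urb is dead either way */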
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
pr_warn_ratelimited("maybe a reset is needed?\n");
goto goon;
default:
pr_warn_ratelimited("Rx status %d\n", status);
goto goon;
}
/* protect against short packets (tell me why we got some?!?) */
if (urb->actual_length < sizeof(*rx_desc))
goto goon;
rx_desc = (struct rx_desc *)urb->transfer_buffer;
pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
goto goon;
skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
if (!skb)
goto goon;
memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
goon:
usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
(usb_complete_t)read_bulk_callback, tp);
result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
set_bit(RX_URB_FAIL, &tp->flags);
goto resched;
} else {
clear_bit(RX_URB_FAIL, &tp->flags);
}
return;
resched:
tasklet_schedule(&tp->tl);
}
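/*
 * Editorial note: this early driver keeps a single rx_skb as the URB
 * buffer and memcpy()s each received frame into a fresh skb, so only
 * one packet (one rx_desc) is handled per bulk completion -- consistent
 * with r8152b_init() setting RX_AGG_DISABLE further down.
 */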
static void rx_fixup(unsigned long data)
{
struct r8152 *tp;
int status;
tp = (struct r8152 *)data;
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
if (status == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (status) {
set_bit(RX_URB_FAIL, &tp->flags);
goto tlsched;
} else {
clear_bit(RX_URB_FAIL, &tp->flags);
}
return;
tlsched:
tasklet_schedule(&tp->tl);
}
static void write_bulk_callback(struct urb *urb)
{
struct r8152 *tp;
int status = urb->status;
tp = urb->context;
if (!tp)
return;
dev_kfree_skb_irq(tp->tx_skb);
if (!netif_device_present(tp->netdev))
return;
if (status)
dev_info(&urb->dev->dev, "%s: Tx status %d\n",
tp->netdev->name, status);
tp->netdev->trans_start = jiffies;
netif_wake_queue(tp->netdev);
}
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
usb_unlink_urb(tp->tx_urb);
stats->tx_errors++;
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
if (tp->speed & LINK_STATUS)
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
}
static void _rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
u32 tmp, *mc_filter; /* Multicast hash filter */
u32 ocp_data;
mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
if (!mc_filter) {
netif_err(tp, link, netdev, "out of memory\n");
return;
}
clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_data |= RCR_AB | RCR_APM;
if (netdev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
netif_notice(tp, link, netdev, "Promiscuous mode enabled\n");
ocp_data |= RCR_AM | RCR_AAP;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else if ((netdev_mc_count(netdev) > multicast_filter_limit) ||
(netdev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
ocp_data |= RCR_AM;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct netdev_hw_addr *ha;
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, netdev) {
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
ocp_data |= RCR_AM;
}
}
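/*
 * Editorial note (a reading of the code, not documented behaviour):
 * the two hash words are byte-swapped and exchanged before being
 * written, which suggests PLA_MAR expects the 64-bit filter in
 * big-endian bit order relative to the CRC-derived index above.
 */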
tmp = mc_filter[0];
mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1]));
mc_filter[1] = __cpu_to_le32(swab32(tmp));
pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
netif_wake_queue(netdev);
kfree(mc_filter);
}
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
struct tx_desc *tx_desc;
int len, res;
netif_stop_queue(netdev);
len = skb->len;
if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
struct sk_buff *tx_skb;
tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
dev_kfree_skb_any(skb);
if (!tx_skb) {
stats->tx_dropped++;
netif_wake_queue(netdev);
return NETDEV_TX_OK;
}
skb = tx_skb;
}
tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
memset(tx_desc, 0, sizeof(*tx_desc));
tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
tp->tx_skb = skb;
skb_tx_timestamp(skb);
usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
skb->data, skb->len,
(usb_complete_t)write_bulk_callback, tp);
res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
if (res) {
/* Can we get/handle EPIPE here? */
if (res == -ENODEV) {
netif_device_detach(tp->netdev);
} else {
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
stats->tx_errors++;
netif_start_queue(netdev);
}
} else {
stats->tx_packets++;
stats->tx_bytes += skb->len;
}
return NETDEV_TX_OK;
}
static void r8152b_reset_packet_filter(struct r8152 *tp)
{
u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC);
ocp_data &= ~FMC_FCR_MCU_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data);
ocp_data |= FMC_FCR_MCU_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data);
}
static void rtl8152_nic_reset(struct r8152 *tp)
{
int i;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST);
for (i = 0; i < 1000; i++) {
if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
break;
udelay(100);
}
}
static inline u8 rtl8152_get_speed(struct r8152 *tp)
{
return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS);
}
static int rtl8152_enable(struct r8152 *tp)
{
u32 ocp_data;
u8 speed;
speed = rtl8152_get_speed(tp);
if (speed & _100bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
} else {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
ocp_data |= EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
r8152b_reset_packet_filter(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR);
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data &= ~RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
(usb_complete_t)read_bulk_callback, tp);
return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
}
static void rtl8152_disable(struct r8152 *tp)
{
u32 ocp_data;
int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
usb_kill_urb(tp->tx_urb);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data |= RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY)
break;
mdelay(1);
}
for (i = 0; i < 1000; i++) {
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY)
break;
mdelay(1);
}
usb_kill_urb(tp->rx_urb);
rtl8152_nic_reset(tp);
}
static void r8152b_exit_oob(struct r8152 *tp)
{
u32 ocp_data;
int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data |= RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data &= ~MCU_BORW_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if (ocp_data & LINK_LIST_READY)
break;
mdelay(1);
}
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if (ocp_data & LINK_LIST_READY)
break;
mdelay(1);
}
rtl8152_nic_reset(tp);
/* rx share fifo credit full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
ocp_data &= STAT_SPEED_MASK;
if (ocp_data == STAT_SPEED_FULL) {
/* rx share fifo credit near full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
RXFIFO_THR2_FULL);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
RXFIFO_THR3_FULL);
} else {
/* rx share fifo credit near full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
RXFIFO_THR2_HIGH);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
RXFIFO_THR3_HIGH);
}
/* TX share fifo free credit full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR);
ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
ocp_data &= ~CPCR_RX_VLAN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
ocp_data |= TCR0_AUTO_FIFO;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data);
}
static void r8152b_enter_oob(struct r8152 *tp)
{
u32 ocp_data;
int i;
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
rtl8152_disable(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if (ocp_data & LINK_LIST_READY)
break;
mdelay(1);
}
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
ocp_data |= RE_INIT_LL;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
if (ocp_data & LINK_LIST_READY)
break;
mdelay(1);
}
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
ocp_data |= MAGIC_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
ocp_data |= CPCR_RX_VLAN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
ocp_data |= ALDPS_PROXY_MODE;
ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data &= ~RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
static void r8152b_disable_aldps(struct r8152 *tp)
{
ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
msleep(20);
}
static inline void r8152b_enable_aldps(struct r8152 *tp)
{
ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
LINKENA | DIS_SDSAVE);
}
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
{
u16 bmcr, anar;
int ret = 0;
cancel_delayed_work_sync(&tp->schedule);
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
if (autoneg == AUTONEG_DISABLE) {
if (speed == SPEED_10) {
bmcr = 0;
anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
} else if (speed == SPEED_100) {
bmcr = BMCR_SPEED100;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
} else {
ret = -EINVAL;
goto out;
}
if (duplex == DUPLEX_FULL)
bmcr |= BMCR_FULLDPLX;
} else {
if (speed == SPEED_10) {
if (duplex == DUPLEX_FULL)
anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
else
anar |= ADVERTISE_10HALF;
} else if (speed == SPEED_100) {
if (duplex == DUPLEX_FULL) {
anar |= ADVERTISE_10HALF | ADVERTISE_10FULL;
anar |= ADVERTISE_100HALF | ADVERTISE_100FULL;
} else {
anar |= ADVERTISE_10HALF;
anar |= ADVERTISE_100HALF;
}
} else {
ret = -EINVAL;
goto out;
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
out:
schedule_delayed_work(&tp->schedule, 5 * HZ);
return ret;
}
static void rtl8152_down(struct r8152 *tp)
{
u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
ocp_data &= ~POWER_CUT;
ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
r8152b_disable_aldps(tp);
r8152b_enter_oob(tp);
r8152b_enable_aldps(tp);
}
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
u8 speed;
speed = rtl8152_get_speed(tp);
if (speed & LINK_STATUS) {
if (!(tp->speed & LINK_STATUS)) {
rtl8152_enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_carrier_on(netdev);
}
} else {
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
rtl8152_disable(tp);
}
}
tp->speed = speed;
}
static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
goto out1;
set_carrier(tp);
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
schedule_delayed_work(&tp->schedule, HZ);
out1:
return;
}
static int rtl8152_open(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
tp->speed = rtl8152_get_speed(tp);
if (tp->speed & LINK_STATUS) {
res = rtl8152_enable(tp);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
netif_err(tp, ifup, netdev,
"rtl8152_open failed: %d\n", res);
return res;
}
netif_carrier_on(netdev);
} else {
netif_stop_queue(netdev);
netif_carrier_off(netdev);
}
rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
return res;
}
static int rtl8152_close(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
rtl8152_disable(tp);
return res;
}
static void rtl_clear_bp(struct r8152 *tp)
{
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
mdelay(3);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
}
static void r8152b_enable_eee(struct r8152 *tp)
{
u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
ocp_data |= EEE_RX_EN | EEE_TX_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
ocp_reg_write(tp, OCP_EEE_CONFIG1, RG_TXLPI_MSK_HFDUP | RG_MATCLR_EN |
EEE_10_CAP | EEE_NWAY_EN |
TX_QUIET_EN | RX_QUIET_EN |
SDRISETIME | RG_RXLPI_MSK_HFDUP |
SDFALLTIME);
ocp_reg_write(tp, OCP_EEE_CONFIG2, RG_LPIHYS_NUM | RG_DACQUIET_EN |
RG_LDVQUIET_EN | RG_CKRSEL |
RG_EEEPRG_EN);
ocp_reg_write(tp, OCP_EEE_CONFIG3, FST_SNR_EYE_R | RG_LFS_SEL | MSK_PH);
ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | DEVICE_ADDR);
ocp_reg_write(tp, OCP_EEE_DATA, EEE_ADDR);
ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | DEVICE_ADDR);
ocp_reg_write(tp, OCP_EEE_DATA, EEE_DATA);
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
}
static void r8152b_enable_fc(struct r8152 *tp)
{
u16 anar;
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
r8152b_disable_aldps(tp);
}
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
int i;
rtl_clear_bp(tp);
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
}
r8152b_hw_phy_cfg(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
ocp_data &= ~POWER_CUT;
ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
ocp_data &= ~RWSUME_INDICATE;
ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
r8152b_exit_oob(tp);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL);
ocp_data &= ~MCU_CLK_RATIO_MASK;
ocp_data |= MCU_CLK_RATIO | D3_CLK_GATED_EN;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ocp_data);
ocp_data = GPHY_STS_MSK | SPEED_DOWN_MSK |
SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
r8152b_enable_eee(tp);
r8152b_enable_aldps(tp);
r8152b_enable_fc(tp);
r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
BMCR_ANRESTART);
for (i = 0; i < 100; i++) {
udelay(100);
if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
break;
}
/* disable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data |= RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
netif_device_detach(tp->netdev);
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
}
rtl8152_down(tp);
return 0;
}
static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
r8152b_init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
rtl8152_enable(tp);
set_bit(WORK_ENABLE, &tp->flags);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
}
return 0;
}
static void rtl8152_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct r8152 *tp = netdev_priv(netdev);
strncpy(info->driver, MODULENAME, ETHTOOL_BUSINFO_LEN);
strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN);
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
}
static
int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(netdev);
if (!tp->mii.mdio_read)
return -EOPNOTSUPP;
return mii_ethtool_gset(&tp->mii, cmd);
}
static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(dev);
return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
}
static struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
struct r8152 *tp = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(rq);
int res = 0;
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = R8152_PHY_ID; /* Internal PHY */
break;
case SIOCGMIIREG:
data->val_out = r8152_mdio_read(tp, data->reg_num);
break;
case SIOCSMIIREG:
if (!capable(CAP_NET_ADMIN)) {
res = -EPERM;
break;
}
r8152_mdio_write(tp, data->reg_num, data->val_in);
break;
default:
res = -EOPNOTSUPP;
}
return res;
}
static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_open = rtl8152_open,
.ndo_stop = rtl8152_close,
.ndo_do_ioctl = rtl8152_ioctl,
.ndo_start_xmit = rtl8152_start_xmit,
.ndo_tx_timeout = rtl8152_tx_timeout,
.ndo_set_rx_mode = rtl8152_set_rx_mode,
.ndo_set_mac_address = rtl8152_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
static void r8152b_get_version(struct r8152 *tp)
{
u32 ocp_data;
u16 version;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1);
version = (u16)(ocp_data & VERSION_MASK);
switch (version) {
case 0x4c00:
tp->version = RTL_VER_01;
break;
case 0x4c10:
tp->version = RTL_VER_02;
break;
default:
netif_info(tp, probe, tp->netdev,
"Unknown version 0x%04x\n", version);
break;
}
}
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
if (udev->actconfig->desc.bConfigurationValue != 1) {
usb_driver_set_configuration(udev, 1);
return -ENODEV;
}
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
dev_err(&intf->dev, "Out of memory\n");
return -ENOMEM;
}
tp = netdev_priv(netdev);
tp->msg_enable = 0x7FFF;
tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
tp->udev = udev;
tp->netdev = netdev;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
netdev->features &= ~NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
tp->speed = 0;
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
tp->mii.mdio_write = write_mii_word;
tp->mii.phy_id_mask = 0x3f;
tp->mii.reg_num_mask = 0x1f;
tp->mii.phy_id = R8152_PHY_ID;
tp->mii.supports_gmii = 0;
r8152b_get_version(tp);
r8152b_init(tp);
set_ethernet_addr(tp);
if (!alloc_all_urbs(tp)) {
netif_err(tp, probe, netdev, "out of memory\n");
goto out;
}
tp->rx_skb = netdev_alloc_skb(netdev,
RTL8152_RMS + sizeof(struct rx_desc));
if (!tp->rx_skb)
goto out1;
usb_set_intfdata(intf, tp);
SET_NETDEV_DEV(netdev, &intf->dev);
if (register_netdev(netdev) != 0) {
netif_err(tp, probe, netdev, "couldn't register the device\n");
goto out2;
}
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
out2:
usb_set_intfdata(intf, NULL);
dev_kfree_skb(tp->rx_skb);
out1:
free_all_urbs(tp);
out:
free_netdev(netdev);
return -EIO;
}
static void rtl8152_unload(struct r8152 *tp)
{
u32 ocp_data;
if (tp->version != RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
ocp_data |= POWER_CUT;
ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
}
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
ocp_data &= ~RWSUME_INDICATE;
ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
}
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (tp) {
set_bit(RTL8152_UNPLUG, &tp->flags);
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
rtl8152_unload(tp);
free_all_urbs(tp);
if (tp->rx_skb)
dev_kfree_skb(tp->rx_skb);
free_netdev(tp->netdev);
}
}
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
{USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
{}
};
MODULE_DEVICE_TABLE(usb, rtl8152_table);
static struct usb_driver rtl8152_driver = {
.name = MODULENAME,
.probe = rtl8152_probe,
.disconnect = rtl8152_disconnect,
.id_table = rtl8152_table,
.suspend = rtl8152_suspend,
.resume = rtl8152_resume
};
static int __init usb_rtl8152_init(void)
{
return usb_register(&rtl8152_driver);
}
static void __exit usb_rtl8152_exit(void)
{
usb_deregister(&rtl8152_driver);
}
module_init(usb_rtl8152_init);
module_exit(usb_rtl8152_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ryrzy/g2_4.2.2 | arch/alpha/kernel/srmcons.c | 4545 | 6074 | /*
* linux/arch/alpha/kernel/srmcons.c
*
* Callback based driver for SRM Console console device.
* (TTY driver and console driver)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <asm/console.h>
#include <asm/uaccess.h>
static DEFINE_SPINLOCK(srmcons_callback_lock);
static int srm_is_registered_console = 0;
/*
* The TTY driver
*/
#define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */
struct srmcons_private {
struct tty_port port;
struct timer_list timer;
} srmcons_singleton;
typedef union _srmcons_result {
struct {
unsigned long c :61;
unsigned long status :3;
} bits;
long as_long;
} srmcons_result;
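/*
 * Editorial note: the SRM console callbacks return a packed long --
 * the low 61 bits carry the character (or, for puts, the byte count)
 * and the top 3 bits a status code. srmcons_do_receive_chars() below
 * treats status < 2 as "a character was read" and keeps polling while
 * bit 0 signals more pending input.
 */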
/* called with callback_lock held */
static int
srmcons_do_receive_chars(struct tty_struct *tty)
{
srmcons_result result;
int count = 0, loops = 0;
do {
result.as_long = callback_getc(0);
if (result.bits.status < 2) {
tty_insert_flip_char(tty, (char)result.bits.c, 0);
count++;
}
} while ((result.bits.status & 1) && (++loops < 10));
if (count)
tty_schedule_flip(tty);
return count;
}
static void
srmcons_receive_chars(unsigned long data)
{
struct srmcons_private *srmconsp = (struct srmcons_private *)data;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
int incr = 10;
local_irq_save(flags);
if (spin_trylock(&srmcons_callback_lock)) {
if (!srmcons_do_receive_chars(port->tty))
incr = 100;
spin_unlock(&srmcons_callback_lock);
}
spin_lock(&port->lock);
if (port->tty)
mod_timer(&srmconsp->timer, jiffies + incr);
spin_unlock(&port->lock);
local_irq_restore(flags);
}
/* called with callback_lock held */
static int
srmcons_do_write(struct tty_struct *tty, const char *buf, int count)
{
static char str_cr[1] = "\r";
long c, remaining = count;
srmcons_result result;
char *cur;
int need_cr;
for (cur = (char *)buf; remaining > 0; ) {
need_cr = 0;
/*
* Break it up into reasonable size chunks to allow a chance
* for input to get in
*/
for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++)
if (cur[c] == '\n')
need_cr = 1;
while (c > 0) {
result.as_long = callback_puts(0, cur, c);
c -= result.bits.c;
remaining -= result.bits.c;
cur += result.bits.c;
/*
* Check for pending input iff a tty was provided
*/
if (tty)
srmcons_do_receive_chars(tty);
}
while (need_cr) {
result.as_long = callback_puts(0, str_cr, 1);
if (result.bits.c > 0)
need_cr = 0;
}
}
return count;
}
static int
srmcons_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
srmcons_do_write(tty, (const char *) buf, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
return count;
}
static int
srmcons_write_room(struct tty_struct *tty)
{
return 512;
}
static int
srmcons_chars_in_buffer(struct tty_struct *tty)
{
return 0;
}
static int
srmcons_open(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = &srmcons_singleton;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (!port->tty) {
tty->driver_data = srmconsp;
tty->port = port;
port->tty = tty; /* XXX proper refcounting */
mod_timer(&srmconsp->timer, jiffies + 10);
}
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
static void
srmcons_close(struct tty_struct *tty, struct file *filp)
{
struct srmcons_private *srmconsp = tty->driver_data;
struct tty_port *port = &srmconsp->port;
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
if (tty->count == 1) {
port->tty = NULL;
del_timer(&srmconsp->timer);
}
spin_unlock_irqrestore(&port->lock, flags);
}
static struct tty_driver *srmcons_driver;
static const struct tty_operations srmcons_ops = {
.open = srmcons_open,
.close = srmcons_close,
.write = srmcons_write,
.write_room = srmcons_write_room,
.chars_in_buffer= srmcons_chars_in_buffer,
};
static int __init
srmcons_init(void)
{
tty_port_init(&srmcons_singleton.port);
setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
(unsigned long)&srmcons_singleton);
if (srm_is_registered_console) {
struct tty_driver *driver;
int err;
driver = alloc_tty_driver(MAX_SRM_CONSOLE_DEVICES);
if (!driver)
return -ENOMEM;
driver->driver_name = "srm";
driver->name = "srm";
driver->major = 0; /* dynamic */
driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_SYSCONS;
driver->init_termios = tty_std_termios;
tty_set_operations(driver, &srmcons_ops);
err = tty_register_driver(driver);
if (err) {
put_tty_driver(driver);
return err;
}
srmcons_driver = driver;
}
return -ENODEV;
}
module_init(srmcons_init);
/*
* The console driver
*/
static void
srm_console_write(struct console *co, const char *s, unsigned count)
{
unsigned long flags;
spin_lock_irqsave(&srmcons_callback_lock, flags);
srmcons_do_write(NULL, s, count);
spin_unlock_irqrestore(&srmcons_callback_lock, flags);
}
static struct tty_driver *
srm_console_device(struct console *co, int *index)
{
*index = co->index;
return srmcons_driver;
}
static int
srm_console_setup(struct console *co, char *options)
{
return 0;
}
static struct console srmcons = {
.name = "srm",
.write = srm_console_write,
.device = srm_console_device,
.setup = srm_console_setup,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
void __init
register_srm_console(void)
{
if (!srm_is_registered_console) {
callback_open_console();
register_console(&srmcons);
srm_is_registered_console = 1;
}
}
void __init
unregister_srm_console(void)
{
if (srm_is_registered_console) {
callback_close_console();
unregister_console(&srmcons);
srm_is_registered_console = 0;
}
}
| gpl-2.0 |
lawnn/android_kernel_samsung_d2dcm | arch/arm/mach-exynos/clock-exynos4.c | 4545 | 43133 | /*
* Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4 - Clock support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <plat/cpu-freq.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/pll.h>
#include <plat/s5p-clock.h>
#include <plat/clock-clksrc.h>
#include <plat/pm.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
#ifdef CONFIG_PM_SLEEP
static struct sleep_save exynos4_clock_save[] = {
SAVE_ITEM(EXYNOS4_CLKDIV_LEFTBUS),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_LEFTBUS),
SAVE_ITEM(EXYNOS4_CLKDIV_RIGHTBUS),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_RIGHTBUS),
SAVE_ITEM(EXYNOS4_CLKSRC_TOP0),
SAVE_ITEM(EXYNOS4_CLKSRC_TOP1),
SAVE_ITEM(EXYNOS4_CLKSRC_CAM),
SAVE_ITEM(EXYNOS4_CLKSRC_TV),
SAVE_ITEM(EXYNOS4_CLKSRC_MFC),
SAVE_ITEM(EXYNOS4_CLKSRC_G3D),
SAVE_ITEM(EXYNOS4_CLKSRC_LCD0),
SAVE_ITEM(EXYNOS4_CLKSRC_MAUDIO),
SAVE_ITEM(EXYNOS4_CLKSRC_FSYS),
SAVE_ITEM(EXYNOS4_CLKSRC_PERIL0),
SAVE_ITEM(EXYNOS4_CLKSRC_PERIL1),
SAVE_ITEM(EXYNOS4_CLKDIV_CAM),
SAVE_ITEM(EXYNOS4_CLKDIV_TV),
SAVE_ITEM(EXYNOS4_CLKDIV_MFC),
SAVE_ITEM(EXYNOS4_CLKDIV_G3D),
SAVE_ITEM(EXYNOS4_CLKDIV_LCD0),
SAVE_ITEM(EXYNOS4_CLKDIV_MAUDIO),
SAVE_ITEM(EXYNOS4_CLKDIV_FSYS0),
SAVE_ITEM(EXYNOS4_CLKDIV_FSYS1),
SAVE_ITEM(EXYNOS4_CLKDIV_FSYS2),
SAVE_ITEM(EXYNOS4_CLKDIV_FSYS3),
SAVE_ITEM(EXYNOS4_CLKDIV_PERIL0),
SAVE_ITEM(EXYNOS4_CLKDIV_PERIL1),
SAVE_ITEM(EXYNOS4_CLKDIV_PERIL2),
SAVE_ITEM(EXYNOS4_CLKDIV_PERIL3),
SAVE_ITEM(EXYNOS4_CLKDIV_PERIL4),
SAVE_ITEM(EXYNOS4_CLKDIV_PERIL5),
SAVE_ITEM(EXYNOS4_CLKDIV_TOP),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_TOP),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_CAM),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_TV),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_LCD0),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_MAUDIO),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_FSYS),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_PERIL0),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_PERIL1),
SAVE_ITEM(EXYNOS4_CLKDIV2_RATIO),
SAVE_ITEM(EXYNOS4_CLKGATE_SCLKCAM),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_CAM),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_TV),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_MFC),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_G3D),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_LCD0),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_FSYS),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_GPS),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_PERIL),
SAVE_ITEM(EXYNOS4_CLKGATE_BLOCK),
SAVE_ITEM(EXYNOS4_CLKSRC_MASK_DMC),
SAVE_ITEM(EXYNOS4_CLKSRC_DMC),
SAVE_ITEM(EXYNOS4_CLKDIV_DMC0),
SAVE_ITEM(EXYNOS4_CLKDIV_DMC1),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_DMC),
SAVE_ITEM(EXYNOS4_CLKSRC_CPU),
SAVE_ITEM(EXYNOS4_CLKDIV_CPU),
SAVE_ITEM(EXYNOS4_CLKDIV_CPU + 0x4),
SAVE_ITEM(EXYNOS4_CLKGATE_SCLKCPU),
SAVE_ITEM(EXYNOS4_CLKGATE_IP_CPU),
};
#endif
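/*
 * Editorial note: the SAVE_ITEM list above is the set of clock mux,
 * divider and gate registers preserved across suspend -- presumably
 * captured and written back via the usual s3c_pm_do_save()/
 * s3c_pm_do_restore() helpers in syscore ops not shown in this excerpt.
 */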
static struct clk exynos4_clk_sclk_hdmi27m = {
.name = "sclk_hdmi27m",
.rate = 27000000,
};
static struct clk exynos4_clk_sclk_hdmiphy = {
.name = "sclk_hdmiphy",
};
static struct clk exynos4_clk_sclk_usbphy0 = {
.name = "sclk_usbphy0",
.rate = 27000000,
};
static struct clk exynos4_clk_sclk_usbphy1 = {
.name = "sclk_usbphy1",
};
static struct clk dummy_apb_pclk = {
.name = "apb_pclk",
.id = -1,
};
static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_TOP, clk, enable);
}
static int exynos4_clksrc_mask_cam_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_CAM, clk, enable);
}
static int exynos4_clksrc_mask_lcd0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_LCD0, clk, enable);
}
int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_FSYS, clk, enable);
}
static int exynos4_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_PERIL0, clk, enable);
}
static int exynos4_clksrc_mask_peril1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_PERIL1, clk, enable);
}
static int exynos4_clk_ip_mfc_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_MFC, clk, enable);
}
static int exynos4_clksrc_mask_tv_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKSRC_MASK_TV, clk, enable);
}
static int exynos4_clk_ip_cam_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_CAM, clk, enable);
}
static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
}
static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
}
static int exynos4_clk_ip_lcd0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_LCD0, clk, enable);
}
int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4210_CLKGATE_IP_LCD1, clk, enable);
}
int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_FSYS, clk, enable);
}
static int exynos4_clk_ip_peril_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIL, clk, enable);
}
static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
}
static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
}
static int exynos4_clk_dac_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_DAC_PHY_CONTROL, clk, enable);
}
/* Core list of CMU_CPU side */
static struct clksrc_clk exynos4_clk_mout_apll = {
.clk = {
.name = "mout_apll",
},
.sources = &clk_src_apll,
.reg_src = { .reg = EXYNOS4_CLKSRC_CPU, .shift = 0, .size = 1 },
};
static struct clksrc_clk exynos4_clk_sclk_apll = {
.clk = {
.name = "sclk_apll",
.parent = &exynos4_clk_mout_apll.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_CPU, .shift = 24, .size = 3 },
};
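/*
 * Editorial sketch of the clksrc_clk pattern used throughout this file
 * (behaviour of the common plat-samsung clksrc code, stated from
 * memory rather than this excerpt): reg_src selects the parent through
 * a bitfield mux, and reg_div scales it with an effective divider of
 * (field value + 1). So with mout_apll selected and CLKDIV_CPU[26:24]
 * reading 0, sclk_apll runs at the APLL rate; a field value of 1 would
 * halve it, and so on.
 */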
static struct clksrc_clk exynos4_clk_mout_epll = {
.clk = {
.name = "mout_epll",
},
.sources = &clk_src_epll,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP0, .shift = 4, .size = 1 },
};
struct clksrc_clk exynos4_clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
},
.sources = &clk_src_mpll,
/* reg_src will be added in each SoCs' clock */
};
static struct clk *exynos4_clkset_moutcore_list[] = {
[0] = &exynos4_clk_mout_apll.clk,
[1] = &exynos4_clk_mout_mpll.clk,
};
static struct clksrc_sources exynos4_clkset_moutcore = {
.sources = exynos4_clkset_moutcore_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_moutcore_list),
};
static struct clksrc_clk exynos4_clk_moutcore = {
.clk = {
.name = "moutcore",
},
.sources = &exynos4_clkset_moutcore,
.reg_src = { .reg = EXYNOS4_CLKSRC_CPU, .shift = 16, .size = 1 },
};
static struct clksrc_clk exynos4_clk_coreclk = {
.clk = {
.name = "core_clk",
.parent = &exynos4_clk_moutcore.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_CPU, .shift = 0, .size = 3 },
};
static struct clksrc_clk exynos4_clk_armclk = {
.clk = {
.name = "armclk",
.parent = &exynos4_clk_coreclk.clk,
},
};
static struct clksrc_clk exynos4_clk_aclk_corem0 = {
.clk = {
.name = "aclk_corem0",
.parent = &exynos4_clk_coreclk.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_CPU, .shift = 4, .size = 3 },
};
static struct clksrc_clk exynos4_clk_aclk_cores = {
.clk = {
.name = "aclk_cores",
.parent = &exynos4_clk_coreclk.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_CPU, .shift = 4, .size = 3 },
};
static struct clksrc_clk exynos4_clk_aclk_corem1 = {
.clk = {
.name = "aclk_corem1",
.parent = &exynos4_clk_coreclk.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_CPU, .shift = 8, .size = 3 },
};
static struct clksrc_clk exynos4_clk_periphclk = {
.clk = {
.name = "periphclk",
.parent = &exynos4_clk_coreclk.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_CPU, .shift = 12, .size = 3 },
};
/* Core list of CMU_CORE side */
static struct clk *exynos4_clkset_corebus_list[] = {
[0] = &exynos4_clk_mout_mpll.clk,
[1] = &exynos4_clk_sclk_apll.clk,
};
struct clksrc_sources exynos4_clkset_mout_corebus = {
.sources = exynos4_clkset_corebus_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_corebus_list),
};
static struct clksrc_clk exynos4_clk_mout_corebus = {
.clk = {
.name = "mout_corebus",
},
.sources = &exynos4_clkset_mout_corebus,
.reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 4, .size = 1 },
};
static struct clksrc_clk exynos4_clk_sclk_dmc = {
.clk = {
.name = "sclk_dmc",
.parent = &exynos4_clk_mout_corebus.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_DMC0, .shift = 12, .size = 3 },
};
static struct clksrc_clk exynos4_clk_aclk_cored = {
.clk = {
.name = "aclk_cored",
.parent = &exynos4_clk_sclk_dmc.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_DMC0, .shift = 16, .size = 3 },
};
static struct clksrc_clk exynos4_clk_aclk_corep = {
.clk = {
.name = "aclk_corep",
.parent = &exynos4_clk_aclk_cored.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_DMC0, .shift = 20, .size = 3 },
};
static struct clksrc_clk exynos4_clk_aclk_acp = {
.clk = {
.name = "aclk_acp",
.parent = &exynos4_clk_mout_corebus.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_DMC0, .shift = 0, .size = 3 },
};
static struct clksrc_clk exynos4_clk_pclk_acp = {
.clk = {
.name = "pclk_acp",
.parent = &exynos4_clk_aclk_acp.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_DMC0, .shift = 4, .size = 3 },
};
/* Core list of CMU_TOP side */
struct clk *exynos4_clkset_aclk_top_list[] = {
[0] = &exynos4_clk_mout_mpll.clk,
[1] = &exynos4_clk_sclk_apll.clk,
};
static struct clksrc_sources exynos4_clkset_aclk = {
.sources = exynos4_clkset_aclk_top_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_aclk_top_list),
};
static struct clksrc_clk exynos4_clk_aclk_200 = {
.clk = {
.name = "aclk_200",
},
.sources = &exynos4_clkset_aclk,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP0, .shift = 12, .size = 1 },
.reg_div = { .reg = EXYNOS4_CLKDIV_TOP, .shift = 0, .size = 3 },
};
static struct clksrc_clk exynos4_clk_aclk_100 = {
.clk = {
.name = "aclk_100",
},
.sources = &exynos4_clkset_aclk,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP0, .shift = 16, .size = 1 },
.reg_div = { .reg = EXYNOS4_CLKDIV_TOP, .shift = 4, .size = 4 },
};
static struct clksrc_clk exynos4_clk_aclk_160 = {
.clk = {
.name = "aclk_160",
},
.sources = &exynos4_clkset_aclk,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP0, .shift = 20, .size = 1 },
.reg_div = { .reg = EXYNOS4_CLKDIV_TOP, .shift = 8, .size = 3 },
};
struct clksrc_clk exynos4_clk_aclk_133 = {
.clk = {
.name = "aclk_133",
},
.sources = &exynos4_clkset_aclk,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP0, .shift = 24, .size = 1 },
.reg_div = { .reg = EXYNOS4_CLKDIV_TOP, .shift = 12, .size = 3 },
};
static struct clk *exynos4_clkset_vpllsrc_list[] = {
[0] = &clk_fin_vpll,
[1] = &exynos4_clk_sclk_hdmi27m,
};
static struct clksrc_sources exynos4_clkset_vpllsrc = {
.sources = exynos4_clkset_vpllsrc_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_vpllsrc_list),
};
static struct clksrc_clk exynos4_clk_vpllsrc = {
.clk = {
.name = "vpll_src",
.enable = exynos4_clksrc_mask_top_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &exynos4_clkset_vpllsrc,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP1, .shift = 0, .size = 1 },
};
static struct clk *exynos4_clkset_sclk_vpll_list[] = {
[0] = &exynos4_clk_vpllsrc.clk,
[1] = &clk_fout_vpll,
};
static struct clksrc_sources exynos4_clkset_sclk_vpll = {
.sources = exynos4_clkset_sclk_vpll_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_sclk_vpll_list),
};
static struct clksrc_clk exynos4_clk_sclk_vpll = {
.clk = {
.name = "sclk_vpll",
},
.sources = &exynos4_clkset_sclk_vpll,
.reg_src = { .reg = EXYNOS4_CLKSRC_TOP0, .shift = 8, .size = 1 },
};
static struct clk exynos4_init_clocks_off[] = {
{
.name = "timers",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1<<24),
}, {
.name = "csis",
.devname = "s5p-mipi-csis.0",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "csis",
.devname = "s5p-mipi-csis.1",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "jpeg",
.id = 0,
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "fimc",
.devname = "exynos4-fimc.0",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "fimc",
.devname = "exynos4-fimc.1",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "fimc",
.devname = "exynos4-fimc.2",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "fimc",
.devname = "exynos4-fimc.3",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "hsmmc",
.devname = "exynos4-sdhci.0",
.parent = &exynos4_clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "hsmmc",
.devname = "exynos4-sdhci.1",
.parent = &exynos4_clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "hsmmc",
.devname = "exynos4-sdhci.2",
.parent = &exynos4_clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "hsmmc",
.devname = "exynos4-sdhci.3",
.parent = &exynos4_clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "dwmmc",
.parent = &exynos4_clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "dac",
.devname = "s5p-sdo",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "mixer",
.devname = "s5p-mixer",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "vp",
.devname = "s5p-mixer",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "hdmi",
.devname = "exynos4-hdmi",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "hdmiphy",
.devname = "exynos4-hdmi",
.enable = exynos4_clk_hdmiphy_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "dacphy",
.devname = "s5p-sdo",
.enable = exynos4_clk_dac_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "adc",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "keypad",
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "rtc",
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "watchdog",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 14),
}, {
.name = "usbhost",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "otg",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 13),
}, {
.name = "spi",
.devname = "s3c64xx-spi.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "spi",
.devname = "s3c64xx-spi.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "spi",
.devname = "s3c64xx-spi.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "iis",
.devname = "samsung-i2s.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "iis",
.devname = "samsung-i2s.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 20),
}, {
.name = "iis",
.devname = "samsung-i2s.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "ac97",
.devname = "samsung-ac97",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 27),
}, {
.name = "fimg2d",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "mfc",
.devname = "s5p-mfc",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.0",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.1",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.2",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.3",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.4",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.5",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.6",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "i2c",
.devname = "s3c2440-i2c.7",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 13),
}, {
.name = "i2c",
.devname = "s3c2440-hdmiphy-i2c",
.parent = &exynos4_clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 14),
}, {
.name = "SYSMMU_MDMA",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "SYSMMU_FIMC0",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "SYSMMU_FIMC1",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "SYSMMU_FIMC2",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "SYSMMU_FIMC3",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "SYSMMU_JPEG",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "SYSMMU_FIMD0",
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_FIMD1",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_PCIe",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "SYSMMU_G2D",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "SYSMMU_ROTATOR",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_TV",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_MFC_L",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "SYSMMU_MFC_R",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}
};
static struct clk exynos4_init_clocks_on[] = {
{
.name = "uart",
.devname = "s5pv210-uart.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "uart",
.devname = "s5pv210-uart.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
.devname = "s5pv210-uart.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "uart",
.devname = "s5pv210-uart.3",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "uart",
.devname = "s5pv210-uart.4",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "uart",
.devname = "s5pv210-uart.5",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 5),
}
};
static struct clk exynos4_clk_pdma0 = {
.name = "dma",
.devname = "dma-pl330.0",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 0),
};
static struct clk exynos4_clk_pdma1 = {
.name = "dma",
.devname = "dma-pl330.1",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 1),
};
static struct clk exynos4_clk_mdma1 = {
.name = "dma",
.devname = "dma-pl330.2",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = ((1 << 8) | (1 << 5) | (1 << 2)),
};
static struct clk exynos4_clk_fimd0 = {
.name = "fimd",
.devname = "exynos4-fb.0",
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 0),
};
struct clk *exynos4_clkset_group_list[] = {
[0] = &clk_ext_xtal_mux,
[1] = &clk_xusbxti,
[2] = &exynos4_clk_sclk_hdmi27m,
[3] = &exynos4_clk_sclk_usbphy0,
[4] = &exynos4_clk_sclk_usbphy1,
[5] = &exynos4_clk_sclk_hdmiphy,
[6] = &exynos4_clk_mout_mpll.clk,
[7] = &exynos4_clk_mout_epll.clk,
[8] = &exynos4_clk_sclk_vpll.clk,
};
struct clksrc_sources exynos4_clkset_group = {
.sources = exynos4_clkset_group_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_group_list),
};
static struct clk *exynos4_clkset_mout_g2d0_list[] = {
[0] = &exynos4_clk_mout_mpll.clk,
[1] = &exynos4_clk_sclk_apll.clk,
};
static struct clksrc_sources exynos4_clkset_mout_g2d0 = {
.sources = exynos4_clkset_mout_g2d0_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d0_list),
};
static struct clksrc_clk exynos4_clk_mout_g2d0 = {
.clk = {
.name = "mout_g2d0",
},
.sources = &exynos4_clkset_mout_g2d0,
.reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 0, .size = 1 },
};
static struct clk *exynos4_clkset_mout_g2d1_list[] = {
[0] = &exynos4_clk_mout_epll.clk,
[1] = &exynos4_clk_sclk_vpll.clk,
};
static struct clksrc_sources exynos4_clkset_mout_g2d1 = {
.sources = exynos4_clkset_mout_g2d1_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d1_list),
};
static struct clksrc_clk exynos4_clk_mout_g2d1 = {
.clk = {
.name = "mout_g2d1",
},
.sources = &exynos4_clkset_mout_g2d1,
.reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 4, .size = 1 },
};
static struct clk *exynos4_clkset_mout_g2d_list[] = {
[0] = &exynos4_clk_mout_g2d0.clk,
[1] = &exynos4_clk_mout_g2d1.clk,
};
static struct clksrc_sources exynos4_clkset_mout_g2d = {
.sources = exynos4_clkset_mout_g2d_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d_list),
};
static struct clk *exynos4_clkset_mout_mfc0_list[] = {
[0] = &exynos4_clk_mout_mpll.clk,
[1] = &exynos4_clk_sclk_apll.clk,
};
static struct clksrc_sources exynos4_clkset_mout_mfc0 = {
.sources = exynos4_clkset_mout_mfc0_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_mfc0_list),
};
static struct clksrc_clk exynos4_clk_mout_mfc0 = {
.clk = {
.name = "mout_mfc0",
},
.sources = &exynos4_clkset_mout_mfc0,
.reg_src = { .reg = EXYNOS4_CLKSRC_MFC, .shift = 0, .size = 1 },
};
static struct clk *exynos4_clkset_mout_mfc1_list[] = {
[0] = &exynos4_clk_mout_epll.clk,
[1] = &exynos4_clk_sclk_vpll.clk,
};
static struct clksrc_sources exynos4_clkset_mout_mfc1 = {
.sources = exynos4_clkset_mout_mfc1_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_mfc1_list),
};
static struct clksrc_clk exynos4_clk_mout_mfc1 = {
.clk = {
.name = "mout_mfc1",
},
.sources = &exynos4_clkset_mout_mfc1,
.reg_src = { .reg = EXYNOS4_CLKSRC_MFC, .shift = 4, .size = 1 },
};
static struct clk *exynos4_clkset_mout_mfc_list[] = {
[0] = &exynos4_clk_mout_mfc0.clk,
[1] = &exynos4_clk_mout_mfc1.clk,
};
static struct clksrc_sources exynos4_clkset_mout_mfc = {
.sources = exynos4_clkset_mout_mfc_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_mfc_list),
};
static struct clk *exynos4_clkset_sclk_dac_list[] = {
[0] = &exynos4_clk_sclk_vpll.clk,
[1] = &exynos4_clk_sclk_hdmiphy,
};
static struct clksrc_sources exynos4_clkset_sclk_dac = {
.sources = exynos4_clkset_sclk_dac_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_sclk_dac_list),
};
static struct clksrc_clk exynos4_clk_sclk_dac = {
.clk = {
.name = "sclk_dac",
.enable = exynos4_clksrc_mask_tv_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &exynos4_clkset_sclk_dac,
.reg_src = { .reg = EXYNOS4_CLKSRC_TV, .shift = 8, .size = 1 },
};
static struct clksrc_clk exynos4_clk_sclk_pixel = {
.clk = {
.name = "sclk_pixel",
.parent = &exynos4_clk_sclk_vpll.clk,
},
.reg_div = { .reg = EXYNOS4_CLKDIV_TV, .shift = 0, .size = 4 },
};
static struct clk *exynos4_clkset_sclk_hdmi_list[] = {
[0] = &exynos4_clk_sclk_pixel.clk,
[1] = &exynos4_clk_sclk_hdmiphy,
};
static struct clksrc_sources exynos4_clkset_sclk_hdmi = {
.sources = exynos4_clkset_sclk_hdmi_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_sclk_hdmi_list),
};
static struct clksrc_clk exynos4_clk_sclk_hdmi = {
.clk = {
.name = "sclk_hdmi",
.enable = exynos4_clksrc_mask_tv_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &exynos4_clkset_sclk_hdmi,
.reg_src = { .reg = EXYNOS4_CLKSRC_TV, .shift = 0, .size = 1 },
};
static struct clk *exynos4_clkset_sclk_mixer_list[] = {
[0] = &exynos4_clk_sclk_dac.clk,
[1] = &exynos4_clk_sclk_hdmi.clk,
};
static struct clksrc_sources exynos4_clkset_sclk_mixer = {
.sources = exynos4_clkset_sclk_mixer_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_sclk_mixer_list),
};
static struct clksrc_clk exynos4_clk_sclk_mixer = {
.clk = {
.name = "sclk_mixer",
.enable = exynos4_clksrc_mask_tv_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &exynos4_clkset_sclk_mixer,
.reg_src = { .reg = EXYNOS4_CLKSRC_TV, .shift = 4, .size = 1 },
};
static struct clksrc_clk *exynos4_sclk_tv[] = {
&exynos4_clk_sclk_dac,
&exynos4_clk_sclk_pixel,
&exynos4_clk_sclk_hdmi,
&exynos4_clk_sclk_mixer,
};
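/* MMC pre-dividers (mux + first-stage divider) feeding the per-channel sclk_mmc clocks. */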
static struct clksrc_clk exynos4_clk_dout_mmc0 = {
.clk = {
.name = "dout_mmc0",
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_FSYS, .shift = 0, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS1, .shift = 0, .size = 4 },
};
static struct clksrc_clk exynos4_clk_dout_mmc1 = {
.clk = {
.name = "dout_mmc1",
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_FSYS, .shift = 4, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS1, .shift = 16, .size = 4 },
};
static struct clksrc_clk exynos4_clk_dout_mmc2 = {
.clk = {
.name = "dout_mmc2",
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_FSYS, .shift = 8, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS2, .shift = 0, .size = 4 },
};
static struct clksrc_clk exynos4_clk_dout_mmc3 = {
.clk = {
.name = "dout_mmc3",
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_FSYS, .shift = 12, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS2, .shift = 16, .size = 4 },
};
static struct clksrc_clk exynos4_clk_dout_mmc4 = {
.clk = {
.name = "dout_mmc4",
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_FSYS, .shift = 16, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS3, .shift = 0, .size = 4 },
};
static struct clksrc_clk exynos4_clksrcs[] = {
{
.clk = {
.name = "sclk_pwm",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL0, .shift = 24, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL3, .shift = 0, .size = 4 },
}, {
.clk = {
.name = "sclk_csis",
.devname = "s5p-mipi-csis.0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 24, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 24, .size = 4 },
}, {
.clk = {
.name = "sclk_csis",
.devname = "s5p-mipi-csis.1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 28),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 28, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 28, .size = 4 },
}, {
.clk = {
.name = "sclk_cam0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 16),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 16, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 16, .size = 4 },
}, {
.clk = {
.name = "sclk_cam1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 20),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 20, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 20, .size = 4 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "exynos4-fimc.0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 0, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 0, .size = 4 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "exynos4-fimc.1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 4, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 4, .size = 4 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "exynos4-fimc.2",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 8, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 8, .size = 4 },
}, {
.clk = {
.name = "sclk_fimc",
.devname = "exynos4-fimc.3",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 12),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_CAM, .shift = 12, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_CAM, .shift = 12, .size = 4 },
}, {
.clk = {
.name = "sclk_fimd",
.devname = "exynos4-fb.0",
.enable = exynos4_clksrc_mask_lcd0_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_LCD0, .shift = 0, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_LCD0, .shift = 0, .size = 4 },
}, {
.clk = {
.name = "sclk_fimg2d",
},
.sources = &exynos4_clkset_mout_g2d,
.reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 8, .size = 1 },
.reg_div = { .reg = EXYNOS4_CLKDIV_IMAGE, .shift = 0, .size = 4 },
}, {
.clk = {
.name = "sclk_mfc",
.devname = "s5p-mfc",
},
.sources = &exynos4_clkset_mout_mfc,
.reg_src = { .reg = EXYNOS4_CLKSRC_MFC, .shift = 8, .size = 1 },
.reg_div = { .reg = EXYNOS4_CLKDIV_MFC, .shift = 0, .size = 4 },
}, {
.clk = {
.name = "sclk_dwmmc",
.parent = &exynos4_clk_dout_mmc4.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 16),
},
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS3, .shift = 8, .size = 8 },
}
};
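/* Per-device UART/MMC/SPI source clocks, exported through the clkdev lookup table below. */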
static struct clksrc_clk exynos4_clk_sclk_uart0 = {
.clk = {
.name = "uclk1",
.devname = "exynos4210-uart.0",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 0),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL0, .shift = 0, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL0, .shift = 0, .size = 4 },
};
static struct clksrc_clk exynos4_clk_sclk_uart1 = {
.clk = {
.name = "uclk1",
.devname = "exynos4210-uart.1",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 4),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL0, .shift = 4, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL0, .shift = 4, .size = 4 },
};
static struct clksrc_clk exynos4_clk_sclk_uart2 = {
.clk = {
.name = "uclk1",
.devname = "exynos4210-uart.2",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 8),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL0, .shift = 8, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL0, .shift = 8, .size = 4 },
};
static struct clksrc_clk exynos4_clk_sclk_uart3 = {
.clk = {
.name = "uclk1",
.devname = "exynos4210-uart.3",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 12),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL0, .shift = 12, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL0, .shift = 12, .size = 4 },
};
static struct clksrc_clk exynos4_clk_sclk_mmc0 = {
.clk = {
.name = "sclk_mmc",
.devname = "exynos4-sdhci.0",
.parent = &exynos4_clk_dout_mmc0.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 0),
},
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS1, .shift = 8, .size = 8 },
};
static struct clksrc_clk exynos4_clk_sclk_mmc1 = {
.clk = {
.name = "sclk_mmc",
.devname = "exynos4-sdhci.1",
.parent = &exynos4_clk_dout_mmc1.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 4),
},
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS1, .shift = 24, .size = 8 },
};
static struct clksrc_clk exynos4_clk_sclk_mmc2 = {
.clk = {
.name = "sclk_mmc",
.devname = "exynos4-sdhci.2",
.parent = &exynos4_clk_dout_mmc2.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 8),
},
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS2, .shift = 8, .size = 8 },
};
static struct clksrc_clk exynos4_clk_sclk_mmc3 = {
.clk = {
.name = "sclk_mmc",
.devname = "exynos4-sdhci.3",
.parent = &exynos4_clk_dout_mmc3.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 12),
},
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS2, .shift = 24, .size = 8 },
};
static struct clksrc_clk exynos4_clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
.devname = "s3c64xx-spi.0",
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 16),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 16, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 0, .size = 4 },
};
static struct clksrc_clk exynos4_clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
.devname = "s3c64xx-spi.1",
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 20),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 20, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 16, .size = 4 },
};
static struct clksrc_clk exynos4_clk_sclk_spi2 = {
.clk = {
.name = "sclk_spi",
.devname = "s3c64xx-spi.2",
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 24),
},
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 24, .size = 4 },
.reg_div = { .reg = EXYNOS4_CLKDIV_PERIL2, .shift = 0, .size = 4 },
};
/* Clock initialization code */
static struct clksrc_clk *exynos4_sysclks[] = {
&exynos4_clk_mout_apll,
&exynos4_clk_sclk_apll,
&exynos4_clk_mout_epll,
&exynos4_clk_mout_mpll,
&exynos4_clk_moutcore,
&exynos4_clk_coreclk,
&exynos4_clk_armclk,
&exynos4_clk_aclk_corem0,
&exynos4_clk_aclk_cores,
&exynos4_clk_aclk_corem1,
&exynos4_clk_periphclk,
&exynos4_clk_mout_corebus,
&exynos4_clk_sclk_dmc,
&exynos4_clk_aclk_cored,
&exynos4_clk_aclk_corep,
&exynos4_clk_aclk_acp,
&exynos4_clk_pclk_acp,
&exynos4_clk_vpllsrc,
&exynos4_clk_sclk_vpll,
&exynos4_clk_aclk_200,
&exynos4_clk_aclk_100,
&exynos4_clk_aclk_160,
&exynos4_clk_aclk_133,
&exynos4_clk_dout_mmc0,
&exynos4_clk_dout_mmc1,
&exynos4_clk_dout_mmc2,
&exynos4_clk_dout_mmc3,
&exynos4_clk_dout_mmc4,
&exynos4_clk_mout_mfc0,
&exynos4_clk_mout_mfc1,
};
static struct clk *exynos4_clk_cdev[] = {
&exynos4_clk_pdma0,
&exynos4_clk_pdma1,
&exynos4_clk_mdma1,
&exynos4_clk_fimd0,
};
static struct clksrc_clk *exynos4_clksrc_cdev[] = {
&exynos4_clk_sclk_uart0,
&exynos4_clk_sclk_uart1,
&exynos4_clk_sclk_uart2,
&exynos4_clk_sclk_uart3,
&exynos4_clk_sclk_mmc0,
&exynos4_clk_sclk_mmc1,
&exynos4_clk_sclk_mmc2,
&exynos4_clk_sclk_mmc3,
&exynos4_clk_sclk_spi0,
&exynos4_clk_sclk_spi1,
&exynos4_clk_sclk_spi2,
};
static struct clk_lookup exynos4_clk_lookup[] = {
CLKDEV_INIT("exynos4210-uart.0", "clk_uart_baud0", &exynos4_clk_sclk_uart0.clk),
CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk),
CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk),
CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk),
CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk),
CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk),
CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk),
CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk),
CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0),
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0),
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1),
CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos4_clk_mdma1),
CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &exynos4_clk_sclk_spi0.clk),
CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk0", &exynos4_clk_sclk_spi1.clk),
CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk0", &exynos4_clk_sclk_spi2.clk),
};
static int xtal_rate;
static unsigned long exynos4_fout_apll_get_rate(struct clk *clk)
{
if (soc_is_exynos4210())
return s5p_get_pll45xx(xtal_rate, __raw_readl(EXYNOS4_APLL_CON0),
pll_4508);
else if (soc_is_exynos4212() || soc_is_exynos4412())
return s5p_get_pll35xx(xtal_rate, __raw_readl(EXYNOS4_APLL_CON0));
else
return 0;
}
static struct clk_ops exynos4_fout_apll_ops = {
.get_rate = exynos4_fout_apll_get_rate,
};
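/*
 * VPLL divider table; columns match the shifts applied in
 * exynos4_vpll_set_rate() below:
 * { rate, PDIV, MDIV, SDIV, KDIV, MFR, MRR, VSEL (bit 27) }
 */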
static u32 exynos4_vpll_div[][8] = {
{ 54000000, 3, 53, 3, 1024, 0, 17, 0 },
{ 108000000, 3, 53, 2, 1024, 0, 17, 0 },
};
static unsigned long exynos4_vpll_get_rate(struct clk *clk)
{
return clk->rate;
}
static int exynos4_vpll_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int vpll_con0, vpll_con1 = 0;
unsigned int i;
/* Return if nothing changed */
if (clk->rate == rate)
return 0;
vpll_con0 = __raw_readl(EXYNOS4_VPLL_CON0);
vpll_con0 &= ~(0x1 << 27 | \
PLL90XX_MDIV_MASK << PLL46XX_MDIV_SHIFT | \
PLL90XX_PDIV_MASK << PLL46XX_PDIV_SHIFT | \
PLL90XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
vpll_con1 = __raw_readl(EXYNOS4_VPLL_CON1);
vpll_con1 &= ~(PLL46XX_MRR_MASK << PLL46XX_MRR_SHIFT | \
PLL46XX_MFR_MASK << PLL46XX_MFR_SHIFT | \
PLL4650C_KDIV_MASK << PLL46XX_KDIV_SHIFT);
for (i = 0; i < ARRAY_SIZE(exynos4_vpll_div); i++) {
if (exynos4_vpll_div[i][0] == rate) {
vpll_con0 |= exynos4_vpll_div[i][1] << PLL46XX_PDIV_SHIFT;
vpll_con0 |= exynos4_vpll_div[i][2] << PLL46XX_MDIV_SHIFT;
vpll_con0 |= exynos4_vpll_div[i][3] << PLL46XX_SDIV_SHIFT;
vpll_con1 |= exynos4_vpll_div[i][4] << PLL46XX_KDIV_SHIFT;
vpll_con1 |= exynos4_vpll_div[i][5] << PLL46XX_MFR_SHIFT;
vpll_con1 |= exynos4_vpll_div[i][6] << PLL46XX_MRR_SHIFT;
vpll_con0 |= exynos4_vpll_div[i][7] << 27;
break;
}
}
if (i == ARRAY_SIZE(exynos4_vpll_div)) {
printk(KERN_ERR "%s: Invalid Clock VPLL Frequency\n",
__func__);
return -EINVAL;
}
__raw_writel(vpll_con0, EXYNOS4_VPLL_CON0);
__raw_writel(vpll_con1, EXYNOS4_VPLL_CON1);
/* Wait for VPLL lock (busy-waits with no timeout; the PLL is expected to lock quickly) */
while (!(__raw_readl(EXYNOS4_VPLL_CON0) & (1 << PLL46XX_LOCKED_SHIFT)))
continue;
clk->rate = rate;
return 0;
}
static struct clk_ops exynos4_vpll_ops = {
.get_rate = exynos4_vpll_get_rate,
.set_rate = exynos4_vpll_set_rate,
};
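/*
 * Read the PLL and derived bus-clock rates back from hardware and
 * propagate them into the generic clk_f/clk_h/clk_p rates.
 */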
void __init_or_cpufreq exynos4_setup_clocks(void)
{
struct clk *xtal_clk;
unsigned long apll = 0;
unsigned long mpll = 0;
unsigned long epll = 0;
unsigned long vpll = 0;
unsigned long vpllsrc;
unsigned long xtal;
unsigned long armclk;
unsigned long sclk_dmc;
unsigned long aclk_200;
unsigned long aclk_100;
unsigned long aclk_160;
unsigned long aclk_133;
unsigned int ptr;
printk(KERN_DEBUG "%s: registering clocks\n", __func__);
xtal_clk = clk_get(NULL, "xtal");
BUG_ON(IS_ERR(xtal_clk));
xtal = clk_get_rate(xtal_clk);
xtal_rate = xtal;
clk_put(xtal_clk);
printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal);
if (soc_is_exynos4210()) {
apll = s5p_get_pll45xx(xtal, __raw_readl(EXYNOS4_APLL_CON0),
pll_4508);
mpll = s5p_get_pll45xx(xtal, __raw_readl(EXYNOS4_MPLL_CON0),
pll_4508);
epll = s5p_get_pll46xx(xtal, __raw_readl(EXYNOS4_EPLL_CON0),
__raw_readl(EXYNOS4_EPLL_CON1), pll_4600);
vpllsrc = clk_get_rate(&exynos4_clk_vpllsrc.clk);
vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(EXYNOS4_VPLL_CON0),
__raw_readl(EXYNOS4_VPLL_CON1), pll_4650c);
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
apll = s5p_get_pll35xx(xtal, __raw_readl(EXYNOS4_APLL_CON0));
mpll = s5p_get_pll35xx(xtal, __raw_readl(EXYNOS4_MPLL_CON0));
epll = s5p_get_pll36xx(xtal, __raw_readl(EXYNOS4_EPLL_CON0),
__raw_readl(EXYNOS4_EPLL_CON1));
vpllsrc = clk_get_rate(&exynos4_clk_vpllsrc.clk);
vpll = s5p_get_pll36xx(vpllsrc, __raw_readl(EXYNOS4_VPLL_CON0),
__raw_readl(EXYNOS4_VPLL_CON1));
} else {
/* nothing */
}
clk_fout_apll.ops = &exynos4_fout_apll_ops;
clk_fout_mpll.rate = mpll;
clk_fout_epll.rate = epll;
clk_fout_vpll.ops = &exynos4_vpll_ops;
clk_fout_vpll.rate = vpll;
printk(KERN_INFO "EXYNOS4: PLL settings, A=%ld, M=%ld, E=%ld V=%ld",
apll, mpll, epll, vpll);
armclk = clk_get_rate(&exynos4_clk_armclk.clk);
sclk_dmc = clk_get_rate(&exynos4_clk_sclk_dmc.clk);
aclk_200 = clk_get_rate(&exynos4_clk_aclk_200.clk);
aclk_100 = clk_get_rate(&exynos4_clk_aclk_100.clk);
aclk_160 = clk_get_rate(&exynos4_clk_aclk_160.clk);
aclk_133 = clk_get_rate(&exynos4_clk_aclk_133.clk);
printk(KERN_INFO "EXYNOS4: ARMCLK=%ld, DMC=%ld, ACLK200=%ld\n"
"ACLK100=%ld, ACLK160=%ld, ACLK133=%ld\n",
armclk, sclk_dmc, aclk_200,
aclk_100, aclk_160, aclk_133);
clk_f.rate = armclk;
clk_h.rate = sclk_dmc;
clk_p.rate = aclk_100;
for (ptr = 0; ptr < ARRAY_SIZE(exynos4_clksrcs); ptr++)
s3c_set_clksrc(&exynos4_clksrcs[ptr], true);
}
static struct clk *exynos4_clks[] __initdata = {
&exynos4_clk_sclk_hdmi27m,
&exynos4_clk_sclk_hdmiphy,
&exynos4_clk_sclk_usbphy0,
&exynos4_clk_sclk_usbphy1,
};
#ifdef CONFIG_PM_SLEEP
static int exynos4_clock_suspend(void)
{
s3c_pm_do_save(exynos4_clock_save, ARRAY_SIZE(exynos4_clock_save));
return 0;
}
static void exynos4_clock_resume(void)
{
s3c_pm_do_restore_core(exynos4_clock_save, ARRAY_SIZE(exynos4_clock_save));
}
#else
#define exynos4_clock_suspend NULL
#define exynos4_clock_resume NULL
#endif
static struct syscore_ops exynos4_clock_syscore_ops = {
.suspend = exynos4_clock_suspend,
.resume = exynos4_clock_resume,
};
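/*
 * Registration order below: always-on clocks stay enabled, while the
 * clkdev-only clocks and the "off" set are registered and then
 * explicitly gated off.
 */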
void __init exynos4_register_clocks(void)
{
int ptr;
s3c24xx_register_clocks(exynos4_clks, ARRAY_SIZE(exynos4_clks));
for (ptr = 0; ptr < ARRAY_SIZE(exynos4_sysclks); ptr++)
s3c_register_clksrc(exynos4_sysclks[ptr], 1);
for (ptr = 0; ptr < ARRAY_SIZE(exynos4_sclk_tv); ptr++)
s3c_register_clksrc(exynos4_sclk_tv[ptr], 1);
for (ptr = 0; ptr < ARRAY_SIZE(exynos4_clksrc_cdev); ptr++)
s3c_register_clksrc(exynos4_clksrc_cdev[ptr], 1);
s3c_register_clksrc(exynos4_clksrcs, ARRAY_SIZE(exynos4_clksrcs));
s3c_register_clocks(exynos4_init_clocks_on, ARRAY_SIZE(exynos4_init_clocks_on));
s3c24xx_register_clocks(exynos4_clk_cdev, ARRAY_SIZE(exynos4_clk_cdev));
for (ptr = 0; ptr < ARRAY_SIZE(exynos4_clk_cdev); ptr++)
s3c_disable_clocks(exynos4_clk_cdev[ptr], 1);
s3c_register_clocks(exynos4_init_clocks_off, ARRAY_SIZE(exynos4_init_clocks_off));
s3c_disable_clocks(exynos4_init_clocks_off, ARRAY_SIZE(exynos4_init_clocks_off));
clkdev_add_table(exynos4_clk_lookup, ARRAY_SIZE(exynos4_clk_lookup));
register_syscore_ops(&exynos4_clock_syscore_ops);
s3c24xx_register_clock(&dummy_apb_pclk);
s3c_pwmclk_init();
}
| gpl-2.0 |
percy-g2/android_kernel_sony_msm8226 | drivers/gpu/drm/radeon/radeon_asic.c | 4801 | 49248 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
/*
* Register accessor functions.
*/
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
BUG_ON(1);
return 0;
}
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
reg, v);
BUG_ON(1);
}
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
rdev->mc_wreg = &radeon_invalid_wreg;
rdev->pll_rreg = &radeon_invalid_rreg;
rdev->pll_wreg = &radeon_invalid_wreg;
rdev->pciep_rreg = &radeon_invalid_rreg;
rdev->pciep_wreg = &radeon_invalid_wreg;
/* Don't change order as we are overriding accessors. */
if (rdev->family < CHIP_RV515) {
rdev->pcie_reg_mask = 0xff;
} else {
rdev->pcie_reg_mask = 0x7ff;
}
/* FIXME: cutoff not fully verified; assume the r100 PLL accessors apply to everything up to R580 */
if (rdev->family <= CHIP_R580) {
rdev->pll_rreg = &r100_pll_rreg;
rdev->pll_wreg = &r100_pll_wreg;
}
if (rdev->family >= CHIP_R420) {
rdev->mc_rreg = &r420_mc_rreg;
rdev->mc_wreg = &r420_mc_wreg;
}
if (rdev->family >= CHIP_RV515) {
rdev->mc_rreg = &rv515_mc_rreg;
rdev->mc_wreg = &rv515_mc_wreg;
}
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
rdev->mc_rreg = &rs400_mc_rreg;
rdev->mc_wreg = &rs400_mc_wreg;
}
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
rdev->mc_rreg = &rs690_mc_rreg;
rdev->mc_wreg = &rs690_mc_wreg;
}
if (rdev->family == CHIP_RS600) {
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
}
/* helper to disable agp */
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
if (rdev->family >= CHIP_R600) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
} else if (rdev->family >= CHIP_RV515 ||
rdev->family == CHIP_RV380 ||
rdev->family == CHIP_RV410 ||
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
/*
 * ASIC function-pointer tables: one struct radeon_asic per GPU family,
 * dispatched through rdev->asic.
 */
static struct radeon_asic r100_asic = {
.init = &r100_init,
.fini = &r100_fini,
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
.pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
.post_page_flip = &r100_post_page_flip,
},
};
static struct radeon_asic r200_asic = {
.init = &r100_init,
.fini = &r100_fini,
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
.pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
.post_page_flip = &r100_post_page_flip,
},
};
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
.pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
.post_page_flip = &r100_post_page_flip,
},
};
static struct radeon_asic r300_asic_pcie = {
.init = &r300_init,
.fini = &r300_fini,
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
.pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
.post_page_flip = &r100_post_page_flip,
},
};
static struct radeon_asic r420_asic = {
.init = &r420_init,
.fini = &r420_fini,
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
.pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
.post_page_flip = &r100_post_page_flip,
},
};
static struct radeon_asic rs400_asic = {
.init = &rs400_init,
.fini = &rs400_fini,
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
.pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
.post_page_flip = &r100_post_page_flip,
},
};
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
.fini = &rs600_fini,
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.bandwidth_update = &rs600_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic rs690_asic = {
.init = &rs690_init,
.fini = &rs690_fini,
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rs690_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r200_copy_dma,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic rv515_asic = {
.init = &rv515_init,
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rv515_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic r520_asic = {
.init = &r520_init,
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &r600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic rs780_asic = {
.init = &r600_init,
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.bandwidth_update = &rs690_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &r600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &rs780_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &rv770_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rv770_page_flip,
.post_page_flip = &rs600_post_page_flip,
},
};
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
},
};
static struct radeon_asic sumo_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
},
};
static struct radeon_asic btc_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
},
};
static const struct radeon_vm_funcs cayman_vm_funcs = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.bind = &cayman_vm_bind,
.unbind = &cayman_vm_unbind,
.tlb_flush = &cayman_vm_tlb_flush,
.page_flags = &cayman_vm_page_flags,
.set_page = &cayman_vm_set_page,
};
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
},
};
static struct radeon_asic trinity_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
},
};
static const struct radeon_vm_funcs si_vm_funcs = {
.init = &si_vm_init,
.fini = &si_vm_fini,
.bind = &si_vm_bind,
.unbind = &si_vm_unbind,
.tlb_flush = &si_vm_tlb_flush,
.page_flags = &cayman_vm_page_flags,
.set_page = &cayman_vm_set_page,
};
static struct radeon_asic si_asic = {
.init = &si_init,
.fini = &si_fini,
.suspend = &si_suspend,
.resume = &si_resume,
.gpu_is_lockup = &si_gpu_is_lockup,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &si_ring_ib_execute,
.ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
.ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
.ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
}
},
.irq = {
.set = &si_irq_set,
.process = &si_irq_process,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
},
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = NULL,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
.post_page_flip = &evergreen_post_page_flip,
},
};
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
/* set the number of crtcs */
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else
rdev->num_crtc = 2;
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
case CHIP_RS100:
case CHIP_RV200:
case CHIP_RS200:
rdev->asic = &r100_asic;
break;
case CHIP_R200:
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
rdev->asic = &r200_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
if (rdev->flags & RADEON_IS_PCIE)
rdev->asic = &r300_asic_pcie;
else
rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
rdev->asic = &r420_asic;
/* handle macs */
if (rdev->bios == NULL) {
rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->pm.set_memory_clock = NULL;
}
break;
case CHIP_RS400:
case CHIP_RS480:
rdev->asic = &rs400_asic;
break;
case CHIP_RS600:
rdev->asic = &rs600_asic;
break;
case CHIP_RS690:
case CHIP_RS740:
rdev->asic = &rs690_asic;
break;
case CHIP_RV515:
rdev->asic = &rv515_asic;
break;
case CHIP_R520:
case CHIP_RV530:
case CHIP_RV560:
case CHIP_RV570:
case CHIP_R580:
rdev->asic = &r520_asic;
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
rdev->asic = &r600_asic;
break;
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
/* set num crtcs */
if (rdev->family == CHIP_CEDAR)
rdev->num_crtc = 4;
else
rdev->num_crtc = 6;
rdev->asic = &evergreen_asic;
break;
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
rdev->asic = &sumo_asic;
break;
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
/* set num crtcs */
if (rdev->family == CHIP_CAICOS)
rdev->num_crtc = 4;
else
rdev->num_crtc = 6;
rdev->asic = &btc_asic;
break;
case CHIP_CAYMAN:
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
case CHIP_ARUBA:
rdev->asic = &trinity_asic;
/* set num crtcs */
rdev->num_crtc = 4;
rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
rdev->asic = &si_asic;
/* set num crtcs */
rdev->num_crtc = 6;
rdev->vm_manager.funcs = &si_vm_funcs;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->pm.get_memory_clock = NULL;
rdev->asic->pm.set_memory_clock = NULL;
}
return 0;
}
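/*
 * Example: a CHIP_CAYMAN device ends up with rdev->asic = &cayman_asic,
 * six crtcs and the cayman_vm_funcs VM callbacks; any IGP part
 * additionally gets its memory clock callbacks cleared above.
 */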
| gpl-2.0 |
hroark13/android_kernel_zte_draconis | drivers/s390/char/tty3270.c | 4801 | 42856 | /*
* drivers/s390/char/tty3270.c
* IBM/3270 Driver - tty functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/uaccess.h>
#include "raw3270.h"
#include "tty3270.h"
#include "keyboard.h"
#define TTY3270_CHAR_BUF_SIZE 256
#define TTY3270_OUTPUT_BUFFER_SIZE 1024
#define TTY3270_STRING_PAGES 5
struct tty_driver *tty3270_driver;
static int tty3270_max_index;
static struct raw3270_fn tty3270_fn;
struct tty3270_cell {
unsigned char character;
unsigned char highlight;
unsigned char f_color;
};
struct tty3270_line {
struct tty3270_cell *cells;
int len;
};
#define ESCAPE_NPAR 8
/*
* The main tty view data structure.
* FIXME:
* 1) describe line orientation & lines list concept against screen
* 2) describe conversion of screen to lines
* 3) describe line format.
*/
struct tty3270 {
struct raw3270_view view;
struct tty_struct *tty; /* Pointer to tty structure */
void **freemem_pages; /* Array of pages used for freemem. */
struct list_head freemem; /* List of free memory for strings. */
/* Output stuff. */
struct list_head lines; /* List of lines. */
struct list_head update; /* List of lines to update. */
unsigned char wcc; /* Write control character. */
int nr_lines; /* # lines in list. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
struct string *status; /* Lower right of display. */
struct raw3270_request *write; /* Single write request. */
struct timer_list timer; /* Output delay timer. */
/* Current tty screen. */
unsigned int cx, cy; /* Current output position. */
unsigned int highlight; /* Blink/reverse/underscore */
unsigned int f_color; /* Foreground color */
struct tty3270_line *screen;
/* Input stuff. */
struct string *prompt; /* Output string for input area. */
struct string *input; /* Input string for read request. */
struct raw3270_request *read; /* Single read request. */
struct raw3270_request *kreset; /* Single keyboard reset request. */
unsigned char inattr; /* Visible/invisible input. */
int throttle, attn; /* tty throttle/unthrottle. */
struct tasklet_struct readlet; /* Tasklet to issue read request. */
struct kbd_data *kbd; /* key_maps stuff. */
/* Escape sequence parsing. */
int esc_state, esc_ques, esc_npar;
int esc_par[ESCAPE_NPAR];
unsigned int saved_cx, saved_cy;
unsigned int saved_highlight, saved_f_color;
/* Command recalling. */
struct list_head rcl_lines; /* List of recallable lines. */
struct list_head *rcl_walk; /* Point in rcl_lines list. */
int rcl_nr, rcl_max; /* Number/max number of rcl_lines. */
/* Character array for put_char/flush_chars. */
unsigned int char_count;
char char_buf[TTY3270_CHAR_BUF_SIZE];
};
/* tty3270->update_flags. See tty3270_update for details. */
#define TTY_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
#define TTY_UPDATE_INPUT 4 /* Update input line. */
#define TTY_UPDATE_STATUS 8 /* Update status line. */
#define TTY_UPDATE_ALL 16 /* Recreate screen. */
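/*
 * Example: when a write fails, tty3270_write_callback sets
 * update_flags to TTY_UPDATE_ALL; on the next timer tick
 * tty3270_update expands that into ERASE|LIST|INPUT|STATUS and
 * redraws the whole screen.
 */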
static void tty3270_update(struct tty3270 *);
/*
* Setup timeout for a device. On timeout trigger an update.
*/
static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
if (expires == 0)
del_timer(&tp->timer);
else
mod_timer(&tp->timer, jiffies + expires);
}
/*
 * The input line is made up of the last two lines of the screen.
*/
static void
tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
{
struct string *line;
unsigned int off;
line = tp->prompt;
if (count != 0)
line->string[5] = TF_INMDT;
else
line->string[5] = tp->inattr;
if (count > tp->view.cols * 2 - 11)
count = tp->view.cols * 2 - 11;
memcpy(line->string + 6, input, count);
line->string[6 + count] = TO_IC;
/* Clear to end of input line. */
if (count < tp->view.cols * 2 - 11) {
line->string[7 + count] = TO_RA;
line->string[10 + count] = 0;
off = tp->view.cols * tp->view.rows - 9;
raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
line->len = 11 + count;
} else
line->len = 7 + count;
tp->update_flags |= TTY_UPDATE_INPUT;
}
static void
tty3270_create_prompt(struct tty3270 *tp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
/* empty input string */
TO_IC, TO_RA, 0, 0, 0 };
struct string *line;
unsigned int offset;
line = alloc_string(&tp->freemem,
sizeof(blueprint) + tp->view.cols * 2 - 9);
tp->prompt = line;
tp->inattr = TF_INPUT;
/* Copy blueprint to status line */
memcpy(line->string, blueprint, sizeof(blueprint));
line->len = sizeof(blueprint);
/* Set output offsets. */
offset = tp->view.cols * (tp->view.rows - 2);
raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
offset = tp->view.cols * tp->view.rows - 9;
raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
/* Allocate input string for reading. */
tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
}
/*
* The status line is the last line of the screen. It shows the string
* "Running"/"Holding" in the lower right corner of the screen.
*/
static void
tty3270_update_status(struct tty3270 * tp)
{
char *str;
str = (tp->nr_up != 0) ? "History" : "Running";
memcpy(tp->status->string + 8, str, 7);
codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
tp->update_flags |= TTY_UPDATE_STATUS;
}
static void
tty3270_create_status(struct tty3270 * tp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
TAC_RESET };
struct string *line;
unsigned int offset;
line = alloc_string(&tp->freemem,sizeof(blueprint));
tp->status = line;
/* Copy blueprint to status line */
memcpy(line->string, blueprint, sizeof(blueprint));
/* Set address to start of status string (= last 9 characters). */
offset = tp->view.cols * tp->view.rows - 9;
raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
}
/*
* Set output offsets to 3270 datastream fragment of a tty string.
* (TO_SBA offset at the start and TO_RA offset at the end of the string)
*/
static void
tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
{
unsigned char *cp;
raw3270_buffer_address(tp->view.dev, line->string + 1,
tp->view.cols * nr);
cp = line->string + line->len - 4;
if (*cp == TO_RA)
raw3270_buffer_address(tp->view.dev, cp + 1,
tp->view.cols * (nr + 1));
}
/*
* Rebuild update list to print all lines.
*/
static void
tty3270_rebuild_update(struct tty3270 *tp)
{
struct string *s, *n;
int line, nr_up;
/*
* Throw away update list and create a new one,
* containing all lines that will fit on the screen.
*/
list_for_each_entry_safe(s, n, &tp->update, update)
list_del_init(&s->update);
line = tp->view.rows - 3;
nr_up = tp->nr_up;
list_for_each_entry_reverse(s, &tp->lines, list) {
if (nr_up > 0) {
nr_up--;
continue;
}
tty3270_update_string(tp, s, line);
list_add(&s->update, &tp->update);
if (--line < 0)
break;
}
tp->update_flags |= TTY_UPDATE_LIST;
}
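/*
 * Example: with view.rows == 24 the update list is filled from screen
 * line 21 upwards; a non-zero nr_up first skips that many of the
 * newest lines, so the visible window moves back into history.
 */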
/*
* Alloc string for size bytes. If there is not enough room in
* freemem, free strings until there is room.
*/
static struct string *
tty3270_alloc_string(struct tty3270 *tp, size_t size)
{
struct string *s, *n;
s = alloc_string(&tp->freemem, size);
if (s)
return s;
list_for_each_entry_safe(s, n, &tp->lines, list) {
BUG_ON(tp->nr_lines <= tp->view.rows - 2);
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
tp->nr_lines--;
if (free_string(&tp->freemem, s) >= size)
break;
}
s = alloc_string(&tp->freemem, size);
BUG_ON(!s);
if (tp->nr_up != 0 &&
tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
tp->nr_up = tp->nr_lines - tp->view.rows + 2;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
}
return s;
}
/*
* Add an empty line to the list.
*/
static void
tty3270_blank_line(struct tty3270 *tp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
struct string *s;
s = tty3270_alloc_string(tp, sizeof(blueprint));
memcpy(s->string, blueprint, sizeof(blueprint));
s->len = sizeof(blueprint);
list_add_tail(&s->list, &tp->lines);
tp->nr_lines++;
if (tp->nr_up != 0)
tp->nr_up++;
}
/*
* Write request completion callback.
*/
static void
tty3270_write_callback(struct raw3270_request *rq, void *data)
{
struct tty3270 *tp;
tp = (struct tty3270 *) rq->view;
if (rq->rc != 0) {
/* Write wasn't successful. Refresh all. */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
raw3270_request_reset(rq);
xchg(&tp->write, rq);
}
/*
* Update 3270 display.
*/
static void
tty3270_update(struct tty3270 *tp)
{
static char invalid_sba[2] = { 0xff, 0xff };
struct raw3270_request *wrq;
unsigned long updated;
struct string *s, *n;
char *sba, *str;
int rc, len;
wrq = xchg(&tp->write, 0);
if (!wrq) {
tty3270_set_timer(tp, 1);
return;
}
spin_lock(&tp->view.lock);
updated = 0;
if (tp->update_flags & TTY_UPDATE_ALL) {
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
}
if (tp->update_flags & TTY_UPDATE_ERASE) {
/* Use erase write alternate to erase display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
updated |= TTY_UPDATE_ERASE;
} else
raw3270_request_set_cmd(wrq, TC_WRITE);
raw3270_request_add_data(wrq, &tp->wcc, 1);
tp->wcc = TW_NONE;
/*
* Update status line.
*/
if (tp->update_flags & TTY_UPDATE_STATUS)
if (raw3270_request_add_data(wrq, tp->status->string,
tp->status->len) == 0)
updated |= TTY_UPDATE_STATUS;
/*
* Write input line.
*/
if (tp->update_flags & TTY_UPDATE_INPUT)
if (raw3270_request_add_data(wrq, tp->prompt->string,
tp->prompt->len) == 0)
updated |= TTY_UPDATE_INPUT;
sba = invalid_sba;
if (tp->update_flags & TTY_UPDATE_LIST) {
/* Write strings in the update list to the screen. */
list_for_each_entry_safe(s, n, &tp->update, update) {
str = s->string;
len = s->len;
/*
* Skip TO_SBA at the start of the string if the
* last output position matches the start address
* of this line.
*/
if (s->string[1] == sba[0] && s->string[2] == sba[1])
str += 3, len -= 3;
if (raw3270_request_add_data(wrq, str, len) != 0)
break;
list_del_init(&s->update);
sba = s->string + s->len - 3;
}
if (list_empty(&tp->update))
updated |= TTY_UPDATE_LIST;
}
wrq->callback = tty3270_write_callback;
rc = raw3270_start(&tp->view, wrq);
if (rc == 0) {
tp->update_flags &= ~updated;
if (tp->update_flags)
tty3270_set_timer(tp, 1);
} else {
raw3270_request_reset(wrq);
xchg(&tp->write, wrq);
}
spin_unlock(&tp->view.lock);
}
/*
* Command recalling.
*/
static void
tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
{
struct string *s;
tp->rcl_walk = NULL;
if (len <= 0)
return;
if (tp->rcl_nr >= tp->rcl_max) {
s = list_entry(tp->rcl_lines.next, struct string, list);
list_del(&s->list);
free_string(&tp->freemem, s);
tp->rcl_nr--;
}
s = tty3270_alloc_string(tp, len);
memcpy(s->string, input, len);
list_add_tail(&s->list, &tp->rcl_lines);
tp->rcl_nr++;
}
static void
tty3270_rcl_backward(struct kbd_data *kbd)
{
struct tty3270 *tp;
struct string *s;
tp = kbd->tty->driver_data;
spin_lock_bh(&tp->view.lock);
if (tp->inattr == TF_INPUT) {
if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
tp->rcl_walk = tp->rcl_walk->prev;
else if (!list_empty(&tp->rcl_lines))
tp->rcl_walk = tp->rcl_lines.prev;
		if (tp->rcl_walk) {
			s = list_entry(tp->rcl_walk, struct string, list);
tty3270_update_prompt(tp, s->string, s->len);
} else
tty3270_update_prompt(tp, NULL, 0);
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Deactivate tty view.
*/
static void
tty3270_exit_tty(struct kbd_data *kbd)
{
struct tty3270 *tp;
tp = kbd->tty->driver_data;
raw3270_deactivate_view(&tp->view);
}
/*
* Scroll forward in history.
*/
static void
tty3270_scroll_forward(struct kbd_data *kbd)
{
struct tty3270 *tp;
int nr_up;
tp = kbd->tty->driver_data;
spin_lock_bh(&tp->view.lock);
nr_up = tp->nr_up - tp->view.rows + 2;
if (nr_up < 0)
nr_up = 0;
if (nr_up != tp->nr_up) {
tp->nr_up = nr_up;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Scroll backward in history.
*/
static void
tty3270_scroll_backward(struct kbd_data *kbd)
{
struct tty3270 *tp;
int nr_up;
tp = kbd->tty->driver_data;
spin_lock_bh(&tp->view.lock);
nr_up = tp->nr_up + tp->view.rows - 2;
if (nr_up + tp->view.rows - 2 > tp->nr_lines)
nr_up = tp->nr_lines - tp->view.rows + 2;
if (nr_up != tp->nr_up) {
tp->nr_up = nr_up;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Pass input line to tty.
*/
static void
tty3270_read_tasklet(struct raw3270_request *rrq)
{
static char kreset_data = TW_KR;
struct tty3270 *tp;
char *input;
int len;
tp = (struct tty3270 *) rrq->view;
spin_lock_bh(&tp->view.lock);
/*
* Two AID keys are special: For 0x7d (enter) the input line
* has to be emitted to the tty and for 0x6d the screen
* needs to be redrawn.
*/
input = NULL;
len = 0;
if (tp->input->string[0] == 0x7d) {
/* Enter: write input to tty. */
input = tp->input->string + 6;
len = tp->input->len - 6 - rrq->rescnt;
if (tp->inattr != TF_INPUTN)
tty3270_rcl_add(tp, input, len);
if (tp->nr_up > 0) {
tp->nr_up = 0;
tty3270_rebuild_update(tp);
tty3270_update_status(tp);
}
/* Clear input area. */
tty3270_update_prompt(tp, NULL, 0);
tty3270_set_timer(tp, 1);
} else if (tp->input->string[0] == 0x6d) {
/* Display has been cleared. Redraw. */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
spin_unlock_bh(&tp->view.lock);
/* Start keyboard reset command. */
raw3270_request_reset(tp->kreset);
raw3270_request_set_cmd(tp->kreset, TC_WRITE);
raw3270_request_add_data(tp->kreset, &kreset_data, 1);
raw3270_start(&tp->view, tp->kreset);
/* Emit input string. */
if (tp->tty) {
while (len-- > 0)
kbd_keycode(tp->kbd, *input++);
/* Emit keycode for AID byte. */
kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
}
raw3270_request_reset(rrq);
xchg(&tp->read, rrq);
raw3270_put_view(&tp->view);
}
/*
* Read request completion callback.
*/
static void
tty3270_read_callback(struct raw3270_request *rq, void *data)
{
raw3270_get_view(rq->view);
/* Schedule tasklet to pass input to tty. */
tasklet_schedule(&((struct tty3270 *) rq->view)->readlet);
}
/*
* Issue a read request. Call with device lock.
*/
static void
tty3270_issue_read(struct tty3270 *tp, int lock)
{
struct raw3270_request *rrq;
int rc;
rrq = xchg(&tp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
rrq->callback = tty3270_read_callback;
rrq->callback_data = tp;
raw3270_request_set_cmd(rrq, TC_READMOD);
raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
/* Issue the read modified request. */
if (lock) {
rc = raw3270_start(&tp->view, rrq);
} else
rc = raw3270_start_irq(&tp->view, rrq);
if (rc) {
raw3270_request_reset(rrq);
xchg(&tp->read, rrq);
}
}
/*
* Switch to the tty view.
*/
static int
tty3270_activate(struct raw3270_view *view)
{
struct tty3270 *tp;
tp = (struct tty3270 *) view;
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
return 0;
}
static void
tty3270_deactivate(struct raw3270_view *view)
{
struct tty3270 *tp;
tp = (struct tty3270 *) view;
del_timer(&tp->timer);
}
static int
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
if (!tp->throttle)
tty3270_issue_read(tp, 0);
else
tp->attn = 1;
}
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
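/*
 * Example: an unsolicited ATTN (rq == NULL) triggers an immediate
 * read via tty3270_issue_read unless the tty is throttled; the
 * completed read is then handed to tty3270_read_tasklet outside
 * interrupt context.
 */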
/*
* Allocate tty3270 structure.
*/
static struct tty3270 *
tty3270_alloc_view(void)
{
struct tty3270 *tp;
int pages;
tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL);
if (!tp)
goto out_err;
tp->freemem_pages =
kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL);
if (!tp->freemem_pages)
goto out_tp;
INIT_LIST_HEAD(&tp->freemem);
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
tp->freemem_pages[pages] = (void *)
__get_free_pages(GFP_KERNEL|GFP_DMA, 0);
if (!tp->freemem_pages[pages])
goto out_pages;
add_string_memory(&tp->freemem,
tp->freemem_pages[pages], PAGE_SIZE);
}
tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
if (IS_ERR(tp->write))
goto out_pages;
tp->read = raw3270_request_alloc(0);
if (IS_ERR(tp->read))
goto out_write;
tp->kreset = raw3270_request_alloc(1);
if (IS_ERR(tp->kreset))
goto out_read;
tp->kbd = kbd_alloc();
if (!tp->kbd)
goto out_reset;
return tp;
out_reset:
raw3270_request_free(tp->kreset);
out_read:
raw3270_request_free(tp->read);
out_write:
raw3270_request_free(tp->write);
out_pages:
while (pages--)
free_pages((unsigned long) tp->freemem_pages[pages], 0);
kfree(tp->freemem_pages);
out_tp:
kfree(tp);
out_err:
return ERR_PTR(-ENOMEM);
}
/*
* Free tty3270 structure.
*/
static void
tty3270_free_view(struct tty3270 *tp)
{
int pages;
del_timer_sync(&tp->timer);
kbd_free(tp->kbd);
raw3270_request_free(tp->kreset);
raw3270_request_free(tp->read);
raw3270_request_free(tp->write);
for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
free_pages((unsigned long) tp->freemem_pages[pages], 0);
kfree(tp->freemem_pages);
kfree(tp);
}
/*
* Allocate tty3270 screen.
*/
static int
tty3270_alloc_screen(struct tty3270 *tp)
{
unsigned long size;
int lines;
size = sizeof(struct tty3270_line) * (tp->view.rows - 2);
tp->screen = kzalloc(size, GFP_KERNEL);
if (!tp->screen)
goto out_err;
for (lines = 0; lines < tp->view.rows - 2; lines++) {
size = sizeof(struct tty3270_cell) * tp->view.cols;
tp->screen[lines].cells = kzalloc(size, GFP_KERNEL);
if (!tp->screen[lines].cells)
goto out_screen;
}
return 0;
out_screen:
while (lines--)
kfree(tp->screen[lines].cells);
kfree(tp->screen);
out_err:
return -ENOMEM;
}
/*
* Free tty3270 screen.
*/
static void
tty3270_free_screen(struct tty3270 *tp)
{
int lines;
for (lines = 0; lines < tp->view.rows - 2; lines++)
kfree(tp->screen[lines].cells);
kfree(tp->screen);
}
/*
* Unlink tty3270 data structure from tty.
*/
static void
tty3270_release(struct raw3270_view *view)
{
struct tty3270 *tp;
struct tty_struct *tty;
tp = (struct tty3270 *) view;
tty = tp->tty;
if (tty) {
tty->driver_data = NULL;
tp->tty = tp->kbd->tty = NULL;
tty_hangup(tty);
raw3270_put_view(&tp->view);
}
}
/*
* Free tty3270 data structure
*/
static void
tty3270_free(struct raw3270_view *view)
{
tty3270_free_screen((struct tty3270 *) view);
tty3270_free_view((struct tty3270 *) view);
}
/*
* Delayed freeing of tty3270 views.
*/
static void
tty3270_del_views(void)
{
struct tty3270 *tp;
int i;
for (i = 0; i < tty3270_max_index; i++) {
tp = (struct tty3270 *)
raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
if (!IS_ERR(tp))
raw3270_del_view(&tp->view);
}
}
static struct raw3270_fn tty3270_fn = {
.activate = tty3270_activate,
.deactivate = tty3270_deactivate,
.intv = (void *) tty3270_irq,
.release = tty3270_release,
.free = tty3270_free
};
/*
* This routine is called whenever a 3270 tty is opened.
*/
static int
tty3270_open(struct tty_struct *tty, struct file * filp)
{
struct tty3270 *tp;
int i, rc;
if (tty->count > 1)
return 0;
/* Check if the tty3270 is already there. */
tp = (struct tty3270 *)
raw3270_find_view(&tty3270_fn,
tty->index + RAW3270_FIRSTMINOR);
if (!IS_ERR(tp)) {
tty->driver_data = tp;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tty->low_latency = 0;
tp->tty = tty;
tp->kbd->tty = tty;
tp->inattr = TF_INPUT;
return 0;
}
if (tty3270_max_index < tty->index + 1)
tty3270_max_index = tty->index + 1;
/* Quick exit if there is no device for tty->index. */
if (PTR_ERR(tp) == -ENODEV)
return -ENODEV;
/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);
INIT_LIST_HEAD(&tp->lines);
INIT_LIST_HEAD(&tp->update);
INIT_LIST_HEAD(&tp->rcl_lines);
tp->rcl_max = 20;
setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
(unsigned long) tp);
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
tty->index + RAW3270_FIRSTMINOR);
if (rc) {
tty3270_free_view(tp);
return rc;
}
rc = tty3270_alloc_screen(tp);
if (rc) {
raw3270_put_view(&tp->view);
raw3270_del_view(&tp->view);
return rc;
}
tp->tty = tty;
tty->low_latency = 0;
tty->driver_data = tp;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tty3270_create_prompt(tp);
tty3270_create_status(tp);
tty3270_update_status(tp);
/* Create blank line for every line in the tty output area. */
for (i = 0; i < tp->view.rows - 2; i++)
tty3270_blank_line(tp);
tp->kbd->tty = tty;
tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
kbd_ascebc(tp->kbd, tp->view.ascebc);
raw3270_activate_view(&tp->view);
return 0;
}
/*
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
static void
tty3270_close(struct tty_struct *tty, struct file * filp)
{
struct tty3270 *tp;
if (tty->count > 1)
return;
tp = (struct tty3270 *) tty->driver_data;
if (tp) {
tty->driver_data = NULL;
tp->tty = tp->kbd->tty = NULL;
raw3270_put_view(&tp->view);
}
}
/*
* We always have room.
*/
static int
tty3270_write_room(struct tty_struct *tty)
{
return INT_MAX;
}
/*
* Insert character into the screen at the current position with the
* current color and highlight. This function does NOT do cursor movement.
*/
static void tty3270_put_character(struct tty3270 *tp, char ch)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
line = tp->screen + tp->cy;
if (line->len <= tp->cx) {
while (line->len < tp->cx) {
cell = line->cells + line->len;
cell->character = tp->view.ascebc[' '];
cell->highlight = tp->highlight;
cell->f_color = tp->f_color;
line->len++;
}
line->len++;
}
cell = line->cells + tp->cx;
cell->character = tp->view.ascebc[(unsigned int) ch];
cell->highlight = tp->highlight;
cell->f_color = tp->f_color;
}
/*
* Convert a tty3270_line to a 3270 data fragment usable for output.
*/
static void
tty3270_convert_line(struct tty3270 *tp, int line_nr)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
struct string *s, *n;
unsigned char highlight;
unsigned char f_color;
char *cp;
int flen, i;
/* Determine how long the fragment will be. */
flen = 3; /* Prefix (TO_SBA). */
line = tp->screen + line_nr;
flen += line->len;
highlight = TAX_RESET;
f_color = TAC_RESET;
for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
if (cell->highlight != highlight) {
flen += 3; /* TO_SA to switch highlight. */
highlight = cell->highlight;
}
if (cell->f_color != f_color) {
flen += 3; /* TO_SA to switch color. */
f_color = cell->f_color;
}
}
if (highlight != TAX_RESET)
		flen += 3;	/* TO_SA to reset highlight. */
if (f_color != TAC_RESET)
flen += 3; /* TO_SA to reset color. */
if (line->len < tp->view.cols)
flen += 4; /* Postfix (TO_RA). */
/* Find the line in the list. */
i = tp->view.rows - 2 - line_nr;
list_for_each_entry_reverse(s, &tp->lines, list)
if (--i <= 0)
break;
/*
* Check if the line needs to get reallocated.
*/
if (s->len != flen) {
/* Reallocate string. */
n = tty3270_alloc_string(tp, flen);
list_add(&n->list, &s->list);
list_del_init(&s->list);
if (!list_empty(&s->update))
list_del_init(&s->update);
free_string(&tp->freemem, s);
s = n;
}
/* Write 3270 data fragment. */
cp = s->string;
*cp++ = TO_SBA;
*cp++ = 0;
*cp++ = 0;
highlight = TAX_RESET;
f_color = TAC_RESET;
for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
if (cell->highlight != highlight) {
*cp++ = TO_SA;
*cp++ = TAT_EXTHI;
*cp++ = cell->highlight;
highlight = cell->highlight;
}
if (cell->f_color != f_color) {
*cp++ = TO_SA;
*cp++ = TAT_COLOR;
*cp++ = cell->f_color;
f_color = cell->f_color;
}
*cp++ = cell->character;
}
if (highlight != TAX_RESET) {
*cp++ = TO_SA;
*cp++ = TAT_EXTHI;
*cp++ = TAX_RESET;
}
if (f_color != TAC_RESET) {
*cp++ = TO_SA;
*cp++ = TAT_COLOR;
*cp++ = TAC_RESET;
}
if (line->len < tp->view.cols) {
*cp++ = TO_RA;
*cp++ = 0;
*cp++ = 0;
*cp++ = 0;
}
if (tp->nr_up + line_nr < tp->view.rows - 2) {
/* Line is currently visible on screen. */
tty3270_update_string(tp, s, line_nr);
/* Add line to update list. */
if (list_empty(&s->update)) {
list_add_tail(&s->update, &tp->update);
tp->update_flags |= TTY_UPDATE_LIST;
}
}
}
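/*
 * Example: an 8 character line in the default highlight and color
 * converts to TO_SBA plus two address bytes, the 8 data bytes, then
 * TO_RA, two address bytes and a fill byte: flen == 3 + 8 + 4 == 15.
 */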
/*
* Do carriage return.
*/
static void
tty3270_cr(struct tty3270 *tp)
{
tp->cx = 0;
}
/*
* Do line feed.
*/
static void
tty3270_lf(struct tty3270 *tp)
{
struct tty3270_line temp;
int i;
tty3270_convert_line(tp, tp->cy);
if (tp->cy < tp->view.rows - 3) {
tp->cy++;
return;
}
/* Last line just filled up. Add new, blank line. */
tty3270_blank_line(tp);
temp = tp->screen[0];
temp.len = 0;
for (i = 0; i < tp->view.rows - 3; i++)
tp->screen[i] = tp->screen[i+1];
tp->screen[tp->view.rows - 3] = temp;
tty3270_rebuild_update(tp);
}
static void
tty3270_ri(struct tty3270 *tp)
{
if (tp->cy > 0) {
tty3270_convert_line(tp, tp->cy);
tp->cy--;
}
}
/*
* Insert characters at current position.
*/
static void
tty3270_insert_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
int k;
line = tp->screen + tp->cy;
while (line->len < tp->cx) {
line->cells[line->len].character = tp->view.ascebc[' '];
line->cells[line->len].highlight = TAX_RESET;
line->cells[line->len].f_color = TAC_RESET;
line->len++;
}
if (n > tp->view.cols - tp->cx)
n = tp->view.cols - tp->cx;
k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
while (k--)
line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
line->len += n;
if (line->len > tp->view.cols)
line->len = tp->view.cols;
while (n-- > 0) {
line->cells[tp->cx + n].character = tp->view.ascebc[' '];
line->cells[tp->cx + n].highlight = tp->highlight;
line->cells[tp->cx + n].f_color = tp->f_color;
}
}
/*
* Delete characters at current position.
*/
static void
tty3270_delete_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
int i;
line = tp->screen + tp->cy;
if (line->len <= tp->cx)
return;
if (line->len - tp->cx <= n) {
line->len = tp->cx;
return;
}
for (i = tp->cx; i + n < line->len; i++)
line->cells[i] = line->cells[i + n];
line->len -= n;
}
/*
* Erase characters at current position.
*/
static void
tty3270_erase_characters(struct tty3270 *tp, int n)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
line = tp->screen + tp->cy;
while (line->len > tp->cx && n-- > 0) {
cell = line->cells + tp->cx++;
cell->character = ' ';
cell->highlight = TAX_RESET;
cell->f_color = TAC_RESET;
}
tp->cx += n;
tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
}
/*
* Erase line, 3 different cases:
* Esc [ 0 K Erase from current position to end of line inclusive
* Esc [ 1 K Erase from beginning of line to current position inclusive
* Esc [ 2 K Erase entire line (without moving cursor)
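 *
 * Example: mode 0 simply truncates the line at the cursor
 * (line->len = tp->cx); mode 2 drops the whole line (len = 0);
 * neither moves tp->cx or tp->cy.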
*/
static void
tty3270_erase_line(struct tty3270 *tp, int mode)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
int i;
line = tp->screen + tp->cy;
if (mode == 0)
line->len = tp->cx;
else if (mode == 1) {
for (i = 0; i < tp->cx; i++) {
cell = line->cells + i;
cell->character = ' ';
cell->highlight = TAX_RESET;
cell->f_color = TAC_RESET;
}
if (line->len <= tp->cx)
line->len = tp->cx + 1;
} else if (mode == 2)
line->len = 0;
tty3270_convert_line(tp, tp->cy);
}
/*
* Erase display, 3 different cases:
* Esc [ 0 J Erase from current position to bottom of screen inclusive
* Esc [ 1 J Erase from top of screen to current position inclusive
* Esc [ 2 J Erase entire screen (without moving the cursor)
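 *
 * Example: mode 2 sets len = 0 on every screen line, reconverts each
 * one and rebuilds the update list; the cursor position is unchanged.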
*/
static void
tty3270_erase_display(struct tty3270 *tp, int mode)
{
int i;
if (mode == 0) {
tty3270_erase_line(tp, 0);
for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
tp->screen[i].len = 0;
tty3270_convert_line(tp, i);
}
} else if (mode == 1) {
for (i = 0; i < tp->cy; i++) {
tp->screen[i].len = 0;
tty3270_convert_line(tp, i);
}
tty3270_erase_line(tp, 1);
} else if (mode == 2) {
for (i = 0; i < tp->view.rows - 2; i++) {
tp->screen[i].len = 0;
tty3270_convert_line(tp, i);
}
}
tty3270_rebuild_update(tp);
}
/*
* Set attributes found in an escape sequence.
* Esc [ <attr> ; <attr> ; ... m
*/
static void
tty3270_set_attributes(struct tty3270 *tp)
{
static unsigned char f_colors[] = {
TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
};
int i, attr;
for (i = 0; i <= tp->esc_npar; i++) {
attr = tp->esc_par[i];
switch (attr) {
case 0: /* Reset */
tp->highlight = TAX_RESET;
tp->f_color = TAC_RESET;
break;
/* Highlight. */
case 4: /* Start underlining. */
tp->highlight = TAX_UNDER;
break;
case 5: /* Start blink. */
tp->highlight = TAX_BLINK;
break;
case 7: /* Start reverse. */
tp->highlight = TAX_REVER;
break;
case 24: /* End underlining */
if (tp->highlight == TAX_UNDER)
tp->highlight = TAX_RESET;
break;
case 25: /* End blink. */
if (tp->highlight == TAX_BLINK)
tp->highlight = TAX_RESET;
break;
case 27: /* End reverse. */
if (tp->highlight == TAX_REVER)
tp->highlight = TAX_RESET;
break;
/* Foreground color. */
case 30: /* Black */
case 31: /* Red */
case 32: /* Green */
case 33: /* Yellow */
case 34: /* Blue */
case 35: /* Magenta */
case 36: /* Cyan */
case 37: /* White */
case 39: /* Black */
tp->f_color = f_colors[attr - 30];
break;
}
}
}
static inline int
tty3270_getpar(struct tty3270 *tp, int ix)
{
return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
}
static void
tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
{
int max_cx = max(0, cx);
int max_cy = max(0, cy);
tp->cx = min_t(int, tp->view.cols - 1, max_cx);
cy = min_t(int, tp->view.rows - 3, max_cy);
if (cy != tp->cy) {
tty3270_convert_line(tp, tp->cy);
tp->cy = cy;
}
}
/*
* Process escape sequences. Known sequences:
* Esc 7 Save Cursor Position
* Esc 8 Restore Cursor Position
* Esc [ Pn ; Pn ; .. m Set attributes
* Esc [ Pn ; Pn H Cursor Position
* Esc [ Pn ; Pn f Cursor Position
* Esc [ Pn A Cursor Up
* Esc [ Pn B Cursor Down
* Esc [ Pn C Cursor Forward
* Esc [ Pn D Cursor Backward
* Esc [ Pn G Cursor Horizontal Absolute
* Esc [ Pn X Erase Characters
* Esc [ Ps J Erase in Display
* Esc [ Ps K Erase in Line
* // FIXME: add all the new ones.
*
* Pn is a numeric parameter, a string of zero or more decimal digits.
* Ps is a selective parameter.
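 *
 * Example: "ESC [ 3 ; 7 H" parses to esc_par[] = { 3, 7 } and calls
 * tty3270_goto_xy(tp, 6, 2), i.e. row 3, column 7, one-based.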
*/
static void
tty3270_escape_sequence(struct tty3270 *tp, char ch)
{
enum { ESnormal, ESesc, ESsquare, ESgetpars };
if (tp->esc_state == ESnormal) {
if (ch == 0x1b)
/* Starting new escape sequence. */
tp->esc_state = ESesc;
return;
}
if (tp->esc_state == ESesc) {
tp->esc_state = ESnormal;
switch (ch) {
case '[':
tp->esc_state = ESsquare;
break;
case 'E':
tty3270_cr(tp);
tty3270_lf(tp);
break;
case 'M':
tty3270_ri(tp);
break;
case 'D':
tty3270_lf(tp);
break;
case 'Z': /* Respond ID. */
kbd_puts_queue(tp->tty, "\033[?6c");
break;
case '7': /* Save cursor position. */
tp->saved_cx = tp->cx;
tp->saved_cy = tp->cy;
tp->saved_highlight = tp->highlight;
tp->saved_f_color = tp->f_color;
break;
case '8': /* Restore cursor position. */
tty3270_convert_line(tp, tp->cy);
tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
tp->highlight = tp->saved_highlight;
tp->f_color = tp->saved_f_color;
break;
case 'c': /* Reset terminal. */
tp->cx = tp->saved_cx = 0;
tp->cy = tp->saved_cy = 0;
tp->highlight = tp->saved_highlight = TAX_RESET;
tp->f_color = tp->saved_f_color = TAC_RESET;
tty3270_erase_display(tp, 2);
break;
}
return;
}
if (tp->esc_state == ESsquare) {
tp->esc_state = ESgetpars;
memset(tp->esc_par, 0, sizeof(tp->esc_par));
tp->esc_npar = 0;
tp->esc_ques = (ch == '?');
if (tp->esc_ques)
return;
}
if (tp->esc_state == ESgetpars) {
if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
tp->esc_npar++;
return;
}
if (ch >= '0' && ch <= '9') {
tp->esc_par[tp->esc_npar] *= 10;
tp->esc_par[tp->esc_npar] += ch - '0';
return;
}
}
tp->esc_state = ESnormal;
if (ch == 'n' && !tp->esc_ques) {
if (tp->esc_par[0] == 5) /* Status report. */
kbd_puts_queue(tp->tty, "\033[0n");
else if (tp->esc_par[0] == 6) { /* Cursor report. */
char buf[40];
sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
kbd_puts_queue(tp->tty, buf);
}
return;
}
if (tp->esc_ques)
return;
switch (ch) {
case 'm':
tty3270_set_attributes(tp);
break;
case 'H': /* Set cursor position. */
case 'f':
tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
tty3270_getpar(tp, 0) - 1);
break;
case 'd': /* Set y position. */
tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
break;
case 'A': /* Cursor up. */
case 'F':
tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
break;
case 'B': /* Cursor down. */
case 'e':
case 'E':
tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
break;
case 'C': /* Cursor forward. */
case 'a':
tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
break;
case 'D': /* Cursor backward. */
tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
break;
case 'G': /* Set x position. */
case '`':
tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
break;
case 'X': /* Erase Characters. */
tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
break;
case 'J': /* Erase display. */
tty3270_erase_display(tp, tp->esc_par[0]);
break;
case 'K': /* Erase line. */
tty3270_erase_line(tp, tp->esc_par[0]);
break;
case 'P': /* Delete characters. */
tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
break;
case '@': /* Insert characters. */
tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
break;
case 's': /* Save cursor position. */
tp->saved_cx = tp->cx;
tp->saved_cy = tp->cy;
tp->saved_highlight = tp->highlight;
tp->saved_f_color = tp->f_color;
break;
case 'u': /* Restore cursor position. */
tty3270_convert_line(tp, tp->cy);
tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
tp->highlight = tp->saved_highlight;
tp->f_color = tp->saved_f_color;
break;
}
}
/*
* String write routine for 3270 ttys
*/
static void
tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count)
{
int i_msg, i;
spin_lock_bh(&tp->view.lock);
for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) {
if (tp->esc_state != 0) {
/* Continue escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
continue;
}
switch (buf[i_msg]) {
case 0x07: /* '\a' -- Alarm */
tp->wcc |= TW_PLUSALARM;
break;
case 0x08: /* Backspace. */
if (tp->cx > 0) {
tp->cx--;
tty3270_put_character(tp, ' ');
}
break;
case 0x09: /* '\t' -- Tabulate */
for (i = tp->cx % 8; i < 8; i++) {
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
break;
}
tty3270_put_character(tp, ' ');
tp->cx++;
}
break;
case 0x0a: /* '\n' -- New Line */
tty3270_cr(tp);
tty3270_lf(tp);
break;
case 0x0c: /* '\f' -- Form Feed */
tty3270_erase_display(tp, 2);
tp->cx = tp->cy = 0;
break;
case 0x0d: /* '\r' -- Carriage Return */
tp->cx = 0;
break;
case 0x0f: /* SuSE "exit alternate mode" */
break;
case 0x1b: /* Start escape sequence. */
tty3270_escape_sequence(tp, buf[i_msg]);
break;
default: /* Insert normal character. */
if (tp->cx >= tp->view.cols) {
tty3270_cr(tp);
tty3270_lf(tp);
}
tty3270_put_character(tp, buf[i_msg]);
tp->cx++;
break;
}
}
/* Convert current line to 3270 data fragment. */
tty3270_convert_line(tp, tp->cy);
/* Setup timer to update display after 1/10 second */
if (!timer_pending(&tp->timer))
tty3270_set_timer(tp, HZ/10);
spin_unlock_bh(&tp->view.lock);
}
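/*
 * Example: writing "a\tb" emits 'a', pads with spaces up to the next
 * 8 column tab stop, emits 'b', and schedules a single display
 * update about HZ/10 jiffies later rather than one per character.
 */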
/*
* String write routine for 3270 ttys
*/
static int
tty3270_write(struct tty_struct * tty,
const unsigned char *buf, int count)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return 0;
if (tp->char_count > 0) {
tty3270_do_write(tp, tp->char_buf, tp->char_count);
tp->char_count = 0;
}
tty3270_do_write(tp, buf, count);
return count;
}
/*
 * Put a single character into the tty's character buffer.
*/
static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
return 0;
tp->char_buf[tp->char_count++] = ch;
return 1;
}
/*
 * Flush all characters from the tty's character buffer put there
* by tty3270_put_char.
*/
static void
tty3270_flush_chars(struct tty_struct *tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
if (tp->char_count > 0) {
tty3270_do_write(tp, tp->char_buf, tp->char_count);
tp->char_count = 0;
}
}
/*
* Returns the number of characters in the output buffer. This is
* used in tty_wait_until_sent to wait until all characters have
* appeared on the screen.
*/
static int
tty3270_chars_in_buffer(struct tty_struct *tty)
{
return 0;
}
static void
tty3270_flush_buffer(struct tty_struct *tty)
{
}
/*
* Check for visible/invisible input switches
*/
static void
tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct tty3270 *tp;
int new;
tp = tty->driver_data;
if (!tp)
return;
spin_lock_bh(&tp->view.lock);
if (L_ICANON(tty)) {
new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
if (new != tp->inattr) {
tp->inattr = new;
tty3270_update_prompt(tp, NULL, 0);
tty3270_set_timer(tp, 1);
}
}
spin_unlock_bh(&tp->view.lock);
}
/*
* Disable reading from a 3270 tty
*/
static void
tty3270_throttle(struct tty_struct * tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
tp->throttle = 1;
}
/*
* Enable reading from a 3270 tty
*/
static void
tty3270_unthrottle(struct tty_struct * tty)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return;
tp->throttle = 0;
if (tp->attn)
tty3270_issue_read(tp, 1);
}
/*
* Hang up the tty device.
*/
static void
tty3270_hangup(struct tty_struct *tty)
{
// FIXME: implement
}
static void
tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
{
}
static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, arg);
}
#ifdef CONFIG_COMPAT
static long tty3270_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct tty3270 *tp;
tp = tty->driver_data;
if (!tp)
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct tty_operations tty3270_ops = {
.open = tty3270_open,
.close = tty3270_close,
.write = tty3270_write,
.put_char = tty3270_put_char,
.flush_chars = tty3270_flush_chars,
.write_room = tty3270_write_room,
.chars_in_buffer = tty3270_chars_in_buffer,
.flush_buffer = tty3270_flush_buffer,
.throttle = tty3270_throttle,
.unthrottle = tty3270_unthrottle,
.hangup = tty3270_hangup,
.wait_until_sent = tty3270_wait_until_sent,
.ioctl = tty3270_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = tty3270_compat_ioctl,
#endif
.set_termios = tty3270_set_termios
};
/*
* 3270 tty registration code called from tty_init().
 * Most kernel services (incl. kmalloc) are available at this point.
*/
static int __init tty3270_init(void)
{
struct tty_driver *driver;
int ret;
driver = alloc_tty_driver(RAW3270_MAXDEVS);
if (!driver)
return -ENOMEM;
/*
* Initialize the tty_driver structure
* Entries in tty3270_driver that are NOT initialized:
	 * proc_entry, set_ldisc, write_proc
*/
driver->driver_name = "ttyTUB";
driver->name = "ttyTUB";
driver->major = IBM_TTY3270_MAJOR;
driver->minor_start = RAW3270_FIRSTMINOR;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(driver, &tty3270_ops);
ret = tty_register_driver(driver);
if (ret) {
put_tty_driver(driver);
return ret;
}
tty3270_driver = driver;
return 0;
}
static void __exit
tty3270_exit(void)
{
struct tty_driver *driver;
driver = tty3270_driver;
tty3270_driver = NULL;
tty_unregister_driver(driver);
tty3270_del_views();
}
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
module_init(tty3270_init);
module_exit(tty3270_exit);
| gpl-2.0 |
FrancescoCG/CrazySuperKernel-CM13-KLTE-NEW-REBASE | arch/arm/mach-gemini/board-nas4220b.c | 5313 | 2430 | /*
* Support for Raidsonic NAS-4220-B
*
* Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
*
* based on rut1xx.c
* Copyright (C) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/mdio-gpio.h>
#include <linux/io.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/hardware.h>
#include <mach/global_reg.h>
#include "common.h"
static struct sys_timer ib4220b_timer = {
.init = gemini_timer_init,
};
static struct gpio_led ib4220b_leds[] = {
{
.name = "nas4220b:orange:hdd",
.default_trigger = "none",
.gpio = 60,
},
{
.name = "nas4220b:green:os",
.default_trigger = "heartbeat",
.gpio = 62,
},
};
static struct gpio_led_platform_data ib4220b_leds_data = {
.num_leds = ARRAY_SIZE(ib4220b_leds),
.leds = ib4220b_leds,
};
static struct platform_device ib4220b_led_device = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &ib4220b_leds_data,
},
};
static struct gpio_keys_button ib4220b_keys[] = {
{
.code = KEY_SETUP,
.gpio = 61,
.active_low = 1,
.desc = "Backup Button",
.type = EV_KEY,
},
{
.code = KEY_RESTART,
.gpio = 63,
.active_low = 1,
.desc = "Softreset Button",
.type = EV_KEY,
},
};
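/*
 * Example: pressing the backup button pulls GPIO 61 low (active_low)
 * and the gpio-keys driver reports it as an EV_KEY KEY_SETUP event.
 */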
static struct gpio_keys_platform_data ib4220b_keys_data = {
.buttons = ib4220b_keys,
.nbuttons = ARRAY_SIZE(ib4220b_keys),
};
static struct platform_device ib4220b_key_device = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &ib4220b_keys_data,
},
};
static void __init ib4220b_init(void)
{
gemini_gpio_init();
platform_register_uart();
platform_register_pflash(SZ_16M, NULL, 0);
platform_device_register(&ib4220b_led_device);
platform_device_register(&ib4220b_key_device);
platform_register_rtc();
}
MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")
.atag_offset = 0x100,
.map_io = gemini_map_io,
.init_irq = gemini_init_irq,
.timer = &ib4220b_timer,
.init_machine = ib4220b_init,
MACHINE_END
| gpl-2.0 |
Feche/android_kernel_motorola_olympus_oc | lib/decompress_inflate.c | 8385 | 3811 | #ifdef STATIC
/* Pre-boot environment: included */
/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
* errors about console_printk etc... on ARM */
#define _LINUX_KERNEL_H
#include "zlib_inflate/inftrees.c"
#include "zlib_inflate/inffast.c"
#include "zlib_inflate/inflate.c"
#else /* STATIC */
/* initramfs et al: linked */
#include <linux/zutil.h>
#include "zlib_inflate/inftrees.h"
#include "zlib_inflate/inffast.h"
#include "zlib_inflate/inflate.h"
#include "zlib_inflate/infutil.h"
#endif /* STATIC */
#include <linux/decompress/mm.h>
#define GZIP_IOBUF_SIZE (16*1024)
static int INIT nofill(void *buffer, unsigned int len)
{
return -1;
}
/* Included from initramfs et al code */
STATIC int INIT gunzip(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
unsigned char *out_buf,
int *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
int rc;
size_t out_len;
rc = -1;
if (flush) {
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
out_len = 0x7fffffff; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
goto gunzip_nomem1;
}
if (buf)
zbuf = buf;
else {
zbuf = malloc(GZIP_IOBUF_SIZE);
len = 0;
}
if (!zbuf) {
error("Out of memory while allocating input buffer");
goto gunzip_nomem2;
}
strm = malloc(sizeof(*strm));
if (strm == NULL) {
error("Out of memory while allocating z_stream");
goto gunzip_nomem3;
}
strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
sizeof(struct inflate_state));
if (strm->workspace == NULL) {
error("Out of memory while allocating workspace");
goto gunzip_nomem4;
}
if (!fill)
fill = nofill;
if (len == 0)
len = fill(zbuf, GZIP_IOBUF_SIZE);
/* verify the gzip header */
if (len < 10 ||
zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
if (pos)
*pos = 0;
error("Not a gzip file");
goto gunzip_5;
}
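	/*
	 * Example: a minimal member starts 1f 8b 08 00, a four byte
	 * mtime, then XFL and OS bytes: ten bytes in total, matching
	 * the fixed offset skipped below.
	 */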
/* skip over gzip header (1f,8b,08... 10 bytes total +
* possible asciz filename)
*/
strm->next_in = zbuf + 10;
strm->avail_in = len - 10;
/* skip over asciz filename */
if (zbuf[3] & 0x8) {
do {
/*
* If the filename doesn't fit into the buffer,
* the file is very probably corrupt. Don't try
* to read more data.
*/
if (strm->avail_in == 0) {
error("header error");
goto gunzip_5;
}
--strm->avail_in;
} while (*strm->next_in++);
}
strm->next_out = out_buf;
strm->avail_out = out_len;
rc = zlib_inflateInit2(strm, -MAX_WBITS);
if (!flush) {
WS(strm)->inflate_state.wsize = 0;
WS(strm)->inflate_state.window = NULL;
}
while (rc == Z_OK) {
if (strm->avail_in == 0) {
/* TODO: handle case where both pos and fill are set */
len = fill(zbuf, GZIP_IOBUF_SIZE);
if (len < 0) {
rc = -1;
error("read error");
break;
}
strm->next_in = zbuf;
strm->avail_in = len;
}
rc = zlib_inflate(strm, 0);
/* Write any data generated */
if (flush && strm->next_out > out_buf) {
int l = strm->next_out - out_buf;
if (l != flush(out_buf, l)) {
rc = -1;
error("write error");
break;
}
strm->next_out = out_buf;
strm->avail_out = out_len;
}
/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
if (rc == Z_STREAM_END) {
rc = 0;
break;
} else if (rc != Z_OK) {
error("uncompression error");
rc = -1;
}
}
zlib_inflateEnd(strm);
if (pos)
/* add + 8 to skip over trailer */
*pos = strm->next_in - zbuf + 8;
gunzip_5:
free(strm->workspace);
gunzip_nomem4:
free(strm);
gunzip_nomem3:
if (!buf)
free(zbuf);
gunzip_nomem2:
if (flush)
free(out_buf);
gunzip_nomem1:
return rc; /* returns Z_OK (0) if successful */
}
#define decompress gunzip
| gpl-2.0 |
zhaochengw/android_kernel_nx403 | net/netfilter/xt_tcpudp.c | 13249 | 5817 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_DESCRIPTION("Xtables: TCP, UDP and UDP-Lite match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xt_tcp");
MODULE_ALIAS("xt_udp");
MODULE_ALIAS("ipt_udp");
MODULE_ALIAS("ipt_tcp");
MODULE_ALIAS("ip6t_udp");
MODULE_ALIAS("ip6t_tcp");
/* Returns true if the port is matched by the range, false otherwise */
static inline bool
port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
{
return (port >= min && port <= max) ^ invert;
}
static bool
tcp_find_option(u_int8_t option,
const struct sk_buff *skb,
unsigned int protoff,
unsigned int optlen,
bool invert,
bool *hotdrop)
{
/* tcp.doff is only 4 bits, i.e. max 15 * 4 bytes */
const u_int8_t *op;
u_int8_t _opt[60 - sizeof(struct tcphdr)];
unsigned int i;
pr_debug("finding option\n");
if (!optlen)
return invert;
/* If we don't have the whole header, drop packet. */
op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
optlen, _opt);
if (op == NULL) {
*hotdrop = true;
return false;
}
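/* Walk the option list: kinds 0 (EOL) and 1 (NOP) are single bytes,
 * every other option carries a length byte; the "?: 1" guards against
 * a corrupt zero length that would otherwise loop forever. */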
for (i = 0; i < optlen; ) {
if (op[i] == option) return !invert;
if (op[i] < 2) i++;
else i += op[i+1]?:1;
}
return invert;
}
static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct tcphdr *th;
struct tcphdr _tcph;
const struct xt_tcp *tcpinfo = par->matchinfo;
if (par->fragoff != 0) {
/* To quote Alan:
Don't allow a fragment of TCP 8 bytes in. Nobody normal
causes this. It's a cracker trying to break in by doing a
flag overwrite to pass the direction checks.
*/
if (par->fragoff == 1) {
pr_debug("Dropping evil TCP offset=1 frag.\n");
par->hotdrop = true;
}
/* Must not be a fragment. */
return false;
}
#define FWINVTCP(bool, invflg) ((bool) ^ !!(tcpinfo->invflags & (invflg)))
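/* evaluate a match condition and flip the result if its invert flag is set */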
th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
if (th == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
pr_debug("Dropping evil TCP offset=0 tinygram.\n");
par->hotdrop = true;
return false;
}
if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
ntohs(th->source),
!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
return false;
if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
ntohs(th->dest),
!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
return false;
if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
== tcpinfo->flg_cmp,
XT_TCP_INV_FLAGS))
return false;
if (tcpinfo->option) {
if (th->doff * 4 < sizeof(_tcph)) {
par->hotdrop = true;
return false;
}
if (!tcp_find_option(tcpinfo->option, skb, par->thoff,
th->doff*4 - sizeof(_tcph),
tcpinfo->invflags & XT_TCP_INV_OPTION,
&par->hotdrop))
return false;
}
return true;
}
static int tcp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_tcp *tcpinfo = par->matchinfo;
/* Must specify no unknown invflags */
return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}
static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct udphdr *uh;
struct udphdr _udph;
const struct xt_udp *udpinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
uh = skb_header_pointer(skb, par->thoff, sizeof(_udph), &_udph);
if (uh == NULL) {
/* We've been asked to examine this packet, and we
can't. Hence, no choice but to drop. */
pr_debug("Dropping evil UDP tinygram.\n");
par->hotdrop = true;
return false;
}
return port_match(udpinfo->spts[0], udpinfo->spts[1],
ntohs(uh->source),
!!(udpinfo->invflags & XT_UDP_INV_SRCPT))
&& port_match(udpinfo->dpts[0], udpinfo->dpts[1],
ntohs(uh->dest),
!!(udpinfo->invflags & XT_UDP_INV_DSTPT));
}
static int udp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_udp *udpinfo = par->matchinfo;
/* Must specify no unknown invflags */
return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0;
}
static struct xt_match tcpudp_mt_reg[] __read_mostly = {
{
.name = "tcp",
.family = NFPROTO_IPV4,
.checkentry = tcp_mt_check,
.match = tcp_mt,
.matchsize = sizeof(struct xt_tcp),
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
{
.name = "tcp",
.family = NFPROTO_IPV6,
.checkentry = tcp_mt_check,
.match = tcp_mt,
.matchsize = sizeof(struct xt_tcp),
.proto = IPPROTO_TCP,
.me = THIS_MODULE,
},
{
.name = "udp",
.family = NFPROTO_IPV4,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDP,
.me = THIS_MODULE,
},
{
.name = "udp",
.family = NFPROTO_IPV6,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDP,
.me = THIS_MODULE,
},
{
.name = "udplite",
.family = NFPROTO_IPV4,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDPLITE,
.me = THIS_MODULE,
},
{
.name = "udplite",
.family = NFPROTO_IPV6,
.checkentry = udp_mt_check,
.match = udp_mt,
.matchsize = sizeof(struct xt_udp),
.proto = IPPROTO_UDPLITE,
.me = THIS_MODULE,
},
};
static int __init tcpudp_mt_init(void)
{
return xt_register_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}
static void __exit tcpudp_mt_exit(void)
{
xt_unregister_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}
module_init(tcpudp_mt_init);
module_exit(tcpudp_mt_exit);
| gpl-2.0 |
netmaxt3r/omni_kernel_sony_msm8974ab | drivers/block/paride/on26.c | 15553 | 7532 | /*
on26.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
on26.c is a low-level protocol driver for the
OnSpec 90c26 parallel to IDE adapter chip.
*/
/* Changes:
1.01 GRG 1998.05.06 init_proto, release_proto
1.02 GRG 1998.09.23 updates for the -E rev chip
1.03 GRG 1998.12.14 fix for slave drives
1.04 GRG 1998.12.20 yet another bug fix
*/
#define ON26_VERSION "1.04"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "paride.h"
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
2 8-bit EPP mode
3 EPP-16
4 EPP-32
*/
#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
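/* j44 assembles one data byte from two 4-bit status reads:
 * a supplies the low nybble (from its high bits), b the high nybble */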
#define P1 w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
#define P2 w2(5);w2(7);w2(5);w2(4);
/* cont = 0 - access the IDE register file
cont = 1 - access the IDE command set
*/
static int on26_read_regr( PIA *pi, int cont, int regr )
{ int a, b, r;
r = (regr<<2) + 1 + cont;
switch (pi->mode) {
case 0: w0(1); P1; w0(r); P2; w0(0); P1;
w2(6); a = r1(); w2(4);
w2(6); b = r1(); w2(4);
w2(6); w2(4); w2(6); w2(4);
return j44(a,b);
case 1: w0(1); P1; w0(r); P2; w0(0); P1;
w2(0x26); a = r0(); w2(4); w2(0x26); w2(4);
return a;
case 2:
case 3:
case 4: w3(1); w3(1); w2(5); w4(r); w2(4);
w3(0); w3(0); w2(0x24); a = r4(); w2(4);
w2(0x24); (void)r4(); w2(4);
return a;
}
return -1;
}
static void on26_write_regr( PIA *pi, int cont, int regr, int val )
{ int r;
r = (regr<<2) + 1 + cont;
switch (pi->mode) {
case 0:
case 1: w0(1); P1; w0(r); P2; w0(0); P1;
w0(val); P2; w0(val); P2;
break;
case 2:
case 3:
case 4: w3(1); w3(1); w2(5); w4(r); w2(4);
w3(0); w3(0);
w2(5); w4(val); w2(4);
w2(5); w4(val); w2(4);
break;
}
}
#define CCP(x) w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
static void on26_connect ( PIA *pi )
{ int x;
pi->saved_r0 = r0();
pi->saved_r2 = r2();
CCP(0x20);
x = 8; if (pi->mode) x = 9;
w0(2); P1; w0(8); P2;
w0(2); P1; w0(x); P2;
}
static void on26_disconnect ( PIA *pi )
{ if (pi->mode >= 2) { w3(4); w3(4); w3(4); w3(4); }
else { w0(4); P1; w0(4); P1; }
CCP(0x30);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
#define RESET_WAIT 200
static int on26_test_port( PIA *pi) /* hard reset */
{ int i, m, d, x=0, y=0;
pi->saved_r0 = r0();
pi->saved_r2 = r2();
d = pi->delay;
m = pi->mode;
pi->delay = 5;
pi->mode = 0;
w2(0xc);
CCP(0x30); CCP(0);
w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);
i = ((r1() & 0xf0) << 4); w0(0x87);
i |= (r1() & 0xf0); w0(0x78);
w0(0x20);w2(4);w2(5);
i |= ((r1() & 0xf0) >> 4);
w2(4);w0(0xff);
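/* i now holds a 12-bit signature assembled from three nybble reads;
 * 0xb5f appears to be the expected value for a present 90c26 */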
if (i == 0xb5f) {
w0(2); P1; w0(0); P2;
w0(3); P1; w0(0); P2;
w0(2); P1; w0(8); P2; udelay(100);
w0(2); P1; w0(0xa); P2; udelay(100);
w0(2); P1; w0(8); P2; udelay(1000);
on26_write_regr(pi,0,6,0xa0);
for (i=0;i<RESET_WAIT;i++) {
on26_write_regr(pi,0,6,0xa0);
x = on26_read_regr(pi,0,7);
on26_write_regr(pi,0,6,0xb0);
y = on26_read_regr(pi,0,7);
if (!((x&0x80)||(y&0x80))) break;
mdelay(100);
}
if (i == RESET_WAIT)
printk("on26: Device reset failed (%x,%x)\n",x,y);
w0(4); P1; w0(4); P1;
}
CCP(0x30);
pi->delay = d;
pi->mode = m;
w0(pi->saved_r0);
w2(pi->saved_r2);
return 5;
}
static void on26_read_block( PIA *pi, char * buf, int count )
{ int k, a, b;
switch (pi->mode) {
case 0: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x18); P2; w0(0); P1;
udelay(10);
for (k=0;k<count;k++) {
w2(6); a = r1();
w2(4); b = r1();
buf[k] = j44(a,b);
}
w0(2); P1; w0(8); P2;
break;
case 1: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x19); P2; w0(0); P1;
udelay(10);
for (k=0;k<count/2;k++) {
w2(0x26); buf[2*k] = r0();
w2(0x24); buf[2*k+1] = r0();
}
w0(2); P1; w0(9); P2;
break;
case 2: w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0x24);
udelay(10);
for (k=0;k<count;k++) buf[k] = r4();
w2(4);
break;
case 3: w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0x24);
udelay(10);
for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
w2(4);
break;
case 4: w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0x24);
udelay(10);
for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
w2(4);
break;
}
}
static void on26_write_block( PIA *pi, char * buf, int count )
{ int k;
switch (pi->mode) {
case 0:
case 1: w0(1); P1; w0(1); P2;
w0(2); P1; w0(0x18+pi->mode); P2; w0(0); P1;
udelay(10);
for (k=0;k<count/2;k++) {
w2(5); w0(buf[2*k]);
w2(7); w0(buf[2*k+1]);
}
w2(5); w2(4);
w0(2); P1; w0(8+pi->mode); P2;
break;
case 2: w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0xc5);
udelay(10);
for (k=0;k<count;k++) w4(buf[k]);
w2(0xc4);
break;
case 3: w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0xc5);
udelay(10);
for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
w2(0xc4);
break;
case 4: w3(1); w3(1); w2(5); w4(1); w2(4);
w3(0); w3(0); w2(0xc5);
udelay(10);
for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
w2(0xc4);
break;
}
}
static void on26_log_adapter( PIA *pi, char * scratch, int verbose )
{ char *mode_string[5] = {"4-bit","8-bit","EPP-8",
"EPP-16","EPP-32"};
printk("%s: on26 %s, OnSpec 90c26 at 0x%x, ",
pi->device,ON26_VERSION,pi->port);
printk("mode %d (%s), delay %d\n",pi->mode,
mode_string[pi->mode],pi->delay);
}
static struct pi_protocol on26 = {
.owner = THIS_MODULE,
.name = "on26",
.max_mode = 5,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = on26_write_regr,
.read_regr = on26_read_regr,
.write_block = on26_write_block,
.read_block = on26_read_block,
.connect = on26_connect,
.disconnect = on26_disconnect,
.test_port = on26_test_port,
.log_adapter = on26_log_adapter,
};
static int __init on26_init(void)
{
return paride_register(&on26);
}
static void __exit on26_exit(void)
{
paride_unregister(&on26);
}
MODULE_LICENSE("GPL");
module_init(on26_init)
module_exit(on26_exit)
| gpl-2.0 |
javelinanddart/android_kernel_samsung_msm8974 | drivers/video/fb_sys_fops.c | 15553 | 2075 | /*
* linux/drivers/video/fb_sys_fops.c - Generic file operations where
* framebuffer is in system RAM
*
* Copyright (C) 2007 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count,
loff_t *ppos)
{
unsigned long p = *ppos;
void *src;
int err = 0;
unsigned long total_size;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
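/* reads starting at or past the end of the framebuffer return 0 (EOF) */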
if (p >= total_size)
return 0;
if (count >= total_size)
count = total_size;
if (count + p > total_size)
count = total_size - p;
src = (void __force *)(info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
if (copy_to_user(buf, src, count))
err = -EFAULT;
if (!err)
*ppos += count;
return (err) ? err : count;
}
EXPORT_SYMBOL_GPL(fb_sys_read);
ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos;
void *dst;
int err = 0;
unsigned long total_size;
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
total_size = info->screen_size;
if (total_size == 0)
total_size = info->fix.smem_len;
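/* clamp the write to the framebuffer size: an offset past the end
 * fails with -EFBIG, an overlong write is truncated (-EFBIG or -ENOSPC) */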
if (p > total_size)
return -EFBIG;
if (count > total_size) {
err = -EFBIG;
count = total_size;
}
if (count + p > total_size) {
if (!err)
err = -ENOSPC;
count = total_size - p;
}
dst = (void __force *) (info->screen_base + p);
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
if (copy_from_user(dst, buf, count))
err = -EFAULT;
if (!err)
*ppos += count;
return (err) ? err : count;
}
EXPORT_SYMBOL_GPL(fb_sys_write);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Generic file read (fb in system RAM)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TeamFahQ/kernel_linux-4.2.6-lz | sound/soc/codecs/ak4671.c | 194 | 22490 | /*
* ak4671.c -- audio driver for AK4671
*
* Copyright (C) 2009 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "ak4671.h"
/* ak4671 register cache & default register settings */
static const struct reg_default ak4671_reg_defaults[] = {
{ 0x00, 0x00 }, /* AK4671_AD_DA_POWER_MANAGEMENT (0x00) */
{ 0x01, 0xf6 }, /* AK4671_PLL_MODE_SELECT0 (0x01) */
{ 0x02, 0x00 }, /* AK4671_PLL_MODE_SELECT1 (0x02) */
{ 0x03, 0x02 }, /* AK4671_FORMAT_SELECT (0x03) */
{ 0x04, 0x00 }, /* AK4671_MIC_SIGNAL_SELECT (0x04) */
{ 0x05, 0x55 }, /* AK4671_MIC_AMP_GAIN (0x05) */
{ 0x06, 0x00 }, /* AK4671_MIXING_POWER_MANAGEMENT0 (0x06) */
{ 0x07, 0x00 }, /* AK4671_MIXING_POWER_MANAGEMENT1 (0x07) */
{ 0x08, 0xb5 }, /* AK4671_OUTPUT_VOLUME_CONTROL (0x08) */
{ 0x09, 0x00 }, /* AK4671_LOUT1_SIGNAL_SELECT (0x09) */
{ 0x0a, 0x00 }, /* AK4671_ROUT1_SIGNAL_SELECT (0x0a) */
{ 0x0b, 0x00 }, /* AK4671_LOUT2_SIGNAL_SELECT (0x0b) */
{ 0x0c, 0x00 }, /* AK4671_ROUT2_SIGNAL_SELECT (0x0c) */
{ 0x0d, 0x00 }, /* AK4671_LOUT3_SIGNAL_SELECT (0x0d) */
{ 0x0e, 0x00 }, /* AK4671_ROUT3_SIGNAL_SELECT (0x0e) */
{ 0x0f, 0x00 }, /* AK4671_LOUT1_POWER_MANAGERMENT (0x0f) */
{ 0x10, 0x00 }, /* AK4671_LOUT2_POWER_MANAGERMENT (0x10) */
{ 0x11, 0x80 }, /* AK4671_LOUT3_POWER_MANAGERMENT (0x11) */
{ 0x12, 0x91 }, /* AK4671_LCH_INPUT_VOLUME_CONTROL (0x12) */
{ 0x13, 0x91 }, /* AK4671_RCH_INPUT_VOLUME_CONTROL (0x13) */
{ 0x14, 0xe1 }, /* AK4671_ALC_REFERENCE_SELECT (0x14) */
{ 0x15, 0x00 }, /* AK4671_DIGITAL_MIXING_CONTROL (0x15) */
{ 0x16, 0x00 }, /* AK4671_ALC_TIMER_SELECT (0x16) */
{ 0x17, 0x00 }, /* AK4671_ALC_MODE_CONTROL (0x17) */
{ 0x18, 0x02 }, /* AK4671_MODE_CONTROL1 (0x18) */
{ 0x19, 0x01 }, /* AK4671_MODE_CONTROL2 (0x19) */
{ 0x1a, 0x18 }, /* AK4671_LCH_OUTPUT_VOLUME_CONTROL (0x1a) */
{ 0x1b, 0x18 }, /* AK4671_RCH_OUTPUT_VOLUME_CONTROL (0x1b) */
{ 0x1c, 0x00 }, /* AK4671_SIDETONE_A_CONTROL (0x1c) */
{ 0x1d, 0x02 }, /* AK4671_DIGITAL_FILTER_SELECT (0x1d) */
{ 0x1e, 0x00 }, /* AK4671_FIL3_COEFFICIENT0 (0x1e) */
{ 0x1f, 0x00 }, /* AK4671_FIL3_COEFFICIENT1 (0x1f) */
{ 0x20, 0x00 }, /* AK4671_FIL3_COEFFICIENT2 (0x20) */
{ 0x21, 0x00 }, /* AK4671_FIL3_COEFFICIENT3 (0x21) */
{ 0x22, 0x00 }, /* AK4671_EQ_COEFFICIENT0 (0x22) */
{ 0x23, 0x00 }, /* AK4671_EQ_COEFFICIENT1 (0x23) */
{ 0x24, 0x00 }, /* AK4671_EQ_COEFFICIENT2 (0x24) */
{ 0x25, 0x00 }, /* AK4671_EQ_COEFFICIENT3 (0x25) */
{ 0x26, 0x00 }, /* AK4671_EQ_COEFFICIENT4 (0x26) */
{ 0x27, 0x00 }, /* AK4671_EQ_COEFFICIENT5 (0x27) */
{ 0x28, 0xa9 }, /* AK4671_FIL1_COEFFICIENT0 (0x28) */
{ 0x29, 0x1f }, /* AK4671_FIL1_COEFFICIENT1 (0x29) */
{ 0x2a, 0xad }, /* AK4671_FIL1_COEFFICIENT2 (0x2a) */
{ 0x2b, 0x20 }, /* AK4671_FIL1_COEFFICIENT3 (0x2b) */
{ 0x2c, 0x00 }, /* AK4671_FIL2_COEFFICIENT0 (0x2c) */
{ 0x2d, 0x00 }, /* AK4671_FIL2_COEFFICIENT1 (0x2d) */
{ 0x2e, 0x00 }, /* AK4671_FIL2_COEFFICIENT2 (0x2e) */
{ 0x2f, 0x00 }, /* AK4671_FIL2_COEFFICIENT3 (0x2f) */
{ 0x30, 0x00 }, /* AK4671_DIGITAL_FILTER_SELECT2 (0x30) */
{ 0x32, 0x00 }, /* AK4671_E1_COEFFICIENT0 (0x32) */
{ 0x33, 0x00 }, /* AK4671_E1_COEFFICIENT1 (0x33) */
{ 0x34, 0x00 }, /* AK4671_E1_COEFFICIENT2 (0x34) */
{ 0x35, 0x00 }, /* AK4671_E1_COEFFICIENT3 (0x35) */
{ 0x36, 0x00 }, /* AK4671_E1_COEFFICIENT4 (0x36) */
{ 0x37, 0x00 }, /* AK4671_E1_COEFFICIENT5 (0x37) */
{ 0x38, 0x00 }, /* AK4671_E2_COEFFICIENT0 (0x38) */
{ 0x39, 0x00 }, /* AK4671_E2_COEFFICIENT1 (0x39) */
{ 0x3a, 0x00 }, /* AK4671_E2_COEFFICIENT2 (0x3a) */
{ 0x3b, 0x00 }, /* AK4671_E2_COEFFICIENT3 (0x3b) */
{ 0x3c, 0x00 }, /* AK4671_E2_COEFFICIENT4 (0x3c) */
{ 0x3d, 0x00 }, /* AK4671_E2_COEFFICIENT5 (0x3d) */
{ 0x3e, 0x00 }, /* AK4671_E3_COEFFICIENT0 (0x3e) */
{ 0x3f, 0x00 }, /* AK4671_E3_COEFFICIENT1 (0x3f) */
{ 0x40, 0x00 }, /* AK4671_E3_COEFFICIENT2 (0x40) */
{ 0x41, 0x00 }, /* AK4671_E3_COEFFICIENT3 (0x41) */
{ 0x42, 0x00 }, /* AK4671_E3_COEFFICIENT4 (0x42) */
{ 0x43, 0x00 }, /* AK4671_E3_COEFFICIENT5 (0x43) */
{ 0x44, 0x00 }, /* AK4671_E4_COEFFICIENT0 (0x44) */
{ 0x45, 0x00 }, /* AK4671_E4_COEFFICIENT1 (0x45) */
{ 0x46, 0x00 }, /* AK4671_E4_COEFFICIENT2 (0x46) */
{ 0x47, 0x00 }, /* AK4671_E4_COEFFICIENT3 (0x47) */
{ 0x48, 0x00 }, /* AK4671_E4_COEFFICIENT4 (0x48) */
{ 0x49, 0x00 }, /* AK4671_E4_COEFFICIENT5 (0x49) */
{ 0x4a, 0x00 }, /* AK4671_E5_COEFFICIENT0 (0x4a) */
{ 0x4b, 0x00 }, /* AK4671_E5_COEFFICIENT1 (0x4b) */
{ 0x4c, 0x00 }, /* AK4671_E5_COEFFICIENT2 (0x4c) */
{ 0x4d, 0x00 }, /* AK4671_E5_COEFFICIENT3 (0x4d) */
{ 0x4e, 0x00 }, /* AK4671_E5_COEFFICIENT4 (0x4e) */
{ 0x4f, 0x00 }, /* AK4671_E5_COEFFICIENT5 (0x4f) */
{ 0x50, 0x88 }, /* AK4671_EQ_CONTROL_250HZ_100HZ (0x50) */
{ 0x51, 0x88 }, /* AK4671_EQ_CONTROL_3500HZ_1KHZ (0x51) */
{ 0x52, 0x08 }, /* AK4671_EQ_CONTRO_10KHZ (0x52) */
{ 0x53, 0x00 }, /* AK4671_PCM_IF_CONTROL0 (0x53) */
{ 0x54, 0x00 }, /* AK4671_PCM_IF_CONTROL1 (0x54) */
{ 0x55, 0x00 }, /* AK4671_PCM_IF_CONTROL2 (0x55) */
{ 0x56, 0x18 }, /* AK4671_DIGITAL_VOLUME_B_CONTROL (0x56) */
{ 0x57, 0x18 }, /* AK4671_DIGITAL_VOLUME_C_CONTROL (0x57) */
{ 0x58, 0x00 }, /* AK4671_SIDETONE_VOLUME_CONTROL (0x58) */
{ 0x59, 0x00 }, /* AK4671_DIGITAL_MIXING_CONTROL2 (0x59) */
{ 0x5a, 0x00 }, /* AK4671_SAR_ADC_CONTROL (0x5a) */
};
/*
* LOUT1/ROUT1 output volume control:
* from -24 to 6 dB in 6 dB steps (mute instead of -30 dB)
*/
static DECLARE_TLV_DB_SCALE(out1_tlv, -3000, 600, 1);
/*
* LOUT2/ROUT2 output volume control:
* from -33 to 6 dB in 3 dB steps (mute instead of -33 dB)
*/
static DECLARE_TLV_DB_SCALE(out2_tlv, -3300, 300, 1);
/*
* LOUT3/ROUT3 output volume control:
* from -6 to 3 dB in 3 dB steps
*/
static DECLARE_TLV_DB_SCALE(out3_tlv, -600, 300, 0);
/*
* Mic amp gain control:
* from -15 to 30 dB in 3 dB steps
* REVISIT: The actual min value (0x01) is -12 dB and the reg value 0x00 is not
* available
*/
static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -1500, 300, 0);
static const struct snd_kcontrol_new ak4671_snd_controls[] = {
/* Common playback gain controls */
SOC_SINGLE_TLV("Line Output1 Playback Volume",
AK4671_OUTPUT_VOLUME_CONTROL, 0, 0x6, 0, out1_tlv),
SOC_SINGLE_TLV("Headphone Output2 Playback Volume",
AK4671_OUTPUT_VOLUME_CONTROL, 4, 0xd, 0, out2_tlv),
SOC_SINGLE_TLV("Line Output3 Playback Volume",
AK4671_LOUT3_POWER_MANAGERMENT, 6, 0x3, 0, out3_tlv),
/* Common capture gain controls */
SOC_DOUBLE_TLV("Mic Amp Capture Volume",
AK4671_MIC_AMP_GAIN, 0, 4, 0xf, 0, mic_amp_tlv),
};
/* event handlers */
static int ak4671_out2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
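/* toggle the LOUT2/ROUT2 mute bit around power transitions:
 * unmute after power-up, mute again before power-down */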
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, AK4671_LOUT2_POWER_MANAGERMENT,
AK4671_MUTEN, AK4671_MUTEN);
break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec, AK4671_LOUT2_POWER_MANAGERMENT,
AK4671_MUTEN, 0);
break;
}
return 0;
}
/* Output Mixers */
static const struct snd_kcontrol_new ak4671_lout1_mixer_controls[] = {
SOC_DAPM_SINGLE("DACL", AK4671_LOUT1_SIGNAL_SELECT, 0, 1, 0),
SOC_DAPM_SINGLE("LINL1", AK4671_LOUT1_SIGNAL_SELECT, 1, 1, 0),
SOC_DAPM_SINGLE("LINL2", AK4671_LOUT1_SIGNAL_SELECT, 2, 1, 0),
SOC_DAPM_SINGLE("LINL3", AK4671_LOUT1_SIGNAL_SELECT, 3, 1, 0),
SOC_DAPM_SINGLE("LINL4", AK4671_LOUT1_SIGNAL_SELECT, 4, 1, 0),
SOC_DAPM_SINGLE("LOOPL", AK4671_LOUT1_SIGNAL_SELECT, 5, 1, 0),
};
static const struct snd_kcontrol_new ak4671_rout1_mixer_controls[] = {
SOC_DAPM_SINGLE("DACR", AK4671_ROUT1_SIGNAL_SELECT, 0, 1, 0),
SOC_DAPM_SINGLE("RINR1", AK4671_ROUT1_SIGNAL_SELECT, 1, 1, 0),
SOC_DAPM_SINGLE("RINR2", AK4671_ROUT1_SIGNAL_SELECT, 2, 1, 0),
SOC_DAPM_SINGLE("RINR3", AK4671_ROUT1_SIGNAL_SELECT, 3, 1, 0),
SOC_DAPM_SINGLE("RINR4", AK4671_ROUT1_SIGNAL_SELECT, 4, 1, 0),
SOC_DAPM_SINGLE("LOOPR", AK4671_ROUT1_SIGNAL_SELECT, 5, 1, 0),
};
static const struct snd_kcontrol_new ak4671_lout2_mixer_controls[] = {
SOC_DAPM_SINGLE("DACHL", AK4671_LOUT2_SIGNAL_SELECT, 0, 1, 0),
SOC_DAPM_SINGLE("LINH1", AK4671_LOUT2_SIGNAL_SELECT, 1, 1, 0),
SOC_DAPM_SINGLE("LINH2", AK4671_LOUT2_SIGNAL_SELECT, 2, 1, 0),
SOC_DAPM_SINGLE("LINH3", AK4671_LOUT2_SIGNAL_SELECT, 3, 1, 0),
SOC_DAPM_SINGLE("LINH4", AK4671_LOUT2_SIGNAL_SELECT, 4, 1, 0),
SOC_DAPM_SINGLE("LOOPHL", AK4671_LOUT2_SIGNAL_SELECT, 5, 1, 0),
};
static const struct snd_kcontrol_new ak4671_rout2_mixer_controls[] = {
SOC_DAPM_SINGLE("DACHR", AK4671_ROUT2_SIGNAL_SELECT, 0, 1, 0),
SOC_DAPM_SINGLE("RINH1", AK4671_ROUT2_SIGNAL_SELECT, 1, 1, 0),
SOC_DAPM_SINGLE("RINH2", AK4671_ROUT2_SIGNAL_SELECT, 2, 1, 0),
SOC_DAPM_SINGLE("RINH3", AK4671_ROUT2_SIGNAL_SELECT, 3, 1, 0),
SOC_DAPM_SINGLE("RINH4", AK4671_ROUT2_SIGNAL_SELECT, 4, 1, 0),
SOC_DAPM_SINGLE("LOOPHR", AK4671_ROUT2_SIGNAL_SELECT, 5, 1, 0),
};
static const struct snd_kcontrol_new ak4671_lout3_mixer_controls[] = {
SOC_DAPM_SINGLE("DACSL", AK4671_LOUT3_SIGNAL_SELECT, 0, 1, 0),
SOC_DAPM_SINGLE("LINS1", AK4671_LOUT3_SIGNAL_SELECT, 1, 1, 0),
SOC_DAPM_SINGLE("LINS2", AK4671_LOUT3_SIGNAL_SELECT, 2, 1, 0),
SOC_DAPM_SINGLE("LINS3", AK4671_LOUT3_SIGNAL_SELECT, 3, 1, 0),
SOC_DAPM_SINGLE("LINS4", AK4671_LOUT3_SIGNAL_SELECT, 4, 1, 0),
SOC_DAPM_SINGLE("LOOPSL", AK4671_LOUT3_SIGNAL_SELECT, 5, 1, 0),
};
static const struct snd_kcontrol_new ak4671_rout3_mixer_controls[] = {
SOC_DAPM_SINGLE("DACSR", AK4671_ROUT3_SIGNAL_SELECT, 0, 1, 0),
SOC_DAPM_SINGLE("RINS1", AK4671_ROUT3_SIGNAL_SELECT, 1, 1, 0),
SOC_DAPM_SINGLE("RINS2", AK4671_ROUT3_SIGNAL_SELECT, 2, 1, 0),
SOC_DAPM_SINGLE("RINS3", AK4671_ROUT3_SIGNAL_SELECT, 3, 1, 0),
SOC_DAPM_SINGLE("RINS4", AK4671_ROUT3_SIGNAL_SELECT, 4, 1, 0),
SOC_DAPM_SINGLE("LOOPSR", AK4671_ROUT3_SIGNAL_SELECT, 5, 1, 0),
};
/* Input MUXs */
static const char *ak4671_lin_mux_texts[] =
{"LIN1", "LIN2", "LIN3", "LIN4"};
static SOC_ENUM_SINGLE_DECL(ak4671_lin_mux_enum,
AK4671_MIC_SIGNAL_SELECT, 0,
ak4671_lin_mux_texts);
static const struct snd_kcontrol_new ak4671_lin_mux_control =
SOC_DAPM_ENUM("Route", ak4671_lin_mux_enum);
static const char *ak4671_rin_mux_texts[] =
{"RIN1", "RIN2", "RIN3", "RIN4"};
static SOC_ENUM_SINGLE_DECL(ak4671_rin_mux_enum,
AK4671_MIC_SIGNAL_SELECT, 2,
ak4671_rin_mux_texts);
static const struct snd_kcontrol_new ak4671_rin_mux_control =
SOC_DAPM_ENUM("Route", ak4671_rin_mux_enum);
static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = {
/* Inputs */
SND_SOC_DAPM_INPUT("LIN1"),
SND_SOC_DAPM_INPUT("RIN1"),
SND_SOC_DAPM_INPUT("LIN2"),
SND_SOC_DAPM_INPUT("RIN2"),
SND_SOC_DAPM_INPUT("LIN3"),
SND_SOC_DAPM_INPUT("RIN3"),
SND_SOC_DAPM_INPUT("LIN4"),
SND_SOC_DAPM_INPUT("RIN4"),
/* Outputs */
SND_SOC_DAPM_OUTPUT("LOUT1"),
SND_SOC_DAPM_OUTPUT("ROUT1"),
SND_SOC_DAPM_OUTPUT("LOUT2"),
SND_SOC_DAPM_OUTPUT("ROUT2"),
SND_SOC_DAPM_OUTPUT("LOUT3"),
SND_SOC_DAPM_OUTPUT("ROUT3"),
/* DAC */
SND_SOC_DAPM_DAC("DAC Left", "Left HiFi Playback",
AK4671_AD_DA_POWER_MANAGEMENT, 6, 0),
SND_SOC_DAPM_DAC("DAC Right", "Right HiFi Playback",
AK4671_AD_DA_POWER_MANAGEMENT, 7, 0),
/* ADC */
SND_SOC_DAPM_ADC("ADC Left", "Left HiFi Capture",
AK4671_AD_DA_POWER_MANAGEMENT, 4, 0),
SND_SOC_DAPM_ADC("ADC Right", "Right HiFi Capture",
AK4671_AD_DA_POWER_MANAGEMENT, 5, 0),
/* PGA */
SND_SOC_DAPM_PGA("LOUT2 Mix Amp",
AK4671_LOUT2_POWER_MANAGERMENT, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("ROUT2 Mix Amp",
AK4671_LOUT2_POWER_MANAGERMENT, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("LIN1 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("RIN1 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 1, 0, NULL, 0),
SND_SOC_DAPM_PGA("LIN2 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 2, 0, NULL, 0),
SND_SOC_DAPM_PGA("RIN2 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 3, 0, NULL, 0),
SND_SOC_DAPM_PGA("LIN3 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA("RIN3 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("LIN4 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("RIN4 Mixing Circuit",
AK4671_MIXING_POWER_MANAGEMENT1, 7, 0, NULL, 0),
/* Output Mixers */
SND_SOC_DAPM_MIXER("LOUT1 Mixer", AK4671_LOUT1_POWER_MANAGERMENT, 0, 0,
&ak4671_lout1_mixer_controls[0],
ARRAY_SIZE(ak4671_lout1_mixer_controls)),
SND_SOC_DAPM_MIXER("ROUT1 Mixer", AK4671_LOUT1_POWER_MANAGERMENT, 1, 0,
&ak4671_rout1_mixer_controls[0],
ARRAY_SIZE(ak4671_rout1_mixer_controls)),
SND_SOC_DAPM_MIXER_E("LOUT2 Mixer", AK4671_LOUT2_POWER_MANAGERMENT,
0, 0, &ak4671_lout2_mixer_controls[0],
ARRAY_SIZE(ak4671_lout2_mixer_controls),
ak4671_out2_event,
SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_MIXER_E("ROUT2 Mixer", AK4671_LOUT2_POWER_MANAGERMENT,
1, 0, &ak4671_rout2_mixer_controls[0],
ARRAY_SIZE(ak4671_rout2_mixer_controls),
ak4671_out2_event,
SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_MIXER("LOUT3 Mixer", AK4671_LOUT3_POWER_MANAGERMENT, 0, 0,
&ak4671_lout3_mixer_controls[0],
ARRAY_SIZE(ak4671_lout3_mixer_controls)),
SND_SOC_DAPM_MIXER("ROUT3 Mixer", AK4671_LOUT3_POWER_MANAGERMENT, 1, 0,
&ak4671_rout3_mixer_controls[0],
ARRAY_SIZE(ak4671_rout3_mixer_controls)),
/* Input MUXs */
SND_SOC_DAPM_MUX("LIN MUX", AK4671_AD_DA_POWER_MANAGEMENT, 2, 0,
&ak4671_lin_mux_control),
SND_SOC_DAPM_MUX("RIN MUX", AK4671_AD_DA_POWER_MANAGEMENT, 3, 0,
&ak4671_rin_mux_control),
/* Mic Power */
SND_SOC_DAPM_MICBIAS("Mic Bias", AK4671_AD_DA_POWER_MANAGEMENT, 1, 0),
/* Supply */
SND_SOC_DAPM_SUPPLY("PMPLL", AK4671_PLL_MODE_SELECT1, 0, 0, NULL, 0),
};
static const struct snd_soc_dapm_route ak4671_intercon[] = {
{"DAC Left", NULL, "PMPLL"},
{"DAC Right", NULL, "PMPLL"},
{"ADC Left", NULL, "PMPLL"},
{"ADC Right", NULL, "PMPLL"},
/* Outputs */
{"LOUT1", NULL, "LOUT1 Mixer"},
{"ROUT1", NULL, "ROUT1 Mixer"},
{"LOUT2", NULL, "LOUT2 Mix Amp"},
{"ROUT2", NULL, "ROUT2 Mix Amp"},
{"LOUT3", NULL, "LOUT3 Mixer"},
{"ROUT3", NULL, "ROUT3 Mixer"},
{"LOUT1 Mixer", "DACL", "DAC Left"},
{"ROUT1 Mixer", "DACR", "DAC Right"},
{"LOUT2 Mixer", "DACHL", "DAC Left"},
{"ROUT2 Mixer", "DACHR", "DAC Right"},
{"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"},
{"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"},
{"LOUT3 Mixer", "DACSL", "DAC Left"},
{"ROUT3 Mixer", "DACSR", "DAC Right"},
/* Inputs */
{"LIN MUX", "LIN1", "LIN1"},
{"LIN MUX", "LIN2", "LIN2"},
{"LIN MUX", "LIN3", "LIN3"},
{"LIN MUX", "LIN4", "LIN4"},
{"RIN MUX", "RIN1", "RIN1"},
{"RIN MUX", "RIN2", "RIN2"},
{"RIN MUX", "RIN3", "RIN3"},
{"RIN MUX", "RIN4", "RIN4"},
{"LIN1", NULL, "Mic Bias"},
{"RIN1", NULL, "Mic Bias"},
{"LIN2", NULL, "Mic Bias"},
{"RIN2", NULL, "Mic Bias"},
{"ADC Left", NULL, "LIN MUX"},
{"ADC Right", NULL, "RIN MUX"},
/* Analog Loops */
{"LIN1 Mixing Circuit", NULL, "LIN1"},
{"RIN1 Mixing Circuit", NULL, "RIN1"},
{"LIN2 Mixing Circuit", NULL, "LIN2"},
{"RIN2 Mixing Circuit", NULL, "RIN2"},
{"LIN3 Mixing Circuit", NULL, "LIN3"},
{"RIN3 Mixing Circuit", NULL, "RIN3"},
{"LIN4 Mixing Circuit", NULL, "LIN4"},
{"RIN4 Mixing Circuit", NULL, "RIN4"},
{"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"},
{"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"},
{"LOUT2 Mixer", "LINH1", "LIN1 Mixing Circuit"},
{"ROUT2 Mixer", "RINH1", "RIN1 Mixing Circuit"},
{"LOUT3 Mixer", "LINS1", "LIN1 Mixing Circuit"},
{"ROUT3 Mixer", "RINS1", "RIN1 Mixing Circuit"},
{"LOUT1 Mixer", "LINL2", "LIN2 Mixing Circuit"},
{"ROUT1 Mixer", "RINR2", "RIN2 Mixing Circuit"},
{"LOUT2 Mixer", "LINH2", "LIN2 Mixing Circuit"},
{"ROUT2 Mixer", "RINH2", "RIN2 Mixing Circuit"},
{"LOUT3 Mixer", "LINS2", "LIN2 Mixing Circuit"},
{"ROUT3 Mixer", "RINS2", "RIN2 Mixing Circuit"},
{"LOUT1 Mixer", "LINL3", "LIN3 Mixing Circuit"},
{"ROUT1 Mixer", "RINR3", "RIN3 Mixing Circuit"},
{"LOUT2 Mixer", "LINH3", "LIN3 Mixing Circuit"},
{"ROUT2 Mixer", "RINH3", "RIN3 Mixing Circuit"},
{"LOUT3 Mixer", "LINS3", "LIN3 Mixing Circuit"},
{"ROUT3 Mixer", "RINS3", "RIN3 Mixing Circuit"},
{"LOUT1 Mixer", "LINL4", "LIN4 Mixing Circuit"},
{"ROUT1 Mixer", "RINR4", "RIN4 Mixing Circuit"},
{"LOUT2 Mixer", "LINH4", "LIN4 Mixing Circuit"},
{"ROUT2 Mixer", "RINH4", "RIN4 Mixing Circuit"},
{"LOUT3 Mixer", "LINS4", "LIN4 Mixing Circuit"},
{"ROUT3 Mixer", "RINS4", "RIN4 Mixing Circuit"},
};
static int ak4671_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
u8 fs;
fs = snd_soc_read(codec, AK4671_PLL_MODE_SELECT0);
fs &= ~AK4671_FS;
switch (params_rate(params)) {
case 8000:
fs |= AK4671_FS_8KHZ;
break;
case 12000:
fs |= AK4671_FS_12KHZ;
break;
case 16000:
fs |= AK4671_FS_16KHZ;
break;
case 24000:
fs |= AK4671_FS_24KHZ;
break;
case 11025:
fs |= AK4671_FS_11_025KHZ;
break;
case 22050:
fs |= AK4671_FS_22_05KHZ;
break;
case 32000:
fs |= AK4671_FS_32KHZ;
break;
case 44100:
fs |= AK4671_FS_44_1KHZ;
break;
case 48000:
fs |= AK4671_FS_48KHZ;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, AK4671_PLL_MODE_SELECT0, fs);
return 0;
}
static int ak4671_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
u8 pll;
pll = snd_soc_read(codec, AK4671_PLL_MODE_SELECT0);
pll &= ~AK4671_PLL;
switch (freq) {
case 11289600:
pll |= AK4671_PLL_11_2896MHZ;
break;
case 12000000:
pll |= AK4671_PLL_12MHZ;
break;
case 12288000:
pll |= AK4671_PLL_12_288MHZ;
break;
case 13000000:
pll |= AK4671_PLL_13MHZ;
break;
case 13500000:
pll |= AK4671_PLL_13_5MHZ;
break;
case 19200000:
pll |= AK4671_PLL_19_2MHZ;
break;
case 24000000:
pll |= AK4671_PLL_24MHZ;
break;
case 26000000:
pll |= AK4671_PLL_26MHZ;
break;
case 27000000:
pll |= AK4671_PLL_27MHZ;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, AK4671_PLL_MODE_SELECT0, pll);
return 0;
}
static int ak4671_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
u8 mode;
u8 format;
/* set master/slave audio interface */
mode = snd_soc_read(codec, AK4671_PLL_MODE_SELECT1);
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
mode |= AK4671_M_S;
break;
case SND_SOC_DAIFMT_CBM_CFS:
mode &= ~(AK4671_M_S);
break;
default:
return -EINVAL;
}
/* interface format */
format = snd_soc_read(codec, AK4671_FORMAT_SELECT);
format &= ~AK4671_DIF;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
format |= AK4671_DIF_I2S_MODE;
break;
case SND_SOC_DAIFMT_LEFT_J:
format |= AK4671_DIF_MSB_MODE;
break;
case SND_SOC_DAIFMT_DSP_A:
format |= AK4671_DIF_DSP_MODE;
format |= AK4671_BCKP;
format |= AK4671_MSBS;
break;
default:
return -EINVAL;
}
/* set mode and format */
snd_soc_write(codec, AK4671_PLL_MODE_SELECT1, mode);
snd_soc_write(codec, AK4671_FORMAT_SELECT, format);
return 0;
}
static int ak4671_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
case SND_SOC_BIAS_STANDBY:
snd_soc_update_bits(codec, AK4671_AD_DA_POWER_MANAGEMENT,
AK4671_PMVCM, AK4671_PMVCM);
break;
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, AK4671_AD_DA_POWER_MANAGEMENT, 0x00);
break;
}
return 0;
}
#define AK4671_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
SNDRV_PCM_RATE_48000)
#define AK4671_FORMATS SNDRV_PCM_FMTBIT_S16_LE
static const struct snd_soc_dai_ops ak4671_dai_ops = {
.hw_params = ak4671_hw_params,
.set_sysclk = ak4671_set_dai_sysclk,
.set_fmt = ak4671_set_dai_fmt,
};
static struct snd_soc_dai_driver ak4671_dai = {
.name = "ak4671-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = AK4671_RATES,
.formats = AK4671_FORMATS,},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = AK4671_RATES,
.formats = AK4671_FORMATS,},
.ops = &ak4671_dai_ops,
};
static struct snd_soc_codec_driver soc_codec_dev_ak4671 = {
.set_bias_level = ak4671_set_bias_level,
.controls = ak4671_snd_controls,
.num_controls = ARRAY_SIZE(ak4671_snd_controls),
.dapm_widgets = ak4671_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(ak4671_dapm_widgets),
.dapm_routes = ak4671_intercon,
.num_dapm_routes = ARRAY_SIZE(ak4671_intercon),
};
static const struct regmap_config ak4671_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = AK4671_SAR_ADC_CONTROL,
.reg_defaults = ak4671_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(ak4671_reg_defaults),
.cache_type = REGCACHE_RBTREE,
};
static int ak4671_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct regmap *regmap;
int ret;
regmap = devm_regmap_init_i2c(client, &ak4671_regmap);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&client->dev, "Failed to create regmap: %d\n", ret);
return ret;
}
ret = snd_soc_register_codec(&client->dev,
&soc_codec_dev_ak4671, &ak4671_dai, 1);
return ret;
}
static int ak4671_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id ak4671_i2c_id[] = {
{ "ak4671", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ak4671_i2c_id);
static struct i2c_driver ak4671_i2c_driver = {
.driver = {
.name = "ak4671-codec",
.owner = THIS_MODULE,
},
.probe = ak4671_i2c_probe,
.remove = ak4671_i2c_remove,
.id_table = ak4671_i2c_id,
};
module_i2c_driver(ak4671_i2c_driver);
MODULE_DESCRIPTION("ASoC AK4671 codec driver");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
cuviper/linux-uprobes | drivers/net/wireless/iwlegacy/iwl-4965-tx.c | 194 | 40985 | /******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-4965-hw.h"
#include "iwl-4965.h"
/*
* mac80211 queues, ACs, hardware queues, FIFOs.
*
* Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
*
* Mac80211 uses the following numbers, which we get from it
* by way of skb_get_queue_mapping(skb):
*
* VO 0
* VI 1
* BE 2
* BK 3
*
*
* Regular (not A-MPDU) frames are put into hardware queues corresponding
* to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
* own queue per aggregation session (RA/TID combination), such queues are
* set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
* order to map frames to the right queue, we also need an AC->hw queue
* mapping. This is implemented here.
*
* Due to the way hw queues are set up (by the hw specific modules like
* iwl-4965.c), the AC->hw queue mapping is the identity
* mapping.
*/
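/* TID -> access category mapping for TIDs 0-7; TIDs 8-15 (TSPEC) are rejected below */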
static const u8 tid_to_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
IEEE80211_AC_BE,
IEEE80211_AC_VI,
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_VO
};
static inline int iwl4965_get_ac_from_tid(u16 tid)
{
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
return tid_to_ac[tid];
/* no support for TIDs 8-15 yet */
return -EINVAL;
}
static inline int
iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
return ctx->ac_to_fifo[tid_to_ac[tid]];
/* no support for TIDs 8-15 yet */
return -EINVAL;
}
/*
* handle build REPLY_TX command notification.
*/
static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr,
u8 std_id)
{
__le16 fc = hdr->frame_control;
__le32 tx_flags = tx_cmd->tx_flags;
tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
tx_flags |= TX_CMD_FLG_ACK_MSK;
if (ieee80211_is_mgmt(fc))
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
if (ieee80211_is_probe_resp(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & 0xf))
tx_flags |= TX_CMD_FLG_TSF_MSK;
} else {
tx_flags &= (~TX_CMD_FLG_ACK_MSK);
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
tx_cmd->sta_id = std_id;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
if (ieee80211_is_data_qos(fc)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
else
tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
} else {
tx_cmd->timeout.pm_frame_timeout = 0;
}
tx_cmd->driver_txop = 0;
tx_cmd->tx_flags = tx_flags;
tx_cmd->next_frame_len = 0;
}
#define RTS_DFAULT_RETRY_LIMIT 60
static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
__le16 fc)
{
u32 rate_flags;
int rate_idx;
u8 rts_retry_limit;
u8 data_retry_limit;
u8 rate_plcp;
/* Set retry limit on DATA packets and Probe Responses*/
if (ieee80211_is_probe_resp(fc))
data_retry_limit = 3;
else
data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
tx_cmd->data_retry_limit = data_retry_limit;
/* Set retry limit on RTS packets */
rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
if (data_retry_limit < rts_retry_limit)
rts_retry_limit = data_retry_limit;
tx_cmd->rts_retry_limit = rts_retry_limit;
/* DATA packets will use the uCode station table for rate/antenna
* selection */
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
return;
}
/**
* If the current TX rate stored in mac80211 has the MCS bit set, it's
* not really a TX rate. Thus, we use the lowest supported rate for
* this band. Also use the lowest supported rate if the stored rate
* index is invalid.
*/
rate_idx = info->control.rates[0].idx;
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
rate_idx = rate_lowest_index(&priv->bands[info->band],
info->control.sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == IEEE80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = iwlegacy_rates[rate_idx].plcp;
/* Zero out flags for this packet */
rate_flags = 0;
/* Set CCK flag as needed */
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
priv->hw_params.valid_tx_ant);
rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
struct iwl_tx_cmd *tx_cmd,
struct sk_buff *skb_frag,
int sta_id)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
break;
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
/* fall through */
case WLAN_CIPHER_SUITE_WEP40:
tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
"with key %d\n", keyconf->keyidx);
break;
default:
IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
break;
}
}
/*
* start REPLY_TX command process
*/
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->control.sta;
struct iwl_station_priv *sta_priv = NULL;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
struct iwl_tx_cmd *tx_cmd;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int txq_id;
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
u16 len, firstlen, secondlen;
u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
u8 sta_id;
u8 wait_write_ptr = 0;
u8 tid = 0;
u8 *qc = NULL;
unsigned long flags;
bool is_agg = false;
if (info->control.vif)
ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
spin_lock_irqsave(&priv->lock, flags);
if (iwl_legacy_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock;
}
fc = hdr->frame_control;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
if (ieee80211_is_auth(fc))
IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
else if (ieee80211_is_assoc_req(fc))
IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
else if (ieee80211_is_reassoc_req(fc))
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
hdr_len = ieee80211_hdrlen(fc);
/* For management frames, use the broadcast id so as not to break aggregation */
if (!ieee80211_is_data(fc))
sta_id = ctx->bcast_sta_id;
else {
/* Find index into station table for destination station */
sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
goto drop_unlock;
}
}
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
if (sta)
sta_priv = (void *)sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
* next frame is processed -- and the next frame to
* this station is the one that will consume this
* counter.
* For now set the counter to just 1 since we do not
* support uAPSD yet.
*/
iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
/*
* Send this frame after DTIM -- there's a special queue
* reserved for this for contexts that support AP mode.
*/
if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
txq_id = ctx->mcast_queue;
/*
* The microcode will clear the more data
* bit in the last frame it transmits.
*/
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
} else
txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
/* irqs already disabled/saved above when locking priv->lock */
spin_lock(&priv->sta_lock);
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
spin_unlock(&priv->sta_lock);
goto drop_unlock;
}
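/* for QoS data the driver owns the 802.11 sequence counter per
 * <sta,tid>; keep mac80211's fragment bits and splice in our number */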
seq_number = priv->stations[sta_id].tid[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = hdr->seq_ctrl &
cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
is_agg = true;
}
}
txq = &priv->txq[txq_id];
q = &txq->q;
if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
spin_unlock(&priv->sta_lock);
goto drop_unlock;
}
if (ieee80211_is_data_qos(fc)) {
priv->stations[sta_id].tid[tid].tfds_in_queue++;
if (!ieee80211_has_morefrags(fc))
priv->stations[sta_id].tid[tid].seq_number = seq_number;
}
spin_unlock(&priv->sta_lock);
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
txq->txb[q->write_ptr].skb = skb;
txq->txb[q->write_ptr].ctx = ctx;
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
out_meta = &txq->meta[q->write_ptr];
tx_cmd = &out_cmd->cmd.tx;
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
/*
* Set up the Tx-command (not MAC!) header.
* Store the chosen Tx queue and TFD index within the sequence field;
* after Tx, uCode's Tx response will return this value so driver can
* locate the frame within the tx queue and do post-tx processing.
*/
out_cmd->hdr.cmd = REPLY_TX;
out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
if (info->control.hw_key)
iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
/* TODO need this for burst mode later on */
iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
iwl_legacy_update_stats(priv, true, fc, len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
* (payload data will be in another buffer).
* Size of this varies, due to varying MAC header length.
* If end is not dword aligned, we'll have 2 extra bytes at the end
* of the MAC header (device reads on dword boundaries).
* We'll tell device about this padding later.
*/
len = sizeof(struct iwl_tx_cmd) +
sizeof(struct iwl_cmd_header) + hdr_len;
firstlen = (len + 3) & ~3;
/* Tell NIC about any 2-byte padding after MAC header */
if (firstlen != len)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
&out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, firstlen, 1, 0);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
secondlen = skb->len - hdr_len;
if (secondlen > 0) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
secondlen, PCI_DMA_TODEVICE);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, secondlen,
0, 0);
}
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
/* take back ownership of DMA buffer to enable update */
pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
firstlen, PCI_DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
firstlen, PCI_DMA_BIDIRECTIONAL);
trace_iwlwifi_legacy_dev_tx(priv,
&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&out_cmd->hdr, firstlen,
skb->data + hdr_len, secondlen);
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_legacy_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
/*
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually,
* regardless of the value of ret. "ret" only indicates
* whether or not we should update the write pointer.
*/
/*
* Avoid atomic ops if it isn't an associated client.
* Also, if this is a packet for aggregation, don't
* increase the counter because the ucode will stop
* aggregation queues when their respective station
* goes to sleep.
*/
if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
if ((iwl_legacy_queue_space(q) < q->high_mark) &&
priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
txq->need_update = 1;
iwl_legacy_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
} else {
iwl_legacy_stop_queue(priv, txq);
}
}
return 0;
drop_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return -1;
}
static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
struct iwl_dma_ptr *ptr, size_t size)
{
ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
return 0;
}
static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
struct iwl_dma_ptr *ptr)
{
if (unlikely(!ptr->addr))
return;
dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
/**
* iwl4965_hw_txq_ctx_free - Free TXQ Context
*
* Destroy all TX DMA queues and structures
*/
void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
{
int txq_id;
/* Tx queues */
if (priv->txq) {
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
if (txq_id == priv->cmd_queue)
iwl_legacy_cmd_queue_free(priv);
else
iwl_legacy_tx_queue_free(priv, txq_id);
}
iwl4965_free_dma_ptr(priv, &priv->kw);
iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
/* free tx queue structure */
iwl_legacy_txq_mem(priv);
}
/**
* iwl4965_txq_ctx_alloc - allocate TX queue context
* Allocate all Tx DMA structures and initialize them
*
* @param priv
* @return error code
*/
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
{
int ret;
int txq_id, slots_num;
unsigned long flags;
/* Free all tx/cmd queues and keep-warm buffer */
iwl4965_hw_txq_ctx_free(priv);
ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
priv->hw_params.scd_bc_tbls_size);
if (ret) {
IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
goto error_bc_tbls;
}
/* Alloc keep-warm buffer */
ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
if (ret) {
IWL_ERR(priv, "Keep Warm allocation failed\n");
goto error_kw;
}
/* allocate tx queue structure */
ret = iwl_legacy_alloc_txq_mem(priv);
if (ret)
goto error;
spin_lock_irqsave(&priv->lock, flags);
/* Turn off all Tx DMA fifos */
iwl4965_txq_set_sched(priv, 0);
/* Tell NIC where to find the "keep warm" buffer */
iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
spin_unlock_irqrestore(&priv->lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == priv->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_legacy_tx_queue_init(priv,
&priv->txq[txq_id], slots_num,
txq_id);
if (ret) {
IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
goto error;
}
}
return ret;
error:
iwl4965_hw_txq_ctx_free(priv);
iwl4965_free_dma_ptr(priv, &priv->kw);
error_kw:
iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
error_bc_tbls:
return ret;
}
void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
{
int txq_id, slots_num;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
/* Turn off all Tx DMA fifos */
iwl4965_txq_set_sched(priv, 0);
/* Tell NIC where to find the "keep warm" buffer */
iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
spin_unlock_irqrestore(&priv->lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4) */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = txq_id == priv->cmd_queue ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
slots_num, txq_id);
}
}
/**
* iwl4965_txq_ctx_stop - Stop all Tx DMA channels
*/
void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
{
int ch, txq_id;
unsigned long flags;
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&priv->lock, flags);
iwl4965_txq_set_sched(priv, 0);
/* Stop each Tx DMA channel, and wait for it to be idle */
for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
iwl_legacy_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
1000))
IWL_ERR(priv, "Failing on timeout while stopping"
" DMA channel %d [0x%08x]", ch,
iwl_legacy_read_direct32(priv,
FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&priv->lock, flags);
if (!priv->txq)
return;
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
if (txq_id == priv->cmd_queue)
iwl_legacy_cmd_queue_unmap(priv);
else
iwl_legacy_tx_queue_unmap(priv, txq_id);
}
/*
* Find first available (lowest unused) Tx Queue, mark it "active".
* Called only when finding queue for aggregation.
* Should never return anything < 7, because they should already
* be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
*/
static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
{
int txq_id;
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
return txq_id;
return -1;
}
/**
* iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
*/
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
iwl_legacy_write_prph(priv,
IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
/**
* iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
*/
static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
u16 txq_id)
{
u32 tbl_dw_addr;
u32 tbl_dw;
u16 scd_q2ratid;
scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
tbl_dw_addr = priv->scd_base_addr +
IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
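/* Each translate-table dword maps two queues: odd-numbered queues
* occupy the upper 16 bits, even-numbered queues the lower 16. */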
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
return 0;
}
/**
* iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
*
* NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
* i.e. it must be one of the higher queues used for aggregation
*/
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
unsigned long flags;
u16 ra_tid;
int ret;
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL49_FIRST_AMPDU_QUEUE +
priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
IWL_WARN(priv,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWL49_FIRST_AMPDU_QUEUE,
IWL49_FIRST_AMPDU_QUEUE +
priv->cfg->base_params->num_of_ampdu_queues - 1);
return -EINVAL;
}
ra_tid = BUILD_RAxTID(sta_id, tid);
/* Modify device's station table to Tx this TID */
ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
if (ret)
return ret;
spin_lock_irqsave(&priv->lock, flags);
/* Stop this Tx queue before configuring it */
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
/* Map receiver-address / traffic-ID to this queue */
iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
/* Set this queue as a chain-building queue */
iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
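/* The queue ring holds 256 TFDs, so the SSN maps to a ring index via & 0xff */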
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
/* Set up Tx window size and frame limit for this queue */
iwl_legacy_write_targ_mem(priv,
priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
int sta_id;
int tx_fifo;
int txq_id;
int ret;
unsigned long flags;
struct iwl_tid_data *tid_data;
tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
if (unlikely(tx_fifo < 0))
return tx_fifo;
IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
__func__, sta->addr, tid);
sta_id = iwl_legacy_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
}
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
txq_id = iwl4965_txq_ctx_activate_free(priv);
if (txq_id == -1) {
IWL_ERR(priv, "No free aggregation queue available\n");
return -ENXIO;
}
spin_lock_irqsave(&priv->sta_lock, flags);
tid_data = &priv->stations[sta_id].tid[tid];
*ssn = SEQ_TO_SN(tid_data->seq_number);
tid_data->agg.txq_id = txq_id;
iwl_legacy_set_swq_id(&priv->txq[txq_id],
iwl4965_get_ac_from_tid(tid), txq_id);
spin_unlock_irqrestore(&priv->sta_lock, flags);
ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
sta_id, tid, *ssn);
if (ret)
return ret;
spin_lock_irqsave(&priv->sta_lock, flags);
tid_data = &priv->stations[sta_id].tid[tid];
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(priv, "HW queue is empty\n");
tid_data->agg.state = IWL_AGG_ON;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
} else {
IWL_DEBUG_HT(priv,
"HW queue is NOT empty: %d packets in HW queue\n",
tid_data->tfds_in_queue);
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
/**
* txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
* priv->lock must be held by the caller
*/
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo)
{
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL49_FIRST_AMPDU_QUEUE +
priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
IWL_WARN(priv,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWL49_FIRST_AMPDU_QUEUE,
IWL49_FIRST_AMPDU_QUEUE +
priv->cfg->base_params->num_of_ampdu_queues - 1);
return -EINVAL;
}
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
iwl_legacy_clear_bits_prph(priv,
IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
/* assumes that ssn_idx is valid (!= 0xFFF) */
iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
iwl_legacy_clear_bits_prph(priv,
IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
iwl_txq_ctx_deactivate(priv, txq_id);
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
return 0;
}
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
int tx_fifo_id, txq_id, sta_id, ssn;
struct iwl_tid_data *tid_data;
int write_ptr, read_ptr;
unsigned long flags;
tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
if (unlikely(tx_fifo_id < 0))
return tx_fifo_id;
sta_id = iwl_legacy_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
return -ENXIO;
}
spin_lock_irqsave(&priv->sta_lock, flags);
tid_data = &priv->stations[sta_id].tid[tid];
ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
txq_id = tid_data->agg.txq_id;
switch (priv->stations[sta_id].tid[tid].agg.state) {
case IWL_EMPTYING_HW_QUEUE_ADDBA:
/*
* This can happen if the peer stops aggregation
* again before we've had a chance to drain the
* queue we selected previously, i.e. before the
* session was really started completely.
*/
IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
goto turn_off;
case IWL_AGG_ON:
break;
default:
IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
}
write_ptr = priv->txq[txq_id].q.write_ptr;
read_ptr = priv->txq[txq_id].q.read_ptr;
/* The queue is not empty */
if (write_ptr != read_ptr) {
IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
priv->stations[sta_id].tid[tid].agg.state =
IWL_EMPTYING_HW_QUEUE_DELBA;
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
IWL_DEBUG_HT(priv, "HW queue is empty\n");
turn_off:
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
/* hand off from sta_lock to priv->lock without touching the irq state
* saved by the earlier irqsave; the final irqrestore below restores it */
spin_unlock(&priv->sta_lock);
spin_lock(&priv->lock);
/*
* the only reason this call can fail is a queue number out of range,
* which can happen if uCode is reloaded and all the station
* information is lost. If it is outside the range, there is no need
* to deactivate the uCode queue; just return "success" to allow
* mac80211 to clean up its own data.
*/
iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
return 0;
}
int iwl4965_txq_check_empty(struct iwl_priv *priv,
int sta_id, u8 tid, int txq_id)
{
struct iwl_queue *q = &priv->txq[txq_id].q;
u8 *addr = priv->stations[sta_id].sta.sta.addr;
struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
struct iwl_rxon_context *ctx;
ctx = &priv->contexts[priv->stations[sta_id].ctxid];
lockdep_assert_held(&priv->sta_lock);
switch (priv->stations[sta_id].tid[tid].agg.state) {
case IWL_EMPTYING_HW_QUEUE_DELBA:
/* We are reclaiming the last packet of the */
/* aggregated HW queue */
if ((txq_id == tid_data->agg.txq_id) &&
(q->read_ptr == q->write_ptr)) {
u16 ssn = SEQ_TO_SN(tid_data->seq_number);
int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
IWL_DEBUG_HT(priv,
"HW queue empty: continue DELBA flow\n");
iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
tid_data->agg.state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
}
break;
case IWL_EMPTYING_HW_QUEUE_ADDBA:
/* We are reclaiming the last packet of the queue */
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(priv,
"HW queue empty: continue ADDBA flow\n");
tid_data->agg.state = IWL_AGG_ON;
ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
}
break;
}
return 0;
}
static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
const u8 *addr1)
{
struct ieee80211_sta *sta;
struct iwl_station_priv *sta_priv;
rcu_read_lock();
sta = ieee80211_find_sta(ctx->vif, addr1);
if (sta) {
sta_priv = (void *)sta->drv_priv;
/* avoid atomic ops if this isn't a client */
if (sta_priv->client &&
atomic_dec_return(&sta_priv->pending_frames) == 0)
ieee80211_sta_block_awake(priv->hw, sta, false);
}
rcu_read_unlock();
}
static void
iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
bool is_agg)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
if (!is_agg)
iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
struct iwl_tx_info *tx_info;
int nfreed = 0;
struct ieee80211_hdr *hdr;
if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
"is out of range [0-%d] %d %d.\n", txq_id,
index, q->n_bd, q->write_ptr, q->read_ptr);
return 0;
}
for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
q->read_ptr != index;
q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
if (WARN_ON_ONCE(tx_info->skb == NULL))
continue;
hdr = (struct ieee80211_hdr *)tx_info->skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
nfreed++;
iwl4965_tx_status(priv, tx_info,
txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
tx_info->skb = NULL;
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
return nfreed;
}
/**
* iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
*
* Go through block-ack's bitmap of ACK'd frames, update driver's record of
* ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
*/
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_ht_agg *agg,
struct iwl_compressed_ba_resp *ba_resp)
{
int i, sh, ack;
u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
int successes = 0;
struct ieee80211_tx_info *info;
u64 bitmap, sent_bitmap;
if (unlikely(!agg->wait_for_ba)) {
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
return -EINVAL;
}
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = 0;
IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
ba_resp->seq_ctl);
/* Calculate shift to align block-ack bits with our Tx window bits */
sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
if (sh < 0) /* block-ack window wrapped the 256-entry ring; realign */
sh += 0x100;
if (agg->frame_count > (64 - sh)) {
IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
return -1;
}
/* don't use 64-bit values for now */
bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
/* check for success or failure according to the
* transmitted bitmap and block-ack bitmap */
sent_bitmap = bitmap & agg->bitmap;
/* For each frame attempted in aggregation,
* update driver's record of tx frame's status. */
i = 0;
while (sent_bitmap) {
ack = sent_bitmap & 1ULL;
successes += ack;
IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
ack ? "ACK" : "NACK", i,
(agg->start_idx + i) & 0xff,
agg->start_idx + i);
sent_bitmap >>= 1;
++i;
}
IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
(unsigned long long)bitmap);
info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
memset(&info->status, 0, sizeof(info->status));
info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = successes;
info->status.ampdu_len = agg->frame_count;
iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
return 0;
}
/**
* translate ucode response to mac80211 tx status control values
*/
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
struct ieee80211_tx_info *info)
{
struct ieee80211_tx_rate *r = &info->control.rates[0];
info->antenna_sel_tx =
((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
if (rate_n_flags & RATE_MCS_HT_MSK)
r->flags |= IEEE80211_TX_RC_MCS;
if (rate_n_flags & RATE_MCS_GF_MSK)
r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
if (rate_n_flags & RATE_MCS_HT40_MSK)
r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (rate_n_flags & RATE_MCS_DUP_MSK)
r->flags |= IEEE80211_TX_RC_DUP_DATA;
if (rate_n_flags & RATE_MCS_SGI_MSK)
r->flags |= IEEE80211_TX_RC_SHORT_GI;
r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
/**
* iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
*
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
struct iwl_tx_queue *txq = NULL;
struct iwl_ht_agg *agg;
int index;
int sta_id;
int tid;
unsigned long flags;
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
if (scd_flow >= priv->hw_params.max_txq_num) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
return;
}
txq = &priv->txq[scd_flow];
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
agg = &priv->stations[sta_id].tid[tid].agg;
if (unlikely(agg->txq_id != scd_flow)) {
/*
* FIXME: this is a uCode bug which needs to be addressed;
* log the information and return for now.
* Since it can happen very often, and in order not to fill
* the syslog, don't enable the logging by default.
*/
IWL_DEBUG_TX_REPLY(priv,
"BA scd_flow %d does not match txq_id %d\n",
scd_flow, agg->txq_id);
return;
}
/* Find index just before block-ack window */
index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
spin_lock_irqsave(&priv->sta_lock, flags);
IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
"sta_id = %d\n",
agg->wait_for_ba,
(u8 *) &ba_resp->sta_addr_lo32,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
"scd_flow = "
"%d, scd_ssn = %d\n",
ba_resp->tid,
ba_resp->seq_ctl,
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
ba_resp->scd_flow,
ba_resp->scd_ssn);
IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
agg->start_idx,
(unsigned long long)agg->bitmap);
/* Update driver's record of ACK vs. not for each frame in window */
iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
/* Release all TFDs before the SSN, i.e. all TFDs in front of
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway). */
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
/* calculate mac80211 ampdu sw queue to wake */
int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
iwl_legacy_wake_queue(priv, txq);
iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
const char *iwl4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
switch (status & TX_STATUS_MSK) {
case TX_STATUS_SUCCESS:
return "SUCCESS";
TX_STATUS_POSTPONE(DELAY);
TX_STATUS_POSTPONE(FEW_BYTES);
TX_STATUS_POSTPONE(QUIET_PERIOD);
TX_STATUS_POSTPONE(CALC_TTAK);
TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
TX_STATUS_FAIL(SHORT_LIMIT);
TX_STATUS_FAIL(LONG_LIMIT);
TX_STATUS_FAIL(FIFO_UNDERRUN);
TX_STATUS_FAIL(DRAIN_FLOW);
TX_STATUS_FAIL(RFKILL_FLUSH);
TX_STATUS_FAIL(LIFE_EXPIRE);
TX_STATUS_FAIL(DEST_PS);
TX_STATUS_FAIL(HOST_ABORTED);
TX_STATUS_FAIL(BT_RETRY);
TX_STATUS_FAIL(STA_INVALID);
TX_STATUS_FAIL(FRAG_DROPPED);
TX_STATUS_FAIL(TID_DISABLE);
TX_STATUS_FAIL(FIFO_FLUSHED);
TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
TX_STATUS_FAIL(PASSIVE_NO_RX);
TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
}
return "UNKNOWN";
#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
| gpl-2.0 |
allanmatthew/backports-20141221 | drivers/media/dvb-frontends/au8522_dig.c | 450 | 19904 | /*
Auvitek AU8522 QAM/8VSB demodulator driver
Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "dvb_frontend.h"
#include "au8522.h"
#include "au8522_priv.h"
static int debug;
static int zv_mode = 1; /* default to on */
#define dprintk(arg...)\
do { if (debug)\
printk(arg);\
} while (0)
struct mse2snr_tab {
u16 val;
u16 data;
};
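/*
* Each table below maps a raw MSE reading to SNR in units of 0.1 dB.
* The lookup returns the first entry whose threshold exceeds the MSE,
* so entries must stay sorted by ascending val.
*/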
/* VSB SNR lookup table */
static struct mse2snr_tab vsb_mse2snr_tab[] = {
{ 0, 270 },
{ 2, 250 },
{ 3, 240 },
{ 5, 230 },
{ 7, 220 },
{ 9, 210 },
{ 12, 200 },
{ 13, 195 },
{ 15, 190 },
{ 17, 185 },
{ 19, 180 },
{ 21, 175 },
{ 24, 170 },
{ 27, 165 },
{ 31, 160 },
{ 32, 158 },
{ 33, 156 },
{ 36, 152 },
{ 37, 150 },
{ 39, 148 },
{ 40, 146 },
{ 41, 144 },
{ 43, 142 },
{ 44, 140 },
{ 48, 135 },
{ 50, 130 },
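/* note: the next entry is out of order and therefore unreachable
* given the ascending-threshold lookup above */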
{ 43, 142 },
{ 53, 125 },
{ 56, 120 },
{ 256, 115 },
};
/* QAM64 SNR lookup table */
static struct mse2snr_tab qam64_mse2snr_tab[] = {
{ 15, 0 },
{ 16, 290 },
{ 17, 288 },
{ 18, 286 },
{ 19, 284 },
{ 20, 282 },
{ 21, 281 },
{ 22, 279 },
{ 23, 277 },
{ 24, 275 },
{ 25, 273 },
{ 26, 271 },
{ 27, 269 },
{ 28, 268 },
{ 29, 266 },
{ 30, 264 },
{ 31, 262 },
{ 32, 260 },
{ 33, 259 },
{ 34, 258 },
{ 35, 256 },
{ 36, 255 },
{ 37, 254 },
{ 38, 252 },
{ 39, 251 },
{ 40, 250 },
{ 41, 249 },
{ 42, 248 },
{ 43, 246 },
{ 44, 245 },
{ 45, 244 },
{ 46, 242 },
{ 47, 241 },
{ 48, 240 },
{ 50, 239 },
{ 51, 238 },
{ 53, 237 },
{ 54, 236 },
{ 56, 235 },
{ 57, 234 },
{ 59, 233 },
{ 60, 232 },
{ 62, 231 },
{ 63, 230 },
{ 65, 229 },
{ 67, 228 },
{ 68, 227 },
{ 70, 226 },
{ 71, 225 },
{ 73, 224 },
{ 74, 223 },
{ 76, 222 },
{ 78, 221 },
{ 80, 220 },
{ 82, 219 },
{ 85, 218 },
{ 88, 217 },
{ 90, 216 },
{ 92, 215 },
{ 93, 214 },
{ 94, 212 },
{ 95, 211 },
{ 97, 210 },
{ 99, 209 },
{ 101, 208 },
{ 102, 207 },
{ 104, 206 },
{ 107, 205 },
{ 111, 204 },
{ 114, 203 },
{ 118, 202 },
{ 122, 201 },
{ 125, 200 },
{ 128, 199 },
{ 130, 198 },
{ 132, 197 },
{ 256, 190 },
};
/* QAM256 SNR lookup table */
static struct mse2snr_tab qam256_mse2snr_tab[] = {
{ 15, 0 },
{ 16, 400 },
{ 17, 398 },
{ 18, 396 },
{ 19, 394 },
{ 20, 392 },
{ 21, 390 },
{ 22, 388 },
{ 23, 386 },
{ 24, 384 },
{ 25, 382 },
{ 26, 380 },
{ 27, 379 },
{ 28, 378 },
{ 29, 377 },
{ 30, 376 },
{ 31, 375 },
{ 32, 374 },
{ 33, 373 },
{ 34, 372 },
{ 35, 371 },
{ 36, 370 },
{ 37, 362 },
{ 38, 354 },
{ 39, 346 },
{ 40, 338 },
{ 41, 330 },
{ 42, 328 },
{ 43, 326 },
{ 44, 324 },
{ 45, 322 },
{ 46, 320 },
{ 47, 319 },
{ 48, 318 },
{ 49, 317 },
{ 50, 316 },
{ 51, 315 },
{ 52, 314 },
{ 53, 313 },
{ 54, 312 },
{ 55, 311 },
{ 56, 310 },
{ 57, 308 },
{ 58, 306 },
{ 59, 304 },
{ 60, 302 },
{ 61, 300 },
{ 62, 298 },
{ 65, 295 },
{ 68, 294 },
{ 70, 293 },
{ 73, 292 },
{ 76, 291 },
{ 78, 290 },
{ 79, 289 },
{ 81, 288 },
{ 82, 287 },
{ 83, 286 },
{ 84, 285 },
{ 85, 284 },
{ 86, 283 },
{ 88, 282 },
{ 89, 281 },
{ 256, 280 },
};
static int au8522_mse2snr_lookup(struct mse2snr_tab *tab, int sz, int mse,
u16 *snr)
{
int i, ret = -EINVAL;
dprintk("%s()\n", __func__);
for (i = 0; i < sz; i++) {
if (mse < tab[i].val) {
*snr = tab[i].data;
ret = 0;
break;
}
}
dprintk("%s() snr=%d\n", __func__, *snr);
return ret;
}
static int au8522_set_if(struct dvb_frontend *fe, enum au8522_if_freq if_freq)
{
struct au8522_state *state = fe->demodulator_priv;
u8 r0b5, r0b6, r0b7;
char *ifmhz;
switch (if_freq) {
case AU8522_IF_3_25MHZ:
ifmhz = "3.25";
r0b5 = 0x00;
r0b6 = 0x3d;
r0b7 = 0xa0;
break;
case AU8522_IF_4MHZ:
ifmhz = "4.00";
r0b5 = 0x00;
r0b6 = 0x4b;
r0b7 = 0xd9;
break;
case AU8522_IF_6MHZ:
ifmhz = "6.00";
r0b5 = 0xfb;
r0b6 = 0x8e;
r0b7 = 0x39;
break;
default:
dprintk("%s() IF Frequency not supported\n", __func__);
return -EINVAL;
}
dprintk("%s() %s MHz\n", __func__, ifmhz);
au8522_writereg(state, 0x80b5, r0b5);
au8522_writereg(state, 0x80b6, r0b6);
au8522_writereg(state, 0x80b7, r0b7);
return 0;
}
/* VSB Modulation table */
static struct {
u16 reg;
u16 data;
} VSB_mod_tab[] = {
{ 0x8090, 0x84 },
{ 0x4092, 0x11 },
{ 0x2005, 0x00 },
{ 0x8091, 0x80 },
{ 0x80a3, 0x0c },
{ 0x80a4, 0xe8 },
{ 0x8081, 0xc4 },
{ 0x80a5, 0x40 },
{ 0x80a7, 0x40 },
{ 0x80a6, 0x67 },
{ 0x8262, 0x20 },
{ 0x821c, 0x30 },
{ 0x80d8, 0x1a },
{ 0x8227, 0xa0 },
{ 0x8121, 0xff },
{ 0x80a8, 0xf0 },
{ 0x80a9, 0x05 },
{ 0x80aa, 0x77 },
{ 0x80ab, 0xf0 },
{ 0x80ac, 0x05 },
{ 0x80ad, 0x77 },
{ 0x80ae, 0x41 },
{ 0x80af, 0x66 },
{ 0x821b, 0xcc },
{ 0x821d, 0x80 },
{ 0x80a4, 0xe8 },
{ 0x8231, 0x13 },
};
/* QAM64 Modulation table */
static struct {
u16 reg;
u16 data;
} QAM64_mod_tab[] = {
{ 0x00a3, 0x09 },
{ 0x00a4, 0x00 },
{ 0x0081, 0xc4 },
{ 0x00a5, 0x40 },
{ 0x00aa, 0x77 },
{ 0x00ad, 0x77 },
{ 0x00a6, 0x67 },
{ 0x0262, 0x20 },
{ 0x021c, 0x30 },
{ 0x00b8, 0x3e },
{ 0x00b9, 0xf0 },
{ 0x00ba, 0x01 },
{ 0x00bb, 0x18 },
{ 0x00bc, 0x50 },
{ 0x00bd, 0x00 },
{ 0x00be, 0xea },
{ 0x00bf, 0xef },
{ 0x00c0, 0xfc },
{ 0x00c1, 0xbd },
{ 0x00c2, 0x1f },
{ 0x00c3, 0xfc },
{ 0x00c4, 0xdd },
{ 0x00c5, 0xaf },
{ 0x00c6, 0x00 },
{ 0x00c7, 0x38 },
{ 0x00c8, 0x30 },
{ 0x00c9, 0x05 },
{ 0x00ca, 0x4a },
{ 0x00cb, 0xd0 },
{ 0x00cc, 0x01 },
{ 0x00cd, 0xd9 },
{ 0x00ce, 0x6f },
{ 0x00cf, 0xf9 },
{ 0x00d0, 0x70 },
{ 0x00d1, 0xdf },
{ 0x00d2, 0xf7 },
{ 0x00d3, 0xc2 },
{ 0x00d4, 0xdf },
{ 0x00d5, 0x02 },
{ 0x00d6, 0x9a },
{ 0x00d7, 0xd0 },
{ 0x0250, 0x0d },
{ 0x0251, 0xcd },
{ 0x0252, 0xe0 },
{ 0x0253, 0x05 },
{ 0x0254, 0xa7 },
{ 0x0255, 0xff },
{ 0x0256, 0xed },
{ 0x0257, 0x5b },
{ 0x0258, 0xae },
{ 0x0259, 0xe6 },
{ 0x025a, 0x3d },
{ 0x025b, 0x0f },
{ 0x025c, 0x0d },
{ 0x025d, 0xea },
{ 0x025e, 0xf2 },
{ 0x025f, 0x51 },
{ 0x0260, 0xf5 },
{ 0x0261, 0x06 },
{ 0x021a, 0x00 },
{ 0x0546, 0x40 },
{ 0x0210, 0xc7 },
{ 0x0211, 0xaa },
{ 0x0212, 0xab },
{ 0x0213, 0x02 },
{ 0x0502, 0x00 },
{ 0x0121, 0x04 },
{ 0x0122, 0x04 },
{ 0x052e, 0x10 },
{ 0x00a4, 0xca },
{ 0x00a7, 0x40 },
{ 0x0526, 0x01 },
};
/* QAM256 Modulation table */
static struct {
u16 reg;
u16 data;
} QAM256_mod_tab[] = {
{ 0x80a3, 0x09 },
{ 0x80a4, 0x00 },
{ 0x8081, 0xc4 },
{ 0x80a5, 0x40 },
{ 0x80aa, 0x77 },
{ 0x80ad, 0x77 },
{ 0x80a6, 0x67 },
{ 0x8262, 0x20 },
{ 0x821c, 0x30 },
{ 0x80b8, 0x3e },
{ 0x80b9, 0xf0 },
{ 0x80ba, 0x01 },
{ 0x80bb, 0x18 },
{ 0x80bc, 0x50 },
{ 0x80bd, 0x00 },
{ 0x80be, 0xea },
{ 0x80bf, 0xef },
{ 0x80c0, 0xfc },
{ 0x80c1, 0xbd },
{ 0x80c2, 0x1f },
{ 0x80c3, 0xfc },
{ 0x80c4, 0xdd },
{ 0x80c5, 0xaf },
{ 0x80c6, 0x00 },
{ 0x80c7, 0x38 },
{ 0x80c8, 0x30 },
{ 0x80c9, 0x05 },
{ 0x80ca, 0x4a },
{ 0x80cb, 0xd0 },
{ 0x80cc, 0x01 },
{ 0x80cd, 0xd9 },
{ 0x80ce, 0x6f },
{ 0x80cf, 0xf9 },
{ 0x80d0, 0x70 },
{ 0x80d1, 0xdf },
{ 0x80d2, 0xf7 },
{ 0x80d3, 0xc2 },
{ 0x80d4, 0xdf },
{ 0x80d5, 0x02 },
{ 0x80d6, 0x9a },
{ 0x80d7, 0xd0 },
{ 0x8250, 0x0d },
{ 0x8251, 0xcd },
{ 0x8252, 0xe0 },
{ 0x8253, 0x05 },
{ 0x8254, 0xa7 },
{ 0x8255, 0xff },
{ 0x8256, 0xed },
{ 0x8257, 0x5b },
{ 0x8258, 0xae },
{ 0x8259, 0xe6 },
{ 0x825a, 0x3d },
{ 0x825b, 0x0f },
{ 0x825c, 0x0d },
{ 0x825d, 0xea },
{ 0x825e, 0xf2 },
{ 0x825f, 0x51 },
{ 0x8260, 0xf5 },
{ 0x8261, 0x06 },
{ 0x821a, 0x00 },
{ 0x8546, 0x40 },
{ 0x8210, 0x26 },
{ 0x8211, 0xf6 },
{ 0x8212, 0x84 },
{ 0x8213, 0x02 },
{ 0x8502, 0x01 },
{ 0x8121, 0x04 },
{ 0x8122, 0x04 },
{ 0x852e, 0x10 },
{ 0x80a4, 0xca },
{ 0x80a7, 0x40 },
{ 0x8526, 0x01 },
};
static struct {
u16 reg;
u16 data;
} QAM256_mod_tab_zv_mode[] = {
{ 0x80a3, 0x09 },
{ 0x80a4, 0x00 },
{ 0x8081, 0xc4 },
{ 0x80a5, 0x40 },
{ 0x80b5, 0xfb },
{ 0x80b6, 0x8e },
{ 0x80b7, 0x39 },
{ 0x80aa, 0x77 },
{ 0x80ad, 0x77 },
{ 0x80a6, 0x67 },
{ 0x8262, 0x20 },
{ 0x821c, 0x30 },
{ 0x80b8, 0x3e },
{ 0x80b9, 0xf0 },
{ 0x80ba, 0x01 },
{ 0x80bb, 0x18 },
{ 0x80bc, 0x50 },
{ 0x80bd, 0x00 },
{ 0x80be, 0xea },
{ 0x80bf, 0xef },
{ 0x80c0, 0xfc },
{ 0x80c1, 0xbd },
{ 0x80c2, 0x1f },
{ 0x80c3, 0xfc },
{ 0x80c4, 0xdd },
{ 0x80c5, 0xaf },
{ 0x80c6, 0x00 },
{ 0x80c7, 0x38 },
{ 0x80c8, 0x30 },
{ 0x80c9, 0x05 },
{ 0x80ca, 0x4a },
{ 0x80cb, 0xd0 },
{ 0x80cc, 0x01 },
{ 0x80cd, 0xd9 },
{ 0x80ce, 0x6f },
{ 0x80cf, 0xf9 },
{ 0x80d0, 0x70 },
{ 0x80d1, 0xdf },
{ 0x80d2, 0xf7 },
{ 0x80d3, 0xc2 },
{ 0x80d4, 0xdf },
{ 0x80d5, 0x02 },
{ 0x80d6, 0x9a },
{ 0x80d7, 0xd0 },
{ 0x8250, 0x0d },
{ 0x8251, 0xcd },
{ 0x8252, 0xe0 },
{ 0x8253, 0x05 },
{ 0x8254, 0xa7 },
{ 0x8255, 0xff },
{ 0x8256, 0xed },
{ 0x8257, 0x5b },
{ 0x8258, 0xae },
{ 0x8259, 0xe6 },
{ 0x825a, 0x3d },
{ 0x825b, 0x0f },
{ 0x825c, 0x0d },
{ 0x825d, 0xea },
{ 0x825e, 0xf2 },
{ 0x825f, 0x51 },
{ 0x8260, 0xf5 },
{ 0x8261, 0x06 },
{ 0x821a, 0x01 },
{ 0x8546, 0x40 },
{ 0x8210, 0x26 },
{ 0x8211, 0xf6 },
{ 0x8212, 0x84 },
{ 0x8213, 0x02 },
{ 0x8502, 0x01 },
{ 0x8121, 0x04 },
{ 0x8122, 0x04 },
{ 0x852e, 0x10 },
{ 0x80a4, 0xca },
{ 0x80a7, 0x40 },
{ 0x8526, 0x01 },
};
static int au8522_enable_modulation(struct dvb_frontend *fe,
fe_modulation_t m)
{
struct au8522_state *state = fe->demodulator_priv;
int i;
dprintk("%s(0x%08x)\n", __func__, m);
switch (m) {
case VSB_8:
dprintk("%s() VSB_8\n", __func__);
for (i = 0; i < ARRAY_SIZE(VSB_mod_tab); i++)
au8522_writereg(state,
VSB_mod_tab[i].reg,
VSB_mod_tab[i].data);
au8522_set_if(fe, state->config->vsb_if);
break;
case QAM_64:
dprintk("%s() QAM 64\n", __func__);
for (i = 0; i < ARRAY_SIZE(QAM64_mod_tab); i++)
au8522_writereg(state,
QAM64_mod_tab[i].reg,
QAM64_mod_tab[i].data);
au8522_set_if(fe, state->config->qam_if);
break;
case QAM_256:
if (zv_mode) {
dprintk("%s() QAM 256 (zv_mode)\n", __func__);
for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab_zv_mode); i++)
au8522_writereg(state,
QAM256_mod_tab_zv_mode[i].reg,
QAM256_mod_tab_zv_mode[i].data);
au8522_set_if(fe, state->config->qam_if);
msleep(100);
au8522_writereg(state, 0x821a, 0x00);
} else {
dprintk("%s() QAM 256\n", __func__);
for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
au8522_writereg(state,
QAM256_mod_tab[i].reg,
QAM256_mod_tab[i].data);
au8522_set_if(fe, state->config->qam_if);
}
break;
default:
dprintk("%s() Invalid modulation\n", __func__);
return -EINVAL;
}
state->current_modulation = m;
return 0;
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
static int au8522_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct au8522_state *state = fe->demodulator_priv;
int ret = -EINVAL;
dprintk("%s(frequency=%d)\n", __func__, c->frequency);
if ((state->current_frequency == c->frequency) &&
(state->current_modulation == c->modulation))
return 0;
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
if (ret < 0)
return ret;
/* Allow the tuner to settle */
if (zv_mode) {
dprintk("%s() increase tuner settling time for zv_mode\n",
__func__);
msleep(250);
} else
msleep(100);
au8522_enable_modulation(fe, c->modulation);
state->current_frequency = c->frequency;
return 0;
}
static int au8522_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct au8522_state *state = fe->demodulator_priv;
u8 reg;
u32 tuner_status = 0;
*status = 0;
if (state->current_modulation == VSB_8) {
dprintk("%s() Checking VSB_8\n", __func__);
reg = au8522_readreg(state, 0x4088);
if ((reg & 0x03) == 0x03)
*status |= FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI;
} else {
dprintk("%s() Checking QAM\n", __func__);
reg = au8522_readreg(state, 0x4541);
if (reg & 0x80)
*status |= FE_HAS_VITERBI;
if (reg & 0x20)
*status |= FE_HAS_LOCK | FE_HAS_SYNC;
}
switch (state->config->status_mode) {
case AU8522_DEMODLOCKING:
dprintk("%s() DEMODLOCKING\n", __func__);
if (*status & FE_HAS_VITERBI)
*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
break;
case AU8522_TUNERLOCKING:
/* Get the tuner status */
dprintk("%s() TUNERLOCKING\n", __func__);
if (fe->ops.tuner_ops.get_status) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
fe->ops.tuner_ops.get_status(fe, &tuner_status);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
if (tuner_status)
*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
break;
}
state->fe_status = *status;
if (*status & FE_HAS_LOCK)
/* turn on LED, if it isn't on already */
au8522_led_ctrl(state, -1);
else
/* turn off LED */
au8522_led_ctrl(state, 0);
dprintk("%s() status 0x%08x\n", __func__, *status);
return 0;
}
static int au8522_led_status(struct au8522_state *state, const u16 *snr)
{
struct au8522_led_config *led_config = state->config->led_cfg;
int led;
u16 strong;
/* bail out if we can't control an LED */
if (!led_config)
return 0;
if (0 == (state->fe_status & FE_HAS_LOCK))
return au8522_led_ctrl(state, 0);
else if (state->current_modulation == QAM_256)
strong = led_config->qam256_strong;
else if (state->current_modulation == QAM_64)
strong = led_config->qam64_strong;
else /* (state->current_modulation == VSB_8) */
strong = led_config->vsb8_strong;
if (*snr >= strong)
led = 2;
else
led = 1;
if ((state->led_state) &&
(((strong < *snr) ? (*snr - strong) : (strong - *snr)) <= 10))
/* snr didn't change enough to bother
* changing the color of the led */
return 0;
return au8522_led_ctrl(state, led);
}
static int au8522_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct au8522_state *state = fe->demodulator_priv;
int ret = -EINVAL;
dprintk("%s()\n", __func__);
if (state->current_modulation == QAM_256)
ret = au8522_mse2snr_lookup(qam256_mse2snr_tab,
ARRAY_SIZE(qam256_mse2snr_tab),
au8522_readreg(state, 0x4522),
snr);
else if (state->current_modulation == QAM_64)
ret = au8522_mse2snr_lookup(qam64_mse2snr_tab,
ARRAY_SIZE(qam64_mse2snr_tab),
au8522_readreg(state, 0x4522),
snr);
else /* VSB_8 */
ret = au8522_mse2snr_lookup(vsb_mse2snr_tab,
ARRAY_SIZE(vsb_mse2snr_tab),
au8522_readreg(state, 0x4311),
snr);
if (state->config->led_cfg)
au8522_led_status(state, snr);
return ret;
}
static int au8522_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
/* borrowed from lgdt330x.c
*
* Calculate strength from SNR up to 35dB
* Even though the SNR can go higher than 35dB,
* there is some comfort factor in having a range of
* strong signals that can show at 100%
*/
u16 snr;
u32 tmp;
int ret = au8522_read_snr(fe, &snr);
*signal_strength = 0;
if (0 == ret) {
/* The following calculation method was chosen
* purely for the sake of code re-use from the
* other demod drivers that use this method */
/* Convert from SNR in dB * 10 to 8.24 fixed-point */
tmp = (snr * ((1 << 24) / 10));
/* Convert from 8.24 fixed-point to
* scale the range 0 - 35*2^24 into 0 - 65535*/
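/* 35 * 2^24 / 65536 == 35 * 256 == 8960, hence the constants below */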
if (tmp >= 8960 * 0x10000)
*signal_strength = 0xffff;
else
*signal_strength = tmp / 8960;
}
return ret;
}
static int au8522_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct au8522_state *state = fe->demodulator_priv;
if (state->current_modulation == VSB_8)
*ucblocks = au8522_readreg(state, 0x4087);
else
*ucblocks = au8522_readreg(state, 0x4543);
return 0;
}
static int au8522_read_ber(struct dvb_frontend *fe, u32 *ber)
{
return au8522_read_ucblocks(fe, ber);
}
static int au8522_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct au8522_state *state = fe->demodulator_priv;
c->frequency = state->current_frequency;
c->modulation = state->current_modulation;
return 0;
}
static int au8522_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 1000;
return 0;
}
static struct dvb_frontend_ops au8522_ops;
static void au8522_release(struct dvb_frontend *fe)
{
struct au8522_state *state = fe->demodulator_priv;
au8522_release_state(state);
}
struct dvb_frontend *au8522_attach(const struct au8522_config *config,
struct i2c_adapter *i2c)
{
struct au8522_state *state = NULL;
int instance;
/* allocate memory for the internal state */
instance = au8522_get_state(&state, i2c, config->demod_address);
switch (instance) {
case 0:
dprintk("%s state allocation failed\n", __func__);
return NULL; /* allocation failed; state must not be dereferenced below */
case 1:
/* new demod instance */
dprintk("%s using new instance\n", __func__);
break;
default:
/* existing demod instance */
dprintk("%s using existing instance\n", __func__);
break;
}
/* setup the state */
state->config = config;
state->i2c = i2c;
state->operational_mode = AU8522_DIGITAL_MODE;
/* create dvb_frontend */
memcpy(&state->frontend.ops, &au8522_ops,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
state->frontend.ops.analog_ops.i2c_gate_ctrl = au8522_analog_i2c_gate_ctrl;
if (au8522_init(&state->frontend) != 0) {
printk(KERN_ERR "%s: Failed to initialize correctly\n",
__func__);
goto error;
}
/* Note: Leaving the I2C gate open here. */
au8522_i2c_gate_ctrl(&state->frontend, 1);
return &state->frontend;
error:
au8522_release_state(state);
return NULL;
}
EXPORT_SYMBOL(au8522_attach);
static struct dvb_frontend_ops au8522_ops = {
.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.init = au8522_init,
.sleep = au8522_sleep,
.i2c_gate_ctrl = au8522_i2c_gate_ctrl,
.set_frontend = au8522_set_frontend,
.get_frontend = au8522_get_frontend,
.get_tune_settings = au8522_get_tune_settings,
.read_status = au8522_read_status,
.read_ber = au8522_read_ber,
.read_signal_strength = au8522_read_signal_strength,
.read_snr = au8522_read_snr,
.read_ucblocks = au8522_read_ucblocks,
.release = au8522_release,
};
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable verbose debug messages");
module_param(zv_mode, int, 0644);
MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatibility mode (default:on).\n"
"\t\ton - modified AU8522 QAM256 initialization.\n"
"\t\tProvides faster lock when using ZeeVee modulator based sources");
MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
| gpl-2.0 |
loveboylion/android_kernel_yu_msm8916 | drivers/gpu/drm/radeon/atombios_dp.c | 706 | 26500 | /*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
#include <drm/drm_dp_helper.h>
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
"0dB", "3.5dB", "6dB", "9.5dB"
};
/***** radeon AUX functions *****/
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
* dw units.
*/
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
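/* the 20-byte temporaries assume num_bytes <= 20, which holds for the
* AUX messages built in this file */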
u32 *dst32, *src32;
int i;
memcpy(src_tmp, src, num_bytes);
src32 = (u32 *)src_tmp;
dst32 = (u32 *)dst_tmp;
if (to_le) {
for (i = 0; i < ((num_bytes + 3) / 4); i++)
dst32[i] = cpu_to_le32(src32[i]);
memcpy(dst, dst_tmp, num_bytes);
} else {
u8 dws = num_bytes & ~3;
for (i = 0; i < ((num_bytes + 3) / 4); i++)
dst32[i] = le32_to_cpu(src32[i]);
memcpy(dst, dst_tmp, dws);
if (num_bytes % 4) {
for (i = 0; i < (num_bytes % 4); i++)
dst[dws+i] = dst_tmp[dws+i];
}
}
#else
memcpy(dst, src, num_bytes);
#endif
}
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
u8 *send, int send_bytes,
u8 *recv, int recv_size,
u8 delay, u8 *ack)
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int recv_bytes;
memset(&args, 0, sizeof(args));
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
radeon_atom_copy_swap(base, send, send_bytes, true);
args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
*ack = args.v1.ucReplyStatus;
/* timeout */
if (args.v1.ucReplyStatus == 1) {
DRM_DEBUG_KMS("dp_aux_ch timeout\n");
return -ETIMEDOUT;
}
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
return -EBUSY;
}
/* error */
if (args.v1.ucReplyStatus == 3) {
DRM_DEBUG_KMS("dp_aux_ch error\n");
return -EIO;
}
recv_bytes = args.v1.ucDataOutLen;
if (recv_bytes > recv_size)
recv_bytes = recv_size;
if (recv && recv_size)
radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
u16 address, u8 *send, u8 send_bytes, u8 delay)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
int ret;
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
unsigned retry;
if (send_bytes > 16)
return -1;
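/* Build the atom AUX request header: bytes 0-1 hold the 16-bit AUX
* address, byte 2 the request command in its high nibble, byte 3 the
* total message length (high nibble) and payload length - 1 (low nibble). */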
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_WRITE << 4;
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return -EIO;
}
return -EIO;
}
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
u16 address, u8 *recv, int recv_bytes, u8 delay)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[4];
int msg_bytes = 4;
u8 ack;
int ret;
unsigned retry;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else if (ret == 0)
return -EPROTO;
else
return -EIO;
}
return -EIO;
}
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
u16 reg, u8 val)
{
radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
}
static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
u16 reg)
{
u8 val = 0;
radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
return val;
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
u16 address = algo_data->address;
u8 msg[5];
u8 reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes = 1;
int ret;
u8 ack;
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[2] = AUX_I2C_READ << 4;
else
msg[2] = AUX_I2C_WRITE << 4;
if (!(mode & MODE_I2C_STOP))
msg[2] |= AUX_I2C_MOT << 4;
msg[0] = address;
msg[1] = address >> 8;
switch (mode) {
case MODE_I2C_WRITE:
msg_bytes = 5;
msg[3] = msg_bytes << 4;
msg[4] = write_byte;
break;
case MODE_I2C_READ:
msg_bytes = 4;
msg[3] = msg_bytes << 4;
break;
default:
msg_bytes = 4;
msg[3] = 3 << 4;
break;
}
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
switch (ack & AUX_NATIVE_REPLY_MASK) {
case AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
return -EREMOTEIO;
}
switch (ack & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return ret;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(400);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
return -EREMOTEIO;
}
}
DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
/***** general DP utility functions *****/
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count,
u8 train_set[4])
{
u8 v = 0;
u8 p = 0;
int lane;
for (lane = 0; lane < lane_count; lane++) {
u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
if (this_v > v)
v = this_v;
if (this_p > p)
p = this_p;
}
if (v >= DP_VOLTAGE_MAX)
v |= DP_TRAIN_MAX_SWING_REACHED;
if (p >= DP_PRE_EMPHASIS_MAX)
p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
for (lane = 0; lane < 4; lane++)
train_set[lane] = v | p;
}
/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
if (bpc == 0)
return 24;
else
return bpc * 3;
}
/* get the max pix clock supported by the link rate and lane num */
static int dp_get_max_dp_pix_clock(int link_rate,
int lane_num,
int bpp)
{
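/* With 8b/10b coding each link symbol carries 8 data bits, so the
* usable bandwidth is link_rate * lane_num * 8 bits; dividing by
* bits-per-pixel gives the max pixel clock in the same units as
* link_rate. */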
return (link_rate * lane_num * 8) / bpp;
}
/***** radeon specific DP functions *****/
static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE])
{
int max_link_rate;
if (radeon_connector_is_dp12_capable(connector))
max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
else
max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
return max_link_rate;
}
/* First get the min lane# when low rate is used according to pixel clock
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
*/
static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
if (pix_clock <= max_dp_pix_clock)
break;
}
return lane_num;
}
static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
return 270000;
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 162000;
max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 270000;
if (radeon_connector_is_dp12_capable(connector)) {
max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
if (pix_clock <= max_pix_clock)
return 540000;
}
return radeon_dp_get_max_link_rate(connector, dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
int action, int dp_clock,
u8 ucconfig, u8 lane_num)
{
DP_ENCODER_SERVICE_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
memset(&args, 0, sizeof(args));
args.ucLinkClock = dp_clock / 10;
args.ucConfig = ucconfig;
args.ucAction = action;
args.ucLaneNum = lane_num;
args.ucStatus = 0;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
return args.ucStatus;
}
u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 buf[3];
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
int ret, i;
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
DP_DPCD_SIZE, 0);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
for (i = 0; i < DP_DPCD_SIZE; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
radeon_dp_probe_oui(radeon_connector);
return true;
}
dig_connector->dpcd[0] = 0;
return false;
}
int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
if (!ASIC_IS_DCE4(rdev))
return panel_mode;
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
(dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else
panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
return panel_mode;
}
void radeon_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
dig_connector->dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
dig_connector->dp_lane_count =
radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
}
}
int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
dp_clock =
radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
if ((dp_clock == 540000) &&
(!radeon_connector_is_dp12_capable(connector)))
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
u8 link_status[DP_LINK_STATUS_SIZE])
{
int ret;
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
return false;
}
DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
return true;
}
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
int enc_id;
int dp_clock;
int dp_lane_count;
bool tp3_supported;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
bool use_dpencoder;
};
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
{
/* set the initial vs/emph on the source */
atombios_dig_transmitter_setup(dp_info->encoder,
ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
0, dp_info->train_set[0]); /* sets all lanes at once */
/* set the vs/emph on the sink */
radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
dp_info->train_set, dp_info->dp_lane_count, 0);
}
static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
{
int rtp = 0;
/* set training pattern on the source */
if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
switch (tp) {
case DP_TRAINING_PATTERN_1:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
break;
case DP_TRAINING_PATTERN_2:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
break;
case DP_TRAINING_PATTERN_3:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
break;
}
atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
} else {
switch (tp) {
case DP_TRAINING_PATTERN_1:
rtp = 0;
break;
case DP_TRAINING_PATTERN_2:
rtp = 1;
break;
}
radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
dp_info->dp_clock, dp_info->enc_id, rtp);
}
/* enable training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
}
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u8 tmp;
/* power up the sink */
if (dp_info->dpcd[0] >= 0x11)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_SET_POWER, DP_SET_POWER_D0);
/* possibly enable downspread on the sink */
if (dp_info->dpcd[3] & 0x1)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
else
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
(dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
}
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
/* set the link rate on the sink */
tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
else
radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
dp_info->dp_clock, dp_info->enc_id, 0);
/* disable the training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
return 0;
}
static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
{
udelay(400);
/* disable the training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
/* disable the training pattern on the source */
if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
else
radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dp_info->dp_clock, dp_info->enc_id, 0);
return 0;
}
static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
{
bool clock_recovery;
u8 voltage;
int i;
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
memset(dp_info->train_set, 0, 4);
radeon_dp_update_vs_emph(dp_info);
udelay(400);
/* clock recovery loop */
clock_recovery = false;
dp_info->tries = 0;
voltage = 0xff;
while (1) {
drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
break;
}
for (i = 0; i < dp_info->dp_lane_count; i++) {
if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
}
if (i == dp_info->dp_lane_count) {
DRM_ERROR("clock recovery reached max voltage\n");
break;
}
if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++dp_info->tries;
if (dp_info->tries == 5) {
DRM_ERROR("clock recovery tried 5 times\n");
break;
}
} else
dp_info->tries = 0;
voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by sink */
dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
radeon_dp_update_vs_emph(dp_info);
}
if (!clock_recovery) {
DRM_ERROR("clock recovery failed\n");
return -1;
} else {
DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
return 0;
}
}
static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
{
bool channel_eq;
if (dp_info->tp3_supported)
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
else
radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
/* channel equalization loop */
dp_info->tries = 0;
channel_eq = false;
while (1) {
drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
break;
}
/* Try 5 times */
if (dp_info->tries > 5) {
DRM_ERROR("channel eq failed: 5 tries\n");
break;
}
/* Compute new train_set as requested by sink */
dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
radeon_dp_update_vs_emph(dp_info);
dp_info->tries++;
}
if (!channel_eq) {
DRM_ERROR("channel eq failed\n");
return -1;
} else {
DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
return 0;
}
}
void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
struct radeon_dp_link_train_info dp_info;
int index;
u8 tmp, frev, crev;
if (!radeon_encoder->enc_priv)
return;
dig = radeon_encoder->enc_priv;
radeon_connector = to_radeon_connector(connector);
if (!radeon_connector->con_priv)
return;
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
(dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
return;
/* DPEncoderService newer than 1.1 can't properly program the
* training pattern. When facing such a version, use the
* DIGXEncoderControl (X == 1 | 2) instead.
*/
dp_info.use_dpencoder = true;
index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
if (crev > 1) {
dp_info.use_dpencoder = false;
}
}
dp_info.enc_id = 0;
if (dig->dig_encoder)
dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
else
dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
if (dig->linkb)
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
dp_info.radeon_connector = radeon_connector;
dp_info.dp_lane_count = dig_connector->dp_lane_count;
dp_info.dp_clock = dig_connector->dp_clock;
if (radeon_dp_link_train_init(&dp_info))
goto done;
if (radeon_dp_link_train_cr(&dp_info))
goto done;
if (radeon_dp_link_train_ce(&dp_info))
goto done;
done:
if (radeon_dp_link_train_finish(&dp_info))
return;
}
| gpl-2.0 |
pcarrier/linux | drivers/oprofile/timer_int.c | 1474 | 2597 | /**
* @file timer_int.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/profile.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/hrtimer.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include "oprof.h"
static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
static int ctr_running;
static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
{
oprofile_add_sample(get_irq_regs(), 0);
hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
return HRTIMER_RESTART;
}
static void __oprofile_hrtimer_start(void *unused)
{
struct hrtimer *hrtimer = this_cpu_ptr(&oprofile_hrtimer);
if (!ctr_running)
return;
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = oprofile_hrtimer_notify;
hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
HRTIMER_MODE_REL_PINNED);
}
static int oprofile_hrtimer_start(void)
{
get_online_cpus();
ctr_running = 1;
on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
put_online_cpus();
return 0;
}
static void __oprofile_hrtimer_stop(int cpu)
{
struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
if (!ctr_running)
return;
hrtimer_cancel(hrtimer);
}
static void oprofile_hrtimer_stop(void)
{
int cpu;
get_online_cpus();
for_each_online_cpu(cpu)
__oprofile_hrtimer_stop(cpu);
ctr_running = 0;
put_online_cpus();
}
static int oprofile_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long) hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
smp_call_function_single(cpu, __oprofile_hrtimer_start,
NULL, 1);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
__oprofile_hrtimer_stop(cpu);
break;
}
return NOTIFY_OK;
}
static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
static int oprofile_hrtimer_setup(void)
{
return register_hotcpu_notifier(&oprofile_cpu_notifier);
}
static void oprofile_hrtimer_shutdown(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
int oprofile_timer_init(struct oprofile_operations *ops)
{
ops->create_files = NULL;
ops->setup = oprofile_hrtimer_setup;
ops->shutdown = oprofile_hrtimer_shutdown;
ops->start = oprofile_hrtimer_start;
ops->stop = oprofile_hrtimer_stop;
ops->cpu_type = "timer";
printk(KERN_INFO "oprofile: using timer interrupt.\n");
return 0;
}
| gpl-2.0 |
TEAM-Gummy/android_kernel_sony_msm8x27 | mm/readahead.c | 1986 | 15575 | /*
* mm/readahead.c - address_space-level file readahead.
*
* Copyright (C) 2002, Linus Torvalds
*
* 09Apr2002 Andrew Morton
* Initial version.
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
/*
* Initialise a struct file's readahead state. Assumes that the caller has
* memset *ra to zero.
*/
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
ra->ra_pages = mapping->backing_dev_info->ra_pages;
ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
/*
* see if a page needs releasing upon read_cache_pages() failure
* - the caller of read_cache_pages() may have set PG_private or PG_fscache
* before calling, such as the NFS fs marking pages that are cached locally
* on disk, thus we need to give the fs a chance to clean up in the event of
* an error
*/
static void read_cache_pages_invalidate_page(struct address_space *mapping,
struct page *page)
{
if (page_has_private(page)) {
if (!trylock_page(page))
BUG();
page->mapping = mapping;
do_invalidatepage(page, 0);
page->mapping = NULL;
unlock_page(page);
}
page_cache_release(page);
}
/*
* release a list of pages, invalidating them first if need be
*/
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
struct list_head *pages)
{
struct page *victim;
while (!list_empty(pages)) {
victim = list_to_page(pages);
list_del(&victim->lru);
read_cache_pages_invalidate_page(mapping, victim);
}
}
/**
* read_cache_pages - populate an address space with some pages & start reads against them
* @mapping: the address_space
* @pages: The address of a list_head which contains the target pages. These
* pages have their ->index populated and are otherwise uninitialised.
* @filler: callback routine for filling a single page.
* @data: private data for the callback routine.
*
* Hides the details of the LRU cache etc from the filesystems.
*/
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
int (*filler)(void *, struct page *), void *data)
{
struct page *page;
int ret = 0;
while (!list_empty(pages)) {
page = list_to_page(pages);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index, GFP_KERNEL)) {
read_cache_pages_invalidate_page(mapping, page);
continue;
}
page_cache_release(page);
ret = filler(data, page);
if (unlikely(ret)) {
read_cache_pages_invalidate_pages(mapping, pages);
break;
}
task_io_account_read(PAGE_CACHE_SIZE);
}
return ret;
}
EXPORT_SYMBOL(read_cache_pages);
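/*
* Hedged usage sketch (my_filler and my_fs_readpage are hypothetical
* names, not part of this file): a filesystem's ->readpages()
* implementation could drain its page list through this helper
* roughly as follows:
*
*	static int my_filler(void *data, struct page *page)
*	{
*		return my_fs_readpage(data, page);
*	}
*
*	ret = read_cache_pages(mapping, pages, my_filler, filp);
*
* read_cache_pages() then owns page cache insertion and error cleanup,
* so the filler only has to start I/O on one page at a time.
*/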
static int read_pages(struct address_space *mapping, struct file *filp,
struct list_head *pages, unsigned nr_pages)
{
struct blk_plug plug;
unsigned page_idx;
int ret;
blk_start_plug(&plug);
if (mapping->a_ops->readpages) {
ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
/* Clean up the remaining pages */
put_pages_list(pages);
goto out;
}
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_to_page(pages);
list_del(&page->lru);
if (!add_to_page_cache_lru(page, mapping,
page->index, GFP_KERNEL)) {
mapping->a_ops->readpage(filp, page);
}
page_cache_release(page);
}
ret = 0;
out:
blk_finish_plug(&plug);
return ret;
}
/*
* __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
* the pages first, then submits them all for I/O. This avoids the very bad
* behaviour which would occur if page allocations are causing VM writeback.
* We really don't want to intermingle reads and writes like that.
*
* Returns the number of pages requested, or the maximum amount of I/O allowed.
*/
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read,
unsigned long lookahead_size)
{
struct inode *inode = mapping->host;
struct page *page;
unsigned long end_index; /* The last page we want to read */
LIST_HEAD(page_pool);
int page_idx;
int ret = 0;
loff_t isize = i_size_read(inode);
if (isize == 0)
goto out;
end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
/*
* Preallocate as many pages as we will need.
*/
for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
pgoff_t page_offset = offset + page_idx;
if (page_offset > end_index)
break;
rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, page_offset);
rcu_read_unlock();
if (page)
continue;
page = page_cache_alloc_readahead(mapping);
if (!page)
break;
page->index = page_offset;
page->flags |= (1L << PG_readahead);
list_add(&page->lru, &page_pool);
if (page_idx == nr_to_read - lookahead_size)
SetPageReadahead(page);
ret++;
}
/*
* Now start the IO. We ignore I/O errors - if the page is not
* uptodate then the caller will launch readpage again, and
* will then handle the error.
*/
if (ret)
read_pages(mapping, filp, &page_pool, ret);
BUG_ON(!list_empty(&page_pool));
out:
return ret;
}
/*
* Chunk the readahead into 2 megabyte units, so that we don't pin too much
* memory at once.
*/
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read)
{
int ret = 0;
if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
return -EINVAL;
nr_to_read = max_sane_readahead(nr_to_read);
while (nr_to_read) {
int err;
unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
if (this_chunk > nr_to_read)
this_chunk = nr_to_read;
err = __do_page_cache_readahead(mapping, filp,
offset, this_chunk, 0);
if (err < 0) {
ret = err;
break;
}
ret += err;
offset += this_chunk;
nr_to_read -= this_chunk;
}
return ret;
}
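/*
* Worked example (illustrative only): with 4k pages, a forced 5 MiB
* readahead is cut into chunks of (2 * 1024 * 1024) / 4096 = 512 pages,
* i.e. 512 + 512 + 256 pages over three calls to
* __do_page_cache_readahead(), after max_sane_readahead() has clamped
* the total.
*/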
/*
* Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
* sensible upper limit.
*/
unsigned long max_sane_readahead(unsigned long nr)
{
return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
/*
* Submit IO for the read-ahead request in file_ra_state.
*/
unsigned long ra_submit(struct file_ra_state *ra,
struct address_space *mapping, struct file *filp)
{
int actual;
actual = __do_page_cache_readahead(mapping, filp,
ra->start, ra->size, ra->async_size);
return actual;
}
/*
* Set the initial window size: round up to the next power of 2, then
* quadruple it for a small size and double it for a medium one, capped
* at max. A small size is not dependent on the max value - only a
* one-page read is regarded as small. E.g. with a 128k (32 page) max,
* a one-page read starts with a 16k window and an 8-page read with 64k.
*/
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
unsigned long newsize = roundup_pow_of_two(size);
if (newsize <= 1)
newsize = newsize * 4;
else if (newsize <= max / 4)
newsize = newsize * 2;
else
newsize = max;
return newsize;
}
/*
* Get the previous window size, ramp it up, and
* return it as the new window size.
*/
static unsigned long get_next_ra_size(struct file_ra_state *ra,
unsigned long max)
{
unsigned long cur = ra->size;
unsigned long newsize;
if (cur < max / 16)
newsize = 4 * cur;
else
newsize = 2 * cur;
return min(newsize, max);
}
/*
* On-demand readahead design.
*
* The fields in struct file_ra_state represent the most-recently-executed
* readahead attempt:
*
* |<----- async_size ---------|
* |------------------- size -------------------->|
* |==================#===========================|
* ^start ^page marked with PG_readahead
*
* To overlap application thinking time and disk I/O time, we do
* `readahead pipelining': Do not wait until the application consumed all
* readahead pages and stalled on the missing page at readahead_index;
* Instead, submit an asynchronous readahead I/O as soon as there are
* only async_size pages left in the readahead window. Normally async_size
* will be equal to size, for maximum pipelining.
*
* In interleaved sequential reads, concurrent streams on the same fd can
* be invalidating each other's readahead state. So we flag the new readahead
* page at (start+size-async_size) with PG_readahead, and use it as readahead
* indicator. The flag won't be set on already cached pages, to avoid the
* readahead-for-nothing fuss, saving pointless page cache lookups.
*
* prev_pos tracks the last visited byte in the _previous_ read request.
* It should be maintained by the caller, and will be used for detecting
* small random reads. Note that the readahead algorithm checks loosely
* for sequential patterns. Hence interleaved reads might be served as
* sequential ones.
*
* There is a special case: if the first page which the application tries to
* read happens to be the first page of the file, it is assumed that a linear
* read is about to happen and the window is immediately set to the initial
* size based on the I/O request size and max_readahead.
*
* The code ramps up the readahead size aggressively at first, but slows
* down as it approaches max_readahead.
*/
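/*
* Illustrative ramp-up (numbers assume the helpers above and a 128k,
* i.e. 32 page, maximum): a sequential reader issuing 4 page requests
* gets an initial window of 8 pages from get_init_ra_size(), which then
* grows 8 -> 16 -> 32 through get_next_ra_size() and stays clamped at
* the maximum.
*/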
/*
* Count contiguously cached pages from @offset-1 to @offset-@max,
* this count is a conservative estimation of
* - length of the sequential read sequence, or
* - thrashing threshold in memory tight systems
*/
static pgoff_t count_history_pages(struct address_space *mapping,
struct file_ra_state *ra,
pgoff_t offset, unsigned long max)
{
pgoff_t head;
rcu_read_lock();
head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
rcu_read_unlock();
return offset - 1 - head;
}
/*
* page cache context based read-ahead
*/
static int try_context_readahead(struct address_space *mapping,
struct file_ra_state *ra,
pgoff_t offset,
unsigned long req_size,
unsigned long max)
{
pgoff_t size;
size = count_history_pages(mapping, ra, offset, max);
/*
* no history pages:
* it could be a random read
*/
if (!size)
return 0;
/*
* starts from beginning of file:
* it is a strong indication of long-run stream (or whole-file-read)
*/
if (size >= offset)
size *= 2;
ra->start = offset;
ra->size = get_init_ra_size(size + req_size, max);
ra->async_size = ra->size;
return 1;
}
/*
* A minimal readahead algorithm for trivial sequential/random reads.
*/
static unsigned long
ondemand_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
bool hit_readahead_marker, pgoff_t offset,
unsigned long req_size)
{
unsigned long max = max_sane_readahead(ra->ra_pages);
/*
* start of file
*/
if (!offset)
goto initial_readahead;
/*
* It's the expected callback offset, assume sequential access.
* Ramp up sizes, and push forward the readahead window.
*/
if ((offset == (ra->start + ra->size - ra->async_size) ||
offset == (ra->start + ra->size))) {
ra->start += ra->size;
ra->size = get_next_ra_size(ra, max);
ra->async_size = ra->size;
goto readit;
}
/*
* Hit a marked page without valid readahead state.
* E.g. interleaved reads.
* Query the pagecache for async_size, which normally equals to
* readahead size. Ramp it up and use it as the new readahead size.
*/
if (hit_readahead_marker) {
pgoff_t start;
rcu_read_lock();
start = radix_tree_next_hole(&mapping->page_tree, offset + 1, max);
rcu_read_unlock();
if (!start || start - offset > max)
return 0;
ra->start = start;
ra->size = start - offset; /* old async_size */
ra->size += req_size;
ra->size = get_next_ra_size(ra, max);
ra->async_size = ra->size;
goto readit;
}
/*
* oversize read
*/
if (req_size > max)
goto initial_readahead;
/*
* sequential cache miss
*/
if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
goto initial_readahead;
/*
* Query the page cache and look for the traces(cached history pages)
* that a sequential stream would leave behind.
*/
if (try_context_readahead(mapping, ra, offset, req_size, max))
goto readit;
/*
* standalone, small random read
* Read as is, and do not pollute the readahead state.
*/
return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
initial_readahead:
ra->start = offset;
ra->size = get_init_ra_size(req_size, max);
ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
readit:
/*
* Will this read hit the readahead marker made by itself?
* If so, trigger the readahead marker hit now, and merge
* the resulted next readahead window into the current one.
*/
if (offset == ra->start && ra->size == ra->async_size) {
ra->async_size = get_next_ra_size(ra, max);
ra->size += ra->async_size;
}
return ra_submit(ra, mapping, filp);
}
/**
* page_cache_sync_readahead - generic file readahead
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @filp: passed on to ->readpage() and ->readpages()
* @offset: start offset into @mapping, in pagecache page-sized units
* @req_size: hint: total size of the read which the caller is performing in
* pagecache pages
*
* page_cache_sync_readahead() should be called when a cache miss happened:
* it will submit the read. The readahead logic may decide to piggyback more
* pages onto the read request if access patterns suggest it will improve
* performance.
*/
void page_cache_sync_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
pgoff_t offset, unsigned long req_size)
{
/* no read-ahead */
if (!ra->ra_pages)
return;
/* be dumb */
if (filp && (filp->f_mode & FMODE_RANDOM)) {
force_page_cache_readahead(mapping, filp, offset, req_size);
return;
}
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
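/*
* Hedged call-site sketch (mirrors the generic file read path; variable
* names are illustrative): on a page cache miss a reader typically does
*
*	page = find_get_page(mapping, index);
*	if (!page) {
*		page_cache_sync_readahead(mapping, ra, filp,
*					  index, req_size);
*		page = find_get_page(mapping, index);
*	}
*
* and only falls back to a single-page read if the page is still absent.
*/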
/**
* page_cache_async_readahead - file readahead for marked pages
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @filp: passed on to ->readpage() and ->readpages()
* @page: the page at @offset which has the PG_readahead flag set
* @offset: start offset into @mapping, in pagecache page-sized units
* @req_size: hint: total size of the read which the caller is performing in
* pagecache pages
*
* page_cache_async_readahead() should be called when a page is used which
* has the PG_readahead flag; this is a marker to suggest that the application
* has used up enough of the readahead window that we should start pulling in
* more pages.
*/
void
page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *filp,
struct page *page, pgoff_t offset,
unsigned long req_size)
{
/* no read-ahead */
if (!ra->ra_pages)
return;
/*
* Same bit is used for PG_readahead and PG_reclaim.
*/
if (PageWriteback(page))
return;
ClearPageReadahead(page);
/*
* Defer asynchronous read-ahead on IO congestion.
*/
if (bdi_read_congested(mapping->backing_dev_info))
return;
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
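/*
* Hedged call-site sketch (illustrative): once a reader consumes a page
* carrying the readahead marker, it kicks off the next window without
* blocking:
*
*	if (PageReadahead(page))
*		page_cache_async_readahead(mapping, ra, filp,
*					   page, index, req_size);
*/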
| gpl-2.0 |
Zenfone2-Dev/Kernel-for-Asus-Zenfone-2 | drivers/video/backlight/ili9320.c | 2242 | 6932 | /* drivers/video/backlight/ili9320.c
*
* ILI9320 LCD controller driver core.
*
* Copyright 2007 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <video/ili9320.h>
#include "ili9320.h"
static inline int ili9320_write_spi(struct ili9320 *ili,
unsigned int reg,
unsigned int value)
{
struct ili9320_spi *spi = &ili->access.spi;
unsigned char *addr = spi->buffer_addr;
unsigned char *data = spi->buffer_data;
/* spi message consists of:
* first byte: ID and operation
*/
addr[0] = spi->id | ILI9320_SPI_INDEX | ILI9320_SPI_WRITE;
addr[1] = reg >> 8;
addr[2] = reg;
/* second message is the data to transfer */
data[0] = spi->id | ILI9320_SPI_DATA | ILI9320_SPI_WRITE;
data[1] = value >> 8;
data[2] = value;
return spi_sync(spi->dev, &spi->message);
}
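/*
* Wire format as assembled above (illustration only): each register
* write is two 3-byte transfers separated by a chip-select toggle:
*
*	[ id | INDEX | WRITE ][ reg >> 8 ][ reg & 0xff ]
*	[ id | DATA  | WRITE ][ val >> 8 ][ val & 0xff ]
*/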
int ili9320_write(struct ili9320 *ili, unsigned int reg, unsigned int value)
{
dev_dbg(ili->dev, "write: reg=%02x, val=%04x\n", reg, value);
return ili->write(ili, reg, value);
}
EXPORT_SYMBOL_GPL(ili9320_write);
int ili9320_write_regs(struct ili9320 *ili,
const struct ili9320_reg *values,
int nr_values)
{
int index;
int ret;
for (index = 0; index < nr_values; index++, values++) {
ret = ili9320_write(ili, values->address, values->value);
if (ret != 0)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(ili9320_write_regs);
static void ili9320_reset(struct ili9320 *lcd)
{
struct ili9320_platdata *cfg = lcd->platdata;
cfg->reset(1);
mdelay(50);
cfg->reset(0);
mdelay(50);
cfg->reset(1);
mdelay(100);
}
static inline int ili9320_init_chip(struct ili9320 *lcd)
{
int ret;
ili9320_reset(lcd);
ret = lcd->client->init(lcd, lcd->platdata);
if (ret != 0) {
dev_err(lcd->dev, "failed to initialise display\n");
return ret;
}
lcd->initialised = 1;
return 0;
}
static inline int ili9320_power_on(struct ili9320 *lcd)
{
if (!lcd->initialised)
ili9320_init_chip(lcd);
lcd->display1 |= (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_BASEE);
ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
return 0;
}
static inline int ili9320_power_off(struct ili9320 *lcd)
{
lcd->display1 &= ~(ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_BASEE);
ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
return 0;
}
#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
static int ili9320_power(struct ili9320 *lcd, int power)
{
int ret = 0;
dev_dbg(lcd->dev, "power %d => %d\n", lcd->power, power);
if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
ret = ili9320_power_on(lcd);
else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
ret = ili9320_power_off(lcd);
if (ret == 0)
lcd->power = power;
else
dev_warn(lcd->dev, "failed to set power mode %d\n", power);
return ret;
}
static inline struct ili9320 *to_our_lcd(struct lcd_device *lcd)
{
return lcd_get_data(lcd);
}
static int ili9320_set_power(struct lcd_device *ld, int power)
{
struct ili9320 *lcd = to_our_lcd(ld);
return ili9320_power(lcd, power);
}
static int ili9320_get_power(struct lcd_device *ld)
{
struct ili9320 *lcd = to_our_lcd(ld);
return lcd->power;
}
static struct lcd_ops ili9320_ops = {
.get_power = ili9320_get_power,
.set_power = ili9320_set_power,
};
static void ili9320_setup_spi(struct ili9320 *ili,
struct spi_device *dev)
{
struct ili9320_spi *spi = &ili->access.spi;
ili->write = ili9320_write_spi;
spi->dev = dev;
/* fill the two messages we are going to use to send the data
* with: the first carries the address, the second the data. The
* datasheet says they should be done as two distinct cycles of the
* SPI CS line.
*/
spi->xfer[0].tx_buf = spi->buffer_addr;
spi->xfer[1].tx_buf = spi->buffer_data;
spi->xfer[0].len = 3;
spi->xfer[1].len = 3;
spi->xfer[0].bits_per_word = 8;
spi->xfer[1].bits_per_word = 8;
spi->xfer[0].cs_change = 1;
spi_message_init(&spi->message);
spi_message_add_tail(&spi->xfer[0], &spi->message);
spi_message_add_tail(&spi->xfer[1], &spi->message);
}
int ili9320_probe_spi(struct spi_device *spi,
struct ili9320_client *client)
{
struct ili9320_platdata *cfg = spi->dev.platform_data;
struct device *dev = &spi->dev;
struct ili9320 *ili;
struct lcd_device *lcd;
int ret = 0;
/* verify we were given some information */
if (cfg == NULL) {
dev_err(dev, "no platform data supplied\n");
return -EINVAL;
}
if (cfg->hsize <= 0 || cfg->vsize <= 0 || cfg->reset == NULL) {
dev_err(dev, "invalid platform data supplied\n");
return -EINVAL;
}
/* allocate and initialise our state */
ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
if (ili == NULL) {
dev_err(dev, "no memory for device\n");
return -ENOMEM;
}
ili->access.spi.id = ILI9320_SPI_IDCODE | ILI9320_SPI_ID(1);
ili->dev = dev;
ili->client = client;
ili->power = FB_BLANK_POWERDOWN;
ili->platdata = cfg;
spi_set_drvdata(spi, ili);
ili9320_setup_spi(ili, spi);
lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
if (IS_ERR(lcd)) {
dev_err(dev, "failed to register lcd device\n");
return PTR_ERR(lcd);
}
ili->lcd = lcd;
dev_info(dev, "initialising %s\n", client->name);
ret = ili9320_power(ili, FB_BLANK_UNBLANK);
if (ret != 0) {
dev_err(dev, "failed to set lcd power state\n");
goto err_unregister;
}
return 0;
err_unregister:
lcd_device_unregister(lcd);
return ret;
}
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
int ili9320_remove(struct ili9320 *ili)
{
ili9320_power(ili, FB_BLANK_POWERDOWN);
lcd_device_unregister(ili->lcd);
return 0;
}
EXPORT_SYMBOL_GPL(ili9320_remove);
#ifdef CONFIG_PM_SLEEP
int ili9320_suspend(struct ili9320 *lcd)
{
int ret;
ret = ili9320_power(lcd, FB_BLANK_POWERDOWN);
if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
ili9320_write(lcd, ILI9320_POWER1, lcd->power1 |
ILI9320_POWER1_SLP |
ILI9320_POWER1_DSTB);
lcd->initialised = 0;
}
return ret;
}
EXPORT_SYMBOL_GPL(ili9320_suspend);
int ili9320_resume(struct ili9320 *lcd)
{
dev_info(lcd->dev, "resuming from power state %d\n", lcd->power);
if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP)
ili9320_write(lcd, ILI9320_POWER1, 0x00);
return ili9320_power(lcd, FB_BLANK_UNBLANK);
}
EXPORT_SYMBOL_GPL(ili9320_resume);
#endif
/* Power down all displays on reboot, poweroff or halt */
void ili9320_shutdown(struct ili9320 *lcd)
{
ili9320_power(lcd, FB_BLANK_POWERDOWN);
}
EXPORT_SYMBOL_GPL(ili9320_shutdown);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_DESCRIPTION("ILI9320 LCD Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
rutvik95/speedx_kernel_i9082 | drivers/base/node.c | 2242 | 17890 | /*
* drivers/base/node.c - basic Node class support
*/
#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/swap.h>
#include <linux/slab.h>
static struct sysdev_class_attribute *node_state_attrs[];
static struct sysdev_class node_class = {
.name = "node",
.attrs = node_state_attrs,
};
static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
{
struct node *node_dev = to_node(dev);
const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
int len;
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
len = type?
cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
buf[len++] = '\n';
buf[len] = '\0';
return len;
}
static inline ssize_t node_read_cpumask(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 0, buf);
}
static inline ssize_t node_read_cpulist(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 1, buf);
}
static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct sys_device * dev,
struct sysdev_attribute *attr, char * buf)
{
int n;
int nid = dev->id;
struct sysinfo i;
si_meminfo_node(&i, nid);
n = sprintf(buf,
"Node %d MemTotal: %8lu kB\n"
"Node %d MemFree: %8lu kB\n"
"Node %d MemUsed: %8lu kB\n"
"Node %d Active: %8lu kB\n"
"Node %d Inactive: %8lu kB\n"
"Node %d Active(anon): %8lu kB\n"
"Node %d Inactive(anon): %8lu kB\n"
"Node %d Active(file): %8lu kB\n"
"Node %d Inactive(file): %8lu kB\n"
"Node %d Unevictable: %8lu kB\n"
"Node %d Mlocked: %8lu kB\n",
nid, K(i.totalram),
nid, K(i.freeram),
nid, K(i.totalram - i.freeram),
nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
node_page_state(nid, NR_ACTIVE_FILE)),
nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
node_page_state(nid, NR_INACTIVE_FILE)),
nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
nid, K(node_page_state(nid, NR_UNEVICTABLE)),
nid, K(node_page_state(nid, NR_MLOCK)));
#ifdef CONFIG_HIGHMEM
n += sprintf(buf + n,
"Node %d HighTotal: %8lu kB\n"
"Node %d HighFree: %8lu kB\n"
"Node %d LowTotal: %8lu kB\n"
"Node %d LowFree: %8lu kB\n",
nid, K(i.totalhigh),
nid, K(i.freehigh),
nid, K(i.totalram - i.totalhigh),
nid, K(i.freeram - i.freehigh));
#endif
n += sprintf(buf + n,
"Node %d Dirty: %8lu kB\n"
"Node %d Writeback: %8lu kB\n"
"Node %d FilePages: %8lu kB\n"
"Node %d Mapped: %8lu kB\n"
"Node %d AnonPages: %8lu kB\n"
"Node %d Shmem: %8lu kB\n"
"Node %d KernelStack: %8lu kB\n"
"Node %d PageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d WritebackTmp: %8lu kB\n"
"Node %d Slab: %8lu kB\n"
"Node %d SReclaimable: %8lu kB\n"
"Node %d SUnreclaim: %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"Node %d AnonHugePages: %8lu kB\n"
#endif
,
nid, K(node_page_state(nid, NR_FILE_DIRTY)),
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
nid, K(node_page_state(nid, NR_ANON_PAGES)
+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
HPAGE_PMD_NR),
#else
nid, K(node_page_state(nid, NR_ANON_PAGES)),
#endif
nid, K(node_page_state(nid, NR_SHMEM)),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
nid, K(node_page_state(nid, NR_PAGETABLE)),
nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
nid, K(node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
, nid,
K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
HPAGE_PMD_NR));
#else
nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
n += hugetlb_report_node_meminfo(nid, buf + n);
return n;
}
#undef K
static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
static ssize_t node_read_numastat(struct sys_device * dev,
struct sysdev_attribute *attr, char * buf)
{
return sprintf(buf,
"numa_hit %lu\n"
"numa_miss %lu\n"
"numa_foreign %lu\n"
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
node_page_state(dev->id, NUMA_HIT),
node_page_state(dev->id, NUMA_MISS),
node_page_state(dev->id, NUMA_FOREIGN),
node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
node_page_state(dev->id, NUMA_LOCAL),
node_page_state(dev->id, NUMA_OTHER));
}
static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
static ssize_t node_read_vmstat(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
int nid = dev->id;
int i;
int n = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
node_page_state(nid, i));
return n;
}
static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
static ssize_t node_read_distance(struct sys_device * dev,
struct sysdev_attribute *attr, char * buf)
{
int nid = dev->id;
int len = 0;
int i;
/*
* buf is currently PAGE_SIZE in length and each node needs 4 chars
* at the most (distance + space or newline).
*/
BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
for_each_online_node(i)
len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
len += sprintf(buf + len, "\n");
return len;
}
static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
#ifdef CONFIG_HUGETLBFS
/*
* hugetlbfs per node attributes registration interface:
* When/if hugetlb[fs] subsystem initializes [sometime after this module],
* it will register its per node attributes for all online nodes with
* memory. It will also call register_hugetlbfs_with_node(), below, to
* register its attribute registration functions with this node driver.
* Once these hooks have been initialized, the node driver will call into
* the hugetlb module to [un]register attributes for hot-plugged nodes.
*/
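/*
* Hedged illustration (the function names below are assumed, not
* defined in this file): the hugetlb side would wire itself up
* roughly as
*
*	register_hugetlbfs_with_node(hugetlb_register_node_fn,
*				     hugetlb_unregister_node_fn);
*
* after which this driver invokes the two hooks as nodes are
* hot-plugged and hot-removed.
*/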
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
node_state(node->sysdev.id, N_HIGH_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
return false;
}
static inline void hugetlb_unregister_node(struct node *node)
{
if (__hugetlb_unregister_node)
__hugetlb_unregister_node(node);
}
void register_hugetlbfs_with_node(node_registration_func_t doregister,
node_registration_func_t unregister)
{
__hugetlb_register_node = doregister;
__hugetlb_unregister_node = unregister;
}
#else
static inline void hugetlb_register_node(struct node *node) {}
static inline void hugetlb_unregister_node(struct node *node) {}
#endif
/*
* register_node - Setup a sysfs device for a node.
* @num - Node number to use when creating the device.
*
* Initialize and register the node device.
*/
int register_node(struct node *node, int num, struct node *parent)
{
int error;
node->sysdev.id = num;
node->sysdev.cls = &node_class;
error = sysdev_register(&node->sysdev);
if (!error){
sysdev_create_file(&node->sysdev, &attr_cpumap);
sysdev_create_file(&node->sysdev, &attr_cpulist);
sysdev_create_file(&node->sysdev, &attr_meminfo);
sysdev_create_file(&node->sysdev, &attr_numastat);
sysdev_create_file(&node->sysdev, &attr_distance);
sysdev_create_file(&node->sysdev, &attr_vmstat);
scan_unevictable_register_node(node);
hugetlb_register_node(node);
compaction_register_node(node);
}
return error;
}
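/*
* Illustration (typical resulting sysfs layout, abridged): a successful
* register_node() leaves entries such as
*
*	/sys/devices/system/node/node0/cpumap
*	/sys/devices/system/node/node0/meminfo
*	/sys/devices/system/node/node0/distance
*/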
/**
* unregister_node - unregister a node device
* @node: node going away
*
* Unregisters a node device @node. All the devices on the node must be
* unregistered before calling this function.
*/
void unregister_node(struct node *node)
{
sysdev_remove_file(&node->sysdev, &attr_cpumap);
sysdev_remove_file(&node->sysdev, &attr_cpulist);
sysdev_remove_file(&node->sysdev, &attr_meminfo);
sysdev_remove_file(&node->sysdev, &attr_numastat);
sysdev_remove_file(&node->sysdev, &attr_distance);
sysdev_remove_file(&node->sysdev, &attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
sysdev_unregister(&node->sysdev);
}
struct node node_devices[MAX_NUMNODES];
/*
* register cpu under node
*/
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
int ret;
struct sys_device *obj;
if (!node_online(nid))
return 0;
obj = get_cpu_sysdev(cpu);
if (!obj)
return 0;
ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
&node_devices[nid].sysdev.kobj,
kobject_name(&node_devices[nid].sysdev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
struct sys_device *obj;
if (!node_online(nid))
return 0;
obj = get_cpu_sysdev(cpu);
if (!obj)
return 0;
sysfs_remove_link(&node_devices[nid].sysdev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
kobject_name(&node_devices[nid].sysdev.kobj));
return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#define page_initialized(page) (page->lru.next)
static int get_nid_for_pfn(unsigned long pfn)
{
struct page *page;
if (!pfn_valid_within(pfn))
return -1;
page = pfn_to_page(pfn);
if (!page_initialized(page))
return -1;
return pfn_to_nid(pfn);
}
/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
{
int ret;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk)
return -EFAULT;
if (!node_online(nid))
return 0;
sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
sect_end_pfn += PAGES_PER_SECTION - 1;
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int page_nid;
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
&mem_blk->sysdev.kobj,
kobject_name(&mem_blk->sysdev.kobj));
if (ret)
return ret;
return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
&node_devices[nid].sysdev.kobj,
kobject_name(&node_devices[nid].sysdev.kobj));
}
/* mem section does not span the specified node */
return 0;
}
/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index)
{
NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk) {
NODEMASK_FREE(unlinked_nodes);
return -EFAULT;
}
if (!unlinked_nodes)
return -ENOMEM;
nodes_clear(*unlinked_nodes);
sect_start_pfn = section_nr_to_pfn(phys_index);
sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
int nid;
nid = get_nid_for_pfn(pfn);
if (nid < 0)
continue;
if (!node_online(nid))
continue;
if (node_test_and_set(nid, *unlinked_nodes))
continue;
sysfs_remove_link(&node_devices[nid].sysdev.kobj,
kobject_name(&mem_blk->sysdev.kobj));
sysfs_remove_link(&mem_blk->sysdev.kobj,
kobject_name(&node_devices[nid].sysdev.kobj));
}
NODEMASK_FREE(unlinked_nodes);
return 0;
}
static int link_mem_sections(int nid)
{
unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
unsigned long pfn;
struct memory_block *mem_blk = NULL;
int err = 0;
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
unsigned long section_nr = pfn_to_section_nr(pfn);
struct mem_section *mem_sect;
int ret;
if (!present_section_nr(section_nr))
continue;
mem_sect = __nr_to_section(section_nr);
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
ret = register_mem_sect_under_node(mem_blk, nid);
if (!err)
err = ret;
/* discard ref obtained in find_memory_block() */
}
if (mem_blk)
kobject_put(&mem_blk->sysdev.kobj);
return err;
}
#ifdef CONFIG_HUGETLBFS
/*
* Handle per node hstate attribute [un]registration on transitions
* to/from memoryless state.
*/
static void node_hugetlb_work(struct work_struct *work)
{
struct node *node = container_of(work, struct node, node_work);
/*
* We only get here when a node transitions to/from memoryless state.
* We can detect which transition occurred by examining whether the
* node has memory now. hugetlb_register_node() already checks this,
* so we simply try to register the attributes. If that fails, the
* node has transitioned to memoryless; try to unregister the
* attributes.
*/
if (!hugetlb_register_node(node))
hugetlb_unregister_node(node);
}
static void init_node_hugetlb_work(int nid)
{
INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
}
static int node_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
struct memory_notify *mnb = arg;
int nid = mnb->status_change_nid;
switch (action) {
case MEM_ONLINE:
case MEM_OFFLINE:
/*
* offload per node hstate [un]registration to a work thread
* when transitioning to/from memoryless state.
*/
if (nid != NUMA_NO_NODE)
schedule_work(&node_devices[nid].node_work);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_ONLINE:
case MEM_CANCEL_OFFLINE:
default:
break;
}
return NOTIFY_OK;
}
#endif /* CONFIG_HUGETLBFS */
#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
static int link_mem_sections(int nid) { return 0; }
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
!defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
return NOTIFY_OK;
}
static void init_node_hugetlb_work(int nid) { }
#endif
int register_one_node(int nid)
{
int error = 0;
int cpu;
if (node_online(nid)) {
int p_node = parent_node(nid);
struct node *parent = NULL;
if (p_node != nid)
parent = &node_devices[p_node];
error = register_node(&node_devices[nid], nid, parent);
/* link cpu under this node */
for_each_present_cpu(cpu) {
if (cpu_to_node(cpu) == nid)
register_cpu_under_node(cpu, nid);
}
/* link memory sections under this node */
error = link_mem_sections(nid);
/* initialize work queue for memory hot plug */
init_node_hugetlb_work(nid);
}
return error;
}
void unregister_one_node(int nid)
{
unregister_node(&node_devices[nid]);
}
/*
* node states attributes
*/
static ssize_t print_nodes_state(enum node_states state, char *buf)
{
int n;
n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
if (n > 0 && PAGE_SIZE > n + 1) {
*(buf + n++) = '\n';
*(buf + n++) = '\0';
}
return n;
}
struct node_attr {
struct sysdev_class_attribute attr;
enum node_states state;
};
static ssize_t show_node_state(struct sysdev_class *class,
struct sysdev_class_attribute *attr, char *buf)
{
struct node_attr *na = container_of(attr, struct node_attr, attr);
return print_nodes_state(na->state, buf);
}
#define _NODE_ATTR(name, state) \
{ _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
_NODE_ATTR(possible, N_POSSIBLE),
_NODE_ATTR(online, N_ONLINE),
_NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
_NODE_ATTR(has_cpu, N_CPU),
#ifdef CONFIG_HIGHMEM
_NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
};
static struct sysdev_class_attribute *node_state_attrs[] = {
&node_state_attr[0].attr,
&node_state_attr[1].attr,
&node_state_attr[2].attr,
&node_state_attr[3].attr,
#ifdef CONFIG_HIGHMEM
&node_state_attr[4].attr,
#endif
NULL
};
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
int ret;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
ret = sysdev_class_register(&node_class);
if (!ret) {
hotplug_memory_notifier(node_memory_callback,
NODE_CALLBACK_PRI);
}
/*
* Note: we're not going to unregister the node class if we fail
* to register the node state class attribute files.
*/
return ret;
}
postcore_initcall(register_node_type);
| gpl-2.0 |
HRTKernel/Hacker_Kernel_SM-G928F | tools/perf/util/cpumap.c | 2242 | 6087 | #include "util.h"
#include "sysfs.h"
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
static struct cpu_map *cpu_map__default_new(void)
{
struct cpu_map *cpus;
int nr_cpus;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus < 0)
return NULL;
cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
if (cpus != NULL) {
int i;
for (i = 0; i < nr_cpus; ++i)
cpus->map[i] = i;
cpus->nr = nr_cpus;
}
return cpus;
}
static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(int);
struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
if (cpus != NULL) {
cpus->nr = nr_cpus;
memcpy(cpus->map, tmp_cpus, payload_size);
}
return cpus;
}
struct cpu_map *cpu_map__read(FILE *file)
{
struct cpu_map *cpus = NULL;
int nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
int max_entries = 0;
int n, cpu, prev;
char sep;
sep = 0;
prev = -1;
for (;;) {
n = fscanf(file, "%u%c", &cpu, &sep);
if (n <= 0)
break;
if (prev >= 0) {
int new_max = nr_cpus + cpu - prev - 1;
if (new_max >= max_entries) {
max_entries = new_max + MAX_NR_CPUS / 2;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}
while (++prev < cpu)
tmp_cpus[nr_cpus++] = prev;
}
if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}
tmp_cpus[nr_cpus++] = cpu;
if (n == 2 && sep == '-')
prev = cpu;
else
prev = -1;
if (n == 1 || sep == '\n')
break;
}
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
else
cpus = cpu_map__default_new();
out_free_tmp:
free(tmp_cpus);
return cpus;
}
static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
struct cpu_map *cpus = NULL;
FILE *onlnf;
onlnf = fopen("/sys/devices/system/cpu/online", "r");
if (!onlnf)
return cpu_map__default_new();
cpus = cpu_map__read(onlnf);
fclose(onlnf);
return cpus;
}
struct cpu_map *cpu_map__new(const char *cpu_list)
{
struct cpu_map *cpus = NULL;
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
int max_entries = 0;
if (!cpu_list)
return cpu_map__read_all_cpu_map();
if (!isdigit(*cpu_list))
goto out;
while (isdigit(*cpu_list)) {
p = NULL;
start_cpu = strtoul(cpu_list, &p, 0);
if (start_cpu >= INT_MAX
|| (*p != '\0' && *p != ',' && *p != '-'))
goto invalid;
if (*p == '-') {
cpu_list = ++p;
p = NULL;
end_cpu = strtoul(cpu_list, &p, 0);
if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
goto invalid;
if (end_cpu < start_cpu)
goto invalid;
} else {
end_cpu = start_cpu;
}
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
for (i = 0; i < nr_cpus; i++)
if (tmp_cpus[i] == (int)start_cpu)
goto invalid;
if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
tmp = realloc(tmp_cpus, max_entries * sizeof(int));
if (tmp == NULL)
goto invalid;
tmp_cpus = tmp;
}
tmp_cpus[nr_cpus++] = (int)start_cpu;
}
if (*p)
++p;
cpu_list = p;
}
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
else
cpus = cpu_map__default_new();
invalid:
free(tmp_cpus);
out:
return cpus;
}
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
int i;
size_t printed = fprintf(fp, "%d cpu%s: ",
map->nr, map->nr > 1 ? "s" : "");
for (i = 0; i < map->nr; ++i)
printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);
return printed + fprintf(fp, "\n");
}
struct cpu_map *cpu_map__dummy_new(void)
{
struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
if (cpus != NULL) {
cpus->nr = 1;
cpus->map[0] = -1;
}
return cpus;
}
void cpu_map__delete(struct cpu_map *map)
{
free(map);
}
int cpu_map__get_socket(struct cpu_map *map, int idx)
{
FILE *fp;
const char *mnt;
char path[PATH_MAX];
int cpu, ret;
if (idx > map->nr)
return -1;
cpu = map->map[idx];
mnt = sysfs_find_mountpoint();
if (!mnt)
return -1;
snprintf(path, PATH_MAX,
"%s/devices/system/cpu/cpu%d/topology/physical_package_id",
mnt, cpu);
fp = fopen(path, "r");
if (!fp)
return -1;
ret = fscanf(fp, "%d", &cpu);
fclose(fp);
return ret == 1 ? cpu : -1;
}
static int cmp_ids(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
int (*f)(struct cpu_map *map, int cpu))
{
struct cpu_map *c;
int nr = cpus->nr;
int cpu, s1, s2;
/* allocate as much as possible */
c = calloc(1, sizeof(*c) + nr * sizeof(int));
if (!c)
return -1;
for (cpu = 0; cpu < nr; cpu++) {
s1 = f(cpus, cpu);
for (s2 = 0; s2 < c->nr; s2++) {
if (s1 == c->map[s2])
break;
}
if (s2 == c->nr) {
c->map[c->nr] = s1;
c->nr++;
}
}
/* ensure we process id in increasing order */
qsort(c->map, c->nr, sizeof(int), cmp_ids);
*res = c;
return 0;
}
int cpu_map__get_core(struct cpu_map *map, int idx)
{
FILE *fp;
const char *mnt;
char path[PATH_MAX];
int cpu, ret, s;
if (idx > map->nr)
return -1;
cpu = map->map[idx];
mnt = sysfs_find_mountpoint();
if (!mnt)
return -1;
snprintf(path, PATH_MAX,
"%s/devices/system/cpu/cpu%d/topology/core_id",
mnt, cpu);
fp = fopen(path, "r");
if (!fp)
return -1;
ret = fscanf(fp, "%d", &cpu);
fclose(fp);
if (ret != 1)
return -1;
s = cpu_map__get_socket(map, idx);
if (s == -1)
return -1;
/*
* Encode the socket in the upper 16 bits: core_id is relative to its
* socket and we need a global id, so we combine socket + core id.
*/
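/* e.g. (illustration): socket 1, core 3 -> (1 << 16) | 3 = 0x00010003 */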
return (s << 16) | (cpu & 0xffff);
}
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
return cpu_map__build_map(cpus, sockp, cpu_map__get_socket);
}
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
return cpu_map__build_map(cpus, corep, cpu_map__get_core);
}
| gpl-2.0 |
ShinyROM/android_kernel_asus_grouper | drivers/md/dm-delay.c | 3266 | 8595 | /*
* Copyright (C) 2005-2007 Red Hat GmbH
*
* A target that delays reads and/or writes and can send
* them to different devices.
*
* This file is released under the GPL.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "delay"
struct delay_c {
struct timer_list delay_timer;
struct mutex timer_lock;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
atomic_t may_delay;
mempool_t *delayed_pool;
struct dm_dev *dev_read;
sector_t start_read;
unsigned read_delay;
unsigned reads;
struct dm_dev *dev_write;
sector_t start_write;
unsigned write_delay;
unsigned writes;
};
struct dm_delay_info {
struct delay_c *context;
struct list_head list;
struct bio *bio;
unsigned long expires;
};
static DEFINE_MUTEX(delayed_bios_lock);
static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache;
static void handle_delayed_timer(unsigned long data)
{
struct delay_c *dc = (struct delay_c *)data;
queue_work(kdelayd_wq, &dc->flush_expired_bios);
}
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
mutex_lock(&dc->timer_lock);
if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
mod_timer(&dc->delay_timer, expires);
mutex_unlock(&dc->timer_lock);
}
static void flush_bios(struct bio *bio)
{
struct bio *n;
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
generic_make_request(bio);
bio = n;
}
}
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
struct dm_delay_info *delayed, *next;
unsigned long next_expires = 0;
int start_timer = 0;
struct bio_list flush_bios = { };
mutex_lock(&delayed_bios_lock);
list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
if (flush_all || time_after_eq(jiffies, delayed->expires)) {
list_del(&delayed->list);
bio_list_add(&flush_bios, delayed->bio);
if ((bio_data_dir(delayed->bio) == WRITE))
delayed->context->writes--;
else
delayed->context->reads--;
mempool_free(delayed, dc->delayed_pool);
continue;
}
if (!start_timer) {
start_timer = 1;
next_expires = delayed->expires;
} else
next_expires = min(next_expires, delayed->expires);
}
mutex_unlock(&delayed_bios_lock);
if (start_timer)
queue_timeout(dc, next_expires);
return bio_list_get(&flush_bios);
}
static void flush_expired_bios(struct work_struct *work)
{
struct delay_c *dc;
dc = container_of(work, struct delay_c, flush_expired_bios);
flush_bios(flush_delayed_bios(dc, 0));
}
/*
* Mapping parameters:
* <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
*
* With separate write parameters, the first set is only used for reads.
* Delays are specified in milliseconds.
*/
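/*
 * Example table line (device name and delays are hypothetical): delay
 * reads by 100 ms and writes by 500 ms on /dev/sdb:
 *
 *	echo "0 `blockdev --getsz /dev/sdb` delay /dev/sdb 0 100 /dev/sdb 0 500" \
 *		| dmsetup create delayed
 */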
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct delay_c *dc;
unsigned long long tmpll;
if (argc != 3 && argc != 6) {
ti->error = "requires exactly 3 or 6 arguments";
return -EINVAL;
}
dc = kmalloc(sizeof(*dc), GFP_KERNEL);
if (!dc) {
ti->error = "Cannot allocate context";
return -ENOMEM;
}
dc->reads = dc->writes = 0;
if (sscanf(argv[1], "%llu", &tmpll) != 1) {
ti->error = "Invalid device sector";
goto bad;
}
dc->start_read = tmpll;
if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
ti->error = "Invalid delay";
goto bad;
}
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&dc->dev_read)) {
ti->error = "Device lookup failed";
goto bad;
}
dc->dev_write = NULL;
if (argc == 3)
goto out;
if (sscanf(argv[4], "%llu", &tmpll) != 1) {
ti->error = "Invalid write device sector";
goto bad_dev_read;
}
dc->start_write = tmpll;
if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
ti->error = "Invalid write delay";
goto bad_dev_read;
}
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
&dc->dev_write)) {
ti->error = "Write device lookup failed";
goto bad_dev_read;
}
out:
dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
if (!dc->delayed_pool) {
DMERR("Couldn't create delayed bio pool.");
goto bad_dev_write;
}
setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
INIT_LIST_HEAD(&dc->delayed_bios);
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
ti->private = dc;
return 0;
bad_dev_write:
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
bad_dev_read:
dm_put_device(ti, dc->dev_read);
bad:
kfree(dc);
return -EINVAL;
}
static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
flush_workqueue(kdelayd_wq);
dm_put_device(ti, dc->dev_read);
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
mempool_destroy(dc->delayed_pool);
kfree(dc);
}
static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
struct dm_delay_info *delayed;
unsigned long expires = 0;
if (!delay || !atomic_read(&dc->may_delay))
return 1;
delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
delayed->context = dc;
delayed->bio = bio;
delayed->expires = expires = jiffies + (delay * HZ / 1000);
mutex_lock(&delayed_bios_lock);
if (bio_data_dir(bio) == WRITE)
dc->writes++;
else
dc->reads++;
list_add_tail(&delayed->list, &dc->delayed_bios);
mutex_unlock(&delayed_bios_lock);
queue_timeout(dc, expires);
return 0;
}
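/*
 * The expiry arithmetic above converts milliseconds to jiffies:
 * assuming HZ == 100 (configuration-dependent), a 500 ms delay becomes
 * an expiry of jiffies + 50.
 */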
static void delay_presuspend(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
atomic_set(&dc->may_delay, 0);
del_timer_sync(&dc->delay_timer);
flush_bios(flush_delayed_bios(dc, 1));
}
static void delay_resume(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
atomic_set(&dc->may_delay, 1);
}
static int delay_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct delay_c *dc = ti->private;
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
if (bio_sectors(bio))
bio->bi_sector = dc->start_write +
dm_target_offset(ti, bio->bi_sector);
return delay_bio(dc, dc->write_delay, bio);
}
bio->bi_bdev = dc->dev_read->bdev;
bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
return delay_bio(dc, dc->read_delay, bio);
}
static int delay_status(struct dm_target *ti, status_type_t type,
char *result, unsigned maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%u %u", dc->reads, dc->writes);
break;
case STATUSTYPE_TABLE:
DMEMIT("%s %llu %u", dc->dev_read->name,
(unsigned long long) dc->start_read,
dc->read_delay);
if (dc->dev_write)
DMEMIT(" %s %llu %u", dc->dev_write->name,
(unsigned long long) dc->start_write,
dc->write_delay);
break;
}
return 0;
}
static int delay_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct delay_c *dc = ti->private;
int ret = 0;
ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data);
if (ret)
goto out;
if (dc->dev_write)
ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data);
out:
return ret;
}
static struct target_type delay_target = {
.name = "delay",
.version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
.map = delay_map,
.presuspend = delay_presuspend,
.resume = delay_resume,
.status = delay_status,
.iterate_devices = delay_iterate_devices,
};
static int __init dm_delay_init(void)
{
int r = -ENOMEM;
kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!kdelayd_wq) {
DMERR("Couldn't start kdelayd");
goto bad_queue;
}
delayed_cache = KMEM_CACHE(dm_delay_info, 0);
if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache.");
goto bad_memcache;
}
r = dm_register_target(&delay_target);
if (r < 0) {
DMERR("register failed %d", r);
goto bad_register;
}
return 0;
bad_register:
kmem_cache_destroy(delayed_cache);
bad_memcache:
destroy_workqueue(kdelayd_wq);
bad_queue:
return r;
}
static void __exit dm_delay_exit(void)
{
dm_unregister_target(&delay_target);
kmem_cache_destroy(delayed_cache);
destroy_workqueue(kdelayd_wq);
}
/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);
MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sytuxww/android2.3-dm8168 | arch/h8300/kernel/traps.c | 3266 | 4062 | /*
* linux/arch/h8300/boot/traps.c -- general exception handling code
* H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp>
*
* Cloned from Linux/m68k.
*
* No original Copyright holder listed,
* Probable original (C) Roman Zippel (assigned DJD, 1999)
*
* Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/bug.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/page.h>
static DEFINE_SPINLOCK(die_lock);
/*
* this must be called very early as the kernel might
 * use some instructions that are emulated on the 060
*/
void __init base_trap_init(void)
{
}
void __init trap_init (void)
{
}
asmlinkage void set_esp0 (unsigned long ssp)
{
current->thread.esp0 = ssp;
}
/*
* Generic dumping code. Used for panic and debug.
*/
static void dump(struct pt_regs *fp)
{
unsigned long *sp;
unsigned char *tp;
int i;
printk("\nCURRENT PROCESS:\n\n");
printk("COMM=%s PID=%d\n", current->comm, current->pid);
if (current->mm) {
printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
(int) current->mm->start_code,
(int) current->mm->end_code,
(int) current->mm->start_data,
(int) current->mm->end_data,
(int) current->mm->end_data,
(int) current->mm->brk);
printk("USER-STACK=%08x KERNEL-STACK=%08lx\n\n",
(int) current->mm->start_stack,
(int) PAGE_SIZE+(unsigned long)current);
}
show_regs(fp);
printk("\nCODE:");
tp = ((unsigned char *) fp->pc) - 0x20;
for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
if ((i % 0x10) == 0)
printk("\n%08x: ", (int) (tp + i));
printk("%08x ", (int) *sp++);
}
printk("\n");
printk("\nKERNEL STACK:");
tp = ((unsigned char *) fp) - 0x40;
for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
if ((i % 0x10) == 0)
printk("\n%08x: ", (int) (tp + i));
printk("%08x ", (int) *sp++);
}
printk("\n");
if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE))
printk("(Possibly corrupted stack page??)\n");
printk("\n\n");
}
void die(const char *str, struct pt_regs *fp, unsigned long err)
{
static int diecount;
oops_enter();
console_verbose();
spin_lock_irq(&die_lock);
report_bug(fp->pc, fp);
printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++diecount);
dump(fp);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
extern char _start, _etext;
#define check_kernel_text(addr) \
((addr >= (unsigned long)(&_start)) && \
(addr < (unsigned long)(&_etext)))
static int kstack_depth_to_print = 24;
void show_stack(struct task_struct *task, unsigned long *esp)
{
unsigned long *stack, addr;
int i;
if (esp == NULL)
esp = (unsigned long *) &esp;
stack = esp;
printk("Stack from %08lx:", (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0)
break;
if (i % 8 == 0)
printk("\n ");
printk(" %08lx", *stack++);
}
printk("\nCall Trace:");
i = 0;
stack = esp;
while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (check_kernel_text(addr)) {
if (i % 4 == 0)
printk("\n ");
printk(" [<%08lx>]", addr);
i++;
}
}
printk("\n");
}
void show_trace_task(struct task_struct *tsk)
{
show_stack(tsk,(unsigned long *)tsk->thread.esp0);
}
void dump_stack(void)
{
show_stack(NULL,NULL);
}
EXPORT_SYMBOL(dump_stack);
| gpl-2.0 |
EmericanX/android_kernel_motorola_msm8960-common | drivers/regulator/userspace-consumer.c | 4034 | 5005 | /*
* userspace-consumer.c
*
* Copyright 2009 CompuLab, Ltd.
*
* Author: Mike Rapoport <mike@compulab.co.il>
*
* Based of virtual consumer driver:
* Copyright 2008 Wolfson Microelectronics PLC.
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
*/
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>
#include <linux/slab.h>
struct userspace_consumer_data {
const char *name;
struct mutex lock;
bool enabled;
int num_supplies;
struct regulator_bulk_data *supplies;
};
static ssize_t reg_show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", data->name);
}
static ssize_t reg_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
if (data->enabled)
return sprintf(buf, "enabled\n");
return sprintf(buf, "disabled\n");
}
static ssize_t reg_set_state(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct userspace_consumer_data *data = dev_get_drvdata(dev);
bool enabled;
int ret;
/*
* sysfs_streq() doesn't need the \n's, but we add them so the strings
* will be shared with show_state(), above.
*/
if (sysfs_streq(buf, "enabled\n") || sysfs_streq(buf, "1"))
enabled = true;
else if (sysfs_streq(buf, "disabled\n") || sysfs_streq(buf, "0"))
enabled = false;
else {
dev_err(dev, "Configuring invalid mode\n");
return count;
}
mutex_lock(&data->lock);
if (enabled != data->enabled) {
if (enabled)
ret = regulator_bulk_enable(data->num_supplies,
data->supplies);
else
ret = regulator_bulk_disable(data->num_supplies,
data->supplies);
if (ret == 0)
data->enabled = enabled;
else
dev_err(dev, "Failed to configure state: %d\n", ret);
}
mutex_unlock(&data->lock);
return count;
}
static DEVICE_ATTR(name, 0444, reg_show_name, NULL);
static DEVICE_ATTR(state, 0644, reg_show_state, reg_set_state);
static struct attribute *attributes[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
NULL,
};
static const struct attribute_group attr_group = {
.attrs = attributes,
};
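/*
 * Runtime usage sketch (the sysfs path is an assumption derived from
 * the driver name registered below):
 *
 *	cat /sys/devices/platform/reg-userspace-consumer.0/name
 *	echo enabled > /sys/devices/platform/reg-userspace-consumer.0/state
 *	echo disabled > /sys/devices/platform/reg-userspace-consumer.0/state
 */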
static int regulator_userspace_consumer_probe(struct platform_device *pdev)
{
struct regulator_userspace_consumer_data *pdata;
struct userspace_consumer_data *drvdata;
int ret;
pdata = pdev->dev.platform_data;
if (!pdata)
return -EINVAL;
drvdata = kzalloc(sizeof(struct userspace_consumer_data), GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
drvdata->name = pdata->name;
drvdata->num_supplies = pdata->num_supplies;
drvdata->supplies = pdata->supplies;
mutex_init(&drvdata->lock);
ret = regulator_bulk_get(&pdev->dev, drvdata->num_supplies,
drvdata->supplies);
if (ret) {
dev_err(&pdev->dev, "Failed to get supplies: %d\n", ret);
goto err_alloc_supplies;
}
ret = sysfs_create_group(&pdev->dev.kobj, &attr_group);
if (ret != 0)
goto err_create_attrs;
if (pdata->init_on) {
ret = regulator_bulk_enable(drvdata->num_supplies,
drvdata->supplies);
if (ret) {
dev_err(&pdev->dev,
"Failed to set initial state: %d\n", ret);
goto err_enable;
}
}
drvdata->enabled = pdata->init_on;
platform_set_drvdata(pdev, drvdata);
return 0;
err_enable:
sysfs_remove_group(&pdev->dev.kobj, &attr_group);
err_create_attrs:
regulator_bulk_free(drvdata->num_supplies, drvdata->supplies);
err_alloc_supplies:
kfree(drvdata);
return ret;
}
static int regulator_userspace_consumer_remove(struct platform_device *pdev)
{
struct userspace_consumer_data *data = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &attr_group);
if (data->enabled)
regulator_bulk_disable(data->num_supplies, data->supplies);
regulator_bulk_free(data->num_supplies, data->supplies);
kfree(data);
return 0;
}
static struct platform_driver regulator_userspace_consumer_driver = {
.probe = regulator_userspace_consumer_probe,
.remove = regulator_userspace_consumer_remove,
.driver = {
.name = "reg-userspace-consumer",
},
};
static int __init regulator_userspace_consumer_init(void)
{
return platform_driver_register(&regulator_userspace_consumer_driver);
}
module_init(regulator_userspace_consumer_init);
static void __exit regulator_userspace_consumer_exit(void)
{
platform_driver_unregister(&regulator_userspace_consumer_driver);
}
module_exit(regulator_userspace_consumer_exit);
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Alex-the-black/android_kernel_huawei_msm8212 | drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 4802 | 81943 | /*
* Original code based Host AP (software wireless LAN access point) driver
* for Intersil Prism2/2.5/3 - hostap.o module, common routines
*
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <jkmaline@cc.hut.fi>
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright (c) 2004, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
Andrea Merello <andreamrl@tiscali.it>
A special thanks goes to Realtek for their support !
******************************************************************************/
#include <linux/compiler.h>
//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/ctype.h>
#include "ieee80211.h"
#include "dot11d.h"
static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
skb->dev = ieee->dev;
skb_reset_mac_header(skb);
skb_pull(skb, ieee80211_get_hdrlen(fc));
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = __constant_htons(ETH_P_80211_RAW);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
}
/* Called only as a tasklet (software IRQ) */
static struct ieee80211_frag_entry *
ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
unsigned int frag, u8 tid,u8 *src, u8 *dst)
{
struct ieee80211_frag_entry *entry;
int i;
for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
entry = &ieee->frag_cache[tid][i];
if (entry->skb != NULL &&
time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
IEEE80211_DEBUG_FRAG(
"expiring fragment cache entry "
"seq=%u last_frag=%u\n",
entry->seq, entry->last_frag);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
}
if (entry->skb != NULL && entry->seq == seq &&
(entry->last_frag + 1 == frag || frag == -1) &&
memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
return entry;
}
return NULL;
}
/* Called only as a tasklet (software IRQ) */
static struct sk_buff *
ieee80211_frag_cache_get(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *hdr)
{
struct sk_buff *skb = NULL;
u16 fc = le16_to_cpu(hdr->frame_ctl);
u16 sc = le16_to_cpu(hdr->seq_ctl);
unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
struct ieee80211_frag_entry *entry;
struct ieee80211_hdr_3addrqos *hdr_3addrqos;
struct ieee80211_hdr_4addrqos *hdr_4addrqos;
u8 tid;
if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr;
tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
} else {
tid = 0;
}
if (frag == 0) {
/* Reserve enough space to fit maximum frame length */
skb = dev_alloc_skb(ieee->dev->mtu +
sizeof(struct ieee80211_hdr_4addr) +
8 /* LLC */ +
2 /* alignment */ +
8 /* WEP */ +
ETH_ALEN /* WDS */ +
(IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
if (skb == NULL)
return NULL;
entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
ieee->frag_next_idx[tid]++;
if (ieee->frag_next_idx[tid] >= IEEE80211_FRAG_CACHE_LEN)
ieee->frag_next_idx[tid] = 0;
if (entry->skb != NULL)
dev_kfree_skb_any(entry->skb);
entry->first_frag_time = jiffies;
entry->seq = seq;
entry->last_frag = frag;
entry->skb = skb;
memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
} else {
/* received a fragment of a frame for which the head fragment
* should have already been received */
entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2,
hdr->addr1);
if (entry != NULL) {
entry->last_frag = frag;
skb = entry->skb;
}
}
return skb;
}
/* Called only as a tasklet (software IRQ) */
static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *hdr)
{
u16 fc = le16_to_cpu(hdr->frame_ctl);
u16 sc = le16_to_cpu(hdr->seq_ctl);
unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
struct ieee80211_frag_entry *entry;
struct ieee80211_hdr_3addrqos *hdr_3addrqos;
struct ieee80211_hdr_4addrqos *hdr_4addrqos;
u8 tid;
if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr;
tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
} else if (IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
} else {
tid = 0;
}
entry = ieee80211_frag_cache_find(ieee, seq, -1, tid,hdr->addr2,
hdr->addr1);
if (entry == NULL) {
IEEE80211_DEBUG_FRAG(
"could not invalidate fragment cache "
"entry (seq=%u)\n", seq);
return -1;
}
entry->skb = NULL;
return 0;
}
/* ieee80211_rx_frame_mgmt
*
* Responsible for handling management control frames
*
* Called by ieee80211_rx */
static inline int
ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats, u16 type,
u16 stype)
{
/* The struct stats definition says this field is not
 * mandatory, but the probe response parser seems to
 * use it.
*/
struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
ieee80211_rx_mgt(ieee,(struct ieee80211_hdr_4addr *)skb->data,rx_stats);
//if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN)))
if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))//use ADDR1 to perform address matching for Management frames
{
dev_kfree_skb_any(skb);
return 0;
}
ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
dev_kfree_skb_any(skb);
return 0;
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER) {
printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
ieee->dev->name);
return 0;
/*
hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
skb->data);*/
}
if (ieee->hostapd && type == IEEE80211_TYPE_MGMT) {
if (stype == WLAN_FC_STYPE_BEACON &&
ieee->iw_mode == IW_MODE_MASTER) {
struct sk_buff *skb2;
/* Process beacon frames also in kernel driver to
* update STA(AP) table statistics */
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
hostap_rx(skb2->dev, skb2, rx_stats);
}
/* send management frames to the user space daemon for
* processing */
ieee->apdevstats.rx_packets++;
ieee->apdevstats.rx_bytes += skb->len;
prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
return 0;
}
if (ieee->iw_mode == IW_MODE_MASTER) {
if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
printk(KERN_DEBUG "%s: unknown management frame "
"(type=0x%02x, stype=0x%02x) dropped\n",
skb->dev->name, type, stype);
return -1;
}
hostap_rx(skb->dev, skb, rx_stats);
return 0;
}
printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
"received in non-Host AP mode\n", skb->dev->name);
return -1;
#endif
}
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
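/*
 * Decapsulation example (illustrative): an IPv4 MSDU arrives with the
 * LLC/SNAP prefix AA AA 03 00 00 00 followed by EtherType 08 00. That
 * matches rfc1042_header and is neither AARP nor IPX, so the receive
 * path strips the 8-byte prefix and pushes the src/dst MAC addresses
 * back on to form an Ethernet frame.
 */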
/* Called by ieee80211_rx_frame_decrypt */
static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
struct sk_buff *skb, size_t hdrlen)
{
struct net_device *dev = ieee->dev;
u16 fc, ethertype;
struct ieee80211_hdr_4addr *hdr;
u8 *pos;
if (skb->len < 24)
return 0;
hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
/* check that the frame is unicast frame to us */
if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_TODS &&
memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
/* ToDS frame with own addr BSSID and DA */
} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS &&
memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
/* FromDS frame with own addr as DA */
} else
return 0;
if (skb->len < 24 + 8)
return 0;
/* check for port access entity Ethernet type */
// pos = skb->data + 24;
pos = skb->data + hdrlen;
ethertype = (pos[6] << 8) | pos[7];
if (ethertype == ETH_P_PAE)
return 1;
return 0;
}
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
static inline int
ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
struct ieee80211_crypt_data *crypt)
{
struct ieee80211_hdr_4addr *hdr;
int res, hdrlen;
if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
return 0;
if (ieee->hwsec_active)
{
cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
tcb_desc->bHwSec = 1;
}
hdr = (struct ieee80211_hdr_4addr *) skb->data;
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
#ifdef CONFIG_IEEE80211_CRYPT_TKIP
if (ieee->tkip_countermeasures &&
strcmp(crypt->ops->name, "TKIP") == 0) {
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
"received packet from %pM\n",
ieee->dev->name, hdr->addr2);
}
return -1;
}
#endif
atomic_inc(&crypt->refcnt);
res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
IEEE80211_DEBUG_DROP(
"decryption failed (SA=%pM"
") res=%d\n", hdr->addr2, res);
if (res == -2)
IEEE80211_DEBUG_DROP("Decryption failed ICV "
"mismatch (key %d)\n",
skb->data[hdrlen + 3] >> 6);
ieee->ieee_stats.rx_discards_undecryptable++;
return -1;
}
return res;
}
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
static inline int
ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb,
int keyidx, struct ieee80211_crypt_data *crypt)
{
struct ieee80211_hdr_4addr *hdr;
int res, hdrlen;
if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
return 0;
if (ieee->hwsec_active)
{
cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
tcb_desc->bHwSec = 1;
}
hdr = (struct ieee80211_hdr_4addr *) skb->data;
hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
" (SA=%pM keyidx=%d)\n",
ieee->dev->name, hdr->addr2, keyidx);
return -1;
}
return 0;
}
/* this function is stolen from ipw2200 driver*/
#define IEEE_PACKET_RETRY_TIME (5*HZ)
static int is_duplicate_packet(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header)
{
u16 fc = le16_to_cpu(header->frame_ctl);
u16 sc = le16_to_cpu(header->seq_ctl);
u16 seq = WLAN_GET_SEQ_SEQ(sc);
u16 frag = WLAN_GET_SEQ_FRAG(sc);
u16 *last_seq, *last_frag;
unsigned long *last_time;
struct ieee80211_hdr_3addrqos *hdr_3addrqos;
struct ieee80211_hdr_4addrqos *hdr_4addrqos;
u8 tid;
//TO2DS and QoS
if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)header;
tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
} else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
hdr_3addrqos = (struct ieee80211_hdr_3addrqos*)header;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
} else { // no QoS
tid = 0;
}
switch (ieee->iw_mode) {
case IW_MODE_ADHOC:
{
struct list_head *p;
struct ieee_ibss_seq *entry = NULL;
u8 *mac = header->addr2;
int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
//for (pos = (head)->next; pos != (head); pos = pos->next)
//__list_for_each(p, &ieee->ibss_mac_hash[index]) {
list_for_each(p, &ieee->ibss_mac_hash[index]) {
entry = list_entry(p, struct ieee_ibss_seq, list);
if (!memcmp(entry->mac, mac, ETH_ALEN))
break;
}
// if (memcmp(entry->mac, mac, ETH_ALEN)){
if (p == &ieee->ibss_mac_hash[index]) {
entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
if (!entry) {
printk(KERN_WARNING "Cannot malloc new mac entry\n");
return 0;
}
memcpy(entry->mac, mac, ETH_ALEN);
entry->seq_num[tid] = seq;
entry->frag_num[tid] = frag;
entry->packet_time[tid] = jiffies;
list_add(&entry->list, &ieee->ibss_mac_hash[index]);
return 0;
}
last_seq = &entry->seq_num[tid];
last_frag = &entry->frag_num[tid];
last_time = &entry->packet_time[tid];
break;
}
case IW_MODE_INFRA:
last_seq = &ieee->last_rxseq_num[tid];
last_frag = &ieee->last_rxfrag_num[tid];
last_time = &ieee->last_packet_time[tid];
break;
default:
return 0;
}
// if(tid != 0) {
// printk(KERN_WARNING ":)))))))))))%x %x %x, fc(%x)\n", tid, *last_seq, seq, header->frame_ctl);
// }
if ((*last_seq == seq) &&
time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
if (*last_frag == frag){
//printk(KERN_WARNING "[1] go drop!\n");
goto drop;
}
if (*last_frag + 1 != frag)
/* out-of-order fragment */
//printk(KERN_WARNING "[2] go drop!\n");
goto drop;
} else
*last_seq = seq;
*last_frag = frag;
*last_time = jiffies;
return 0;
drop:
// BUG_ON(!(fc & IEEE80211_FCTL_RETRY));
// printk("DUP\n");
return 1;
}
bool
AddReorderEntry(
PRX_TS_RECORD pTS,
PRX_REORDER_ENTRY pReorderEntry
)
{
struct list_head *pList = &pTS->RxPendingPktList;
while(pList->next != &pTS->RxPendingPktList)
{
if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
{
pList = pList->next;
}
else if( SN_EQUAL(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
{
return false;
}
else
{
break;
}
}
pReorderEntry->List.next = pList->next;
pReorderEntry->List.next->prev = &pReorderEntry->List;
pReorderEntry->List.prev = pList;
pList->next = &pReorderEntry->List;
return true;
}
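/*
 * Behavioural note on the walk above: RxPendingPktList is kept sorted
 * by sequence number (largest, in modular terms, nearest the head), an
 * equal SeqNum is treated as a duplicate and rejected, and the
 * indication loop later drains entries from the tail, i.e. smallest
 * SeqNum first.
 */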
void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index)
{
u8 i = 0 , j=0;
u16 ethertype;
// if(index > 1)
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): we indicate packets from the reorder list, index is %u\n",__FUNCTION__,index);
for(j = 0; j<index; j++)
{
//added by amy for reorder
struct ieee80211_rxb* prxb = prxbIndicateArray[j];
for(i = 0; i<prxb->nr_subframes; i++) {
struct sk_buff *sub_skb = prxb->subframes[i];
/* convert hdr + possible LLC headers into Ethernet header */
ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
if (sub_skb->len >= 8 &&
((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_skb->len);
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
}
//stats->rx_packets++;
//stats->rx_bytes += sub_skb->len;
/* Indicate the packets to upper layer */
if (sub_skb) {
//printk("0skb_len(%d)\n", skb->len);
sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev);
memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
sub_skb->dev = ieee->dev;
sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
//skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
ieee->last_rx_ps_time = jiffies;
//printk("1skb_len(%d)\n", skb->len);
netif_rx(sub_skb);
}
}
kfree(prxb);
prxb = NULL;
}
}
void RxReorderIndicatePacket( struct ieee80211_device *ieee,
struct ieee80211_rxb* prxb,
PRX_TS_RECORD pTS,
u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PRX_REORDER_ENTRY pReorderEntry = NULL;
struct ieee80211_rxb* prxbIndicateArray[REORDER_WIN_SIZE];
u8 WinSize = pHTInfo->RxReorderWinSize;
u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
u8 index = 0;
bool bMatchWinStart = false, bPktInBuf = false;
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__FUNCTION__,SeqNum,pTS->RxIndicateSeq,WinSize);
/* Rx Reorder initialize condition.*/
if(pTS->RxIndicateSeq == 0xffff) {
pTS->RxIndicateSeq = SeqNum;
}
/* Drop out the packet which SeqNum is smaller than WinStart */
if(SN_LESS(SeqNum, pTS->RxIndicateSeq)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
pTS->RxIndicateSeq, SeqNum);
pHTInfo->RxReorderDropCounter++;
{
int i;
for(i =0; i < prxb->nr_subframes; i++) {
dev_kfree_skb(prxb->subframes[i]);
}
kfree(prxb);
prxb = NULL;
}
return;
}
/*
* Sliding window manipulation. Conditions includes:
* 1. Incoming SeqNum is equal to WinStart =>Window shift 1
* 2. Incoming SeqNum is larger than the WinEnd => Window shift N
*/
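/*
 * Worked example (numbers are illustrative): with WinSize 64 and
 * RxIndicateSeq 100, WinEnd is 163. An arriving SeqNum of 200 lies
 * beyond WinEnd, so the window start advances to 200 + 1 - 64 = 137
 * and any buffered packet below 137 becomes eligible for indication.
 */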
if(SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) {
pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
bMatchWinStart = true;
} else if(SN_LESS(WinEnd, SeqNum)) {
if(SeqNum >= (WinSize - 1)) {
pTS->RxIndicateSeq = SeqNum + 1 -WinSize;
} else {
pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum +1)) + 1;
}
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
}
/*
* Indication process.
* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets
* with the SeqNum smaller than latest WinStart and buffer other packets.
*/
/* For Rx Reorder condition:
* 1. All packets with SeqNum smaller than WinStart => Indicate
* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it.
*/
if(bMatchWinStart) {
/* Current packet is going to be indicated.*/
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\
pTS->RxIndicateSeq, SeqNum);
prxbIndicateArray[0] = prxb;
// printk("========================>%s(): SeqNum is %d\n",__FUNCTION__,SeqNum);
index = 1;
} else {
/* Current packet is going to be inserted into pending list.*/
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX an out-of-order packet, insert into ordered list\n",__FUNCTION__);
if(!list_empty(&ieee->RxReorder_Unused_List)) {
pReorderEntry = (PRX_REORDER_ENTRY)list_entry(ieee->RxReorder_Unused_List.next,RX_REORDER_ENTRY,List);
list_del_init(&pReorderEntry->List);
/* Make a reorder entry and insert into a the packet list.*/
pReorderEntry->SeqNum = SeqNum;
pReorderEntry->prxb = prxb;
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pREorderEntry->SeqNum is %d\n",__FUNCTION__,pReorderEntry->SeqNum);
if(!AddReorderEntry(pTS, pReorderEntry)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
__FUNCTION__, pTS->RxIndicateSeq, SeqNum);
list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
{
int i;
for(i =0; i < prxb->nr_subframes; i++) {
dev_kfree_skb(prxb->subframes[i]);
}
kfree(prxb);
prxb = NULL;
}
} else {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,
"Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
}
}
else {
/*
 * Packets are dropped if there are not enough reorder entries.
* This part shall be modified!! We can just indicate all the
* packets in buffer and get reorder entries.
*/
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n");
{
int i;
for(i =0; i < prxb->nr_subframes; i++) {
dev_kfree_skb(prxb->subframes[i]);
}
kfree(prxb);
prxb = NULL;
}
}
}
/* Check if there is any packet need indicate.*/
while(!list_empty(&pTS->RxPendingPktList)) {
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__FUNCTION__);
pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
if( SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
{
/* This protects the buffer from overflow. */
if(index >= REORDER_WIN_SIZE) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n");
bPktInBuf = true;
break;
}
list_del_init(&pReorderEntry->List);
if(SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
prxbIndicateArray[index] = pReorderEntry->prxb;
// printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__FUNCTION__,pReorderEntry->SeqNum);
index++;
list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
} else {
bPktInBuf = true;
break;
}
}
/* Handling pending timer. Set this timer to prevent from long time Rx buffering.*/
if(index>0) {
// Cancel previous pending timer.
// del_timer_sync(&pTS->RxPktPendingTimer);
pTS->RxTimeoutIndicateSeq = 0xffff;
// Indicate packets
if(index>REORDER_WIN_SIZE){
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!!\n");
return;
}
ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
}
if(bPktInBuf && pTS->RxTimeoutIndicateSeq==0xffff) {
// Set new pending timer.
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __FUNCTION__);
pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
if(timer_pending(&pTS->RxPktPendingTimer))
del_timer_sync(&pTS->RxPktPendingTimer);
pTS->RxPktPendingTimer.expires = jiffies + MSECS(pHTInfo->RxReorderPendingTime);
add_timer(&pTS->RxPktPendingTimer);
}
}
u8 parse_subframe(struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats,
struct ieee80211_rxb *rxb,u8* src,u8* dst)
{
struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr* )skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
u16 LLCOffset= sizeof(struct ieee80211_hdr_3addr);
u16 ChkLength;
bool bIsAggregateFrame = false;
u16 nSubframe_Length;
u8 nPadding_Length = 0;
u16 SeqNum=0;
struct sk_buff *sub_skb;
u8 *data_ptr;
/* just for debug purpose */
SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
if((IEEE80211_QOS_HAS_SEQ(fc))&&\
(((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) {
bIsAggregateFrame = true;
}
if(IEEE80211_QOS_HAS_SEQ(fc)) {
LLCOffset += 2;
}
if(rx_stats->bContainHTC) {
LLCOffset += sHTCLng;
}
//printk("ChkLength = %d\n", LLCOffset);
// Null packet, don't indicate it to upper layer
ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/
if( skb->len <= ChkLength ) {
return 0;
}
skb_pull(skb, LLCOffset);
if(!bIsAggregateFrame) {
rxb->nr_subframes = 1;
#ifdef JOHN_NOCPY
rxb->subframes[0] = skb;
#else
rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC);
#endif
memcpy(rxb->src,src,ETH_ALEN);
memcpy(rxb->dst,dst,ETH_ALEN);
//IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len);
return 1;
} else {
rxb->nr_subframes = 0;
memcpy(rxb->src,src,ETH_ALEN);
memcpy(rxb->dst,dst,ETH_ALEN);
while(skb->len > ETHERNET_HEADER_SIZE) {
/* Offset 12 denote 2 mac address */
nSubframe_Length = *((u16*)(skb->data + 12));
/* A-MSDU subframe length is stored big-endian; byte-swap it */
nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8);
if(skb->len<(ETHERNET_HEADER_SIZE + nSubframe_Length)) {
printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\
__FUNCTION__,rxb->nr_subframes);
printk("%s: A-MSDU parse error!! Subframe Length: %d\n",__FUNCTION__, nSubframe_Length);
printk("nRemain_Length is %d and nSubframe_Length is : %d\n",skb->len,nSubframe_Length);
printk("The Packet SeqNum is %d\n",SeqNum);
return 0;
}
/* move the data point to data content */
skb_pull(skb, ETHERNET_HEADER_SIZE);
#ifdef JOHN_NOCPY
sub_skb = skb_clone(skb, GFP_ATOMIC);
sub_skb->len = nSubframe_Length;
sub_skb->tail = sub_skb->data + nSubframe_Length;
#else
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
skb_reserve(sub_skb, 12);
data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
memcpy(data_ptr,skb->data,nSubframe_Length);
#endif
rxb->subframes[rxb->nr_subframes++] = sub_skb;
if(rxb->nr_subframes >= MAX_SUBFRAME_COUNT) {
IEEE80211_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n");
break;
}
skb_pull(skb,nSubframe_Length);
if(skb->len != 0) {
nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4);
if(nPadding_Length == 4) {
nPadding_Length = 0;
}
if(skb->len < nPadding_Length) {
return 0;
}
skb_pull(skb,nPadding_Length);
}
}
#ifdef JOHN_NOCPY
dev_kfree_skb(skb);
#endif
//{just for debug added by david
//printk("AMSDU::rxb->nr_subframes = %d\n",rxb->nr_subframes);
//}
return rxb->nr_subframes;
}
}
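/*
 * A-MSDU subframe layout parsed above (per 802.11n, assuming
 * ETHERNET_HEADER_SIZE is 14):
 *
 *	| DA (6) | SA (6) | Length (2, big-endian) | MSDU | pad to 4 |
 *
 * e.g. a 39-byte MSDU gives nSubframe_Length == 39 and, since
 * (39 + 14) % 4 == 1, a nPadding_Length of 3.
 */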
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
struct ieee80211_hdr_4addr *hdr;
//struct ieee80211_hdr_3addrqos *hdr;
size_t hdrlen;
u16 fc, type, stype, sc;
struct net_device_stats *stats;
unsigned int frag;
u8 *payload;
u16 ethertype;
//added by amy for reorder
u8 TID = 0;
u16 SeqNum = 0;
PRX_TS_RECORD pTS = NULL;
//bool bIsAggregateFrame = false;
//added by amy for reorder
#ifdef NOT_YET
struct net_device *wds = NULL;
struct sk_buff *skb2 = NULL;
int frame_authorized = 0;
int from_assoc_ap = 0;
void *sta = NULL;
#endif
// u16 qos_ctl = 0;
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
u8 bssid[ETH_ALEN];
struct ieee80211_crypt_data *crypt = NULL;
int keyidx = 0;
int i;
struct ieee80211_rxb* rxb = NULL;
// cheat the hdr type
hdr = (struct ieee80211_hdr_4addr *)skb->data;
stats = &ieee->stats;
if (skb->len < 10) {
printk(KERN_INFO "%s: SKB length < 10\n",
dev->name);
goto rx_dropped;
}
fc = le16_to_cpu(hdr->frame_ctl);
type = WLAN_FC_GET_TYPE(fc);
stype = WLAN_FC_GET_STYPE(fc);
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
hdrlen = ieee80211_get_hdrlen(fc);
if(HTCCheck(ieee, skb->data))
{
if(net_ratelimit())
printk("find HTCControl\n");
hdrlen += 4;
rx_stats->bContainHTC = 1;
}
//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
#ifdef NOT_YET
#if WIRELESS_EXT > 15
/* Put this code here so that we avoid duplicating it in all
* Rx paths. - Jean II */
#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
/* If spy monitoring on */
if (iface->spy_data.spy_number > 0) {
struct iw_quality wstats;
wstats.level = rx_stats->rssi;
wstats.noise = rx_stats->noise;
wstats.updated = 6; /* No qual value */
/* Update spy records */
wireless_spy_update(dev, hdr->addr2, &wstats);
}
#endif /* IW_WIRELESS_SPY */
#endif /* WIRELESS_EXT > 15 */
hostap_update_rx_stats(local->ap, hdr, rx_stats);
#endif
#if WIRELESS_EXT > 15
if (ieee->iw_mode == IW_MODE_MONITOR) {
ieee80211_monitor_rx(ieee, skb, rx_stats);
stats->rx_packets++;
stats->rx_bytes += skb->len;
return 1;
}
#endif
if (ieee->host_decrypt) {
int idx = 0;
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
crypt = ieee->crypt[idx];
#ifdef NOT_YET
sta = NULL;
/* Use station specific key to override default keys if the
* receiver address is a unicast address ("individual RA"). If
* bcrx_sta_key parameter is set, station specific key is used
* even with broad/multicast targets (this is against IEEE
* 802.11, but makes it easier to use different keys with
* stations that do not support WEP key mapping). */
if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
(void) hostap_handle_sta_crypto(local, hdr, &crypt,
&sta);
#endif
/* allow NULL decrypt to indicate a station-specific override
* for default encryption */
if (crypt && (crypt->ops == NULL ||
crypt->ops->decrypt_mpdu == NULL))
crypt = NULL;
if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
/* This seems to be triggered by some (multicast?)
* frames from other than current BSS, so just drop the
* frames silently instead of filling system log with
* these reports. */
IEEE80211_DEBUG_DROP("Decryption failed (not set)"
" (SA=%pM)\n",
hdr->addr2);
ieee->ieee_stats.rx_discards_undecryptable++;
goto rx_dropped;
}
}
if (skb->len < IEEE80211_DATA_HDR3_LEN)
goto rx_dropped;
// if QoS enabled, should check the sequence for each of the AC
if( (ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)){
if (is_duplicate_packet(ieee, hdr))
goto rx_dropped;
}
else
{
PRX_TS_RECORD pRxTS = NULL;
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__FUNCTION__, tid);
if(GetTs(
ieee,
(PTS_COMMON_INFO*) &pRxTS,
hdr->addr2,
(u8)Frame_QoSTID((u8*)(skb->data)),
RX_DIR,
true))
{
// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__FUNCTION__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
if( (fc & (1<<11)) &&
(frag == pRxTS->RxLastFragNum) &&
(WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum) )
{
goto rx_dropped;
}
else
{
pRxTS->RxLastFragNum = frag;
pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
}
}
else
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! Skip the check!!\n",__FUNCTION__);
goto rx_dropped;
}
}
if (type == IEEE80211_FTYPE_MGMT) {
//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
goto rx_dropped;
else
goto rx_exit;
}
/* Data frame - extract src/dst addresses */
switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
case IEEE80211_FCTL_FROMDS:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr3, ETH_ALEN);
memcpy(bssid, hdr->addr2, ETH_ALEN);
break;
case IEEE80211_FCTL_TODS:
memcpy(dst, hdr->addr3, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
memcpy(bssid, hdr->addr1, ETH_ALEN);
break;
case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
if (skb->len < IEEE80211_DATA_HDR4_LEN)
goto rx_dropped;
memcpy(dst, hdr->addr3, ETH_ALEN);
memcpy(src, hdr->addr4, ETH_ALEN);
memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
break;
case 0:
memcpy(dst, hdr->addr1, ETH_ALEN);
memcpy(src, hdr->addr2, ETH_ALEN);
memcpy(bssid, hdr->addr3, ETH_ALEN);
break;
}
#ifdef NOT_YET
if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
goto rx_dropped;
if (wds) {
skb->dev = dev = wds;
stats = hostap_get_stats(dev);
}
if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
(fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS &&
ieee->stadev &&
memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
/* Frame from BSSID of the AP for which we are a client */
skb->dev = dev = ieee->stadev;
stats = hostap_get_stats(dev);
from_assoc_ap = 1;
}
#endif
dev->last_rx = jiffies;
#ifdef NOT_YET
if ((ieee->iw_mode == IW_MODE_MASTER ||
ieee->iw_mode == IW_MODE_REPEAT) &&
!from_assoc_ap) {
switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
wds != NULL)) {
case AP_RX_CONTINUE_NOT_AUTHORIZED:
frame_authorized = 0;
break;
case AP_RX_CONTINUE:
frame_authorized = 1;
break;
case AP_RX_DROP:
goto rx_dropped;
case AP_RX_EXIT:
goto rx_exit;
}
}
#endif
//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
/* Nullfunc frames may have PS-bit set, so they must be passed to
* hostap_handle_sta_rx() before being dropped here. */
if (stype != IEEE80211_STYPE_DATA &&
stype != IEEE80211_STYPE_DATA_CFACK &&
stype != IEEE80211_STYPE_DATA_CFPOLL &&
stype != IEEE80211_STYPE_DATA_CFACKPOLL&&
stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4
) {
if (stype != IEEE80211_STYPE_NULLFUNC)
IEEE80211_DEBUG_DROP(
"RX: dropped data frame "
"with no data (type=0x%02x, "
"subtype=0x%02x, len=%d)\n",
type, stype, skb->len);
goto rx_dropped;
}
if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
goto rx_dropped;
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
(keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
{
printk("decrypt frame error\n");
goto rx_dropped;
}
hdr = (struct ieee80211_hdr_4addr *) skb->data;
/* skb: hdr + (possibly fragmented) plaintext payload */
// PR: FIXME: hostap has additional conditions in the "if" below:
// ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
int flen;
struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
if (!frag_skb) {
IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
"Rx cannot get skb from fragment "
"cache (morefrag=%d seq=%u frag=%u)\n",
(fc & IEEE80211_FCTL_MOREFRAGS) != 0,
WLAN_GET_SEQ_SEQ(sc), frag);
goto rx_dropped;
}
flen = skb->len;
if (frag != 0)
flen -= hdrlen;
if (frag_skb->tail + flen > frag_skb->end) {
printk(KERN_WARNING "%s: host decrypted and "
"reassembled frame did not fit skb\n",
dev->name);
ieee80211_frag_cache_invalidate(ieee, hdr);
goto rx_dropped;
}
if (frag == 0) {
/* copy first fragment (including full headers) into
* beginning of the fragment cache skb */
memcpy(skb_put(frag_skb, flen), skb->data, flen);
} else {
/* append frame payload to the end of the fragment
* cache skb */
memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
flen);
}
dev_kfree_skb_any(skb);
skb = NULL;
if (fc & IEEE80211_FCTL_MOREFRAGS) {
/* more fragments expected - leave the skb in fragment
* cache for now; it will be delivered to upper layers
* after all fragments have been received */
goto rx_exit;
}
/* this was the last fragment and the frame will be
* delivered, so remove skb from fragment cache */
skb = frag_skb;
hdr = (struct ieee80211_hdr_4addr *) skb->data;
ieee80211_frag_cache_invalidate(ieee, hdr);
}
/* skb: hdr + (possibly reassembled) full MSDU payload; possibly still
* encrypted/authenticated */
if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
{
printk("==>decrypt msdu error\n");
goto rx_dropped;
}
//added by amy for AP roaming
ieee->LinkDetectInfo.NumRecvDataInPeriod++;
ieee->LinkDetectInfo.NumRxOkInPeriod++;
hdr = (struct ieee80211_hdr_4addr *) skb->data;
if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
if (/*ieee->ieee802_1x &&*/
ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
#ifdef CONFIG_IEEE80211_DEBUG
/* pass unencrypted EAPOL frames even if encryption is
* configured */
struct eapol *eap = (struct eapol *)(skb->data +
24);
IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
eap_get_type(eap->type));
#endif
} else {
IEEE80211_DEBUG_DROP(
"encryption configured, but RX "
"frame not encrypted (SA=%pM)\n",
hdr->addr2);
goto rx_dropped;
}
}
#ifdef CONFIG_IEEE80211_DEBUG
if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
struct eapol *eap = (struct eapol *)(skb->data +
24);
IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
eap_get_type(eap->type));
}
#endif
if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
!ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
IEEE80211_DEBUG_DROP(
"dropped unencrypted RX data "
"frame from %pM"
" (drop_unencrypted=1)\n",
hdr->addr2);
goto rx_dropped;
}
/*
if(ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
printk(KERN_WARNING "RX: IEEE802.1X EPAOL frame!\n");
}
*/
//added by amy for reorder
if(ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
&& !is_multicast_ether_addr(hdr->addr1) && !is_broadcast_ether_addr(hdr->addr1))
{
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
GetTs(ieee,(PTS_COMMON_INFO*) &pTS,hdr->addr2,TID,RX_DIR,true);
if(TID !=0 && TID !=3)
{
ieee->bis_any_nonbepkts = true;
}
}
//added by amy for reorder
/* skb: hdr + (possibly reassembled) full plaintext payload */
payload = skb->data + hdrlen;
//ethertype = (payload[6] << 8) | payload[7];
rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
if(rxb == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
goto rx_dropped;
}
/* to parse amsdu packets */
/* qos data packets & reserved bit is 1 */
if(parse_subframe(skb,rx_stats,rxb,src,dst) == 0) {
/* only to free rxb, and not submit the packets to upper layer */
for(i =0; i < rxb->nr_subframes; i++) {
dev_kfree_skb(rxb->subframes[i]);
}
kfree(rxb);
rxb = NULL;
goto rx_dropped;
}
//added by amy for reorder
if(ieee->pHTInfo->bCurRxReorderEnable == false ||pTS == NULL){
//added by amy for reorder
for(i = 0; i<rxb->nr_subframes; i++) {
struct sk_buff *sub_skb = rxb->subframes[i];
if (sub_skb) {
/* convert hdr + possible LLC headers into Ethernet header */
ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
if (sub_skb->len >= 8 &&
((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_skb->len);
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
}
stats->rx_packets++;
stats->rx_bytes += sub_skb->len;
if(is_multicast_ether_addr(dst)) {
stats->multicast++;
}
/* Indicate the packets to upper layer */
//printk("0skb_len(%d)\n", skb->len);
sub_skb->protocol = eth_type_trans(sub_skb, dev);
memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
sub_skb->dev = dev;
sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
//skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
ieee->last_rx_ps_time = jiffies;
//printk("1skb_len(%d)\n", skb->len);
netif_rx(sub_skb);
}
}
kfree(rxb);
rxb = NULL;
}
else
{
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n",__FUNCTION__);
RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
}
#ifndef JOHN_NOCPY
dev_kfree_skb(skb);
#endif
rx_exit:
#ifdef NOT_YET
if (sta)
hostap_handle_sta_release(sta);
#endif
return 1;
rx_dropped:
kfree(rxb);
rxb = NULL;
stats->rx_dropped++;
/* Returning 0 indicates to caller that we have not handled the SKB--
* so it is still allocated and can be used again by underlying
* hardware as a DMA target */
return 0;
}
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
/*
 * Verify that the QoS structure we read from the beacon packet
 * has the right values
*/
static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
*info_element, int sub_type)
{
if (info_element->qui_subtype != sub_type)
return -1;
if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
return -1;
if (info_element->qui_type != QOS_OUI_TYPE)
return -1;
if (info_element->version != QOS_VERSION_1)
return -1;
return 0;
}
/*
* Parse a QoS parameter element
*/
static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
*element_param, struct ieee80211_info_element
*info_element)
{
int ret = 0;
u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
if ((info_element == NULL) || (element_param == NULL))
return -1;
if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
memcpy(element_param->info_element.qui, info_element->data,
info_element->len);
element_param->info_element.elementID = info_element->id;
element_param->info_element.length = info_element->len;
} else
ret = -1;
if (ret == 0)
ret = ieee80211_verify_qos_info(&element_param->info_element,
QOS_OUI_PARAM_SUB_TYPE);
return ret;
}
/*
* Parse a QoS information element
*/
static int ieee80211_read_qos_info_element(struct
ieee80211_qos_information_element
*element_info, struct ieee80211_info_element
*info_element)
{
int ret = 0;
u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
if (element_info == NULL)
return -1;
if (info_element == NULL)
return -1;
if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
memcpy(element_info->qui, info_element->data,
info_element->len);
element_info->elementID = info_element->id;
element_info->length = info_element->len;
} else
ret = -1;
if (ret == 0)
ret = ieee80211_verify_qos_info(element_info,
QOS_OUI_INFO_SUB_TYPE);
return ret;
}
/*
* Write QoS parameters from the ac parameters.
*/
static int ieee80211_qos_convert_ac_to_parameters(struct
ieee80211_qos_parameter_info
*param_elm, struct
ieee80211_qos_parameters
*qos_param)
{
int rc = 0;
int i;
struct ieee80211_qos_ac_parameter *ac_params;
u8 aci;
//u8 cw_min;
//u8 cw_max;
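// Annotation (added for clarity, not in the original source):
// aci_aifsn packs AIFSN in bits 0-3, the ACM flag in bit 4 and the
// ACI (queue index) in bits 5-6; ecw_min_max packs ECWmin in bits
// 0-3 and ECWmax in bits 4-7, where the real window is CW = 2^ECW - 1.
// E.g. aci_aifsn == 0x64 decodes to ACI 3, ACM clear, AIFSN 4.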
for (i = 0; i < QOS_QUEUE_NUM; i++) {
ac_params = &(param_elm->ac_params_record[i]);
aci = (ac_params->aci_aifsn & 0x60) >> 5;
if(aci >= QOS_QUEUE_NUM)
continue;
qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;
/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];
qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
qos_param->flag[aci] =
(ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
}
return rc;
}
/*
* We have a generic data element which may contain a QoS information
* or QoS parameters element. Check the information element length to decide
* which type to read
*/
static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
*info_element,
struct ieee80211_network *network)
{
int rc = 0;
struct ieee80211_qos_parameters *qos_param = NULL;
struct ieee80211_qos_information_element qos_info_element;
rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
if (rc == 0) {
network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
network->flags |= NETWORK_HAS_QOS_INFORMATION;
} else {
struct ieee80211_qos_parameter_info param_element;
rc = ieee80211_read_qos_param_element(&param_element,
info_element);
if (rc == 0) {
qos_param = &(network->qos_data.parameters);
ieee80211_qos_convert_ac_to_parameters(&param_element,
qos_param);
network->flags |= NETWORK_HAS_QOS_PARAMETERS;
network->qos_data.param_count =
param_element.info_element.ac_info & 0x0F;
}
}
if (rc == 0) {
IEEE80211_DEBUG_QOS("QoS is supported\n");
network->qos_data.supported = 1;
}
return rc;
}
#ifdef CONFIG_IEEE80211_DEBUG
#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
static const char *get_info_element_string(u16 id)
{
switch (id) {
MFIE_STRING(SSID);
MFIE_STRING(RATES);
MFIE_STRING(FH_SET);
MFIE_STRING(DS_SET);
MFIE_STRING(CF_SET);
MFIE_STRING(TIM);
MFIE_STRING(IBSS_SET);
MFIE_STRING(COUNTRY);
MFIE_STRING(HOP_PARAMS);
MFIE_STRING(HOP_TABLE);
MFIE_STRING(REQUEST);
MFIE_STRING(CHALLENGE);
MFIE_STRING(POWER_CONSTRAINT);
MFIE_STRING(POWER_CAPABILITY);
MFIE_STRING(TPC_REQUEST);
MFIE_STRING(TPC_REPORT);
MFIE_STRING(SUPP_CHANNELS);
MFIE_STRING(CSA);
MFIE_STRING(MEASURE_REQUEST);
MFIE_STRING(MEASURE_REPORT);
MFIE_STRING(QUIET);
MFIE_STRING(IBSS_DFS);
// MFIE_STRING(ERP_INFO);
MFIE_STRING(RSN);
MFIE_STRING(RATES_EX);
MFIE_STRING(GENERIC);
MFIE_STRING(QOS_PARAMETER);
default:
return "UNKNOWN";
}
}
#endif
static inline void ieee80211_extract_country_ie(
struct ieee80211_device *ieee,
struct ieee80211_info_element *info_element,
struct ieee80211_network *network,
u8 * addr2
)
{
if(IS_DOT11D_ENABLE(ieee))
{
if(info_element->len!= 0)
{
memcpy(network->CountryIeBuf, info_element->data, info_element->len);
network->CountryIeLen = info_element->len;
if(!IS_COUNTRY_IE_VALID(ieee))
{
Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
}
}
//
// 070305, rcnjko: I update country IE watch dog here because
// some APs (e.g. Cisco 1242) don't include a country IE in their
// probe response frames.
//
if(IS_EQUAL_CIE_SRC(ieee, addr2) )
{
UPDATE_CIE_WATCHDOG(ieee);
}
}
}
int ieee80211_parse_info_param(struct ieee80211_device *ieee,
struct ieee80211_info_element *info_element,
u16 length,
struct ieee80211_network *network,
struct ieee80211_rx_stats *stats)
{
u8 i;
short offset;
u16 tmp_htcap_len=0;
u16 tmp_htinfo_len=0;
u16 ht_realtek_agg_len=0;
u8 ht_realtek_agg_buf[MAX_IE_LEN];
// u16 broadcom_len = 0;
#ifdef CONFIG_IEEE80211_DEBUG
char rates_str[64];
char *p;
#endif
while (length >= sizeof(*info_element)) {
if (sizeof(*info_element) + info_element->len > length) {
IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
"info_element->len + 2 > left : "
"info_element->len+2=%zd left=%d, id=%d.\n",
info_element->len +
sizeof(*info_element),
length, info_element->id);
/* We stop processing but don't return an error here
* because some misbehaving APs break this rule, e.g. the
* Orinoco AP1000. */
break;
}
switch (info_element->id) {
case MFIE_TYPE_SSID:
if (ieee80211_is_empty_essid(info_element->data,
info_element->len)) {
network->flags |= NETWORK_EMPTY_ESSID;
break;
}
network->ssid_len = min(info_element->len,
(u8) IW_ESSID_MAX_SIZE);
memcpy(network->ssid, info_element->data, network->ssid_len);
if (network->ssid_len < IW_ESSID_MAX_SIZE)
memset(network->ssid + network->ssid_len, 0,
IW_ESSID_MAX_SIZE - network->ssid_len);
IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
network->ssid, network->ssid_len);
break;
case MFIE_TYPE_RATES:
#ifdef CONFIG_IEEE80211_DEBUG
p = rates_str;
#endif
network->rates_len = min(info_element->len,
MAX_RATES_LENGTH);
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
p += snprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
#endif
if (ieee80211_is_ofdm_rate
(info_element->data[i])) {
network->flags |= NETWORK_HAS_OFDM;
if (info_element->data[i] &
IEEE80211_BASIC_RATE_MASK)
network->flags &=
~NETWORK_HAS_CCK;
}
}
IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
rates_str, network->rates_len);
break;
case MFIE_TYPE_RATES_EX:
#ifdef CONFIG_IEEE80211_DEBUG
p = rates_str;
#endif
network->rates_ex_len = min(info_element->len,
MAX_RATES_EX_LENGTH);
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
p += snprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
#endif
if (ieee80211_is_ofdm_rate
(info_element->data[i])) {
network->flags |= NETWORK_HAS_OFDM;
if (info_element->data[i] &
IEEE80211_BASIC_RATE_MASK)
network->flags &=
~NETWORK_HAS_CCK;
}
}
IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
rates_str, network->rates_ex_len);
break;
case MFIE_TYPE_DS_SET:
IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
info_element->data[0]);
network->channel = info_element->data[0];
break;
case MFIE_TYPE_FH_SET:
IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
break;
case MFIE_TYPE_CF_SET:
IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
break;
case MFIE_TYPE_TIM:
if(info_element->len < 4)
break;
network->tim.tim_count = info_element->data[0];
network->tim.tim_period = info_element->data[1];
network->dtim_period = info_element->data[1];
if(ieee->state != IEEE80211_LINKED)
break;
network->last_dtim_sta_time[0] = stats->mac_time[0];
network->last_dtim_sta_time[1] = stats->mac_time[1];
network->dtim_data = IEEE80211_DTIM_VALID;
if(info_element->data[0] != 0)
break;
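// Annotation (added): data[2] is the TIM Bitmap Control byte --
// bit 0 is the multicast indicator, bits 1-7 give the bitmap
// offset in 2-byte units. Worked example: with assoc_id 17 and a
// bitmap offset of 2 bytes, the station's bit is in byte
// data[3 + 17/8 - 2] = data[3], at bit position 17 % 8 = 1.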
if(info_element->data[2] & 1)
network->dtim_data |= IEEE80211_DTIM_MBCAST;
offset = (info_element->data[2] >> 1)*2;
//printk("offset1:%x aid:%x\n",offset, ieee->assoc_id);
if(ieee->assoc_id < 8*offset ||
ieee->assoc_id > 8*(offset + info_element->len -3))
break;
offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ;
if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8)))
network->dtim_data |= IEEE80211_DTIM_UCAST;
//IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n");
break;
case MFIE_TYPE_ERP:
network->erp_value = info_element->data[0];
network->flags |= NETWORK_HAS_ERP_VALUE;
IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
network->erp_value);
break;
case MFIE_TYPE_IBSS_SET:
network->atim_window = info_element->data[0];
IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
network->atim_window);
break;
case MFIE_TYPE_CHALLENGE:
IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
break;
case MFIE_TYPE_GENERIC:
IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
info_element->len);
if (!ieee80211_parse_qos_info_param_IE(info_element,
network))
break;
if (info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x50 &&
info_element->data[2] == 0xf2 &&
info_element->data[3] == 0x01) {
network->wpa_ie_len = min(info_element->len + 2,
MAX_WPA_IE_LEN);
memcpy(network->wpa_ie, info_element,
network->wpa_ie_len);
break;
}
#ifdef THOMAS_TURBO
if (info_element->len == 7 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0xe0 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x01 &&
info_element->data[4] == 0x02) {
network->Turbo_Enable = 1;
}
#endif
//for HTcap and HTinfo parameters
if(tmp_htcap_len == 0){
if(info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x90 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x033){
tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
if(tmp_htcap_len != 0){
network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
}
}
if(tmp_htcap_len != 0)
network->bssht.bdSupportHT = true;
else
network->bssht.bdSupportHT = false;
}
if(tmp_htinfo_len == 0){
if(info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x90 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x034){
tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
if(tmp_htinfo_len != 0){
network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
if(tmp_htinfo_len){
network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\
sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len;
memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen);
}
}
}
}
if(ieee->aggregation){
if(network->bssht.bdSupportHT){
if(info_element->len >= 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0xe0 &&
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x02){
ht_realtek_agg_len = min(info_element->len,(u8)MAX_IE_LEN);
memcpy(ht_realtek_agg_buf,info_element->data,info_element->len);
}
if(ht_realtek_agg_len >= 5){
network->bssht.bdRT2RTAggregation = true;
if((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02))
network->bssht.bdRT2RTLongSlotTime = true;
}
}
}
//if(tmp_htcap_len !=0 || tmp_htinfo_len != 0)
{
if((info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x05 &&
info_element->data[2] == 0xb5) ||
(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x0a &&
info_element->data[2] == 0xf7) ||
(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x10 &&
info_element->data[2] == 0x18)){
network->broadcom_cap_exist = true;
}
}
if(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x0c &&
info_element->data[2] == 0x43)
{
network->ralink_cap_exist = true;
}
else
network->ralink_cap_exist = false;
//added by amy for atheros AP
if((info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x03 &&
info_element->data[2] == 0x7f) ||
(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x13 &&
info_element->data[2] == 0x74))
{
printk("========>%s(): athros AP is exist\n",__FUNCTION__);
network->atheros_cap_exist = true;
}
else
network->atheros_cap_exist = false;
if(info_element->len >= 3 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96)
{
network->cisco_cap_exist = true;
}
else
network->cisco_cap_exist = false;
//added by amy for LEAP of cisco
if(info_element->len > 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96 &&
info_element->data[3] == 0x01)
{
if(info_element->len == 6)
{
memcpy(network->CcxRmState, &info_element->data[4], 2);
if(network->CcxRmState[0] != 0)
{
network->bCcxRmEnable = true;
}
else
network->bCcxRmEnable = false;
//
// CCXv4 Table 59-1 MBSSID Masks.
//
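// Annotation (added): e.g. CcxRmState[1] & 0x07 == 3 gives
// MBssidMask = (u8)(0xff << 3) = 0xf8, clearing the low 3 bits
// of the last BSSID octet to derive the root MBSSID address.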
network->MBssidMask = network->CcxRmState[1] & 0x07;
if(network->MBssidMask != 0)
{
network->bMBssidValid = true;
network->MBssidMask = 0xff << (network->MBssidMask);
cpMacAddr(network->MBssid, network->bssid);
network->MBssid[5] &= network->MBssidMask;
}
else
{
network->bMBssidValid = false;
}
}
else
{
network->bCcxRmEnable = false;
}
}
if(info_element->len > 4 &&
info_element->data[0] == 0x00 &&
info_element->data[1] == 0x40 &&
info_element->data[2] == 0x96 &&
info_element->data[3] == 0x03)
{
if(info_element->len == 5)
{
network->bWithCcxVerNum = true;
network->BssCcxVerNumber = info_element->data[4];
}
else
{
network->bWithCcxVerNum = false;
network->BssCcxVerNumber = 0;
}
}
break;
case MFIE_TYPE_RSN:
IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
info_element->len);
network->rsn_ie_len = min(info_element->len + 2,
MAX_WPA_IE_LEN);
memcpy(network->rsn_ie, info_element,
network->rsn_ie_len);
break;
//HT related element.
case MFIE_TYPE_HT_CAP:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
info_element->len);
tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN);
if(tmp_htcap_len != 0){
network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\
sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len;
memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen);
//If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT()
// the Windows driver updates WMM parameters on each received beacon once connected;
// the Linux driver is a bit different.
network->bssht.bdSupportHT = true;
}
else
network->bssht.bdSupportHT = false;
break;
case MFIE_TYPE_HT_INFO:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n",
info_element->len);
tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN);
if(tmp_htinfo_len){
network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE;
network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\
sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len;
memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen);
}
break;
case MFIE_TYPE_AIRONET:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
info_element->len);
if(info_element->len >IE_CISCO_FLAG_POSITION)
{
network->bWithAironetIE = true;
// CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23):
// "A Cisco access point advertises support for CKIP in beacon and probe response packets,
// by adding an Aironet element and setting one or both of the CKIP negotiation bits."
if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) ||
(info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) )
{
network->bCkipSupported = true;
}
else
{
network->bCkipSupported = false;
}
}
else
{
network->bWithAironetIE = false;
network->bCkipSupported = false;
}
break;
case MFIE_TYPE_QOS_PARAMETER:
printk(KERN_ERR
"QoS Error need to parse QOS_PARAMETER IE\n");
break;
case MFIE_TYPE_COUNTRY:
IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
info_element->len);
//printk("=====>Receive <%s> Country IE\n",network->ssid);
ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP
break;
/* TODO */
default:
IEEE80211_DEBUG_MGMT
("Unsupported info element: %s (%d)\n",
get_info_element_string(info_element->id),
info_element->id);
break;
}
length -= sizeof(*info_element) + info_element->len;
info_element =
(struct ieee80211_info_element *)&info_element->
data[info_element->len];
}
if(!network->atheros_cap_exist && !network->broadcom_cap_exist &&
!network->cisco_cap_exist && !network->ralink_cap_exist && !network->bssht.bdRT2RTAggregation)
{
network->unknown_cap_exist = true;
}
else
{
network->unknown_cap_exist = false;
}
return 0;
}
static inline u8 ieee80211_SignalStrengthTranslate(
u8 CurrSS
)
{
u8 RetSS;
// Step 1. Scale mapping.
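// Annotation (added): the mapping is piecewise linear, compressing
// 71..100 into 90..100 and stretching the low end; e.g. CurrSS == 85
// yields 90 + (85 - 70) / 3 = 95.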
if(CurrSS >= 71 && CurrSS <= 100)
{
RetSS = 90 + ((CurrSS - 70) / 3);
}
else if(CurrSS >= 41 && CurrSS <= 70)
{
RetSS = 78 + ((CurrSS - 40) / 3);
}
else if(CurrSS >= 31 && CurrSS <= 40)
{
RetSS = 66 + (CurrSS - 30);
}
else if(CurrSS >= 21 && CurrSS <= 30)
{
RetSS = 54 + (CurrSS - 20);
}
else if(CurrSS >= 5 && CurrSS <= 20)
{
RetSS = 42 + (((CurrSS - 5) * 2) / 3);
}
else if(CurrSS == 4)
{
RetSS = 36;
}
else if(CurrSS == 3)
{
RetSS = 27;
}
else if(CurrSS == 2)
{
RetSS = 18;
}
else if(CurrSS == 1)
{
RetSS = 9;
}
else
{
RetSS = CurrSS;
}
//RT_TRACE(COMP_DBG, DBG_LOUD, ("##### After Mapping: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));
// Step 2. Smoothing.
//RT_TRACE(COMP_DBG, DBG_LOUD, ("$$$$$ After Smoothing: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));
return RetSS;
}
long ieee80211_translate_todbm(u8 signal_strength_index )// 0-100 index.
{
long signal_power; // in dBm.
// Translate to dBm (x=0.5y-95).
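// Annotation (added): (index + 1) >> 1 halves the index with rounding
// up, so index 60 maps to 30 - 95 = -65 dBm and index 0 maps to -95 dBm.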
signal_power = (long)((signal_strength_index + 1) >> 1);
signal_power -= 95;
return signal_power;
}
static inline int ieee80211_network_init(
struct ieee80211_device *ieee,
struct ieee80211_probe_response *beacon,
struct ieee80211_network *network,
struct ieee80211_rx_stats *stats)
{
#ifdef CONFIG_IEEE80211_DEBUG
//char rates_str[64];
//char *p;
#endif
network->qos_data.active = 0;
network->qos_data.supported = 0;
network->qos_data.param_count = 0;
network->qos_data.old_param_count = 0;
/* Pull out fixed field data */
memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
network->capability = le16_to_cpu(beacon->capability);
network->last_scanned = jiffies;
network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
network->beacon_interval = le32_to_cpu(beacon->beacon_interval);
/* Where to pull this? beacon->listen_interval;*/
network->listen_interval = 0x0A;
network->rates_len = network->rates_ex_len = 0;
network->last_associate = 0;
network->ssid_len = 0;
network->flags = 0;
network->atim_window = 0;
network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
0x3 : 0x0;
network->berp_info_valid = false;
network->broadcom_cap_exist = false;
network->ralink_cap_exist = false;
network->atheros_cap_exist = false;
network->cisco_cap_exist = false;
network->unknown_cap_exist = false;
#ifdef THOMAS_TURBO
network->Turbo_Enable = 0;
#endif
network->CountryIeLen = 0;
memset(network->CountryIeBuf, 0, MAX_IE_LEN);
//Initialize HT parameters
//ieee80211_ht_initialize(&network->bssht);
HTInitializeBssDesc(&network->bssht);
if (stats->freq == IEEE80211_52GHZ_BAND) {
/* for A band (No DS info) */
network->channel = stats->received_channel;
} else
network->flags |= NETWORK_HAS_CCK;
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
if (ieee80211_parse_info_param
(ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats))
return 1;
network->mode = 0;
if (stats->freq == IEEE80211_52GHZ_BAND)
network->mode = IEEE_A;
else {
if (network->flags & NETWORK_HAS_OFDM)
network->mode |= IEEE_G;
if (network->flags & NETWORK_HAS_CCK)
network->mode |= IEEE_B;
}
if (network->mode == 0) {
IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' "
"network.\n",
escape_essid(network->ssid,
network->ssid_len),
network->bssid);
return 1;
}
if(network->bssht.bdSupportHT){
if(network->mode == IEEE_A)
network->mode = IEEE_N_5G;
else if(network->mode & (IEEE_G | IEEE_B))
network->mode = IEEE_N_24G;
}
if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
network->flags |= NETWORK_EMPTY_ESSID;
stats->signal = 30 + (stats->SignalStrength * 70) / 100;
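// Annotation (added): rescales the 0-100 SignalStrength into the
// 30-100 band; the noise estimate below is then derived from the
// complement (100 - signal) of that value.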
//stats->signal = ieee80211_SignalStrengthTranslate(stats->signal);
stats->noise = ieee80211_translate_todbm((u8)(100-stats->signal)) -25;
memcpy(&network->stats, stats, sizeof(network->stats));
return 0;
}
static inline int is_same_network(struct ieee80211_network *src,
struct ieee80211_network *dst, struct ieee80211_device* ieee)
{
/* A network is only a duplicate if the channel, BSSID, ESSID
* and the capability field (in particular IBSS and BSS) all match.
* We treat all <hidden> with the same BSSID and channel
* as one network */
return //((src->ssid_len == dst->ssid_len) &&
(((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) &&
(src->channel == dst->channel) &&
!memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
//!memcmp(src->ssid, dst->ssid, src->ssid_len) &&
(!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) &&
((src->capability & WLAN_CAPABILITY_IBSS) ==
(dst->capability & WLAN_CAPABILITY_IBSS)) &&
((src->capability & WLAN_CAPABILITY_BSS) ==
(dst->capability & WLAN_CAPABILITY_BSS)));
}
static inline void update_network(struct ieee80211_network *dst,
struct ieee80211_network *src)
{
int qos_active;
u8 old_param;
memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
dst->capability = src->capability;
memcpy(dst->rates, src->rates, src->rates_len);
dst->rates_len = src->rates_len;
memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
dst->rates_ex_len = src->rates_ex_len;
if(src->ssid_len > 0)
{
memset(dst->ssid, 0, dst->ssid_len);
dst->ssid_len = src->ssid_len;
memcpy(dst->ssid, src->ssid, src->ssid_len);
}
dst->mode = src->mode;
dst->flags = src->flags;
dst->time_stamp[0] = src->time_stamp[0];
dst->time_stamp[1] = src->time_stamp[1];
if (src->flags & NETWORK_HAS_ERP_VALUE)
{
dst->erp_value = src->erp_value;
dst->berp_info_valid = src->berp_info_valid = true;
}
dst->beacon_interval = src->beacon_interval;
dst->listen_interval = src->listen_interval;
dst->atim_window = src->atim_window;
dst->dtim_period = src->dtim_period;
dst->dtim_data = src->dtim_data;
dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0];
dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1];
memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters));
dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen;
memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen);
dst->bssht.bdHTInfoLen= src->bssht.bdHTInfoLen;
memcpy(dst->bssht.bdHTInfoBuf,src->bssht.bdHTInfoBuf,src->bssht.bdHTInfoLen);
dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer;
dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime;
dst->broadcom_cap_exist = src->broadcom_cap_exist;
dst->ralink_cap_exist = src->ralink_cap_exist;
dst->atheros_cap_exist = src->atheros_cap_exist;
dst->cisco_cap_exist = src->cisco_cap_exist;
dst->unknown_cap_exist = src->unknown_cap_exist;
memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
dst->wpa_ie_len = src->wpa_ie_len;
memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
dst->rsn_ie_len = src->rsn_ie_len;
dst->last_scanned = jiffies;
/* qos related parameters */
//qos_active = src->qos_data.active;
qos_active = dst->qos_data.active;
//old_param = dst->qos_data.old_param_count;
old_param = dst->qos_data.param_count;
if(dst->flags & NETWORK_HAS_QOS_MASK)
memcpy(&dst->qos_data, &src->qos_data,
sizeof(struct ieee80211_qos_data));
else {
dst->qos_data.supported = src->qos_data.supported;
dst->qos_data.param_count = src->qos_data.param_count;
}
if(dst->qos_data.supported == 1) {
dst->QoS_Enable = 1;
if(dst->ssid_len)
IEEE80211_DEBUG_QOS
("QoS the network %s is QoS supported\n",
dst->ssid);
else
IEEE80211_DEBUG_QOS
("QoS the network is QoS supported\n");
}
dst->qos_data.active = qos_active;
dst->qos_data.old_param_count = old_param;
/* dst->last_associate is not overwritten */
dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame.
if(src->wmm_param[0].ac_aci_acm_aifsn|| \
src->wmm_param[1].ac_aci_acm_aifsn|| \
src->wmm_param[2].ac_aci_acm_aifsn|| \
src->wmm_param[3].ac_aci_acm_aifsn) {
memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
}
//dst->QoS_Enable = src->QoS_Enable;
#ifdef THOMAS_TURBO
dst->Turbo_Enable = src->Turbo_Enable;
#endif
dst->CountryIeLen = src->CountryIeLen;
memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
//added by amy for LEAP
dst->bWithAironetIE = src->bWithAironetIE;
dst->bCkipSupported = src->bCkipSupported;
memcpy(dst->CcxRmState,src->CcxRmState,2);
dst->bCcxRmEnable = src->bCcxRmEnable;
dst->MBssidMask = src->MBssidMask;
dst->bMBssidValid = src->bMBssidValid;
memcpy(dst->MBssid,src->MBssid,6);
dst->bWithCcxVerNum = src->bWithCcxVerNum;
dst->BssCcxVerNumber = src->BssCcxVerNumber;
}
static inline int is_beacon(__le16 fc)
{
return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
}
static inline void ieee80211_process_probe_response(
struct ieee80211_device *ieee,
struct ieee80211_probe_response *beacon,
struct ieee80211_rx_stats *stats)
{
struct ieee80211_network network;
struct ieee80211_network *target;
struct ieee80211_network *oldest = NULL;
#ifdef CONFIG_IEEE80211_DEBUG
struct ieee80211_info_element *info_element = &beacon->info_element[0];
#endif
unsigned long flags;
short renew;
//u8 wmm_info;
memset(&network, 0, sizeof(struct ieee80211_network));
IEEE80211_DEBUG_SCAN(
"'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
escape_essid(info_element->data, info_element->len),
beacon->header.addr3,
(beacon->capability & (1<<0xf)) ? '1' : '0',
(beacon->capability & (1<<0xe)) ? '1' : '0',
(beacon->capability & (1<<0xd)) ? '1' : '0',
(beacon->capability & (1<<0xc)) ? '1' : '0',
(beacon->capability & (1<<0xb)) ? '1' : '0',
(beacon->capability & (1<<0xa)) ? '1' : '0',
(beacon->capability & (1<<0x9)) ? '1' : '0',
(beacon->capability & (1<<0x8)) ? '1' : '0',
(beacon->capability & (1<<0x7)) ? '1' : '0',
(beacon->capability & (1<<0x6)) ? '1' : '0',
(beacon->capability & (1<<0x5)) ? '1' : '0',
(beacon->capability & (1<<0x4)) ? '1' : '0',
(beacon->capability & (1<<0x3)) ? '1' : '0',
(beacon->capability & (1<<0x2)) ? '1' : '0',
(beacon->capability & (1<<0x1)) ? '1' : '0',
(beacon->capability & (1<<0x0)) ? '1' : '0');
if (ieee80211_network_init(ieee, beacon, &network, stats)) {
IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
escape_essid(info_element->data,
info_element->len),
beacon->header.addr3,
WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
return;
}
// Per an Asus EeePC request:
// (1) if the wireless adapter receives any 802.11d country code in an AP beacon,
// it should follow that country code.
// (2) If there is no country code in the beacon,
// then the wireless adapter should do an active scan on ch1~11 and
// a passive scan on ch12~14
if( !IsLegalChannel(ieee, network.channel) )
return;
if(ieee->bGlobalDomain)
{
if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP)
{
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
if( !IsLegalChannel(ieee, network.channel) )
{
printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel);
return;
}
}
// Case 2: No country code.
else
{
// Filter over channel ch12~14
if(network.channel > 11)
{
printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel);
return;
}
}
}
else
{
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
{
if( !IsLegalChannel(ieee, network.channel) )
{
printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel);
return;
}
}
// Case 2: No country code.
else
{
// Filter over channel ch12~14
if(network.channel > 14)
{
printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel);
return;
}
}
}
}
/* The network parsed correctly -- so now we scan our known networks
* to see if we can find it in our list.
*
* NOTE: This search is definitely not optimized. Once it's doing
* the "right thing" we'll optimize it for efficiency if
* necessary */
/* Search for this entry in the list and update it if it is
* already there. */
spin_lock_irqsave(&ieee->lock, flags);
if(is_same_network(&ieee->current_network, &network, ieee)) {
update_network(&ieee->current_network, &network);
if((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G)
&& ieee->current_network.berp_info_valid){
if(ieee->current_network.erp_value& ERP_UseProtection)
ieee->current_network.buseprotection = true;
else
ieee->current_network.buseprotection = false;
}
if(is_beacon(beacon->header.frame_ctl))
{
if(ieee->state == IEEE80211_LINKED)
ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
}
else //hidden AP
network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags);
}
list_for_each_entry(target, &ieee->network_list, list) {
if (is_same_network(target, &network, ieee))
break;
if ((oldest == NULL) ||
(target->last_scanned < oldest->last_scanned))
oldest = target;
}
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information */
if (&target->list == &ieee->network_list) {
if (list_empty(&ieee->network_free_list)) {
/* If there are no more slots, expire the oldest */
list_del(&oldest->list);
target = oldest;
IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from "
"network list.\n",
escape_essid(target->ssid,
target->ssid_len),
target->bssid);
} else {
/* Otherwise just pull from the free list */
target = list_entry(ieee->network_free_list.next,
struct ieee80211_network, list);
list_del(ieee->network_free_list.next);
}
#ifdef CONFIG_IEEE80211_DEBUG
IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n",
escape_essid(network.ssid,
network.ssid_len),
network.bssid,
WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
#endif
memcpy(target, &network, sizeof(*target));
list_add_tail(&target->list, &ieee->network_list);
if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
ieee80211_softmac_new_net(ieee,&network);
} else {
IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
escape_essid(target->ssid,
target->ssid_len),
target->bssid,
WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
/* we have an entry and we are going to update it. But this entry may
* already be expired. In this case we do the same as if we found a new
* net and call the new_net handler
*/
renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
//YJ,add,080819,for hidden ap
if(is_beacon(beacon->header.frame_ctl) == 0)
network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags);
//if(strncmp(network.ssid, "linksys-c",9) == 0)
// printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags);
if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
&& (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\
||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK))))
renew = 1;
//YJ,add,080819,for hidden ap,end
update_network(target, &network);
if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
ieee80211_softmac_new_net(ieee,&network);
}
spin_unlock_irqrestore(&ieee->lock, flags);
if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\
(ieee->state == IEEE80211_LINKED)) {
if(ieee->handle_beacon != NULL) {
ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network);
}
}
}
void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats)
{
switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
case IEEE80211_STYPE_BEACON:
IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
WLAN_FC_GET_STYPE(header->frame_ctl));
IEEE80211_DEBUG_SCAN("Beacon\n");
ieee80211_process_probe_response(
ieee, (struct ieee80211_probe_response *)header, stats);
break;
case IEEE80211_STYPE_PROBE_RESP:
IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
WLAN_FC_GET_STYPE(header->frame_ctl));
IEEE80211_DEBUG_SCAN("Probe response\n");
ieee80211_process_probe_response(
ieee, (struct ieee80211_probe_response *)header, stats);
break;
}
}
EXPORT_SYMBOL(ieee80211_rx_mgt);
EXPORT_SYMBOL(ieee80211_rx);
| gpl-2.0 |
Tegra4/android_kernel_hp_phobos-old | drivers/target/target_core_iblock.c | 4802 | 17644 | /*******************************************************************************
* Filename: target_core_iblock.c
*
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
*
*
*/
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct iblock_dev *ib_dev = NULL;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
if (!ib_dev) {
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return ib_dev;
}
static struct se_device *iblock_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
struct iblock_dev *ib_dev = p;
struct se_device *dev;
struct se_dev_limits dev_limits;
struct block_device *bd = NULL;
struct request_queue *q;
struct queue_limits *limits;
u32 dev_flags = 0;
int ret = -EINVAL;
if (!ib_dev) {
pr_err("Unable to locate struct iblock_dev parameter\n");
return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
if (!ib_dev->ibd_bio_set) {
pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
}
pr_debug("IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
*/
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto failed;
}
/*
* Setup the local scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
*/
q = bdev_get_queue(bd);
limits = &dev_limits.limits;
limits->logical_block_size = bdev_logical_block_size(bd);
limits->max_hw_sectors = UINT_MAX;
limits->max_sectors = UINT_MAX;
dev_limits.hw_queue_depth = q->nr_requests;
dev_limits.queue_depth = q->nr_requests;
ib_dev->ibd_bd = bd;
dev = transport_add_device_to_core_hba(hba,
&iblock_template, se_dev, dev_flags, ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
if (!dev)
goto failed;
/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
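/* Annotation (added): discard_granularity is reported in bytes by
* the block layer, so the >> 9 below converts it to 512-byte
* sectors for the UNMAP granularity attribute. */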
if (blk_queue_discard(q)) {
dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
dev->se_sub_dev->se_dev_attrib.unmap_granularity =
q->limits.discard_granularity >> 9;
dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
if (blk_queue_nonrot(q))
dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
return dev;
failed:
if (ib_dev->ibd_bio_set) {
bioset_free(ib_dev->ibd_bio_set);
ib_dev->ibd_bio_set = NULL;
}
ib_dev->ibd_bd = NULL;
return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
{
struct iblock_dev *ib_dev = p;
if (ib_dev->ibd_bd != NULL)
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
if (ib_dev->ibd_bio_set != NULL)
bioset_free(ib_dev->ibd_bio_set);
kfree(ib_dev);
}
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ib_req) {
pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}
atomic_set(&ib_req->pending, 1);
return &ib_req->ib_task;
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
struct se_device *dev,
struct block_device *bd,
struct request_queue *q)
{
unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
return blocks_long;
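/* Annotation (added): the switch ladder below rescales the count by
* the ratio of the two block sizes, e.g. a device with 512-byte
* sectors exported with a 4096-byte logical block size has 1/8 as
* many addressable blocks (blocks_long >>= 3). */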
switch (block_size) {
case 4096:
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
case 1024:
blocks_long <<= 2;
break;
case 512:
blocks_long <<= 3;
default:
break;
}
break;
case 2048:
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
case 1024:
blocks_long <<= 1;
break;
case 512:
blocks_long <<= 2;
break;
default:
break;
}
break;
case 1024:
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
case 2048:
blocks_long >>= 1;
break;
case 512:
blocks_long <<= 1;
break;
default:
break;
}
break;
case 512:
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
case 2048:
blocks_long >>= 2;
break;
case 1024:
blocks_long >>= 1;
break;
default:
break;
}
break;
default:
break;
}
return blocks_long;
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
struct se_cmd *cmd = bio->bi_private;
if (err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
if (cmd)
transport_complete_sync_cache(cmd, err == 0);
bio_put(bio);
}
/*
* Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
struct bio *bio;
/*
* If the Immediate bit is set, queue up the GOOD response
* for this SYNCHRONIZE_CACHE op.
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
if (!immed)
bio->bi_private = cmd;
submit_bio(WRITE_FLUSH, bio);
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
struct iblock_dev *ibd = dev->dev_ptr;
struct block_device *bd = ibd->ibd_bd;
int barrier = 0;
return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
kfree(IBLOCK_REQ(task));
}
enum {
Opt_udev_path, Opt_force, Opt_err
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
{Opt_force, "force=%d"},
{Opt_err, NULL}
};
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page, ssize_t count)
{
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
}
arg_p = match_strdup(&args[0]);
if (!arg_p) {
ret = -ENOMEM;
break;
}
snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", arg_p);
kfree(arg_p);
pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_force:
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t iblock_check_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev)
{
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
pr_err("Missing udev_path= parameters for IBLOCK\n");
return -EINVAL;
}
return 0;
}
static ssize_t iblock_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
struct block_device *bd = ibd->ibd_bd;
char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
bl += sprintf(b + bl, " UDEV PATH: %s\n",
ibd->ibd_udev_path);
} else
bl += sprintf(b + bl, "\n");
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
return bl;
}
static void iblock_bio_destructor(struct bio *bio)
{
struct se_task *task = bio->bi_private;
struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio;
/*
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
if (sg_num > BIO_MAX_PAGES)
sg_num = BIO_MAX_PAGES;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = task;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->pending);
pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
return bio;
}
static void iblock_submit_bios(struct bio_list *list, int rw)
{
struct blk_plug plug;
struct bio *bio;
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
submit_bio(rw, bio);
blk_finish_plug(&plug);
}
static int iblock_do_task(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr = IBLOCK_REQ(task);
struct bio *bio;
struct bio_list list;
struct scatterlist *sg;
u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
unsigned bio_cnt;
int rw;
if (task->task_data_direction == DMA_TO_DEVICE) {
/*
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
} else {
rw = READ;
}
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
*/
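/* Annotation (added): e.g. with a 4096-byte SCSI block size,
* task_lba 10 becomes 512-byte sector 80 (10 << 3); block_lba is
* later advanced by sg->length >> 9 in the same 512-byte units. */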
if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
bio_list_init(&list);
bio_list_add(&list, bio);
bio_cnt = 1;
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
* length of the S/G list entry this will cause an
* endless loop. Better hope no driver uses huge pages.
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
iblock_submit_bios(&list, rw);
bio_cnt = 0;
}
bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio)
goto fail;
bio_list_add(&list, bio);
bio_cnt++;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
}
iblock_submit_bios(&list, rw);
if (atomic_dec_and_test(&ibr->pending)) {
transport_complete_task(task,
!atomic_read(&ibr->ib_bio_err_cnt));
}
return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* reported as SPC-3 in initiator INQUIRY data */
}
static u32 iblock_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
struct iblock_dev *ibd = dev->dev_ptr;
struct block_device *bd = ibd->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
static void iblock_bio_done(struct bio *bio, int err)
{
struct se_task *task = bio->bi_private;
struct iblock_req *ibr = IBLOCK_REQ(task);
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
*/
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic_inc();
}
bio_put(bio);
if (!atomic_dec_and_test(&ibr->pending))
return;
pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba,
(unsigned long long)bio->bi_sector, err);
transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.write_cache_emulated = 1,
.fua_write_emulated = 1,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
.alloc_task = iblock_alloc_task,
.do_task = iblock_do_task,
.do_discard = iblock_do_discard,
.do_sync_cache = iblock_emulate_sync_cache,
.free_task = iblock_free_task,
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_rev = iblock_get_device_rev,
.get_device_type = iblock_get_device_type,
.get_blocks = iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
return transport_subsystem_register(&iblock_template);
}
static void iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(iblock_module_init);
module_exit(iblock_module_exit);
| gpl-2.0 |
Rashed97/android_kernel_samsung_lt03lte | drivers/net/usb/pegasus.c | 4802 | 39300 | /*
* Copyright (c) 1999-2005 Petko Manolov (petkan@users.sourceforge.net)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ChangeLog:
* .... Most of the time spent on reading sources & docs.
* v0.2.x First official release for the Linux kernel.
* v0.3.0 Beautified and structured, some bugs fixed.
* v0.3.x URBifying bulk requests and bugfixing. First relatively
* stable release. Still can touch device's registers only
* from top-halves.
* v0.4.0 Control messages remained unurbified are now URBs.
* Now we can touch the HW at any time.
* v0.4.9 Control urbs again use process context to wait. Argh...
* Some long standing bugs (enable_net_traffic) fixed.
* Also a nasty trick of resubmitting the control urb from
* interrupt context is used. Please let me know how it
* behaves. Pegasus II support added since this version.
* TODO: suppress HCD warning spewage on disconnect.
* v0.4.13 Ethernet address is now set at probe(), not at open()
* time as this seems to break dhcpd.
* v0.5.0 branch to 2.5.x kernels
* v0.5.1 ethtool support added
* v0.5.5 rx socket buffers are in a pool and their allocation
* is out of the interrupt routine.
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include "pegasus.h"
/*
* Version Information
*/
#define DRIVER_VERSION "v0.6.14 (2006/09/27)"
#define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>"
#define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver"
static const char driver_name[] = "pegasus";
#undef PEGASUS_WRITE_EEPROM
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
static bool loopback;
static bool mii_mode;
static char *devid;
static struct usb_eth_dev usb_dev_id[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
{.name = pn, .vendor = vid, .device = pid, .private = flags},
#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
PEGASUS_DEV(pn, vid, pid, flags)
#include "pegasus.h"
#undef PEGASUS_DEV
#undef PEGASUS_DEV_CLASS
{NULL, 0, 0, 0},
{NULL, 0, 0, 0}
};
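/* Annotation (added, not in the original source): pegasus.h is
* included several times with PEGASUS_DEV redefined before each
* inclusion -- the classic X-macro trick that expands one device
* list into both usb_dev_id[] above and pegasus_ids[] below. */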
static struct usb_device_id pegasus_ids[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
{.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid},
/*
* The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product
* IDs as the Belkin F5D5050, so we need to teach the pegasus driver to
* ignore adaptors belonging to the "Wireless" class 0xE0. For this one
* case anyway, seeing as the pegasus is for "Wired" adaptors.
*/
#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
{.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \
.idVendor = vid, .idProduct = pid, .bDeviceClass = dclass},
#include "pegasus.h"
#undef PEGASUS_DEV
#undef PEGASUS_DEV_CLASS
{},
{}
};
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(loopback, bool, 0);
module_param(mii_mode, bool, 0);
module_param(devid, charp, 0);
MODULE_PARM_DESC(loopback, "Enable MAC loopback mode (bit 0)");
MODULE_PARM_DESC(mii_mode, "Enable HomePNA mode (bit 0),default=MII mode = 0");
MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'");
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
MODULE_DEVICE_TABLE(usb, pegasus_ids);
static const struct net_device_ops pegasus_netdev_ops;
static int update_eth_regs_async(pegasus_t *);
/* Aargh!!! I _really_ hate such tweaks */
static void ctrl_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
int status = urb->status;
if (!pegasus)
return;
switch (status) {
case 0:
if (pegasus->flags & ETH_REGS_CHANGE) {
pegasus->flags &= ~ETH_REGS_CHANGE;
pegasus->flags |= ETH_REGS_CHANGED;
update_eth_regs_async(pegasus);
return;
}
break;
case -EINPROGRESS:
return;
case -ENOENT:
break;
default:
if (net_ratelimit())
netif_dbg(pegasus, drv, pegasus->net,
"%s, status %d\n", __func__, status);
break;
}
pegasus->flags &= ~ETH_REGS_CHANGED;
wake_up(&pegasus->ctrl_wait);
}
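/* Annotation (added): ctrl_callback() plus the ETH_REGS_CHANGE /
* ETH_REGS_CHANGED flags implement the changelog's "nasty trick" --
* a pending register update is resubmitted straight from the URB
* completion handler, while set_registers()/get_registers() sleep
* on ctrl_wait until the flags clear before reusing the single
* control URB. */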
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
void *data)
{
int ret;
char *buffer;
DECLARE_WAITQUEUE(wait, current);
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
schedule();
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
pegasus->dr.bRequestType = PEGASUS_REQT_READ;
pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(indx);
pegasus->dr.wLength = cpu_to_le16(size);
pegasus->ctrl_urb->transfer_buffer_length = size;
usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
usb_rcvctrlpipe(pegasus->usb, 0),
(char *) &pegasus->dr,
buffer, size, ctrl_callback, pegasus);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
/* using ATOMIC, we'd never wake up if we slept */
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
set_current_state(TASK_RUNNING);
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
if (net_ratelimit())
netif_err(pegasus, drv, pegasus->net,
"%s, status %d\n", __func__, ret);
goto out;
}
schedule();
out:
remove_wait_queue(&pegasus->ctrl_wait, &wait);
memcpy(data, buffer, size);
kfree(buffer);
return ret;
}
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
void *data)
{
int ret;
char *buffer;
DECLARE_WAITQUEUE(wait, current);
buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
schedule();
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(indx);
pegasus->dr.wLength = cpu_to_le16(size);
pegasus->ctrl_urb->transfer_buffer_length = size;
usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
usb_sndctrlpipe(pegasus->usb, 0),
(char *) &pegasus->dr,
buffer, size, ctrl_callback, pegasus);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
netif_err(pegasus, drv, pegasus->net,
"%s, status %d\n", __func__, ret);
goto out;
}
schedule();
out:
remove_wait_queue(&pegasus->ctrl_wait, &wait);
kfree(buffer);
return ret;
}
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
int ret;
char *tmp;
DECLARE_WAITQUEUE(wait, current);
tmp = kmemdup(&data, 1, GFP_KERNEL);
if (!tmp) {
netif_warn(pegasus, drv, pegasus->net,
"out of memory in %s\n", __func__);
return -ENOMEM;
}
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pegasus->flags & ETH_REGS_CHANGED)
schedule();
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
pegasus->dr.wValue = cpu_to_le16(data);
pegasus->dr.wIndex = cpu_to_le16(indx);
pegasus->dr.wLength = cpu_to_le16(1);
pegasus->ctrl_urb->transfer_buffer_length = 1;
usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
usb_sndctrlpipe(pegasus->usb, 0),
(char *) &pegasus->dr,
tmp, 1, ctrl_callback, pegasus);
add_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
if (net_ratelimit())
netif_err(pegasus, drv, pegasus->net,
"%s, status %d\n", __func__, ret);
goto out;
}
schedule();
out:
remove_wait_queue(&pegasus->ctrl_wait, &wait);
kfree(tmp);
return ret;
}
static int update_eth_regs_async(pegasus_t *pegasus)
{
int ret;
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb->transfer_buffer_length = 3;
usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb,
usb_sndctrlpipe(pegasus->usb, 0),
(char *) &pegasus->dr,
pegasus->eth_regs, 3, ctrl_callback, pegasus);
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
netif_err(pegasus, drv, pegasus->net,
"%s, status %d\n", __func__, ret);
}
return ret;
}
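/*
 * MII access is a two-step handshake: load the PHY address, register
 * index (and, for writes, the data word) into the PhyAddr block, kick
 * PhyCtrl with PHY_READ or PHY_WRITE, then poll PhyCtrl until the
 * adapter raises PHY_DONE or REG_TIMEOUT polls elapse.
 */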
/* Returns 0 on success, error on failure */
static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
{
int i;
__u8 data[4] = { phy, 0, 0, indx };
__le16 regdi;
int ret;
set_register(pegasus, PhyCtrl, 0);
set_registers(pegasus, PhyAddr, sizeof(data), data);
set_register(pegasus, PhyCtrl, (indx | PHY_READ));
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, PhyCtrl, 1, data);
if (ret == -ESHUTDOWN)
goto fail;
if (data[0] & PHY_DONE)
break;
}
if (i >= REG_TIMEOUT)
goto fail;
ret = get_registers(pegasus, PhyData, 2, &regdi);
*regd = le16_to_cpu(regdi);
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret ? ret : -ETIMEDOUT; /* ret == 0 here means PHY_DONE never arrived */
}
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
pegasus_t *pegasus = netdev_priv(dev);
u16 res = 0; /* avoid returning stack garbage if the MII read fails */
read_mii_word(pegasus, phy_id, loc, &res);
return (int)res;
}
static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd)
{
int i;
__u8 data[4] = { phy, 0, 0, indx };
int ret;
data[1] = (u8) regd;
data[2] = (u8) (regd >> 8);
set_register(pegasus, PhyCtrl, 0);
set_registers(pegasus, PhyAddr, sizeof(data), data);
set_register(pegasus, PhyCtrl, (indx | PHY_WRITE));
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, PhyCtrl, 1, data);
if (ret == -ESHUTDOWN)
goto fail;
if (data[0] & PHY_DONE)
break;
}
if (i >= REG_TIMEOUT)
goto fail;
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
{
pegasus_t *pegasus = netdev_priv(dev);
write_mii_word(pegasus, phy_id, loc, val);
}
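/*
 * EEPROM reads follow the same pattern: select the word offset, start
 * the cycle via EpromCtrl and poll for EPROM_DONE before fetching the
 * 16-bit result from EpromData.
 */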
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
__u8 tmp;
__le16 retdatai;
int ret;
set_register(pegasus, EpromCtrl, 0);
set_register(pegasus, EpromOffset, index);
set_register(pegasus, EpromCtrl, EPROM_READ);
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
if (tmp & EPROM_DONE)
break;
if (ret == -ESHUTDOWN)
goto fail;
}
if (i >= REG_TIMEOUT)
goto fail;
ret = get_registers(pegasus, EpromData, 2, &retdatai);
*retdata = le16_to_cpu(retdatai);
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
#ifdef PEGASUS_WRITE_EEPROM
static inline void enable_eprom_write(pegasus_t *pegasus)
{
__u8 tmp;
get_registers(pegasus, EthCtrl2, 1, &tmp);
set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE);
}
static inline void disable_eprom_write(pegasus_t *pegasus)
{
__u8 tmp;
get_registers(pegasus, EthCtrl2, 1, &tmp);
set_register(pegasus, EpromCtrl, 0);
set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE);
}
static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
{
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
int ret;
__le16 le_data = cpu_to_le16(data);
set_registers(pegasus, EpromOffset, 4, d);
enable_eprom_write(pegasus);
set_register(pegasus, EpromOffset, index);
set_registers(pegasus, EpromData, 2, &le_data);
set_register(pegasus, EpromCtrl, EPROM_WRITE);
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
if (ret == -ESHUTDOWN)
goto fail;
if (tmp & EPROM_DONE)
break;
}
disable_eprom_write(pegasus);
if (i >= REG_TIMEOUT)
goto fail;
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
#endif /* PEGASUS_WRITE_EEPROM */
static inline void get_node_id(pegasus_t *pegasus, __u8 *id)
{
int i;
__u16 w16;
for (i = 0; i < 3; i++) {
read_eprom_word(pegasus, i, &w16);
((__le16 *) id)[i] = cpu_to_le16(w16);
}
}
static void set_ethernet_addr(pegasus_t *pegasus)
{
__u8 node_id[6];
if (pegasus->features & PEGASUS_II) {
get_registers(pegasus, 0x10, sizeof(node_id), node_id);
} else {
get_node_id(pegasus, node_id);
set_registers(pegasus, EthID, sizeof(node_id), node_id);
}
memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
}
static inline int reset_mac(pegasus_t *pegasus)
{
__u8 data = 0x8;
int i;
set_register(pegasus, EthCtrl1, data);
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
if (~data & 0x08) {
if (loopback)
break;
if (mii_mode && (pegasus->features & HAS_HOME_PNA))
set_register(pegasus, Gpio1, 0x34);
else
set_register(pegasus, Gpio1, 0x26);
set_register(pegasus, Gpio0, pegasus->features);
set_register(pegasus, Gpio0, DEFAULT_GPIO_SET);
break;
}
}
if (i == REG_TIMEOUT)
return -ETIMEDOUT;
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
set_register(pegasus, Gpio0, 0x24);
set_register(pegasus, Gpio0, 0x26);
}
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
__u16 auxmode;
read_mii_word(pegasus, 3, 0x1b, &auxmode);
write_mii_word(pegasus, 3, 0x1b, auxmode | 4);
}
return 0;
}
static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
{
__u16 linkpart;
__u8 data[4];
pegasus_t *pegasus = netdev_priv(dev);
int ret;
read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
data[0] = 0xc9;
data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
data[1] |= 0x20; /* set full duplex */
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_100HALF))
data[1] |= 0x10; /* set 100 Mbps */
if (mii_mode)
data[1] = 0;
data[2] = loopback ? 0x09 : 0x01;
memcpy(pegasus->eth_regs, data, sizeof(data));
ret = set_registers(pegasus, EthCtrl0, 3, data);
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
u16 auxmode;
read_mii_word(pegasus, 0, 0x1b, &auxmode);
write_mii_word(pegasus, 0, 0x1b, auxmode | 4);
}
return ret;
}
static void fill_skb_pool(pegasus_t *pegasus)
{
int i;
for (i = 0; i < RX_SKBS; i++) {
if (pegasus->rx_pool[i])
continue;
pegasus->rx_pool[i] = dev_alloc_skb(PEGASUS_MTU + 2);
/*
** We give up if the allocation fails; the tasklet will be
** rescheduled again anyway...
*/
if (pegasus->rx_pool[i] == NULL)
return;
skb_reserve(pegasus->rx_pool[i], 2);
}
}
static void free_skb_pool(pegasus_t *pegasus)
{
int i;
for (i = 0; i < RX_SKBS; i++) {
if (pegasus->rx_pool[i]) {
dev_kfree_skb(pegasus->rx_pool[i]);
pegasus->rx_pool[i] = NULL;
}
}
}
static inline struct sk_buff *pull_skb(pegasus_t * pegasus)
{
int i;
struct sk_buff *skb;
for (i = 0; i < RX_SKBS; i++) {
if (likely(pegasus->rx_pool[i] != NULL)) {
skb = pegasus->rx_pool[i];
pegasus->rx_pool[i] = NULL;
return skb;
}
}
return NULL;
}
static void read_bulk_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
struct net_device *net;
int rx_status, count = urb->actual_length;
int status = urb->status;
u8 *buf = urb->transfer_buffer;
__u16 pkt_len;
if (!pegasus)
return;
net = pegasus->net;
if (!netif_device_present(net) || !netif_running(net))
return;
switch (status) {
case 0:
break;
case -ETIME:
netif_dbg(pegasus, rx_err, net, "reset MAC\n");
pegasus->flags &= ~PEGASUS_RX_BUSY;
break;
case -EPIPE: /* stall, or disconnect from TT */
/* FIXME schedule work to clear the halt */
netif_warn(pegasus, rx_err, net, "no rx stall recovery\n");
return;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
netif_dbg(pegasus, ifdown, net, "rx unlink, %d\n", status);
return;
default:
netif_dbg(pegasus, rx_err, net, "RX status %d\n", status);
goto goon;
}
if (count < 4)
goto goon;
rx_status = buf[count - 2];
if (rx_status & 0x1e) {
netif_dbg(pegasus, rx_err, net,
"RX packet error %x\n", rx_status);
pegasus->stats.rx_errors++;
if (rx_status & 0x06) /* long or runt */
pegasus->stats.rx_length_errors++;
if (rx_status & 0x08)
pegasus->stats.rx_crc_errors++;
if (rx_status & 0x10) /* extra bits */
pegasus->stats.rx_frame_errors++;
goto goon;
}
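/*
 * Work out the payload length.  The ADM8513 prepends a little-endian
 * header whose low 12 bits hold the length (its two leading bytes are
 * skipped here); the other chips append a trailer instead, with the
 * 12-bit length in buf[count - 4]/buf[count - 3], from which 8 bytes
 * (presumably framing plus CRC overhead) are subtracted.
 */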
if (pegasus->chip == 0x8513) {
pkt_len = le32_to_cpu(*(__le32 *)urb->transfer_buffer);
pkt_len &= 0x0fff;
pegasus->rx_skb->data += 2;
} else {
pkt_len = buf[count - 3] << 8;
pkt_len += buf[count - 4];
pkt_len &= 0xfff;
pkt_len -= 8;
}
/*
 * If the packet is unreasonably long, quietly drop it rather than
 * panic the kernel by calling skb_put() past the end of the skb.
 */
if (pkt_len > PEGASUS_MTU)
goto goon;
/*
* at this point we are sure pegasus->rx_skb != NULL
* so we go ahead and pass up the packet.
*/
skb_put(pegasus->rx_skb, pkt_len);
pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net);
netif_rx(pegasus->rx_skb);
pegasus->stats.rx_packets++;
pegasus->stats.rx_bytes += pkt_len;
if (pegasus->flags & PEGASUS_UNPLUG)
return;
spin_lock(&pegasus->rx_pool_lock);
pegasus->rx_skb = pull_skb(pegasus);
spin_unlock(&pegasus->rx_pool_lock);
if (pegasus->rx_skb == NULL)
goto tl_sched;
goon:
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
pegasus->rx_skb->data, PEGASUS_MTU + 8,
read_bulk_callback, pegasus);
rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
if (rx_status == -ENODEV)
netif_device_detach(pegasus->net);
else if (rx_status) {
pegasus->flags |= PEGASUS_RX_URB_FAIL;
goto tl_sched;
} else {
pegasus->flags &= ~PEGASUS_RX_URB_FAIL;
}
return;
tl_sched:
tasklet_schedule(&pegasus->rx_tl);
}
static void rx_fixup(unsigned long data)
{
pegasus_t *pegasus;
unsigned long flags;
int status;
pegasus = (pegasus_t *) data;
if (pegasus->flags & PEGASUS_UNPLUG)
return;
spin_lock_irqsave(&pegasus->rx_pool_lock, flags);
fill_skb_pool(pegasus);
if (pegasus->flags & PEGASUS_RX_URB_FAIL)
if (pegasus->rx_skb)
goto try_again;
if (pegasus->rx_skb == NULL)
pegasus->rx_skb = pull_skb(pegasus);
if (pegasus->rx_skb == NULL) {
netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
tasklet_schedule(&pegasus->rx_tl);
goto done;
}
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
pegasus->rx_skb->data, PEGASUS_MTU + 8,
read_bulk_callback, pegasus);
try_again:
status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
if (status == -ENODEV)
netif_device_detach(pegasus->net);
else if (status) {
pegasus->flags |= PEGASUS_RX_URB_FAIL;
tasklet_schedule(&pegasus->rx_tl);
} else {
pegasus->flags &= ~PEGASUS_RX_URB_FAIL;
}
done:
spin_unlock_irqrestore(&pegasus->rx_pool_lock, flags);
}
static void write_bulk_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
struct net_device *net;
int status = urb->status;
if (!pegasus)
return;
net = pegasus->net;
if (!netif_device_present(net) || !netif_running(net))
return;
switch (status) {
case -EPIPE:
/* FIXME schedule_work() to clear the tx halt */
netif_stop_queue(net);
netif_warn(pegasus, tx_err, net, "no tx stall recovery\n");
return;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
netif_dbg(pegasus, ifdown, net, "tx unlink, %d\n", status);
return;
default:
netif_info(pegasus, tx_err, net, "TX status %d\n", status);
/* FALL THROUGH */
case 0:
break;
}
net->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(net);
}
static void intr_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
struct net_device *net;
int res, status = urb->status;
if (!pegasus)
return;
net = pegasus->net;
switch (status) {
case 0:
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
return;
default:
/* some Pegasus-I products report LOTS of data
* toggle errors... avoid log spamming
*/
netif_dbg(pegasus, timer, net, "intr status %d\n", status);
}
if (urb->actual_length >= 6) {
u8 *d = urb->transfer_buffer;
/* byte 0 == tx_status1, reg 2B */
if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
|LATE_COL|JABBER_TIMEOUT)) {
pegasus->stats.tx_errors++;
if (d[0] & TX_UNDERRUN)
pegasus->stats.tx_fifo_errors++;
if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT))
pegasus->stats.tx_aborted_errors++;
if (d[0] & LATE_COL)
pegasus->stats.tx_window_errors++;
}
/* d[5].LINK_STATUS lies on some adapters.
* d[0].NO_CARRIER kicks in only with failed TX.
* ... so monitoring with MII may be safest.
*/
/* bytes 3-4 == rx_lostpkt, reg 2E/2F */
pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
}
res = usb_submit_urb(urb, GFP_ATOMIC);
if (res == -ENODEV)
netif_device_detach(pegasus->net);
if (res)
netif_err(pegasus, timer, net,
"can't resubmit interrupt urb, %d\n", res);
}
static void pegasus_tx_timeout(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
netif_warn(pegasus, timer, net, "tx timeout\n");
usb_unlink_urb(pegasus->tx_urb);
pegasus->stats.tx_errors++;
}
static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
int count = ((skb->len + 2) & 0x3f) ? skb->len + 2 : skb->len + 3;
int res;
__u16 l16 = skb->len;
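/*
 * Frames go out with a 2-byte little-endian length prefix.  If the
 * resulting transfer length were a multiple of 64 bytes, one extra
 * byte is sent so the bulk transfer does not end on a full USB
 * packet (avoiding the need for a zero-length terminating packet).
 */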
netif_stop_queue(net);
((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16);
skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len);
usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb,
usb_sndbulkpipe(pegasus->usb, 2),
pegasus->tx_buff, count,
write_bulk_callback, pegasus);
if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) {
netif_warn(pegasus, tx_err, net, "fail tx, %d\n", res);
switch (res) {
case -EPIPE: /* stall, or disconnect from TT */
/* cleanup should already have been scheduled */
break;
case -ENODEV: /* disconnect() upcoming */
case -EPERM:
netif_device_detach(pegasus->net);
break;
default:
pegasus->stats.tx_errors++;
netif_start_queue(net);
}
} else {
pegasus->stats.tx_packets++;
pegasus->stats.tx_bytes += skb->len;
}
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
{
return &((pegasus_t *) netdev_priv(dev))->stats;
}
static inline void disable_net_traffic(pegasus_t *pegasus)
{
__le16 tmp = cpu_to_le16(0);
set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
static inline void get_interrupt_interval(pegasus_t *pegasus)
{
u16 data;
u8 interval;
read_eprom_word(pegasus, 4, &data);
interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
if (interval < 0x80) {
netif_info(pegasus, timer, pegasus->net,
"intr interval changed from %ums to %ums\n",
interval, 0x80);
interval = 0x80;
data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
write_eprom_word(pegasus, 4, data);
#endif
}
}
pegasus->intr_interval = interval;
}
static void set_carrier(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
u16 tmp;
if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
return;
if (tmp & BMSR_LSTATUS)
netif_carrier_on(net);
else
netif_carrier_off(net);
}
static void free_all_urbs(pegasus_t *pegasus)
{
usb_free_urb(pegasus->intr_urb);
usb_free_urb(pegasus->tx_urb);
usb_free_urb(pegasus->rx_urb);
usb_free_urb(pegasus->ctrl_urb);
}
static void unlink_all_urbs(pegasus_t *pegasus)
{
usb_kill_urb(pegasus->intr_urb);
usb_kill_urb(pegasus->tx_urb);
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->ctrl_urb);
}
static int alloc_urbs(pegasus_t *pegasus)
{
pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pegasus->ctrl_urb)
return 0;
pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pegasus->rx_urb) {
usb_free_urb(pegasus->ctrl_urb);
return 0;
}
pegasus->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pegasus->tx_urb) {
usb_free_urb(pegasus->rx_urb);
usb_free_urb(pegasus->ctrl_urb);
return 0;
}
pegasus->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pegasus->intr_urb) {
usb_free_urb(pegasus->tx_urb);
usb_free_urb(pegasus->rx_urb);
usb_free_urb(pegasus->ctrl_urb);
return 0;
}
return 1;
}
static int pegasus_open(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
int res;
if (pegasus->rx_skb == NULL)
pegasus->rx_skb = pull_skb(pegasus);
/*
** Note: no point in freeing the pool, it is empty :-)
*/
if (!pegasus->rx_skb)
return -ENOMEM;
res = set_registers(pegasus, EthID, 6, net->dev_addr);
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
usb_rcvbulkpipe(pegasus->usb, 1),
pegasus->rx_skb->data, PEGASUS_MTU + 8,
read_bulk_callback, pegasus);
if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
if (res == -ENODEV)
netif_device_detach(pegasus->net);
netif_dbg(pegasus, ifup, net, "failed rx_urb, %d\n", res);
goto exit;
}
usb_fill_int_urb(pegasus->intr_urb, pegasus->usb,
usb_rcvintpipe(pegasus->usb, 3),
pegasus->intr_buff, sizeof(pegasus->intr_buff),
intr_callback, pegasus, pegasus->intr_interval);
if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
if (res == -ENODEV)
netif_device_detach(pegasus->net);
netif_dbg(pegasus, ifup, net, "failed intr_urb, %d\n", res);
usb_kill_urb(pegasus->rx_urb);
goto exit;
}
if ((res = enable_net_traffic(net, pegasus->usb))) {
netif_dbg(pegasus, ifup, net,
"can't enable_net_traffic() - %d\n", res);
res = -EIO;
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
free_skb_pool(pegasus);
goto exit;
}
set_carrier(net);
netif_start_queue(net);
netif_dbg(pegasus, ifup, net, "open\n");
res = 0;
exit:
return res;
}
static int pegasus_close(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
netif_stop_queue(net);
if (!(pegasus->flags & PEGASUS_UNPLUG))
disable_net_traffic(pegasus);
tasklet_kill(&pegasus->rx_tl);
unlink_all_urbs(pegasus);
return 0;
}
static void pegasus_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
pegasus_t *pegasus = netdev_priv(dev);
strncpy(info->driver, driver_name, sizeof(info->driver) - 1);
strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
/* also handles three patterns of some kind in hardware */
#define WOL_SUPPORTED (WAKE_MAGIC|WAKE_PHY)
static void
pegasus_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
pegasus_t *pegasus = netdev_priv(dev);
wol->supported = WAKE_MAGIC | WAKE_PHY;
wol->wolopts = pegasus->wolopts;
}
static int
pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
pegasus_t *pegasus = netdev_priv(dev);
u8 reg78 = 0x04;
if (wol->wolopts & ~WOL_SUPPORTED)
return -EINVAL;
if (wol->wolopts & WAKE_MAGIC)
reg78 |= 0x80;
if (wol->wolopts & WAKE_PHY)
reg78 |= 0x40;
/* FIXME this 0x10 bit still needs to get set in the chip... */
if (wol->wolopts)
pegasus->eth_regs[0] |= 0x10;
else
pegasus->eth_regs[0] &= ~0x10;
pegasus->wolopts = wol->wolopts;
return set_register(pegasus, WakeupControl, reg78);
}
static inline void pegasus_reset_wol(struct net_device *dev)
{
struct ethtool_wolinfo wol;
memset(&wol, 0, sizeof wol);
(void) pegasus_set_wol(dev, &wol);
}
static int
pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
pegasus_t *pegasus;
pegasus = netdev_priv(dev);
mii_ethtool_gset(&pegasus->mii, ecmd);
return 0;
}
static int
pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
pegasus_t *pegasus = netdev_priv(dev);
return mii_ethtool_sset(&pegasus->mii, ecmd);
}
static int pegasus_nway_reset(struct net_device *dev)
{
pegasus_t *pegasus = netdev_priv(dev);
return mii_nway_restart(&pegasus->mii);
}
static u32 pegasus_get_link(struct net_device *dev)
{
pegasus_t *pegasus = netdev_priv(dev);
return mii_link_ok(&pegasus->mii);
}
static u32 pegasus_get_msglevel(struct net_device *dev)
{
pegasus_t *pegasus = netdev_priv(dev);
return pegasus->msg_enable;
}
static void pegasus_set_msglevel(struct net_device *dev, u32 v)
{
pegasus_t *pegasus = netdev_priv(dev);
pegasus->msg_enable = v;
}
static const struct ethtool_ops ops = {
.get_drvinfo = pegasus_get_drvinfo,
.get_settings = pegasus_get_settings,
.set_settings = pegasus_set_settings,
.nway_reset = pegasus_nway_reset,
.get_link = pegasus_get_link,
.get_msglevel = pegasus_get_msglevel,
.set_msglevel = pegasus_set_msglevel,
.get_wol = pegasus_get_wol,
.set_wol = pegasus_set_wol,
};
static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
__u16 *data = (__u16 *) &rq->ifr_ifru;
pegasus_t *pegasus = netdev_priv(net);
int res;
switch (cmd) {
case SIOCDEVPRIVATE:
data[0] = pegasus->phy;
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
break;
case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, data[2]);
res = 0;
break;
default:
res = -EOPNOTSUPP;
}
return res;
}
static void pegasus_set_multicast(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
if (net->flags & IFF_PROMISC) {
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
} else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
netif_dbg(pegasus, link, net, "set allmulti\n");
} else {
pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
}
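/*
 * Kick the asynchronous update machinery by hand: flag the change and
 * invoke the control-URB completion handler with status 0 so that it
 * submits the register write.  ndo_set_rx_mode runs in atomic (BH)
 * context, so the sleeping set_registers() path is not usable here.
 */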
pegasus->ctrl_urb->status = 0;
pegasus->flags |= ETH_REGS_CHANGE;
ctrl_callback(pegasus->ctrl_urb);
}
static __u8 mii_phy_probe(pegasus_t *pegasus)
{
int i;
__u16 tmp;
for (i = 0; i < 32; i++) {
read_mii_word(pegasus, i, MII_BMSR, &tmp);
if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
continue;
else
return i;
}
return 0xff;
}
static inline void setup_pegasus_II(pegasus_t *pegasus)
{
__u8 data = 0xa5;
set_register(pegasus, Reg1d, 0);
set_register(pegasus, Reg7b, 1);
mdelay(100);
if ((pegasus->features & HAS_HOME_PNA) && mii_mode)
set_register(pegasus, Reg7b, 0);
else
set_register(pegasus, Reg7b, 2);
set_register(pegasus, 0x83, data);
get_registers(pegasus, 0x83, 1, &data);
if (data == 0xa5)
pegasus->chip = 0x8513;
else
pegasus->chip = 0;
set_register(pegasus, 0x80, 0xc0);
set_register(pegasus, 0x83, 0xff);
set_register(pegasus, 0x84, 0x01);
if (pegasus->features & HAS_HOME_PNA && mii_mode)
set_register(pegasus, Reg81, 6);
else
set_register(pegasus, Reg81, 2);
}
static int pegasus_count;
static struct workqueue_struct *pegasus_workqueue;
#define CARRIER_CHECK_DELAY (2 * HZ)
static void check_carrier(struct work_struct *work)
{
pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
set_carrier(pegasus->net);
if (!(pegasus->flags & PEGASUS_UNPLUG)) {
queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
}
}
static int pegasus_blacklisted(struct usb_device *udev)
{
struct usb_device_descriptor *udd = &udev->descriptor;
/* Special quirk to keep the driver from handling the Belkin Bluetooth
* dongle which happens to have the same ID.
*/
if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
(udd->idProduct == cpu_to_le16(0x0121)) &&
(udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
(udd->bDeviceProtocol == 1))
return 1;
return 0;
}
/* we rely on probe() and remove() being serialized so we
* don't need extra locking on pegasus_count.
*/
static void pegasus_dec_workqueue(void)
{
pegasus_count--;
if (pegasus_count == 0) {
destroy_workqueue(pegasus_workqueue);
pegasus_workqueue = NULL;
}
}
static int pegasus_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct net_device *net;
pegasus_t *pegasus;
int dev_index = id - pegasus_ids;
int res = -ENOMEM;
if (pegasus_blacklisted(dev))
return -ENODEV;
if (pegasus_count == 0) {
pegasus_workqueue = create_singlethread_workqueue("pegasus");
if (!pegasus_workqueue)
return -ENOMEM;
}
pegasus_count++;
usb_get_dev(dev);
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
pegasus = netdev_priv(net);
pegasus->dev_index = dev_index;
init_waitqueue_head(&pegasus->ctrl_wait);
if (!alloc_urbs(pegasus)) {
dev_err(&intf->dev, "can't allocate %s\n", "urbs");
goto out1;
}
tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
pegasus->intf = intf;
pegasus->usb = dev;
pegasus->net = net;
net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
net->netdev_ops = &pegasus_netdev_ops;
SET_ETHTOOL_OPS(net, &ops);
pegasus->mii.dev = net;
pegasus->mii.mdio_read = mdio_read;
pegasus->mii.mdio_write = mdio_write;
pegasus->mii.phy_id_mask = 0x1f;
pegasus->mii.reg_num_mask = 0x1f;
spin_lock_init(&pegasus->rx_pool_lock);
pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
pegasus->features = usb_dev_id[dev_index].private;
get_interrupt_interval(pegasus);
if (reset_mac(pegasus)) {
dev_err(&intf->dev, "can't reset MAC\n");
res = -EIO;
goto out2;
}
set_ethernet_addr(pegasus);
fill_skb_pool(pegasus);
if (pegasus->features & PEGASUS_II) {
dev_info(&intf->dev, "setup Pegasus II specific registers\n");
setup_pegasus_II(pegasus);
}
pegasus->phy = mii_phy_probe(pegasus);
if (pegasus->phy == 0xff) {
dev_warn(&intf->dev, "can't locate MII phy, using default\n");
pegasus->phy = 1;
}
pegasus->mii.phy_id = pegasus->phy;
usb_set_intfdata(intf, pegasus);
SET_NETDEV_DEV(net, &intf->dev);
pegasus_reset_wol(net);
res = register_netdev(net);
if (res)
goto out3;
queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
dev_info(&intf->dev, "%s, %s, %pM\n",
net->name,
usb_dev_id[dev_index].name,
net->dev_addr);
return 0;
out3:
usb_set_intfdata(intf, NULL);
free_skb_pool(pegasus);
out2:
free_all_urbs(pegasus);
out1:
free_netdev(net);
out:
usb_put_dev(dev);
pegasus_dec_workqueue();
return res;
}
static void pegasus_disconnect(struct usb_interface *intf)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (!pegasus) {
dev_dbg(&intf->dev, "unregistering non-bound device?\n");
return;
}
pegasus->flags |= PEGASUS_UNPLUG;
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
usb_put_dev(interface_to_usbdev(intf));
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
free_skb_pool(pegasus);
if (pegasus->rx_skb != NULL) {
dev_kfree_skb(pegasus->rx_skb);
pegasus->rx_skb = NULL;
}
free_netdev(pegasus->net);
pegasus_dec_workqueue();
}
static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
netif_device_detach(pegasus->net);
cancel_delayed_work(&pegasus->carrier_check);
if (netif_running(pegasus->net)) {
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
}
return 0;
}
static int pegasus_resume(struct usb_interface *intf)
{
struct pegasus *pegasus = usb_get_intfdata(intf);
netif_device_attach(pegasus->net);
if (netif_running(pegasus->net)) {
pegasus->rx_urb->status = 0;
pegasus->rx_urb->actual_length = 0;
read_bulk_callback(pegasus->rx_urb);
pegasus->intr_urb->status = 0;
pegasus->intr_urb->actual_length = 0;
intr_callback(pegasus->intr_urb);
}
queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
return 0;
}
static const struct net_device_ops pegasus_netdev_ops = {
.ndo_open = pegasus_open,
.ndo_stop = pegasus_close,
.ndo_do_ioctl = pegasus_ioctl,
.ndo_start_xmit = pegasus_start_xmit,
.ndo_set_rx_mode = pegasus_set_multicast,
.ndo_get_stats = pegasus_netdev_stats,
.ndo_tx_timeout = pegasus_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static struct usb_driver pegasus_driver = {
.name = driver_name,
.probe = pegasus_probe,
.disconnect = pegasus_disconnect,
.id_table = pegasus_ids,
.suspend = pegasus_suspend,
.resume = pegasus_resume,
};
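/*
 * Parse the "devid" module parameter, which appends one entry to the
 * usb_dev_id/pegasus_ids tables.  The expected format is
 * name:vendor_id:device_id:flags with the numeric fields in hex,
 * e.g. devid=MyAdapter:0b95:1720:1 (hypothetical values).
 */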
static void __init parse_id(char *id)
{
unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0;
char *token, *name = NULL;
if ((token = strsep(&id, ":")) != NULL)
name = token;
/* name now points to a NUL-terminated string */
if ((token = strsep(&id, ":")) != NULL)
vendor_id = simple_strtoul(token, NULL, 16);
if ((token = strsep(&id, ":")) != NULL)
device_id = simple_strtoul(token, NULL, 16);
if (id)
flags = simple_strtoul(id, NULL, 16);
pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n",
driver_name, name, vendor_id, device_id, flags);
if (vendor_id > 0xffff || vendor_id == 0)
return;
if (device_id > 0xffff || device_id == 0)
return;
for (i = 0; usb_dev_id[i].name; i++);
usb_dev_id[i].name = name;
usb_dev_id[i].vendor = vendor_id;
usb_dev_id[i].device = device_id;
usb_dev_id[i].private = flags;
pegasus_ids[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
pegasus_ids[i].idVendor = vendor_id;
pegasus_ids[i].idProduct = device_id;
}
static int __init pegasus_init(void)
{
pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
if (devid)
parse_id(devid);
return usb_register(&pegasus_driver);
}
static void __exit pegasus_exit(void)
{
usb_deregister(&pegasus_driver);
}
module_init(pegasus_init);
module_exit(pegasus_exit);
| gpl-2.0 |
yaymalaga/hellsPrime_kernel | arch/arm/mach-s5pv210/pm.c | 5058 | 4215 | /* linux/arch/arm/mach-s5pv210/pm.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5PV210 - Power Management support
*
* Based on arch/arm/mach-s3c2410/pm.c
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/regs-timer.h>
#include <mach/regs-irq.h>
#include <mach/regs-clock.h>
static struct sleep_save s5pv210_core_save[] = {
/* Clock source */
SAVE_ITEM(S5P_CLK_SRC0),
SAVE_ITEM(S5P_CLK_SRC1),
SAVE_ITEM(S5P_CLK_SRC2),
SAVE_ITEM(S5P_CLK_SRC3),
SAVE_ITEM(S5P_CLK_SRC4),
SAVE_ITEM(S5P_CLK_SRC5),
SAVE_ITEM(S5P_CLK_SRC6),
/* Clock source Mask */
SAVE_ITEM(S5P_CLK_SRC_MASK0),
SAVE_ITEM(S5P_CLK_SRC_MASK1),
/* Clock Divider */
SAVE_ITEM(S5P_CLK_DIV0),
SAVE_ITEM(S5P_CLK_DIV1),
SAVE_ITEM(S5P_CLK_DIV2),
SAVE_ITEM(S5P_CLK_DIV3),
SAVE_ITEM(S5P_CLK_DIV4),
SAVE_ITEM(S5P_CLK_DIV5),
SAVE_ITEM(S5P_CLK_DIV6),
SAVE_ITEM(S5P_CLK_DIV7),
/* Clock Main Gate */
SAVE_ITEM(S5P_CLKGATE_MAIN0),
SAVE_ITEM(S5P_CLKGATE_MAIN1),
SAVE_ITEM(S5P_CLKGATE_MAIN2),
/* Clock source Peri Gate */
SAVE_ITEM(S5P_CLKGATE_PERI0),
SAVE_ITEM(S5P_CLKGATE_PERI1),
/* Clock source SCLK Gate */
SAVE_ITEM(S5P_CLKGATE_SCLK0),
SAVE_ITEM(S5P_CLKGATE_SCLK1),
/* Clock IP Clock gate */
SAVE_ITEM(S5P_CLKGATE_IP0),
SAVE_ITEM(S5P_CLKGATE_IP1),
SAVE_ITEM(S5P_CLKGATE_IP2),
SAVE_ITEM(S5P_CLKGATE_IP3),
SAVE_ITEM(S5P_CLKGATE_IP4),
/* Clock Block and Bus gate */
SAVE_ITEM(S5P_CLKGATE_BLOCK),
SAVE_ITEM(S5P_CLKGATE_BUS0),
/* Clock ETC */
SAVE_ITEM(S5P_CLK_OUT),
SAVE_ITEM(S5P_MDNIE_SEL),
/* PWM Register */
SAVE_ITEM(S3C2410_TCFG0),
SAVE_ITEM(S3C2410_TCFG1),
SAVE_ITEM(S3C64XX_TINT_CSTAT),
SAVE_ITEM(S3C2410_TCON),
SAVE_ITEM(S3C2410_TCNTB(0)),
SAVE_ITEM(S3C2410_TCMPB(0)),
SAVE_ITEM(S3C2410_TCNTO(0)),
};
static int s5pv210_cpu_suspend(unsigned long arg)
{
unsigned long tmp;
/* issue the standby signal into the pm unit. Note, we
* issue a write-buffer drain just in case */
tmp = 0;
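/* The branch over an .align 5 places the sequence on a cache-line
 * boundary; the two CP15 c7,c10 operations act as memory barriers
 * draining the write buffer (DMB/DSB encodings on this core) before
 * WFI stops the CPU.
 */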
asm("b 1f\n\t"
".align 5\n\t"
"1:\n\t"
"mcr p15, 0, %0, c7, c10, 5\n\t"
"mcr p15, 0, %0, c7, c10, 4\n\t"
"wfi" : : "r" (tmp));
/* we should never get past here */
panic("sleep resumed to originator?");
}
static void s5pv210_pm_prepare(void)
{
unsigned int tmp;
/* ensure at least INFORM0 has the resume address */
__raw_writel(virt_to_phys(s3c_cpu_resume), S5P_INFORM0);
tmp = __raw_readl(S5P_SLEEP_CFG);
tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
__raw_writel(tmp, S5P_SLEEP_CFG);
/* WFI for SLEEP mode configuration by SYSCON */
tmp = __raw_readl(S5P_PWR_CFG);
tmp &= S5P_CFG_WFI_CLEAN;
tmp |= S5P_CFG_WFI_SLEEP;
__raw_writel(tmp, S5P_PWR_CFG);
/* SYSCON interrupt handling disable */
tmp = __raw_readl(S5P_OTHERS);
tmp |= S5P_OTHER_SYSC_INTOFF;
__raw_writel(tmp, S5P_OTHERS);
s3c_pm_do_save(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
}
static int s5pv210_pm_add(struct device *dev, struct subsys_interface *sif)
{
pm_cpu_prep = s5pv210_pm_prepare;
pm_cpu_sleep = s5pv210_cpu_suspend;
return 0;
}
static struct subsys_interface s5pv210_pm_interface = {
.name = "s5pv210_pm",
.subsys = &s5pv210_subsys,
.add_dev = s5pv210_pm_add,
};
static __init int s5pv210_pm_drvinit(void)
{
return subsys_interface_register(&s5pv210_pm_interface);
}
arch_initcall(s5pv210_pm_drvinit);
static void s5pv210_pm_resume(void)
{
u32 tmp;
tmp = __raw_readl(S5P_OTHERS);
tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF |\
S5P_OTHERS_RET_MMC | S5P_OTHERS_RET_UART);
__raw_writel(tmp , S5P_OTHERS);
s3c_pm_do_restore_core(s5pv210_core_save, ARRAY_SIZE(s5pv210_core_save));
}
static struct syscore_ops s5pv210_pm_syscore_ops = {
.resume = s5pv210_pm_resume,
};
static __init int s5pv210_pm_syscore_init(void)
{
register_syscore_ops(&s5pv210_pm_syscore_ops);
return 0;
}
arch_initcall(s5pv210_pm_syscore_init);
| gpl-2.0 |
dankocher/android_kernel_lge_w7ds | drivers/gpu/drm/udl/udl_encoder.c | 5314 | 1920 | /*
* Copyright (C) 2012 Red Hat
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "udl_drv.h"
/* dummy encoder */
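/*
 * DisplayLink devices have no programmable encoder hardware, but the
 * DRM core still requires an encoder object between the CRTC and the
 * connector, so every callback below is intentionally a no-op.
 */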
void udl_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static void udl_encoder_disable(struct drm_encoder *encoder)
{
}
static bool udl_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void udl_encoder_prepare(struct drm_encoder *encoder)
{
}
static void udl_encoder_commit(struct drm_encoder *encoder)
{
}
static void udl_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void
udl_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
static const struct drm_encoder_helper_funcs udl_helper_funcs = {
.dpms = udl_encoder_dpms,
.mode_fixup = udl_mode_fixup,
.prepare = udl_encoder_prepare,
.mode_set = udl_encoder_mode_set,
.commit = udl_encoder_commit,
.disable = udl_encoder_disable,
};
static const struct drm_encoder_funcs udl_enc_funcs = {
.destroy = udl_enc_destroy,
};
struct drm_encoder *udl_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
if (!encoder)
return NULL;
drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &udl_helper_funcs);
encoder->possible_crtcs = 1;
return encoder;
}
| gpl-2.0 |
Yzoni/android_kernel_google_msm | arch/arm/mach-gemini/board-rut1xx.c | 5314 | 2066 | /*
* Support for Teltonika RUT1xx
*
* Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include "common.h"
static struct gpio_keys_button rut1xx_keys[] = {
{
.code = KEY_SETUP,
.gpio = 60,
.active_low = 1,
.desc = "Reset to defaults",
.type = EV_KEY,
},
};
static struct gpio_keys_platform_data rut1xx_keys_data = {
.buttons = rut1xx_keys,
.nbuttons = ARRAY_SIZE(rut1xx_keys),
};
static struct platform_device rut1xx_keys_device = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &rut1xx_keys_data,
},
};
static struct gpio_led rut100_leds[] = {
{
.name = "Power",
.default_trigger = "heartbeat",
.gpio = 17,
},
{
.name = "GSM",
.default_trigger = "default-on",
.gpio = 7,
.active_low = 1,
},
};
static struct gpio_led_platform_data rut100_leds_data = {
.num_leds = ARRAY_SIZE(rut100_leds),
.leds = rut100_leds,
};
static struct platform_device rut1xx_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &rut100_leds_data,
},
};
static struct sys_timer rut1xx_timer = {
.init = gemini_timer_init,
};
static void __init rut1xx_init(void)
{
gemini_gpio_init();
platform_register_uart();
platform_register_pflash(SZ_8M, NULL, 0);
platform_device_register(&rut1xx_leds);
platform_device_register(&rut1xx_keys_device);
platform_register_rtc();
}
MACHINE_START(RUT100, "Teltonika RUT100")
.atag_offset = 0x100,
.map_io = gemini_map_io,
.init_irq = gemini_init_irq,
.timer = &rut1xx_timer,
.init_machine = rut1xx_init,
MACHINE_END
| gpl-2.0 |
drewis/android_kernel_grouper | drivers/video/hpfb.c | 8130 | 11168 | /*
* HP300 Topcat framebuffer support (derived from macfb of all things)
* Phil Blundell <philb@gnu.org> 1998
* DIO-II, colour map and Catseye support by
* Kars de Jong <jongk@linux-m68k.org>, May 2004.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/dio.h>
#include <asm/io.h>
#include <asm/uaccess.h>
static struct fb_info fb_info = {
.fix = {
.id = "HP300 ",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.accel = FB_ACCEL_NONE,
}
};
static unsigned long fb_regs;
static unsigned char fb_bitmask;
#define TC_NBLANK 0x4080
#define TC_WEN 0x4088
#define TC_REN 0x408c
#define TC_FBEN 0x4090
#define TC_PRR 0x40ea
/* These defines match the X window system */
#define RR_CLEAR 0x0
#define RR_COPY 0x3
#define RR_NOOP 0x5
#define RR_XOR 0x6
#define RR_INVERT 0xa
#define RR_COPYINVERTED 0xc
#define RR_SET 0xf
/* blitter regs */
#define BUSY 0x4044
#define WMRR 0x40ef
#define SOURCE_X 0x40f2
#define SOURCE_Y 0x40f6
#define DEST_X 0x40fa
#define DEST_Y 0x40fe
#define WHEIGHT 0x4106
#define WWIDTH 0x4102
#define WMOVE 0x409c
static struct fb_var_screeninfo hpfb_defined = {
.red = {
.length = 8,
},
.green = {
.length = 8,
},
.blue = {
.length = 8,
},
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
static int hpfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
/* use MSBs */
unsigned char _red = red >> 8;
unsigned char _green = green >> 8;
unsigned char _blue = blue >> 8;
unsigned char _regno = regno;
/*
* Set a single color register. The values supplied are
* already rounded down to the hardware's capabilities
* (according to the entries in the `var' structure). Return
* != 0 for invalid regno.
*/
if (regno >= info->cmap.len)
return 1;
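/*
 * Colour-map write sequence: wait until the hardware is idle (status
 * bit 0x4), load the RGB latches, write the inverted register index
 * and strobe the update register.  The register meanings here are
 * inferred from the magic offsets the driver uses.
 */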
while (in_be16(fb_regs + 0x6002) & 0x4) udelay(1);
out_be16(fb_regs + 0x60ba, 0xff);
out_be16(fb_regs + 0x60b2, _red);
out_be16(fb_regs + 0x60b4, _green);
out_be16(fb_regs + 0x60b6, _blue);
out_be16(fb_regs + 0x60b8, ~_regno);
out_be16(fb_regs + 0x60f0, 0xff);
udelay(100);
while (in_be16(fb_regs + 0x6002) & 0x4) udelay(1);
out_be16(fb_regs + 0x60b2, 0);
out_be16(fb_regs + 0x60b4, 0);
out_be16(fb_regs + 0x60b6, 0);
out_be16(fb_regs + 0x60b8, 0);
return 0;
}
/* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */
static int hpfb_blank(int blank, struct fb_info *info)
{
out_8(fb_regs + TC_NBLANK, (blank ? 0x00 : fb_bitmask));
return 0;
}
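/*
 * Drive the Topcat block mover: when a new replacement rule is given
 * (rr >= 0), wait for BUSY to clear and program the rule; then load
 * the source/destination coordinates and window size and trigger the
 * move via WMOVE.  rr < 0 reuses whatever rule was set up before.
 */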
static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr)
{
if (rr >= 0) {
while (in_8(fb_regs + BUSY) & fb_bitmask)
;
}
out_8(fb_regs + TC_FBEN, fb_bitmask);
if (rr >= 0) {
out_8(fb_regs + TC_WEN, fb_bitmask);
out_8(fb_regs + WMRR, rr);
}
out_be16(fb_regs + SOURCE_X, x0);
out_be16(fb_regs + SOURCE_Y, y0);
out_be16(fb_regs + DEST_X, x1);
out_be16(fb_regs + DEST_Y, y1);
out_be16(fb_regs + WWIDTH, w);
out_be16(fb_regs + WHEIGHT, h);
out_8(fb_regs + WMOVE, fb_bitmask);
}
static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
topcat_blit(area->sx, area->sy, area->dx, area->dy, area->width, area->height, RR_COPY);
}
static void hpfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
{
u8 clr;
clr = region->color & 0xff;
while (in_8(fb_regs + BUSY) & fb_bitmask)
;
/* Foreground */
out_8(fb_regs + TC_WEN, fb_bitmask & clr);
out_8(fb_regs + WMRR, (region->rop == ROP_COPY ? RR_SET : RR_INVERT));
/* Background */
out_8(fb_regs + TC_WEN, fb_bitmask & ~clr);
out_8(fb_regs + WMRR, (region->rop == ROP_COPY ? RR_CLEAR : RR_NOOP));
topcat_blit(region->dx, region->dy, region->dx, region->dy, region->width, region->height, -1);
}
static int hpfb_sync(struct fb_info *info)
{
/*
* Since we also access the framebuffer directly, we have to wait
* until the block mover is finished
*/
while (in_8(fb_regs + BUSY) & fb_bitmask)
;
out_8(fb_regs + TC_WEN, fb_bitmask);
out_8(fb_regs + TC_PRR, RR_COPY);
out_8(fb_regs + TC_FBEN, fb_bitmask);
return 0;
}
static struct fb_ops hpfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = hpfb_setcolreg,
.fb_blank = hpfb_blank,
.fb_fillrect = hpfb_fillrect,
.fb_copyarea = hpfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_sync = hpfb_sync,
};
/* Common to all HP framebuffers */
#define HPFB_FBWMSB 0x05 /* Frame buffer width */
#define HPFB_FBWLSB 0x07
#define HPFB_FBHMSB 0x09 /* Frame buffer height */
#define HPFB_FBHLSB 0x0b
#define HPFB_DWMSB 0x0d /* Display width */
#define HPFB_DWLSB 0x0f
#define HPFB_DHMSB 0x11 /* Display height */
#define HPFB_DHLSB 0x13
#define HPFB_NUMPLANES 0x5b /* Number of colour planes */
#define HPFB_FBOMSB 0x5d /* Frame buffer offset */
#define HPFB_FBOLSB 0x5f
static int __devinit hpfb_init_one(unsigned long phys_base,
unsigned long virt_base)
{
unsigned long fboff, fb_width, fb_height, fb_start;
fb_regs = virt_base;
fboff = (in_8(fb_regs + HPFB_FBOMSB) << 8) | in_8(fb_regs + HPFB_FBOLSB);
fb_info.fix.smem_start = (in_8(fb_regs + fboff) << 16);
if (phys_base >= DIOII_BASE) {
fb_info.fix.smem_start += phys_base;
}
if (DIO_SECID(fb_regs) != DIO_ID2_TOPCAT) {
/* This is the magic incantation the HP X server uses to make Catseye boards work. */
while (in_be16(fb_regs+0x4800) & 1)
;
out_be16(fb_regs+0x4800, 0); /* Catseye status */
out_be16(fb_regs+0x4510, 0); /* VB */
out_be16(fb_regs+0x4512, 0); /* TCNTRL */
out_be16(fb_regs+0x4514, 0); /* ACNTRL */
out_be16(fb_regs+0x4516, 0); /* PNCNTRL */
out_be16(fb_regs+0x4206, 0x90); /* RUG Command/Status */
out_be16(fb_regs+0x60a2, 0); /* Overlay Mask */
out_be16(fb_regs+0x60bc, 0); /* Ram Select */
}
/*
* Fill in the available video resolution
*/
fb_width = (in_8(fb_regs + HPFB_FBWMSB) << 8) | in_8(fb_regs + HPFB_FBWLSB);
fb_info.fix.line_length = fb_width;
fb_height = (in_8(fb_regs + HPFB_FBHMSB) << 8) | in_8(fb_regs + HPFB_FBHLSB);
fb_info.fix.smem_len = fb_width * fb_height;
fb_start = (unsigned long)ioremap_writethrough(fb_info.fix.smem_start,
fb_info.fix.smem_len);
hpfb_defined.xres = (in_8(fb_regs + HPFB_DWMSB) << 8) | in_8(fb_regs + HPFB_DWLSB);
hpfb_defined.yres = (in_8(fb_regs + HPFB_DHMSB) << 8) | in_8(fb_regs + HPFB_DHLSB);
hpfb_defined.xres_virtual = hpfb_defined.xres;
hpfb_defined.yres_virtual = hpfb_defined.yres;
hpfb_defined.bits_per_pixel = in_8(fb_regs + HPFB_NUMPLANES);
printk(KERN_INFO "hpfb: framebuffer at 0x%lx, mapped to 0x%lx, size %dk\n",
fb_info.fix.smem_start, fb_start, fb_info.fix.smem_len/1024);
printk(KERN_INFO "hpfb: mode is %dx%dx%d, linelength=%d\n",
hpfb_defined.xres, hpfb_defined.yres, hpfb_defined.bits_per_pixel, fb_info.fix.line_length);
/*
* Give the hardware a bit of a prod and work out how many bits per
* pixel are supported.
*/
out_8(fb_regs + TC_WEN, 0xff);
out_8(fb_regs + TC_PRR, RR_COPY);
out_8(fb_regs + TC_FBEN, 0xff);
out_8(fb_start, 0xff);
fb_bitmask = in_8(fb_start);
out_8(fb_start, 0);
/*
* Enable reading/writing of all the planes.
*/
out_8(fb_regs + TC_WEN, fb_bitmask);
out_8(fb_regs + TC_PRR, RR_COPY);
out_8(fb_regs + TC_REN, fb_bitmask);
out_8(fb_regs + TC_FBEN, fb_bitmask);
/*
* Clear the screen.
*/
topcat_blit(0, 0, 0, 0, fb_width, fb_height, RR_CLEAR);
/*
* Let there be consoles..
*/
if (DIO_SECID(fb_regs) == DIO_ID2_TOPCAT)
strcat(fb_info.fix.id, "Topcat");
else
strcat(fb_info.fix.id, "Catseye");
fb_info.fbops = &hpfb_ops;
fb_info.flags = FBINFO_DEFAULT;
fb_info.var = hpfb_defined;
fb_info.screen_base = (char *)fb_start;
fb_alloc_cmap(&fb_info.cmap, 1 << hpfb_defined.bits_per_pixel, 0);
if (register_framebuffer(&fb_info) < 0) {
fb_dealloc_cmap(&fb_info.cmap);
iounmap(fb_info.screen_base);
fb_info.screen_base = NULL;
return 1;
}
printk(KERN_INFO "fb%d: %s frame buffer device\n",
fb_info.node, fb_info.fix.id);
return 0;
}
/*
* Check that the secondary ID indicates that we have some hope of working with this
* framebuffer. The catseye boards are pretty much like topcats and we can muddle through.
*/
#define topcat_sid_ok(x) (((x) == DIO_ID2_LRCATSEYE) || ((x) == DIO_ID2_HRCCATSEYE) \
|| ((x) == DIO_ID2_HRMCATSEYE) || ((x) == DIO_ID2_TOPCAT))
/*
* Initialise the framebuffer
*/
static int __devinit hpfb_dio_probe(struct dio_dev * d, const struct dio_device_id * ent)
{
unsigned long paddr, vaddr;
paddr = d->resource.start;
if (!request_mem_region(d->resource.start, resource_size(&d->resource), d->name))
return -EBUSY;
if (d->scode >= DIOII_SCBASE) {
vaddr = (unsigned long)ioremap(paddr, resource_size(&d->resource));
} else {
vaddr = paddr + DIO_VIRADDRBASE;
}
printk(KERN_INFO "Topcat found at DIO select code %d "
"(secondary id %02x)\n", d->scode, (d->id >> 8) & 0xff);
if (hpfb_init_one(paddr, vaddr)) {
if (d->scode >= DIOII_SCBASE)
iounmap((void *)vaddr);
return -ENOMEM;
}
return 0;
}
static void __devexit hpfb_remove_one(struct dio_dev *d)
{
unregister_framebuffer(&fb_info);
if (d->scode >= DIOII_SCBASE)
iounmap((void *)fb_regs);
release_mem_region(d->resource.start, resource_size(&d->resource));
}
static struct dio_device_id hpfb_dio_tbl[] = {
{ DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_LRCATSEYE) },
{ DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_HRCCATSEYE) },
{ DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_HRMCATSEYE) },
{ DIO_ENCODE_ID(DIO_ID_FBUFFER, DIO_ID2_TOPCAT) },
{ 0 }
};
static struct dio_driver hpfb_driver = {
.name = "hpfb",
.id_table = hpfb_dio_tbl,
.probe = hpfb_dio_probe,
.remove = __devexit_p(hpfb_remove_one),
};
int __init hpfb_init(void)
{
unsigned int sid;
mm_segment_t fs;
unsigned char i;
int err;
/* Topcats can be on the internal IO bus or real DIO devices.
* The internal variant sits at 0x560000; it has primary
* and secondary ID registers just like the DIO version.
* So we merge the two detection routines.
*
* Perhaps this #define should be in a global header file:
* I believe it's common to all internal fbs, not just topcat.
*/
#define INTFBVADDR 0xf0560000
#define INTFBPADDR 0x560000
if (!MACH_IS_HP300)
return -ENODEV;
if (fb_get_options("hpfb", NULL))
return -ENODEV;
err = dio_register_driver(&hpfb_driver);
if (err)
return err;
fs = get_fs();
set_fs(KERNEL_DS);
err = get_user(i, (unsigned char *)INTFBVADDR + DIO_IDOFF);
set_fs(fs);
if (!err && (i == DIO_ID_FBUFFER) && topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) {
if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE, "Internal Topcat"))
return -EBUSY;
printk(KERN_INFO "Internal Topcat found (secondary id %02x)\n", sid);
if (hpfb_init_one(INTFBPADDR, INTFBVADDR)) {
return -ENOMEM;
}
}
return 0;
}
void __exit hpfb_cleanup_module(void)
{
dio_unregister_driver(&hpfb_driver);
}
module_init(hpfb_init);
module_exit(hpfb_cleanup_module);
MODULE_LICENSE("GPL");
| gpl-2.0 |
pranav01/android_kernel_cyanogen_msm8916-1 | arch/cris/arch-v32/mach-fs/pinmux.c | 11714 | 7770 | /*
* Allocator for I/O pins. All pins are allocated to GPIO at bootup.
* Unassigned pins and GPIO pins can be allocated to a fixed interface
* or the I/O processor instead.
*
* Copyright (c) 2004-2007 Axis Communications AB.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <pinmux.h>
#include <hwregs/pinmux_defs.h>
#undef DEBUG
#define PORT_PINS 18
#define PORTS 4
static char pins[PORTS][PORT_PINS];
static DEFINE_SPINLOCK(pinmux_lock);
static void crisv32_pinmux_set(int port);
int crisv32_pinmux_init(void)
{
static int initialized;
if (!initialized) {
reg_pinmux_rw_pa pa = REG_RD(pinmux, regi_pinmux, rw_pa);
initialized = 1;
REG_WR_INT(pinmux, regi_pinmux, rw_hwprot, 0);
pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
REG_WR(pinmux, regi_pinmux, rw_pa, pa);
crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
}
return 0;
}
int
crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
{
int i;
unsigned long flags;
crisv32_pinmux_init();
if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
for (i = first_pin; i <= last_pin; i++) {
if ((pins[port][i] != pinmux_none)
&& (pins[port][i] != pinmux_gpio)
&& (pins[port][i] != mode)) {
spin_unlock_irqrestore(&pinmux_lock, flags);
#ifdef DEBUG
panic("Pinmux alloc failed!\n");
#endif
return -EPERM;
}
}
for (i = first_pin; i <= last_pin; i++)
pins[port][i] = mode;
crisv32_pinmux_set(port);
spin_unlock_irqrestore(&pinmux_lock, flags);
return 0;
}
int crisv32_pinmux_alloc_fixed(enum fixed_function function)
{
int ret = -EINVAL;
char saved[sizeof pins];
unsigned long flags;
spin_lock_irqsave(&pinmux_lock, flags);
/* Save internal data for recovery */
memcpy(saved, pins, sizeof pins);
crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */
reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
switch (function) {
case pinmux_ser1:
ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
hwprot.ser1 = regk_pinmux_yes;
break;
case pinmux_ser2:
ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
hwprot.ser2 = regk_pinmux_yes;
break;
case pinmux_ser3:
ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
hwprot.ser3 = regk_pinmux_yes;
break;
case pinmux_sser0:
ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.sser0 = regk_pinmux_yes;
break;
case pinmux_sser1:
ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
hwprot.sser1 = regk_pinmux_yes;
break;
case pinmux_ata0:
ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
hwprot.ata0 = regk_pinmux_yes;
break;
case pinmux_ata1:
ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
hwprot.ata1 = regk_pinmux_yes;
break;
case pinmux_ata2:
ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata3:
ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata:
ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
hwprot.ata = regk_pinmux_yes;
break;
case pinmux_eth1:
ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
hwprot.eth1 = regk_pinmux_yes;
hwprot.eth1_mgm = regk_pinmux_yes;
break;
case pinmux_timer:
ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.timer = regk_pinmux_yes;
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
}
if (!ret)
REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
else
memcpy(pins, saved, sizeof pins);
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
}
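/*
 * Push the software pin state out to the hardware: build one bitmask
 * of pins routed to GPIO and one of pins routed to the I/O processor
 * for the given port; pins in neither mask are left to their fixed
 * function.
 */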
void crisv32_pinmux_set(int port)
{
int i;
int gpio_val = 0;
int iop_val = 0;
for (i = 0; i < PORT_PINS; i++) {
if (pins[port][i] == pinmux_gpio)
gpio_val |= (1 << i);
else if (pins[port][i] == pinmux_iop)
iop_val |= (1 << i);
}
REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_pb_gio + 8 * port,
gpio_val);
REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_pb_iop + 8 * port,
iop_val);
#ifdef DEBUG
crisv32_pinmux_dump();
#endif
}
int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
{
int i;
unsigned long flags;
crisv32_pinmux_init();
if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
for (i = first_pin; i <= last_pin; i++)
pins[port][i] = pinmux_none;
crisv32_pinmux_set(port);
spin_unlock_irqrestore(&pinmux_lock, flags);
return 0;
}
int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
{
int ret = -EINVAL;
char saved[sizeof pins];
unsigned long flags;
spin_lock_irqsave(&pinmux_lock, flags);
/* Save internal data for recovery */
memcpy(saved, pins, sizeof pins);
crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */
reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
switch (function) {
case pinmux_ser1:
ret = crisv32_pinmux_dealloc(PORT_C, 4, 7);
hwprot.ser1 = regk_pinmux_no;
break;
case pinmux_ser2:
ret = crisv32_pinmux_dealloc(PORT_C, 8, 11);
hwprot.ser2 = regk_pinmux_no;
break;
case pinmux_ser3:
ret = crisv32_pinmux_dealloc(PORT_C, 12, 15);
hwprot.ser3 = regk_pinmux_no;
break;
case pinmux_sser0:
ret = crisv32_pinmux_dealloc(PORT_C, 0, 3);
ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.sser0 = regk_pinmux_no;
break;
case pinmux_sser1:
ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
hwprot.sser1 = regk_pinmux_no;
break;
case pinmux_ata0:
ret = crisv32_pinmux_dealloc(PORT_D, 5, 7);
ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17);
hwprot.ata0 = regk_pinmux_no;
break;
case pinmux_ata1:
ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17);
hwprot.ata1 = regk_pinmux_no;
break;
case pinmux_ata2:
ret = crisv32_pinmux_dealloc(PORT_C, 11, 15);
ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata3:
ret = crisv32_pinmux_dealloc(PORT_C, 8, 10);
ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata:
ret = crisv32_pinmux_dealloc(PORT_B, 0, 15);
ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15);
hwprot.ata = regk_pinmux_no;
break;
case pinmux_eth1:
ret = crisv32_pinmux_dealloc(PORT_E, 0, 17);
hwprot.eth1 = regk_pinmux_no;
hwprot.eth1_mgm = regk_pinmux_no;
break;
case pinmux_timer:
ret = crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.timer = regk_pinmux_no;
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
}
if (!ret)
REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
else
memcpy(pins, saved, sizeof pins);
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
}
void crisv32_pinmux_dump(void)
{
int i, j;
crisv32_pinmux_init();
for (i = 0; i < PORTS; i++) {
printk(KERN_DEBUG "Port %c\n", 'B' + i);
for (j = 0; j < PORT_PINS; j++)
printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]);
}
}
__initcall(crisv32_pinmux_init);
| gpl-2.0 |
calixtolinux/Calixto-AM335x-Linux3.2-Versa-EVM-V1 | arch/cris/arch-v32/mach-fs/pinmux.c | 11714 | 7770 | /*
* Allocator for I/O pins. All pins are allocated to GPIO at bootup.
* Unassigned pins and GPIO pins can be allocated to a fixed interface
* or the I/O processor instead.
*
* Copyright (c) 2004-2007 Axis Communications AB.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <pinmux.h>
#include <hwregs/pinmux_defs.h>
#undef DEBUG
#define PORT_PINS 18
#define PORTS 4
static char pins[PORTS][PORT_PINS];
static DEFINE_SPINLOCK(pinmux_lock);
static void crisv32_pinmux_set(int port);
int crisv32_pinmux_init(void)
{
static int initialized;
if (!initialized) {
reg_pinmux_rw_pa pa = REG_RD(pinmux, regi_pinmux, rw_pa);
initialized = 1;
REG_WR_INT(pinmux, regi_pinmux, rw_hwprot, 0);
pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
REG_WR(pinmux, regi_pinmux, rw_pa, pa);
crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
}
return 0;
}
int
crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
{
int i;
unsigned long flags;
crisv32_pinmux_init();
if (port >= PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
for (i = first_pin; i <= last_pin; i++) {
if ((pins[port][i] != pinmux_none)
&& (pins[port][i] != pinmux_gpio)
&& (pins[port][i] != mode)) {
spin_unlock_irqrestore(&pinmux_lock, flags);
#ifdef DEBUG
panic("Pinmux alloc failed!\n");
#endif
return -EPERM;
}
}
for (i = first_pin; i <= last_pin; i++)
pins[port][i] = mode;
crisv32_pinmux_set(port);
spin_unlock_irqrestore(&pinmux_lock, flags);
return 0;
}
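/*
 * Illustrative sketch (hypothetical helper, not part of this driver):
 * claiming a pin range for the I/O processor and handling the -EPERM
 * returned when another interface already owns one of the pins.
 */
static int __maybe_unused example_claim_iop_pins(void)
{
int ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_iop);
if (ret == -EPERM)
printk(KERN_WARNING "pins C4-C7 already in use\n");
return ret;
}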
int crisv32_pinmux_alloc_fixed(enum fixed_function function)
{
int ret = -EINVAL;
char saved[sizeof pins];
unsigned long flags;
spin_lock_irqsave(&pinmux_lock, flags);
/* Save internal data for recovery */
memcpy(saved, pins, sizeof pins);
crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */
reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
switch (function) {
case pinmux_ser1:
ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
hwprot.ser1 = regk_pinmux_yes;
break;
case pinmux_ser2:
ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
hwprot.ser2 = regk_pinmux_yes;
break;
case pinmux_ser3:
ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
hwprot.ser3 = regk_pinmux_yes;
break;
case pinmux_sser0:
ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.sser0 = regk_pinmux_yes;
break;
case pinmux_sser1:
ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
hwprot.sser1 = regk_pinmux_yes;
break;
case pinmux_ata0:
ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
hwprot.ata0 = regk_pinmux_yes;
break;
case pinmux_ata1:
ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
hwprot.ata1 = regk_pinmux_yes;
break;
case pinmux_ata2:
ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
hwprot.ata2 = regk_pinmux_yes;
break;
case pinmux_ata3:
ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
hwprot.ata3 = regk_pinmux_yes;
break;
case pinmux_ata:
ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
hwprot.ata = regk_pinmux_yes;
break;
case pinmux_eth1:
ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
hwprot.eth1 = regk_pinmux_yes;
hwprot.eth1_mgm = regk_pinmux_yes;
break;
case pinmux_timer:
ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
hwprot.timer = regk_pinmux_yes;
break;
}
if (!ret)
REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
else
memcpy(pins, saved, sizeof pins);
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
}
void crisv32_pinmux_set(int port)
{
int i;
int gpio_val = 0;
int iop_val = 0;
for (i = 0; i < PORT_PINS; i++) {
if (pins[port][i] == pinmux_gpio)
gpio_val |= (1 << i);
else if (pins[port][i] == pinmux_iop)
iop_val |= (1 << i);
}
REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_pb_gio + 8 * port,
gpio_val);
REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_pb_iop + 8 * port,
iop_val);
#ifdef DEBUG
crisv32_pinmux_dump();
#endif
}
int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
{
int i;
unsigned long flags;
crisv32_pinmux_init();
if (port >= PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
for (i = first_pin; i <= last_pin; i++)
pins[port][i] = pinmux_none;
crisv32_pinmux_set(port);
spin_unlock_irqrestore(&pinmux_lock, flags);
return 0;
}
int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
{
int ret = -EINVAL;
char saved[sizeof pins];
unsigned long flags;
spin_lock_irqsave(&pinmux_lock, flags);
/* Save internal data for recovery */
memcpy(saved, pins, sizeof pins);
crisv32_pinmux_init(); /* Must be done before we read rw_hwprot */
reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot);
switch (function) {
case pinmux_ser1:
ret = crisv32_pinmux_dealloc(PORT_C, 4, 7);
hwprot.ser1 = regk_pinmux_no;
break;
case pinmux_ser2:
ret = crisv32_pinmux_dealloc(PORT_C, 8, 11);
hwprot.ser2 = regk_pinmux_no;
break;
case pinmux_ser3:
ret = crisv32_pinmux_dealloc(PORT_C, 12, 15);
hwprot.ser3 = regk_pinmux_no;
break;
case pinmux_sser0:
ret = crisv32_pinmux_dealloc(PORT_C, 0, 3);
ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.sser0 = regk_pinmux_no;
break;
case pinmux_sser1:
ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
hwprot.sser1 = regk_pinmux_no;
break;
case pinmux_ata0:
ret = crisv32_pinmux_dealloc(PORT_D, 5, 7);
ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17);
hwprot.ata0 = regk_pinmux_no;
break;
case pinmux_ata1:
ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17);
hwprot.ata1 = regk_pinmux_no;
break;
case pinmux_ata2:
ret = crisv32_pinmux_dealloc(PORT_C, 11, 15);
ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3);
hwprot.ata2 = regk_pinmux_no;
break;
case pinmux_ata3:
ret = crisv32_pinmux_dealloc(PORT_C, 8, 10);
ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2);
hwprot.ata3 = regk_pinmux_no;
break;
case pinmux_ata:
ret = crisv32_pinmux_dealloc(PORT_B, 0, 15);
ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15);
hwprot.ata = regk_pinmux_no;
break;
case pinmux_eth1:
ret = crisv32_pinmux_dealloc(PORT_E, 0, 17);
hwprot.eth1 = regk_pinmux_no;
hwprot.eth1_mgm = regk_pinmux_no;
break;
case pinmux_timer:
ret = crisv32_pinmux_dealloc(PORT_C, 16, 16);
hwprot.timer = regk_pinmux_no;
break;
}
if (!ret)
REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot);
else
memcpy(pins, saved, sizeof pins);
spin_unlock_irqrestore(&pinmux_lock, flags);
return ret;
}
void crisv32_pinmux_dump(void)
{
int i, j;
crisv32_pinmux_init();
for (i = 0; i < PORTS; i++) {
printk(KERN_DEBUG "Port %c\n", 'B' + i);
for (j = 0; j < PORT_PINS; j++)
printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]);
}
}
__initcall(crisv32_pinmux_init);
| gpl-2.0 |
estiko/android_lenovo_a706_kinglplite | drivers/block/paride/ktti.c | 15554 | 2782 | /*
ktti.c (c) 1998 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
ktti.c is a low-level protocol driver for the KT Technology
parallel port adapter. This adapter is used in the "PHd"
portable hard-drives. As far as I can tell, this device
supports 4-bit mode _only_.
*/
#define KTTI_VERSION "1.0"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "paride.h"
#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
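/*
 * Worked example of j44(): with a = 0x5A and b = 0xC3,
 * ((0x5A >> 4) & 0x0f) | (0xC3 & 0xf0) = 0x05 | 0xc0 = 0xc5.
 * The low nibble of the result comes from the high half of the first
 * status read, the high nibble from the second, reassembling one data
 * byte over the adapter's 4-bit-wide status path.
 */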
/* cont = 0 - access the IDE register file
cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x10, 0x08 };
static void ktti_write_regr( PIA *pi, int cont, int regr, int val)
{ int r;
r = regr + cont_map[cont];
w0(r); w2(0xb); w2(0xa); w2(3); w2(6);
w0(val); w2(3); w0(0); w2(6); w2(0xb);
}
static int ktti_read_regr( PIA *pi, int cont, int regr )
{ int a, b, r;
r = regr + cont_map[cont];
w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
a = r1(); w2(0xc); b = r1(); w2(9); w2(0xc); w2(9);
return j44(a,b);
}
static void ktti_read_block( PIA *pi, char * buf, int count )
{ int k, a, b;
for (k=0;k<count/2;k++) {
w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
a = r1(); w2(0xc); b = r1(); w2(9);
buf[2*k] = j44(a,b);
a = r1(); w2(0xc); b = r1(); w2(9);
buf[2*k+1] = j44(a,b);
}
}
static void ktti_write_block( PIA *pi, char * buf, int count )
{ int k;
for (k=0;k<count/2;k++) {
w0(0x10); w2(0xb); w2(0xa); w2(3); w2(6);
w0(buf[2*k]); w2(3);
w0(buf[2*k+1]); w2(6);
w2(0xb);
}
}
static void ktti_connect ( PIA *pi )
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xb); w2(0xa); w0(0); w2(3); w2(6);
}
static void ktti_disconnect ( PIA *pi )
{ w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void ktti_log_adapter( PIA *pi, char * scratch, int verbose )
{ printk("%s: ktti %s, KT adapter at 0x%x, delay %d\n",
pi->device,KTTI_VERSION,pi->port,pi->delay);
}
static struct pi_protocol ktti = {
.owner = THIS_MODULE,
.name = "ktti",
.max_mode = 1,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = ktti_write_regr,
.read_regr = ktti_read_regr,
.write_block = ktti_write_block,
.read_block = ktti_read_block,
.connect = ktti_connect,
.disconnect = ktti_disconnect,
.log_adapter = ktti_log_adapter,
};
static int __init ktti_init(void)
{
return paride_register(&ktti);
}
static void __exit ktti_exit(void)
{
paride_unregister(&ktti);
}
MODULE_LICENSE("GPL");
module_init(ktti_init)
module_exit(ktti_exit)
| gpl-2.0 |
Stuxnet-Kernel/kernel_shamu | drivers/block/rbd.c | 195 | 139801 |
/*
rbd.c -- Export ceph rados objects as a Linux block device
based on drivers/block/osdblk.c:
Copyright 2009 Red Hat, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
For usage instructions, please refer to:
Documentation/ABI/testing/sysfs-bus-rbd
*/
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "rbd_types.h"
#define RBD_DEBUG /* Activate rbd_assert() calls */
/*
* The basic unit of block I/O is a sector. It is interpreted in a
* number of contexts in Linux (blk, bio, genhd), but the default is
* universally 512 bytes. These symbols are just slightly more
* meaningful than the bare numbers they represent.
*/
#define SECTOR_SHIFT 9
#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
/*
* Increment the given counter and return its updated value.
* If the counter is already 0 it will not be incremented.
* If the counter is already at its maximum value returns
* -EINVAL without updating it.
*/
static int atomic_inc_return_safe(atomic_t *v)
{
unsigned int counter;
counter = (unsigned int)__atomic_add_unless(v, 1, 0);
if (counter <= (unsigned int)INT_MAX)
return (int)counter;
atomic_dec(v);
return -EINVAL;
}
/* Decrement the counter. Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
int counter;
counter = atomic_dec_return(v);
if (counter >= 0)
return counter;
atomic_inc(v);
return -EINVAL;
}
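/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * how the two saturating helpers above pair up. A zero return from
 * atomic_inc_return_safe() means the counter was already 0 and no
 * reference was taken, so no matching put may be issued.
 */
static inline bool __maybe_unused example_try_get_ref(atomic_t *ref)
{
return atomic_inc_return_safe(ref) > 0; /* true: a put is required later */
}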
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
#define RBD_MAX_SNAP_NAME_LEN \
(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
#define RBD_SNAP_HEAD_NAME "-"
#define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX 64
#define RBD_OBJ_PREFIX_LEN_MAX 64
/* Feature bits */
#define RBD_FEATURE_LAYERING (1<<0)
#define RBD_FEATURE_STRIPINGV2 (1<<1)
#define RBD_FEATURES_ALL \
(RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
/* Features supported by this (client software) implementation. */
#define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
* MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
* enough to hold all possible device names.
*/
#define DEV_NAME_LEN 32
#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
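/*
 * Worked arithmetic for the bound above: with a 32-bit int,
 * (5 * sizeof (int)) / 2 + 1 = (5 * 4) / 2 + 1 = 11 characters,
 * covering the 10 digits of INT_MAX (2147483647) plus a NUL byte.
 */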
/*
* block device image metadata (in-memory version)
*/
struct rbd_image_header {
/* These six fields never change for a given rbd image */
char *object_prefix;
__u8 obj_order;
__u8 crypt_type;
__u8 comp_type;
u64 stripe_unit;
u64 stripe_count;
u64 features; /* Might be changeable someday? */
/* The remaining fields need to be updated occasionally */
u64 image_size;
struct ceph_snap_context *snapc;
char *snap_names; /* format 1 only */
u64 *snap_sizes; /* format 1 only */
};
/*
* An rbd image specification.
*
* The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
* identify an image. Each rbd_dev structure includes a pointer to
* an rbd_spec structure that encapsulates this identity.
*
* Each of the id's in an rbd_spec has an associated name. For a
* user-mapped image, the names are supplied and the id's associated
* with them are looked up. For a layered image, a parent image is
* defined by the tuple, and the names are looked up.
*
* An rbd_dev structure contains a parent_spec pointer which is
* non-null if the image it represents is a child in a layered
* image. This pointer will refer to the rbd_spec structure used
* by the parent rbd_dev for its own identity (i.e., the structure
* is shared between the parent and child).
*
* Since these structures are populated once, during the discovery
* phase of image construction, they are effectively immutable so
* we make no effort to synchronize access to them.
*
* Note that code herein does not assume the image name is known (it
* could be a null pointer).
*/
struct rbd_spec {
u64 pool_id;
const char *pool_name;
const char *image_id;
const char *image_name;
u64 snap_id;
const char *snap_name;
struct kref kref;
};
/*
* an instance of the client. multiple devices may share an rbd client.
*/
struct rbd_client {
struct ceph_client *client;
struct kref kref;
struct list_head node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
#define BAD_WHICH U32_MAX /* Good which or bad which, which? */
struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
enum obj_req_flags {
OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
const char *object_name;
u64 offset; /* object start byte */
u64 length; /* bytes from offset */
unsigned long flags;
/*
* An object request associated with an image will have its
* img_data flag set; a standalone object request will not.
*
* A standalone object request will have which == BAD_WHICH
* and a null obj_request pointer.
*
* An object request initiated in support of a layered image
* object (to check for its existence before a write) will
* have which == BAD_WHICH and a non-null obj_request pointer.
*
* Finally, an object request for rbd image data will have
* which != BAD_WHICH, and will have a non-null img_request
* pointer. The value of which will be in the range
* 0..(img_request->obj_request_count-1).
*/
union {
struct rbd_obj_request *obj_request; /* STAT op */
struct {
struct rbd_img_request *img_request;
u64 img_offset;
/* links for img_request->obj_requests list */
struct list_head links;
};
};
u32 which; /* posn image request list */
enum obj_request_type type;
union {
struct bio *bio_list;
struct {
struct page **pages;
u32 page_count;
};
};
struct page **copyup_pages;
u32 copyup_page_count;
struct ceph_osd_request *osd_req;
u64 xferred; /* bytes transferred */
int result;
rbd_obj_callback_t callback;
struct completion completion;
struct kref kref;
};
enum img_req_flags {
IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
struct rbd_device *rbd_dev;
u64 offset; /* starting image byte offset */
u64 length; /* byte count from offset */
unsigned long flags;
union {
u64 snap_id; /* for reads */
struct ceph_snap_context *snapc; /* for writes */
};
union {
struct request *rq; /* block request */
struct rbd_obj_request *obj_request; /* obj req initiator */
};
struct page **copyup_pages;
u32 copyup_page_count;
spinlock_t completion_lock;/* protects next_completion */
u32 next_completion;
rbd_img_callback_t callback;
u64 xferred;/* aggregate bytes transferred */
int result; /* first nonzero obj_request result */
u32 obj_request_count;
struct list_head obj_requests; /* rbd_obj_request structs */
struct kref kref;
};
#define for_each_obj_request(ireq, oreq) \
list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
struct rbd_mapping {
u64 size;
u64 features;
bool read_only;
};
/*
* a single device
*/
struct rbd_device {
int dev_id; /* blkdev unique id */
int major; /* blkdev assigned major */
struct gendisk *disk; /* blkdev's gendisk and rq */
u32 image_format; /* Either 1 or 2 */
struct rbd_client *rbd_client;
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
spinlock_t lock; /* queue, flags, open_count */
struct rbd_image_header header;
unsigned long flags; /* possibly lock protected */
struct rbd_spec *spec;
char *header_name;
struct ceph_file_layout layout;
struct ceph_osd_event *watch_event;
struct rbd_obj_request *watch_request;
struct rbd_spec *parent_spec;
u64 parent_overlap;
atomic_t parent_ref;
struct rbd_device *parent;
/* protects updating the header */
struct rw_semaphore header_rwsem;
struct rbd_mapping mapping;
struct list_head node;
/* sysfs related */
struct device dev;
unsigned long open_count; /* protected by lock */
};
/*
* Flag bits for rbd_dev->flags. If atomicity is required,
* rbd_dev->lock is used to protect access.
*
* Currently, only the "removing" flag (which is coupled with the
* "open_count" field) requires atomic access.
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
static LIST_HEAD(rbd_dev_list); /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);
static LIST_HEAD(rbd_client_list); /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
/* Slab caches for frequently-allocated structures */
static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
static struct kmem_cache *rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);
static void rbd_dev_device_release(struct device *dev);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
__ATTR_NULL
};
static struct bus_type rbd_bus_type = {
.name = "rbd",
.bus_attrs = rbd_bus_attrs,
};
static void rbd_root_dev_release(struct device *dev)
{
}
static struct device rbd_root_dev = {
.init_name = "rbd",
.release = rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (!rbd_dev)
printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
else if (rbd_dev->disk)
printk(KERN_WARNING "%s: %s: %pV\n",
RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
else if (rbd_dev->spec && rbd_dev->spec->image_name)
printk(KERN_WARNING "%s: image %s: %pV\n",
RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
else if (rbd_dev->spec && rbd_dev->spec->image_id)
printk(KERN_WARNING "%s: id %s: %pV\n",
RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
else /* punt */
printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
RBD_DRV_NAME, rbd_dev, &vaf);
va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr) \
do { \
if (unlikely(!(expr))) { \
printk(KERN_ERR "\nAssertion failure in %s() " \
"at line %d:\n\n" \
"\trbd_assert(%s);\n\n", \
__func__, __LINE__, #expr); \
BUG(); \
} \
} while (0)
#else /* !RBD_DEBUG */
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
bool removing = false;
if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
return -EROFS;
spin_lock_irq(&rbd_dev->lock);
if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
removing = true;
else
rbd_dev->open_count++;
spin_unlock_irq(&rbd_dev->lock);
if (removing)
return -ENOENT;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
(void) get_device(&rbd_dev->dev);
set_device_ro(bdev, rbd_dev->mapping.read_only);
mutex_unlock(&ctl_mutex);
return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
struct rbd_device *rbd_dev = disk->private_data;
unsigned long open_count_before;
spin_lock_irq(&rbd_dev->lock);
open_count_before = rbd_dev->open_count--;
spin_unlock_irq(&rbd_dev->lock);
rbd_assert(open_count_before > 0);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
put_device(&rbd_dev->dev);
mutex_unlock(&ctl_mutex);
}
static const struct block_device_operations rbd_bd_ops = {
.owner = THIS_MODULE,
.open = rbd_open,
.release = rbd_release,
};
/*
* Initialize an rbd client instance. Success or not, this function
* consumes ceph_opts.
*/
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
struct rbd_client *rbdc;
int ret = -ENOMEM;
dout("%s:\n", __func__);
rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
if (!rbdc)
goto out_opt;
kref_init(&rbdc->kref);
INIT_LIST_HEAD(&rbdc->node);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
if (IS_ERR(rbdc->client))
goto out_mutex;
ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
ret = ceph_open_session(rbdc->client);
if (ret < 0)
goto out_err;
spin_lock(&rbd_client_list_lock);
list_add_tail(&rbdc->node, &rbd_client_list);
spin_unlock(&rbd_client_list_lock);
mutex_unlock(&ctl_mutex);
dout("%s: rbdc %p\n", __func__, rbdc);
return rbdc;
out_err:
ceph_destroy_client(rbdc->client);
out_mutex:
mutex_unlock(&ctl_mutex);
kfree(rbdc);
out_opt:
if (ceph_opts)
ceph_destroy_options(ceph_opts);
dout("%s: error %d\n", __func__, ret);
return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
kref_get(&rbdc->kref);
return rbdc;
}
/*
* Find a ceph client with specific addr and configuration. If
* found, bump its reference count.
*/
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
struct rbd_client *client_node;
bool found = false;
if (ceph_opts->flags & CEPH_OPT_NOSHARE)
return NULL;
spin_lock(&rbd_client_list_lock);
list_for_each_entry(client_node, &rbd_client_list, node) {
if (!ceph_compare_options(ceph_opts, client_node->client)) {
__rbd_get_client(client_node);
found = true;
break;
}
}
spin_unlock(&rbd_client_list_lock);
return found ? client_node : NULL;
}
/*
* mount options
*/
enum {
Opt_last_int,
/* int args above */
Opt_last_string,
/* string args above */
Opt_read_only,
Opt_read_write,
/* Boolean args above */
Opt_last_bool,
};
static match_table_t rbd_opts_tokens = {
/* int args above */
/* string args above */
{Opt_read_only, "read_only"},
{Opt_read_only, "ro"}, /* Alternate spelling */
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
/* Boolean args above */
{-1, NULL}
};
struct rbd_options {
bool read_only;
};
#define RBD_READ_ONLY_DEFAULT false
static int parse_rbd_opts_token(char *c, void *private)
{
struct rbd_options *rbd_opts = private;
substring_t argstr[MAX_OPT_ARGS];
int token, intval, ret;
token = match_token(c, rbd_opts_tokens, argstr);
if (token < 0)
return -EINVAL;
if (token < Opt_last_int) {
ret = match_int(&argstr[0], &intval);
if (ret < 0) {
pr_err("bad mount option arg (not int) "
"at '%s'\n", c);
return ret;
}
dout("got int token %d val %d\n", token, intval);
} else if (token > Opt_last_int && token < Opt_last_string) {
dout("got string token %d val %s\n", token,
argstr[0].from);
} else if (token > Opt_last_string && token < Opt_last_bool) {
dout("got Boolean token %d\n", token);
} else {
dout("got token %d\n", token);
}
switch (token) {
case Opt_read_only:
rbd_opts->read_only = true;
break;
case Opt_read_write:
rbd_opts->read_only = false;
break;
default:
rbd_assert(false);
break;
}
return 0;
}
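/*
 * Illustrative usage (hypothetical, shown for clarity): this callback
 * is handed to the ceph option parser and invoked once per
 * comma-separated token, with rbd_opts passed through as 'private':
 *
 *	struct rbd_options opts = { .read_only = RBD_READ_ONLY_DEFAULT };
 *	parse_rbd_opts_token("ro", &opts); // opts.read_only is now true
 */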
/*
* Get a ceph client with specific addr and configuration, if one does
* not exist create it. Either way, ceph_opts is consumed by this
* function.
*/
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
struct rbd_client *rbdc;
rbdc = rbd_client_find(ceph_opts);
if (rbdc) /* using an existing client */
ceph_destroy_options(ceph_opts);
else
rbdc = rbd_client_create(ceph_opts);
return rbdc;
}
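/*
 * Illustrative ownership contract (hypothetical caller): ceph_opts is
 * consumed on success *and* failure, so it must not be touched after
 * the call:
 *
 *	rbdc = rbd_get_client(ceph_opts);
 *	ceph_opts = NULL; // owned by rbdc or already destroyed
 *	if (IS_ERR(rbdc))
 *		return PTR_ERR(rbdc);
 */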
/*
* Destroy ceph client
*
* Caller must hold rbd_client_list_lock.
*/
static void rbd_client_release(struct kref *kref)
{
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("%s: rbdc %p\n", __func__, rbdc);
spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc);
}
/*
* Drop reference to ceph client node. If it's not referenced anymore, release
* it.
*/
static void rbd_put_client(struct rbd_client *rbdc)
{
if (rbdc)
kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
size_t size;
u32 snap_count;
/* The header has to start with the magic rbd header text */
if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
return false;
/* The bio layer requires at least sector-sized I/O */
if (ondisk->options.order < SECTOR_SHIFT)
return false;
/* If we use u64 in a few spots we may be able to loosen this */
if (ondisk->options.order > 8 * sizeof (int) - 1)
return false;
/*
* The size of a snapshot header has to fit in a size_t, and
* that limits the number of snapshots.
*/
snap_count = le32_to_cpu(ondisk->snap_count);
size = SIZE_MAX - sizeof (struct ceph_snap_context);
if (snap_count > size / sizeof (__le64))
return false;
/*
* Not only that, but the size of the entire snapshot
* header must also be representable in a size_t.
*/
size -= snap_count * sizeof (__le64);
if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
return false;
return true;
}
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
struct rbd_image_header_ondisk *ondisk)
{
struct rbd_image_header *header = &rbd_dev->header;
bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
u64 *snap_sizes = NULL;
u32 snap_count;
size_t size;
int ret = -ENOMEM;
u32 i;
/* Allocate this now to avoid having to handle failure below */
if (first_time) {
size_t len;
len = strnlen(ondisk->object_prefix,
sizeof (ondisk->object_prefix));
object_prefix = kmalloc(len + 1, GFP_KERNEL);
if (!object_prefix)
return -ENOMEM;
memcpy(object_prefix, ondisk->object_prefix, len);
object_prefix[len] = '\0';
}
/* Allocate the snapshot context and fill it in */
snap_count = le32_to_cpu(ondisk->snap_count);
snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc)
goto out_err;
snapc->seq = le64_to_cpu(ondisk->snap_seq);
if (snap_count) {
struct rbd_image_snap_ondisk *snaps;
u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
/* We'll keep a copy of the snapshot names... */
if (snap_names_len > (u64)SIZE_MAX)
goto out_2big;
snap_names = kmalloc(snap_names_len, GFP_KERNEL);
if (!snap_names)
goto out_err;
/* ...as well as the array of their sizes. */
size = snap_count * sizeof (*header->snap_sizes);
snap_sizes = kmalloc(size, GFP_KERNEL);
if (!snap_sizes)
goto out_err;
/*
* Copy the names, and fill in each snapshot's id
* and size.
*
* Note that rbd_dev_v1_header_info() guarantees the
* ondisk buffer we're working with has
* snap_names_len bytes beyond the end of the
* snapshot id array, this memcpy() is safe.
*/
memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
snaps = ondisk->snaps;
for (i = 0; i < snap_count; i++) {
snapc->snaps[i] = le64_to_cpu(snaps[i].id);
snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
}
}
/* We won't fail any more, fill in the header */
down_write(&rbd_dev->header_rwsem);
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
header->crypt_type = ondisk->options.crypt_type;
header->comp_type = ondisk->options.comp_type;
/* The rest aren't used for format 1 images */
header->stripe_unit = 0;
header->stripe_count = 0;
header->features = 0;
} else {
ceph_put_snap_context(header->snapc);
kfree(header->snap_names);
kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
header->image_size = le64_to_cpu(ondisk->image_size);
header->snapc = snapc;
header->snap_names = snap_names;
header->snap_sizes = snap_sizes;
/* Make sure mapping size is consistent with header info */
if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
if (rbd_dev->mapping.size != header->image_size)
rbd_dev->mapping.size = header->image_size;
up_write(&rbd_dev->header_rwsem);
return 0;
out_2big:
ret = -EIO;
out_err:
kfree(snap_sizes);
kfree(snap_names);
ceph_put_snap_context(snapc);
kfree(object_prefix);
return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
const char *snap_name;
rbd_assert(which < rbd_dev->header.snapc->num_snaps);
/* Skip over names until we find the one we are looking for */
snap_name = rbd_dev->header.snap_names;
while (which--)
snap_name += strlen(snap_name) + 1;
return kstrdup(snap_name, GFP_KERNEL);
}
/*
* Snapshot id comparison function for use with qsort()/bsearch().
* Note that result is for snapshots in *descending* order.
*/
static int snapid_compare_reverse(const void *s1, const void *s2)
{
u64 snap_id1 = *(u64 *)s1;
u64 snap_id2 = *(u64 *)s2;
if (snap_id1 < snap_id2)
return 1;
return snap_id1 == snap_id2 ? 0 : -1;
}
/*
* Search a snapshot context to see if the given snapshot id is
* present.
*
* Returns the position of the snapshot id in the array if it's found,
* or BAD_SNAP_INDEX otherwise.
*
* Note: The snapshot array is kept sorted (by the osd) in
* reverse order, highest snapshot id first.
*/
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
struct ceph_snap_context *snapc = rbd_dev->header.snapc;
u64 *found;
found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
sizeof (snap_id), snapid_compare_reverse);
return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
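/*
 * Worked example: with snapc->snaps = { 12, 7, 3 } (descending, as
 * the osd keeps it), a lookup of snap_id 7 steers bsearch() via
 * snapid_compare_reverse() to index 1, while snap_id 5 matches no
 * entry and yields BAD_SNAP_INDEX.
 */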
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
{
u32 which;
const char *snap_name;
which = rbd_dev_snap_index(rbd_dev, snap_id);
if (which == BAD_SNAP_INDEX)
return ERR_PTR(-ENOENT);
snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
if (snap_id == CEPH_NOSNAP)
return RBD_SNAP_HEAD_NAME;
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (rbd_dev->image_format == 1)
return rbd_dev_v1_snap_name(rbd_dev, snap_id);
return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_size)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (snap_id == CEPH_NOSNAP) {
*snap_size = rbd_dev->header.image_size;
} else if (rbd_dev->image_format == 1) {
u32 which;
which = rbd_dev_snap_index(rbd_dev, snap_id);
if (which == BAD_SNAP_INDEX)
return -ENOENT;
*snap_size = rbd_dev->header.snap_sizes[which];
} else {
u64 size = 0;
int ret;
ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
if (ret)
return ret;
*snap_size = size;
}
return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (snap_id == CEPH_NOSNAP) {
*snap_features = rbd_dev->header.features;
} else if (rbd_dev->image_format == 1) {
*snap_features = 0; /* No features for format 1 */
} else {
u64 features = 0;
int ret;
ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
if (ret)
return ret;
*snap_features = features;
}
return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
u64 snap_id = rbd_dev->spec->snap_id;
u64 size = 0;
u64 features = 0;
int ret;
ret = rbd_snap_size(rbd_dev, snap_id, &size);
if (ret)
return ret;
ret = rbd_snap_features(rbd_dev, snap_id, &features);
if (ret)
return ret;
rbd_dev->mapping.size = size;
rbd_dev->mapping.features = features;
return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
rbd_dev->mapping.size = 0;
rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
char *name;
u64 segment;
int ret;
char *name_format;
name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
if (!name)
return NULL;
segment = offset >> rbd_dev->header.obj_order;
name_format = "%s.%012llx";
if (rbd_dev->image_format == 2)
name_format = "%s.%016llx";
ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
rbd_dev->header.object_prefix, segment);
if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
kfree(name);
name = NULL;
}
return name;
}
static void rbd_segment_name_free(const char *name)
{
/* The explicit cast here is needed to drop the const qualifier */
kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
return offset & (segment_size - 1);
}
static u64 rbd_segment_length(struct rbd_device *rbd_dev,
u64 offset, u64 length)
{
u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
offset &= segment_size - 1;
rbd_assert(length <= U64_MAX - offset);
if (offset + length > segment_size)
length = segment_size - offset;
return length;
}
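/*
 * Worked example of the segment arithmetic: with obj_order = 22
 * (4 MiB objects), image offset 5 MiB lands in segment 5 MiB >> 22 = 1
 * at rbd_segment_offset() = 1 MiB, and a 4 MiB request starting there
 * is clipped by rbd_segment_length() to the 3 MiB left in the segment.
 */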
/*
* returns the size of an object in the image
*/
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
return 1 << header->obj_order;
}
/*
* bio helpers
*/
static void bio_chain_put(struct bio *chain)
{
struct bio *tmp;
while (chain) {
tmp = chain;
chain = chain->bi_next;
bio_put(tmp);
}
}
/*
* zeros a bio chain, starting at specific offset
*/
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
struct bio_vec *bv;
unsigned long flags;
void *buf;
int i;
int pos = 0;
while (chain) {
bio_for_each_segment(bv, chain, i) {
if (pos + bv->bv_len > start_ofs) {
int remainder = max(start_ofs - pos, 0);
buf = bvec_kmap_irq(bv, &flags);
memset(buf + remainder, 0,
bv->bv_len - remainder);
flush_dcache_page(bv->bv_page);
bvec_kunmap_irq(buf, &flags);
}
pos += bv->bv_len;
}
chain = chain->bi_next;
}
}
/*
* similar to zero_bio_chain(), zeros data defined by a page array,
* starting at the given byte offset from the start of the array and
* continuing up to the given end offset. The pages array is
* assumed to be big enough to hold all bytes up to the end.
*/
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
struct page **page = &pages[offset >> PAGE_SHIFT];
rbd_assert(end > offset);
rbd_assert(end - offset <= (u64)SIZE_MAX);
while (offset < end) {
size_t page_offset;
size_t length;
unsigned long flags;
void *kaddr;
page_offset = (size_t)(offset & ~PAGE_MASK);
length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
local_irq_save(flags);
kaddr = kmap_atomic(*page);
memset(kaddr + page_offset, 0, length);
flush_dcache_page(*page);
kunmap_atomic(kaddr);
local_irq_restore(flags);
offset += length;
page++;
}
}
/*
* Clone a portion of a bio, starting at the given byte offset
* and continuing for the number of bytes indicated.
*/
static struct bio *bio_clone_range(struct bio *bio_src,
unsigned int offset,
unsigned int len,
gfp_t gfpmask)
{
struct bio_vec *bv;
unsigned int resid;
unsigned short idx;
unsigned int voff;
unsigned short end_idx;
unsigned short vcnt;
struct bio *bio;
/* Handle the easy case for the caller */
if (!offset && len == bio_src->bi_size)
return bio_clone(bio_src, gfpmask);
if (WARN_ON_ONCE(!len))
return NULL;
if (WARN_ON_ONCE(len > bio_src->bi_size))
return NULL;
if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
return NULL;
/* Find first affected segment... */
resid = offset;
bio_for_each_segment(bv, bio_src, idx) {
if (resid < bv->bv_len)
break;
resid -= bv->bv_len;
}
voff = resid;
/* ...and the last affected segment */
resid += len;
__bio_for_each_segment(bv, bio_src, end_idx, idx) {
if (resid <= bv->bv_len)
break;
resid -= bv->bv_len;
}
vcnt = end_idx - idx + 1;
/* Build the clone */
bio = bio_alloc(gfpmask, (unsigned int) vcnt);
if (!bio)
return NULL; /* ENOMEM */
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
bio->bi_rw = bio_src->bi_rw;
bio->bi_flags |= 1 << BIO_CLONED;
/*
* Copy over our part of the bio_vec, then update the first
* and last (or only) entries.
*/
memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
vcnt * sizeof (struct bio_vec));
bio->bi_io_vec[0].bv_offset += voff;
if (vcnt > 1) {
bio->bi_io_vec[0].bv_len -= voff;
bio->bi_io_vec[vcnt - 1].bv_len = resid;
} else {
bio->bi_io_vec[0].bv_len = len;
}
bio->bi_vcnt = vcnt;
bio->bi_size = len;
bio->bi_idx = 0;
return bio;
}
/*
* Clone a portion of a bio chain, starting at the given byte offset
* into the first bio in the source chain and continuing for the
* number of bytes indicated. The result is another bio chain of
* exactly the given length, or a null pointer on error.
*
* The bio_src and offset parameters are both in-out. On entry they
* refer to the first source bio and the offset into that bio where
* the start of data to be cloned is located.
*
* On return, bio_src is updated to refer to the bio in the source
* chain that contains the first un-cloned byte, and *offset will
* contain the offset of that byte within that bio.
*/
static struct bio *bio_chain_clone_range(struct bio **bio_src,
unsigned int *offset,
unsigned int len,
gfp_t gfpmask)
{
struct bio *bi = *bio_src;
unsigned int off = *offset;
struct bio *chain = NULL;
struct bio **end;
/* Build up a chain of clone bios up to the limit */
if (!bi || off >= bi->bi_size || !len)
return NULL; /* Nothing to clone */
end = &chain;
while (len) {
unsigned int bi_size;
struct bio *bio;
if (!bi) {
rbd_warn(NULL, "bio_chain exhausted with %u left", len);
goto out_err; /* EINVAL; ran out of bio's */
}
bi_size = min_t(unsigned int, bi->bi_size - off, len);
bio = bio_clone_range(bi, off, bi_size, gfpmask);
if (!bio)
goto out_err; /* ENOMEM */
*end = bio;
end = &bio->bi_next;
off += bi_size;
if (off == bi->bi_size) {
bi = bi->bi_next;
off = 0;
}
len -= bi_size;
}
*bio_src = bi;
*offset = off;
return chain;
out_err:
bio_chain_put(chain);
return NULL;
}
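/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * consuming a source chain in fixed-size pieces via the in-out
 * bio_src/offset contract described above.
 */
static __maybe_unused struct bio *
example_clone_next_piece(struct bio **src, unsigned int *off,
unsigned int piece_len)
{
/* On return, *src and *off identify the first un-cloned byte. */
return bio_chain_clone_range(src, off, piece_len, GFP_NOIO);
}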
/*
* The default/initial value for all object request flags is 0. For
* each flag, once its value is set to 1 it is never reset to 0
* again.
*/
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
struct rbd_device *rbd_dev;
rbd_dev = obj_request->img_request->rbd_dev;
rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
obj_request);
}
}
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
smp_mb();
return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}
static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
struct rbd_device *rbd_dev = NULL;
if (obj_request_img_data_test(obj_request))
rbd_dev = obj_request->img_request->rbd_dev;
rbd_warn(rbd_dev, "obj_request %p already marked done\n",
obj_request);
}
}
static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
smp_mb();
return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
* This sets the KNOWN flag after (possibly) setting the EXISTS
* flag. The latter is set based on the "exists" value provided.
*
* Note that for our purposes once an object exists it never goes
* away again. It's possible that the response from two existence
* checks are separated by the creation of the target object, and
* the first ("doesn't exist") response arrives *after* the second
* ("does exist"). In that case we ignore the second one.
*/
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
bool exists)
{
if (exists)
set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
smp_mb();
}
static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
smp_mb();
return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}
static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
smp_mb();
return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
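/*
 * Illustrative reader-side pairing: consumers mirror the setter's
 * ordering above and consult KNOWN before trusting EXISTS:
 *
 *	if (obj_request_known_test(obj_request))
 *		exists = obj_request_exists_test(obj_request);
 */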
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
return obj_request->img_offset <
round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p (was %d)\n", __func__, obj_request,
atomic_read(&obj_request->kref.refcount));
kref_get(&obj_request->kref);
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
rbd_assert(obj_request != NULL);
dout("%s: obj %p (was %d)\n", __func__, obj_request,
atomic_read(&obj_request->kref.refcount));
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
dout("%s: img %p (was %d)\n", __func__, img_request,
atomic_read(&img_request->kref.refcount));
kref_get(&img_request->kref);
}
static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
rbd_assert(img_request != NULL);
dout("%s: img %p (was %d)\n", __func__, img_request,
atomic_read(&img_request->kref.refcount));
if (img_request_child_test(img_request))
kref_put(&img_request->kref, rbd_parent_request_destroy);
else
kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request)
{
rbd_assert(obj_request->img_request == NULL);
/* Image request now owns object's original reference */
obj_request->img_request = img_request;
obj_request->which = img_request->obj_request_count;
rbd_assert(!obj_request_img_data_test(obj_request));
obj_request_img_data_set(obj_request);
rbd_assert(obj_request->which != BAD_WHICH);
img_request->obj_request_count++;
list_add_tail(&obj_request->links, &img_request->obj_requests);
dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
obj_request->which);
}
static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request)
{
rbd_assert(obj_request->which != BAD_WHICH);
dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
obj_request->which);
list_del(&obj_request->links);
rbd_assert(img_request->obj_request_count > 0);
img_request->obj_request_count--;
rbd_assert(obj_request->which == img_request->obj_request_count);
obj_request->which = BAD_WHICH;
rbd_assert(obj_request_img_data_test(obj_request));
rbd_assert(obj_request->img_request == img_request);
obj_request->img_request = NULL;
obj_request->callback = NULL;
rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
switch (type) {
case OBJ_REQUEST_NODATA:
case OBJ_REQUEST_BIO:
case OBJ_REQUEST_PAGES:
return true;
default:
return false;
}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
struct rbd_obj_request *obj_request)
{
dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
dout("%s: img %p\n", __func__, img_request);
/*
* If no error occurred, compute the aggregate transfer
* count for the image request. We could instead use
* atomic64_cmpxchg() to update it as each object request
* completes; it is not clear offhand which way is better.
*/
if (!img_request->result) {
struct rbd_obj_request *obj_request;
u64 xferred = 0;
for_each_obj_request(img_request, obj_request)
xferred += obj_request->xferred;
img_request->xferred = xferred;
}
if (img_request->callback)
img_request->callback(img_request);
else
rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p\n", __func__, obj_request);
return wait_for_completion_interruptible(&obj_request->completion);
}
/*
* The default/initial value for all image request flags is 0. Each
* is conditionally set to 1 at image request initialization time
* and currently never change thereafter.
*/
static void img_request_write_set(struct rbd_img_request *img_request)
{
set_bit(IMG_REQ_WRITE, &img_request->flags);
smp_mb();
}
static bool img_request_write_test(struct rbd_img_request *img_request)
{
smp_mb();
return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}
static void img_request_child_set(struct rbd_img_request *img_request)
{
set_bit(IMG_REQ_CHILD, &img_request->flags);
smp_mb();
}
static void img_request_child_clear(struct rbd_img_request *img_request)
{
clear_bit(IMG_REQ_CHILD, &img_request->flags);
smp_mb();
}
static bool img_request_child_test(struct rbd_img_request *img_request)
{
smp_mb();
return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}
static void img_request_layered_set(struct rbd_img_request *img_request)
{
set_bit(IMG_REQ_LAYERED, &img_request->flags);
smp_mb();
}
static void img_request_layered_clear(struct rbd_img_request *img_request)
{
clear_bit(IMG_REQ_LAYERED, &img_request->flags);
smp_mb();
}
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
smp_mb();
return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
u64 xferred = obj_request->xferred;
u64 length = obj_request->length;
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, obj_request->img_request, obj_request->result,
xferred, length);
/*
* ENOENT means a hole in the image. We zero-fill the entire
* length of the request. A short read also implies zero-fill
* to the end of the request. An error requires the whole
* length of the request to be reported finished with an error
* to the block layer. In each case we update the xferred
* count to indicate the whole request was satisfied.
*/
rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
if (obj_request->result == -ENOENT) {
if (obj_request->type == OBJ_REQUEST_BIO)
zero_bio_chain(obj_request->bio_list, 0);
else
zero_pages(obj_request->pages, 0, length);
obj_request->result = 0;
} else if (xferred < length && !obj_request->result) {
if (obj_request->type == OBJ_REQUEST_BIO)
zero_bio_chain(obj_request->bio_list, xferred);
else
zero_pages(obj_request->pages, xferred, length);
}
obj_request->xferred = length;
obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p cb %p\n", __func__, obj_request,
obj_request->callback);
if (obj_request->callback)
obj_request->callback(obj_request);
else
complete_all(&obj_request->completion);
}
static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p\n", __func__, obj_request);
obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
struct rbd_device *rbd_dev = NULL;
bool layered = false;
if (obj_request_img_data_test(obj_request)) {
img_request = obj_request->img_request;
layered = img_request && img_request_layered_test(img_request);
rbd_dev = img_request->rbd_dev;
}
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, img_request, obj_request->result,
obj_request->xferred, obj_request->length);
if (layered && obj_request->result == -ENOENT &&
obj_request->img_offset < rbd_dev->parent_overlap)
rbd_img_parent_read(obj_request);
else if (img_request)
rbd_img_obj_request_read_callback(obj_request);
else
obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p result %d %llu\n", __func__, obj_request,
obj_request->result, obj_request->length);
/*
* There is no such thing as a successful short write. Set
* it to our originally-requested length.
*/
obj_request->xferred = obj_request->length;
obj_request_done_set(obj_request);
}
/*
* For a simple stat call there's nothing to do. We'll do more if
* this is part of a write sequence for a layered image.
*/
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p\n", __func__, obj_request);
obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
struct ceph_msg *msg)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
u16 opcode;
dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
rbd_assert(osd_req == obj_request->osd_req);
if (obj_request_img_data_test(obj_request)) {
rbd_assert(obj_request->img_request);
rbd_assert(obj_request->which != BAD_WHICH);
} else {
rbd_assert(obj_request->which == BAD_WHICH);
}
if (osd_req->r_result < 0)
obj_request->result = osd_req->r_result;
BUG_ON(osd_req->r_num_ops > 2);
/*
* We support a 64-bit length, but ultimately it has to be
* passed to blk_end_request(), which takes an unsigned int.
*/
obj_request->xferred = osd_req->r_reply_op_len[0];
rbd_assert(obj_request->xferred < (u64)UINT_MAX);
opcode = osd_req->r_ops[0].op;
switch (opcode) {
case CEPH_OSD_OP_READ:
rbd_osd_read_callback(obj_request);
break;
case CEPH_OSD_OP_WRITE:
rbd_osd_write_callback(obj_request);
break;
case CEPH_OSD_OP_STAT:
rbd_osd_stat_callback(obj_request);
break;
case CEPH_OSD_OP_CALL:
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
rbd_osd_trivial_callback(obj_request);
break;
default:
rbd_warn(NULL, "%s: unsupported op %hu\n",
obj_request->object_name, (unsigned short) opcode);
break;
}
if (obj_request_done_test(obj_request))
rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
u64 snap_id;
rbd_assert(osd_req != NULL);
snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
ceph_osdc_build_request(osd_req, obj_request->offset,
NULL, snap_id, NULL);
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
struct ceph_snap_context *snapc;
struct timespec mtime = CURRENT_TIME;
rbd_assert(osd_req != NULL);
snapc = img_request ? img_request->snapc : NULL;
ceph_osdc_build_request(osd_req, obj_request->offset,
snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
struct rbd_device *rbd_dev,
bool write_request,
struct rbd_obj_request *obj_request)
{
struct ceph_snap_context *snapc = NULL;
struct ceph_osd_client *osdc;
struct ceph_osd_request *osd_req;
if (obj_request_img_data_test(obj_request)) {
struct rbd_img_request *img_request = obj_request->img_request;
rbd_assert(write_request ==
img_request_write_test(img_request));
if (write_request)
snapc = img_request->snapc;
}
/* Allocate and initialize the request, for the single op */
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
if (!osd_req)
return NULL; /* ENOMEM */
if (write_request)
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
else
osd_req->r_flags = CEPH_OSD_FLAG_READ;
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
osd_req->r_oid_len = strlen(obj_request->object_name);
rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
osd_req->r_file_layout = rbd_dev->layout; /* struct */
return osd_req;
}
/*
* Create a copyup osd request based on the information in the
* object request supplied. A copyup request has two osd ops,
* a copyup method call, and a "normal" write request.
*/
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct ceph_snap_context *snapc;
struct rbd_device *rbd_dev;
struct ceph_osd_client *osdc;
struct ceph_osd_request *osd_req;
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
rbd_assert(img_request);
rbd_assert(img_request_write_test(img_request));
/* Allocate and initialize the request, for the two ops */
snapc = img_request->snapc;
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
if (!osd_req)
return NULL; /* ENOMEM */
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
osd_req->r_oid_len = strlen(obj_request->object_name);
rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
osd_req->r_file_layout = rbd_dev->layout; /* struct */
return osd_req;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */
static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
u64 offset, u64 length,
enum obj_request_type type)
{
struct rbd_obj_request *obj_request;
size_t size;
char *name;
rbd_assert(obj_request_type_valid(type));
size = strlen(object_name) + 1;
name = kmalloc(size, GFP_KERNEL);
if (!name)
return NULL;
obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
if (!obj_request) {
kfree(name);
return NULL;
}
obj_request->object_name = memcpy(name, object_name, size);
obj_request->offset = offset;
obj_request->length = length;
obj_request->flags = 0;
obj_request->which = BAD_WHICH;
obj_request->type = type;
INIT_LIST_HEAD(&obj_request->links);
init_completion(&obj_request->completion);
kref_init(&obj_request->kref);
dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
offset, length, (int)type, obj_request);
return obj_request;
}
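/*
 * kref release callback for an object request.  Destroys the osd
 * request and releases any attached bio chain or page vector,
 * then frees the object name and the request itself.
 */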
static void rbd_obj_request_destroy(struct kref *kref)
{
struct rbd_obj_request *obj_request;
obj_request = container_of(kref, struct rbd_obj_request, kref);
dout("%s: obj %p\n", __func__, obj_request);
rbd_assert(obj_request->img_request == NULL);
rbd_assert(obj_request->which == BAD_WHICH);
if (obj_request->osd_req)
rbd_osd_req_destroy(obj_request->osd_req);
rbd_assert(obj_request_type_valid(obj_request->type));
switch (obj_request->type) {
case OBJ_REQUEST_NODATA:
break; /* Nothing to do */
case OBJ_REQUEST_BIO:
if (obj_request->bio_list)
bio_chain_put(obj_request->bio_list);
break;
case OBJ_REQUEST_PAGES:
if (obj_request->pages)
ceph_release_page_vector(obj_request->pages,
obj_request->page_count);
break;
}
kfree(obj_request->object_name);
obj_request->object_name = NULL;
kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */
static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
rbd_dev_remove_parent(rbd_dev);
rbd_spec_put(rbd_dev->parent_spec);
rbd_dev->parent_spec = NULL;
rbd_dev->parent_overlap = 0;
}
/*
* Parent image reference counting is used to determine when an
* image's parent fields can be safely torn down--after there are no
* more in-flight requests to the parent image. When the last
* reference is dropped, cleaning them up is safe.
*/
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
int counter;
if (!rbd_dev->parent_spec)
return;
counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
if (counter > 0)
return;
/* Last reference; clean up parent data structures */
if (!counter)
rbd_dev_unparent(rbd_dev);
else
rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
* We must get the reference before checking for the overlap to
* coordinate properly with zeroing the parent overlap in
* rbd_dev_v2_parent_info() when an image gets flattened. We
* drop it again if there is no overlap.
*
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
int counter;
if (!rbd_dev->parent_spec)
return false;
counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
if (counter > 0 && rbd_dev->parent_overlap)
return true;
/* Image was flattened, but parent is not yet torn down */
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow\n");
return false;
}
/*
* Caller is responsible for filling in the list of object requests
* that comprises the image request, and the Linux request pointer
* (if there is one).
*/
static struct rbd_img_request *rbd_img_request_create(
struct rbd_device *rbd_dev,
u64 offset, u64 length,
bool write_request)
{
struct rbd_img_request *img_request;
img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
if (!img_request)
return NULL;
if (write_request) {
down_read(&rbd_dev->header_rwsem);
ceph_get_snap_context(rbd_dev->header.snapc);
up_read(&rbd_dev->header_rwsem);
}
img_request->rq = NULL;
img_request->rbd_dev = rbd_dev;
img_request->offset = offset;
img_request->length = length;
img_request->flags = 0;
if (write_request) {
img_request_write_set(img_request);
img_request->snapc = rbd_dev->header.snapc;
} else {
img_request->snap_id = rbd_dev->spec->snap_id;
}
if (rbd_dev_parent_get(rbd_dev))
img_request_layered_set(img_request);
spin_lock_init(&img_request->completion_lock);
img_request->next_completion = 0;
img_request->callback = NULL;
img_request->result = 0;
img_request->obj_request_count = 0;
INIT_LIST_HEAD(&img_request->obj_requests);
kref_init(&img_request->kref);
dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
write_request ? "write" : "read", offset, length,
img_request);
return img_request;
}
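/*
 * kref release callback for an image request.  Deletes all of its
 * object requests, drops the parent reference taken for layered
 * requests, and puts the snapshot context held for writes.
 */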
static void rbd_img_request_destroy(struct kref *kref)
{
struct rbd_img_request *img_request;
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
img_request = container_of(kref, struct rbd_img_request, kref);
dout("%s: img %p\n", __func__, img_request);
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
rbd_img_obj_request_del(img_request, obj_request);
rbd_assert(img_request->obj_request_count == 0);
if (img_request_layered_test(img_request)) {
img_request_layered_clear(img_request);
rbd_dev_parent_put(img_request->rbd_dev);
}
if (img_request_write_test(img_request))
ceph_put_snap_context(img_request->snapc);
kmem_cache_free(rbd_img_request_cache, img_request);
}
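/*
 * Create an image request aimed at the parent image, covering the
 * given range.  The request is marked as a child and holds a
 * reference to the originating object request.
 */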
static struct rbd_img_request *rbd_parent_request_create(
struct rbd_obj_request *obj_request,
u64 img_offset, u64 length)
{
struct rbd_img_request *parent_request;
struct rbd_device *rbd_dev;
rbd_assert(obj_request->img_request);
rbd_dev = obj_request->img_request->rbd_dev;
parent_request = rbd_img_request_create(rbd_dev->parent,
img_offset, length, false);
if (!parent_request)
return NULL;
img_request_child_set(parent_request);
rbd_obj_request_get(obj_request);
parent_request->obj_request = obj_request;
return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
struct rbd_img_request *parent_request;
struct rbd_obj_request *orig_request;
parent_request = container_of(kref, struct rbd_img_request, kref);
orig_request = parent_request->obj_request;
parent_request->obj_request = NULL;
rbd_obj_request_put(orig_request);
img_request_child_clear(parent_request);
rbd_img_request_destroy(kref);
}
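/*
 * End a single object request within an image request.  The first
 * error seen is recorded as the image request's result.  Returns
 * true if more object requests remain to be completed.
 */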
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
unsigned int xferred;
int result;
bool more;
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
xferred = (unsigned int)obj_request->xferred;
result = obj_request->result;
if (result) {
struct rbd_device *rbd_dev = img_request->rbd_dev;
rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
img_request_write_test(img_request) ? "write" : "read",
obj_request->length, obj_request->img_offset,
obj_request->offset);
rbd_warn(rbd_dev, " result %d xferred %x\n",
result, xferred);
if (!img_request->result)
img_request->result = result;
}
/* Image object requests don't own their page array */
if (obj_request->type == OBJ_REQUEST_PAGES) {
obj_request->pages = NULL;
obj_request->page_count = 0;
}
if (img_request_child_test(img_request)) {
rbd_assert(img_request->obj_request != NULL);
more = obj_request->which < img_request->obj_request_count - 1;
} else {
rbd_assert(img_request->rq != NULL);
more = blk_end_request(img_request->rq, result, xferred);
}
return more;
}
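/*
 * Completion callback for an object request that is part of an
 * image request.  Object requests can complete in any order, but
 * they are ended in order: under the completion lock, end every
 * consecutive completed request starting at next_completion.
 */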
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
u32 which = obj_request->which;
bool more = true;
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
rbd_assert(img_request != NULL);
rbd_assert(img_request->obj_request_count > 0);
rbd_assert(which != BAD_WHICH);
rbd_assert(which < img_request->obj_request_count);
rbd_assert(which >= img_request->next_completion);
spin_lock_irq(&img_request->completion_lock);
if (which != img_request->next_completion)
goto out;
for_each_obj_request_from(img_request, obj_request) {
rbd_assert(more);
rbd_assert(which < img_request->obj_request_count);
if (!obj_request_done_test(obj_request))
break;
more = rbd_img_obj_end_request(obj_request);
which++;
}
rbd_assert(more ^ (which == img_request->obj_request_count));
img_request->next_completion = which;
out:
spin_unlock_irq(&img_request->completion_lock);
rbd_img_request_put(img_request);
if (!more)
rbd_img_request_complete(img_request);
}
/*
* Split up an image request into one or more object requests, each
* to a different object. The "type" parameter indicates whether
* "data_desc" is the pointer to the head of a list of bio
* structures, or the base of a page array. In either case this
* function assumes data_desc describes memory sufficient to hold
* all data described by the image request.
*/
static int rbd_img_request_fill(struct rbd_img_request *img_request,
enum obj_request_type type,
void *data_desc)
{
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct rbd_obj_request *obj_request = NULL;
struct rbd_obj_request *next_obj_request;
bool write_request = img_request_write_test(img_request);
struct bio *bio_list = NULL;
unsigned int bio_offset = 0;
struct page **pages = NULL;
u64 img_offset;
u64 resid;
u16 opcode;
dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
(int)type, data_desc);
opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
img_offset = img_request->offset;
resid = img_request->length;
rbd_assert(resid > 0);
if (type == OBJ_REQUEST_BIO) {
bio_list = data_desc;
rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
} else {
rbd_assert(type == OBJ_REQUEST_PAGES);
pages = data_desc;
}
while (resid) {
struct ceph_osd_request *osd_req;
const char *object_name;
u64 offset;
u64 length;
object_name = rbd_segment_name(rbd_dev, img_offset);
if (!object_name)
goto out_unwind;
offset = rbd_segment_offset(rbd_dev, img_offset);
length = rbd_segment_length(rbd_dev, img_offset, resid);
obj_request = rbd_obj_request_create(object_name,
offset, length, type);
/* object request has its own copy of the object name */
rbd_segment_name_free(object_name);
if (!obj_request)
goto out_unwind;
/*
* set obj_request->img_request before creating the
* osd_request so that it gets the right snapc
*/
rbd_img_obj_request_add(img_request, obj_request);
if (type == OBJ_REQUEST_BIO) {
unsigned int clone_size;
rbd_assert(length <= (u64)UINT_MAX);
clone_size = (unsigned int)length;
obj_request->bio_list =
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
GFP_ATOMIC);
if (!obj_request->bio_list)
goto out_partial;
} else {
unsigned int page_count;
obj_request->pages = pages;
page_count = (u32)calc_pages_for(offset, length);
obj_request->page_count = page_count;
if ((offset + length) & ~PAGE_MASK)
page_count--; /* more on last page */
pages += page_count;
}
osd_req = rbd_osd_req_create(rbd_dev, write_request,
obj_request);
if (!osd_req)
goto out_partial;
obj_request->osd_req = osd_req;
obj_request->callback = rbd_img_obj_callback;
rbd_img_request_get(img_request);
osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
0, 0);
if (type == OBJ_REQUEST_BIO)
osd_req_op_extent_osd_data_bio(osd_req, 0,
obj_request->bio_list, length);
else
osd_req_op_extent_osd_data_pages(osd_req, 0,
obj_request->pages, length,
offset & ~PAGE_MASK, false, false);
if (write_request)
rbd_osd_req_format_write(obj_request);
else
rbd_osd_req_format_read(obj_request);
obj_request->img_offset = img_offset;
img_offset += length;
resid -= length;
}
return 0;
out_partial:
rbd_obj_request_put(obj_request);
out_unwind:
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
rbd_img_obj_request_del(img_request, obj_request);
return -ENOMEM;
}
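/*
 * Completion callback for the copyup write.  The copyup page
 * vector is no longer needed, so release it, then report the
 * transfer count as the full original write length on success.
 */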
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;
rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
rbd_assert(img_request);
rbd_dev = img_request->rbd_dev;
rbd_assert(rbd_dev);
pages = obj_request->copyup_pages;
rbd_assert(pages != NULL);
obj_request->copyup_pages = NULL;
page_count = obj_request->copyup_page_count;
rbd_assert(page_count);
obj_request->copyup_page_count = 0;
ceph_release_page_vector(pages, page_count);
/*
* We want the transfer count to reflect the size of the
* original write request. There is no such thing as a
* successful short write, so if the request was successful
* we can just set it to the originally-requested length.
*/
if (!obj_request->result)
obj_request->xferred = obj_request->length;
/* Finish up with the normal image object callback */
rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
struct rbd_obj_request *orig_request;
struct ceph_osd_request *osd_req;
struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;
int img_result;
u64 parent_length;
u64 offset;
u64 length;
rbd_assert(img_request_child_test(img_request));
/* First get what we need from the image request */
pages = img_request->copyup_pages;
rbd_assert(pages != NULL);
img_request->copyup_pages = NULL;
page_count = img_request->copyup_page_count;
rbd_assert(page_count);
img_request->copyup_page_count = 0;
orig_request = img_request->obj_request;
rbd_assert(orig_request != NULL);
rbd_assert(obj_request_type_valid(orig_request->type));
img_result = img_request->result;
parent_length = img_request->length;
rbd_assert(parent_length == img_request->xferred);
rbd_img_request_put(img_request);
rbd_assert(orig_request->img_request);
rbd_dev = orig_request->img_request->rbd_dev;
rbd_assert(rbd_dev);
/*
* If the overlap has become 0 (most likely because the
* image has been flattened) we need to free the pages
* and re-submit the original write request.
*/
if (!rbd_dev->parent_overlap) {
struct ceph_osd_client *osdc;
ceph_release_page_vector(pages, page_count);
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)
return;
}
if (img_result)
goto out_err;
/*
 * The original osd request is of no use to us any more.
* We need a new one that can hold the two ops in a copyup
* request. Allocate the new copyup osd request for the
* original request, and release the old one.
*/
img_result = -ENOMEM;
osd_req = rbd_osd_req_create_copyup(orig_request);
if (!osd_req)
goto out_err;
rbd_osd_req_destroy(orig_request->osd_req);
orig_request->osd_req = osd_req;
orig_request->copyup_pages = pages;
orig_request->copyup_page_count = page_count;
/* Initialize the copyup op */
osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
false, false);
/* Then the original write request op */
offset = orig_request->offset;
length = orig_request->length;
osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
offset, length, 0, 0);
if (orig_request->type == OBJ_REQUEST_BIO)
osd_req_op_extent_osd_data_bio(osd_req, 1,
orig_request->bio_list, length);
else
osd_req_op_extent_osd_data_pages(osd_req, 1,
orig_request->pages, length,
offset & ~PAGE_MASK, false, false);
rbd_osd_req_format_write(orig_request);
/* All set, send it off. */
orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)
return;
out_err:
/* Record the error code and complete the request */
orig_request->result = img_result;
orig_request->xferred = 0;
obj_request_done_set(orig_request);
rbd_obj_request_complete(orig_request);
}
/*
* Read from the parent image the range of data that covers the
* entire target of the given object request. This is used for
* satisfying a layered image write request when the target of an
* object request from the image request does not exist.
*
* A page array big enough to hold the returned data is allocated
* and supplied to rbd_img_request_fill() as the "data descriptor."
* When the read completes, this page array will be transferred to
* the original object request for the copyup operation.
*
* If an error occurs, record it as the result of the original
* object request and mark it done so it gets completed.
*/
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
struct rbd_img_request *parent_request = NULL;
struct rbd_device *rbd_dev;
u64 img_offset;
u64 length;
struct page **pages = NULL;
u32 page_count;
int result;
rbd_assert(obj_request_img_data_test(obj_request));
rbd_assert(obj_request_type_valid(obj_request->type));
img_request = obj_request->img_request;
rbd_assert(img_request != NULL);
rbd_dev = img_request->rbd_dev;
rbd_assert(rbd_dev->parent != NULL);
/*
* Determine the byte range covered by the object in the
* child image to which the original request was to be sent.
*/
img_offset = obj_request->img_offset - obj_request->offset;
length = (u64)1 << rbd_dev->header.obj_order;
/*
* There is no defined parent data beyond the parent
* overlap, so limit what we read at that boundary if
* necessary.
*/
if (img_offset + length > rbd_dev->parent_overlap) {
rbd_assert(img_offset < rbd_dev->parent_overlap);
length = rbd_dev->parent_overlap - img_offset;
}
/*
* Allocate a page array big enough to receive the data read
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
goto out_err;
}
result = -ENOMEM;
parent_request = rbd_parent_request_create(obj_request,
img_offset, length);
if (!parent_request)
goto out_err;
result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
if (result)
goto out_err;
parent_request->copyup_pages = pages;
parent_request->copyup_page_count = page_count;
parent_request->callback = rbd_img_obj_parent_read_full_callback;
result = rbd_img_request_submit(parent_request);
if (!result)
return 0;
parent_request->copyup_pages = NULL;
parent_request->copyup_page_count = 0;
parent_request->obj_request = NULL;
rbd_obj_request_put(obj_request);
out_err:
if (pages)
ceph_release_page_vector(pages, page_count);
if (parent_request)
rbd_img_request_put(parent_request);
obj_request->result = result;
obj_request->xferred = 0;
obj_request_done_set(obj_request);
return result;
}
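/*
 * Completion callback for the STAT request issued by
 * rbd_img_obj_exists_submit().  Records whether the target object
 * exists and resubmits the original object request.
 */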
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
struct rbd_obj_request *orig_request;
struct rbd_device *rbd_dev;
int result;
rbd_assert(!obj_request_img_data_test(obj_request));
/*
* All we need from the object request is the original
* request and the result of the STAT op. Grab those, then
* we're done with the request.
*/
orig_request = obj_request->obj_request;
obj_request->obj_request = NULL;
rbd_assert(orig_request);
rbd_assert(orig_request->img_request);
result = obj_request->result;
obj_request->result = 0;
dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
obj_request, orig_request, result,
obj_request->xferred, obj_request->length);
rbd_obj_request_put(obj_request);
/*
* If the overlap has become 0 (most likely because the
 * image has been flattened) we need to re-submit the
 * original write request.
*/
rbd_dev = orig_request->img_request->rbd_dev;
if (!rbd_dev->parent_overlap) {
struct ceph_osd_client *osdc;
rbd_obj_request_put(orig_request);
osdc = &rbd_dev->rbd_client->client->osdc;
result = rbd_obj_request_submit(osdc, orig_request);
if (!result)
return;
}
/*
* Our only purpose here is to determine whether the object
* exists, and we don't want to treat the non-existence as
* an error. If something else comes back, transfer the
* error to the original request and complete it now.
*/
if (!result) {
obj_request_existence_set(orig_request, true);
} else if (result == -ENOENT) {
obj_request_existence_set(orig_request, false);
} else if (result) {
orig_request->result = result;
goto out;
}
/*
* Resubmit the original request now that we have recorded
* whether the target object exists.
*/
orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
if (orig_request->result)
rbd_obj_request_complete(orig_request);
rbd_obj_request_put(orig_request);
}
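/*
 * Issue a STAT op to find out whether the target object of the
 * given object request exists.  The result is handled by
 * rbd_img_obj_exists_callback().
 */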
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
struct rbd_obj_request *stat_request;
struct rbd_device *rbd_dev;
struct ceph_osd_client *osdc;
struct page **pages = NULL;
u32 page_count;
size_t size;
int ret;
/*
* The response data for a STAT call consists of:
* le64 length;
* struct {
* le32 tv_sec;
* le32 tv_nsec;
* } mtime;
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = -ENOMEM;
stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
OBJ_REQUEST_PAGES);
if (!stat_request)
goto out;
rbd_obj_request_get(obj_request);
stat_request->obj_request = obj_request;
stat_request->pages = pages;
stat_request->page_count = page_count;
rbd_assert(obj_request->img_request);
rbd_dev = obj_request->img_request->rbd_dev;
stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
stat_request);
if (!stat_request->osd_req)
goto out;
stat_request->callback = rbd_img_obj_exists_callback;
osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
false, false);
rbd_osd_req_format_read(stat_request);
osdc = &rbd_dev->rbd_client->client->osdc;
ret = rbd_obj_request_submit(osdc, stat_request);
out:
if (ret)
rbd_obj_request_put(obj_request);
return ret;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
bool known;
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
rbd_assert(img_request);
rbd_dev = img_request->rbd_dev;
/*
* Only writes to layered images need special handling.
* Reads and non-layered writes are simple object requests.
* Layered writes that start beyond the end of the overlap
* with the parent have no parent data, so they too are
* simple object requests. Finally, if the target object is
* known to already exist, its parent data has already been
* copied, so a write to the object can also be handled as a
* simple object request.
*/
if (!img_request_write_test(img_request) ||
!img_request_layered_test(img_request) ||
!obj_request_overlaps_parent(obj_request) ||
((known = obj_request_known_test(obj_request)) &&
obj_request_exists_test(obj_request))) {
struct rbd_device *rbd_dev;
struct ceph_osd_client *osdc;
rbd_dev = obj_request->img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
return rbd_obj_request_submit(osdc, obj_request);
}
/*
* It's a layered write. The target object might exist but
* we may not know that yet. If we know it doesn't exist,
* start by reading the data for the full target object from
* the parent so we can use it for a copyup to the target.
*/
if (known)
return rbd_img_obj_parent_read_full(obj_request);
/* We don't know whether the target exists. Go find out. */
return rbd_img_obj_exists_submit(obj_request);
}
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
dout("%s: img %p\n", __func__, img_request);
for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
int ret;
ret = rbd_img_obj_request_submit(obj_request);
if (ret)
return ret;
}
return 0;
}
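/*
 * Completion callback for a read issued to the parent image on
 * behalf of an object request whose target didn't exist.  Data
 * beyond the parent overlap is zeroed by reporting a short
 * transfer to the read completion code.
 */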
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
struct rbd_device *rbd_dev;
u64 obj_end;
u64 img_xferred;
int img_result;
rbd_assert(img_request_child_test(img_request));
/* First get what we need from the image request and release it */
obj_request = img_request->obj_request;
img_xferred = img_request->xferred;
img_result = img_request->result;
rbd_img_request_put(img_request);
/*
* If the overlap has become 0 (most likely because the
* image has been flattened) we need to re-submit the
* original request.
*/
rbd_assert(obj_request);
rbd_assert(obj_request->img_request);
rbd_dev = obj_request->img_request->rbd_dev;
if (!rbd_dev->parent_overlap) {
struct ceph_osd_client *osdc;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, obj_request);
if (!img_result)
return;
}
obj_request->result = img_result;
if (obj_request->result)
goto out;
/*
* We need to zero anything beyond the parent overlap
* boundary. Since rbd_img_obj_request_read_callback()
* will zero anything beyond the end of a short read, an
* easy way to do this is to pretend the data from the
* parent came up short--ending at the overlap boundary.
*/
rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
obj_end = obj_request->img_offset + obj_request->length;
if (obj_end > rbd_dev->parent_overlap) {
u64 xferred = 0;
if (obj_request->img_offset < rbd_dev->parent_overlap)
xferred = rbd_dev->parent_overlap -
obj_request->img_offset;
obj_request->xferred = min(img_xferred, xferred);
} else {
obj_request->xferred = img_xferred;
}
out:
rbd_img_obj_request_read_callback(obj_request);
rbd_obj_request_complete(obj_request);
}
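/*
 * Satisfy a read whose target object does not exist by reading
 * the corresponding range from the parent image instead.
 */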
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
int result;
rbd_assert(obj_request_img_data_test(obj_request));
rbd_assert(obj_request->img_request != NULL);
rbd_assert(obj_request->result == (s32) -ENOENT);
rbd_assert(obj_request_type_valid(obj_request->type));
/* rbd_read_finish(obj_request, obj_request->length); */
img_request = rbd_parent_request_create(obj_request,
obj_request->img_offset,
obj_request->length);
result = -ENOMEM;
if (!img_request)
goto out_err;
if (obj_request->type == OBJ_REQUEST_BIO)
result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
obj_request->bio_list);
else
result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
obj_request->pages);
if (result)
goto out_err;
img_request->callback = rbd_img_parent_read_callback;
result = rbd_img_request_submit(img_request);
if (result)
goto out_err;
return;
out_err:
if (img_request)
rbd_img_request_put(img_request);
obj_request->result = result;
obj_request->xferred = 0;
obj_request_done_set(obj_request);
}
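/*
 * Synchronously acknowledge a notification received on the
 * header object watch.
 */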
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
struct rbd_obj_request *obj_request;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
OBJ_REQUEST_NODATA);
if (!obj_request)
return -ENOMEM;
ret = -ENOMEM;
obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
notify_id, 0, 0);
rbd_osd_req_format_read(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
ret = rbd_obj_request_wait(obj_request);
out:
rbd_obj_request_put(obj_request);
return ret;
}
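/*
 * Callback for events arriving on the header object watch.
 * Refreshes the image header, then acknowledges the notification.
 */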
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
struct rbd_device *rbd_dev = (struct rbd_device *)data;
int ret;
if (!rbd_dev)
return;
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
ret = rbd_dev_refresh(rbd_dev);
if (ret)
rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
/*
* Request sync osd watch/unwatch. The value of "start" determines
* whether a watch request is being initiated or torn down.
*/
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
int ret;
rbd_assert(start ^ !!rbd_dev->watch_event);
rbd_assert(start ^ !!rbd_dev->watch_request);
if (start) {
ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
&rbd_dev->watch_event);
if (ret < 0)
return ret;
rbd_assert(rbd_dev->watch_event != NULL);
}
ret = -ENOMEM;
obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
OBJ_REQUEST_NODATA);
if (!obj_request)
goto out_cancel;
obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
if (!obj_request->osd_req)
goto out_cancel;
if (start)
ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
else
ceph_osdc_unregister_linger_request(osdc,
rbd_dev->watch_request->osd_req);
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
rbd_osd_req_format_write(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out_cancel;
ret = rbd_obj_request_wait(obj_request);
if (ret)
goto out_cancel;
ret = obj_request->result;
if (ret)
goto out_cancel;
/*
* A watch request is set to linger, so the underlying osd
* request won't go away until we unregister it. We retain
* a pointer to the object request during that time (in
* rbd_dev->watch_request), so we'll keep a reference to
* it. We'll drop that reference (below) after we've
* unregistered it.
*/
if (start) {
rbd_dev->watch_request = obj_request;
return 0;
}
/* We have successfully torn down the watch request */
rbd_obj_request_put(rbd_dev->watch_request);
rbd_dev->watch_request = NULL;
out_cancel:
/* Cancel the event if we're tearing down, or on error */
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
if (obj_request)
rbd_obj_request_put(obj_request);
return ret;
}
/*
* Synchronous osd object method call. Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
*/
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
const char *object_name,
const char *class_name,
const char *method_name,
const void *outbound,
size_t outbound_size,
void *inbound,
size_t inbound_size)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
struct page **pages;
u32 page_count;
int ret;
/*
* Method calls are ultimately read operations. The result
 * should be placed into the inbound buffer provided. They
* also supply outbound data--parameters for the object
* method. Currently if this is present it will be a
* snapshot id.
*/
page_count = (u32)calc_pages_for(0, inbound_size);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = -ENOMEM;
obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
OBJ_REQUEST_PAGES);
if (!obj_request)
goto out;
obj_request->pages = pages;
obj_request->page_count = page_count;
obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
class_name, method_name);
if (outbound_size) {
struct ceph_pagelist *pagelist;
pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
if (!pagelist)
goto out;
ceph_pagelist_init(pagelist);
ceph_pagelist_append(pagelist, outbound, outbound_size);
osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
pagelist);
}
osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
obj_request->pages, inbound_size,
0, false, false);
rbd_osd_req_format_read(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
ret = rbd_obj_request_wait(obj_request);
if (ret)
goto out;
ret = obj_request->result;
if (ret < 0)
goto out;
rbd_assert(obj_request->xferred < (u64)INT_MAX);
ret = (int)obj_request->xferred;
ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
if (obj_request)
rbd_obj_request_put(obj_request);
else
ceph_release_page_vector(pages, page_count);
return ret;
}
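/*
 * The block layer request function.  Translates each fetched
 * block request into an image request and submits it, dropping
 * the queue lock while the request is built and sent.
 */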
static void rbd_request_fn(struct request_queue *q)
__releases(q->queue_lock) __acquires(q->queue_lock)
{
struct rbd_device *rbd_dev = q->queuedata;
bool read_only = rbd_dev->mapping.read_only;
struct request *rq;
int result;
while ((rq = blk_fetch_request(q))) {
bool write_request = rq_data_dir(rq) == WRITE;
struct rbd_img_request *img_request;
u64 offset;
u64 length;
/* Ignore any non-FS requests that filter through. */
if (rq->cmd_type != REQ_TYPE_FS) {
dout("%s: non-fs request type %d\n", __func__,
(int) rq->cmd_type);
__blk_end_request_all(rq, 0);
continue;
}
/* Ignore/skip any zero-length requests */
offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
length = (u64) blk_rq_bytes(rq);
if (!length) {
dout("%s: zero-length request\n", __func__);
__blk_end_request_all(rq, 0);
continue;
}
spin_unlock_irq(q->queue_lock);
/* Disallow writes to a read-only device */
if (write_request) {
result = -EROFS;
if (read_only)
goto end_request;
rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
}
/*
* Quit early if the mapped snapshot no longer
* exists. It's still possible the snapshot will
* have disappeared by the time our request arrives
* at the osd, but there's no sense in sending it if
* we already know.
*/
if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
dout("request for non-existent snapshot");
rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
result = -ENXIO;
goto end_request;
}
result = -EINVAL;
if (offset && length > U64_MAX - offset + 1) {
rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
offset, length);
goto end_request; /* Shouldn't happen */
}
result = -EIO;
if (offset + length > rbd_dev->mapping.size) {
rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
offset, length, rbd_dev->mapping.size);
goto end_request;
}
result = -ENOMEM;
img_request = rbd_img_request_create(rbd_dev, offset, length,
write_request);
if (!img_request)
goto end_request;
img_request->rq = rq;
result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
rq->bio);
if (!result)
result = rbd_img_request_submit(img_request);
if (result)
rbd_img_request_put(img_request);
end_request:
spin_lock_irq(q->queue_lock);
if (result < 0) {
rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
write_request ? "write" : "read",
length, offset, result);
__blk_end_request_all(rq, result);
}
}
}
/*
 * A queue callback. Makes sure that we don't create a bio that spans
 * multiple osd objects. One exception is single-page bios, which we
 * handle later in bio_chain_clone_range().
*/
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
struct bio_vec *bvec)
{
struct rbd_device *rbd_dev = q->queuedata;
sector_t sector_offset;
sector_t sectors_per_obj;
sector_t obj_sector_offset;
int ret;
/*
 * Find how far into its rbd object the bio start sector falls.
 * The bio sector is partition-relative, so first make it relative
 * to the enclosing device.
*/
sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
obj_sector_offset = sector_offset & (sectors_per_obj - 1);
/*
* Compute the number of bytes from that offset to the end
* of the object. Account for what's already used by the bio.
*/
ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
if (ret > bmd->bi_size)
ret -= bmd->bi_size;
else
ret = 0;
/*
* Don't send back more than was asked for. And if the bio
* was empty, let the whole thing through because: "Note
* that a block device *must* allow a single page to be
* added to an empty bio."
*/
rbd_assert(bvec->bv_len <= PAGE_SIZE);
if (ret > (int) bvec->bv_len || !bmd->bi_size)
ret = (int) bvec->bv_len;
return ret;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk = rbd_dev->disk;
if (!disk)
return;
rbd_dev->disk = NULL;
if (disk->flags & GENHD_FL_UP) {
del_gendisk(disk);
if (disk->queue)
blk_cleanup_queue(disk->queue);
}
put_disk(disk);
}
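/*
 * Synchronously read a byte range of an object into the supplied
 * buffer.  Returns the number of bytes read or a negative errno.
 */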
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
const char *object_name,
u64 offset, u64 length, void *buf)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
struct page **pages = NULL;
u32 page_count;
size_t size;
int ret;
page_count = (u32) calc_pages_for(offset, length);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = -ENOMEM;
obj_request = rbd_obj_request_create(object_name, offset, length,
OBJ_REQUEST_PAGES);
if (!obj_request)
goto out;
obj_request->pages = pages;
obj_request->page_count = page_count;
obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
if (!obj_request->osd_req)
goto out;
osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
offset, length, 0, 0);
osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
obj_request->pages,
obj_request->length,
obj_request->offset & ~PAGE_MASK,
false, false);
rbd_osd_req_format_read(obj_request);
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
goto out;
ret = rbd_obj_request_wait(obj_request);
if (ret)
goto out;
ret = obj_request->result;
if (ret < 0)
goto out;
rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
size = (size_t) obj_request->xferred;
ceph_copy_from_page_vector(pages, buf, 0, size);
rbd_assert(size <= (size_t)INT_MAX);
ret = (int)size;
out:
if (obj_request)
rbd_obj_request_put(obj_request);
else
ceph_release_page_vector(pages, page_count);
return ret;
}
/*
* Read the complete header for the given rbd device. On successful
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
u64 names_size = 0;
u32 want_count;
int ret;
/*
* The complete header will include an array of its 64-bit
* snapshot ids, followed by the names of those snapshots as
* a contiguous block of NUL-terminated strings. Note that
* the number of snapshots could change by the time we read
* it in, in which case we re-read it.
*/
do {
size_t size;
kfree(ondisk);
size = sizeof (*ondisk);
size += snap_count * sizeof (struct rbd_image_snap_ondisk);
size += names_size;
ondisk = kmalloc(size, GFP_KERNEL);
if (!ondisk)
return -ENOMEM;
ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
0, size, ondisk);
if (ret < 0)
goto out;
if ((size_t)ret < size) {
ret = -ENXIO;
rbd_warn(rbd_dev, "short header read (want %zd got %d)",
size, ret);
goto out;
}
if (!rbd_dev_ondisk_valid(ondisk)) {
ret = -ENXIO;
rbd_warn(rbd_dev, "invalid header");
goto out;
}
names_size = le64_to_cpu(ondisk->snap_names_len);
want_count = snap_count;
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
kfree(ondisk);
return ret;
}
/*
* Clear the rbd device's EXISTS flag if the snapshot it's mapped to
* has disappeared from the (just updated) snapshot context.
*/
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
u64 snap_id;
if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
return;
snap_id = rbd_dev->spec->snap_id;
if (snap_id == CEPH_NOSNAP)
return;
if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
bool removing;
/*
* Don't hold the lock while doing disk operations,
* or lock ordering will conflict with the bdev mutex via:
* rbd_add() -> blkdev_get() -> rbd_open()
*/
spin_lock_irq(&rbd_dev->lock);
removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
spin_unlock_irq(&rbd_dev->lock);
/*
* If the device is being removed, rbd_dev->disk has
* been destroyed, so don't try to update its size
*/
if (!removing) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
revalidate_disk(rbd_dev->disk);
}
}
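/*
 * Re-read the image header (format 1 or 2) and propagate any
 * resulting size change to the block device.
 */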
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
u64 mapping_size;
int ret;
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
mapping_size = rbd_dev->mapping.size;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
if (rbd_dev->image_format == 1)
ret = rbd_dev_v1_header_info(rbd_dev);
else
ret = rbd_dev_v2_header_info(rbd_dev);
/* If it's a mapped snapshot, validate its EXISTS flag */
rbd_exists_validate(rbd_dev);
mutex_unlock(&ctl_mutex);
if (mapping_size != rbd_dev->mapping.size) {
rbd_dev_update_size(rbd_dev);
}
return ret;
}
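/*
 * Allocate and set up the gendisk and request queue for an rbd
 * device, with I/O limits matched to the object size.
 */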
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
u64 segment_size;
/* create gendisk info */
disk = alloc_disk(RBD_MINORS_PER_MAJOR);
if (!disk)
return -ENOMEM;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
rbd_dev->dev_id);
disk->major = rbd_dev->major;
disk->first_minor = 0;
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
if (!q)
goto out_disk;
/* We use the default size, but let's be explicit about it. */
blk_queue_physical_block_size(q, SECTOR_SIZE);
/* set io sizes to object size */
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
blk_queue_merge_bvec(q, rbd_merge_bvec);
disk->queue = q;
q->queuedata = rbd_dev;
rbd_dev->disk = disk;
return 0;
out_disk:
put_disk(disk);
return -ENOMEM;
}
/*
 * sysfs
 */
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
return container_of(dev, struct rbd_device, dev);
}
static ssize_t rbd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n",
(unsigned long long)rbd_dev->mapping.size);
}
/*
* Note this shows the features for whatever's mapped, which is not
* necessarily the base image.
*/
static ssize_t rbd_features_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "0x%016llx\n",
(unsigned long long)rbd_dev->mapping.features);
}
static ssize_t rbd_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
if (rbd_dev->major)
return sprintf(buf, "%d\n", rbd_dev->major);
return sprintf(buf, "(none)\n");
}
static ssize_t rbd_client_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "client%lld\n",
ceph_client_id(rbd_dev->rbd_client->client));
}
static ssize_t rbd_pool_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}
static ssize_t rbd_pool_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%llu\n",
(unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
if (rbd_dev->spec->image_name)
return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
* Shows the name of the currently-mapped snapshot (or
* RBD_SNAP_HEAD_NAME for the base image).
*/
static ssize_t rbd_snap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
/*
* For an rbd v2 image, shows the pool id, image id, and snapshot id
* for the parent image. If there is no parent, simply shows
* "(no parent image)".
*/
static ssize_t rbd_parent_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
struct rbd_spec *spec = rbd_dev->parent_spec;
int count;
char *bufp = buf;
if (!spec)
return sprintf(buf, "(no parent image)\n");
count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
(unsigned long long) spec->pool_id, spec->pool_name);
if (count < 0)
return count;
bufp += count;
count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
spec->image_name ? spec->image_name : "(unknown)");
if (count < 0)
return count;
bufp += count;
count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
(unsigned long long) spec->snap_id, spec->snap_name);
if (count < 0)
return count;
bufp += count;
count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
if (count < 0)
return count;
bufp += count;
return (ssize_t) (bufp - buf);
}
static ssize_t rbd_image_refresh(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
ret = rbd_dev_refresh(rbd_dev);
if (ret)
rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
&dev_attr_features.attr,
&dev_attr_major.attr,
&dev_attr_client_id.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
&dev_attr_parent.attr,
&dev_attr_refresh.attr,
NULL
};
static struct attribute_group rbd_attr_group = {
.attrs = rbd_attrs,
};
static const struct attribute_group *rbd_attr_groups[] = {
&rbd_attr_group,
NULL
};
static void rbd_sysfs_dev_release(struct device *dev)
{
}
static struct device_type rbd_device_type = {
.name = "rbd",
.groups = rbd_attr_groups,
.release = rbd_sysfs_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
kref_get(&spec->kref);
return spec;
}
static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
if (spec)
kref_put(&spec->kref, rbd_spec_free);
}
static struct rbd_spec *rbd_spec_alloc(void)
{
struct rbd_spec *spec;
spec = kzalloc(sizeof (*spec), GFP_KERNEL);
if (!spec)
return NULL;
kref_init(&spec->kref);
return spec;
}
static void rbd_spec_free(struct kref *kref)
{
struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
kfree(spec->pool_name);
kfree(spec->image_id);
kfree(spec->image_name);
kfree(spec->snap_name);
kfree(spec);
}
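/*
 * Allocate an rbd_device, taking ownership of the caller's client
 * and spec references, and initialize the file layout used for
 * all of its requests.
 */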
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
if (!rbd_dev)
return NULL;
spin_lock_init(&rbd_dev->lock);
rbd_dev->flags = 0;
atomic_set(&rbd_dev->parent_ref, 0);
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
rbd_dev->spec = spec;
rbd_dev->rbd_client = rbdc;
/* Initialize the layout used for all rbd requests */
rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
return rbd_dev;
}
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev);
}
/*
* Get the size and object order for an image snapshot, or if
* snap_id is CEPH_NOSNAP, gets this information for the base
* image.
*/
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size)
{
__le64 snapid = cpu_to_le64(snap_id);
int ret;
struct {
u8 order;
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_size",
&snapid, sizeof (snapid),
&size_buf, sizeof (size_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < sizeof (size_buf))
return -ERANGE;
if (order) {
*order = size_buf.order;
dout(" order %u", (unsigned int)*order);
}
*snap_size = le64_to_cpu(size_buf.size);
dout(" snap_id 0x%016llx snap_size = %llu\n",
(unsigned long long)snap_id,
(unsigned long long)*snap_size);
return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
&rbd_dev->header.obj_order,
&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
void *reply_buf;
int ret;
void *p;
reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
if (!reply_buf)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_object_prefix", NULL, 0,
reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
p = reply_buf;
rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
p + ret, NULL, GFP_NOIO);
ret = 0;
if (IS_ERR(rbd_dev->header.object_prefix)) {
ret = PTR_ERR(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
} else {
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
}
out:
kfree(reply_buf);
return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features)
{
__le64 snapid = cpu_to_le64(snap_id);
struct {
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
u64 incompat;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_features",
&snapid, sizeof (snapid),
&features_buf, sizeof (features_buf));
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < sizeof (features_buf))
return -ERANGE;
incompat = le64_to_cpu(features_buf.incompat);
if (incompat & ~RBD_FEATURES_SUPPORTED)
return -ENXIO;
*snap_features = le64_to_cpu(features_buf.features);
dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
(unsigned long long)snap_id,
(unsigned long long)*snap_features,
(unsigned long long)le64_to_cpu(features_buf.incompat));
return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
&rbd_dev->header.features);
}
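/*
 * Fetch and decode the parent image information (pool id, image
 * id, snapshot id, and overlap) for a format 2 image using the
 * "get_parent" class method.
 */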
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
size_t size;
void *reply_buf = NULL;
__le64 snapid;
void *p;
void *end;
u64 pool_id;
char *image_id;
u64 overlap;
int ret;
parent_spec = rbd_spec_alloc();
if (!parent_spec)
return -ENOMEM;
size = sizeof (__le64) + /* pool_id */
sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
sizeof (__le64) + /* snap_id */
sizeof (__le64); /* overlap */
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf) {
ret = -ENOMEM;
goto out_err;
}
snapid = cpu_to_le64(CEPH_NOSNAP);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_parent",
&snapid, sizeof (snapid),
reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out_err;
p = reply_buf;
end = reply_buf + ret;
ret = -ERANGE;
ceph_decode_64_safe(&p, end, pool_id, out_err);
if (pool_id == CEPH_NOPOOL) {
/*
 * Either the parent never existed, or we have a
 * record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
}
goto out; /* No parent? No problem. */
}
/* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
if (pool_id > (u64)U32_MAX) {
rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
(unsigned long long)pool_id, U32_MAX);
goto out_err;
}
parent_spec->pool_id = pool_id;
image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(image_id)) {
ret = PTR_ERR(image_id);
goto out_err;
}
parent_spec->image_id = image_id;
ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
ceph_decode_64_safe(&p, end, overlap, out_err);
if (overlap) {
rbd_spec_put(rbd_dev->parent_spec);
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
rbd_dev->parent_overlap = overlap;
} else {
rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
}
out:
ret = 0;
out_err:
kfree(reply_buf);
rbd_spec_put(parent_spec);
return ret;
}
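/*
 * Fetch the stripe unit and count using "get_stripe_unit_count".
 * Fancy striping isn't supported, so fail unless both values
 * match the defaults.
 */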
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
void *p;
u64 obj_size;
u64 stripe_unit;
u64 stripe_count;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_stripe_unit_count", NULL, 0,
(char *)&striping_info_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
if (ret < size)
return -ERANGE;
/*
* We don't actually support the "fancy striping" feature
* (STRIPINGV2) yet, but if the striping sizes are the
* defaults the behavior is the same as before. So find
* out, and only fail if the image has non-default values.
*/
ret = -EINVAL;
obj_size = (u64)1 << rbd_dev->header.obj_order;
p = &striping_info_buf;
stripe_unit = ceph_decode_64(&p);
if (stripe_unit != obj_size) {
rbd_warn(rbd_dev, "unsupported stripe unit "
"(got %llu want %llu)",
stripe_unit, obj_size);
return -EINVAL;
}
stripe_count = ceph_decode_64(&p);
if (stripe_count != 1) {
rbd_warn(rbd_dev, "unsupported stripe count "
"(got %llu want 1)", stripe_count);
return -EINVAL;
}
rbd_dev->header.stripe_unit = stripe_unit;
rbd_dev->header.stripe_count = stripe_count;
return 0;
}
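/*
 * Look up the name of the image with the given id in the rbd
 * directory object.  Returns a dynamically-allocated name, or
 * NULL if it couldn't be found (callers tolerate that).
 */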
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
size_t image_id_size;
char *image_id;
void *p;
void *end;
size_t size;
void *reply_buf = NULL;
size_t len = 0;
char *image_name = NULL;
int ret;
rbd_assert(!rbd_dev->spec->image_name);
len = strlen(rbd_dev->spec->image_id);
image_id_size = sizeof (__le32) + len;
image_id = kmalloc(image_id_size, GFP_KERNEL);
if (!image_id)
return NULL;
p = image_id;
end = image_id + image_id_size;
ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
goto out;
ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
"rbd", "dir_get_name",
image_id, image_id_size,
reply_buf, size);
if (ret < 0)
goto out;
p = reply_buf;
end = reply_buf + ret;
image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
if (IS_ERR(image_name))
image_name = NULL;
else
dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
kfree(reply_buf);
kfree(image_id);
return image_name;
}
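/*
 * Find the id of the snapshot with the given name in a format 1
 * image header, or CEPH_NOSNAP if there is none.
 */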
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
struct ceph_snap_context *snapc = rbd_dev->header.snapc;
const char *snap_name;
u32 which = 0;
/* Skip over names until we find the one we are looking for */
snap_name = rbd_dev->header.snap_names;
while (which < snapc->num_snaps) {
if (!strcmp(name, snap_name))
return snapc->snaps[which];
snap_name += strlen(snap_name) + 1;
which++;
}
return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
struct ceph_snap_context *snapc = rbd_dev->header.snapc;
u32 which;
bool found = false;
u64 snap_id;
for (which = 0; !found && which < snapc->num_snaps; which++) {
const char *snap_name;
snap_id = snapc->snaps[which];
snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
if (IS_ERR(snap_name)) {
/* ignore no-longer existing snapshots */
if (PTR_ERR(snap_name) == -ENOENT)
continue;
else
break;
}
found = !strcmp(name, snap_name);
kfree(snap_name);
}
return found ? snap_id : CEPH_NOSNAP;
}
/*
* Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
* no snapshot by that name is found, or if an error occurs.
*/
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
if (rbd_dev->image_format == 1)
return rbd_v1_snap_id_by_name(rbd_dev, name);
return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
* When an rbd image has a parent image, it is identified by the
* pool, image, and snapshot ids (not names). This function fills
* in the names for those ids. (It's OK if we can't figure out the
* name for an image id, but the pool and snapshot ids should always
* exist and have names.) All names in an rbd spec are dynamically
* allocated.
*
* When an image being mapped (not a parent) is probed, we have the
* pool name and pool id, image name and image id, and the snapshot
* name. The only thing we're missing is the snapshot id.
*/
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_spec *spec = rbd_dev->spec;
const char *pool_name;
const char *image_name;
const char *snap_name;
int ret;
/*
* An image being mapped will have the pool name (etc.), but
* we need to look up the snapshot id.
*/
if (spec->pool_name) {
if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
u64 snap_id;
snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
spec->snap_id = snap_id;
} else {
spec->snap_id = CEPH_NOSNAP;
}
return 0;
}
/* Get the pool name; we have to make our own copy of this */
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
if (!pool_name) {
rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
return -EIO;
}
pool_name = kstrdup(pool_name, GFP_KERNEL);
if (!pool_name)
return -ENOMEM;
/* Fetch the image name; tolerate failure here */
image_name = rbd_dev_image_name(rbd_dev);
if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
/* Look up the snapshot name, and make a copy */
snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out_err;
}
spec->pool_name = pool_name;
spec->image_name = image_name;
spec->snap_name = snap_name;
return 0;
out_err:
kfree(image_name);
kfree(pool_name);
return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
size_t size;
int ret;
void *reply_buf;
void *p;
void *end;
u64 seq;
u32 snap_count;
struct ceph_snap_context *snapc;
u32 i;
/*
* We'll need room for the seq value (maximum snapshot id),
* snapshot count, and array of that many snapshot ids.
* For now we have a fixed upper limit on the number we're
* prepared to receive.
*/
size = sizeof (__le64) + sizeof (__le32) +
RBD_MAX_SNAP_COUNT * sizeof (__le64);
reply_buf = kzalloc(size, GFP_KERNEL);
if (!reply_buf)
return -ENOMEM;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapcontext", NULL, 0,
reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
goto out;
p = reply_buf;
end = reply_buf + ret;
ret = -ERANGE;
ceph_decode_64_safe(&p, end, seq, out);
ceph_decode_32_safe(&p, end, snap_count, out);
/*
* Make sure the reported number of snapshot ids wouldn't go
* beyond the end of our buffer. But before checking that,
* make sure the computed size of the snapshot context we
* allocate is representable in a size_t.
*/
if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
/ sizeof (u64)) {
ret = -EINVAL;
goto out;
}
if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
goto out;
ret = 0;
snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc) {
ret = -ENOMEM;
goto out;
}
snapc->seq = seq;
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
kfree(reply_buf);
return ret;
}
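/*
* A minimal sketch of the overflow guard used above, with hypothetical
* names: before computing "header + count * element" as an allocation
* size, bound the count first so the multiplication itself cannot wrap:
*
* if (count > (SIZE_MAX - sizeof (struct hdr)) / sizeof (u64))
* return -EINVAL;
* size = sizeof (struct hdr) + count * sizeof (u64);
*/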
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
{
size_t size;
void *reply_buf;
__le64 snapid;
int ret;
void *p;
void *end;
char *snap_name;
size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
return ERR_PTR(-ENOMEM);
snapid = cpu_to_le64(snap_id);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_snapshot_name",
&snapid, sizeof (snapid),
reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0) {
snap_name = ERR_PTR(ret);
goto out;
}
p = reply_buf;
end = reply_buf + ret;
snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(snap_name))
goto out;
dout(" snap_id 0x%016llx snap_name = %s\n",
(unsigned long long)snap_id, snap_name);
out:
kfree(reply_buf);
return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
down_write(&rbd_dev->header_rwsem);
ret = rbd_dev_v2_image_size(rbd_dev);
if (ret)
goto out;
if (first_time) {
ret = rbd_dev_v2_header_onetime(rbd_dev);
if (ret)
goto out;
}
/*
* If the image supports layering, get the parent info. We
* need to probe the first time regardless. Thereafter we
* only need to if there's a parent, to see if it has
* disappeared due to the mapped image getting flattened.
*/
if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
(first_time || rbd_dev->parent_spec)) {
bool warn;
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto out;
/*
* Print a warning if this is the initial probe and
* the image has a parent. Don't print it if the
* image now being probed is itself a parent. We
* can tell at this point because we won't know its
* pool name yet (just its pool id).
*/
warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
if (first_time && warn)
rbd_warn(rbd_dev, "WARNING: kernel layering "
"is EXPERIMENTAL!");
}
if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
if (rbd_dev->mapping.size != rbd_dev->header.image_size)
rbd_dev->mapping.size = rbd_dev->header.image_size;
ret = rbd_dev_v2_snap_context(rbd_dev);
dout("rbd_dev_v2_snap_context returned %d\n", ret);
out:
up_write(&rbd_dev->header_rwsem);
return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
struct device *dev;
int ret;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
dev = &rbd_dev->dev;
dev->bus = &rbd_bus_type;
dev->type = &rbd_device_type;
dev->parent = &rbd_root_dev;
dev->release = rbd_dev_device_release;
dev_set_name(dev, "%d", rbd_dev->dev_id);
ret = device_register(dev);
mutex_unlock(&ctl_mutex);
return ret;
}
static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
device_unregister(&rbd_dev->dev);
}
static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
/*
* Get a unique rbd identifier for the given new rbd_dev, and add
* the rbd_dev to the global list. The minimum rbd id is 1.
*/
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
spin_unlock(&rbd_dev_list_lock);
dout("rbd_dev %p given dev id %llu\n", rbd_dev,
(unsigned long long) rbd_dev->dev_id);
}
/*
* Remove an rbd_dev from the global list, and record that its
* identifier is no longer in use.
*/
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
struct list_head *tmp;
int rbd_id = rbd_dev->dev_id;
int max_id;
rbd_assert(rbd_id > 0);
dout("rbd_dev %p released dev id %llu\n", rbd_dev,
(unsigned long long) rbd_dev->dev_id);
spin_lock(&rbd_dev_list_lock);
list_del_init(&rbd_dev->node);
/*
* If the id being "put" is not the current maximum, there
* is nothing special we need to do.
*/
if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
spin_unlock(&rbd_dev_list_lock);
return;
}
/*
* We need to update the current maximum id. Search the
* list to find out what it is. We're more likely to find
* the maximum at the end, so search the list backward.
*/
max_id = 0;
list_for_each_prev(tmp, &rbd_dev_list) {
struct rbd_device *rbd_dev;
rbd_dev = list_entry(tmp, struct rbd_device, node);
if (rbd_dev->dev_id > max_id)
max_id = rbd_dev->dev_id;
}
spin_unlock(&rbd_dev_list_lock);
/*
* The max id could have been updated by rbd_dev_id_get(), in
* which case it now accurately reflects the new maximum.
* Be careful not to overwrite the maximum value in that
* case.
*/
atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
dout(" max dev id has been reset\n");
}
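/*
* Illustrative race (hypothetical ids): suppose the maximum is 5, we
* just put id 5 and recomputed max_id = 3. If nobody raced,
* atomic64_cmpxchg(&rbd_dev_id_max, 5, 3) stores 3; but if
* rbd_dev_id_get() bumped the counter to 6 in the meantime, the
* compare fails and the newer maximum 6 is preserved, as the comment
* above describes.
*/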
/*
* Skips over white space at *buf, and updates *buf to point to the
* first found non-space character (if any). Returns the length of
* the token (string of non-white space characters) found. Note
* that *buf must be terminated with '\0'.
*/
static inline size_t next_token(const char **buf)
{
/*
* These are the characters that produce nonzero for
* isspace() in the "C" and "POSIX" locales.
*/
const char *spaces = " \f\n\r\t\v";
*buf += strspn(*buf, spaces); /* Find start of token */
return strcspn(*buf, spaces); /* Return token length */
}
/*
* Finds the next token in *buf, and if the provided token buffer is
* big enough, copies the found token into it. The result, if
* copied, is guaranteed to be terminated with '\0'. Note that *buf
* must be terminated with '\0' on entry.
*
* Returns the length of the token found (not including the '\0').
* Return value will be 0 if no token is found, and it will be >=
* token_size if the token would not fit.
*
* The *buf pointer will be updated to point beyond the end of the
* found token. Note that this occurs even if the token buffer is
* too small to hold it.
*/
static inline size_t copy_token(const char **buf,
char *token,
size_t token_size)
{
size_t len;
len = next_token(buf);
if (len < token_size) {
memcpy(token, *buf, len);
*(token + len) = '\0';
}
*buf += len;
return len;
}
/*
* Finds the next token in *buf, dynamically allocates a buffer big
* enough to hold a copy of it, and copies the token into the new
* buffer. The copy is guaranteed to be terminated with '\0'. Note
* that a duplicate buffer is created even for a zero-length token.
*
* Returns a pointer to the newly-allocated duplicate, or a null
* pointer if memory for the duplicate was not available. If
* the lenp argument is a non-null pointer, the length of the token
* (not including the '\0') is returned in *lenp.
*
* If successful, the *buf pointer will be updated to point beyond
* the end of the found token.
*
* Note: uses GFP_KERNEL for allocation.
*/
static inline char *dup_token(const char **buf, size_t *lenp)
{
char *dup;
size_t len;
len = next_token(buf);
dup = kmemdup(*buf, len + 1, GFP_KERNEL);
if (!dup)
return NULL;
*(dup + len) = '\0';
*buf += len;
if (lenp)
*lenp = len;
return dup;
}
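/*
* Minimal usage sketch for the token helpers (hypothetical buffer,
* not driver code):
*
* const char *buf = " foo bar";
* char *t1 = dup_token(&buf, NULL); => t1 = "foo", buf at " bar"
* char *t2 = dup_token(&buf, NULL); => t2 = "bar", buf at ""
* kfree(t2);
* kfree(t1);
*/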
/*
* Parse the options provided for an "rbd add" (i.e., rbd image
* mapping) request. These arrive via a write to /sys/bus/rbd/add,
* and the data written is passed here via a NUL-terminated buffer.
* Returns 0 if successful or an error code otherwise.
*
* The information extracted from these options is recorded in
* the other parameters which return dynamically-allocated
* structures:
* ceph_opts
* The address of a pointer that will refer to a ceph options
* structure. Caller must release the returned pointer using
* ceph_destroy_options() when it is no longer needed.
* rbd_opts
* Address of an rbd options pointer. Fully initialized by
* this function; caller must release with kfree().
* spec
* Address of an rbd image specification pointer. Fully
* initialized by this function based on parsed options.
* Caller must release with rbd_spec_put().
*
* The options passed take this form:
* <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
* where:
* <mon_addrs>
* A comma-separated list of one or more monitor addresses.
* A monitor address is an ip address, optionally followed
* by a port number (separated by a colon).
* I.e.: ip1[:port1][,ip2[:port2]...]
* <options>
* A comma-separated list of ceph and/or rbd options.
* <pool_name>
* The name of the rados pool containing the rbd image.
* <image_name>
* The name of the image in that pool to map.
* <snap_name>
* An optional snapshot name. If provided, the mapping will
* present data from the image at the time that snapshot was
* created. The image head is used if no snapshot name is
* provided. Snapshot mappings are always read-only.
*/
static int rbd_add_parse_args(const char *buf,
struct ceph_options **ceph_opts,
struct rbd_options **opts,
struct rbd_spec **rbd_spec)
{
size_t len;
char *options;
const char *mon_addrs;
char *snap_name;
size_t mon_addrs_size;
struct rbd_spec *spec = NULL;
struct rbd_options *rbd_opts = NULL;
struct ceph_options *copts;
int ret;
/* The first four tokens are required */
len = next_token(&buf);
if (!len) {
rbd_warn(NULL, "no monitor address(es) provided");
return -EINVAL;
}
mon_addrs = buf;
mon_addrs_size = len + 1;
buf += len;
ret = -EINVAL;
options = dup_token(&buf, NULL);
if (!options)
return -ENOMEM;
if (!*options) {
rbd_warn(NULL, "no options provided");
goto out_err;
}
spec = rbd_spec_alloc();
if (!spec)
goto out_mem;
spec->pool_name = dup_token(&buf, NULL);
if (!spec->pool_name)
goto out_mem;
if (!*spec->pool_name) {
rbd_warn(NULL, "no pool name provided");
goto out_err;
}
spec->image_name = dup_token(&buf, NULL);
if (!spec->image_name)
goto out_mem;
if (!*spec->image_name) {
rbd_warn(NULL, "no image name provided");
goto out_err;
}
/*
* Snapshot name is optional; default is to use "-"
* (indicating the head/no snapshot).
*/
len = next_token(&buf);
if (!len) {
buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
} else if (len > RBD_MAX_SNAP_NAME_LEN) {
ret = -ENAMETOOLONG;
goto out_err;
}
snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
if (!snap_name)
goto out_mem;
*(snap_name + len) = '\0';
spec->snap_name = snap_name;
/* Initialize all rbd options to the defaults */
rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
if (!rbd_opts)
goto out_mem;
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
parse_rbd_opts_token, rbd_opts);
if (IS_ERR(copts)) {
ret = PTR_ERR(copts);
goto out_err;
}
kfree(options);
*ceph_opts = copts;
*opts = rbd_opts;
*rbd_spec = spec;
return 0;
out_mem:
ret = -ENOMEM;
out_err:
kfree(rbd_opts);
rbd_spec_put(spec);
kfree(options);
return ret;
}
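/*
* Example request in the format parsed above (hypothetical monitor
* address, key and names):
*
* echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
* > /sys/bus/rbd/add
*
* maps the head of image "myimage" from pool "rbd"; replacing the
* trailing "-" with a snapshot name yields a read-only mapping of
* that snapshot.
*/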
/*
* An rbd format 2 image has a unique identifier, distinct from the
* name given to it by the user. Internally, that identifier is
* what's used to specify the names of objects related to the image.
*
* A special "rbd id" object is used to map an rbd image name to its
* id. If that object doesn't exist, then there is no v2 rbd image
* with the supplied name.
*
* This function will record the given rbd_dev's image_id field if
* it can be determined, and in that case will return 0. If any
* errors occur a negative errno will be returned and the rbd_dev's
* image_id field will be unchanged (and should be NULL).
*/
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
int ret;
size_t size;
char *object_name;
void *response;
char *image_id;
/*
* When probing a parent image, the image id is already
* known (and the image name likely is not). There's no
* need to fetch the image id again in this case. We
* do still need to set the image format though.
*/
if (rbd_dev->spec->image_id) {
rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
return 0;
}
/*
* First, see if the format 2 image id file exists, and if
* so, get the image's persistent id from it.
*/
size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
object_name = kmalloc(size, GFP_NOIO);
if (!object_name)
return -ENOMEM;
sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
dout("rbd id object name is %s\n", object_name);
/* Response will be an encoded string, which includes a length */
size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
response = kzalloc(size, GFP_NOIO);
if (!response) {
ret = -ENOMEM;
goto out;
}
/* If it doesn't exist we'll assume it's a format 1 image */
ret = rbd_obj_method_sync(rbd_dev, object_name,
"rbd", "get_id", NULL, 0,
response, RBD_IMAGE_ID_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret == -ENOENT) {
image_id = kstrdup("", GFP_KERNEL);
ret = image_id ? 0 : -ENOMEM;
if (!ret)
rbd_dev->image_format = 1;
} else if (ret > sizeof (__le32)) {
void *p = response;
image_id = ceph_extract_encoded_string(&p, p + ret,
NULL, GFP_NOIO);
ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
if (!ret)
rbd_dev->image_format = 2;
} else {
ret = -EINVAL;
}
if (!ret) {
rbd_dev->spec->image_id = image_id;
dout("image_id is %s\n", image_id);
}
out:
kfree(response);
kfree(object_name);
return ret;
}
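/*
* Example (hypothetical image): for an image named "foo" the id
* object queried above is RBD_ID_PREFIX "foo", i.e. "rbd_id.foo".
* A format 2 image stores its id string there; a missing object
* (-ENOENT) means "foo" is a format 1 image.
*/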
/*
* Undo whatever state changes are made by v1 or v2 header info
* call.
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
/* Drop parent reference unless it's already been done (or none) */
if (rbd_dev->parent_overlap)
rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
header = &rbd_dev->header;
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
kfree(header->object_prefix);
memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_dev_v2_object_prefix(rbd_dev);
if (ret)
goto out_err;
/*
* Get and check the features for the image. Currently the
* features are assumed to never change.
*/
ret = rbd_dev_v2_features(rbd_dev);
if (ret)
goto out_err;
/* If the image supports fancy striping, get its parameters */
if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
ret = rbd_dev_v2_striping_info(rbd_dev);
if (ret < 0)
goto out_err;
}
/* No support for crypto and compression type in format 2 images */
return 0;
out_err:
rbd_dev->header.features = 0;
kfree(rbd_dev->header.object_prefix);
rbd_dev->header.object_prefix = NULL;
return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
struct rbd_device *parent = NULL;
struct rbd_spec *parent_spec;
struct rbd_client *rbdc;
int ret;
if (!rbd_dev->parent_spec)
return 0;
/*
* We need to pass a reference to the client and the parent
* spec when creating the parent rbd_dev. Images related by
* parent/child relationships always share both.
*/
parent_spec = rbd_spec_get(rbd_dev->parent_spec);
rbdc = __rbd_get_client(rbd_dev->rbd_client);
ret = -ENOMEM;
parent = rbd_dev_create(rbdc, parent_spec);
if (!parent)
goto out_err;
ret = rbd_dev_image_probe(parent, false);
if (ret < 0)
goto out_err;
rbd_dev->parent = parent;
atomic_set(&rbd_dev->parent_ref, 1);
return 0;
out_err:
if (parent) {
rbd_dev_unparent(rbd_dev);
kfree(rbd_dev->header_name);
rbd_dev_destroy(parent);
} else {
rbd_put_client(rbdc);
rbd_spec_put(parent_spec);
}
return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
/* generate unique id: find highest unique id, add one */
rbd_dev_id_get(rbd_dev);
/* Fill in the device name, now that we have its id. */
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
/* Get our block major device number. */
ret = register_blkdev(0, rbd_dev->name);
if (ret < 0)
goto err_out_id;
rbd_dev->major = ret;
/* Set up the blkdev mapping. */
ret = rbd_init_disk(rbd_dev);
if (ret)
goto err_out_blkdev;
ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
goto err_out_disk;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
ret = rbd_bus_add_dev(rbd_dev);
if (ret)
goto err_out_mapping;
/* Everything's ready. Announce the disk to the world. */
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
return ret;
err_out_mapping:
rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
struct rbd_spec *spec = rbd_dev->spec;
size_t size;
/* Record the header object name for this rbd image. */
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
if (rbd_dev->image_format == 1)
size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
else
size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
if (!rbd_dev->header_name)
return -ENOMEM;
if (rbd_dev->image_format == 1)
sprintf(rbd_dev->header_name, "%s%s",
spec->image_name, RBD_SUFFIX);
else
sprintf(rbd_dev->header_name, "%s%s",
RBD_HEADER_PREFIX, spec->image_id);
return 0;
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
rbd_dev_destroy(rbd_dev);
}
/*
* Probe for the existence of the header object for the given rbd
* device. If this image is the one being mapped (i.e., not a
* parent), initiate a watch on its header object before using that
* object to get detailed information about the rbd image.
*/
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
int ret;
int tmp;
/*
* Get the id from the image id object. Unless there's an
* error, rbd_dev->spec->image_id will be filled in with
* a dynamically-allocated string, and rbd_dev->image_format
* will be set to either 1 or 2.
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
return ret;
rbd_assert(rbd_dev->spec->image_id);
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
ret = rbd_dev_header_name(rbd_dev);
if (ret)
goto err_out_format;
if (mapping) {
ret = rbd_dev_header_watch_sync(rbd_dev, true);
if (ret)
goto out_header_name;
}
if (rbd_dev->image_format == 1)
ret = rbd_dev_v1_header_info(rbd_dev);
else
ret = rbd_dev_v2_header_info(rbd_dev);
if (ret)
goto err_out_watch;
ret = rbd_dev_spec_update(rbd_dev);
if (ret)
goto err_out_probe;
ret = rbd_dev_probe_parent(rbd_dev);
if (ret)
goto err_out_probe;
dout("discovered format %u image, header name is %s\n",
rbd_dev->image_format, rbd_dev->header_name);
return 0;
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
if (mapping) {
tmp = rbd_dev_header_watch_sync(rbd_dev, false);
if (tmp)
rbd_warn(rbd_dev, "unable to tear down "
"watch request (%d)\n", tmp);
}
out_header_name:
kfree(rbd_dev->header_name);
rbd_dev->header_name = NULL;
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
dout("probe failed, returning %d\n", ret);
return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
const char *buf,
size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct ceph_options *ceph_opts = NULL;
struct rbd_options *rbd_opts = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
struct ceph_osd_client *osdc;
bool read_only;
int rc = -ENOMEM;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
/* parse add command */
rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
if (rc < 0)
goto err_out_module;
read_only = rbd_opts->read_only;
kfree(rbd_opts);
rbd_opts = NULL; /* done with this */
rbdc = rbd_get_client(ceph_opts);
if (IS_ERR(rbdc)) {
rc = PTR_ERR(rbdc);
goto err_out_args;
}
/* pick the pool */
osdc = &rbdc->client->osdc;
rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
if (rc < 0)
goto err_out_client;
spec->pool_id = (u64)rc;
/* The ceph file layout needs to fit pool id in 32 bits */
if (spec->pool_id > (u64)U32_MAX) {
rbd_warn(NULL, "pool id too large (%llu > %u)\n",
(unsigned long long)spec->pool_id, U32_MAX);
rc = -EIO;
goto err_out_client;
}
rbd_dev = rbd_dev_create(rbdc, spec);
if (!rbd_dev)
goto err_out_client;
rbdc = NULL; /* rbd_dev now owns this */
spec = NULL; /* rbd_dev now owns this */
rc = rbd_dev_image_probe(rbd_dev, true);
if (rc < 0)
goto err_out_rbd_dev;
/* If we are mapping a snapshot it must be marked read-only */
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
read_only = true;
rbd_dev->mapping.read_only = read_only;
rc = rbd_dev_device_setup(rbd_dev);
if (rc) {
rbd_dev_image_release(rbd_dev);
goto err_out_module;
}
return count;
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
err_out_args:
rbd_spec_put(spec);
err_out_module:
module_put(THIS_MODULE);
dout("Error adding device %s\n", buf);
return (ssize_t)rc;
}
static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
rbd_free_disk(rbd_dev);
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_dev_mapping_clear(rbd_dev);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
rbd_dev->major = 0;
rbd_dev_id_put(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
while (rbd_dev->parent) {
struct rbd_device *first = rbd_dev;
struct rbd_device *second = first->parent;
struct rbd_device *third;
/*
* Follow to the parent with no grandparent and
* remove it.
*/
while (second && (third = second->parent)) {
first = second;
second = third;
}
rbd_assert(second);
rbd_dev_image_release(second);
first->parent = NULL;
first->parent_overlap = 0;
rbd_assert(first->parent_spec);
rbd_spec_put(first->parent_spec);
first->parent_spec = NULL;
}
}
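/*
* Example (hypothetical chain): for dev -> p1 -> p2 the inner walk
* above ends with first = p1 and second = p2, so p2 is released
* first; the outer loop then repeats and releases p1, tearing the
* chain down deepest ancestor first.
*/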
static ssize_t rbd_remove(struct bus_type *bus,
const char *buf,
size_t count)
{
struct rbd_device *rbd_dev = NULL;
struct list_head *tmp;
int dev_id;
unsigned long ul;
bool already = false;
int ret;
ret = strict_strtoul(buf, 10, &ul);
if (ret)
return ret;
/* convert to int; abort if we lost anything in the conversion */
dev_id = (int)ul;
if (dev_id != ul)
return -EINVAL;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
ret = -ENOENT;
spin_lock(&rbd_dev_list_lock);
list_for_each(tmp, &rbd_dev_list) {
rbd_dev = list_entry(tmp, struct rbd_device, node);
if (rbd_dev->dev_id == dev_id) {
ret = 0;
break;
}
}
if (!ret) {
spin_lock_irq(&rbd_dev->lock);
if (rbd_dev->open_count)
ret = -EBUSY;
else
already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
&rbd_dev->flags);
spin_unlock_irq(&rbd_dev->lock);
}
spin_unlock(&rbd_dev_list_lock);
if (ret < 0 || already)
goto done;
ret = rbd_dev_header_watch_sync(rbd_dev, false);
if (ret)
rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
/*
* flush remaining watch callbacks - these must be complete
* before the osd_client is shut down
*/
dout("%s: flushing notifies", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
* Don't free anything from rbd_dev->disk until after all
* notifies are completely processed. Otherwise
* rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
* in a potential use after free of rbd_dev->disk or rbd_dev.
*/
rbd_bus_del_dev(rbd_dev);
rbd_dev_image_release(rbd_dev);
module_put(THIS_MODULE);
ret = count;
done:
mutex_unlock(&ctl_mutex);
return ret;
}
/*
* create control files in sysfs
* /sys/bus/rbd/...
*/
static int rbd_sysfs_init(void)
{
int ret;
ret = device_register(&rbd_root_dev);
if (ret < 0)
return ret;
ret = bus_register(&rbd_bus_type);
if (ret < 0)
device_unregister(&rbd_root_dev);
return ret;
}
static void rbd_sysfs_cleanup(void)
{
bus_unregister(&rbd_bus_type);
device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
rbd_assert(!rbd_img_request_cache);
rbd_img_request_cache = kmem_cache_create("rbd_img_request",
sizeof (struct rbd_img_request),
__alignof__(struct rbd_img_request),
0, NULL);
if (!rbd_img_request_cache)
return -ENOMEM;
rbd_assert(!rbd_obj_request_cache);
rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
sizeof (struct rbd_obj_request),
__alignof__(struct rbd_obj_request),
0, NULL);
if (!rbd_obj_request_cache)
goto out_err;
rbd_assert(!rbd_segment_name_cache);
rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
if (rbd_segment_name_cache)
return 0;
out_err:
if (rbd_obj_request_cache) {
kmem_cache_destroy(rbd_obj_request_cache);
rbd_obj_request_cache = NULL;
}
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
return -ENOMEM;
}
static void rbd_slab_exit(void)
{
rbd_assert(rbd_segment_name_cache);
kmem_cache_destroy(rbd_segment_name_cache);
rbd_segment_name_cache = NULL;
rbd_assert(rbd_obj_request_cache);
kmem_cache_destroy(rbd_obj_request_cache);
rbd_obj_request_cache = NULL;
rbd_assert(rbd_img_request_cache);
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
int rc;
if (!libceph_compatible(NULL)) {
rbd_warn(NULL, "libceph incompatibility (quitting)");
return -EINVAL;
}
rc = rbd_slab_init();
if (rc)
return rc;
rc = rbd_sysfs_init();
if (rc)
rbd_slab_exit();
else
pr_info("loaded " RBD_DRV_NAME_LONG "\n");
return rc;
}
static void __exit rbd_exit(void)
{
rbd_sysfs_cleanup();
rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kh007im/ngawi-ics | drivers/gpu/drm/radeon/radeon_pm.c | 451 | 2160 | /*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <zajec5@gmail.com>
*/
#include "drmP.h"
#include "radeon.h"
int radeon_debugfs_pm_init(struct radeon_device *rdev);
int radeon_pm_init(struct radeon_device *rdev)
{
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
return 0;
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
return 0;
}
static struct drm_info_list radeon_pm_info_list[] = {
{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
return 0;
#endif
}
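/*
* Example: once registered, the info file appears under debugfs,
* typically as /sys/kernel/debug/dri/<minor>/radeon_pm_info, and
* reading it prints the current engine and memory clocks.
*/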
| gpl-2.0 |
Umang88/Radon-Kenzo | drivers/ata/pata_at32.c | 1987 | 10369 | /*
* AVR32 SMC/CFC PATA Driver
*
* Copyright (C) 2007 Atmel Norway
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/err.h>
#include <linux/io.h>
#include <mach/board.h>
#include <mach/smc.h>
#define DRV_NAME "pata_at32"
#define DRV_VERSION "0.0.3"
/*
* CompactFlash controller memory layout relative to the base address:
*
* Attribute memory: 0000 0000 -> 003f ffff
* Common memory: 0040 0000 -> 007f ffff
* I/O memory: 0080 0000 -> 00bf ffff
* True IDE Mode: 00c0 0000 -> 00df ffff
* Alt IDE Mode: 00e0 0000 -> 00ff ffff
*
* Only True IDE and Alt True IDE mode are needed for this driver.
*
* True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
* Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
*/
#define CF_IDE_OFFSET 0x00c00000
#define CF_ALT_IDE_OFFSET 0x00e00000
#define CF_RES_SIZE 2048
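/*
* Example decode (hypothetical base address): with the CF chip select
* mapped at 0x08000000, the True IDE task file would appear at
* 0x08000000 + CF_IDE_OFFSET = 0x08c00000 and the Alt/Ctl window at
* 0x08000000 + CF_ALT_IDE_OFFSET = 0x08e00000, each CF_RES_SIZE bytes
* long.
*/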
/*
* Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
* adaptor with a logic analyzer or similar.
*/
#undef DEBUG_BUS
/*
* ATA PIO modes
*
* Name | Mb/s | Min cycle time | Mask
* --------+-------+----------------+--------
* Mode 0 | 3.3 | 600 ns | 0x01
* Mode 1 | 5.2 | 383 ns | 0x03
* Mode 2 | 8.3 | 240 ns | 0x07
* Mode 3 | 11.1 | 180 ns | 0x0f
* Mode 4 | 16.7 | 120 ns | 0x1f
*
* Alter PIO_MASK below according to table to set maximal PIO mode.
*/
enum {
PIO_MASK = ATA_PIO4,
};
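/*
* Example: ATA_PIO4 (mask 0x1f) advertises PIO modes 0-4 per the
* table above; a board limited to mode 2 timings would instead use
* ATA_PIO2 (mask 0x07).
*/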
/*
* Struct containing private information about device.
*/
struct at32_ide_info {
unsigned int irq;
struct resource res_ide;
struct resource res_alt;
void __iomem *ide_addr;
void __iomem *alt_addr;
unsigned int cs;
struct smc_config smc;
};
/*
* Setup SMC for the given ATA timing.
*/
static int pata_at32_setup_timing(struct device *dev,
struct at32_ide_info *info,
const struct ata_timing *ata)
{
struct smc_config *smc = &info->smc;
struct smc_timing timing;
int active;
int recover;
memset(&timing, 0, sizeof(struct smc_timing));
/* Total cycle time */
timing.read_cycle = ata->cyc8b;
/* DIOR <= CFIOR timings */
timing.nrd_setup = ata->setup;
timing.nrd_pulse = ata->act8b;
timing.nrd_recover = ata->rec8b;
/* Convert nanosecond timing to clock cycles */
smc_set_timing(smc, &timing);
/* Add one extra cycle setup due to signal ringing */
smc->nrd_setup = smc->nrd_setup + 1;
active = smc->nrd_setup + smc->nrd_pulse;
recover = smc->read_cycle - active;
/* Need at least two cycles recovery */
if (recover < 2)
smc->read_cycle = active + 2;
/* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
smc->ncs_read_setup = 1;
smc->ncs_read_pulse = smc->read_cycle - 2;
/* Write timings same as read timings */
smc->write_cycle = smc->read_cycle;
smc->nwe_setup = smc->nrd_setup;
smc->nwe_pulse = smc->nrd_pulse;
smc->ncs_write_setup = smc->ncs_read_setup;
smc->ncs_write_pulse = smc->ncs_read_pulse;
/* Do some debugging output of ATA and SMC timings */
dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
smc->ncs_read_setup, smc->ncs_read_pulse);
/* Finally, configure the SMC */
return smc_set_configuration(info->cs, smc);
}
/*
* Procedures for libATA.
*/
static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing timing;
struct at32_ide_info *info = ap->host->private_data;
int ret;
/* Compute ATA timing */
ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
if (ret) {
dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
return;
}
/* Setup SMC to ATA timing */
ret = pata_at32_setup_timing(ap->dev, info, &timing);
if (ret) {
dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
return;
}
}
static struct scsi_host_template at32_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations at32_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = pata_at32_set_piomode,
};
static int __init pata_at32_init_one(struct device *dev,
struct at32_ide_info *info)
{
struct ata_host *host;
struct ata_port *ap;
host = ata_host_alloc(dev, 1);
if (!host)
return -ENOMEM;
ap = host->ports[0];
/* Setup ATA bindings */
ap->ops = &at32_port_ops;
ap->pio_mask = PIO_MASK;
ap->flags |= ATA_FLAG_SLAVE_POSS;
/*
* Since all 8-bit taskfile transfers have to go on the lower
* byte of the data bus and there is a bug in the SMC that
* makes it impossible to alter the bus width during runtime,
* we need to hardwire the address signals as follows:
*
* A_IDE(2:0) <= A_EBI(3:1)
*
* This makes all addresses on the EBI even, thus all data
* will be on the lower byte of the data bus. All addresses
* used by libATA need to be altered according to this.
*/
ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1);
ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1);
ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1);
ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1);
ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1);
ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1);
ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1);
ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1);
ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1);
ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1);
ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1);
/* Set info as private data of ATA host */
host->private_data = info;
/* Register ATA device and return */
return ata_host_activate(host, info->irq, ata_sff_interrupt,
IRQF_SHARED | IRQF_TRIGGER_RISING,
&at32_sht);
}
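/*
* Worked example of the shifted addressing above: ATA_REG_STATUS is
* task-file offset 0x07, so the status register is read at
* info->ide_addr + (0x07 << 1) = info->ide_addr + 0x0e, keeping
* A_EBI(0) zero and the data on the lower byte lane.
*/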
/*
* This function may come in handy for people analyzing their own
* EBI -> PATA adaptors.
*/
#ifdef DEBUG_BUS
static void __init pata_at32_debug_bus(struct device *dev,
struct at32_ide_info *info)
{
const int d1 = 0xff;
const int d2 = 0x00;
int i;
/* Write 8-bit values (registers) */
iowrite8(d1, info->alt_addr + (0x06 << 1));
iowrite8(d2, info->alt_addr + (0x06 << 1));
for (i = 0; i < 8; i++) {
iowrite8(d1, info->ide_addr + (i << 1));
iowrite8(d2, info->ide_addr + (i << 1));
}
/* Write 16 bit values (data) */
iowrite16(d1, info->ide_addr);
iowrite16(d1 << 8, info->ide_addr);
iowrite16(d1, info->ide_addr);
iowrite16(d1 << 8, info->ide_addr);
}
#endif
static int __init pata_at32_probe(struct platform_device *pdev)
{
const struct ata_timing initial_timing =
{XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
struct device *dev = &pdev->dev;
struct at32_ide_info *info;
struct ide_platform_data *board = pdev->dev.platform_data;
struct resource *res;
int irq;
int ret;
if (!board)
return -ENXIO;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
/* Retrieve IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* Setup struct containing private information */
info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->irq = irq;
info->cs = board->cs;
/* Request memory resources */
info->res_ide.start = res->start + CF_IDE_OFFSET;
info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
info->res_ide.name = "ide";
info->res_ide.flags = IORESOURCE_MEM;
ret = request_resource(res, &info->res_ide);
if (ret)
goto err_req_res_ide;
info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
info->res_alt.name = "alt";
info->res_alt.flags = IORESOURCE_MEM;
ret = request_resource(res, &info->res_alt);
if (ret)
goto err_req_res_alt;
/* Setup non-timing elements of SMC */
info->smc.bus_width = 2; /* 16 bit data bus */
info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
info->smc.byte_write = 0; /* Byte select access type */
info->smc.tdf_mode = 0; /* TDF optimization disabled */
info->smc.tdf_cycles = 0; /* No TDF wait cycles */
/* Setup SMC to ATA timing */
ret = pata_at32_setup_timing(dev, info, &initial_timing);
if (ret)
goto err_setup_timing;
/* Map ATA address space */
ret = -ENOMEM;
info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
if (!info->ide_addr || !info->alt_addr)
goto err_ioremap;
#ifdef DEBUG_BUS
pata_at32_debug_bus(dev, info);
#endif
/* Setup and register ATA device */
ret = pata_at32_init_one(dev, info);
if (ret)
goto err_ata_device;
return 0;
err_ata_device:
err_ioremap:
err_setup_timing:
release_resource(&info->res_alt);
err_req_res_alt:
release_resource(&info->res_ide);
err_req_res_ide:
kfree(info);
return ret;
}
static int __exit pata_at32_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct at32_ide_info *info;
if (!host)
return 0;
info = host->private_data;
ata_host_detach(host);
if (!info)
return 0;
release_resource(&info->res_ide);
release_resource(&info->res_alt);
kfree(info);
return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:at32_ide");
static struct platform_driver pata_at32_driver = {
.remove = __exit_p(pata_at32_remove),
.driver = {
.name = "at32_ide",
.owner = THIS_MODULE,
},
};
module_platform_driver_probe(pata_at32_driver, pata_at32_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
garwedgess/android_kernel_lge_g4 | arch/arm64/kvm/emulate.c | 1987 | 4243 | /*
* (not much of an) Emulation layer for 32bit guests.
*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* based on arch/arm/kvm/emulate.c
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
/*
* stolen from arch/arm/kernel/opcodes.c
*
* condition code lookup table
* index into the table is test code: EQ, NE, ... LT, GT, AL, NV
*
* bit position in short is condition code: NZCV
*/
static const unsigned short cc_map[16] = {
0xF0F0, /* EQ == Z set */
0x0F0F, /* NE */
0xCCCC, /* CS == C set */
0x3333, /* CC */
0xFF00, /* MI == N set */
0x00FF, /* PL */
0xAAAA, /* VS == V set */
0x5555, /* VC */
0x0C0C, /* HI == C set && Z clear */
0xF3F3, /* LS == C clear || Z set */
0xAA55, /* GE == (N==V) */
0x55AA, /* LT == (N!=V) */
0x0A05, /* GT == (!Z && (N==V)) */
0xF5FA, /* LE == (Z || (N!=V)) */
0xFFFF, /* AL always */
0 /* NV */
};
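/*
* Worked example: for EQ (cond 0) the entry is 0xF0F0, i.e. bit n is
* set exactly for the flag values n with Z (bit 2 of NZCV) set:
* n = 4..7 and 12..15. kvm_condition_valid32() below simply indexes
* this bitmap with the current CPSR flag nibble.
*/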
static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
if (esr & ESR_EL2_CV)
return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
return -1;
}
/*
* Check if a trapped instruction should have been executed or not.
*/
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
unsigned long cpsr;
u32 cpsr_cond;
int cond;
/* Top two bits non-zero? Unconditional. */
if (kvm_vcpu_get_hsr(vcpu) >> 30)
return true;
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);
if (cond == 0xE)
return true;
cpsr = *vcpu_cpsr(vcpu);
if (cond < 0) {
/* This can happen in Thumb mode: examine IT state. */
unsigned long it;
it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
/* it == 0 => unconditional. */
if (it == 0)
return true;
/* The cond for this insn works out as the top 4 bits. */
cond = (it >> 4);
}
cpsr_cond = cpsr >> 28;
if (!((cc_map[cond] >> cpsr_cond) & 1))
return false;
return true;
}
/**
* adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
* blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
* to do this little bit of work manually. The fields map like this:
*
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
*/
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
if (!(cpsr & COMPAT_PSR_IT_MASK))
return;
cond = (cpsr & 0xe000) >> 13;
itbits = (cpsr & 0x1c00) >> (10 - 2);
itbits |= (cpsr & (0x3 << 25)) >> 25;
/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
if ((itbits & 0x7) == 0)
itbits = cond = 0;
else
itbits = (itbits << 1) & 0x1f;
cpsr &= ~COMPAT_PSR_IT_MASK;
cpsr |= cond << 13;
cpsr |= (itbits & 0x1c) << (10 - 2);
cpsr |= (itbits & 0x3) << 25;
*vcpu_cpsr(vcpu) = cpsr;
}
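/*
* Example decode of the split fields above: CPSR[15:13] holds IT[7:5]
* (the base condition), CPSR[12:10] holds IT[4:2] and CPSR[26:25]
* holds IT[1:0]. Each advance shifts IT[4:0] left by one bit; once
* the low three bits are zero the block is finished and the whole IT
* state is cleared.
*/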
/**
* kvm_skip_instr - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;
is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2;
else
*vcpu_pc(vcpu) += 4;
kvm_adjust_itstate(vcpu);
}
| gpl-2.0 |
pacificIT/edison-kernel | sound/soc/kirkwood/kirkwood-dma.c | 2243 | 11124 | /*
* kirkwood-dma.c
*
* (c) 2010 Arnaud Patard <apatard@mandriva.com>
* (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/mbus.h>
#include <sound/soc.h>
#include "kirkwood.h"
#define KIRKWOOD_RATES \
(SNDRV_PCM_RATE_8000_192000 | \
SNDRV_PCM_RATE_CONTINUOUS | \
SNDRV_PCM_RATE_KNOT)
#define KIRKWOOD_FORMATS \
(SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
struct kirkwood_dma_priv {
struct snd_pcm_substream *play_stream;
struct snd_pcm_substream *rec_stream;
struct kirkwood_dma_data *data;
};
static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_PAUSE),
.formats = KIRKWOOD_FORMATS,
.rates = KIRKWOOD_RATES,
.rate_min = 8000,
.rate_max = 384000,
.channels_min = 1,
.channels_max = 8,
.buffer_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES * KIRKWOOD_SND_MAX_PERIODS,
.period_bytes_min = KIRKWOOD_SND_MIN_PERIOD_BYTES,
.period_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES,
.periods_min = KIRKWOOD_SND_MIN_PERIODS,
.periods_max = KIRKWOOD_SND_MAX_PERIODS,
.fifo_size = 0,
};
static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
{
struct kirkwood_dma_priv *prdata = dev_id;
struct kirkwood_dma_data *priv = prdata->data;
unsigned long mask, status, cause;
mask = readl(priv->io + KIRKWOOD_INT_MASK);
status = readl(priv->io + KIRKWOOD_INT_CAUSE) & mask;
cause = readl(priv->io + KIRKWOOD_ERR_CAUSE);
if (unlikely(cause)) {
printk(KERN_WARNING "%s: got err interrupt 0x%lx\n",
__func__, cause);
writel(cause, priv->io + KIRKWOOD_ERR_CAUSE);
}
/* we've enabled only the byte-count interrupts ... */
if (status & ~(KIRKWOOD_INT_CAUSE_PLAY_BYTES | \
KIRKWOOD_INT_CAUSE_REC_BYTES)) {
printk(KERN_WARNING "%s: unexpected interrupt %lx\n",
__func__, status);
return IRQ_NONE;
}
/* ack int */
writel(status, priv->io + KIRKWOOD_INT_CAUSE);
if (status & KIRKWOOD_INT_CAUSE_PLAY_BYTES)
snd_pcm_period_elapsed(prdata->play_stream);
if (status & KIRKWOOD_INT_CAUSE_REC_BYTES)
snd_pcm_period_elapsed(prdata->rec_stream);
return IRQ_HANDLED;
}
static void
kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
unsigned long dma,
const struct mbus_dram_target_info *dram)
{
int i;
/* First disable and clear windows */
writel(0, base + KIRKWOOD_AUDIO_WIN_CTRL_REG(win));
writel(0, base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
/* try to find matching cs for current dma address */
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
writel(cs->base & 0xffff0000,
base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
base + KIRKWOOD_AUDIO_WIN_CTRL_REG(win));
}
}
}
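/*
* Example (hypothetical DRAM layout): with one chip select at base
* 0x00000000 and size 512 MiB, a DMA buffer at 0x1f000000 satisfies
* the comparison above, so the window base register is programmed
* with 0x00000000 and the control register with the size mask, mbus
* attributes, target id and the enable bit (bit 0).
*/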
static int kirkwood_dma_open(struct snd_pcm_substream *substream)
{
int err;
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct snd_soc_platform *platform = soc_runtime->platform;
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
struct kirkwood_dma_data *priv;
struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
const struct mbus_dram_target_info *dram;
unsigned long addr;
priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
/* Ensure that all constraints linked to dma burst are fulfilled */
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
priv->burst * 2,
KIRKWOOD_AUDIO_BUF_MAX-1);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_step(runtime, 0,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
priv->burst);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
priv->burst);
if (err < 0)
return err;
if (prdata == NULL) {
prdata = kzalloc(sizeof(struct kirkwood_dma_priv), GFP_KERNEL);
if (prdata == NULL)
return -ENOMEM;
prdata->data = priv;
err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
"kirkwood-i2s", prdata);
if (err) {
kfree(prdata);
return -EBUSY;
}
snd_soc_platform_set_drvdata(platform, prdata);
/*
* Enable Error interrupts. We're only ack'ing them but
* it's useful for diagnostics
*/
writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK);
}
dram = mv_mbus_dram_info();
addr = substream->dma_buffer.addr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
prdata->play_stream = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_PLAYBACK_WIN, addr, dram);
} else {
prdata->rec_stream = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_RECORD_WIN, addr, dram);
}
return 0;
}
static int kirkwood_dma_close(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
struct snd_soc_platform *platform = soc_runtime->platform;
struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
struct kirkwood_dma_data *priv;
priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
if (!prdata || !priv)
return 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
prdata->play_stream = NULL;
else
prdata->rec_stream = NULL;
if (!prdata->play_stream && !prdata->rec_stream) {
writel(0, priv->io + KIRKWOOD_ERR_MASK);
free_irq(priv->irq, prdata);
kfree(prdata);
snd_soc_platform_set_drvdata(platform, NULL);
}
return 0;
}
static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
runtime->dma_bytes = params_buffer_bytes(params);
return 0;
}
static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
struct kirkwood_dma_data *priv;
unsigned long size, count;
priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
/* compute buffer size in terms of "words" as requested in specs */
size = frames_to_bytes(runtime, runtime->buffer_size);
size = (size>>2)-1;
count = snd_pcm_lib_period_bytes(substream);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
writel(count, priv->io + KIRKWOOD_PLAY_BYTE_INT_COUNT);
writel(runtime->dma_addr, priv->io + KIRKWOOD_PLAY_BUF_ADDR);
writel(size, priv->io + KIRKWOOD_PLAY_BUF_SIZE);
} else {
writel(count, priv->io + KIRKWOOD_REC_BYTE_INT_COUNT);
writel(runtime->dma_addr, priv->io + KIRKWOOD_REC_BUF_ADDR);
writel(size, priv->io + KIRKWOOD_REC_BUF_SIZE);
}
return 0;
}
static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
*substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
struct kirkwood_dma_data *priv;
snd_pcm_uframes_t count;
priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
count = bytes_to_frames(substream->runtime,
readl(priv->io + KIRKWOOD_PLAY_BYTE_COUNT));
else
count = bytes_to_frames(substream->runtime,
readl(priv->io + KIRKWOOD_REC_BYTE_COUNT));
return count;
}
struct snd_pcm_ops kirkwood_dma_ops = {
.open = kirkwood_dma_open,
.close = kirkwood_dma_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = kirkwood_dma_hw_params,
.hw_free = kirkwood_dma_hw_free,
.prepare = kirkwood_dma_prepare,
.pointer = kirkwood_dma_pointer,
};
static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size = kirkwood_dma_snd_hw.buffer_bytes_max;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->area = dma_alloc_coherent(pcm->card->dev, size,
&buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->bytes = size;
buf->private_data = NULL;
return 0;
}
static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret;
if (!card->dev->dma_mask)
card->dev->dma_mask = &kirkwood_dma_dmamask;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
ret = kirkwood_dma_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_PLAYBACK);
if (ret)
return ret;
}
if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = kirkwood_dma_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret)
return ret;
}
return 0;
}
static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
int stream;
for (stream = 0; stream < 2; stream++) {
substream = pcm->streams[stream].substream;
if (!substream)
continue;
buf = &substream->dma_buffer;
if (!buf->area)
continue;
dma_free_coherent(pcm->card->dev, buf->bytes,
buf->area, buf->addr);
buf->area = NULL;
}
}
static struct snd_soc_platform_driver kirkwood_soc_platform = {
.ops = &kirkwood_dma_ops,
.pcm_new = kirkwood_dma_new,
.pcm_free = kirkwood_dma_free_dma_buffers,
};
static int kirkwood_soc_platform_probe(struct platform_device *pdev)
{
return snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
}
static int kirkwood_soc_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
static struct platform_driver kirkwood_pcm_driver = {
.driver = {
.name = "kirkwood-pcm-audio",
.owner = THIS_MODULE,
},
.probe = kirkwood_soc_platform_probe,
.remove = kirkwood_soc_platform_remove,
};
module_platform_driver(kirkwood_pcm_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("Marvell Kirkwood Audio DMA module");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:kirkwood-pcm-audio");
| gpl-2.0 |
h113331pp/kernel-3.10.17 | sound/soc/samsung/smdk_wm8994pcm.c | 2243 | 4093 | /*
* sound/soc/samsung/smdk_wm8994pcm.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "../codecs/wm8994.h"
#include "dma.h"
#include "pcm.h"
/*
* Board Settings:
* o '1' means 'ON'
* o '0' means 'OFF'
* o 'X' means 'Don't care'
*
* SMDKC210, SMDKV310: CFG3-1001, CFG5-1000, CFG7-111111
*/
/*
* Configure audio route as :-
* $ amixer sset 'DAC1' on,on
* $ amixer sset 'Right Headphone Mux' 'DAC'
* $ amixer sset 'Left Headphone Mux' 'DAC'
* $ amixer sset 'DAC1R Mixer AIF1.1' on
* $ amixer sset 'DAC1L Mixer AIF1.1' on
* $ amixer sset 'IN2L' on
* $ amixer sset 'IN2L PGA IN2LN' on
* $ amixer sset 'MIXINL IN2L' on
* $ amixer sset 'AIF1ADC1L Mixer ADC/DMIC' on
* $ amixer sset 'IN2R' on
* $ amixer sset 'IN2R PGA IN2RN' on
* $ amixer sset 'MIXINR IN2R' on
* $ amixer sset 'AIF1ADC1R Mixer ADC/DMIC' on
*/
/* SMDK has a 16.9344 MHz crystal attached to WM8994 */
#define SMDK_WM8994_FREQ 16934400
static int smdk_wm8994_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned long mclk_freq;
int rfs, ret;
switch(params_rate(params)) {
case 8000:
rfs = 512;
break;
default:
dev_err(cpu_dai->dev, "%s:%d Sampling Rate %u not supported!\n",
__func__, __LINE__, params_rate(params));
return -EINVAL;
}
mclk_freq = params_rate(params) * rfs;
/* Set the codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B
| SND_SOC_DAIFMT_IB_NF
| SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* Set the cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_B
| SND_SOC_DAIFMT_IB_NF
| SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
mclk_freq, SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
SMDK_WM8994_FREQ, mclk_freq);
if (ret < 0)
return ret;
/* Set PCM source clock on CPU */
ret = snd_soc_dai_set_sysclk(cpu_dai, S3C_PCM_CLKSRC_MUX,
mclk_freq, SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
/* Set SCLK_DIV for making bclk */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_PCM_SCLK_PER_FS, rfs);
if (ret < 0)
return ret;
return 0;
}
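/*
 * Illustrative sketch (not part of the original file): the clock
 * arithmetic used in smdk_wm8994_pcm_hw_params() above, in isolation.
 * For the only supported rate, 8 kHz, with rfs = 512 the FLL output
 * requested from the 16.9344 MHz crystal is 8000 * 512 = 4096000 Hz.
 * The helper name is hypothetical.
 */
#if 0
static inline unsigned long smdk_wm8994_pcm_mclk(unsigned int rate,
						 unsigned int rfs)
{
	/* mclk is the sample rate times the rate-to-fs multiple */
	return (unsigned long)rate * rfs;	/* 8000 * 512 = 4096000 */
}
#endif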
static struct snd_soc_ops smdk_wm8994_pcm_ops = {
.hw_params = smdk_wm8994_pcm_hw_params,
};
static struct snd_soc_dai_link smdk_dai[] = {
{
.name = "WM8994 PAIF PCM",
.stream_name = "Primary PCM",
.cpu_dai_name = "samsung-pcm.0",
.codec_dai_name = "wm8994-aif1",
.platform_name = "samsung-pcm.0",
.codec_name = "wm8994-codec",
.ops = &smdk_wm8994_pcm_ops,
},
};
static struct snd_soc_card smdk_pcm = {
.name = "SMDK-PCM",
.owner = THIS_MODULE,
.dai_link = smdk_dai,
.num_links = 1,
};
static int snd_smdk_probe(struct platform_device *pdev)
{
int ret = 0;
smdk_pcm.dev = &pdev->dev;
ret = snd_soc_register_card(&smdk_pcm);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
return ret;
}
return 0;
}
static int snd_smdk_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&smdk_pcm);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver snd_smdk_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "samsung-smdk-pcm",
},
.probe = snd_smdk_probe,
.remove = snd_smdk_remove,
};
module_platform_driver(snd_smdk_driver);
MODULE_AUTHOR("Sangbeom Kim, <sbkim73@samsung.com>");
MODULE_DESCRIPTION("ALSA SoC SMDK WM8994 for PCM");
MODULE_LICENSE("GPL");
| gpl-2.0 |
djvoleur/kernel_samsung_exynos7420 | drivers/video/backlight/l4f00242t03.c | 2243 | 7083 | /*
* l4f00242t03.c -- support for Epson L4F00242T03 LCD
*
* Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
* Inspired by Marek Vasut work in l4f00242t03.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/spi/l4f00242t03.h>
struct l4f00242t03_priv {
struct spi_device *spi;
struct lcd_device *ld;
int lcd_state;
struct regulator *io_reg;
struct regulator *core_reg;
};
static void l4f00242t03_reset(unsigned int gpio)
{
pr_debug("l4f00242t03_reset.\n");
gpio_set_value(gpio, 1);
mdelay(100);
gpio_set_value(gpio, 0);
mdelay(10); /* tRES >= 100us */
gpio_set_value(gpio, 1);
mdelay(20);
}
#define param(x) ((x) | 0x100)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
int ret;
dev_dbg(&spi->dev, "initializing LCD\n");
ret = regulator_set_voltage(priv->io_reg, 1800000, 1800000);
if (ret) {
dev_err(&spi->dev, "failed to set the IO regulator voltage.\n");
return;
}
ret = regulator_enable(priv->io_reg);
if (ret) {
dev_err(&spi->dev, "failed to enable the IO regulator.\n");
return;
}
ret = regulator_set_voltage(priv->core_reg, 2800000, 2800000);
if (ret) {
dev_err(&spi->dev, "failed to set the core regulator voltage.\n");
regulator_disable(priv->io_reg);
return;
}
ret = regulator_enable(priv->core_reg);
if (ret) {
dev_err(&spi->dev, "failed to enable the core regulator.\n");
regulator_disable(priv->io_reg);
return;
}
l4f00242t03_reset(pdata->reset_gpio);
gpio_set_value(pdata->data_enable_gpio, 1);
msleep(60);
spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
}
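/*
 * Note (illustrative, not from the original driver): the panel is
 * driven over 9-bit SPI (bits_per_word is set to 9 in probe); bit 8,
 * added by the param() macro above, marks a word as a parameter
 * rather than a command. A hypothetical fragment following the same
 * pattern as the init sequence:
 */
#if 0
	/* MADCTL (0x36) with one parameter byte of 0x00 goes out as
	 * the 9-bit words 0x036 and 0x100 */
	const u16 madctl[] = { 0x36, param(0x00) };
	spi_write(spi, (const u8 *)madctl, sizeof(madctl));
#endif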
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "Powering down LCD\n");
gpio_set_value(pdata->data_enable_gpio, 0);
regulator_disable(priv->io_reg);
regulator_disable(priv->core_reg);
}
static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
{
struct l4f00242t03_priv *priv = lcd_get_data(ld);
return priv->lcd_state;
}
static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
{
struct l4f00242t03_priv *priv = lcd_get_data(ld);
struct spi_device *spi = priv->spi;
const u16 slpout = 0x11;
const u16 dison = 0x29;
const u16 slpin = 0x10;
const u16 disoff = 0x28;
if (power <= FB_BLANK_NORMAL) {
if (priv->lcd_state <= FB_BLANK_NORMAL) {
/* Do nothing, the LCD is running */
} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
dev_dbg(&spi->dev, "Resuming LCD\n");
spi_write(spi, (const u8 *)&slpout, sizeof(u16));
msleep(60);
spi_write(spi, (const u8 *)&dison, sizeof(u16));
} else {
/* priv->lcd_state == FB_BLANK_POWERDOWN */
l4f00242t03_lcd_init(spi);
priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
l4f00242t03_lcd_power_set(priv->ld, power);
}
} else if (power < FB_BLANK_POWERDOWN) {
if (priv->lcd_state <= FB_BLANK_NORMAL) {
/* Send the display in standby */
dev_dbg(&spi->dev, "Standby the LCD\n");
spi_write(spi, (const u8 *)&disoff, sizeof(u16));
msleep(60);
spi_write(spi, (const u8 *)&slpin, sizeof(u16));
} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
/* Do nothing, the LCD is already in standby */
} else {
/* priv->lcd_state == FB_BLANK_POWERDOWN */
l4f00242t03_lcd_init(spi);
priv->lcd_state = FB_BLANK_UNBLANK;
l4f00242t03_lcd_power_set(ld, power);
}
} else {
/* power == FB_BLANK_POWERDOWN */
if (priv->lcd_state != FB_BLANK_POWERDOWN) {
/* Clear the screen before shutting down */
spi_write(spi, (const u8 *)&disoff, sizeof(u16));
msleep(60);
l4f00242t03_lcd_powerdown(spi);
}
}
priv->lcd_state = power;
return 0;
}
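/*
 * Summary (illustrative, not from the original driver) of the
 * transitions implemented by l4f00242t03_lcd_power_set() above,
 * keyed on the current lcd_state:
 *
 *   standby  -> running:   SLPOUT (0x11), ~60ms, DISON (0x29)
 *   running  -> standby:   DISOFF (0x28), ~60ms, SLPIN (0x10)
 *   powerdown -> anything: full l4f00242t03_lcd_init(), then the
 *                          function recurses with the requested level
 *   anything -> powerdown: DISOFF, ~60ms, then regulators off
 */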
static struct lcd_ops l4f_ops = {
.set_power = l4f00242t03_lcd_power_set,
.get_power = l4f00242t03_lcd_power_get,
};
static int l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
int ret;
if (pdata == NULL) {
dev_err(&spi->dev, "Uninitialized platform data.\n");
return -EINVAL;
}
priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
GFP_KERNEL);
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
return -ENOMEM;
}
spi_set_drvdata(spi, priv);
spi->bits_per_word = 9;
spi_setup(spi);
priv->spi = spi;
ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
GPIOF_OUT_INIT_HIGH, "lcd l4f00242t03 reset");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
return ret;
}
ret = devm_gpio_request_one(&spi->dev, pdata->data_enable_gpio,
GPIOF_OUT_INIT_LOW, "lcd l4f00242t03 data enable");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
return ret;
}
priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
return PTR_ERR(priv->io_reg);
}
priv->core_reg = devm_regulator_get(&spi->dev, "vcore");
if (IS_ERR(priv->core_reg)) {
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
return PTR_ERR(priv->core_reg);
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld))
return PTR_ERR(priv->ld);
/* Init the LCD */
l4f00242t03_lcd_init(spi);
priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK);
dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
return 0;
}
static int l4f00242t03_remove(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
spi_set_drvdata(spi, NULL);
return 0;
}
static void l4f00242t03_shutdown(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
if (priv)
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
}
static struct spi_driver l4f00242t03_driver = {
.driver = {
.name = "l4f00242t03",
.owner = THIS_MODULE,
},
.probe = l4f00242t03_probe,
.remove = l4f00242t03_remove,
.shutdown = l4f00242t03_shutdown,
};
module_spi_driver(l4f00242t03_driver);
MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
MODULE_DESCRIPTION("EPSON L4F00242T03 LCD");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
lowtraxx/kernel | drivers/usb/core/endpoint.c | 2243 | 5174 | /*
* drivers/usb/core/endpoint.c
*
* (C) Copyright 2002,2004,2006 Greg Kroah-Hartman
* (C) Copyright 2002,2004 IBM Corp.
* (C) Copyright 2006 Novell Inc.
*
* Endpoint sysfs stuff
*
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/usb.h>
#include "usb.h"
struct ep_device {
struct usb_endpoint_descriptor *desc;
struct usb_device *udev;
struct device dev;
};
#define to_ep_device(_dev) \
container_of(_dev, struct ep_device, dev)
struct ep_attribute {
struct attribute attr;
ssize_t (*show)(struct usb_device *,
struct usb_endpoint_descriptor *, char *);
};
#define to_ep_attribute(_attr) \
container_of(_attr, struct ep_attribute, attr)
#define usb_ep_attr(field, format_string) \
static ssize_t show_ep_##field(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ep_device *ep = to_ep_device(dev); \
return sprintf(buf, format_string, ep->desc->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, show_ep_##field, NULL);
usb_ep_attr(bLength, "%02x\n")
usb_ep_attr(bEndpointAddress, "%02x\n")
usb_ep_attr(bmAttributes, "%02x\n")
usb_ep_attr(bInterval, "%02x\n")
static ssize_t show_ep_wMaxPacketSize(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
return sprintf(buf, "%04x\n",
usb_endpoint_maxp(ep->desc) & 0x07ff);
}
static DEVICE_ATTR(wMaxPacketSize, S_IRUGO, show_ep_wMaxPacketSize, NULL);
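/*
 * Note (illustrative, not from the original file): bits 10..0 of
 * wMaxPacketSize carry the packet size itself, hence the 0x07ff mask
 * above; for high-speed, high-bandwidth endpoints bits 12..11 encode
 * up to two additional transactions per microframe. A hypothetical
 * decode of that upper field:
 */
#if 0
static unsigned int ep_extra_transactions(struct usb_endpoint_descriptor *d)
{
	/* 0, 1 or 2 additional transactions per microframe */
	return (usb_endpoint_maxp(d) >> 11) & 0x3;
}
#endif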
static ssize_t show_ep_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char *type = "unknown";
switch (usb_endpoint_type(ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
type = "Control";
break;
case USB_ENDPOINT_XFER_ISOC:
type = "Isoc";
break;
case USB_ENDPOINT_XFER_BULK:
type = "Bulk";
break;
case USB_ENDPOINT_XFER_INT:
type = "Interrupt";
break;
}
return sprintf(buf, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, show_ep_type, NULL);
static ssize_t show_ep_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char unit;
unsigned interval = 0;
unsigned in;
in = (ep->desc->bEndpointAddress & USB_DIR_IN);
switch (usb_endpoint_type(ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
if (ep->udev->speed == USB_SPEED_HIGH)
/* uframes per NAK */
interval = ep->desc->bInterval;
break;
case USB_ENDPOINT_XFER_ISOC:
interval = 1 << (ep->desc->bInterval - 1);
break;
case USB_ENDPOINT_XFER_BULK:
if (ep->udev->speed == USB_SPEED_HIGH && !in)
/* uframes per NAK */
interval = ep->desc->bInterval;
break;
case USB_ENDPOINT_XFER_INT:
if (ep->udev->speed == USB_SPEED_HIGH)
interval = 1 << (ep->desc->bInterval - 1);
else
interval = ep->desc->bInterval;
break;
}
interval *= (ep->udev->speed == USB_SPEED_HIGH) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
unit = 'm';
interval /= 1000;
}
return sprintf(buf, "%d%cs\n", interval, unit);
}
static DEVICE_ATTR(interval, S_IRUGO, show_ep_interval, NULL);
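/*
 * Worked example (illustrative): for a high-speed interrupt endpoint
 * with bInterval = 4, show_ep_interval() above computes
 * 1 << (4 - 1) = 8 microframes, then 8 * 125us = 1000us, and prints
 * "1ms". At full speed the same bInterval is taken directly in
 * frames, so 4 * 1000us yields "4ms".
 */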
static ssize_t show_ep_direction(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char *direction;
if (usb_endpoint_xfer_control(ep->desc))
direction = "both";
else if (usb_endpoint_dir_in(ep->desc))
direction = "in";
else
direction = "out";
return sprintf(buf, "%s\n", direction);
}
static DEVICE_ATTR(direction, S_IRUGO, show_ep_direction, NULL);
static struct attribute *ep_dev_attrs[] = {
&dev_attr_bLength.attr,
&dev_attr_bEndpointAddress.attr,
&dev_attr_bmAttributes.attr,
&dev_attr_bInterval.attr,
&dev_attr_wMaxPacketSize.attr,
&dev_attr_interval.attr,
&dev_attr_type.attr,
&dev_attr_direction.attr,
NULL,
};
static struct attribute_group ep_dev_attr_grp = {
.attrs = ep_dev_attrs,
};
static const struct attribute_group *ep_dev_groups[] = {
&ep_dev_attr_grp,
NULL
};
static void ep_device_release(struct device *dev)
{
struct ep_device *ep_dev = to_ep_device(dev);
kfree(ep_dev);
}
struct device_type usb_ep_device_type = {
.name = "usb_endpoint",
.release = ep_device_release,
};
int usb_create_ep_devs(struct device *parent,
struct usb_host_endpoint *endpoint,
struct usb_device *udev)
{
struct ep_device *ep_dev;
int retval;
ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
if (!ep_dev) {
retval = -ENOMEM;
goto exit;
}
ep_dev->desc = &endpoint->desc;
ep_dev->udev = udev;
ep_dev->dev.groups = ep_dev_groups;
ep_dev->dev.type = &usb_ep_device_type;
ep_dev->dev.parent = parent;
dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
retval = device_register(&ep_dev->dev);
if (retval)
goto error_register;
device_enable_async_suspend(&ep_dev->dev);
endpoint->ep_dev = ep_dev;
return retval;
error_register:
put_device(&ep_dev->dev);
exit:
return retval;
}
void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
{
struct ep_device *ep_dev = endpoint->ep_dev;
if (ep_dev) {
device_unregister(&ep_dev->dev);
endpoint->ep_dev = NULL;
}
}
| gpl-2.0 |
TheYorickable/tf300t_jb_kernel | drivers/video/fb_ddc.c | 4035 | 2637 | /*
 * drivers/video/fb_ddc.c - DDC/EDID read support.
*
* Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/i2c-algo-bit.h>
#include <linux/slab.h>
#include "edid.h"
#define DDC_ADDR 0x50
static unsigned char *fb_do_probe_ddc_edid(struct i2c_adapter *adapter)
{
unsigned char start = 0x0;
unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = EDID_LENGTH,
.buf = buf,
}
};
if (!buf) {
dev_warn(&adapter->dev, "unable to allocate memory for EDID "
"block.\n");
return NULL;
}
if (i2c_transfer(adapter, msgs, 2) == 2)
return buf;
dev_warn(&adapter->dev, "unable to read EDID block.\n");
kfree(buf);
return NULL;
}
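/*
 * Illustrative sketch (not part of this file): a hypothetical sanity
 * check on the block returned above. A valid 128-byte EDID base block
 * starts with the fixed header 00 FF FF FF FF FF FF 00 and all 128
 * bytes sum to zero modulo 256.
 */
#if 0
static bool edid_block_valid(const unsigned char *edid)
{
	static const unsigned char hdr[8] =
		{ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char sum = 0;
	int i;

	if (memcmp(edid, hdr, sizeof(hdr)))
		return false;
	for (i = 0; i < EDID_LENGTH; i++)
		sum += edid[i];
	return sum == 0;
}
#endif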
unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
{
struct i2c_algo_bit_data *algo_data = adapter->algo_data;
unsigned char *edid = NULL;
int i, j;
algo_data->setscl(algo_data->data, 1);
for (i = 0; i < 3; i++) {
/* For some old monitors we need the
* following process to initialize/stop DDC
*/
algo_data->setsda(algo_data->data, 1);
msleep(13);
algo_data->setscl(algo_data->data, 1);
for (j = 0; j < 5; j++) {
msleep(10);
if (algo_data->getscl(algo_data->data))
break;
}
if (j == 5)
continue;
algo_data->setsda(algo_data->data, 0);
msleep(15);
algo_data->setscl(algo_data->data, 0);
msleep(15);
algo_data->setsda(algo_data->data, 1);
msleep(15);
/* Do the real work */
edid = fb_do_probe_ddc_edid(adapter);
algo_data->setsda(algo_data->data, 0);
algo_data->setscl(algo_data->data, 0);
msleep(15);
algo_data->setscl(algo_data->data, 1);
for (j = 0; j < 10; j++) {
msleep(10);
if (algo_data->getscl(algo_data->data))
break;
}
algo_data->setsda(algo_data->data, 1);
msleep(15);
algo_data->setscl(algo_data->data, 0);
algo_data->setsda(algo_data->data, 0);
if (edid)
break;
}
/* Release the DDC lines when done or the Apple Cinema HD display
* will switch off
*/
algo_data->setsda(algo_data->data, 1);
algo_data->setscl(algo_data->data, 1);
adapter->class |= I2C_CLASS_DDC;
return edid;
}
EXPORT_SYMBOL_GPL(fb_ddc_read);
MODULE_AUTHOR("Dennis Munsie <dmunsie@cecropia.com>");
MODULE_DESCRIPTION("DDC/EDID reading support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Galland/Linux3188 | arch/mips/sgi-ip22/ip22-berr.c | 4035 | 3611 | /*
* ip22-berr.c: Bus error handling.
*
* Copyright (C) 2002, 2003 Ladislav Michl (ladis@linux-mips.org)
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/addrspace.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/branch.h>
#include <asm/irq_regs.h>
#include <asm/sgi/mc.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/ip22.h>
static unsigned int cpu_err_stat; /* Status reg for CPU */
static unsigned int gio_err_stat; /* Status reg for GIO */
static unsigned int cpu_err_addr; /* Error address reg for CPU */
static unsigned int gio_err_addr; /* Error address reg for GIO */
static unsigned int extio_stat;
static unsigned int hpc3_berr_stat; /* Bus error interrupt status */
static void save_and_clear_buserr(void)
{
/* save status registers */
cpu_err_addr = sgimc->cerr;
cpu_err_stat = sgimc->cstat;
gio_err_addr = sgimc->gerr;
gio_err_stat = sgimc->gstat;
extio_stat = ip22_is_fullhouse() ? sgioc->extio : (sgint->errstat << 4);
hpc3_berr_stat = hpc3c0->bestat;
sgimc->cstat = sgimc->gstat = 0;
}
#define GIO_ERRMASK 0xff00
#define CPU_ERRMASK 0x3f00
static void print_buserr(void)
{
if (extio_stat & EXTIO_MC_BUSERR)
printk(KERN_ERR "MC Bus Error\n");
if (extio_stat & EXTIO_HPC3_BUSERR)
printk(KERN_ERR "HPC3 Bus Error 0x%x:<id=0x%x,%s,lane=0x%x>\n",
hpc3_berr_stat,
(hpc3_berr_stat & HPC3_BESTAT_PIDMASK) >>
HPC3_BESTAT_PIDSHIFT,
(hpc3_berr_stat & HPC3_BESTAT_CTYPE) ? "PIO" : "DMA",
hpc3_berr_stat & HPC3_BESTAT_BLMASK);
if (extio_stat & EXTIO_EISA_BUSERR)
printk(KERN_ERR "EISA Bus Error\n");
if (cpu_err_stat & CPU_ERRMASK)
printk(KERN_ERR "CPU error 0x%x<%s%s%s%s%s%s> @ 0x%08x\n",
cpu_err_stat,
cpu_err_stat & SGIMC_CSTAT_RD ? "RD " : "",
cpu_err_stat & SGIMC_CSTAT_PAR ? "PAR " : "",
cpu_err_stat & SGIMC_CSTAT_ADDR ? "ADDR " : "",
cpu_err_stat & SGIMC_CSTAT_SYSAD_PAR ? "SYSAD " : "",
cpu_err_stat & SGIMC_CSTAT_SYSCMD_PAR ? "SYSCMD " : "",
cpu_err_stat & SGIMC_CSTAT_BAD_DATA ? "BAD_DATA " : "",
cpu_err_addr);
if (gio_err_stat & GIO_ERRMASK)
printk(KERN_ERR "GIO error 0x%x:<%s%s%s%s%s%s%s%s> @ 0x%08x\n",
gio_err_stat,
gio_err_stat & SGIMC_GSTAT_RD ? "RD " : "",
gio_err_stat & SGIMC_GSTAT_WR ? "WR " : "",
gio_err_stat & SGIMC_GSTAT_TIME ? "TIME " : "",
gio_err_stat & SGIMC_GSTAT_PROM ? "PROM " : "",
gio_err_stat & SGIMC_GSTAT_ADDR ? "ADDR " : "",
gio_err_stat & SGIMC_GSTAT_BC ? "BC " : "",
gio_err_stat & SGIMC_GSTAT_PIO_RD ? "PIO_RD " : "",
gio_err_stat & SGIMC_GSTAT_PIO_WR ? "PIO_WR " : "",
gio_err_addr);
}
/*
* MC sends an interrupt whenever bus or parity errors occur. In addition,
* if the error happened during a CPU read, it also asserts the bus error
* pin on the R4K. Code in bus error handler save the MC bus error registers
* and then clear the interrupt when this happens.
*/
void ip22_be_interrupt(int irq)
{
const int field = 2 * sizeof(unsigned long);
struct pt_regs *regs = get_irq_regs();
save_and_clear_buserr();
print_buserr();
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
(regs->cp0_cause & 4) ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
/* Assume it would be too dangerous to continue ... */
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
}
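/*
 * Note (illustrative, not from the original file): the "cp0_cause & 4"
 * test above works because ExcCode occupies Cause bits [6:2] and the
 * instruction/data bus error codes (IBE = 6, DBE = 7) differ only in
 * their lowest bit, i.e. Cause bit 2. A hypothetical helper making
 * the decode explicit:
 */
#if 0
static inline int buserr_was_data(unsigned long cp0_cause)
{
	return (cp0_cause & 4) != 0;	/* DBE (7) sets Cause bit 2 */
}
#endif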
static int ip22_be_handler(struct pt_regs *regs, int is_fixup)
{
save_and_clear_buserr();
if (is_fixup)
return MIPS_BE_FIXUP;
print_buserr();
return MIPS_BE_FATAL;
}
void __init ip22_be_init(void)
{
board_be_handler = ip22_be_handler;
}
| gpl-2.0 |
androidarmv6/android_kernel_samsung_bcm21553-common | net/netrom/nr_dev.c | 4291 | 4427 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/if_ether.h> /* For the statistics structure. */
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/ax25.h>
#include <net/netrom.h>
/*
* Only allow IP over NET/ROM frames through if the netrom device is up.
*/
int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
if (!netif_running(dev)) {
stats->rx_dropped++;
return 0;
}
stats->rx_packets++;
stats->rx_bytes += skb->len;
skb->protocol = htons(ETH_P_IP);
/* Spoof incoming device */
skb->dev = dev;
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
netif_rx(skb);
return 1;
}
#ifdef CONFIG_INET
static int nr_rebuild_header(struct sk_buff *skb)
{
unsigned char *bp = skb->data;
if (arp_find(bp + 7, skb))
return 1;
bp[6] &= ~AX25_CBIT;
bp[6] &= ~AX25_EBIT;
bp[6] |= AX25_SSSID_SPARE;
bp += AX25_ADDR_LEN;
bp[6] &= ~AX25_CBIT;
bp[6] |= AX25_EBIT;
bp[6] |= AX25_SSSID_SPARE;
return 0;
}
#else
static int nr_rebuild_header(struct sk_buff *skb)
{
return 1;
}
#endif
static int nr_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
memcpy(buff, (saddr != NULL) ? saddr : dev->dev_addr, dev->addr_len);
buff[6] &= ~AX25_CBIT;
buff[6] &= ~AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
if (daddr != NULL)
memcpy(buff, daddr, dev->addr_len);
buff[6] &= ~AX25_CBIT;
buff[6] |= AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
*buff++ = sysctl_netrom_network_ttl_initialiser;
*buff++ = NR_PROTO_IP;
*buff++ = NR_PROTO_IP;
*buff++ = 0;
*buff++ = 0;
*buff++ = NR_PROTOEXT;
if (daddr != NULL)
return 37;
return -37;
}
static int __must_check nr_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
int err;
if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
return 0;
if (dev->flags & IFF_UP) {
err = ax25_listen_register((ax25_address *)sa->sa_data, NULL);
if (err)
return err;
ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
}
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
return 0;
}
static int nr_open(struct net_device *dev)
{
int err;
err = ax25_listen_register((ax25_address *)dev->dev_addr, NULL);
if (err)
return err;
netif_start_queue(dev);
return 0;
}
static int nr_close(struct net_device *dev)
{
ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
netif_stop_queue(dev);
return 0;
}
static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
unsigned int len = skb->len;
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);
stats->tx_errors++;
return NETDEV_TX_OK;
}
stats->tx_packets++;
stats->tx_bytes += len;
return NETDEV_TX_OK;
}
static const struct header_ops nr_header_ops = {
.create = nr_header,
.rebuild= nr_rebuild_header,
};
static const struct net_device_ops nr_netdev_ops = {
.ndo_open = nr_open,
.ndo_stop = nr_close,
.ndo_start_xmit = nr_xmit,
.ndo_set_mac_address = nr_set_mac_address,
};
void nr_setup(struct net_device *dev)
{
dev->mtu = NR_MAX_PACKET_SIZE;
dev->netdev_ops = &nr_netdev_ops;
dev->header_ops = &nr_header_ops;
dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_NETROM;
/* New-style flags. */
dev->flags = IFF_NOARP;
}
| gpl-2.0 |
Dabug123/owlCore64 | drivers/media/firewire/firedtv-dvb.c | 4547 | 5895 | /*
* FireDTV driver (formerly known as FireSAT)
*
* Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
* Copyright (C) 2008 Henrik Kurelid <henrik@kurelid.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <dmxdev.h>
#include <dvb_demux.h>
#include <dvbdev.h>
#include <dvb_frontend.h>
#include "firedtv.h"
static int alloc_channel(struct firedtv *fdtv)
{
int i;
for (i = 0; i < 16; i++)
if (!__test_and_set_bit(i, &fdtv->channel_active))
break;
return i;
}
static void collect_channels(struct firedtv *fdtv, int *pidc, u16 pid[])
{
int i, n;
for (i = 0, n = 0; i < 16; i++)
if (test_bit(i, &fdtv->channel_active))
pid[n++] = fdtv->channel_pid[i];
*pidc = n;
}
static inline void dealloc_channel(struct firedtv *fdtv, int i)
{
__clear_bit(i, &fdtv->channel_active);
}
int fdtv_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct firedtv *fdtv = dvbdmxfeed->demux->priv;
int pidc, c, ret;
u16 pids[16];
switch (dvbdmxfeed->type) {
case DMX_TYPE_TS:
case DMX_TYPE_SEC:
break;
default:
dev_err(fdtv->device, "can't start dmx feed: invalid type %u\n",
dvbdmxfeed->type);
return -EINVAL;
}
if (mutex_lock_interruptible(&fdtv->demux_mutex))
return -EINTR;
if (dvbdmxfeed->type == DMX_TYPE_TS) {
switch (dvbdmxfeed->pes_type) {
case DMX_PES_VIDEO:
case DMX_PES_AUDIO:
case DMX_PES_TELETEXT:
case DMX_PES_PCR:
case DMX_PES_OTHER:
c = alloc_channel(fdtv);
break;
default:
dev_err(fdtv->device,
"can't start dmx feed: invalid pes type %u\n",
dvbdmxfeed->pes_type);
ret = -EINVAL;
goto out;
}
} else {
c = alloc_channel(fdtv);
}
if (c > 15) {
dev_err(fdtv->device, "can't start dmx feed: busy\n");
ret = -EBUSY;
goto out;
}
dvbdmxfeed->priv = (typeof(dvbdmxfeed->priv))(unsigned long)c;
fdtv->channel_pid[c] = dvbdmxfeed->pid;
collect_channels(fdtv, &pidc, pids);
if (dvbdmxfeed->pid == 8192) {
ret = avc_tuner_get_ts(fdtv);
if (ret) {
dealloc_channel(fdtv, c);
dev_err(fdtv->device, "can't get TS\n");
goto out;
}
} else {
ret = avc_tuner_set_pids(fdtv, pidc, pids);
if (ret) {
dealloc_channel(fdtv, c);
dev_err(fdtv->device, "can't set PIDs\n");
goto out;
}
}
out:
mutex_unlock(&fdtv->demux_mutex);
return ret;
}
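/*
 * Note (illustrative, not from the original file): PID 8192 (0x2000)
 * is the conventional DVB "wildcard" meaning the whole transport
 * stream, which is why fdtv_start_feed() above asks the tuner for the
 * full TS instead of programming a PID filter for it. A hypothetical
 * helper spelling that out:
 */
#if 0
static inline bool pid_is_full_ts(u16 pid)
{
	return pid == 0x2000;	/* 8192: all PIDs */
}
#endif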
int fdtv_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *demux = dvbdmxfeed->demux;
struct firedtv *fdtv = demux->priv;
int pidc, c, ret;
u16 pids[16];
if (dvbdmxfeed->type == DMX_TYPE_TS &&
!((dvbdmxfeed->ts_type & TS_PACKET) &&
(demux->dmx.frontend->source != DMX_MEMORY_FE))) {
if (dvbdmxfeed->ts_type & TS_DECODER) {
if (dvbdmxfeed->pes_type >= DMX_PES_OTHER ||
!demux->pesfilter[dvbdmxfeed->pes_type])
return -EINVAL;
demux->pids[dvbdmxfeed->pes_type] |= 0x8000;
demux->pesfilter[dvbdmxfeed->pes_type] = NULL;
}
if (!(dvbdmxfeed->ts_type & TS_DECODER &&
dvbdmxfeed->pes_type < DMX_PES_OTHER))
return 0;
}
if (mutex_lock_interruptible(&fdtv->demux_mutex))
return -EINTR;
c = (unsigned long)dvbdmxfeed->priv;
dealloc_channel(fdtv, c);
collect_channels(fdtv, &pidc, pids);
ret = avc_tuner_set_pids(fdtv, pidc, pids);
mutex_unlock(&fdtv->demux_mutex);
return ret;
}
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
int fdtv_dvb_register(struct firedtv *fdtv, const char *name)
{
int err;
err = dvb_register_adapter(&fdtv->adapter, name,
THIS_MODULE, fdtv->device, adapter_nr);
if (err < 0)
goto fail_log;
/*DMX_TS_FILTERING | DMX_SECTION_FILTERING*/
fdtv->demux.dmx.capabilities = 0;
fdtv->demux.priv = fdtv;
fdtv->demux.filternum = 16;
fdtv->demux.feednum = 16;
fdtv->demux.start_feed = fdtv_start_feed;
fdtv->demux.stop_feed = fdtv_stop_feed;
fdtv->demux.write_to_decoder = NULL;
err = dvb_dmx_init(&fdtv->demux);
if (err)
goto fail_unreg_adapter;
fdtv->dmxdev.filternum = 16;
fdtv->dmxdev.demux = &fdtv->demux.dmx;
fdtv->dmxdev.capabilities = 0;
err = dvb_dmxdev_init(&fdtv->dmxdev, &fdtv->adapter);
if (err)
goto fail_dmx_release;
fdtv->frontend.source = DMX_FRONTEND_0;
err = fdtv->demux.dmx.add_frontend(&fdtv->demux.dmx, &fdtv->frontend);
if (err)
goto fail_dmxdev_release;
err = fdtv->demux.dmx.connect_frontend(&fdtv->demux.dmx,
&fdtv->frontend);
if (err)
goto fail_rem_frontend;
err = dvb_net_init(&fdtv->adapter, &fdtv->dvbnet, &fdtv->demux.dmx);
if (err)
goto fail_disconnect_frontend;
fdtv_frontend_init(fdtv, name);
err = dvb_register_frontend(&fdtv->adapter, &fdtv->fe);
if (err)
goto fail_net_release;
err = fdtv_ca_register(fdtv);
if (err)
dev_info(fdtv->device,
"Conditional Access Module not enabled\n");
return 0;
fail_net_release:
dvb_net_release(&fdtv->dvbnet);
fail_disconnect_frontend:
fdtv->demux.dmx.close(&fdtv->demux.dmx);
fail_rem_frontend:
fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
fail_dmxdev_release:
dvb_dmxdev_release(&fdtv->dmxdev);
fail_dmx_release:
dvb_dmx_release(&fdtv->demux);
fail_unreg_adapter:
dvb_unregister_adapter(&fdtv->adapter);
fail_log:
dev_err(fdtv->device, "DVB initialization failed\n");
return err;
}
void fdtv_dvb_unregister(struct firedtv *fdtv)
{
fdtv_ca_release(fdtv);
dvb_unregister_frontend(&fdtv->fe);
dvb_net_release(&fdtv->dvbnet);
fdtv->demux.dmx.close(&fdtv->demux.dmx);
fdtv->demux.dmx.remove_frontend(&fdtv->demux.dmx, &fdtv->frontend);
dvb_dmxdev_release(&fdtv->dmxdev);
dvb_dmx_release(&fdtv->demux);
dvb_unregister_adapter(&fdtv->adapter);
}
| gpl-2.0 |
Ronfante/android_kernel_xiaomi_cancro | drivers/media/video/s2255drv.c | 4803 | 77408 | /*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
* Copyright (C) 2007-2010 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
*
* Sensoray 2255 device supports 4 simultaneous channels.
* The channels are not "crossbar" inputs, they are physically
* attached to separate video decoders.
*
 * Because of USB 2.0 bandwidth limitations, there is only a
 * certain amount of data which may be transferred at one time.
*
* Example maximum bandwidth utilization:
*
* -full size, color mode YUYV or YUV422P: 2 channels at once
* -full or half size Grey scale: all 4 channels at once
* -half size, color mode YUYV or YUV422P: all 4 channels at once
* -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels
* at once.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
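/*
 * Worked numbers behind the list above (illustrative, not from the
 * original source): one full-size NTSC channel in a 16bpp YUV mode is
 * 640 * 480 * 2 bytes per frame at ~30 fps, i.e. about 18.4 MB/s, so
 * two such channels (~36.9 MB/s) already approach the roughly 40 MB/s
 * of payload a USB 2.0 high-speed link sustains in practice.
 */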
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
#define S2255_VERSION "1.22.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
#define S2255_DEF_JPEG_QUAL 50
/* vendor request in */
#define S2255_VR_IN 0
/* vendor request out */
#define S2255_VR_OUT 1
/* firmware query */
#define S2255_VR_FW 0x30
/* USB endpoint number for configuring the device */
#define S2255_CONFIG_EP 2
/* maximum time for DSP to start responding after last FW word loaded(ms) */
#define S2255_DSP_BOOTTIME 800
/* maximum time to wait for firmware to load (ms) */
#define S2255_LOAD_TIMEOUT (5000 + S2255_DSP_BOOTTIME)
#define S2255_DEF_BUFS 16
#define S2255_SETMODE_TIMEOUT 500
#define S2255_VIDSTATUS_TIMEOUT 350
#define S2255_MARKER_FRAME cpu_to_le32(0x2255DA4AL)
#define S2255_MARKER_RESPONSE cpu_to_le32(0x2255ACACL)
#define S2255_RESPONSE_SETMODE cpu_to_le32(0x01)
#define S2255_RESPONSE_FW cpu_to_le32(0x10)
#define S2255_RESPONSE_STATUS cpu_to_le32(0x20)
#define S2255_USB_XFER_SIZE (16 * 1024)
#define MAX_CHANNELS 4
#define SYS_FRAMES 4
/* maximum size is PAL full size plus room for the marker header(s) */
#define SYS_FRAMES_MAXSIZE (720*288*2*2 + 4096)
#define DEF_USB_BLOCK S2255_USB_XFER_SIZE
#define LINE_SZ_4CIFS_NTSC 640
#define LINE_SZ_2CIFS_NTSC 640
#define LINE_SZ_1CIFS_NTSC 320
#define LINE_SZ_4CIFS_PAL 704
#define LINE_SZ_2CIFS_PAL 704
#define LINE_SZ_1CIFS_PAL 352
#define NUM_LINES_4CIFS_NTSC 240
#define NUM_LINES_2CIFS_NTSC 240
#define NUM_LINES_1CIFS_NTSC 240
#define NUM_LINES_4CIFS_PAL 288
#define NUM_LINES_2CIFS_PAL 288
#define NUM_LINES_1CIFS_PAL 288
#define LINE_SZ_DEF 640
#define NUM_LINES_DEF 240
/* predefined settings */
#define FORMAT_NTSC 1
#define FORMAT_PAL 2
#define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */
#define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */
#define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */
/* SCALE_4CIFSI is the 2 fields interpolated into one */
#define SCALE_4CIFSI 4 /* 640x480(NTSC) or 704x576(PAL) high quality */
#define COLOR_YUVPL 1 /* YUV planar */
#define COLOR_YUVPK 2 /* YUV packed */
#define COLOR_Y8 4 /* monochrome */
#define COLOR_JPG 5 /* JPEG */
#define MASK_COLOR 0x000000ff
#define MASK_JPG_QUALITY 0x0000ff00
#define MASK_INPUT_TYPE 0x000f0000
/* frame decimation. */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
#define FDEC_3 3 /* capture every 3rd frame */
#define FDEC_5 5 /* capture every 5th frame */
/*-------------------------------------------------------
* Default mode parameters.
*-------------------------------------------------------*/
#define DEF_SCALE SCALE_4CIFS
#define DEF_COLOR COLOR_YUVPL
#define DEF_FDEC FDEC_1
#define DEF_BRIGHT 0
#define DEF_CONTRAST 0x5c
#define DEF_SATURATION 0x80
#define DEF_HUE 0
/* usb config commands */
#define IN_DATA_TOKEN cpu_to_le32(0x2255c0de)
#define CMD_2255 0xc2255000
#define CMD_SET_MODE cpu_to_le32((CMD_2255 | 0x10))
#define CMD_START cpu_to_le32((CMD_2255 | 0x20))
#define CMD_STOP cpu_to_le32((CMD_2255 | 0x30))
#define CMD_STATUS cpu_to_le32((CMD_2255 | 0x40))
struct s2255_mode {
u32 format; /* input video format (NTSC, PAL) */
u32 scale; /* output video scale */
u32 color; /* output video color format */
u32 fdec; /* frame decimation */
u32 bright; /* brightness */
u32 contrast; /* contrast */
u32 saturation; /* saturation */
u32 hue; /* hue (NTSC only)*/
u32 single; /* capture 1 frame at a time (!=0), continuously (==0)*/
u32 usb_block; /* block size. should be 4096 of DEF_USB_BLOCK */
u32 restart; /* if DSP requires restart */
};
#define S2255_READ_IDLE 0
#define S2255_READ_FRAME 1
/* frame structure */
struct s2255_framei {
unsigned long size;
unsigned long ulState; /* ulState:S2255_READ_IDLE, S2255_READ_FRAME*/
void *lpvbits; /* image data */
unsigned long cur_size; /* current data copied to it */
};
/* image buffer structure */
struct s2255_bufferi {
unsigned long dwFrames; /* number of frames in buffer */
struct s2255_framei frame[SYS_FRAMES]; /* array of FRAME structures */
};
#define DEF_MODEI_NTSC_CONT {FORMAT_NTSC, DEF_SCALE, DEF_COLOR, \
DEF_FDEC, DEF_BRIGHT, DEF_CONTRAST, DEF_SATURATION, \
DEF_HUE, 0, DEF_USB_BLOCK, 0}
struct s2255_dmaqueue {
struct list_head active;
struct s2255_dev *dev;
};
/* for firmware loading, fw_state */
#define S2255_FW_NOTLOADED 0
#define S2255_FW_LOADED_DSPWAIT 1
#define S2255_FW_SUCCESS 2
#define S2255_FW_FAILED 3
#define S2255_FW_DISCONNECTING 4
#define S2255_FW_MARKER cpu_to_le32(0x22552f2f)
/* 2255 read states */
#define S2255_READ_IDLE 0
#define S2255_READ_FRAME 1
struct s2255_fw {
int fw_loaded;
int fw_size;
struct urb *fw_urb;
atomic_t fw_state;
void *pfw_data;
wait_queue_head_t wait_fw;
const struct firmware *fw;
};
struct s2255_pipeinfo {
u32 max_transfer_size;
u32 cur_transfer_size;
u8 *transfer_buffer;
u32 state;
void *stream_urb;
void *dev; /* back pointer to s2255_dev struct*/
u32 err_count;
u32 idx;
};
struct s2255_fmt; /*forward declaration */
struct s2255_dev;
struct s2255_channel {
struct video_device vdev;
int resources;
struct s2255_dmaqueue vidq;
struct s2255_bufferi buffer;
struct s2255_mode mode;
/* jpeg compression */
struct v4l2_jpegcompression jc;
/* capture parameters (for high quality mode full size) */
struct v4l2_captureparm cap_parm;
int cur_frame;
int last_frame;
int b_acquire;
/* allocated image size */
unsigned long req_image_size;
/* received packet size */
unsigned long pkt_size;
int bad_payload;
unsigned long frame_count;
/* if JPEG image */
int jpg_size;
/* if channel configured to default state */
int configured;
wait_queue_head_t wait_setmode;
int setmode_ready;
/* video status items */
int vidstatus;
wait_queue_head_t wait_vidstatus;
int vidstatus_ready;
unsigned int width;
unsigned int height;
const struct s2255_fmt *fmt;
int idx; /* channel number on device, 0-3 */
};
struct s2255_dev {
struct s2255_channel channel[MAX_CHANNELS];
struct v4l2_device v4l2_dev;
atomic_t num_channels;
int frames;
struct mutex lock; /* channels[].vdev.lock */
struct mutex open_lock;
struct usb_device *udev;
struct usb_interface *interface;
u8 read_endpoint;
struct timer_list timer;
struct s2255_fw *fw_data;
struct s2255_pipeinfo pipe;
u32 cc; /* current channel */
int frame_ready;
int chn_ready;
spinlock_t slock;
/* dsp firmware version (f2255usb.bin) */
int dsp_fw_ver;
u16 pid; /* product id */
};
static inline struct s2255_dev *to_s2255_dev(struct v4l2_device *v4l2_dev)
{
return container_of(v4l2_dev, struct s2255_dev, v4l2_dev);
}
struct s2255_fmt {
char *name;
u32 fourcc;
int depth;
};
/* buffer for one video frame */
struct s2255_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
const struct s2255_fmt *fmt;
};
struct s2255_fh {
struct s2255_dev *dev;
struct videobuf_queue vb_vidq;
enum v4l2_buf_type type;
struct s2255_channel *channel;
int resources;
};
/* current cypress EEPROM firmware version */
#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
/* current DSP FW version */
#define S2255_CUR_DSP_FWVER 10104
/* Need DSP version 5+ for video status feature */
#define S2255_MIN_DSP_STATUS 5
#define S2255_MIN_DSP_COLORFILTER 8
#define S2255_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC)
/* private V4L2 controls */
/*
* The following chart displays how COLORFILTER should be set
* =========================================================
* = fourcc = COLORFILTER =
* = ===============================
* = = 0 = 1 =
* =========================================================
* = V4L2_PIX_FMT_GREY(Y8) = monochrome from = monochrome=
* = = s-video or = composite =
* = = B/W camera = input =
* =========================================================
* = other = color, svideo = color, =
* = = = composite =
* =========================================================
*
* Notes:
* channels 0-3 on 2255 are composite
* channels 0-1 on 2257 are composite, 2-3 are s-video
* If COLORFILTER is 0 with a composite color camera connected,
* the output will appear monochrome but hatching
* will occur.
* COLORFILTER is different from "color killer" and "color effects"
* for reasons above.
*/
#define S2255_V4L2_YC_ON 1
#define S2255_V4L2_YC_OFF 0
#define V4L2_CID_PRIVATE_COLORFILTER (V4L2_CID_PRIVATE_BASE + 0)
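/*
 * Illustrative sketch (not part of the driver): how a hypothetical
 * user-space program might turn the private COLORFILTER control on
 * for a monochrome camera on a composite input, per the chart above.
 * fd is assumed to be an open V4L2 device node.
 */
#if 0
static int set_colorfilter(int fd, int on)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_PRIVATE_COLORFILTER,
		.value	= on ? S2255_V4L2_YC_ON : S2255_V4L2_YC_OFF,
	};
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}
#endif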
/* frame prefix size (sent once every frame) */
#define PREFIX_SIZE 512
/* Channels on box are in reverse order */
static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0};
static int debug;
static int *s2255_debug = &debug;
static int s2255_start_readpipe(struct s2255_dev *dev);
static void s2255_stop_readpipe(struct s2255_dev *dev);
static int s2255_start_acquire(struct s2255_channel *channel);
static int s2255_stop_acquire(struct s2255_channel *channel);
static void s2255_fillbuff(struct s2255_channel *chn, struct s2255_buffer *buf,
int jpgsize);
static int s2255_set_mode(struct s2255_channel *chan, struct s2255_mode *mode);
static int s2255_board_shutdown(struct s2255_dev *dev);
static void s2255_fwload_start(struct s2255_dev *dev, int reset);
static void s2255_destroy(struct s2255_dev *dev);
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char req,
u16 index, u16 value, void *buf,
s32 buf_len, int bOut);
/* dev_err macro with driver name */
#define S2255_DRIVER_NAME "s2255"
#define s2255_dev_err(dev, fmt, arg...) \
dev_err(dev, S2255_DRIVER_NAME " - " fmt, ##arg)
#define dprintk(level, fmt, arg...) \
do { \
if (*s2255_debug >= (level)) { \
printk(KERN_DEBUG S2255_DRIVER_NAME \
": " fmt, ##arg); \
} \
} while (0)
static struct usb_driver s2255_driver;
/* Declare static vars that will be used as parameters */
static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
/* start video number */
static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
/* Enable jpeg capture. */
static int jpeg_enable = 1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level(0-100) default 0");
module_param(vid_limit, int, 0644);
MODULE_PARM_DESC(vid_limit, "video memory limit(Mb)");
module_param(video_nr, int, 0644);
MODULE_PARM_DESC(video_nr, "start video minor(-1 default autodetect)");
module_param(jpeg_enable, int, 0644);
MODULE_PARM_DESC(jpeg_enable, "Jpeg enable(1-on 0-off) default 1");
/* USB device table */
#define USB_SENSORAY_VID 0x1943
static struct usb_device_id s2255_table[] = {
{USB_DEVICE(USB_SENSORAY_VID, 0x2255)},
{USB_DEVICE(USB_SENSORAY_VID, 0x2257)}, /*same family as 2255*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, s2255_table);
#define BUFFER_TIMEOUT msecs_to_jiffies(400)
/* image formats. */
/* JPEG formats must be defined last to support jpeg_enable parameter */
static const struct s2255_fmt formats[] = {
{
.name = "4:2:2, planar, YUV422P",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16
}, {
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16
}, {
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16
}, {
.name = "8bpp GREY",
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8
}, {
.name = "JPG",
.fourcc = V4L2_PIX_FMT_JPEG,
.depth = 24
}, {
.name = "MJPG",
.fourcc = V4L2_PIX_FMT_MJPEG,
.depth = 24
}
};
static int norm_maxw(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
LINE_SZ_4CIFS_NTSC : LINE_SZ_4CIFS_PAL;
}
static int norm_maxh(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
(NUM_LINES_1CIFS_NTSC * 2) : (NUM_LINES_1CIFS_PAL * 2);
}
static int norm_minw(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
LINE_SZ_1CIFS_NTSC : LINE_SZ_1CIFS_PAL;
}
static int norm_minh(struct video_device *vdev)
{
return (vdev->current_norm & V4L2_STD_NTSC) ?
(NUM_LINES_1CIFS_NTSC) : (NUM_LINES_1CIFS_PAL);
}
/*
* TODO: fixme: move YUV reordering to hardware
* converts 2255 planar format to yuyv or uyvy
*/
static void planar422p_to_yuv_packed(const unsigned char *in,
unsigned char *out,
int width, int height,
int fmt)
{
unsigned char *pY;
unsigned char *pCb;
unsigned char *pCr;
unsigned long size = height * width;
unsigned int i;
pY = (unsigned char *)in;
pCr = (unsigned char *)in + height * width;
pCb = (unsigned char *)in + height * width + (height * width / 2);
for (i = 0; i < size * 2; i += 4) {
out[i] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCr++;
out[i + 1] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCr++ : *pY++;
out[i + 2] = (fmt == V4L2_PIX_FMT_YUYV) ? *pY++ : *pCb++;
out[i + 3] = (fmt == V4L2_PIX_FMT_YUYV) ? *pCb++ : *pY++;
}
return;
}
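/*
 * Usage sketch (illustrative, not from the driver): a 4:2:2 frame
 * occupies width * height * 2 bytes in both the planar and the packed
 * layout, so the destination buffer for the converter above is sized
 * exactly like the source. Buffer names here are hypothetical.
 */
#if 0
	/* packed must hold width * height * 2 bytes */
	planar422p_to_yuv_packed(planar, packed, width, height,
				 V4L2_PIX_FMT_YUYV);
#endif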
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
msleep(10);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
msleep(600);
s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
return;
}
/* Kickstarts the firmware loading; called from probe.
 */
static void s2255_timer(unsigned long user_data)
{
struct s2255_fw *data = (struct s2255_fw *)user_data;
dprintk(100, "%s\n", __func__);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
printk(KERN_ERR "s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
}
/* This loads the firmware asynchronously.
   Originally this was done synchronously in probe, but it is better
   to load it asynchronously here than to block inside the probe
   function, since blocking inside probe affects boot time.
   FW loading is triggered by the timer in the probe function.
*/
static void s2255_fwchunk_complete(struct urb *urb)
{
struct s2255_fw *data = urb->context;
struct usb_device *udev = urb->dev;
int len;
	dprintk(100, "%s: udev %p urb %p\n", __func__, udev, urb);
if (urb->status) {
dev_err(&udev->dev, "URB failed with status %d\n", urb->status);
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
if (data->fw_urb == NULL) {
s2255_dev_err(&udev->dev, "disconnected\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
#define CHUNK_SIZE 512
/* All USB transfers must use physically contiguous kernel memory.
   We can't allocate more than 128k in the current Linux kernel, so
   upload the firmware in chunks.
*/
if (data->fw_loaded < data->fw_size) {
len = (data->fw_loaded + CHUNK_SIZE) > data->fw_size ?
data->fw_size % CHUNK_SIZE : CHUNK_SIZE;
if (len < CHUNK_SIZE)
memset(data->pfw_data, 0, CHUNK_SIZE);
dprintk(100, "completed len %d, loaded %d \n", len,
data->fw_loaded);
memcpy(data->pfw_data,
(char *) data->fw->data + data->fw_loaded, len);
usb_fill_bulk_urb(data->fw_urb, udev, usb_sndbulkpipe(udev, 2),
data->pfw_data, CHUNK_SIZE,
s2255_fwchunk_complete, data);
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
dev_err(&udev->dev, "failed submit URB\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
/* wake up anything waiting for the firmware */
wake_up(&data->wait_fw);
return;
}
data->fw_loaded += len;
} else {
atomic_set(&data->fw_state, S2255_FW_LOADED_DSPWAIT);
dprintk(100, "%s: firmware upload complete\n", __func__);
}
return;
}
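/*
 * Sketch (illustrative only): the chunk-length rule used above as a
 * hypothetical helper. Every chunk is CHUNK_SIZE bytes except
 * possibly the last, which carries the remainder of the image and is
 * zero-padded up to CHUNK_SIZE before transfer.
 */
#if 0
static int fw_chunk_len(int loaded, int total)
{
	return (loaded + CHUNK_SIZE) > total ? total % CHUNK_SIZE
					     : CHUNK_SIZE;
}
#endif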
static int s2255_got_frame(struct s2255_channel *channel, int jpgsize)
{
struct s2255_dmaqueue *dma_q = &channel->vidq;
struct s2255_buffer *buf;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
unsigned long flags = 0;
int rc = 0;
spin_lock_irqsave(&dev->slock, flags);
if (list_empty(&dma_q->active)) {
dprintk(1, "No active queue to serve\n");
rc = -1;
goto unlock;
}
buf = list_entry(dma_q->active.next,
struct s2255_buffer, vb.queue);
list_del(&buf->vb.queue);
do_gettimeofday(&buf->vb.ts);
s2255_fillbuff(channel, buf, jpgsize);
wake_up(&buf->vb.done);
dprintk(2, "%s: [buf/i] [%p/%d]\n", __func__, buf, buf->vb.i);
unlock:
spin_unlock_irqrestore(&dev->slock, flags);
return rc;
}
static const struct s2255_fmt *format_by_fourcc(int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (-1 == formats[i].fourcc)
continue;
if (!jpeg_enable && ((formats[i].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[i].fourcc == V4L2_PIX_FMT_MJPEG)))
continue;
if (formats[i].fourcc == fourcc)
return formats + i;
}
return NULL;
}
/* video buffer vmalloc implementation based partly on VIVI driver which is
* Copyright (c) 2006 by
* Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
* Ted Walther <ted--a.t--enumera.com>
* John Sokol <sokol--a.t--videotechnology.com>
* http://v4l.videotechnology.com/
*
*/
static void s2255_fillbuff(struct s2255_channel *channel,
struct s2255_buffer *buf, int jpgsize)
{
int pos = 0;
struct timeval ts;
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
struct s2255_framei *frm;
if (!vbuf)
return;
last_frame = channel->last_frame;
if (last_frame != -1) {
frm = &channel->buffer.frame[last_frame];
tmpbuf =
(const char *)channel->buffer.frame[last_frame].lpvbits;
switch (buf->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
planar422p_to_yuv_packed((const unsigned char *)tmpbuf,
vbuf, buf->vb.width,
buf->vb.height,
buf->fmt->fourcc);
break;
case V4L2_PIX_FMT_GREY:
memcpy(vbuf, tmpbuf, buf->vb.width * buf->vb.height);
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
buf->vb.size = jpgsize;
memcpy(vbuf, tmpbuf, buf->vb.size);
break;
case V4L2_PIX_FMT_YUV422P:
memcpy(vbuf, tmpbuf,
buf->vb.width * buf->vb.height * 2);
break;
default:
printk(KERN_DEBUG "s2255: unknown format?\n");
}
channel->last_frame = -1;
} else {
printk(KERN_ERR "s2255: =======no frame\n");
return;
}
	dprintk(2, "s2255fill at: Buffer 0x%08lx size=%d\n",
(unsigned long)vbuf, pos);
/* tell v4l buffer was filled */
buf->vb.field_count = channel->frame_count * 2;
do_gettimeofday(&ts);
buf->vb.ts = ts;
buf->vb.state = VIDEOBUF_DONE;
}
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct s2255_fh *fh = vq->priv_data;
struct s2255_channel *channel = fh->channel;
*size = channel->width * channel->height * (channel->fmt->depth >> 3);
if (0 == *count)
*count = S2255_DEF_BUFS;
if (*size * *count > vid_limit * 1024 * 1024)
*count = (vid_limit * 1024 * 1024) / *size;
return 0;
}
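/*
 * Worked example (illustrative): a full-size NTSC YUYV buffer is
 * 640 * 480 * (16 >> 3) = 614400 bytes, so with the default 16 MB
 * vid_limit the clamp in buffer_setup() above allows at most
 * 16 * 1024 * 1024 / 614400 = 27 buffers.
 */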
static void free_buffer(struct videobuf_queue *vq, struct s2255_buffer *buf)
{
dprintk(4, "%s\n", __func__);
videobuf_vmalloc_free(&buf->vb);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct s2255_fh *fh = vq->priv_data;
struct s2255_channel *channel = fh->channel;
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
int rc;
int w = channel->width;
int h = channel->height;
dprintk(4, "%s, field=%d\n", __func__, field);
if (channel->fmt == NULL)
return -EINVAL;
if ((w < norm_minw(&channel->vdev)) ||
(w > norm_maxw(&channel->vdev)) ||
(h < norm_minh(&channel->vdev)) ||
(h > norm_maxh(&channel->vdev))) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
buf->vb.size = w * h * (channel->fmt->depth >> 3);
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) {
dprintk(4, "invalid buffer prepare\n");
return -EINVAL;
}
buf->fmt = channel->fmt;
buf->vb.width = w;
buf->vb.height = h;
buf->vb.field = field;
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0)
goto fail;
}
buf->vb.state = VIDEOBUF_PREPARED;
return 0;
fail:
free_buffer(vq, buf);
return rc;
}
static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
struct s2255_fh *fh = vq->priv_data;
struct s2255_channel *channel = fh->channel;
struct s2255_dmaqueue *vidq = &channel->vidq;
dprintk(1, "%s\n", __func__);
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &vidq->active);
}
static void buffer_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct s2255_buffer *buf = container_of(vb, struct s2255_buffer, vb);
struct s2255_fh *fh = vq->priv_data;
dprintk(4, "%s %d\n", __func__, fh->channel->idx);
free_buffer(vq, buf);
}
static struct videobuf_queue_ops s2255_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
static int res_get(struct s2255_fh *fh)
{
struct s2255_channel *channel = fh->channel;
/* is it free? */
if (channel->resources)
return 0; /* no, someone else uses it */
/* it's free, grab it */
channel->resources = 1;
fh->resources = 1;
dprintk(1, "s2255: res: get\n");
return 1;
}
static int res_locked(struct s2255_fh *fh)
{
return fh->channel->resources;
}
static int res_check(struct s2255_fh *fh)
{
return fh->resources;
}
static void res_free(struct s2255_fh *fh)
{
struct s2255_channel *channel = fh->channel;
channel->resources = 0;
fh->resources = 0;
dprintk(1, "res: put\n");
}
static int vidioc_querymenu(struct file *file, void *priv,
struct v4l2_querymenu *qmenu)
{
static const char *colorfilter[] = {
"Off",
"On",
NULL
};
if (qmenu->id == V4L2_CID_PRIVATE_COLORFILTER) {
int i;
const char **menu_items = colorfilter;
for (i = 0; i < qmenu->index && menu_items[i]; i++)
; /* do nothing (from v4l2-common.c) */
if (menu_items[i] == NULL || menu_items[i][0] == '\0')
return -EINVAL;
strlcpy(qmenu->name, menu_items[qmenu->index],
sizeof(qmenu->name));
return 0;
}
return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
strlcpy(cap->driver, "s2255", sizeof(cap->driver));
strlcpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
int index = f->index;
if (index >= ARRAY_SIZE(formats))
return -EINVAL;
if (!jpeg_enable && ((formats[index].fourcc == V4L2_PIX_FMT_JPEG) ||
(formats[index].fourcc == V4L2_PIX_FMT_MJPEG)))
return -EINVAL;
dprintk(4, "name %s\n", formats[index].name);
strlcpy(f->description, formats[index].name, sizeof(f->description));
f->pixelformat = formats[index].fourcc;
return 0;
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
f->fmt.pix.width = channel->width;
f->fmt.pix.height = channel->height;
f->fmt.pix.field = fh->vb_vidq.field;
f->fmt.pix.pixelformat = channel->fmt->fourcc;
f->fmt.pix.bytesperline = f->fmt.pix.width * (channel->fmt->depth >> 3);
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct s2255_fmt *fmt;
enum v4l2_field field;
int b_any_field = 0;
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
int is_ntsc;
is_ntsc =
(channel->vdev.current_norm & V4L2_STD_NTSC) ? 1 : 0;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (fmt == NULL)
return -EINVAL;
field = f->fmt.pix.field;
if (field == V4L2_FIELD_ANY)
b_any_field = 1;
dprintk(50, "%s NTSC: %d suggested width: %d, height: %d\n",
__func__, is_ntsc, f->fmt.pix.width, f->fmt.pix.height);
if (is_ntsc) {
/* NTSC */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_NTSC * 2) {
f->fmt.pix.height = NUM_LINES_1CIFS_NTSC * 2;
if (b_any_field) {
field = V4L2_FIELD_SEQ_TB;
} else if (!((field == V4L2_FIELD_INTERLACED) ||
(field == V4L2_FIELD_SEQ_TB) ||
(field == V4L2_FIELD_INTERLACED_TB))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
} else {
f->fmt.pix.height = NUM_LINES_1CIFS_NTSC;
if (b_any_field) {
field = V4L2_FIELD_TOP;
} else if (!((field == V4L2_FIELD_TOP) ||
(field == V4L2_FIELD_BOTTOM))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
}
if (f->fmt.pix.width >= LINE_SZ_4CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_4CIFS_NTSC;
else if (f->fmt.pix.width >= LINE_SZ_2CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_2CIFS_NTSC;
else if (f->fmt.pix.width >= LINE_SZ_1CIFS_NTSC)
f->fmt.pix.width = LINE_SZ_1CIFS_NTSC;
else
f->fmt.pix.width = LINE_SZ_1CIFS_NTSC;
} else {
/* PAL */
if (f->fmt.pix.height >= NUM_LINES_1CIFS_PAL * 2) {
f->fmt.pix.height = NUM_LINES_1CIFS_PAL * 2;
if (b_any_field) {
field = V4L2_FIELD_SEQ_TB;
} else if (!((field == V4L2_FIELD_INTERLACED) ||
(field == V4L2_FIELD_SEQ_TB) ||
(field == V4L2_FIELD_INTERLACED_TB))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
} else {
f->fmt.pix.height = NUM_LINES_1CIFS_PAL;
if (b_any_field) {
field = V4L2_FIELD_TOP;
} else if (!((field == V4L2_FIELD_TOP) ||
(field == V4L2_FIELD_BOTTOM))) {
dprintk(1, "unsupported field setting\n");
return -EINVAL;
}
}
		if (f->fmt.pix.width >= LINE_SZ_4CIFS_PAL) {
			f->fmt.pix.width = LINE_SZ_4CIFS_PAL;
			field = V4L2_FIELD_SEQ_TB;
		} else if (f->fmt.pix.width >= LINE_SZ_2CIFS_PAL) {
			f->fmt.pix.width = LINE_SZ_2CIFS_PAL;
			field = V4L2_FIELD_TOP;
		} else {
			f->fmt.pix.width = LINE_SZ_1CIFS_PAL;
			field = V4L2_FIELD_TOP;
		}
}
f->fmt.pix.field = field;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
dprintk(50, "%s: set width %d height %d field %d\n", __func__,
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
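/*
 * For reference, the snapping above yields these capture sizes (a
 * sketch cross-checked against the width/height pairs accepted by
 * vidioc_enum_frameintervals() further below):
 *
 *	NTSC: 640x480 (4CIFS), 640x240 (2CIFS), 320x240 (1CIFS)
 *	PAL:  704x576 (4CIFS), 704x288 (2CIFS), 352x288 (1CIFS)
 */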
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
const struct s2255_fmt *fmt;
struct videobuf_queue *q = &fh->vb_vidq;
struct s2255_mode mode;
int ret;
ret = vidioc_try_fmt_vid_cap(file, fh, f);
if (ret < 0)
return ret;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (fmt == NULL)
return -EINVAL;
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_fmt;
}
if (res_locked(fh)) {
dprintk(1, "%s: channel busy\n", __func__);
ret = -EBUSY;
goto out_s_fmt;
}
mode = channel->mode;
channel->fmt = fmt;
channel->width = f->fmt.pix.width;
channel->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
if (channel->width > norm_minw(&channel->vdev)) {
if (channel->height > norm_minh(&channel->vdev)) {
if (channel->cap_parm.capturemode &
V4L2_MODE_HIGHQUALITY)
mode.scale = SCALE_4CIFSI;
else
mode.scale = SCALE_4CIFS;
} else
mode.scale = SCALE_2CIFS;
} else {
mode.scale = SCALE_1CIFS;
}
/* color mode */
switch (channel->fmt->fourcc) {
case V4L2_PIX_FMT_GREY:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_Y8;
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_JPG;
mode.color |= (channel->jc.quality << 8);
break;
case V4L2_PIX_FMT_YUV422P:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_YUVPL;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
default:
mode.color &= ~MASK_COLOR;
mode.color |= COLOR_YUVPK;
break;
}
if ((mode.color & MASK_COLOR) != (channel->mode.color & MASK_COLOR))
mode.restart = 1;
else if (mode.scale != channel->mode.scale)
mode.restart = 1;
else if (mode.format != channel->mode.format)
mode.restart = 1;
channel->mode = mode;
(void) s2255_set_mode(channel, &mode);
ret = 0;
out_s_fmt:
mutex_unlock(&q->vb_lock);
return ret;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_reqbufs(&fh->vb_vidq, p);
return rc;
}
static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_querybuf(&fh->vb_vidq, p);
return rc;
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_qbuf(&fh->vb_vidq, p);
return rc;
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
int rc;
struct s2255_fh *fh = priv;
rc = videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK);
return rc;
}
/* write to the configuration pipe, synchronously */
static int s2255_write_config(struct usb_device *udev, unsigned char *pbuf,
int size)
{
int pipe;
int done;
	int retval = -1;
if (udev) {
pipe = usb_sndbulkpipe(udev, S2255_CONFIG_EP);
retval = usb_bulk_msg(udev, pipe, pbuf, size, &done, 500);
}
return retval;
}
static u32 get_transfer_size(struct s2255_mode *mode)
{
	int linesPerFrame = NUM_LINES_DEF;
	int pixelsPerLine = LINE_SZ_DEF;
u32 outImageSize;
u32 usbInSize;
unsigned int mask_mult;
if (mode == NULL)
return 0;
if (mode->format == FORMAT_NTSC) {
switch (mode->scale) {
case SCALE_4CIFS:
case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_NTSC * 2;
pixelsPerLine = LINE_SZ_4CIFS_NTSC;
break;
case SCALE_2CIFS:
linesPerFrame = NUM_LINES_2CIFS_NTSC;
pixelsPerLine = LINE_SZ_2CIFS_NTSC;
break;
case SCALE_1CIFS:
linesPerFrame = NUM_LINES_1CIFS_NTSC;
pixelsPerLine = LINE_SZ_1CIFS_NTSC;
break;
default:
break;
}
} else if (mode->format == FORMAT_PAL) {
switch (mode->scale) {
case SCALE_4CIFS:
case SCALE_4CIFSI:
linesPerFrame = NUM_LINES_4CIFS_PAL * 2;
pixelsPerLine = LINE_SZ_4CIFS_PAL;
break;
case SCALE_2CIFS:
linesPerFrame = NUM_LINES_2CIFS_PAL;
pixelsPerLine = LINE_SZ_2CIFS_PAL;
break;
case SCALE_1CIFS:
linesPerFrame = NUM_LINES_1CIFS_PAL;
pixelsPerLine = LINE_SZ_1CIFS_PAL;
break;
default:
break;
}
}
outImageSize = linesPerFrame * pixelsPerLine;
if ((mode->color & MASK_COLOR) != COLOR_Y8) {
/* 2 bytes/pixel if not monochrome */
outImageSize *= 2;
}
/* total bytes to send including prefix and 4K padding;
must be a multiple of USB_READ_SIZE */
usbInSize = outImageSize + PREFIX_SIZE; /* always send prefix */
mask_mult = 0xffffffffUL - DEF_USB_BLOCK + 1;
/* if size not a multiple of USB_READ_SIZE */
if (usbInSize & ~mask_mult)
usbInSize = (usbInSize & mask_mult) + (DEF_USB_BLOCK);
return usbInSize;
}
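/*
 * Worked example of the padding math above (illustrative; assumes
 * DEF_USB_BLOCK is 4096 -- the "4K padding" mentioned above -- and
 * PREFIX_SIZE is 512, matching the 512-byte marker skipped later in
 * save_frame()). Full-size NTSC color, 4CIFS:
 *
 *	outImageSize = 480 * 640 * 2           = 614400 bytes
 *	usbInSize    = 614400 + 512            = 614912
 *	rounded up   = (614912 & ~4095) + 4096 = 618496
 */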
static void s2255_print_cfg(struct s2255_dev *sdev, struct s2255_mode *mode)
{
struct device *dev = &sdev->udev->dev;
dev_info(dev, "------------------------------------------------\n");
dev_info(dev, "format: %d\nscale %d\n", mode->format, mode->scale);
dev_info(dev, "fdec: %d\ncolor %d\n", mode->fdec, mode->color);
dev_info(dev, "bright: 0x%x\n", mode->bright);
dev_info(dev, "------------------------------------------------\n");
}
/*
 * set mode is the function which controls the DSP.
 * the restart parameter in struct s2255_mode should be set whenever
 * the image size could change, e.g. via the color format, the video
 * system (NTSC/PAL) or the scale.
 * When the restart parameter is set, we wait for the DSP to
 * acknowledge the new mode before continuing.
 */
static int s2255_set_mode(struct s2255_channel *channel,
struct s2255_mode *mode)
{
int res;
__le32 *buffer;
unsigned long chn_rev;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
dprintk(3, "%s channel: %d\n", __func__, channel->idx);
/* if JPEG, set the quality */
if ((mode->color & MASK_COLOR) == COLOR_JPG) {
mode->color &= ~MASK_COLOR;
mode->color |= COLOR_JPG;
mode->color &= ~MASK_JPG_QUALITY;
mode->color |= (channel->jc.quality << 8);
}
/* save the mode */
channel->mode = *mode;
channel->req_image_size = get_transfer_size(mode);
dprintk(1, "%s: reqsize %ld\n", __func__, channel->req_image_size);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
/* set the mode */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_SET_MODE;
memcpy(&buffer[3], &channel->mode, sizeof(struct s2255_mode));
channel->setmode_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (debug)
s2255_print_cfg(dev, mode);
kfree(buffer);
	/* wait for the DSP to acknowledge the mode change before continuing */
if (mode->restart) {
wait_event_timeout(channel->wait_setmode,
(channel->setmode_ready != 0),
msecs_to_jiffies(S2255_SETMODE_TIMEOUT));
if (channel->setmode_ready != 1) {
printk(KERN_DEBUG "s2255: no set mode response\n");
res = -EFAULT;
}
}
/* clear the restart flag */
channel->mode.restart = 0;
dprintk(1, "%s chn %d, result: %d\n", __func__, channel->idx, res);
return res;
}
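/*
 * For reference, the 512-byte configuration message assembled above is
 * a sequence of 32-bit little-endian words (a sketch inferred from this
 * file; anything past the payload is left zeroed):
 *
 *	word 0:  IN_DATA_TOKEN
 *	word 1:  channel, remapped through G_chnmap[]
 *	word 2:  command (CMD_SET_MODE here; CMD_STATUS, CMD_START and
 *	         CMD_STOP reuse the same framing elsewhere in this file)
 *	word 3+: command payload (struct s2255_mode for CMD_SET_MODE)
 */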
static int s2255_cmd_status(struct s2255_channel *channel, u32 *pstatus)
{
int res;
__le32 *buffer;
u32 chn_rev;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
dprintk(4, "%s chan %d\n", __func__, channel->idx);
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
/* form the get vid status command */
buffer[0] = IN_DATA_TOKEN;
buffer[1] = (__le32) cpu_to_le32(chn_rev);
buffer[2] = CMD_STATUS;
*pstatus = 0;
channel->vidstatus_ready = 0;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
kfree(buffer);
wait_event_timeout(channel->wait_vidstatus,
(channel->vidstatus_ready != 0),
msecs_to_jiffies(S2255_VIDSTATUS_TIMEOUT));
if (channel->vidstatus_ready != 1) {
printk(KERN_DEBUG "s2255: no vidstatus response\n");
res = -EFAULT;
}
*pstatus = channel->vidstatus;
dprintk(4, "%s, vid status %d\n", __func__, *pstatus);
return res;
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
int res;
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_channel *channel = fh->channel;
int j;
dprintk(4, "%s\n", __func__);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
dev_err(&dev->udev->dev, "invalid fh type0\n");
return -EINVAL;
}
if (i != fh->type) {
dev_err(&dev->udev->dev, "invalid fh type1\n");
return -EINVAL;
}
if (!res_get(fh)) {
s2255_dev_err(&dev->udev->dev, "stream busy\n");
return -EBUSY;
}
channel->last_frame = -1;
channel->bad_payload = 0;
channel->cur_frame = 0;
channel->frame_count = 0;
for (j = 0; j < SYS_FRAMES; j++) {
channel->buffer.frame[j].ulState = S2255_READ_IDLE;
channel->buffer.frame[j].cur_size = 0;
}
res = videobuf_streamon(&fh->vb_vidq);
if (res == 0) {
s2255_start_acquire(channel);
channel->b_acquire = 1;
} else
res_free(fh);
return res;
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct s2255_fh *fh = priv;
dprintk(4, "%s\n, channel: %d", __func__, fh->channel->idx);
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
printk(KERN_ERR "invalid fh type0\n");
return -EINVAL;
}
if (i != fh->type) {
printk(KERN_ERR "invalid type i\n");
return -EINVAL;
}
s2255_stop_acquire(fh->channel);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
return 0;
}
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
{
struct s2255_fh *fh = priv;
struct s2255_mode mode;
struct videobuf_queue *q = &fh->vb_vidq;
int ret = 0;
mutex_lock(&q->vb_lock);
if (videobuf_queue_is_busy(q)) {
dprintk(1, "queue busy\n");
ret = -EBUSY;
goto out_s_std;
}
if (res_locked(fh)) {
dprintk(1, "can't change standard after started\n");
ret = -EBUSY;
goto out_s_std;
}
mode = fh->channel->mode;
if (*i & V4L2_STD_NTSC) {
dprintk(4, "%s NTSC\n", __func__);
/* if changing format, reset frame decimation/intervals */
if (mode.format != FORMAT_NTSC) {
mode.restart = 1;
mode.format = FORMAT_NTSC;
mode.fdec = FDEC_1;
}
} else if (*i & V4L2_STD_PAL) {
dprintk(4, "%s PAL\n", __func__);
if (mode.format != FORMAT_PAL) {
mode.restart = 1;
mode.format = FORMAT_PAL;
mode.fdec = FDEC_1;
}
} else {
ret = -EINVAL;
}
if (mode.restart)
s2255_set_mode(fh->channel, &mode);
out_s_std:
mutex_unlock(&q->vb_lock);
return ret;
}
/* Sensoray 2255 is a multiple channel capture device.
   It does not have a "crossbar" of inputs.
   We use one V4L device per channel. The user must
   be aware that certain combinations are not allowed.
   For instance, you cannot do full FPS on more than 2 channels
   (2 video devices) at once in color (full fps on all 4 channels
   is possible in greyscale).
*/
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_channel *channel = fh->channel;
u32 status = 0;
if (inp->index != 0)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->std = S2255_NORMS;
inp->status = 0;
if (dev->dsp_fw_ver >= S2255_MIN_DSP_STATUS) {
int rc;
rc = s2255_cmd_status(fh->channel, &status);
dprintk(4, "s2255_cmd_status rc: %d status %x\n", rc, status);
if (rc == 0)
inp->status = (status & 0x01) ? 0
: V4L2_IN_ST_NO_SIGNAL;
}
switch (dev->pid) {
case 0x2255:
default:
strlcpy(inp->name, "Composite", sizeof(inp->name));
break;
case 0x2257:
strlcpy(inp->name, (channel->idx < 2) ? "Composite" : "S-Video",
sizeof(inp->name));
break;
}
return 0;
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
if (i > 0)
return -EINVAL;
return 0;
}
/* --- controls ---------------------------------------------- */
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
struct s2255_dev *dev = fh->dev;
switch (qc->id) {
case V4L2_CID_BRIGHTNESS:
v4l2_ctrl_query_fill(qc, -127, 127, 1, DEF_BRIGHT);
break;
case V4L2_CID_CONTRAST:
v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_CONTRAST);
break;
case V4L2_CID_SATURATION:
v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_SATURATION);
break;
case V4L2_CID_HUE:
v4l2_ctrl_query_fill(qc, 0, 255, 1, DEF_HUE);
break;
case V4L2_CID_PRIVATE_COLORFILTER:
if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
return -EINVAL;
if ((dev->pid == 0x2257) && (channel->idx > 1))
return -EINVAL;
strlcpy(qc->name, "Color Filter", sizeof(qc->name));
qc->type = V4L2_CTRL_TYPE_MENU;
qc->minimum = 0;
qc->maximum = 1;
qc->step = 1;
qc->default_value = 1;
qc->flags = 0;
break;
default:
return -EINVAL;
}
dprintk(4, "%s, id %d\n", __func__, qc->id);
return 0;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct s2255_fh *fh = priv;
struct s2255_dev *dev = fh->dev;
struct s2255_channel *channel = fh->channel;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ctrl->value = channel->mode.bright;
break;
case V4L2_CID_CONTRAST:
ctrl->value = channel->mode.contrast;
break;
case V4L2_CID_SATURATION:
ctrl->value = channel->mode.saturation;
break;
case V4L2_CID_HUE:
ctrl->value = channel->mode.hue;
break;
case V4L2_CID_PRIVATE_COLORFILTER:
if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
return -EINVAL;
if ((dev->pid == 0x2257) && (channel->idx > 1))
return -EINVAL;
ctrl->value = !((channel->mode.color & MASK_INPUT_TYPE) >> 16);
break;
default:
return -EINVAL;
}
dprintk(4, "%s, id %d val %d\n", __func__, ctrl->id, ctrl->value);
return 0;
}
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
struct s2255_mode mode;
mode = channel->mode;
dprintk(4, "%s\n", __func__);
/* update the mode to the corresponding value */
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
mode.bright = ctrl->value;
break;
case V4L2_CID_CONTRAST:
mode.contrast = ctrl->value;
break;
case V4L2_CID_HUE:
mode.hue = ctrl->value;
break;
case V4L2_CID_SATURATION:
mode.saturation = ctrl->value;
break;
case V4L2_CID_PRIVATE_COLORFILTER:
if (dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
return -EINVAL;
if ((dev->pid == 0x2257) && (channel->idx > 1))
return -EINVAL;
mode.color &= ~MASK_INPUT_TYPE;
mode.color |= ((ctrl->value ? 0 : 1) << 16);
break;
default:
return -EINVAL;
}
mode.restart = 0;
	/* set mode here. Note: the stream does not need to be restarted;
	   some V4L programs restart the stream unnecessarily
	   after an s_ctrl.
	*/
s2255_set_mode(fh->channel, &mode);
return 0;
}
static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jc)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
*jc = channel->jc;
dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_s_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jc)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
if (jc->quality < 0 || jc->quality > 100)
return -EINVAL;
channel->jc.quality = jc->quality;
dprintk(2, "%s: quality %d\n", __func__, jc->quality);
return 0;
}
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct s2255_fh *fh = priv;
__u32 def_num, def_dem;
struct s2255_channel *channel = fh->channel;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
	memset(sp, 0, sizeof(struct v4l2_streamparm));
	/* the memset above would otherwise clobber the type we validated */
	sp->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
sp->parm.capture.capturemode = channel->cap_parm.capturemode;
def_num = (channel->mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (channel->mode.format == FORMAT_NTSC) ? 30000 : 25000;
sp->parm.capture.timeperframe.denominator = def_dem;
switch (channel->mode.fdec) {
default:
case FDEC_1:
sp->parm.capture.timeperframe.numerator = def_num;
break;
case FDEC_2:
sp->parm.capture.timeperframe.numerator = def_num * 2;
break;
case FDEC_3:
sp->parm.capture.timeperframe.numerator = def_num * 3;
break;
case FDEC_5:
sp->parm.capture.timeperframe.numerator = def_num * 5;
break;
}
dprintk(4, "%s capture mode, %d timeperframe %d/%d\n", __func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator);
return 0;
}
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct s2255_fh *fh = priv;
struct s2255_channel *channel = fh->channel;
struct s2255_mode mode;
int fdec = FDEC_1;
__u32 def_num, def_dem;
if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
mode = channel->mode;
/* high quality capture mode requires a stream restart */
if (channel->cap_parm.capturemode
!= sp->parm.capture.capturemode && res_locked(fh))
return -EBUSY;
def_num = (mode.format == FORMAT_NTSC) ? 1001 : 1000;
def_dem = (mode.format == FORMAT_NTSC) ? 30000 : 25000;
if (def_dem != sp->parm.capture.timeperframe.denominator)
sp->parm.capture.timeperframe.numerator = def_num;
else if (sp->parm.capture.timeperframe.numerator <= def_num)
sp->parm.capture.timeperframe.numerator = def_num;
else if (sp->parm.capture.timeperframe.numerator <= (def_num * 2)) {
sp->parm.capture.timeperframe.numerator = def_num * 2;
fdec = FDEC_2;
} else if (sp->parm.capture.timeperframe.numerator <= (def_num * 3)) {
sp->parm.capture.timeperframe.numerator = def_num * 3;
fdec = FDEC_3;
} else {
sp->parm.capture.timeperframe.numerator = def_num * 5;
fdec = FDEC_5;
}
mode.fdec = fdec;
sp->parm.capture.timeperframe.denominator = def_dem;
s2255_set_mode(channel, &mode);
dprintk(4, "%s capture mode, %d timeperframe %d/%d, fdec %d\n",
__func__,
sp->parm.capture.capturemode,
sp->parm.capture.timeperframe.numerator,
sp->parm.capture.timeperframe.denominator, fdec);
return 0;
}
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fe)
{
int is_ntsc = 0;
#define NUM_FRAME_ENUMS 4
int frm_dec[NUM_FRAME_ENUMS] = {1, 2, 3, 5};
	if (fe->index >= NUM_FRAME_ENUMS)	/* index is unsigned */
return -EINVAL;
switch (fe->width) {
case 640:
if (fe->height != 240 && fe->height != 480)
return -EINVAL;
is_ntsc = 1;
break;
case 320:
if (fe->height != 240)
return -EINVAL;
is_ntsc = 1;
break;
case 704:
if (fe->height != 288 && fe->height != 576)
return -EINVAL;
break;
case 352:
if (fe->height != 288)
return -EINVAL;
break;
default:
return -EINVAL;
}
fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fe->discrete.denominator = is_ntsc ? 30000 : 25000;
fe->discrete.numerator = (is_ntsc ? 1001 : 1000) * frm_dec[fe->index];
dprintk(4, "%s discrete %d/%d\n", __func__, fe->discrete.numerator,
fe->discrete.denominator);
return 0;
}
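/*
 * Worked example of the interval math above: for NTSC (29.97 Hz base),
 * decimation 2 reports 2002/30000, i.e. ~14.99 fps, and decimation 5
 * reports 5005/30000, i.e. ~5.99 fps. PAL decimates a 25 Hz base the
 * same way (1000/25000, 2000/25000, ...).
 */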
static int s2255_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct s2255_channel *channel = video_drvdata(file);
struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
struct s2255_fh *fh;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
int state;
dprintk(1, "s2255: open called (dev=%s)\n",
video_device_node_name(vdev));
/*
* open lock necessary to prevent multiple instances
* of v4l-conf (or other programs) from simultaneously
* reloading firmware.
*/
mutex_lock(&dev->open_lock);
state = atomic_read(&dev->fw_data->fw_state);
switch (state) {
case S2255_FW_DISCONNECTING:
mutex_unlock(&dev->open_lock);
return -ENODEV;
case S2255_FW_FAILED:
s2255_dev_err(&dev->udev->dev,
"firmware load failed. retrying.\n");
s2255_fwload_start(dev, 1);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
/* state may have changed, re-read */
state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_NOTLOADED:
case S2255_FW_LOADED_DSPWAIT:
		/* give S2255_LOAD_TIMEOUT for the firmware to load, in case the
		   driver was just loaded and the device opened right away */
printk(KERN_INFO "%s waiting for firmware load\n", __func__);
wait_event_timeout(dev->fw_data->wait_fw,
((atomic_read(&dev->fw_data->fw_state)
== S2255_FW_SUCCESS) ||
(atomic_read(&dev->fw_data->fw_state)
== S2255_FW_DISCONNECTING)),
msecs_to_jiffies(S2255_LOAD_TIMEOUT));
/* state may have changed, re-read */
state = atomic_read(&dev->fw_data->fw_state);
break;
case S2255_FW_SUCCESS:
default:
break;
}
/* state may have changed in above switch statement */
switch (state) {
case S2255_FW_SUCCESS:
break;
case S2255_FW_FAILED:
printk(KERN_INFO "2255 firmware load failed.\n");
mutex_unlock(&dev->open_lock);
return -ENODEV;
case S2255_FW_DISCONNECTING:
printk(KERN_INFO "%s: disconnecting\n", __func__);
mutex_unlock(&dev->open_lock);
return -ENODEV;
case S2255_FW_LOADED_DSPWAIT:
case S2255_FW_NOTLOADED:
printk(KERN_INFO "%s: firmware not loaded yet"
"please try again later\n",
__func__);
/*
* Timeout on firmware load means device unusable.
* Set firmware failure state.
* On next s2255_open the firmware will be reloaded.
*/
atomic_set(&dev->fw_data->fw_state,
S2255_FW_FAILED);
mutex_unlock(&dev->open_lock);
return -EAGAIN;
default:
printk(KERN_INFO "%s: unknown state\n", __func__);
mutex_unlock(&dev->open_lock);
return -EFAULT;
}
mutex_unlock(&dev->open_lock);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (NULL == fh)
return -ENOMEM;
file->private_data = fh;
fh->dev = dev;
fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fh->channel = channel;
if (!channel->configured) {
/* configure channel to default state */
channel->fmt = &formats[0];
s2255_set_mode(channel, &channel->mode);
channel->configured = 1;
}
dprintk(1, "%s: dev=%s type=%s\n", __func__,
video_device_node_name(vdev), v4l2_type_names[type]);
dprintk(2, "%s: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n", __func__,
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&channel->vidq);
dprintk(4, "%s: list_empty active=%d\n", __func__,
list_empty(&channel->vidq.active));
videobuf_queue_vmalloc_init(&fh->vb_vidq, &s2255_video_qops,
NULL, &dev->slock,
fh->type,
V4L2_FIELD_INTERLACED,
sizeof(struct s2255_buffer),
fh, vdev->lock);
return 0;
}
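/*
 * Summary of the firmware states negotiated above (a sketch inferred
 * from this function and the S2255_RESPONSE_FW handling in
 * save_frame()):
 *
 *	S2255_FW_NOTLOADED      chunk upload still in progress
 *	S2255_FW_LOADED_DSPWAIT chunks sent, waiting for the DSP to boot
 *	S2255_FW_SUCCESS        every channel reported S2255_RESPONSE_FW
 *	S2255_FW_FAILED         load error/timeout; reload retried on open
 *	S2255_FW_DISCONNECTING  device going away, abort all waiters
 */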
static unsigned int s2255_poll(struct file *file,
struct poll_table_struct *wait)
{
struct s2255_fh *fh = file->private_data;
int rc;
dprintk(100, "%s\n", __func__);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
rc = videobuf_poll_stream(file, &fh->vb_vidq, wait);
return rc;
}
static void s2255_destroy(struct s2255_dev *dev)
{
/* board shutdown stops the read pipe if it is running */
s2255_board_shutdown(dev);
	/* make sure the firmware is no longer trying to load */
del_timer(&dev->timer); /* only started in .probe and .open */
if (dev->fw_data->fw_urb) {
usb_kill_urb(dev->fw_data->fw_urb);
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
}
if (dev->fw_data->fw)
release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
/* reset the DSP so firmware can be reloaded next time */
s2255_reset_dsppower(dev);
mutex_destroy(&dev->open_lock);
mutex_destroy(&dev->lock);
usb_put_dev(dev->udev);
v4l2_device_unregister(&dev->v4l2_dev);
dprintk(1, "%s", __func__);
kfree(dev);
}
static int s2255_release(struct file *file)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
struct video_device *vdev = video_devdata(file);
struct s2255_channel *channel = fh->channel;
if (!dev)
return -ENODEV;
/* turn off stream */
if (res_check(fh)) {
if (channel->b_acquire)
s2255_stop_acquire(fh->channel);
videobuf_streamoff(&fh->vb_vidq);
res_free(fh);
}
videobuf_mmap_free(&fh->vb_vidq);
dprintk(1, "%s (dev=%s)\n", __func__, video_device_node_name(vdev));
kfree(fh);
return 0;
}
static int s2255_mmap_v4l(struct file *file, struct vm_area_struct *vma)
{
struct s2255_fh *fh = file->private_data;
int ret;
if (!fh)
return -ENODEV;
dprintk(4, "%s, vma=0x%08lx\n", __func__, (unsigned long)vma);
ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
dprintk(4, "%s vma start=0x%08lx, size=%ld, ret=%d\n", __func__,
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start, ret);
return ret;
}
static const struct v4l2_file_operations s2255_fops_v4l = {
.owner = THIS_MODULE,
.open = s2255_open,
.release = s2255_release,
.poll = s2255_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
.mmap = s2255_mmap_v4l,
};
static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
.vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_s_jpegcomp = vidioc_s_jpegcomp,
.vidioc_g_jpegcomp = vidioc_g_jpegcomp,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_g_parm = vidioc_g_parm,
.vidioc_enum_frameintervals = vidioc_enum_frameintervals,
};
static void s2255_video_device_release(struct video_device *vdev)
{
struct s2255_dev *dev = to_s2255_dev(vdev->v4l2_dev);
dprintk(4, "%s, chnls: %d \n", __func__,
atomic_read(&dev->num_channels));
if (atomic_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
return;
}
static struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
.release = s2255_video_device_release,
.tvnorms = S2255_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
static int s2255_probe_v4l(struct s2255_dev *dev)
{
int ret;
int i;
int cur_nr = video_nr;
struct s2255_channel *channel;
ret = v4l2_device_register(&dev->interface->dev, &dev->v4l2_dev);
if (ret)
return ret;
	/* initialize video4linux and register one video device per channel */
for (i = 0; i < MAX_CHANNELS; i++) {
channel = &dev->channel[i];
INIT_LIST_HEAD(&channel->vidq.active);
channel->vidq.dev = dev;
channel->vdev = template;
channel->vdev.lock = &dev->lock;
channel->vdev.v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(&channel->vdev, channel);
if (video_nr == -1)
ret = video_register_device(&channel->vdev,
VFL_TYPE_GRABBER,
video_nr);
else
ret = video_register_device(&channel->vdev,
VFL_TYPE_GRABBER,
cur_nr + i);
if (ret) {
dev_err(&dev->udev->dev,
"failed to register video device!\n");
break;
}
atomic_inc(&dev->num_channels);
v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
video_device_node_name(&channel->vdev));
}
printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %s\n",
S2255_VERSION);
	/* if no channels registered, return error and probe will fail */
if (atomic_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
printk(KERN_WARNING "s2255: Not all channels available.\n");
return 0;
}
/* this function moves the usb stream read pipe data
 * into the system buffers.
 * returns 0 on success, EAGAIN if more data remains to be processed
 * (call this function again).
 *
 * Received frame structure (offsets per the parsing code below):
 * bytes 0-3:   marker : 0x2255DA4AL (S2255_MARKER_FRAME)
 * bytes 4-7:   channel: 0-3
 * bytes 8-11:  reserved
 * bytes 12-15: payload size: size of the frame
 * bytes 16-19: JPEG size, when in a JPEG mode
 * frame data starts PREFIX_SIZE bytes after the marker
 */
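/*
 * An equivalent header overlay (illustrative only -- the parser below
 * reads the words by index, and this struct is not a type defined by
 * the driver):
 *
 *	struct s2255_frame_hdr {
 *		__le32 marker;    // S2255_MARKER_FRAME
 *		__le32 channel;   // 0-3, remapped through G_chnmap[]
 *		__le32 reserved;
 *		__le32 payload;   // frame payload size in bytes
 *		__le32 jpg_size;  // compressed size in JPEG/MJPEG modes
 *	};
 */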
static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
{
char *pdest;
u32 offset = 0;
int bframe = 0;
char *psrc;
unsigned long copy_size;
unsigned long size;
s32 idx = -1;
struct s2255_framei *frm;
unsigned char *pdata;
struct s2255_channel *channel;
dprintk(100, "buffer to user\n");
channel = &dev->channel[dev->cc];
idx = channel->cur_frame;
frm = &channel->buffer.frame[idx];
if (frm->ulState == S2255_READ_IDLE) {
int jj;
unsigned int cc;
__le32 *pdword; /*data from dsp is little endian */
int payload;
/* search for marker codes */
pdata = (unsigned char *)pipe_info->transfer_buffer;
pdword = (__le32 *)pdata;
for (jj = 0; jj < (pipe_info->cur_transfer_size - 12); jj++) {
switch (*pdword) {
case S2255_MARKER_FRAME:
dprintk(4, "found frame marker at offset:"
" %d [%x %x]\n", jj, pdata[0],
pdata[1]);
offset = jj + PREFIX_SIZE;
bframe = 1;
cc = le32_to_cpu(pdword[1]);
if (cc >= MAX_CHANNELS) {
printk(KERN_ERR
"bad channel\n");
return -EINVAL;
}
/* reverse it */
dev->cc = G_chnmap[cc];
channel = &dev->channel[dev->cc];
payload = le32_to_cpu(pdword[3]);
if (payload > channel->req_image_size) {
channel->bad_payload++;
/* discard the bad frame */
return -EINVAL;
}
channel->pkt_size = payload;
channel->jpg_size = le32_to_cpu(pdword[4]);
break;
case S2255_MARKER_RESPONSE:
pdata += DEF_USB_BLOCK;
jj += DEF_USB_BLOCK;
if (le32_to_cpu(pdword[1]) >= MAX_CHANNELS)
break;
cc = G_chnmap[le32_to_cpu(pdword[1])];
if (cc >= MAX_CHANNELS)
break;
channel = &dev->channel[cc];
switch (pdword[2]) {
case S2255_RESPONSE_SETMODE:
/* check if channel valid */
/* set mode ready */
channel->setmode_ready = 1;
wake_up(&channel->wait_setmode);
dprintk(5, "setmode ready %d\n", cc);
break;
case S2255_RESPONSE_FW:
dev->chn_ready |= (1 << cc);
if ((dev->chn_ready & 0x0f) != 0x0f)
break;
/* all channels ready */
printk(KERN_INFO "s2255: fw loaded\n");
atomic_set(&dev->fw_data->fw_state,
S2255_FW_SUCCESS);
wake_up(&dev->fw_data->wait_fw);
break;
case S2255_RESPONSE_STATUS:
channel->vidstatus = le32_to_cpu(pdword[3]);
channel->vidstatus_ready = 1;
wake_up(&channel->wait_vidstatus);
dprintk(5, "got vidstatus %x chan %d\n",
le32_to_cpu(pdword[3]), cc);
break;
default:
printk(KERN_INFO "s2255 unknown resp\n");
			}
			/* fall through to advance the scan pointer */
		default:
pdata++;
break;
}
if (bframe)
break;
} /* for */
if (!bframe)
return -EINVAL;
}
channel = &dev->channel[dev->cc];
idx = channel->cur_frame;
frm = &channel->buffer.frame[idx];
	/* search done. now check whether we should be acquiring on this channel */
if (!channel->b_acquire) {
/* we found a frame, but this channel is turned off */
frm->ulState = S2255_READ_IDLE;
return -EINVAL;
}
if (frm->ulState == S2255_READ_IDLE) {
frm->ulState = S2255_READ_FRAME;
frm->cur_size = 0;
}
/* skip the marker 512 bytes (and offset if out of sync) */
psrc = (u8 *)pipe_info->transfer_buffer + offset;
if (frm->lpvbits == NULL) {
dprintk(1, "s2255 frame buffer == NULL.%p %p %d %d",
frm, dev, dev->cc, idx);
return -ENOMEM;
}
pdest = frm->lpvbits + frm->cur_size;
copy_size = (pipe_info->cur_transfer_size - offset);
size = channel->pkt_size - PREFIX_SIZE;
/* sanity check on pdest */
if ((copy_size + frm->cur_size) < channel->req_image_size)
memcpy(pdest, psrc, copy_size);
frm->cur_size += copy_size;
dprintk(4, "cur_size size %lu size %lu \n", frm->cur_size, size);
if (frm->cur_size >= size) {
dprintk(2, "****************[%d]Buffer[%d]full*************\n",
dev->cc, idx);
channel->last_frame = channel->cur_frame;
channel->cur_frame++;
/* end of system frame ring buffer, start at zero */
if ((channel->cur_frame == SYS_FRAMES) ||
(channel->cur_frame == channel->buffer.dwFrames))
channel->cur_frame = 0;
/* frame ready */
if (channel->b_acquire)
s2255_got_frame(channel, channel->jpg_size);
channel->frame_count++;
frm->ulState = S2255_READ_IDLE;
frm->cur_size = 0;
}
/* done successfully */
return 0;
}
static void s2255_read_video_callback(struct s2255_dev *dev,
struct s2255_pipeinfo *pipe_info)
{
int res;
dprintk(50, "callback read video \n");
if (dev->cc >= MAX_CHANNELS) {
dev->cc = 0;
dev_err(&dev->udev->dev, "invalid channel\n");
return;
}
/* otherwise copy to the system buffers */
res = save_frame(dev, pipe_info);
if (res != 0)
dprintk(4, "s2255: read callback failed\n");
dprintk(50, "callback read video done\n");
return;
}
static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
u16 Index, u16 Value, void *TransferBuffer,
s32 TransferBufferLength, int bOut)
{
int r;
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
Value, Index, TransferBuffer,
TransferBufferLength, HZ * 5);
} else {
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
Value, Index, TransferBuffer,
TransferBufferLength, HZ * 5);
}
return r;
}
/*
* retrieve FX2 firmware version. future use.
* @param dev pointer to device extension
 * @return negative value on failure, else returns firmware version as an int (16 bits)
*/
static int s2255_get_fx2fw(struct s2255_dev *dev)
{
int fw;
int ret;
unsigned char transBuffer[64];
ret = s2255_vendor_req(dev, S2255_VR_FW, 0, 0, transBuffer, 2,
S2255_VR_IN);
	if (ret < 0) {
		dprintk(2, "get fw error: %x\n", ret);
		return ret;
	}
fw = transBuffer[0] + (transBuffer[1] << 8);
dprintk(2, "Get FW %x %x\n", transBuffer[0], transBuffer[1]);
return fw;
}
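/*
 * Example of the decoding above: transBuffer = { 0x04, 0x01 } yields
 * fw = 0x0104, which s2255_board_init() reports as "usb firmware
 * version 1.4" via (fw >> 8) & 0xff and fw & 0xff.
 */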
/*
* Create the system ring buffer to copy frames into from the
* usb read pipe.
*/
static int s2255_create_sys_buffers(struct s2255_channel *channel)
{
unsigned long i;
unsigned long reqsize;
dprintk(1, "create sys buffers\n");
channel->buffer.dwFrames = SYS_FRAMES;
	/* always allocate maximum size (PAL) for system buffers */
	reqsize = SYS_FRAMES_MAXSIZE;
for (i = 0; i < SYS_FRAMES; i++) {
/* allocate the frames */
channel->buffer.frame[i].lpvbits = vmalloc(reqsize);
dprintk(1, "valloc %p chan %d, idx %lu, pdata %p\n",
&channel->buffer.frame[i], channel->idx, i,
channel->buffer.frame[i].lpvbits);
channel->buffer.frame[i].size = reqsize;
if (channel->buffer.frame[i].lpvbits == NULL) {
printk(KERN_INFO "out of memory. using less frames\n");
channel->buffer.dwFrames = i;
break;
}
}
/* make sure internal states are set */
for (i = 0; i < SYS_FRAMES; i++) {
		channel->buffer.frame[i].ulState = S2255_READ_IDLE;
channel->buffer.frame[i].cur_size = 0;
}
channel->cur_frame = 0;
channel->last_frame = -1;
return 0;
}
static int s2255_release_sys_buffers(struct s2255_channel *channel)
{
unsigned long i;
dprintk(1, "release sys buffers\n");
for (i = 0; i < SYS_FRAMES; i++) {
if (channel->buffer.frame[i].lpvbits) {
dprintk(1, "vfree %p\n",
channel->buffer.frame[i].lpvbits);
vfree(channel->buffer.frame[i].lpvbits);
}
channel->buffer.frame[i].lpvbits = NULL;
}
return 0;
}
static int s2255_board_init(struct s2255_dev *dev)
{
struct s2255_mode mode_def = DEF_MODEI_NTSC_CONT;
int fw_ver;
int j;
struct s2255_pipeinfo *pipe = &dev->pipe;
dprintk(4, "board init: %p", dev);
memset(pipe, 0, sizeof(*pipe));
pipe->dev = dev;
pipe->cur_transfer_size = S2255_USB_XFER_SIZE;
pipe->max_transfer_size = S2255_USB_XFER_SIZE;
pipe->transfer_buffer = kzalloc(pipe->max_transfer_size,
GFP_KERNEL);
if (pipe->transfer_buffer == NULL) {
dprintk(1, "out of memory!\n");
return -ENOMEM;
}
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
printk(KERN_INFO "s2255: usb firmware version %d.%d\n",
(fw_ver >> 8) & 0xff,
fw_ver & 0xff);
if (fw_ver < S2255_CUR_USB_FWVER)
printk(KERN_INFO "s2255: newer USB firmware available\n");
for (j = 0; j < MAX_CHANNELS; j++) {
struct s2255_channel *channel = &dev->channel[j];
channel->b_acquire = 0;
channel->mode = mode_def;
if (dev->pid == 0x2257 && j > 1)
channel->mode.color |= (1 << 16);
channel->jc.quality = S2255_DEF_JPEG_QUAL;
channel->width = LINE_SZ_4CIFS_NTSC;
channel->height = NUM_LINES_4CIFS_NTSC * 2;
channel->fmt = &formats[0];
channel->mode.restart = 1;
channel->req_image_size = get_transfer_size(&mode_def);
channel->frame_count = 0;
/* create the system buffers */
s2255_create_sys_buffers(channel);
}
/* start read pipe */
s2255_start_readpipe(dev);
dprintk(1, "%s: success\n", __func__);
return 0;
}
static int s2255_board_shutdown(struct s2255_dev *dev)
{
u32 i;
dprintk(1, "%s: dev: %p", __func__, dev);
for (i = 0; i < MAX_CHANNELS; i++) {
if (dev->channel[i].b_acquire)
s2255_stop_acquire(&dev->channel[i]);
}
s2255_stop_readpipe(dev);
for (i = 0; i < MAX_CHANNELS; i++)
s2255_release_sys_buffers(&dev->channel[i]);
/* release transfer buffer */
kfree(dev->pipe.transfer_buffer);
return 0;
}
static void read_pipe_completion(struct urb *purb)
{
struct s2255_pipeinfo *pipe_info;
struct s2255_dev *dev;
int status;
int pipe;
pipe_info = purb->context;
dprintk(100, "%s: urb:%p, status %d\n", __func__, purb,
purb->status);
if (pipe_info == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
dev = pipe_info->dev;
if (dev == NULL) {
dev_err(&purb->dev->dev, "no context!\n");
return;
}
status = purb->status;
/* if shutting down, do not resubmit, exit immediately */
if (status == -ESHUTDOWN) {
dprintk(2, "%s: err shutdown\n", __func__);
pipe_info->err_count++;
return;
}
if (pipe_info->state == 0) {
dprintk(2, "%s: exiting USB pipe", __func__);
return;
}
if (status == 0)
s2255_read_video_callback(dev, pipe_info);
else {
pipe_info->err_count++;
dprintk(1, "%s: failed URB %d\n", __func__, status);
}
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
/* reuse urb */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
pipe_info->transfer_buffer,
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
if (pipe_info->state != 0) {
if (usb_submit_urb(pipe_info->stream_urb, GFP_ATOMIC)) {
dev_err(&dev->udev->dev, "error submitting urb\n");
}
} else {
dprintk(2, "%s :complete state 0\n", __func__);
}
return;
}
static int s2255_start_readpipe(struct s2255_dev *dev)
{
int pipe;
int retval;
struct s2255_pipeinfo *pipe_info = &dev->pipe;
pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
dprintk(2, "%s: IN %d\n", __func__, dev->read_endpoint);
pipe_info->state = 1;
pipe_info->err_count = 0;
pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pipe_info->stream_urb) {
dev_err(&dev->udev->dev,
"ReadStream: Unable to alloc URB\n");
return -ENOMEM;
}
/* transfer buffer allocated in board_init */
usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
pipe,
pipe_info->transfer_buffer,
pipe_info->cur_transfer_size,
read_pipe_completion, pipe_info);
retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
if (retval) {
printk(KERN_ERR "s2255: start read pipe failed\n");
return retval;
}
return 0;
}
/* starts acquisition process */
static int s2255_start_acquire(struct s2255_channel *channel)
{
unsigned char *buffer;
int res;
unsigned long chn_rev;
int j;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
channel->last_frame = -1;
channel->bad_payload = 0;
channel->cur_frame = 0;
for (j = 0; j < SYS_FRAMES; j++) {
		channel->buffer.frame[j].ulState = S2255_READ_IDLE;
channel->buffer.frame[j].cur_size = 0;
}
/* send the start command */
*(__le32 *) buffer = IN_DATA_TOKEN;
*((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
*((__le32 *) buffer + 2) = CMD_START;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_START error\n");
dprintk(2, "start acquire exit[%d] %d \n", channel->idx, res);
kfree(buffer);
return 0;
}
static int s2255_stop_acquire(struct s2255_channel *channel)
{
unsigned char *buffer;
int res;
unsigned long chn_rev;
struct s2255_dev *dev = to_s2255_dev(channel->vdev.v4l2_dev);
chn_rev = G_chnmap[channel->idx];
buffer = kzalloc(512, GFP_KERNEL);
if (buffer == NULL) {
dev_err(&dev->udev->dev, "out of mem\n");
return -ENOMEM;
}
/* send the stop command */
*(__le32 *) buffer = IN_DATA_TOKEN;
*((__le32 *) buffer + 1) = (__le32) cpu_to_le32(chn_rev);
*((__le32 *) buffer + 2) = CMD_STOP;
res = s2255_write_config(dev->udev, (unsigned char *)buffer, 512);
if (res != 0)
dev_err(&dev->udev->dev, "CMD_STOP error\n");
kfree(buffer);
channel->b_acquire = 0;
dprintk(4, "%s: chn %d, res %d\n", __func__, channel->idx, res);
return res;
}
static void s2255_stop_readpipe(struct s2255_dev *dev)
{
struct s2255_pipeinfo *pipe = &dev->pipe;
pipe->state = 0;
if (pipe->stream_urb) {
/* cancel urb */
usb_kill_urb(pipe->stream_urb);
usb_free_urb(pipe->stream_urb);
pipe->stream_urb = NULL;
}
dprintk(4, "%s", __func__);
return;
}
static void s2255_fwload_start(struct s2255_dev *dev, int reset)
{
if (reset)
s2255_reset_dsppower(dev);
dev->fw_data->fw_size = dev->fw_data->fw->size;
atomic_set(&dev->fw_data->fw_state, S2255_FW_NOTLOADED);
memcpy(dev->fw_data->pfw_data,
dev->fw_data->fw->data, CHUNK_SIZE);
dev->fw_data->fw_loaded = CHUNK_SIZE;
usb_fill_bulk_urb(dev->fw_data->fw_urb, dev->udev,
usb_sndbulkpipe(dev->udev, 2),
dev->fw_data->pfw_data,
CHUNK_SIZE, s2255_fwchunk_complete,
dev->fw_data);
mod_timer(&dev->timer, jiffies + HZ);
}
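/*
 * Firmware upload sketch (inferred from this function and the fw_loaded
 * bookkeeping): the image is streamed to bulk endpoint 2 in CHUNK_SIZE
 * pieces. Only the first chunk is staged here; judging by the timer,
 * the URB is submitted from s2255_timer() about a second later, and
 * s2255_fwchunk_complete() then queues each subsequent chunk from the
 * previous URB's completion.
 */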
/* standard usb probe function */
static int s2255_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct s2255_dev *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int i;
int retval = -ENOMEM;
__le32 *pdata;
int fw_size;
dprintk(2, "%s\n", __func__);
/* allocate memory for our device state and initialize it to zero */
dev = kzalloc(sizeof(struct s2255_dev), GFP_KERNEL);
if (dev == NULL) {
s2255_dev_err(&interface->dev, "out of memory\n");
return -ENOMEM;
}
atomic_set(&dev->num_channels, 0);
dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
goto errorFWDATA1;
mutex_init(&dev->lock);
mutex_init(&dev->open_lock);
/* grab usb_device and save it */
dev->udev = usb_get_dev(interface_to_usbdev(interface));
if (dev->udev == NULL) {
dev_err(&interface->dev, "null usb device\n");
retval = -ENODEV;
goto errorUDEV;
}
dprintk(1, "dev: %p, udev %p interface %p\n", dev,
dev->udev, interface);
dev->interface = interface;
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
dprintk(1, "num endpoints %d\n", iface_desc->desc.bNumEndpoints);
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (!dev->read_endpoint && usb_endpoint_is_bulk_in(endpoint)) {
/* we found the bulk in endpoint */
dev->read_endpoint = endpoint->bEndpointAddress;
}
}
if (!dev->read_endpoint) {
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
init_timer(&dev->timer);
dev->timer.function = s2255_timer;
dev->timer.data = (unsigned long)dev->fw_data;
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_channel *channel = &dev->channel[i];
dev->channel[i].idx = i;
init_waitqueue_head(&channel->wait_setmode);
init_waitqueue_head(&channel->wait_vidstatus);
}
dev->fw_data->fw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->fw_data->fw_urb) {
dev_err(&interface->dev, "out of memory!\n");
goto errorFWURB;
}
dev->fw_data->pfw_data = kzalloc(CHUNK_SIZE, GFP_KERNEL);
if (!dev->fw_data->pfw_data) {
dev_err(&interface->dev, "out of memory!\n");
goto errorFWDATA2;
}
/* load the first chunk */
if (request_firmware(&dev->fw_data->fw,
FIRMWARE_FILE_NAME, &dev->udev->dev)) {
printk(KERN_ERR "sensoray 2255 failed to get firmware\n");
goto errorREQFW;
}
/* check the firmware is valid */
fw_size = dev->fw_data->fw->size;
pdata = (__le32 *) &dev->fw_data->fw->data[fw_size - 8];
if (*pdata != S2255_FW_MARKER) {
printk(KERN_INFO "Firmware invalid.\n");
retval = -ENODEV;
goto errorFWMARKER;
} else {
/* make sure firmware is the latest */
__le32 *pRel;
		pRel = (__le32 *) &dev->fw_data->fw->data[fw_size - 4];
		dev->dsp_fw_ver = le32_to_cpu(*pRel);
		printk(KERN_INFO "s2255 dsp fw version %x\n", dev->dsp_fw_ver);
if (dev->dsp_fw_ver < S2255_CUR_DSP_FWVER)
printk(KERN_INFO "s2255: f2255usb.bin out of date.\n");
if (dev->pid == 0x2257 &&
dev->dsp_fw_ver < S2255_MIN_DSP_COLORFILTER)
printk(KERN_WARNING "s2255: 2257 requires firmware %d"
" or above.\n", S2255_MIN_DSP_COLORFILTER);
}
usb_reset_device(dev->udev);
/* load 2255 board specific */
retval = s2255_board_init(dev);
if (retval)
goto errorBOARDINIT;
spin_lock_init(&dev->slock);
s2255_fwload_start(dev, 0);
/* loads v4l specific */
retval = s2255_probe_v4l(dev);
if (retval)
goto errorBOARDINIT;
dev_info(&interface->dev, "Sensoray 2255 detected\n");
return 0;
errorBOARDINIT:
s2255_board_shutdown(dev);
errorFWMARKER:
release_firmware(dev->fw_data->fw);
errorREQFW:
kfree(dev->fw_data->pfw_data);
errorFWDATA2:
usb_free_urb(dev->fw_data->fw_urb);
errorFWURB:
del_timer(&dev->timer);
errorEP:
usb_put_dev(dev->udev);
errorUDEV:
kfree(dev->fw_data);
mutex_destroy(&dev->open_lock);
mutex_destroy(&dev->lock);
errorFWDATA1:
kfree(dev);
printk(KERN_WARNING "Sensoray 2255 driver load failed: 0x%x\n", retval);
return retval;
}
/* disconnect routine: called when the board is removed physically or the driver is unloaded with rmmod */
static void s2255_disconnect(struct usb_interface *interface)
{
struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
int i;
int channels = atomic_read(&dev->num_channels);
mutex_lock(&dev->lock);
v4l2_device_disconnect(&dev->v4l2_dev);
mutex_unlock(&dev->lock);
	/* see comments in the uvc_driver.c usb disconnect function */
atomic_inc(&dev->num_channels);
/* unregister each video device. */
for (i = 0; i < channels; i++)
video_unregister_device(&dev->channel[i].vdev);
	/* wake up anyone waiting on firmware, set-mode or status events */
atomic_set(&dev->fw_data->fw_state, S2255_FW_DISCONNECTING);
wake_up(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
dev->channel[i].setmode_ready = 1;
wake_up(&dev->channel[i].wait_setmode);
dev->channel[i].vidstatus_ready = 1;
wake_up(&dev->channel[i].wait_vidstatus);
}
if (atomic_dec_and_test(&dev->num_channels))
s2255_destroy(dev);
dev_info(&interface->dev, "%s\n", __func__);
}
static struct usb_driver s2255_driver = {
.name = S2255_DRIVER_NAME,
.probe = s2255_probe,
.disconnect = s2255_disconnect,
.id_table = s2255_table,
};
module_usb_driver(s2255_driver);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
MODULE_LICENSE("GPL");
MODULE_VERSION(S2255_VERSION);
| gpl-2.0 |
mr-tweaker/sabermod_kernel_cancro | drivers/scsi/qla4xxx/ql4_os.c | 4803 | 169182 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/*
* Driver version
*/
static char qla4xxx_version_str[40];
/*
* SRB allocation cache
*/
static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
" Set to disable exporting boot targets to sysfs.\n"
"\t\t 0 - Export boot targets\n"
"\t\t 1 - Do not export boot targets (Default)");
int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
" Don't reset the HBA for driver recovery.\n"
"\t\t 0 - It will reset HBA (Default)\n"
"\t\t 1 - It will NOT reset HBA");
int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
" Option to enable extended error logging.\n"
"\t\t 0 - no logging (Default)\n"
"\t\t 2 - debug logging");
int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
" Set to enable MSI or MSI-X interrupt mechanism.\n"
"\t\t 0 = enable INTx interrupt mechanism.\n"
"\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
"\t\t 2 = enable MSI interrupt mechanism.");
#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
" Maximum queue depth to report for target devices.\n"
"\t\t Default: 32.");
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
" Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
* SCSI host template entry points
*/
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
*/
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
enum iscsi_param param, char *buf);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
uint32_t len);
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
struct sockaddr *dst_addr,
int non_blocking);
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading);
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
uint16_t qdepth, uint32_t initial_cmdsn);
static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
static void qla4xxx_task_work(struct work_struct *wdata);
static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
static int qla4xxx_task_xmit(struct iscsi_task *);
static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats);
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
uint32_t iface_type, uint32_t payload_size,
uint32_t pid, struct sockaddr *dst_addr);
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
/*
* SCSI host template entry points
*/
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
static struct scsi_host_template qla4xxx_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME,
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
.eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
.eh_target_reset_handler = qla4xxx_eh_target_reset,
.eh_host_reset_handler = qla4xxx_eh_host_reset,
.eh_timed_out = qla4xxx_eh_cmd_timed_out,
.slave_configure = qla4xxx_slave_configure,
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.shost_attrs = qla4xxx_host_attrs,
.host_reset = qla4xxx_host_reset,
.vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};
static struct iscsi_transport qla4xxx_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
.caps = CAP_TEXT_NEGO |
CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
CAP_DATADGST | CAP_LOGIN_OFFLOAD |
CAP_MULTI_R2T,
.attr_is_visible = ql4_attr_is_visible,
.create_session = qla4xxx_session_create,
.destroy_session = qla4xxx_session_destroy,
.start_conn = qla4xxx_conn_start,
.create_conn = qla4xxx_conn_create,
.bind_conn = qla4xxx_conn_bind,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_session_get_param,
.get_ep_param = qla4xxx_get_ep_param,
.ep_connect = qla4xxx_ep_connect,
.ep_poll = qla4xxx_ep_poll,
.ep_disconnect = qla4xxx_ep_disconnect,
.get_stats = qla4xxx_conn_get_stats,
.send_pdu = iscsi_conn_send_pdu,
.xmit_task = qla4xxx_task_xmit,
.cleanup_task = qla4xxx_task_cleanup,
.alloc_pdu = qla4xxx_alloc_pdu,
.get_host_param = qla4xxx_host_get_param,
.set_iface_param = qla4xxx_iface_set_param,
.get_iface_param = qla4xxx_get_iface_param,
.bsg_request = qla4xxx_bsg_request,
.send_ping = qla4xxx_send_ping,
.get_chap = qla4xxx_get_chap_list,
.delete_chap = qla4xxx_delete_chap,
};
static struct scsi_transport_template *qla4xxx_scsi_transport;
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
uint32_t iface_type, uint32_t payload_size,
uint32_t pid, struct sockaddr *dst_addr)
{
struct scsi_qla_host *ha = to_qla_host(shost);
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
uint32_t options = 0;
uint8_t ipaddr[IPv6_ADDR_LEN];
int rval;
memset(ipaddr, 0, IPv6_ADDR_LEN);
/* IPv4 to IPv4 */
if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
(dst_addr->sa_family == AF_INET)) {
addr = (struct sockaddr_in *)dst_addr;
memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
"dest: %pI4\n", __func__,
&ha->ip_config.ip_address, ipaddr));
rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
ipaddr);
if (rval)
rval = -EINVAL;
} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
(dst_addr->sa_family == AF_INET6)) {
/* IPv6 to IPv6 */
addr6 = (struct sockaddr_in6 *)dst_addr;
memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
options |= PING_IPV6_PROTOCOL_ENABLE;
/* Ping using LinkLocal address */
if ((iface_num == 0) || (iface_num == 1)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
"src: %pI6 dest: %pI6\n", __func__,
&ha->ip_config.ipv6_link_local_addr,
ipaddr));
options |= PING_IPV6_LINKLOCAL_ADDR;
rval = qla4xxx_ping_iocb(ha, options, payload_size,
pid, ipaddr);
} else {
ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
"not supported\n", __func__, iface_num);
rval = -ENOSYS;
goto exit_send_ping;
}
/*
* If ping using LinkLocal address fails, try ping using
* IPv6 address
*/
if (rval != QLA_SUCCESS) {
options &= ~PING_IPV6_LINKLOCAL_ADDR;
if (iface_num == 0) {
options |= PING_IPV6_ADDR0;
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
"Ping src: %pI6 "
"dest: %pI6\n", __func__,
&ha->ip_config.ipv6_addr0,
ipaddr));
} else if (iface_num == 1) {
options |= PING_IPV6_ADDR1;
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
"Ping src: %pI6 "
"dest: %pI6\n", __func__,
&ha->ip_config.ipv6_addr1,
ipaddr));
}
rval = qla4xxx_ping_iocb(ha, options, payload_size,
pid, ipaddr);
if (rval)
rval = -EINVAL;
}
} else
rval = -ENOSYS;
exit_send_ping:
return rval;
}
static umode_t ql4_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
case ISCSI_HOST_PARAM_IPADDRESS:
case ISCSI_HOST_PARAM_INITIATOR_NAME:
case ISCSI_HOST_PARAM_PORT_STATE:
case ISCSI_HOST_PARAM_PORT_SPEED:
return S_IRUGO;
default:
return 0;
}
case ISCSI_PARAM:
switch (param) {
case ISCSI_PARAM_PERSISTENT_ADDRESS:
case ISCSI_PARAM_PERSISTENT_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_TARGET_NAME:
case ISCSI_PARAM_TPGT:
case ISCSI_PARAM_TARGET_ALIAS:
case ISCSI_PARAM_MAX_BURST:
case ISCSI_PARAM_MAX_R2T:
case ISCSI_PARAM_FIRST_BURST:
case ISCSI_PARAM_MAX_RECV_DLENGTH:
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
case ISCSI_PARAM_IFACE_NAME:
case ISCSI_PARAM_CHAP_OUT_IDX:
case ISCSI_PARAM_CHAP_IN_IDX:
case ISCSI_PARAM_USERNAME:
case ISCSI_PARAM_PASSWORD:
case ISCSI_PARAM_USERNAME_IN:
case ISCSI_PARAM_PASSWORD_IN:
return S_IRUGO;
default:
return 0;
}
case ISCSI_NET_PARAM:
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
case ISCSI_NET_PARAM_IPV4_SUBNET:
case ISCSI_NET_PARAM_IPV4_GW:
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
case ISCSI_NET_PARAM_IFACE_ENABLE:
case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
case ISCSI_NET_PARAM_IPV6_ADDR:
case ISCSI_NET_PARAM_IPV6_ROUTER:
case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
case ISCSI_NET_PARAM_VLAN_ID:
case ISCSI_NET_PARAM_VLAN_PRIORITY:
case ISCSI_NET_PARAM_VLAN_ENABLED:
case ISCSI_NET_PARAM_MTU:
case ISCSI_NET_PARAM_PORT:
return S_IRUGO;
default:
return 0;
}
}
return 0;
}
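/*
* qla4xxx_get_chap_list - copy valid CHAP entries into a caller buffer
* @shost: scsi host
* @chap_tbl_idx: flash CHAP table index to start scanning from
* @num_entries: in: capacity of @buf in records; out: records returned
* @buf: array of struct iscsi_chap_rec to fill
*
* Walks the cached flash CHAP table under chap_sem and copies every
* entry carrying CHAP_VALID_COOKIE, tagging each record as outgoing
* (local, BIT_7) or incoming (peer, BIT_6) CHAP.
*/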
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf)
{
struct scsi_qla_host *ha = to_qla_host(shost);
struct ql4_chap_table *chap_table;
struct iscsi_chap_rec *chap_rec;
int max_chap_entries = 0;
int valid_chap_entries = 0;
int ret = 0, i;
if (is_qla8022(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
__func__, *num_entries, chap_tbl_idx);
if (!buf) {
ret = -ENOMEM;
goto exit_get_chap_list;
}
chap_rec = (struct iscsi_chap_rec *) buf;
mutex_lock(&ha->chap_sem);
for (i = chap_tbl_idx; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
__constant_cpu_to_le16(CHAP_VALID_COOKIE))
continue;
chap_rec->chap_tbl_idx = i;
strncpy(chap_rec->username, chap_table->name,
ISCSI_CHAP_AUTH_NAME_MAX_LEN);
strncpy(chap_rec->password, chap_table->secret,
QL4_CHAP_MAX_SECRET_LEN);
chap_rec->password_length = chap_table->secret_len;
if (chap_table->flags & BIT_7) /* local */
chap_rec->chap_type = CHAP_TYPE_OUT;
if (chap_table->flags & BIT_6) /* peer */
chap_rec->chap_type = CHAP_TYPE_IN;
chap_rec++;
valid_chap_entries++;
if (valid_chap_entries == *num_entries)
break;
else
continue;
}
mutex_unlock(&ha->chap_sem);
exit_get_chap_list:
ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
__func__, valid_chap_entries);
*num_entries = valid_chap_entries;
return ret;
}
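/*
* Callback for device_for_each_child(): returns 1 (which also stops the
* walk) when the child is a logged-in iscsi session whose DDB references
* the CHAP table index passed in @data, i.e. the CHAP entry is in use.
*/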
static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
int ret = 0;
uint16_t *chap_tbl_idx = (uint16_t *) data;
struct iscsi_cls_session *cls_session;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
if (!iscsi_is_session_dev(dev))
goto exit_is_chap_active;
cls_session = iscsi_dev_to_session(dev);
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
if (iscsi_session_chkready(cls_session))
goto exit_is_chap_active;
if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
ret = 1;
exit_is_chap_active:
return ret;
}
static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
uint16_t chap_tbl_idx)
{
int ret = 0;
ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
__qla4xxx_is_chap_active);
return ret;
}
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
struct scsi_qla_host *ha = to_qla_host(shost);
struct ql4_chap_table *chap_table;
dma_addr_t chap_dma;
int max_chap_entries = 0;
uint32_t offset = 0;
uint32_t chap_size;
int ret = 0;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
if (chap_table == NULL)
return -ENOMEM;
memset(chap_table, 0, sizeof(struct ql4_chap_table));
if (is_qla8022(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
if (chap_tbl_idx >= max_chap_entries) {
ret = -EINVAL;
goto exit_delete_chap;
}
/* Check if the chap index is in use.
* If the chap entry is in use, don't delete it. */
ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
if (ret) {
ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
"delete from flash\n", chap_tbl_idx);
ret = -EBUSY;
goto exit_delete_chap;
}
chap_size = sizeof(struct ql4_chap_table);
if (is_qla40XX(ha))
offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
else {
offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
/* flt_chap_size is CHAP table size for both ports
* so divide it by 2 to calculate the offset for second port
*/
if (ha->port_num == 1)
offset += (ha->hw.flt_chap_size / 2);
offset += (chap_tbl_idx * chap_size);
}
ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
if (ret != QLA_SUCCESS) {
ret = -EINVAL;
goto exit_delete_chap;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
__le16_to_cpu(chap_table->cookie)));
if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
goto exit_delete_chap;
}
chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
/* Reuse the offset computed above: recomputing it here with the
* 40XX-style FLASH_CHAP_OFFSET would target the wrong flash region
* on 82XX adapters, which use flt_region_chap-based offsets.
*/
ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
FLASH_OPT_RMW_COMMIT);
if (ret == QLA_SUCCESS && ha->chap_list) {
mutex_lock(&ha->chap_sem);
/* Update ha chap_list cache */
memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
chap_table, sizeof(struct ql4_chap_table));
mutex_unlock(&ha->chap_sem);
}
if (ret != QLA_SUCCESS)
ret = -EINVAL;
exit_delete_chap:
dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
return ret;
}
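/*
* Reports per-interface network settings to the iscsi transport class
* (these typically surface under /sys/class/iscsi_iface/). Returns the
* number of bytes written into @buf, or -ENOSYS for parameter types the
* driver does not implement.
*/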
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type,
int param, char *buf)
{
struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
struct scsi_qla_host *ha = to_qla_host(shost);
int len = -ENOSYS;
if (param_type != ISCSI_NET_PARAM)
return -ENOSYS;
switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
break;
case ISCSI_NET_PARAM_IPV4_GW:
len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
len = sprintf(buf, "%s\n",
(ha->ip_config.ipv4_options &
IPOPT_IPV4_PROTOCOL_ENABLE) ?
"enabled" : "disabled");
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
len = sprintf(buf, "%s\n",
(ha->ip_config.ipv6_options &
IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
"enabled" : "disabled");
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
len = sprintf(buf, "%s\n",
(ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
"dhcp" : "static");
break;
case ISCSI_NET_PARAM_IPV6_ADDR:
if (iface->iface_num == 0)
len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
if (iface->iface_num == 1)
len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
break;
case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
len = sprintf(buf, "%pI6\n",
&ha->ip_config.ipv6_link_local_addr);
break;
case ISCSI_NET_PARAM_IPV6_ROUTER:
len = sprintf(buf, "%pI6\n",
&ha->ip_config.ipv6_default_router_addr);
break;
case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
len = sprintf(buf, "%s\n",
(ha->ip_config.ipv6_addl_options &
IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
"nd" : "static");
break;
case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
len = sprintf(buf, "%s\n",
(ha->ip_config.ipv6_addl_options &
IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
"auto" : "static");
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
len = sprintf(buf, "%d\n",
(ha->ip_config.ipv4_vlan_tag &
ISCSI_MAX_VLAN_ID));
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
len = sprintf(buf, "%d\n",
(ha->ip_config.ipv6_vlan_tag &
ISCSI_MAX_VLAN_ID));
break;
case ISCSI_NET_PARAM_VLAN_PRIORITY:
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
len = sprintf(buf, "%d\n",
((ha->ip_config.ipv4_vlan_tag >> 13) &
ISCSI_MAX_VLAN_PRIORITY));
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
len = sprintf(buf, "%d\n",
((ha->ip_config.ipv6_vlan_tag >> 13) &
ISCSI_MAX_VLAN_PRIORITY));
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
len = sprintf(buf, "%s\n",
(ha->ip_config.ipv4_options &
IPOPT_VLAN_TAGGING_ENABLE) ?
"enabled" : "disabled");
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
len = sprintf(buf, "%s\n",
(ha->ip_config.ipv6_options &
IPV6_OPT_VLAN_TAGGING_ENABLE) ?
"enabled" : "disabled");
break;
case ISCSI_NET_PARAM_MTU:
len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
break;
case ISCSI_NET_PARAM_PORT:
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
break;
default:
len = -ENOSYS;
}
return len;
}
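/*
* Allocates an iscsi_endpoint whose driver-private qla_endpoint records
* the destination sockaddr (IPv4 or IPv6) and the owning Scsi_Host. No
* TCP connection is opened here; on this offload HBA the firmware owns
* the connection. The @non_blocking argument is accepted but unused.
*/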
static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
{
int ret;
struct iscsi_endpoint *ep;
struct qla_endpoint *qla_ep;
struct scsi_qla_host *ha;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (!shost) {
ret = -ENXIO;
printk(KERN_ERR "%s: shost is NULL\n",
__func__);
return ERR_PTR(ret);
}
ha = iscsi_host_priv(shost);
ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
if (!ep) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
qla_ep = ep->dd_data;
memset(qla_ep, 0, sizeof(struct qla_endpoint));
if (dst_addr->sa_family == AF_INET) {
memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
addr = (struct sockaddr_in *)&qla_ep->dst_addr;
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
(char *)&addr->sin_addr));
} else if (dst_addr->sa_family == AF_INET6) {
memcpy(&qla_ep->dst_addr, dst_addr,
sizeof(struct sockaddr_in6));
addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
(char *)&addr6->sin6_addr));
}
qla_ep->host = shost;
return ep;
}
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct qla_endpoint *qla_ep;
struct scsi_qla_host *ha;
int ret = 0;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
}
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
iscsi_destroy_endpoint(ep);
}
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param,
char *buf)
{
struct qla_endpoint *qla_ep = ep->dd_data;
struct sockaddr *dst_addr;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
switch (param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
if (!qla_ep)
return -ENOTCONN;
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
if (!dst_addr)
return -ENOTCONN;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&qla_ep->dst_addr, param, buf);
default:
return -ENOSYS;
}
}
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_session *sess;
struct iscsi_cls_session *cls_sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
struct ql_iscsi_stats *ql_iscsi_stats;
int stats_size;
int ret;
dma_addr_t iscsi_stats_dma;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_sess = iscsi_conn_to_session(cls_conn);
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
/* Allocate memory */
ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
&iscsi_stats_dma, GFP_KERNEL);
if (!ql_iscsi_stats) {
ql4_printk(KERN_ERR, ha,
"Unable to allocate memory for iscsi stats\n");
goto exit_get_stats;
}
ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
iscsi_stats_dma);
if (ret != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha,
"Unable to retreive iscsi stats\n");
goto free_stats;
}
/* octets */
stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
/* xmit pdus */
stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
/* recv pdus */
stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
stats->logoutrsp_pdus =
le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
free_stats:
dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
iscsi_stats_dma);
exit_get_stats:
return;
}
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *session;
struct iscsi_session *sess;
unsigned long flags;
enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
session = starget_to_session(scsi_target(sc->device));
sess = session->dd_data;
spin_lock_irqsave(&session->lock, flags);
if (session->state == ISCSI_SESSION_FAILED)
ret = BLK_EH_RESET_TIMER;
spin_unlock_irqrestore(&session->lock, flags);
return ret;
}
static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
struct scsi_qla_host *ha = to_qla_host(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
qla4xxx_get_firmware_state(ha);
switch (ha->addl_fw_state & 0x0F00) {
case FW_ADDSTATE_LINK_SPEED_10MBPS:
speed = ISCSI_PORT_SPEED_10MBPS;
break;
case FW_ADDSTATE_LINK_SPEED_100MBPS:
speed = ISCSI_PORT_SPEED_100MBPS;
break;
case FW_ADDSTATE_LINK_SPEED_1GBPS:
speed = ISCSI_PORT_SPEED_1GBPS;
break;
case FW_ADDSTATE_LINK_SPEED_10GBPS:
speed = ISCSI_PORT_SPEED_10GBPS;
break;
}
ihost->port_speed = speed;
}
static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
struct scsi_qla_host *ha = to_qla_host(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
uint32_t state = ISCSI_PORT_STATE_DOWN;
if (test_bit(AF_LINK_UP, &ha->flags))
state = ISCSI_PORT_STATE_UP;
ihost->port_state = state;
}
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct scsi_qla_host *ha = to_qla_host(shost);
int len;
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
break;
case ISCSI_HOST_PARAM_IPADDRESS:
len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ha->name_string);
break;
case ISCSI_HOST_PARAM_PORT_STATE:
qla4xxx_set_port_state(shost);
len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
break;
case ISCSI_HOST_PARAM_PORT_SPEED:
qla4xxx_set_port_speed(shost);
len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
break;
default:
return -ENOSYS;
}
return len;
}
static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
if (ha->iface_ipv4)
return;
/* IPv4 */
ha->iface_ipv4 = iscsi_create_iface(ha->host,
&qla4xxx_iscsi_transport,
ISCSI_IFACE_TYPE_IPV4, 0, 0);
if (!ha->iface_ipv4)
ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
"iface0.\n");
}
static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
{
if (!ha->iface_ipv6_0)
/* IPv6 iface-0 */
ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
&qla4xxx_iscsi_transport,
ISCSI_IFACE_TYPE_IPV6, 0,
0);
if (!ha->iface_ipv6_0)
ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
"iface0.\n");
if (!ha->iface_ipv6_1)
/* IPv6 iface-1 */
ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
&qla4xxx_iscsi_transport,
ISCSI_IFACE_TYPE_IPV6, 1,
0);
if (!ha->iface_ipv6_1)
ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
"iface1.\n");
}
static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
{
if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
qla4xxx_create_ipv4_iface(ha);
if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
qla4xxx_create_ipv6_iface(ha);
}
static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
{
if (ha->iface_ipv4) {
iscsi_destroy_iface(ha->iface_ipv4);
ha->iface_ipv4 = NULL;
}
}
static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
{
if (ha->iface_ipv6_0) {
iscsi_destroy_iface(ha->iface_ipv6_0);
ha->iface_ipv6_0 = NULL;
}
if (ha->iface_ipv6_1) {
iscsi_destroy_iface(ha->iface_ipv6_1);
ha->iface_ipv6_1 = NULL;
}
}
static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
qla4xxx_destroy_ipv4_iface(ha);
qla4xxx_destroy_ipv6_iface(ha);
}
static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
struct iscsi_iface_param_info *iface_param,
struct addr_ctrl_blk *init_fw_cb)
{
/*
* iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
* iface_num 1 is valid only for IPv6 Addr.
*/
switch (iface_param->param) {
case ISCSI_NET_PARAM_IPV6_ADDR:
if (iface_param->iface_num & 0x1)
/* IPv6 Addr 1 */
memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
sizeof(init_fw_cb->ipv6_addr1));
else
/* IPv6 Addr 0 */
memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
sizeof(init_fw_cb->ipv6_addr0));
break;
case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
if (iface_param->iface_num & 0x1)
break;
memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
sizeof(init_fw_cb->ipv6_if_id));
break;
case ISCSI_NET_PARAM_IPV6_ROUTER:
if (iface_param->iface_num & 0x1)
break;
memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
break;
case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
/* Autocfg applies to even interface */
if (iface_param->iface_num & 0x1)
break;
if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
init_fw_cb->ipv6_addtl_opts &=
cpu_to_le16(
~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
init_fw_cb->ipv6_addtl_opts |=
cpu_to_le16(
IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
else
ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
"IPv6 addr\n");
break;
case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
/* Autocfg applies to even interface */
if (iface_param->iface_num & 0x1)
break;
if (iface_param->value[0] ==
ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
else if (iface_param->value[0] ==
ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
else
ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
"IPv6 linklocal addr\n");
break;
case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
/* Autocfg applies to even interface */
if (iface_param->iface_num & 0x1)
break;
if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
init_fw_cb->ipv6_opts |=
cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
qla4xxx_create_ipv6_iface(ha);
} else {
init_fw_cb->ipv6_opts &=
cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
0xFFFF);
qla4xxx_destroy_ipv6_iface(ha);
}
break;
case ISCSI_NET_PARAM_VLAN_TAG:
if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
break;
init_fw_cb->ipv6_vlan_tag =
cpu_to_be16(*(uint16_t *)iface_param->value);
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
init_fw_cb->ipv6_opts |=
cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
else
init_fw_cb->ipv6_opts &=
cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
break;
case ISCSI_NET_PARAM_MTU:
init_fw_cb->eth_mtu_size =
cpu_to_le16(*(uint16_t *)iface_param->value);
break;
case ISCSI_NET_PARAM_PORT:
/* Autocfg applies to even interface */
if (iface_param->iface_num & 0x1)
break;
init_fw_cb->ipv6_port =
cpu_to_le16(*(uint16_t *)iface_param->value);
break;
default:
ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
iface_param->param);
break;
}
}
static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
struct iscsi_iface_param_info *iface_param,
struct addr_ctrl_blk *init_fw_cb)
{
switch (iface_param->param) {
case ISCSI_NET_PARAM_IPV4_ADDR:
memcpy(init_fw_cb->ipv4_addr, iface_param->value,
sizeof(init_fw_cb->ipv4_addr));
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
sizeof(init_fw_cb->ipv4_subnet));
break;
case ISCSI_NET_PARAM_IPV4_GW:
memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
sizeof(init_fw_cb->ipv4_gw_addr));
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
init_fw_cb->ipv4_tcp_opts |=
cpu_to_le16(TCPOPT_DHCP_ENABLE);
else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
init_fw_cb->ipv4_tcp_opts &=
cpu_to_le16(~TCPOPT_DHCP_ENABLE);
else
ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
init_fw_cb->ipv4_ip_opts |=
cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
qla4xxx_create_ipv4_iface(ha);
} else {
init_fw_cb->ipv4_ip_opts &=
cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
0xFFFF);
qla4xxx_destroy_ipv4_iface(ha);
}
break;
case ISCSI_NET_PARAM_VLAN_TAG:
if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
break;
init_fw_cb->ipv4_vlan_tag =
cpu_to_be16(*(uint16_t *)iface_param->value);
break;
case ISCSI_NET_PARAM_VLAN_ENABLED:
if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
init_fw_cb->ipv4_ip_opts |=
cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
else
init_fw_cb->ipv4_ip_opts &=
cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
break;
case ISCSI_NET_PARAM_MTU:
init_fw_cb->eth_mtu_size =
cpu_to_le16(*(uint16_t *)iface_param->value);
break;
case ISCSI_NET_PARAM_PORT:
init_fw_cb->ipv4_port =
cpu_to_le16(*(uint16_t *)iface_param->value);
break;
default:
ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
iface_param->param);
break;
}
}
static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
struct addr_ctrl_blk_def *acb;
acb = (struct addr_ctrl_blk_def *)init_fw_cb;
memset(acb->reserved1, 0, sizeof(acb->reserved1));
memset(acb->reserved2, 0, sizeof(acb->reserved2));
memset(acb->reserved3, 0, sizeof(acb->reserved3));
memset(acb->reserved4, 0, sizeof(acb->reserved4));
memset(acb->reserved5, 0, sizeof(acb->reserved5));
memset(acb->reserved6, 0, sizeof(acb->reserved6));
memset(acb->reserved7, 0, sizeof(acb->reserved7));
memset(acb->reserved8, 0, sizeof(acb->reserved8));
memset(acb->reserved9, 0, sizeof(acb->reserved9));
memset(acb->reserved10, 0, sizeof(acb->reserved10));
memset(acb->reserved11, 0, sizeof(acb->reserved11));
memset(acb->reserved12, 0, sizeof(acb->reserved12));
memset(acb->reserved13, 0, sizeof(acb->reserved13));
memset(acb->reserved14, 0, sizeof(acb->reserved14));
memset(acb->reserved15, 0, sizeof(acb->reserved15));
}
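/*
* Applies a batch of netlink-encoded ISCSI_NET_PARAM attributes: read
* the current init firmware control block (IFCB) from the adapter,
* patch it via qla4xxx_set_ipv4()/qla4xxx_set_ipv6(), commit it to
* flash, then disable the address control block (ACB), zero the
* reserved fields and set the ACB again so the new settings take
* effect, finally refreshing the driver's local IFCB cache.
*/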
static int
qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
{
struct scsi_qla_host *ha = to_qla_host(shost);
int rval = 0;
struct iscsi_iface_param_info *iface_param = NULL;
struct addr_ctrl_blk *init_fw_cb = NULL;
dma_addr_t init_fw_cb_dma;
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
uint32_t rem = len;
struct nlattr *attr;
init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct addr_ctrl_blk),
&init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
return -ENOMEM;
}
memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
rval = -EIO;
goto exit_init_fw_cb;
}
nla_for_each_attr(attr, data, len, rem) {
iface_param = nla_data(attr);
if (iface_param->param_type != ISCSI_NET_PARAM)
continue;
switch (iface_param->iface_type) {
case ISCSI_IFACE_TYPE_IPV4:
switch (iface_param->iface_num) {
case 0:
qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
break;
default:
/* Cannot have more than one IPv4 interface */
ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
"number = %d\n",
iface_param->iface_num);
break;
}
break;
case ISCSI_IFACE_TYPE_IPV6:
switch (iface_param->iface_num) {
case 0:
case 1:
qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
break;
default:
/* Cannot have more than two IPv6 interfaces */
ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
"number = %d\n",
iface_param->iface_num);
break;
}
break;
default:
ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
break;
}
}
init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
sizeof(struct addr_ctrl_blk),
FLASH_OPT_RMW_COMMIT);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
__func__);
rval = -EIO;
goto exit_init_fw_cb;
}
rval = qla4xxx_disable_acb(ha);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
__func__);
rval = -EIO;
goto exit_init_fw_cb;
}
wait_for_completion_timeout(&ha->disable_acb_comp,
DISABLE_ACB_TOV * HZ);
qla4xxx_initcb_to_acb(init_fw_cb);
rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
__func__);
rval = -EIO;
goto exit_init_fw_cb;
}
memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
init_fw_cb_dma);
exit_init_fw_cb:
dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
return rval;
}
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
enum iscsi_param param, char *buf)
{
struct iscsi_session *sess = cls_sess->dd_data;
struct ddb_entry *ddb_entry = sess->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha;
int rval, len;
uint16_t idx;
switch (param) {
case ISCSI_PARAM_CHAP_IN_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username_in,
sess->password_in, BIDI_CHAP,
&idx);
if (rval)
return -EINVAL;
len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password, LOCAL_CHAP,
&idx);
if (rval)
return -EINVAL;
len = sprintf(buf, "%hu\n", idx);
break;
default:
return iscsi_session_get_param(cls_sess, param, buf);
}
return len;
}
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn;
struct qla_conn *qla_conn;
struct sockaddr *dst_addr;
int len = 0;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
dst_addr = &qla_conn->qla_ep->dst_addr;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
dst_addr, param, buf);
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
return len;
}
int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
{
uint32_t mbx_sts = 0;
uint16_t tmp_ddb_index;
int ret;
get_ddb_index:
tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"Free DDB index not available\n"));
ret = QLA_ERROR;
goto exit_get_ddb_index;
}
if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
goto get_ddb_index;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Found a free DDB index at %d\n", tmp_ddb_index));
ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
if (ret == QLA_ERROR) {
if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
ql4_printk(KERN_INFO, ha,
"DDB index = %d not available trying next\n",
tmp_ddb_index);
goto get_ddb_index;
}
DEBUG2(ql4_printk(KERN_INFO, ha,
"Free FW DDB not available\n"));
}
*ddb_index = tmp_ddb_index;
exit_get_ddb_index:
return ret;
}
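/*
* Illustrative call pattern for qla4xxx_get_ddb_index() (a sketch
* mirroring qla4xxx_session_create() below; "ha" is assumed to be a
* valid adapter pointer):
*
*	uint16_t ddb_index;
*
*	if (qla4xxx_get_ddb_index(ha, &ddb_index) == QLA_ERROR)
*		return NULL;
*
* On success the index is reserved in ha->ddb_idx_map and has been
* requested from the firmware via qla4xxx_req_ddb_entry().
*/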
static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
char *existing_ipaddr,
char *user_ipaddr)
{
uint8_t dst_ipaddr[IPv6_ADDR_LEN];
char formatted_ipaddr[DDB_IPADDR_LEN];
int status = QLA_SUCCESS, ret = 0;
if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
'\0', NULL);
if (ret == 0) {
status = QLA_ERROR;
goto out_match;
}
ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
} else {
ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
'\0', NULL);
if (ret == 0) {
status = QLA_ERROR;
goto out_match;
}
ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
}
if (strcmp(existing_ipaddr, formatted_ipaddr))
status = QLA_ERROR;
out_match:
return status;
}
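/*
* Scans every flash-resident DDB session on the adapter and returns
* QLA_SUCCESS only when the target name, normalized IP address and
* persistent port of @cls_conn all match an existing entry; the
* connection-start path uses this to avoid a duplicate login.
*/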
static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
struct iscsi_cls_conn *cls_conn)
{
int idx = 0, max_ddbs, rval;
struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
struct iscsi_session *sess, *existing_sess;
struct iscsi_conn *conn, *existing_conn;
struct ddb_entry *ddb_entry;
sess = cls_sess->dd_data;
conn = cls_conn->dd_data;
if (sess->targetname == NULL ||
conn->persistent_address == NULL ||
conn->persistent_port == 0)
return QLA_ERROR;
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx++) {
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
if (ddb_entry == NULL)
continue;
if (ddb_entry->ddb_type != FLASH_DDB)
continue;
existing_sess = ddb_entry->sess->dd_data;
existing_conn = ddb_entry->conn->dd_data;
if (existing_sess->targetname == NULL ||
existing_conn->persistent_address == NULL ||
existing_conn->persistent_port == 0)
continue;
DEBUG2(ql4_printk(KERN_INFO, ha,
"IQN = %s User IQN = %s\n",
existing_sess->targetname,
sess->targetname));
DEBUG2(ql4_printk(KERN_INFO, ha,
"IP = %s User IP = %s\n",
existing_conn->persistent_address,
conn->persistent_address));
DEBUG2(ql4_printk(KERN_INFO, ha,
"Port = %d User Port = %d\n",
existing_conn->persistent_port,
conn->persistent_port));
if (strcmp(existing_sess->targetname, sess->targetname))
continue;
rval = qla4xxx_match_ipaddress(ha, ddb_entry,
existing_conn->persistent_address,
conn->persistent_address);
if (rval == QLA_ERROR)
continue;
if (existing_conn->persistent_port != conn->persistent_port)
continue;
break;
}
if (idx == max_ddbs)
return QLA_ERROR;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Match found in fwdb sessions\n"));
return QLA_SUCCESS;
}
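/*
* Session-create flow: reserve a free firmware DDB index, have libiscsi
* allocate the iscsi_cls_session (with a struct ddb_entry as dd_data),
* then wire the ddb_entry into the adapter's fw_ddb_index_map. Returns
* NULL when no endpoint, DDB index or session can be obtained.
*/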
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
uint32_t initial_cmdsn)
{
struct iscsi_cls_session *cls_sess;
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (!ep) {
printk(KERN_ERR "qla4xxx: missing ep.\n");
return NULL;
}
qla_ep = ep->dd_data;
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
ret = qla4xxx_get_ddb_index(ha, &ddb_index);
if (ret == QLA_ERROR)
return NULL;
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
sizeof(struct ql4_task_data),
initial_cmdsn, ddb_index);
if (!cls_sess)
return NULL;
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_index = ddb_index;
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
return cls_sess;
}
static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
unsigned long flags;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
iscsi_session_teardown(cls_sess);
}
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
struct iscsi_cls_conn *cls_conn;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
if (!cls_conn)
return NULL;
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
return cls_conn;
}
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading)
{
struct iscsi_conn *conn;
struct qla_conn *qla_conn;
struct iscsi_endpoint *ep;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
ep = iscsi_lookup_endpoint(transport_fd);
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
return 0;
}
static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
int status = QLA_SUCCESS;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
/* Check if we have a matching FW DDB entry; if so, do not log in
* to this target again, since that could cause the target to log
* out the previous connection.
*/
ret = qla4xxx_match_fwdb_session(ha, cls_conn);
if (ret == QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha,
"Session already exist in FW.\n");
ret = -EEXIST;
goto exit_conn_start;
}
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
ret = -ENOMEM;
goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
if (ret) {
/* If iscsid is stopped and restarted, there is no need to
* set the params again, since the DDB state will already
* be active and the FW does not allow set_ddb on an
* active session.
*/
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
__func__, ddb_entry->fw_ddb_index);
goto exit_conn_start;
}
status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
if (status == QLA_ERROR) {
ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
sess->targetname);
ret = -EINVAL;
goto exit_conn_start;
}
if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
ddb_entry->fw_ddb_device_state));
exit_set_param:
ret = 0;
exit_conn_start:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
struct iscsi_session *sess;
struct scsi_qla_host *ha;
struct ddb_entry *ddb_entry;
int options;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
options = LOGOUT_OPTION_CLOSE_SESSION;
if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}
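/*
* Work item that finishes a passthru IOCB outside interrupt context: on
* PASSTHRU_STATUS_COMPLETE it restores the initiator task tag in the
* response header and hands the PDU to libiscsi via iscsi_complete_pdu().
*/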
static void qla4xxx_task_work(struct work_struct *wdata)
{
struct ql4_task_data *task_data;
struct scsi_qla_host *ha;
struct passthru_status *sts;
struct iscsi_task *task;
struct iscsi_hdr *hdr;
uint8_t *data;
uint32_t data_len;
struct iscsi_conn *conn;
int hdr_len;
itt_t itt;
task_data = container_of(wdata, struct ql4_task_data, task_work);
ha = task_data->ha;
task = task_data->task;
sts = &task_data->sts;
hdr_len = sizeof(struct iscsi_hdr);
DEBUG3(printk(KERN_INFO "Status returned\n"));
DEBUG3(qla4xxx_dump_buffer(sts, 64));
DEBUG3(printk(KERN_INFO "Response buffer"));
DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
conn = task->conn;
switch (sts->completionStatus) {
case PASSTHRU_STATUS_COMPLETE:
hdr = (struct iscsi_hdr *)task_data->resp_buffer;
/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
itt = sts->handle;
hdr->itt = itt;
data = task_data->resp_buffer + hdr_len;
data_len = task_data->resp_len - hdr_len;
iscsi_complete_pdu(conn, hdr, data, data_len);
break;
default:
ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
sts->completionStatus);
break;
}
return;
}
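/*
* Allocates per-task DMA resources for a passthru PDU: the request
* buffer holds the iSCSI header plus task->data_count bytes of data,
* while the response buffer is sized for a header plus the connection's
* max_recv_dlength. GFP_ATOMIC is used on the assumption that this can
* be called from a context that must not sleep.
*/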
static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
struct ql4_task_data *task_data;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
int hdr_len;
sess = task->conn->session;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
task_data = task->dd_data;
memset(task_data, 0, sizeof(struct ql4_task_data));
if (task->sc) {
ql4_printk(KERN_INFO, ha,
"%s: SCSI Commands not implemented\n", __func__);
return -EINVAL;
}
hdr_len = sizeof(struct iscsi_hdr);
task_data->ha = ha;
task_data->task = task;
if (task->data_count) {
task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
task->data_count,
PCI_DMA_TODEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
__func__, task->conn->max_recv_dlength, hdr_len));
task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
task_data->resp_len,
&task_data->resp_dma,
GFP_ATOMIC);
if (!task_data->resp_buffer)
goto exit_alloc_pdu;
task_data->req_len = task->data_count + hdr_len;
task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
task_data->req_len,
&task_data->req_dma,
GFP_ATOMIC);
if (!task_data->req_buffer)
goto exit_alloc_pdu;
task->hdr = task_data->req_buffer;
INIT_WORK(&task_data->task_work, qla4xxx_task_work);
return 0;
exit_alloc_pdu:
if (task_data->resp_buffer)
dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
task_data->resp_buffer, task_data->resp_dma);
if (task_data->req_buffer)
dma_free_coherent(&ha->pdev->dev, task_data->req_len,
task_data->req_buffer, task_data->req_dma);
return -ENOMEM;
}
static void qla4xxx_task_cleanup(struct iscsi_task *task)
{
struct ql4_task_data *task_data;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
int hdr_len;
hdr_len = sizeof(struct iscsi_hdr);
sess = task->conn->session;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
task_data = task->dd_data;
if (task->data_count) {
dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
task->data_count, PCI_DMA_TODEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
__func__, task->conn->max_recv_dlength, hdr_len));
dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
task_data->resp_buffer, task_data->resp_dma);
dma_free_coherent(&ha->pdev->dev, task_data->req_len,
task_data->req_buffer, task_data->req_dma);
return;
}
static int qla4xxx_task_xmit(struct iscsi_task *task)
{
struct scsi_cmnd *sc = task->sc;
struct iscsi_session *sess = task->conn->session;
struct ddb_entry *ddb_entry = sess->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha;
if (!sc)
return qla4xxx_send_passthru0(task);
ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
__func__);
return -ENOSYS;
}
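/*
* Copies negotiated parameters out of a firmware dev_db_entry into the
* libiscsi session/connection: little-endian fields are converted with
* le16/le32_to_cpu(), and the data-segment and burst lengths are scaled
* by BYTE_UNITS to convert the firmware's units into bytes.
*/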
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
struct iscsi_cls_session *cls_sess,
struct iscsi_cls_conn *cls_conn)
{
int buflen = 0;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct iscsi_conn *conn;
char ip_addr[DDB_IPADDR_LEN];
uint16_t options = 0;
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
conn = cls_conn->dd_data;
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
conn->max_xmit_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
sess->initial_r2t_en =
(BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
sess->first_burst = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
sess->max_burst = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
options = le16_to_cpu(fw_ddb_entry->options);
if (options & DDB_OPT_IPV6_DEVICE)
sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
else
sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
(char *)fw_ddb_entry->iscsi_name, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
(char *)ha->name_string, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
(char *)ip_addr, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
(char *)fw_ddb_entry->iscsi_alias, buflen);
}
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_cls_conn *cls_conn;
uint32_t ddb_state;
dma_addr_t fw_ddb_entry_dma;
struct dev_db_entry *fw_ddb_entry;
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
goto exit_session_conn_fwddb_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
fw_ddb_entry_dma, NULL, NULL, &ddb_state,
NULL, NULL, NULL) == QLA_ERROR) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
goto exit_session_conn_fwddb_param;
}
cls_sess = ddb_entry->sess;
cls_conn = ddb_entry->conn;
/* Update params */
qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
exit_session_conn_fwddb_param:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
}
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_cls_conn *cls_conn;
struct iscsi_session *sess;
struct iscsi_conn *conn;
uint32_t ddb_state;
dma_addr_t fw_ddb_entry_dma;
struct dev_db_entry *fw_ddb_entry;
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
fw_ddb_entry_dma, NULL, NULL, &ddb_state,
NULL, NULL, NULL) == QLA_ERROR) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
sess = cls_sess->dd_data;
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
/* Update timers after login */
ddb_entry->default_relogin_timeout =
(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
(le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
/* Update params */
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
conn->max_xmit_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
sess->initial_r2t_en =
(BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
sess->first_burst = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
sess->max_burst = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
(char *)fw_ddb_entry->iscsi_alias, 0);
exit_session_conn_param:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
}
/*
* Timer routines
*/
static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
unsigned long interval)
{
DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
__func__, ha->host->host_no));
init_timer(&ha->timer);
ha->timer.expires = jiffies + interval * HZ;
ha->timer.data = (unsigned long)ha;
ha->timer.function = (void (*)(unsigned long))func;
add_timer(&ha->timer);
ha->timer_active = 1;
}
static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
{
del_timer_sync(&ha->timer);
ha->timer_active = 0;
}
/**
* qla4xxx_mark_device_missing - blocks the session
* @cls_session: Pointer to the session to be blocked
*
* This routine marks a device missing by blocking its iSCSI session.
**/
void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
{
iscsi_block_session(cls_session);
}
/**
* qla4xxx_mark_all_devices_missing - mark all devices as missing.
* @ha: Pointer to host adapter structure.
*
* This routine marks every device on the adapter missing by blocking
* its iSCSI session.
**/
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
}
static struct srb *qla4xxx_get_new_srb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
struct scsi_cmnd *cmd)
{
struct srb *srb;
srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
if (!srb)
return srb;
kref_init(&srb->srb_ref);
srb->ha = ha;
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
CMD_SP(cmd) = (void *)srb;
return srb;
}
static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
{
struct scsi_cmnd *cmd = srb->cmd;
if (srb->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
CMD_SP(cmd) = NULL;
}
void qla4xxx_srb_compl(struct kref *ref)
{
struct srb *srb = container_of(ref, struct srb, srb_ref);
struct scsi_cmnd *cmd = srb->cmd;
struct scsi_qla_host *ha = srb->ha;
qla4xxx_srb_free_dma(ha, srb);
mempool_free(srb, ha->srb_mempool);
cmd->scsi_done(cmd);
}
/**
* qla4xxx_queuecommand - scsi layer issues scsi command to driver.
* @host: scsi host
* @cmd: Pointer to Linux's SCSI command structure
*
* Remarks:
* This routine is invoked by Linux to send a SCSI command to the driver.
* The mid-level driver tries to ensure that queuecommand never gets
* invoked concurrently with itself or the interrupt handler (although
* the interrupt handler may call this routine as part of request-
* completion handling). Unfortunately, it sometimes calls the scheduler
* in interrupt context, which is a big NO! NO!.
**/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct scsi_qla_host *ha = to_qla_host(host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
int rval;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
cmd->result = DID_NO_CONNECT << 16;
else
cmd->result = DID_REQUEUE << 16;
goto qc_fail_command;
}
if (!sess) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
}
rval = iscsi_session_chkready(sess);
if (rval) {
cmd->result = rval;
goto qc_fail_command;
}
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
if (!srb)
goto qc_host_busy;
rval = qla4xxx_send_command_to_isp(ha, srb);
if (rval != QLA_SUCCESS)
goto qc_host_busy_free_sp;
return 0;
qc_host_busy_free_sp:
qla4xxx_srb_free_dma(ha, srb);
mempool_free(srb, ha->srb_mempool);
qc_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc_fail_command:
cmd->scsi_done(cmd);
return 0;
}
/**
* qla4xxx_mem_free - frees memory allocated to adapter
* @ha: Pointer to host adapter structure.
*
* Frees memory previously allocated by qla4xxx_mem_alloc
**/
static void qla4xxx_mem_free(struct scsi_qla_host *ha)
{
if (ha->queues)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
ha->queues_len = 0;
ha->queues = NULL;
ha->queues_dma = 0;
ha->request_ring = NULL;
ha->request_dma = 0;
ha->response_ring = NULL;
ha->response_dma = 0;
ha->shadow_regs = NULL;
ha->shadow_regs_dma = 0;
/* Free srb pool. */
if (ha->srb_mempool)
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
if (ha->chap_dma_pool)
dma_pool_destroy(ha->chap_dma_pool);
if (ha->chap_list)
vfree(ha->chap_list);
ha->chap_list = NULL;
if (ha->fw_ddb_dma_pool)
dma_pool_destroy(ha->fw_ddb_dma_pool);
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_pcibase);
} else if (ha->reg)
iounmap(ha->reg);
pci_release_regions(ha->pdev);
}
/**
* qla4xxx_mem_alloc - allocates memory for use by adapter.
* @ha: Pointer to host adapter structure
*
* Allocates DMA memory for request and response queues. Also allocates memory
* for srbs.
**/
static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
{
unsigned long align;
/* Allocate contiguous block of DMA memory for queues. */
ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
&ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
goto mem_alloc_error_exit;
}
memset(ha->queues, 0, ha->queues_len);
/*
* As per RISC alignment requirements -- the bus-address must be a
* multiple of the request-ring size (in bytes).
*/
align = 0;
if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
(MEM_ALIGN_VALUE - 1));
/* Update request and response queue pointers. */
ha->request_dma = ha->queues_dma + align;
ha->request_ring = (struct queue_entry *) (ha->queues + align);
ha->response_dma = ha->queues_dma + align +
(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
ha->response_ring = (struct queue_entry *) (ha->queues + align +
(REQUEST_QUEUE_DEPTH *
QUEUE_SIZE));
ha->shadow_regs_dma = ha->queues_dma + align +
(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
(REQUEST_QUEUE_DEPTH *
QUEUE_SIZE) +
(RESPONSE_QUEUE_DEPTH *
QUEUE_SIZE));
/* Allocate memory for srb pool. */
ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
mempool_free_slab, srb_cachep);
if (ha->srb_mempool == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - SRB Pool.\n");
goto mem_alloc_error_exit;
}
ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
CHAP_DMA_BLOCK_SIZE, 8, 0);
if (ha->chap_dma_pool == NULL) {
ql4_printk(KERN_WARNING, ha,
"%s: chap_dma_pool allocation failed..\n", __func__);
goto mem_alloc_error_exit;
}
ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
DDB_DMA_BLOCK_SIZE, 8, 0);
if (ha->fw_ddb_dma_pool == NULL) {
ql4_printk(KERN_WARNING, ha,
"%s: fw_ddb_dma_pool allocation failed..\n",
__func__);
goto mem_alloc_error_exit;
}
return QLA_SUCCESS;
mem_alloc_error_exit:
qla4xxx_mem_free(ha);
return QLA_ERROR;
}
/**
* qla4_8xxx_check_temp - Check the ISP82XX temperature.
* @ha: adapter block pointer.
*
* Note: The caller should not hold the idc lock.
**/
static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
{
uint32_t temp, temp_state, temp_val;
int status = QLA_SUCCESS;
temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
temp_state = qla82xx_get_temp_state(temp);
temp_val = qla82xx_get_temp_val(temp);
if (temp_state == QLA82XX_TEMP_PANIC) {
ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
" exceeds maximum allowed. Hardware has been shut"
" down.\n", temp_val);
status = QLA_ERROR;
} else if (temp_state == QLA82XX_TEMP_WARN) {
if (ha->temperature == QLA82XX_TEMP_NORMAL)
ql4_printk(KERN_WARNING, ha, "Device temperature %d"
" degrees C exceeds operating range."
" Immediate action needed.\n", temp_val);
} else {
if (ha->temperature == QLA82XX_TEMP_WARN)
ql4_printk(KERN_INFO, ha, "Device temperature is"
" now %d degrees C in normal range.\n",
temp_val);
}
ha->temperature = temp_state;
return status;
}
/**
* qla4_8xxx_check_fw_alive - Check firmware health
* @ha: Pointer to host adapter structure.
*
* Context: Interrupt
**/
static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
{
uint32_t fw_heartbeat_counter;
int status = QLA_SUCCESS;
fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
"state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
ha->host_no, __func__));
return status;
}
if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
ha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
ql4_printk(KERN_INFO, ha,
"scsi(%ld): %s, Dumping hw/fw registers:\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
" 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
" 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
" 0x%x,\n PEG_NET_4_PC: 0x%x\n",
ha->host_no, __func__,
qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1),
qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS2),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
0x3c));
status = QLA_ERROR;
}
} else
ha->seconds_since_last_heartbeat = 0;
ha->fw_heartbeat_counter = fw_heartbeat_counter;
return status;
}
/**
* qla4_8xxx_watchdog - Poll dev state
* @ha: Pointer to host adapter structure.
*
* Context: Interrupt
**/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
uint32_t dev_state, halt_status;
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (qla4_8xxx_check_temp(ha)) {
ql4_printk(KERN_INFO, ha, "disabling pause"
" transmit on port 0 & 1.\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0 |
CRB_NIU_XG_PAUSE_CTL_P1);
set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
} else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
ql4_printk(KERN_INFO, ha, "%s: HW State: "
"NEED RESET!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
}
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
__func__);
set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
} else {
/* Check firmware health */
if (qla4_8xxx_check_fw_alive(ha)) {
ql4_printk(KERN_INFO, ha, "disabling pause"
" transmit on port 0 & 1.\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0 |
CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
ql4_printk(KERN_ERR, ha, "%s:"
" Firmware aborted with"
" error code 0x00006700."
" Device is being reset\n",
__func__);
/* Since we cannot change dev_state in interrupt
* context, set appropriate DPC flag then wakeup
* DPC */
if (halt_status & HALT_STATUS_UNRECOVERABLE)
set_bit(DPC_HA_UNRECOVERABLE,
&ha->dpc_flags);
else {
ql4_printk(KERN_INFO, ha, "%s: detect "
"abort needed!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
qla4xxx_mailbox_premature_completion(ha);
qla4xxx_wake_dpc(ha);
}
}
}
}
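/*
* Per-second relogin state machine for flash DDB sessions, driven from
* qla4xxx_timer(): while a session is offline, retry_relogin_timer
* counts down to schedule a DPC relogin, and relogin_timer separately
* bounds how long an in-flight relogin may take before it is retried
* with an incremented retry count.
*/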
static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
if (ddb_entry->ddb_type != FLASH_DDB)
return;
if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
!iscsi_is_session_online(cls_sess)) {
if (atomic_read(&ddb_entry->retry_relogin_timer) !=
INVALID_ENTRY) {
if (atomic_read(&ddb_entry->retry_relogin_timer) ==
0) {
atomic_set(&ddb_entry->retry_relogin_timer,
INVALID_ENTRY);
set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
set_bit(DF_RELOGIN, &ddb_entry->flags);
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: index [%d] login device\n",
__func__, ddb_entry->fw_ddb_index));
} else
atomic_dec(&ddb_entry->retry_relogin_timer);
}
}
/* Wait for the relogin to time out */
if (atomic_read(&ddb_entry->relogin_timer) &&
(atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
/*
* If the relogin times out and the device is
* still NOT ONLINE then try and relogin again.
*/
if (!iscsi_is_session_online(cls_sess)) {
/* Reset retry relogin timer */
atomic_inc(&ddb_entry->relogin_retry_count);
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: index[%d] relogin timed out-retrying"
" relogin (%d), retry (%d)\n", __func__,
ddb_entry->fw_ddb_index,
atomic_read(&ddb_entry->relogin_retry_count),
ddb_entry->default_time2wait + 4));
set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
atomic_set(&ddb_entry->retry_relogin_timer,
ddb_entry->default_time2wait + 4);
}
}
}
/**
* qla4xxx_timer - checks every second for work to do.
* @ha: Pointer to host adapter structure.
**/
static void qla4xxx_timer(struct scsi_qla_host *ha)
{
int start_dpc = 0;
uint16_t w;
iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
mod_timer(&ha->timer, jiffies + HZ);
return;
}
/* Hardware read to trigger an EEH error during mailbox waits. */
if (!pci_channel_offline(ha->pdev))
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
if (is_qla8022(ha)) {
qla4_8xxx_watchdog(ha);
}
if (!is_qla8022(ha)) {
/* Check for heartbeat interval. */
if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
ha->heartbeat_interval != 0) {
ha->seconds_since_last_heartbeat++;
if (ha->seconds_since_last_heartbeat >
ha->heartbeat_interval + 2)
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
}
/* Process any deferred work. */
if (!list_empty(&ha->work_list))
start_dpc++;
/* Wakeup the dpc routine for this adapter, if needed. */
if (start_dpc ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
test_bit(DPC_AEN, &ha->dpc_flags)) {
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
" - dpc flags = 0x%lx\n",
ha->host_no, __func__, ha->dpc_flags));
qla4xxx_wake_dpc(ha);
}
/* Reschedule timer thread to call us back in one second */
mod_timer(&ha->timer, jiffies + HZ);
DEBUG2(ha->seconds_since_last_intr++);
}
/**
* qla4xxx_cmd_wait - waits for all outstanding commands to complete
* @ha: Pointer to host adapter structure.
*
* This routine stalls the driver until all outstanding commands are returned.
* Caller must release the Hardware Lock prior to calling this routine.
**/
static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
{
uint32_t index = 0;
unsigned long flags;
struct scsi_cmnd *cmd;
unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
"complete\n", WAIT_CMD_TOV));
while (!time_after_eq(jiffies, wtime)) {
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Find a command that hasn't completed. */
for (index = 0; index < ha->host->can_queue; index++) {
cmd = scsi_host_find_tag(ha->host, index);
/*
* We cannot just check if the index is valid,
* because if we are run from the scsi eh, then
* the scsi/block layer is going to prevent
* the tag from being released.
*/
if (cmd != NULL && CMD_SP(cmd))
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* If No Commands are pending, wait is complete */
if (index == ha->host->can_queue)
return QLA_SUCCESS;
msleep(1000);
}
/* If we timed out on waiting for commands to come back
* return ERROR. */
return QLA_ERROR;
}
int qla4xxx_hw_reset(struct scsi_qla_host *ha)
{
uint32_t ctrl_status;
unsigned long flags = 0;
DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
return QLA_ERROR;
spin_lock_irqsave(&ha->hardware_lock, flags);
/*
* If the SCSI Reset Interrupt bit is set, clear it.
* Otherwise, the Soft Reset won't work.
*/
ctrl_status = readw(&ha->reg->ctrl_status);
if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
/* Issue Soft Reset */
writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
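/* Dummy read to force the posted write out to the adapter */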
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
}
/**
* qla4xxx_soft_reset - performs soft reset.
* @ha: Pointer to host adapter structure.
**/
int qla4xxx_soft_reset(struct scsi_qla_host *ha)
{
uint32_t max_wait_time;
unsigned long flags = 0;
int status;
uint32_t ctrl_status;
status = qla4xxx_hw_reset(ha);
if (status != QLA_SUCCESS)
return status;
status = QLA_ERROR;
/* Wait until the Network Reset Intr bit is cleared */
max_wait_time = RESET_INTR_TOV;
do {
spin_lock_irqsave(&ha->hardware_lock, flags);
ctrl_status = readw(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
break;
msleep(1000);
} while ((--max_wait_time));
if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
DEBUG2(printk(KERN_WARNING
"scsi%ld: Network Reset Intr not cleared by "
"Network function, clearing it now!\n",
ha->host_no));
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/* Wait until the firmware tells us the Soft Reset is done */
max_wait_time = SOFT_RESET_TOV;
do {
spin_lock_irqsave(&ha->hardware_lock, flags);
ctrl_status = readw(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if ((ctrl_status & CSR_SOFT_RESET) == 0) {
status = QLA_SUCCESS;
break;
}
msleep(1000);
} while ((--max_wait_time));
/*
* Also, make sure that the SCSI Reset Interrupt bit has been cleared
* after the soft reset has taken place.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
ctrl_status = readw(&ha->reg->ctrl_status);
if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* If the soft reset fails then most probably the BIOS on the other
* function is also enabled.
* Since the initialization is sequential, the other function
* won't be able to acknowledge the soft reset.
* Issue a force soft reset to work around this scenario.
*/
if (max_wait_time == 0) {
/* Issue Force Soft Reset */
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait until the firmware tells us the Soft Reset is done */
max_wait_time = SOFT_RESET_TOV;
do {
spin_lock_irqsave(&ha->hardware_lock, flags);
ctrl_status = readw(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
status = QLA_SUCCESS;
break;
}
msleep(1000);
} while ((--max_wait_time));
}
return status;
}
/**
* qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
* @ha: Pointer to host adapter structure.
* @res: returned scsi status
*
* This routine is called just prior to a HARD RESET to return all
* outstanding commands back to the Operating System.
* Caller should make sure that the following locks are released
* before calling this routine: Hardware lock, and io_request_lock.
**/
static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
{
struct srb *srb;
int i;
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (i = 0; i < ha->host->can_queue; i++) {
srb = qla4xxx_del_from_active_array(ha, i);
if (srb != NULL) {
srb->cmd->result = res;
kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
{
clear_bit(AF_ONLINE, &ha->flags);
/* Disable the board */
ql4_printk(KERN_INFO, ha, "Disabling the board\n");
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
qla4xxx_mark_all_devices_missing(ha);
clear_bit(AF_INIT_DONE, &ha->flags);
}
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
if (ddb_entry->ddb_type == FLASH_DDB)
iscsi_block_session(ddb_entry->sess);
else
iscsi_session_failure(cls_session->dd_data,
ISCSI_ERR_CONN_FAILED);
}
/**
* qla4xxx_recover_adapter - recovers adapter after a fatal error
* @ha: Pointer to host adapter structure.
**/
static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
uint8_t reset_chip = 0;
uint32_t dev_state;
unsigned long wait;
/* Stall incoming I/O until we are done */
scsi_block_requests(ha->host);
clear_bit(AF_ONLINE, &ha->flags);
clear_bit(AF_LINK_UP, &ha->flags);
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
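/* DPC_RESET_HA requests a full chip reset; the FW-context-only
* path further below merely restarts the firmware on ISP-82xx. */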
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
reset_chip = 1;
/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific),
* do not reset the adapter; jump straight to initialize_adapter */
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
status = QLA_SUCCESS;
goto recover_ha_init_adapter;
}
/* For the ISP-82xx adapter, issue a stop_firmware if invoked
* from eh_host_reset or ioctl module */
if (is_qla8022(ha) && !reset_chip &&
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s - Performing stop_firmware...\n",
ha->host_no, __func__));
status = ha->isp_ops->reset_firmware(ha);
if (status == QLA_SUCCESS) {
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
} else {
/* If the stop_firmware fails then
* reset the entire chip */
reset_chip = 1;
clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
}
/* Issue full chip reset if recovering from a catastrophic error,
* or if stop_firmware fails for ISP-82xx.
* This is the default case for ISP-4xxx */
if (!is_qla8022(ha) || reset_chip) {
if (!is_qla8022(ha))
goto chip_reset;
/* Check if 82XX firmware is alive or not
* We may have arrived here from NEED_RESET
* detection only */
if (test_bit(AF_FW_RECOVERY, &ha->flags))
goto chip_reset;
wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
while (time_before(jiffies, wait)) {
if (qla4_8xxx_check_fw_alive(ha)) {
qla4xxx_mailbox_premature_completion(ha);
break;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
}
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
chip_reset:
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: %s - Performing chip reset..\n",
ha->host_no, __func__));
status = ha->isp_ops->reset_chip(ha);
}
/* Flush any pending ddb changed AENs */
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
recover_ha_init_adapter:
/* Upon successful firmware/chip reset, re-initialize the adapter */
if (status == QLA_SUCCESS) {
/* For ISP-4xxx, force function 1 to always initialize
* before function 3 to prevent both functions from
* stepping on top of the other */
if (!is_qla8022(ha) && (ha->mac_index == 3))
ssleep(6);
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary.
* Do not retry initialize_adapter for the RESET_HA_INTR (ISP-4xxx
* specific) case, to prevent ping-pong resets between functions */
if (!test_bit(AF_ONLINE, &ha->flags) &&
!test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
/* Adapter initialization failed, see if we can retry
* resetting the ha.
* Since we don't want to block the DPC for too long
* with multiple resets in the same thread,
* utilize DPC to retry */
if (is_qla8022(ha)) {
qla4_8xxx_idc_lock(ha);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
qla4_8xxx_idc_unlock(ha);
if (dev_state == QLA82XX_DEV_FAILED) {
ql4_printk(KERN_INFO, ha, "%s: don't retry "
"recover adapter. H/W is in Failed "
"state\n", __func__);
qla4xxx_dead_adapter_cleanup(ha);
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
status = QLA_ERROR;
goto exit_recover;
}
}
if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
DEBUG2(printk("scsi%ld: recover adapter - retrying "
"(%d) more times\n", ha->host_no,
ha->retry_reset_ha_cnt));
set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
status = QLA_ERROR;
} else {
if (ha->retry_reset_ha_cnt > 0) {
/* Schedule another Reset HA--DPC will retry */
ha->retry_reset_ha_cnt--;
DEBUG2(printk("scsi%ld: recover adapter - "
"retry remaining %d\n",
ha->host_no,
ha->retry_reset_ha_cnt));
status = QLA_ERROR;
}
if (ha->retry_reset_ha_cnt == 0) {
/* Recover adapter retries have been exhausted.
* Adapter DEAD */
DEBUG2(printk("scsi%ld: recover adapter "
"failed - board disabled\n",
ha->host_no));
qla4xxx_dead_adapter_cleanup(ha);
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
status = QLA_ERROR;
}
}
} else {
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
}
exit_recover:
ha->adapter_error_count++;
if (test_bit(AF_ONLINE, &ha->flags))
ha->isp_ops->enable_intrs(ha);
scsi_unblock_requests(ha->host);
clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
return status;
}
static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
if (!iscsi_is_session_online(cls_session)) {
if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" unblock session\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
if (ddb_entry->ddb_type == FLASH_DDB) {
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
qla4xxx_arm_relogin_timer(ddb_entry);
} else
iscsi_session_failure(cls_session->dd_data,
ISCSI_ERR_CONN_FAILED);
}
}
}
int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" unblock session\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
iscsi_unblock_session(ddb_entry->sess);
/* Start scan target */
if (test_bit(AF_ONLINE, &ha->flags)) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" start scan\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
}
return QLA_SUCCESS;
}
int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" unblock user space session\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
iscsi_conn_start(ddb_entry->conn);
iscsi_conn_login_event(ddb_entry->conn,
ISCSI_CONN_STATE_LOGGED_IN);
return QLA_SUCCESS;
}
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
uint16_t relogin_timer;
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
relogin_timer = max(ddb_entry->default_relogin_timeout,
(uint16_t)RELOGIN_TOV);
atomic_set(&ddb_entry->relogin_timer, relogin_timer);
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
ddb_entry->fw_ddb_index, relogin_timer));
qla4xxx_login_flash_ddb(cls_sess);
}
static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
if (!(ddb_entry->ddb_type == FLASH_DDB))
return;
if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
!iscsi_is_session_online(cls_sess)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"relogin issued\n"));
qla4xxx_relogin_flash_ddb(cls_sess);
}
}
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
queue_work(ha->dpc_thread, &ha->dpc_work);
}
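/* Deferred-work helpers: events allocated below are queued on
* ha->work_list and drained by qla4xxx_do_work() in DPC context;
* qla4xxx_post_aen_work() and qla4xxx_post_ping_evt_work() are the
* two posting paths. */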
static struct qla4_work_evt *
qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
enum qla4_work_type type)
{
struct qla4_work_evt *e;
uint32_t size = sizeof(struct qla4_work_evt) + data_size;
e = kzalloc(size, GFP_ATOMIC);
if (!e)
return NULL;
INIT_LIST_HEAD(&e->list);
e->type = type;
return e;
}
static void qla4xxx_post_work(struct scsi_qla_host *ha,
struct qla4_work_evt *e)
{
unsigned long flags;
spin_lock_irqsave(&ha->work_lock, flags);
list_add_tail(&e->list, &ha->work_list);
spin_unlock_irqrestore(&ha->work_lock, flags);
qla4xxx_wake_dpc(ha);
}
int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
enum iscsi_host_event_code aen_code,
uint32_t data_size, uint8_t *data)
{
struct qla4_work_evt *e;
e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
if (!e)
return QLA_ERROR;
e->u.aen.code = aen_code;
e->u.aen.data_size = data_size;
memcpy(e->u.aen.data, data, data_size);
qla4xxx_post_work(ha, e);
return QLA_SUCCESS;
}
int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
uint32_t status, uint32_t pid,
uint32_t data_size, uint8_t *data)
{
struct qla4_work_evt *e;
e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
if (!e)
return QLA_ERROR;
e->u.ping.status = status;
e->u.ping.pid = pid;
e->u.ping.data_size = data_size;
memcpy(e->u.ping.data, data, data_size);
qla4xxx_post_work(ha, e);
return QLA_SUCCESS;
}
static void qla4xxx_do_work(struct scsi_qla_host *ha)
{
struct qla4_work_evt *e, *tmp;
unsigned long flags;
LIST_HEAD(work);
spin_lock_irqsave(&ha->work_lock, flags);
list_splice_init(&ha->work_list, &work);
spin_unlock_irqrestore(&ha->work_lock, flags);
list_for_each_entry_safe(e, tmp, &work, list) {
list_del_init(&e->list);
switch (e->type) {
case QLA4_EVENT_AEN:
iscsi_post_host_event(ha->host_no,
&qla4xxx_iscsi_transport,
e->u.aen.code,
e->u.aen.data_size,
e->u.aen.data);
break;
case QLA4_EVENT_PING_STATUS:
iscsi_ping_comp_event(ha->host_no,
&qla4xxx_iscsi_transport,
e->u.ping.status,
e->u.ping.pid,
e->u.ping.data_size,
e->u.ping.data);
break;
default:
ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
"supported", e->type);
}
kfree(e);
}
}
/**
* qla4xxx_do_dpc - dpc routine
* @work: pointer to the dpc work_struct embedded in the adapter structure
*
* This routine is a task that is scheduled by the interrupt handler
* to perform the background processing for interrupts. We put it
* on a task queue that is consumed whenever the scheduler runs; that's
* so you can do anything (e.g. put the process to sleep). In fact,
* the mid-level tries to sleep when it reaches the driver threshold
* "host->can_queue". This can cause a panic if we were in our interrupt code.
**/
static void qla4xxx_do_dpc(struct work_struct *work)
{
struct scsi_qla_host *ha =
container_of(work, struct scsi_qla_host, dpc_work);
int status = QLA_ERROR;
DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
"flags = 0x%08lx, dpc_flags = 0x%08lx\n",
ha->host_no, __func__, ha->flags, ha->dpc_flags))
/* Initialization not yet finished. Don't do anything yet. */
if (!test_bit(AF_INIT_DONE, &ha->flags))
return;
if (test_bit(AF_EEH_BUSY, &ha->flags)) {
DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
ha->host_no, __func__, ha->flags));
return;
}
/* post events to application */
qla4xxx_do_work(ha);
if (is_qla8022(ha)) {
if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
qla4_8xxx_idc_lock(ha);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
qla4_8xxx_idc_unlock(ha);
ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
qla4_8xxx_device_state_handler(ha);
}
if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
qla4_8xxx_need_qsnt_handler(ha);
}
}
if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
(test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
goto dpc_post_reset_ha;
}
if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags))
qla4xxx_recover_adapter(ha);
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
uint8_t wait_time = RESET_INTR_TOV;
while ((readw(&ha->reg->ctrl_status) &
(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
if (--wait_time == 0)
break;
msleep(1000);
}
if (wait_time == 0)
DEBUG2(printk("scsi%ld: %s: SR|FSR "
"bit not cleared-- resetting\n",
ha->host_no, __func__));
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
status = qla4xxx_recover_adapter(ha);
}
clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
if (status == QLA_SUCCESS)
ha->isp_ops->enable_intrs(ha);
}
}
dpc_post_reset_ha:
/* ---- process AEN? --- */
if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
/* ---- Get DHCP IP Address? --- */
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
/* ---- relogin device? --- */
if (adapter_up(ha) &&
test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
}
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
/* ---- link down? --- */
qla4xxx_mark_all_devices_missing(ha);
} else {
/* ---- link up? --- *
* F/W will auto login to all devices ONLY ONCE after
* link up during driver initialization and runtime
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
qla4xxx_build_ddb_list(ha, ha->is_reset);
iscsi_host_for_each_session(ha->host,
qla4xxx_login_flash_ddb);
} else
qla4xxx_relogin_all_devices(ha);
}
}
}
/**
* qla4xxx_free_adapter - release the adapter
* @ha: pointer to adapter structure
**/
static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
/* Turn-off interrupts on the card. */
ha->isp_ops->disable_intrs(ha);
}
/* Remove timer thread, if present */
if (ha->timer_active)
qla4xxx_stop_timer(ha);
/* Kill the kernel thread for this host */
if (ha->dpc_thread)
destroy_workqueue(ha->dpc_thread);
/* Kill the task work queue for this host */
if (ha->task_wq)
destroy_workqueue(ha->task_wq);
/* Put the firmware in a known state */
ha->isp_ops->reset_firmware(ha);
if (is_qla8022(ha)) {
qla4_8xxx_idc_lock(ha);
qla4_8xxx_clear_drv_active(ha);
qla4_8xxx_idc_unlock(ha);
}
/* Detach interrupts */
if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
qla4xxx_free_irqs(ha);
/* free extra memory */
qla4xxx_mem_free(ha);
}
int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
{
int status = 0;
unsigned long mem_base, mem_len, db_base, db_len;
struct pci_dev *pdev = ha->pdev;
status = pci_request_regions(pdev, DRIVER_NAME);
if (status) {
printk(KERN_WARNING
"scsi(%ld) Failed to reserve PIO regions (%s) "
"status=%d\n", ha->host_no, pci_name(pdev), status);
goto iospace_error_exit;
}
DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
__func__, pdev->revision));
ha->revision_id = pdev->revision;
/* remap phys address */
mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
mem_len = pci_resource_len(pdev, 0);
DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
__func__, mem_base, mem_len));
/* mapping of pcibase pointer */
ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
if (!ha->nx_pcibase) {
printk(KERN_ERR
"cannot remap MMIO (%s), aborting\n", pci_name(pdev));
pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
/* Mapping of IO base pointer, door bell read and write pointer */
/* mapping of IO base pointer */
ha->qla4_8xxx_reg =
(struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
0xbc000 + (ha->pdev->devfn << 11));
db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
db_len = pci_resource_len(pdev, 4);
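/* Select the doorbell CAM RAM register based on the PCI function */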
ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
QLA82XX_CAM_RAM_DB2);
return 0;
iospace_error_exit:
return -ENOMEM;
}
/***
* qla4xxx_iospace_config - maps registers
* @ha: pointer to adapter structure
*
* This routine maps the HBA's registers from the pci address space
* into the kernel virtual address space for memory mapped i/o.
**/
int qla4xxx_iospace_config(struct scsi_qla_host *ha)
{
unsigned long pio, pio_len, pio_flags;
unsigned long mmio, mmio_len, mmio_flags;
pio = pci_resource_start(ha->pdev, 0);
pio_len = pci_resource_len(ha->pdev, 0);
pio_flags = pci_resource_flags(ha->pdev, 0);
if (pio_flags & IORESOURCE_IO) {
if (pio_len < MIN_IOBASE_LEN) {
ql4_printk(KERN_WARNING, ha,
"Invalid PCI I/O region size\n");
pio = 0;
}
} else {
ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
pio = 0;
}
/* Use MMIO operations for all accesses. */
mmio = pci_resource_start(ha->pdev, 1);
mmio_len = pci_resource_len(ha->pdev, 1);
mmio_flags = pci_resource_flags(ha->pdev, 1);
if (!(mmio_flags & IORESOURCE_MEM)) {
ql4_printk(KERN_ERR, ha,
"region #0 not an MMIO resource, aborting\n");
goto iospace_error_exit;
}
if (mmio_len < MIN_IOBASE_LEN) {
ql4_printk(KERN_ERR, ha,
"Invalid PCI mem region size, aborting\n");
goto iospace_error_exit;
}
if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
ql4_printk(KERN_WARNING, ha,
"Failed to reserve PIO/MMIO regions\n");
goto iospace_error_exit;
}
ha->pio_address = pio;
ha->pio_length = pio_len;
ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
if (!ha->reg) {
ql4_printk(KERN_ERR, ha,
"cannot remap MMIO, aborting\n");
goto iospace_error_exit;
}
return 0;
iospace_error_exit:
return -ENOMEM;
}
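/* Per-chip operation tables: ISP-4xxx uses the legacy register
* interface, while ISP-82xx (8022) goes through the CRB/MMIO
* interface mapped in qla4_8xxx_iospace_config() above. */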
static struct isp_operations qla4xxx_isp_ops = {
.iospace_config = qla4xxx_iospace_config,
.pci_config = qla4xxx_pci_config,
.disable_intrs = qla4xxx_disable_intrs,
.enable_intrs = qla4xxx_enable_intrs,
.start_firmware = qla4xxx_start_firmware,
.intr_handler = qla4xxx_intr_handler,
.interrupt_service_routine = qla4xxx_interrupt_service_routine,
.reset_chip = qla4xxx_soft_reset,
.reset_firmware = qla4xxx_hw_reset,
.queue_iocb = qla4xxx_queue_iocb,
.complete_iocb = qla4xxx_complete_iocb,
.rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
.rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
.get_sys_info = qla4xxx_get_sys_info,
};
static struct isp_operations qla4_8xxx_isp_ops = {
.iospace_config = qla4_8xxx_iospace_config,
.pci_config = qla4_8xxx_pci_config,
.disable_intrs = qla4_8xxx_disable_intrs,
.enable_intrs = qla4_8xxx_enable_intrs,
.start_firmware = qla4_8xxx_load_risc,
.intr_handler = qla4_8xxx_intr_handler,
.interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
.reset_chip = qla4_8xxx_isp_reset,
.reset_firmware = qla4_8xxx_stop_firmware,
.queue_iocb = qla4_8xxx_queue_iocb,
.complete_iocb = qla4_8xxx_complete_iocb,
.rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
.rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
.get_sys_info = qla4_8xxx_get_sys_info,
};
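/* Queue-pointer accessors: ISP-4xxx reads the request-out and
* response-in indices from the DMA shadow registers in host memory,
* while ISP-82xx reads them from memory-mapped chip registers. */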
uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
}
uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
{
return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
}
uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
}
uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
}
static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
{
struct scsi_qla_host *ha = data;
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
rc = sysfs_format_mac(str, ha->my_mac,
MAC_ADDR_LEN);
break;
default:
rc = -ENOSYS;
break;
}
return rc;
}
static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
{
int rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
case ISCSI_BOOT_ETH_MAC:
case ISCSI_BOOT_ETH_INDEX:
rc = S_IRUGO;
break;
default:
rc = 0;
break;
}
return rc;
}
static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
{
struct scsi_qla_host *ha = data;
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
rc = sprintf(str, "%s\n", ha->name_string);
break;
default:
rc = -ENOSYS;
break;
}
return rc;
}
static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
{
int rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
rc = S_IRUGO;
break;
default:
rc = 0;
break;
}
return rc;
}
static ssize_t
qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
char *buf)
{
struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
if (boot_conn->dest_ipaddr.ip_type == 0x1)
rc = sprintf(str, "%pI4\n",
&boot_conn->dest_ipaddr.ip_address);
else
rc = sprintf(str, "%pI6\n",
&boot_conn->dest_ipaddr.ip_address);
break;
case ISCSI_BOOT_TGT_PORT:
rc = sprintf(str, "%d\n", boot_conn->dest_port);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
rc = sprintf(str, "%.*s\n",
boot_conn->chap.target_chap_name_length,
(char *)&boot_conn->chap.target_chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
rc = sprintf(str, "%.*s\n",
boot_conn->chap.target_secret_length,
(char *)&boot_conn->chap.target_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
rc = sprintf(str, "%.*s\n",
boot_conn->chap.intr_chap_name_length,
(char *)&boot_conn->chap.intr_chap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
rc = sprintf(str, "%.*s\n",
boot_conn->chap.intr_secret_length,
(char *)&boot_conn->chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
break;
default:
rc = -ENOSYS;
break;
}
return rc;
}
static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
{
struct scsi_qla_host *ha = data;
struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
{
struct scsi_qla_host *ha = data;
struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
{
int rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
case ISCSI_BOOT_TGT_IP_ADDR:
case ISCSI_BOOT_TGT_PORT:
case ISCSI_BOOT_TGT_CHAP_NAME:
case ISCSI_BOOT_TGT_CHAP_SECRET:
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_FLAGS:
rc = S_IRUGO;
break;
default:
rc = 0;
break;
}
return rc;
}
static void qla4xxx_boot_release(void *data)
{
struct scsi_qla_host *ha = data;
scsi_host_put(ha->host);
}
static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
{
dma_addr_t buf_dma;
uint32_t addr, pri_addr, sec_addr;
uint32_t offset;
uint16_t func_num;
uint8_t val;
uint8_t *buf = NULL;
size_t size = 13 * sizeof(uint8_t);
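/* 13-byte flash boot-parameter block (ISP-82xx path); per the
* parsing below, byte 1 holds the boot options and bytes 2 and 11
* hold the primary and secondary boot target indices. */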
int ret = QLA_SUCCESS;
func_num = PCI_FUNC(ha->pdev->devfn);
ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
__func__, ha->pdev->device, func_num);
if (is_qla40XX(ha)) {
if (func_num == 1) {
addr = NVRAM_PORT0_BOOT_MODE;
pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
} else if (func_num == 3) {
addr = NVRAM_PORT1_BOOT_MODE;
pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
} else {
ret = QLA_ERROR;
goto exit_boot_info;
}
/* Check Boot Mode */
val = rd_nvram_byte(ha, addr);
if (!(val & 0x07)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
"options : 0x%x\n", __func__, val));
ret = QLA_ERROR;
goto exit_boot_info;
}
/* get primary valid target index */
val = rd_nvram_byte(ha, pri_addr);
if (val & BIT_7)
ddb_index[0] = (val & 0x7f);
/* get secondary valid target index */
val = rd_nvram_byte(ha, sec_addr);
if (val & BIT_7)
ddb_index[1] = (val & 0x7f);
} else if (is_qla8022(ha)) {
buf = dma_alloc_coherent(&ha->pdev->dev, size,
&buf_dma, GFP_KERNEL);
if (!buf) {
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n",
__func__));
ret = QLA_ERROR;
goto exit_boot_info;
}
if (ha->port_num == 0)
offset = BOOT_PARAM_OFFSET_PORT0;
else if (ha->port_num == 1)
offset = BOOT_PARAM_OFFSET_PORT1;
else {
ret = QLA_ERROR;
goto exit_boot_info_free;
}
addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
offset;
if (qla4xxx_get_flash(ha, buf_dma, addr,
13 * sizeof(uint8_t)) != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
" failed\n", ha->host_no, __func__));
ret = QLA_ERROR;
goto exit_boot_info_free;
}
/* Check Boot Mode */
if (!(buf[1] & 0x07)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
" : 0x%x\n", buf[1]));
ret = QLA_ERROR;
goto exit_boot_info_free;
}
/* get primary valid target index */
if (buf[2] & BIT_7)
ddb_index[0] = buf[2] & 0x7f;
/* get secondary valid target index */
if (buf[11] & BIT_7)
ddb_index[1] = buf[11] & 0x7f;
} else {
ret = QLA_ERROR;
goto exit_boot_info;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
ha->pri_ddb_idx = ddb_index[0];
ha->sec_ddb_idx = ddb_index[1];
return ret;
}
/**
* qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
* @ha: pointer to adapter structure
* @username: CHAP username to be returned
* @password: CHAP password to be returned
*
* If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
* user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
* So find the first BIDI CHAP entry in the CHAP cache and copy it
* into the boot record in sysfs.
**/
static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
char *password)
{
int i, ret = -EINVAL;
int max_chap_entries = 0;
struct ql4_chap_table *chap_table;
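/* On ISP-82xx the flash CHAP region is shared by both ports, so
* only half of it belongs to this function. */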
if (is_qla8022(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
return ret;
}
mutex_lock(&ha->chap_sem);
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if (chap_table->cookie !=
__constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
continue;
}
if (chap_table->flags & BIT_7) /* local */
continue;
if (!(chap_table->flags & BIT_6)) /* Not BIDI */
continue;
strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
ret = 0;
break;
}
mutex_unlock(&ha->chap_sem);
return ret;
}
static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
struct ql4_boot_session_info *boot_sess,
uint16_t ddb_index)
{
struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
struct dev_db_entry *fw_ddb_entry;
dma_addr_t fw_ddb_entry_dma;
uint16_t idx;
uint16_t options;
int ret = QLA_SUCCESS;
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer.\n",
__func__));
ret = QLA_ERROR;
return ret;
}
if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
fw_ddb_entry_dma, ddb_index)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
"index [%d]\n", __func__, ddb_index));
ret = QLA_ERROR;
goto exit_boot_target;
}
/* Update target name and IP from DDB */
memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
min(sizeof(boot_sess->target_name),
sizeof(fw_ddb_entry->iscsi_name)));
options = le16_to_cpu(fw_ddb_entry->options);
if (options & DDB_OPT_IPV6_DEVICE) {
memcpy(&boot_conn->dest_ipaddr.ip_address,
&fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
} else {
boot_conn->dest_ipaddr.ip_type = 0x1;
memcpy(&boot_conn->dest_ipaddr.ip_address,
&fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
}
boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
/* update chap information */
idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
target_chap_name,
(char *)&boot_conn->chap.target_secret,
idx);
if (ret) {
ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
ret = QLA_ERROR;
goto exit_boot_target;
}
boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
}
if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
ret = qla4xxx_get_bidi_chap(ha,
(char *)&boot_conn->chap.intr_chap_name,
(char *)&boot_conn->chap.intr_secret);
if (ret) {
ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
ret = QLA_ERROR;
goto exit_boot_target;
}
boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
}
exit_boot_target:
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
{
uint16_t ddb_index[2];
int ret = QLA_ERROR;
int rval;
memset(ddb_index, 0, sizeof(ddb_index));
ddb_index[0] = 0xffff;
ddb_index[1] = 0xffff;
ret = get_fw_boot_info(ha, ddb_index);
if (ret != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: No boot target configured.\n", __func__));
return ret;
}
if (ql4xdisablesysfsboot)
return QLA_SUCCESS;
if (ddb_index[0] == 0xffff)
goto sec_target;
rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
ddb_index[0]);
if (rval != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
"configured\n", __func__));
} else
ret = QLA_SUCCESS;
sec_target:
if (ddb_index[1] == 0xffff)
goto exit_get_boot_info;
rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
ddb_index[1]);
if (rval != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
" configured\n", __func__));
} else
ret = QLA_SUCCESS;
exit_get_boot_info:
return ret;
}
static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
{
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
return QLA_ERROR;
if (ql4xdisablesysfsboot) {
ql4_printk(KERN_INFO, ha,
"%s: syfsboot disabled - driver will trigger login "
"and publish session for discovery .\n", __func__);
return QLA_SUCCESS;
}
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
goto kset_free;
if (!scsi_host_get(ha->host))
goto kset_free;
boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
qla4xxx_show_boot_tgt_pri_info,
qla4xxx_tgt_get_attr_visibility,
qla4xxx_boot_release);
if (!boot_kobj)
goto put_host;
if (!scsi_host_get(ha->host))
goto kset_free;
boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
qla4xxx_show_boot_tgt_sec_info,
qla4xxx_tgt_get_attr_visibility,
qla4xxx_boot_release);
if (!boot_kobj)
goto put_host;
if (!scsi_host_get(ha->host))
goto kset_free;
boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
qla4xxx_show_boot_ini_info,
qla4xxx_ini_get_attr_visibility,
qla4xxx_boot_release);
if (!boot_kobj)
goto put_host;
if (!scsi_host_get(ha->host))
goto kset_free;
boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
qla4xxx_show_boot_eth_info,
qla4xxx_eth_get_attr_visibility,
qla4xxx_boot_release);
if (!boot_kobj)
goto put_host;
return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
kset_free:
iscsi_boot_destroy_kset(ha->boot_kset);
return -ENOMEM;
}
/**
* qla4xxx_create_chap_list - Create CHAP list from FLASH
* @ha: pointer to adapter structure
*
* Read flash and make a list of CHAP entries. During login, when a CHAP
* entry is received, it is checked against this list. If the entry exists
* then the CHAP entry index is set in the DDB. If the CHAP entry does not
* exist in this list then a new entry is added in FLASH in the CHAP table
* and the index obtained is used in the DDB.
**/
static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
{
int rval = 0;
uint8_t *chap_flash_data = NULL;
uint32_t offset;
dma_addr_t chap_dma;
uint32_t chap_size = 0;
if (is_qla40XX(ha))
chap_size = MAX_CHAP_ENTRIES_40XX *
sizeof(struct ql4_chap_table);
else /* A single flash region contains the CHAP info for
* both ports; it is split in half, one half per port.
*/
chap_size = ha->hw.flt_chap_size / 2;
chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
&chap_dma, GFP_KERNEL);
if (!chap_flash_data) {
ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
return;
}
if (is_qla40XX(ha))
offset = FLASH_CHAP_OFFSET;
else {
offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
if (ha->port_num == 1)
offset += chap_size;
}
rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
if (rval != QLA_SUCCESS)
goto exit_chap_list;
if (ha->chap_list == NULL)
ha->chap_list = vmalloc(chap_size);
if (ha->chap_list == NULL) {
ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
goto exit_chap_list;
}
memcpy(ha->chap_list, chap_flash_data, chap_size);
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
}
static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
struct ql4_tuple_ddb *tddb)
{
struct scsi_qla_host *ha;
struct iscsi_cls_session *cls_sess;
struct iscsi_cls_conn *cls_conn;
struct iscsi_session *sess;
struct iscsi_conn *conn;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
ha = ddb_entry->ha;
cls_sess = ddb_entry->sess;
sess = cls_sess->dd_data;
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
tddb->tpgt = sess->tpgt;
tddb->port = conn->persistent_port;
strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
}
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
struct ql4_tuple_ddb *tddb)
{
uint16_t options = 0;
tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
options = le16_to_cpu(fw_ddb_entry->options);
if (options & DDB_OPT_IPV6_DEVICE)
sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
else
sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
tddb->port = le16_to_cpu(fw_ddb_entry->port);
memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
}
static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
struct ql4_tuple_ddb *old_tddb,
struct ql4_tuple_ddb *new_tddb,
uint8_t is_isid_compare)
{
if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
return QLA_ERROR;
if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
return QLA_ERROR;
if (old_tddb->port != new_tddb->port)
return QLA_ERROR;
/* For multi sessions, the driver generates the ISID, so do not compare
* the ISID in the reset path since it would be a comparison between the
* driver generated ISID and the firmware generated ISID. This could
* lead to adding duplicate DDBs to the list, as the driver generated
* ISID would not match the firmware generated ISID.
*/
if (is_isid_compare) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
__func__, old_tddb->isid[5], old_tddb->isid[4],
old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
new_tddb->isid[0]));
if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
sizeof(old_tddb->isid)))
return QLA_ERROR;
}
DEBUG2(ql4_printk(KERN_INFO, ha,
"Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
new_tddb->ip_addr, new_tddb->iscsi_name));
return QLA_SUCCESS;
}
static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry)
{
struct ddb_entry *ddb_entry;
struct ql4_tuple_ddb *fw_tddb = NULL;
struct ql4_tuple_ddb *tmp_tddb = NULL;
int idx;
int ret = QLA_ERROR;
fw_tddb = vzalloc(sizeof(*fw_tddb));
if (!fw_tddb) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed.\n"));
ret = QLA_SUCCESS;
goto exit_check;
}
tmp_tddb = vzalloc(sizeof(*tmp_tddb));
if (!tmp_tddb) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed.\n"));
ret = QLA_SUCCESS;
goto exit_check;
}
qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
if (ddb_entry == NULL)
continue;
qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
ret = QLA_SUCCESS; /* found */
goto exit_check;
}
}
exit_check:
if (fw_tddb)
vfree(fw_tddb);
if (tmp_tddb)
vfree(tmp_tddb);
return ret;
}
static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
struct list_head *list_nt,
struct dev_db_entry *fw_ddb_entry)
{
struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
struct ql4_tuple_ddb *fw_tddb = NULL;
struct ql4_tuple_ddb *tmp_tddb = NULL;
int ret = QLA_ERROR;
fw_tddb = vzalloc(sizeof(*fw_tddb));
if (!fw_tddb) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed.\n"));
ret = QLA_SUCCESS;
goto exit_check;
}
tmp_tddb = vzalloc(sizeof(*tmp_tddb));
if (!tmp_tddb) {
DEBUG2(ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed.\n"));
ret = QLA_SUCCESS;
goto exit_check;
}
qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
ret = QLA_SUCCESS; /* found */
goto exit_check;
}
}
exit_check:
if (fw_tddb)
vfree(fw_tddb);
if (tmp_tddb)
vfree(tmp_tddb);
return ret;
}
static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
{
struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
list_del_init(&ddb_idx->list);
vfree(ddb_idx);
}
}
static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry)
{
struct iscsi_endpoint *ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
struct sockaddr *dst_addr;
char *ip;
/* TODO: the iscsi_endpoint needs to be destroyed on unload */
dst_addr = vmalloc(sizeof(*dst_addr));
if (!dst_addr)
return NULL;
if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
dst_addr->sa_family = AF_INET6;
addr6 = (struct sockaddr_in6 *)dst_addr;
ip = (char *)&addr6->sin6_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
} else {
dst_addr->sa_family = AF_INET;
addr = (struct sockaddr_in *)dst_addr;
ip = (char *)&addr->sin_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
}
ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
vfree(dst_addr);
return ep;
}
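/* Boot-target DDBs are published through the sysfs boot code, so
* treat them as reserved here unless sysfs boot is disabled. */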
static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
{
if (ql4xdisablesysfsboot)
return QLA_SUCCESS;
if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
return QLA_ERROR;
return QLA_SUCCESS;
}
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
uint16_t def_timeout;
ddb_entry->ddb_type = FLASH_DDB;
ddb_entry->fw_ddb_index = INVALID_ENTRY;
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->relogin_retry_count, 0);
def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
ddb_entry->default_relogin_timeout =
(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
}
static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
{
uint32_t idx = 0;
uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
uint32_t sts[MBOX_REG_COUNT];
uint32_t ip_state;
unsigned long wtime;
int ret;
wtime = jiffies + (HZ * IP_CONFIG_TOV);
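/* Poll each interface until it reaches a terminal address state or
* the timeout expires; entries set to -1 no longer need polling. */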
do {
for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
if (ip_idx[idx] == -1)
continue;
ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
if (ret == QLA_ERROR) {
ip_idx[idx] = -1;
continue;
}
ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Waiting for IP state for idx = %d, state = 0x%x\n",
ip_idx[idx], ip_state));
if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
ip_state == IP_ADDRSTATE_INVALID ||
ip_state == IP_ADDRSTATE_PREFERRED ||
ip_state == IP_ADDRSTATE_DEPRICATED ||
ip_state == IP_ADDRSTATE_DISABLING)
ip_idx[idx] = -1;
}
/* Break if all IP states checked */
if ((ip_idx[0] == -1) &&
(ip_idx[1] == -1) &&
(ip_idx[2] == -1) &&
(ip_idx[3] == -1))
break;
schedule_timeout_uninterruptible(HZ);
} while (time_after(wtime, jiffies));
}
static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
struct list_head *list_st)
{
struct qla_ddb_index *st_ddb_idx;
int max_ddbs;
int fw_idx_size;
struct dev_db_entry *fw_ddb_entry;
dma_addr_t fw_ddb_dma;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&fw_ddb_dma);
if (fw_ddb_entry == NULL) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
goto exit_st_list;
}
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
fw_idx_size = sizeof(struct qla_ddb_index);
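/* Walk the firmware DDB table; a next_idx of 0 marks the end of
* the chain. */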
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
NULL, &next_idx, &state,
&conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
/* Ignore DDB if invalid state (unassigned) */
if (state == DDB_DS_UNASSIGNED)
goto continue_next_st;
/* Check if ST, add to the list_st */
if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
goto continue_next_st;
st_ddb_idx = vzalloc(fw_idx_size);
if (!st_ddb_idx)
break;
st_ddb_idx->fw_ddb_idx = idx;
list_add_tail(&st_ddb_idx->list, list_st);
continue_next_st:
if (next_idx == 0)
break;
}
exit_st_list:
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
/**
* qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
* @ha: pointer to adapter structure
* @list_ddb: List from which failed ddbs are to be removed
*
* Iterate over the list of DDBs and find and remove DDBs that are either in
* no connection active state or failed state
**/
static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
struct list_head *list_ddb)
{
struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
uint32_t next_idx = 0;
uint32_t state = 0, conn_err = 0;
int ret;
list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
NULL, 0, NULL, &next_idx, &state,
&conn_err, NULL, NULL);
if (ret == QLA_ERROR)
continue;
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
list_del_init(&ddb_idx->list);
vfree(ddb_idx);
}
}
}
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
int is_reset)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
struct iscsi_cls_conn *cls_conn;
struct iscsi_endpoint *ep;
uint16_t cmds_max = 32;
uint16_t conn_id = 0;
uint32_t initial_cmdsn = 0;
int ret = QLA_SUCCESS;
struct ddb_entry *ddb_entry = NULL;
/* Create session object with INVALID_ENTRY;
* the target_id will get set when we issue the login
*/
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
cmds_max, sizeof(struct ddb_entry),
sizeof(struct ql4_task_data),
initial_cmdsn, INVALID_ENTRY);
if (!cls_sess) {
ret = QLA_ERROR;
goto exit_setup;
}
/*
* iscsi_session_setup increments the driver reference count, which
* would prevent the driver from being unloaded, so call module_put
* to decrement the reference count again.
**/
module_put(qla4xxx_iscsi_transport.owner);
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->sess = cls_sess;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
sizeof(struct dev_db_entry));
qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
if (!cls_conn) {
ret = QLA_ERROR;
goto exit_setup;
}
ddb_entry->conn = cls_conn;
/* Setup ep, for displaying attributes in sysfs */
ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
if (ep) {
ep->conn = cls_conn;
cls_conn->ep = ep;
} else {
DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
ret = QLA_ERROR;
goto exit_setup;
}
/* Update sess/conn params */
qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
if (is_reset == RESET_ADAPTER) {
iscsi_block_session(cls_sess);
/* Use the relogin path to discover new devices
* by short-circuiting the logic of setting the
* timer to relogin - instead set the flags
* to initiate login right away.
*/
set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
set_bit(DF_RELOGIN, &ddb_entry->flags);
}
exit_setup:
return ret;
}
static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
struct list_head *list_nt, int is_reset)
{
struct dev_db_entry *fw_ddb_entry;
dma_addr_t fw_ddb_dma;
int max_ddbs;
int fw_idx_size;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
uint16_t conn_id = 0;
struct qla_ddb_index *nt_ddb_idx;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&fw_ddb_dma);
if (fw_ddb_entry == NULL) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
goto exit_nt_list;
}
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
fw_idx_size = sizeof(struct qla_ddb_index);
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
NULL, &next_idx, &state,
&conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
goto continue_next_nt;
/* Check if NT, then add it to the list */
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt;
if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED))
goto continue_next_nt;
DEBUG2(ql4_printk(KERN_INFO, ha,
"Adding DDB to session = 0x%x\n", idx));
if (is_reset == INIT_ADAPTER) {
nt_ddb_idx = vmalloc(fw_idx_size);
if (!nt_ddb_idx)
break;
nt_ddb_idx->fw_ddb_idx = idx;
memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
sizeof(struct dev_db_entry));
if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
fw_ddb_entry) == QLA_SUCCESS) {
vfree(nt_ddb_idx);
goto continue_next_nt;
}
list_add_tail(&nt_ddb_idx->list, list_nt);
} else if (is_reset == RESET_ADAPTER) {
if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
QLA_SUCCESS)
goto continue_next_nt;
}
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
if (ret == QLA_ERROR)
goto exit_nt_list;
continue_next_nt:
if (next_idx == 0)
break;
}
exit_nt_list:
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
/**
* qla4xxx_build_ddb_list - Build ddb list and setup sessions
* @ha: pointer to adapter structure
* @is_reset: Is this init path or reset path
*
* Create a list of sendtargets (st) from firmware DDBs, issue send targets
* using connection open, then create the list of normal targets (nt)
 * from firmware DDBs. Based on the nt list, set up session and
 * connection objects.
**/
void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
{
uint16_t tmo = 0;
struct list_head list_st, list_nt;
struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
unsigned long wtime;
if (!test_bit(AF_LINK_UP, &ha->flags)) {
set_bit(AF_BUILD_DDB_LIST, &ha->flags);
ha->is_reset = is_reset;
return;
}
INIT_LIST_HEAD(&list_st);
INIT_LIST_HEAD(&list_nt);
qla4xxx_build_st_list(ha, &list_st);
	/* Before issuing the conn open mbox, ensure all IP states are
	 * configured. Note, conn open fails if IPs are not configured.
	 */
qla4xxx_wait_for_ip_configuration(ha);
/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
}
	/* Wait for the sendtargets to complete: use ha->def_timeout when it
	 * lies between LOGIN_TOV and ten times LOGIN_TOV, otherwise fall
	 * back to LOGIN_TOV (min 12 sec wait).
	 */
tmo = ((ha->def_timeout > LOGIN_TOV) &&
(ha->def_timeout < LOGIN_TOV * 10) ?
ha->def_timeout : LOGIN_TOV);
DEBUG2(ql4_printk(KERN_INFO, ha,
"Default time to wait for build ddb %d\n", tmo));
wtime = jiffies + (HZ * tmo);
do {
if (list_empty(&list_st))
break;
qla4xxx_remove_failed_ddb(ha, &list_st);
schedule_timeout_uninterruptible(HZ / 10);
} while (time_after(wtime, jiffies));
/* Free up the sendtargets list */
qla4xxx_free_ddb_list(&list_st);
qla4xxx_build_nt_list(ha, &list_nt, is_reset);
qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha);
}
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
 * @ent: pointer to pci_device_id entry
*
* This routine will probe for Qlogic 4xxx iSCSI host adapters.
* It returns zero if successful. It also initializes all data necessary for
* the driver.
**/
static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret = -ENODEV, status;
struct Scsi_Host *host;
struct scsi_qla_host *ha;
uint8_t init_retry_count = 0;
char buf[34];
struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
uint32_t dev_state;
if (pci_enable_device(pdev))
return -1;
host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
if (host == NULL) {
printk(KERN_WARNING
"qla4xxx: Couldn't allocate host from scsi layer!\n");
goto probe_disable_device;
}
/* Clear our data area */
ha = to_qla_host(host);
memset(ha, 0, sizeof(*ha));
/* Save the information from PCI BIOS. */
ha->pdev = pdev;
ha->host = host;
ha->host_no = host->host_no;
pci_enable_pcie_error_reporting(pdev);
/* Setup Runtime configurable options */
if (is_qla8022(ha)) {
ha->isp_ops = &qla4_8xxx_isp_ops;
rwlock_init(&ha->hw_lock);
ha->qdr_sn_window = -1;
ha->ddr_mn_window = -1;
ha->curr_window = 255;
ha->func_num = PCI_FUNC(ha->pdev->devfn);
nx_legacy_intr = &legacy_intr[ha->func_num];
ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
ha->nx_legacy_intr.tgt_status_reg =
nx_legacy_intr->tgt_status_reg;
ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
} else {
ha->isp_ops = &qla4xxx_isp_ops;
}
/* Set EEH reset type to fundamental if required by hba */
if (is_qla8022(ha))
pdev->needs_freset = 1;
/* Configure PCI I/O space. */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
goto probe_failed_ioconfig;
ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
pdev->device, pdev->irq, ha->reg);
qla4xxx_config_dma_addressing(ha);
/* Initialize lists and spinlocks. */
INIT_LIST_HEAD(&ha->free_srb_q);
mutex_init(&ha->mbox_sem);
mutex_init(&ha->chap_sem);
init_completion(&ha->mbx_intr_comp);
init_completion(&ha->disable_acb_comp);
spin_lock_init(&ha->hardware_lock);
/* Initialize work list */
INIT_LIST_HEAD(&ha->work_list);
/* Allocate dma buffers */
if (qla4xxx_mem_alloc(ha)) {
ql4_printk(KERN_WARNING, ha,
"[ERROR] Failed to allocate memory for adapter\n");
ret = -ENOMEM;
goto probe_failed;
}
host->cmd_per_lun = 3;
host->max_channel = 0;
host->max_lun = MAX_LUNS - 1;
host->max_id = MAX_TARGETS;
host->max_cmd_len = IOCB_MAX_CDB_LEN;
host->can_queue = MAX_SRBS ;
host->transportt = qla4xxx_scsi_transport;
ret = scsi_init_shared_tag_map(host, MAX_SRBS);
if (ret) {
ql4_printk(KERN_WARNING, ha,
"%s: scsi_init_shared_tag_map failed\n", __func__);
goto probe_failed;
}
pci_set_drvdata(pdev, ha);
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
if (is_qla8022(ha))
(void) qla4_8xxx_get_flash_info(ha);
/*
* Initialize the Host adapter request/response queues and
* firmware
* NOTE: interrupts enabled upon successful completion
*/
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
if (is_qla8022(ha)) {
qla4_8xxx_idc_lock(ha);
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
qla4_8xxx_idc_unlock(ha);
if (dev_state == QLA82XX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "%s: don't retry "
"initialize adapter. H/W is in failed state\n",
__func__);
break;
}
}
DEBUG2(printk("scsi: %s: retrying adapter initialization "
"(%d)\n", __func__, init_retry_count));
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
if (is_qla8022(ha) && ql4xdontresethba) {
/* Put the device in failed state. */
DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
qla4_8xxx_idc_lock(ha);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
qla4_8xxx_idc_unlock(ha);
}
ret = -ENODEV;
goto remove_host;
}
/* Startup the kernel thread for this host adapter. */
DEBUG2(printk("scsi: %s: Starting kernel thread for "
"qla4xxx_dpc\n", __func__));
sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
ha->dpc_thread = create_singlethread_workqueue(buf);
if (!ha->dpc_thread) {
ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
ret = -ENODEV;
goto remove_host;
}
INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
if (!ha->task_wq) {
ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
ret = -ENODEV;
goto remove_host;
}
/* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
* (which is called indirectly by qla4xxx_initialize_adapter),
* so that irqs will be registered after crbinit but before
* mbx_intr_enable.
*/
if (!is_qla8022(ha)) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to reserve "
"interrupt %d already in use.\n", pdev->irq);
goto remove_host;
}
}
pci_save_state(ha->pdev);
ha->isp_ops->enable_intrs(ha);
/* Start timer thread. */
qla4xxx_start_timer(ha, qla4xxx_timer, 1);
set_bit(AF_INIT_DONE, &ha->flags);
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha,
"%s: No iSCSI boot target configured\n", __func__);
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
qla4xxx_create_chap_list(ha);
qla4xxx_create_ifaces(ha);
return 0;
remove_host:
scsi_remove_host(ha->host);
probe_failed:
qla4xxx_free_adapter(ha);
probe_failed_ioconfig:
pci_disable_pcie_error_reporting(pdev);
scsi_host_put(ha->host);
probe_disable_device:
pci_disable_device(pdev);
return ret;
}
/**
* qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
* @ha: pointer to adapter structure
*
* Mark the other ISP-4xxx port to indicate that the driver is being removed,
* so that the other port will not re-initialize while in the process of
* removing the ha due to driver unload or hba hotplug.
**/
static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
{
struct scsi_qla_host *other_ha = NULL;
struct pci_dev *other_pdev = NULL;
int fn = ISP4XXX_PCI_FN_2;
	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
fn = ISP4XXX_PCI_FN_1;
other_pdev =
pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
fn));
	/* Get other_ha if other_pdev is valid and its state is enabled */
if (other_pdev) {
if (atomic_read(&other_pdev->enable_cnt)) {
other_ha = pci_get_drvdata(other_pdev);
if (other_ha) {
set_bit(AF_HA_REMOVAL, &other_ha->flags);
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
"Prevent %s reinit\n", __func__,
dev_name(&other_ha->pdev->dev)));
}
}
pci_dev_put(other_pdev);
}
}
static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
{
struct ddb_entry *ddb_entry;
int options;
int idx;
for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
if ((ddb_entry != NULL) &&
(ddb_entry->ddb_type == FLASH_DDB)) {
options = LOGOUT_OPTION_CLOSE_SESSION;
if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
== QLA_ERROR)
ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
__func__);
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
			/*
			 * The driver reference count was decremented when the
			 * session was set up, so that driver unload could be
			 * seamless without actually destroying the session;
			 * take the reference back before tearing it down.
			 **/
try_module_get(qla4xxx_iscsi_transport.owner);
iscsi_destroy_endpoint(ddb_entry->conn->ep);
qla4xxx_free_ddb(ha, ddb_entry);
iscsi_session_teardown(ddb_entry->sess);
}
}
}
/**
 * qla4xxx_remove_adapter - callback function to remove adapter.
 * @pdev: PCI device pointer
**/
static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
{
struct scsi_qla_host *ha;
ha = pci_get_drvdata(pdev);
if (!is_qla8022(ha))
qla4xxx_prevent_other_port_reinit(ha);
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
qla4xxx_destroy_fw_ddb_session(ha);
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
scsi_host_put(ha->host);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
/**
* qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
*
 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 * supported addressing method.
*/
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
int retval;
/* Update our PCI device dma_mask for full 64 bit mask */
if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
dev_dbg(&ha->pdev->dev,
"Failed to set 64 bit PCI consistent mask; "
"using 32 bit.\n");
retval = pci_set_consistent_dma_mask(ha->pdev,
DMA_BIT_MASK(32));
}
} else
retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
}
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
struct ddb_entry *ddb;
int queue_depth = QL4_DEF_QDEPTH;
cls_sess = starget_to_session(sdev->sdev_target);
sess = cls_sess->dd_data;
ddb = sess->dd_data;
sdev->hostdata = ddb;
sdev->tagged_supported = 1;
if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
queue_depth = ql4xmaxqdepth;
scsi_activate_tcq(sdev, queue_depth);
return 0;
}
static int qla4xxx_slave_configure(struct scsi_device *sdev)
{
sdev->tagged_supported = 1;
return 0;
}
static void qla4xxx_slave_destroy(struct scsi_device *sdev)
{
scsi_deactivate_tcq(sdev, 1);
}
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.
* @index: index into the active_array
*
* This routine removes and returns the srb at the specified index
**/
struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
uint32_t index)
{
struct srb *srb = NULL;
struct scsi_cmnd *cmd = NULL;
cmd = scsi_host_find_tag(ha->host, index);
if (!cmd)
return srb;
srb = (struct srb *)CMD_SP(cmd);
if (!srb)
return srb;
/* update counters */
if (srb->flags & SRB_DMA_VALID) {
ha->req_q_count += srb->iocb_cnt;
ha->iocb_cnt -= srb->iocb_cnt;
if (srb->cmd)
srb->cmd->host_scribble =
(unsigned char *)(unsigned long) MAX_SRBS;
}
return srb;
}
/**
* qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
* @ha: Pointer to host adapter structure.
* @cmd: Scsi Command to wait on.
*
* This routine waits for the command to be returned by the Firmware
* for some max time.
**/
static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
struct scsi_cmnd *cmd)
{
int done = 0;
struct srb *rp;
uint32_t max_wait_time = EH_WAIT_CMD_TOV;
int ret = SUCCESS;
	/* Don't wait on the command if a PCI error is being handled
	 * by the PCI AER driver
	 */
if (unlikely(pci_channel_offline(ha->pdev)) ||
(test_bit(AF_EEH_BUSY, &ha->flags))) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
ha->host_no, __func__);
return ret;
}
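	/* Poll every 2 seconds for up to EH_WAIT_CMD_TOV iterations
	 * (roughly 2 * EH_WAIT_CMD_TOV seconds in total). */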
do {
		/* Check to see if it has been returned to the OS */
rp = (struct srb *) CMD_SP(cmd);
if (rp == NULL) {
done++;
break;
}
msleep(2000);
} while (max_wait_time--);
return done;
}
/**
* qla4xxx_wait_for_hba_online - waits for HBA to come online
* @ha: Pointer to host adapter structure
**/
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
{
unsigned long wait_online;
wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
while (time_before(jiffies, wait_online)) {
if (adapter_up(ha))
return QLA_SUCCESS;
msleep(2000);
}
return QLA_ERROR;
}
/**
* qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
* @ha: pointer to HBA
 * @stgt: pointer to the SCSI target
 * @sdev: pointer to the SCSI device; NULL means all devices on the target
 *
 * This function waits for all outstanding commands to the given target
 * (or to a single device, when @sdev is set) to complete. It returns 0
 * if all pending commands are returned and 1 otherwise.
**/
static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
struct scsi_target *stgt,
struct scsi_device *sdev)
{
int cnt;
int status = 0;
struct scsi_cmnd *cmd;
	/*
	 * Wait for all commands to the designated target or device
	 * in the active array
	 */
for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
cmd = scsi_host_find_tag(ha->host, cnt);
if (cmd && stgt == scsi_target(cmd->device) &&
(!sdev || sdev == cmd->device)) {
if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
status++;
break;
}
}
}
return status;
}
/**
* qla4xxx_eh_abort - callback for abort task.
* @cmd: Pointer to Linux's SCSI command structure
*
* This routine is called by the Linux OS to abort the specified
* command.
**/
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
unsigned int id = cmd->device->id;
unsigned int lun = cmd->device->lun;
unsigned long flags;
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;
ql4_printk(KERN_INFO, ha,
"scsi%ld:%d:%d: Abort command issued cmd=%p\n",
ha->host_no, id, lun, cmd);
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return SUCCESS;
}
kref_get(&srb->srb_ref);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
ha->host_no, id, lun));
ret = FAILED;
} else {
DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
ha->host_no, id, lun));
wait = 1;
}
kref_put(&srb->srb_ref, qla4xxx_srb_compl);
/* Wait for command to complete */
if (wait) {
if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
ha->host_no, id, lun));
ret = FAILED;
}
}
ql4_printk(KERN_INFO, ha,
"scsi%ld:%d:%d: Abort command - %s\n",
ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
return ret;
}
/**
* qla4xxx_eh_device_reset - callback for target reset.
* @cmd: Pointer to Linux's SCSI command structure
*
* This routine is called by the Linux OS to reset all luns on the
* specified target.
**/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int ret = FAILED, stat;
if (!ddb_entry)
return ret;
ret = iscsi_block_scsi_eh(cmd);
if (ret)
return ret;
ret = FAILED;
ql4_printk(KERN_INFO, ha,
"scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
cmd->device->channel, cmd->device->id, cmd->device->lun);
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
/* FIXME: wait for hba to go online */
stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
if (stat != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
goto eh_dev_reset_done;
}
if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
cmd->device)) {
ql4_printk(KERN_INFO, ha,
"DEVICE RESET FAILED - waiting for "
"commands.\n");
goto eh_dev_reset_done;
}
/* Send marker. */
if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
MM_LUN_RESET) != QLA_SUCCESS)
goto eh_dev_reset_done;
ql4_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
ha->host_no, cmd->device->channel, cmd->device->id,
cmd->device->lun);
ret = SUCCESS;
eh_dev_reset_done:
return ret;
}
/**
* qla4xxx_eh_target_reset - callback for target reset.
* @cmd: Pointer to Linux's SCSI command structure
*
* This routine is called by the Linux OS to reset the target.
**/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int stat, ret;
if (!ddb_entry)
return FAILED;
ret = iscsi_block_scsi_eh(cmd);
if (ret)
return ret;
starget_printk(KERN_INFO, scsi_target(cmd->device),
"WARM TARGET RESET ISSUED.\n");
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
stat = qla4xxx_reset_target(ha, ddb_entry);
if (stat != QLA_SUCCESS) {
starget_printk(KERN_INFO, scsi_target(cmd->device),
"WARM TARGET RESET FAILED.\n");
return FAILED;
}
if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
NULL)) {
starget_printk(KERN_INFO, scsi_target(cmd->device),
"WARM TARGET DEVICE RESET FAILED - "
"waiting for commands.\n");
return FAILED;
}
/* Send marker. */
if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
MM_TGT_WARM_RESET) != QLA_SUCCESS) {
starget_printk(KERN_INFO, scsi_target(cmd->device),
"WARM TARGET DEVICE RESET FAILED - "
"marker iocb failed.\n");
return FAILED;
}
starget_printk(KERN_INFO, scsi_target(cmd->device),
"WARM TARGET RESET SUCCEEDED.\n");
return SUCCESS;
}
/**
* qla4xxx_is_eh_active - check if error handler is running
* @shost: Pointer to SCSI Host struct
*
 * This routine determines whether host reset was invoked from the EH
 * path or from an application such as sg_reset.
**/
static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
{
if (shost->shost_state == SHOST_RECOVERY)
return 1;
return 0;
}
/**
* qla4xxx_eh_host_reset - kernel callback
* @cmd: Pointer to Linux's SCSI command structure
*
* This routine is invoked by the Linux kernel to perform fatal error
* recovery on the specified adapter.
**/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
int return_status = FAILED;
struct scsi_qla_host *ha;
ha = to_qla_host(cmd->device->host);
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
/* Clear outstanding srb in queues */
if (qla4xxx_is_eh_active(cmd->device->host))
qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
return FAILED;
}
ql4_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
cmd->device->channel, cmd->device->id, cmd->device->lun);
if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
"DEAD.\n", ha->host_no, cmd->device->channel,
__func__));
return FAILED;
}
if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (is_qla8022(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
return_status = SUCCESS;
ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
return_status == FAILED ? "FAILED" : "SUCCEEDED");
return return_status;
}
static int qla4xxx_context_reset(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
struct addr_ctrl_blk_def *acb = NULL;
uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
int rval = QLA_SUCCESS;
dma_addr_t acb_dma;
acb = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct addr_ctrl_blk_def),
&acb_dma, GFP_KERNEL);
if (!acb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
__func__);
rval = -ENOMEM;
goto exit_port_reset;
}
memset(acb, 0, acb_len);
rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
if (rval != QLA_SUCCESS) {
rval = -EIO;
goto exit_free_acb;
}
rval = qla4xxx_disable_acb(ha);
if (rval != QLA_SUCCESS) {
rval = -EIO;
goto exit_free_acb;
}
wait_for_completion_timeout(&ha->disable_acb_comp,
DISABLE_ACB_TOV * HZ);
rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
if (rval != QLA_SUCCESS) {
rval = -EIO;
goto exit_free_acb;
}
exit_free_acb:
dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
acb, acb_dma);
exit_port_reset:
DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
return rval;
}
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
{
struct scsi_qla_host *ha = to_qla_host(shost);
int rval = QLA_SUCCESS;
if (ql4xdontresethba) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
__func__));
rval = -EPERM;
goto exit_host_reset;
}
rval = qla4xxx_wait_for_hba_online(ha);
if (rval != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
"adapter\n", __func__));
rval = -EIO;
goto exit_host_reset;
}
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
goto recover_adapter;
switch (reset_type) {
case SCSI_ADAPTER_RESET:
set_bit(DPC_RESET_HA, &ha->dpc_flags);
break;
case SCSI_FIRMWARE_RESET:
if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (is_qla8022(ha))
/* set firmware context reset */
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else {
rval = qla4xxx_context_reset(ha);
goto exit_host_reset;
}
}
break;
}
recover_adapter:
rval = qla4xxx_recover_adapter(ha);
if (rval != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
__func__));
rval = -EIO;
}
exit_host_reset:
return rval;
}
/* PCI AER driver recovers from all correctable errors w/o
* driver intervention. For uncorrectable errors PCI AER
* driver calls the following device driver's callbacks
*
* - Fatal Errors - link_reset
* - Non-Fatal Errors - driver's pci_error_detected() which
* returns CAN_RECOVER, NEED_RESET or DISCONNECT.
*
* PCI AER driver calls
* CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
* returns RECOVERED or NEED_RESET if fw_hung
* NEED_RESET - driver's slot_reset()
* DISCONNECT - device is dead & cannot recover
* RECOVERED - driver's pci_resume()
*/
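/* The concrete callback table implementing this contract is
 * qla4xxx_err_handler, defined near the end of this file.
 */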
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
ha->host_no, __func__, state);
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
switch (state) {
case pci_channel_io_normal:
clear_bit(AF_EEH_BUSY, &ha->flags);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
set_bit(AF_EEH_BUSY, &ha->flags);
qla4xxx_mailbox_premature_completion(ha);
qla4xxx_free_irqs(ha);
pci_disable_device(pdev);
/* Return back all IOs */
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
set_bit(AF_EEH_BUSY, &ha->flags);
set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
/**
* qla4xxx_pci_mmio_enabled() gets called if
* qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
* and read/write to the device still works.
**/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
return PCI_ERS_RESULT_RECOVERED;
}
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;
ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
if (test_bit(AF_ONLINE, &ha->flags)) {
clear_bit(AF_ONLINE, &ha->flags);
clear_bit(AF_LINK_UP, &ha->flags);
iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
}
fn = PCI_FUNC(ha->pdev->devfn);
while (fn > 0) {
fn--;
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
"func %x\n", ha->host_no, __func__, fn);
/* Get the pci device given the domain, bus,
* slot/function number */
other_pdev =
pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
fn));
if (!other_pdev)
continue;
if (atomic_read(&other_pdev->enable_cnt)) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
"func in enabled state%x\n", ha->host_no,
__func__, fn);
pci_dev_put(other_pdev);
break;
}
pci_dev_put(other_pdev);
}
	/* The first function on the card (the reset owner) will start and
	 * initialize the firmware; the other functions on the card will
	 * only reset the firmware context.
	 */
if (!fn) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
"0x%x is the owner\n", ha->host_no, __func__,
ha->pdev->devfn);
qla4_8xxx_idc_lock(ha);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_COLD);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
QLA82XX_IDC_VERSION);
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"FAILED\n", ha->host_no, __func__);
qla4_8xxx_clear_drv_active(ha);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
"READY\n", ha->host_no, __func__);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
/* Clear driver state register */
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to "
"reserve interrupt %d already in use.\n",
ha->pdev->irq);
rval = QLA_ERROR;
} else {
ha->isp_ops->enable_intrs(ha);
rval = QLA_SUCCESS;
}
}
qla4_8xxx_idc_unlock(ha);
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
"the reset owner\n", ha->host_no, __func__,
ha->pdev->devfn);
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to"
" reserve interrupt %d already in"
" use.\n", ha->pdev->irq);
rval = QLA_ERROR;
} else {
ha->isp_ops->enable_intrs(ha);
rval = QLA_SUCCESS;
}
}
qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
qla4_8xxx_idc_unlock(ha);
}
}
clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
return rval;
}
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
int rc;
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
ha->host_no, __func__);
if (!is_aer_supported(ha))
return PCI_ERS_RESULT_NONE;
/* Restore the saved state of PCIe device -
* BAR registers, PCI Config space, PCIX, MSI,
* IOV states
*/
pci_restore_state(pdev);
	/* pci_restore_state() clears the saved_state flag of the device,
	 * so save the state again so that it can be restored after the
	 * next reset.
	 */
pci_save_state(pdev);
/* Initialize device or resume if in suspended state */
rc = pci_enable_device(pdev);
if (rc) {
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
"device after reset\n", ha->host_no, __func__);
goto exit_slot_reset;
}
ha->isp_ops->disable_intrs(ha);
	if (is_qla8022(ha)) {
		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS)
			ret = PCI_ERS_RESULT_RECOVERED;
		goto exit_slot_reset;
	}
exit_slot_reset:
	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
	    ha->host_no, __func__, ret);
return ret;
}
static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
struct scsi_qla_host *ha = pci_get_drvdata(pdev);
int ret;
ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
ha->host_no, __func__);
ret = qla4xxx_wait_for_hba_online(ha);
if (ret != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
"resume I/O from slot/link_reset\n", ha->host_no,
__func__);
}
pci_cleanup_aer_uncorrect_error_status(pdev);
clear_bit(AF_EEH_BUSY, &ha->flags);
}
static struct pci_error_handlers qla4xxx_err_handler = {
.error_detected = qla4xxx_pci_error_detected,
.mmio_enabled = qla4xxx_pci_mmio_enabled,
.slot_reset = qla4xxx_pci_slot_reset,
.resume = qla4xxx_pci_resume,
};
static struct pci_device_id qla4xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4010,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4022,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4032,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP8022,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
static struct pci_driver qla4xxx_pci_driver = {
.name = DRIVER_NAME,
.id_table = qla4xxx_pci_tbl,
.probe = qla4xxx_probe_adapter,
.remove = qla4xxx_remove_adapter,
.err_handler = &qla4xxx_err_handler,
};
static int __init qla4xxx_module_init(void)
{
int ret;
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
printk(KERN_ERR
"%s: Unable to allocate SRB cache..."
"Failing load!\n", DRIVER_NAME);
ret = -ENOMEM;
goto no_srp_cache;
}
/* Derive version string. */
strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
if (ql4xextended_error_logging)
strcat(qla4xxx_version_str, "-debug");
qla4xxx_scsi_transport =
iscsi_register_transport(&qla4xxx_iscsi_transport);
if (!qla4xxx_scsi_transport){
ret = -ENODEV;
goto release_srb_cache;
}
ret = pci_register_driver(&qla4xxx_pci_driver);
if (ret)
goto unregister_transport;
printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
return 0;
unregister_transport:
iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
kmem_cache_destroy(srb_cachep);
no_srp_cache:
return ret;
}
static void __exit qla4xxx_module_exit(void)
{
pci_unregister_driver(&qla4xxx_pci_driver);
iscsi_unregister_transport(&qla4xxx_iscsi_transport);
kmem_cache_destroy(srb_cachep);
}
module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);
MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
| gpl-2.0 |
zakee94/stellar_msm8226 | drivers/media/video/cx23885/cimax2.c | 8131 | 12963 | /*
* cimax2.c
*
* CIMax2(R) SP2 driver in conjunction with NetUp Dual DVB-S2 CI card
*
* Copyright (C) 2009 NetUP Inc.
* Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
* Copyright (C) 2009 Abylay Ospan <aospan@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
#include "dvb_ca_en50221.h"
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
+-----------+
| Reserved |
+-----------+
bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8
+-------+-------+-------+-------+-------+-------+-------+-------+
| WR# | RD# | | ACK# | ADHI | ADLO | CS1# | CS0# |
+-------+-------+-------+-------+-------+-------+-------+-------+
bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0
+-------+-------+-------+-------+-------+-------+-------+-------+
| DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
+-------+-------+-------+-------+-------+-------+-------+-------+
***/
/* MC417 */
#define NETUP_DATA 0x000000ff
#define NETUP_WR 0x00008000
#define NETUP_RD 0x00004000
#define NETUP_ACK 0x00001000
#define NETUP_ADHI 0x00000800
#define NETUP_ADLO 0x00000400
#define NETUP_CS1 0x00000200
#define NETUP_CS0 0x00000100
#define NETUP_EN_ALL 0x00001000
#define NETUP_CTRL_OFF (NETUP_CS1 | NETUP_CS0 | NETUP_WR | NETUP_RD)
#define NETUP_CI_CTL 0x04
#define NETUP_CI_RD 1
#define NETUP_IRQ_DETAM 0x1
#define NETUP_IRQ_IRQAM 0x4
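/*
 * A typical CAM access (see netup_ci_op_cam() below) drives these bits
 * in three steps: latch the low address byte with NETUP_ADLO, latch the
 * high byte with NETUP_ADHI, then pulse NETUP_RD or NETUP_WR with the
 * module selected via NETUP_CS0/NETUP_CS1, and finally wait for
 * NETUP_ACK in netup_ci_get_mem().
 */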
static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
static unsigned int ci_irq_enable;
module_param(ci_irq_enable, int, 0644);
MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
/* stores all private variables for communication with CI */
struct netup_ci_state {
struct dvb_ca_en50221 ca;
struct mutex ca_mutex;
struct i2c_adapter *i2c_adap;
u8 ci_i2c_addr;
int status;
struct work_struct work;
void *priv;
u8 current_irq_mode;
int current_ci_flag;
unsigned long next_status_checked_time;
};
int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
struct i2c_msg msg[] = {
{
.addr = addr,
.flags = 0,
.buf = ®,
.len = 1
}, {
.addr = addr,
.flags = I2C_M_RD,
.buf = buf,
.len = len
}
};
ret = i2c_transfer(i2c_adap, msg, 2);
if (ret != 2) {
ci_dbg_print("%s: i2c read error, Reg = 0x%02x, Status = %d\n",
__func__, reg, ret);
return -1;
}
ci_dbg_print("%s: i2c read Addr=0x%04x, Reg = 0x%02x, data = %02x\n",
__func__, addr, reg, buf[0]);
return 0;
}
int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
u8 buffer[len + 1];
struct i2c_msg msg = {
.addr = addr,
.flags = 0,
.buf = &buffer[0],
.len = len + 1
};
buffer[0] = reg;
memcpy(&buffer[1], buf, len);
ret = i2c_transfer(i2c_adap, &msg, 1);
if (ret != 1) {
ci_dbg_print("%s: i2c write error, Reg=[0x%02x], Status=%d\n",
__func__, reg, ret);
return -1;
}
return 0;
}
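/* Poll MC417_RWD until the CAM deasserts NETUP_ACK or ~1 ms elapses,
 * then release the control lines and return the data byte. */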
int netup_ci_get_mem(struct cx23885_dev *dev)
{
int mem;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
for (;;) {
mem = cx_read(MC417_RWD);
if ((mem & NETUP_ACK) == 0)
break;
if (time_after(jiffies, timeout))
break;
udelay(1);
}
cx_set(MC417_RWD, NETUP_CTRL_OFF);
return mem & 0xff;
}
int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 data)
{
struct netup_ci_state *state = en50221->data;
struct cx23885_tsport *port = state->priv;
struct cx23885_dev *dev = port->dev;
u8 store;
int mem;
int ret;
if (0 != slot)
return -EINVAL;
if (state->current_ci_flag != flag) {
ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &store, 1);
if (ret != 0)
return ret;
store &= ~0x0c;
store |= flag;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &store, 1);
if (ret != 0)
return ret;
	}
state->current_ci_flag = flag;
mutex_lock(&dev->gpio_lock);
/* write addr */
cx_write(MC417_OEN, NETUP_EN_ALL);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADLO | (0xff & addr));
cx_clear(MC417_RWD, NETUP_ADLO);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADHI | (0xff & (addr >> 8)));
cx_clear(MC417_RWD, NETUP_ADHI);
if (read) { /* data in */
cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA);
} else /* data out */
cx_write(MC417_RWD, NETUP_CTRL_OFF | data);
/* choose chip */
cx_clear(MC417_RWD,
(state->ci_i2c_addr == 0x40) ? NETUP_CS0 : NETUP_CS1);
/* read/write */
cx_clear(MC417_RWD, (read) ? NETUP_RD : NETUP_WR);
mem = netup_ci_get_mem(dev);
mutex_unlock(&dev->gpio_lock);
if (!read)
if (mem < 0)
return -EREMOTEIO;
ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__,
(read) ? "read" : "write", state->ci_i2c_addr, addr,
(flag == NETUP_CI_CTL) ? "ctl" : "mem",
(read) ? mem : data);
if (read)
return mem;
return 0;
}
int netup_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr)
{
return netup_ci_op_cam(en50221, slot, 0, NETUP_CI_RD, addr, 0);
}
int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr, u8 data)
{
return netup_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL,
NETUP_CI_RD, addr, 0);
}
int netup_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
u8 addr, u8 data)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, 0, addr, data);
}
int netup_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
u8 buf = 0x80;
int ret;
if (0 != slot)
return -EINVAL;
udelay(500);
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
if (ret != 0)
return ret;
udelay(500);
buf = 0x00;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
msleep(1000);
dvb_ca_en50221_camready_irq(&state->ca, 0);
return 0;
}
int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
{
struct netup_ci_state *state = en50221->data;
int ret;
if (irq_mode == state->current_irq_mode)
return 0;
ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n",
__func__, state->ci_i2c_addr, irq_mode);
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x1b, &irq_mode, 1);
if (ret != 0)
return ret;
state->current_irq_mode = irq_mode;
return 0;
}
int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
u8 buf;
if (0 != slot)
return -EINVAL;
netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
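	/* set bits 5 and 6 of the module control register to enable the TS path */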
buf |= 0x60;
return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
}
/* work handler */
static void netup_read_ci_status(struct work_struct *work)
{
struct netup_ci_state *state =
container_of(work, struct netup_ci_state, work);
u8 buf[33];
int ret;
/* CAM module IRQ processing. fast operation */
dvb_ca_en50221_frda_irq(&state->ca, 0);
/* CAM module INSERT/REMOVE processing. slow operation because of i2c
* transfers */
if (time_after(jiffies, state->next_status_checked_time)
|| !state->status) {
ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf[0], 33);
state->next_status_checked_time = jiffies
+ msecs_to_jiffies(1000);
if (ret != 0)
return;
ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
"Reg=[0x%02x], data=%02x, "
"TS config = %02x\n", __func__,
state->ci_i2c_addr, 0, buf[0],
buf[0]);
if (buf[0] & 1)
state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY;
else
state->status = 0;
}
}
/* CI irq handler */
int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status)
{
struct cx23885_tsport *port = NULL;
struct netup_ci_state *state = NULL;
ci_dbg_print("%s:\n", __func__);
if (0 == (pci_status & (PCI_MSK_GPIO0 | PCI_MSK_GPIO1)))
return 0;
if (pci_status & PCI_MSK_GPIO0) {
port = &dev->ts1;
state = port->port_priv;
schedule_work(&state->work);
ci_dbg_print("%s: Wakeup CI0\n", __func__);
}
if (pci_status & PCI_MSK_GPIO1) {
port = &dev->ts2;
state = port->port_priv;
schedule_work(&state->work);
ci_dbg_print("%s: Wakeup CI1\n", __func__);
}
return 1;
}
int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
{
struct netup_ci_state *state = en50221->data;
if (0 != slot)
return -EINVAL;
netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags())
: NETUP_IRQ_DETAM);
return state->status;
}
int netup_ci_init(struct cx23885_tsport *port)
{
struct netup_ci_state *state;
u8 cimax_init[34] = {
0x00, /* module A control*/
0x00, /* auto select mask high A */
0x00, /* auto select mask low A */
0x00, /* auto select pattern high A */
0x00, /* auto select pattern low A */
0x44, /* memory access time A */
0x00, /* invert input A */
0x00, /* RFU */
0x00, /* RFU */
0x00, /* module B control*/
0x00, /* auto select mask high B */
0x00, /* auto select mask low B */
0x00, /* auto select pattern high B */
0x00, /* auto select pattern low B */
0x44, /* memory access time B */
0x00, /* invert input B */
0x00, /* RFU */
0x00, /* RFU */
0x00, /* auto select mask high Ext */
0x00, /* auto select mask low Ext */
0x00, /* auto select pattern high Ext */
0x00, /* auto select pattern low Ext */
0x00, /* RFU */
0x02, /* destination - module A */
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
0x00, /* LOCK = 0 */
0x33, /* serial mode, rising in, rising out, MSB first*/
0x31, /* synchronization */
};
int ret;
ci_dbg_print("%s\n", __func__);
state = kzalloc(sizeof(struct netup_ci_state), GFP_KERNEL);
if (!state) {
ci_dbg_print("%s: Unable create CI structure!\n", __func__);
ret = -ENOMEM;
goto err;
}
port->port_priv = state;
switch (port->nr) {
case 1:
state->ci_i2c_addr = 0x40;
break;
case 2:
state->ci_i2c_addr = 0x41;
break;
}
state->i2c_adap = &port->dev->i2c_bus[0].i2c_adap;
state->ca.owner = THIS_MODULE;
state->ca.read_attribute_mem = netup_ci_read_attribute_mem;
state->ca.write_attribute_mem = netup_ci_write_attribute_mem;
state->ca.read_cam_control = netup_ci_read_cam_ctl;
state->ca.write_cam_control = netup_ci_write_cam_ctl;
state->ca.slot_reset = netup_ci_slot_reset;
state->ca.slot_shutdown = netup_ci_slot_shutdown;
state->ca.slot_ts_enable = netup_ci_slot_ts_ctl;
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
/* lock registers */
ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x1f, &cimax_init[0x18], 1);
/* power on slots */
ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x18, &cimax_init[0x18], 1);
if (0 != ret)
goto err;
ret = dvb_ca_en50221_init(&port->frontends.adapter,
&state->ca,
/* flags */ 0,
/* n_slots */ 1);
if (0 != ret)
goto err;
INIT_WORK(&state->work, netup_read_ci_status);
schedule_work(&state->work);
ci_dbg_print("%s: CI initialized!\n", __func__);
return 0;
err:
ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
kfree(state);
return ret;
}
void netup_ci_exit(struct cx23885_tsport *port)
{
struct netup_ci_state *state;
if (NULL == port)
return;
state = (struct netup_ci_state *)port->port_priv;
if (NULL == state)
return;
if (NULL == state->ca.data)
return;
dvb_ca_en50221_release(&state->ca);
kfree(state);
}
| gpl-2.0 |
arasilinux/kernel-3.2.0 | fs/nls/nls_ascii.c | 12227 | 5874 | /*
* linux/fs/nls/nls_ascii.c
*
* Charset ascii translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
};
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
};
static const unsigned char *const page_uni2charset[256] = {
page00,
};
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
};
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
*uni = charset2uni[*rawstring];
if (*uni == 0x0000)
return -EINVAL;
return 1;
}
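/*
 * Illustrative sketch (hypothetical helper, not part of this file): how
 * a filesystem would typically drive the two hooks above to turn an
 * on-disk ASCII name into a wchar_t buffer, one byte at a time.  Any
 * byte outside the 7-bit table makes char2uni() return -EINVAL, which
 * aborts the conversion.
 */
static int example_name_to_uni(const unsigned char *name, int len,
			       wchar_t *out)
{
	int i, n;

	for (i = 0; i < len; i++) {
		n = char2uni(&name[i], len - i, &out[i]);
		if (n < 0)
			return n;	/* unmappable byte */
	}
	return len;			/* number of wchar_t produced */
}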
static struct nls_table table = {
.charset = "ascii",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
static int __init init_nls_ascii(void)
{
return register_nls(&table);
}
static void __exit exit_nls_ascii(void)
{
unregister_nls(&table);
}
module_init(init_nls_ascii)
module_exit(exit_nls_ascii)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
ptmr3/SGH-T989_JB | sound/isa/gus/gus_mem_proc.c | 14019 | 3022 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* GUS's memory access via proc filesystem
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/info.h>
struct gus_proc_private {
int rom; /* data are in ROM */
unsigned int address;
unsigned int size;
struct snd_gus_card * gus;
};
static ssize_t snd_gf1_mem_proc_dump(struct snd_info_entry *entry,
void *file_private_data,
struct file *file, char __user *buf,
size_t count, loff_t pos)
{
struct gus_proc_private *priv = entry->private_data;
struct snd_gus_card *gus = priv->gus;
int err;
err = snd_gus_dram_read(gus, buf, pos, count, priv->rom);
if (err < 0)
return err;
return count;
}
static void snd_gf1_mem_proc_free(struct snd_info_entry *entry)
{
struct gus_proc_private *priv = entry->private_data;
kfree(priv);
}
static struct snd_info_entry_ops snd_gf1_mem_proc_ops = {
.read = snd_gf1_mem_proc_dump,
};
int snd_gf1_mem_proc_init(struct snd_gus_card * gus)
{
int idx;
char name[16];
struct gus_proc_private *priv;
struct snd_info_entry *entry;
for (idx = 0; idx < 4; idx++) {
if (gus->gf1.mem_alloc.banks_8[idx].size > 0) {
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
priv->gus = gus;
sprintf(name, "gus-ram-%i", idx);
if (! snd_card_proc_new(gus->card, name, &entry)) {
entry->content = SNDRV_INFO_CONTENT_DATA;
entry->private_data = priv;
entry->private_free = snd_gf1_mem_proc_free;
entry->c.ops = &snd_gf1_mem_proc_ops;
priv->address = gus->gf1.mem_alloc.banks_8[idx].address;
priv->size = entry->size = gus->gf1.mem_alloc.banks_8[idx].size;
}
}
}
for (idx = 0; idx < 4; idx++) {
if (gus->gf1.rom_present & (1 << idx)) {
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
priv->rom = 1;
priv->gus = gus;
sprintf(name, "gus-rom-%i", idx);
if (! snd_card_proc_new(gus->card, name, &entry)) {
entry->content = SNDRV_INFO_CONTENT_DATA;
entry->private_data = priv;
entry->private_free = snd_gf1_mem_proc_free;
entry->c.ops = &snd_gf1_mem_proc_ops;
priv->address = idx * 4096 * 1024;
priv->size = entry->size = gus->gf1.rom_memory;
}
}
}
return 0;
}
| gpl-2.0 |
VirtuOR/OpenTRILL | linux-3.10.7/drivers/infiniband/hw/ipath/ipath_stats.c | 14275 | 11237 | /*
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ipath_kernel.h"
struct infinipath_stats ipath_stats;
/**
* ipath_snap_cntr - snapshot a chip counter
* @dd: the infinipath device
* @creg: the counter to snapshot
*
* called from add_timer and user counter read calls, to deal with
* counters that wrap in "human time". The words sent and received, and
* the packets sent and received are all that we worry about. For now,
* at least, we don't worry about error counters, because if they wrap
* that quickly, we probably don't care. We may eventually just make this
* handle all the counters. word counters can wrap in about 20 seconds
* of full bandwidth traffic, packet counters in a few hours.
*/
u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
{
u32 val, reg64 = 0;
u64 val64;
unsigned long t0, t1;
u64 ret;
t0 = jiffies;
/* If fast increment counters are only 32 bits, snapshot them,
* and maintain them as 64bit values in the driver */
if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
(creg == dd->ipath_cregs->cr_wordsendcnt ||
creg == dd->ipath_cregs->cr_wordrcvcnt ||
creg == dd->ipath_cregs->cr_pktsendcnt ||
creg == dd->ipath_cregs->cr_pktrcvcnt)) {
val64 = ipath_read_creg(dd, creg);
val = val64 == ~0ULL ? ~0U : 0;
reg64 = 1;
} else /* val64 just to keep gcc quiet... */
val64 = val = ipath_read_creg32(dd, creg);
/*
* See if a second has passed. This is just a way to detect things
* that are quite broken. Normally this should take just a few
* cycles (the check is for long enough that we don't care if we get
* pre-empted.) An Opteron HT O read timeout is 4 seconds with
* normal NB values
*/
t1 = jiffies;
if (time_before(t0 + HZ, t1) && val == -1) {
ipath_dev_err(dd, "Error! Read counter 0x%x timed out\n",
creg);
ret = 0ULL;
goto bail;
}
if (reg64) {
ret = val64;
goto bail;
}
if (creg == dd->ipath_cregs->cr_wordsendcnt) {
if (val != dd->ipath_lastsword) {
dd->ipath_sword += val - dd->ipath_lastsword;
dd->ipath_lastsword = val;
}
val64 = dd->ipath_sword;
} else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
if (val != dd->ipath_lastrword) {
dd->ipath_rword += val - dd->ipath_lastrword;
dd->ipath_lastrword = val;
}
val64 = dd->ipath_rword;
} else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
if (val != dd->ipath_lastspkts) {
dd->ipath_spkts += val - dd->ipath_lastspkts;
dd->ipath_lastspkts = val;
}
val64 = dd->ipath_spkts;
} else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
if (val != dd->ipath_lastrpkts) {
dd->ipath_rpkts += val - dd->ipath_lastrpkts;
dd->ipath_lastrpkts = val;
}
val64 = dd->ipath_rpkts;
} else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) {
if (dd->ibdeltainprog)
val64 -= val64 - dd->ibsymsnap;
val64 -= dd->ibsymdelta;
} else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) {
if (dd->ibdeltainprog)
val64 -= val64 - dd->iblnkerrsnap;
val64 -= dd->iblnkerrdelta;
} else
val64 = (u64) val;
ret = val64;
bail:
return ret;
}
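/*
 * Illustrative sketch (not part of the driver): the wrap handling in
 * ipath_snap_cntr() above boils down to this pattern.  Unsigned 32-bit
 * subtraction yields the correct delta even when the hardware counter
 * wraps, so the 64-bit shadow only ever moves forward.  The names
 * (shadow64, last32) are hypothetical.
 */
static inline u64 example_snap64(u64 *shadow64, u32 *last32, u32 now)
{
	*shadow64 += (u32)(now - *last32);	/* delta survives a wrap */
	*last32 = now;
	return *shadow64;
}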
/**
* ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
* @dd: the infinipath device
*
* print the delta of egrfull/hdrqfull errors for kernel ports no more than
* every 5 seconds. User processes are printed at close, but kernel doesn't
* close, so... Separate routine so it may be called from other places someday,
* and so the function name is meaningful when printed by _IPATH_INFO
*/
static void ipath_qcheck(struct ipath_devdata *dd)
{
static u64 last_tot_hdrqfull;
struct ipath_portdata *pd = dd->ipath_pd[0];
size_t blen = 0;
char buf[128];
u32 hdrqtail;
*buf = 0;
if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
pd->port_hdrqfull -
dd->ipath_p0_hdrqfull);
dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
}
if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
blen += snprintf(buf + blen, sizeof buf - blen,
"%srcvegrfull %llu",
blen ? ", " : "",
(unsigned long long)
(ipath_stats.sps_etidfull -
dd->ipath_last_tidfull));
dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
}
/*
* this is actually the number of hdrq full interrupts, not actual
* events, but at the moment that's mostly what I'm interested in.
* Actual count, etc. is in the counters, if needed. For production
* users this won't ordinarily be printed.
*/
if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
blen += snprintf(buf + blen, sizeof buf - blen,
"%shdrqfull %llu (all ports)",
blen ? ", " : "",
(unsigned long long)
(ipath_stats.sps_hdrqfull -
last_tot_hdrqfull));
last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
}
if (blen)
ipath_dbg("%s\n", buf);
hdrqtail = ipath_get_hdrqtail(pd);
if (pd->port_head != hdrqtail) {
if (dd->ipath_lastport0rcv_cnt ==
ipath_stats.sps_port0pkts) {
ipath_cdbg(PKT, "missing rcv interrupts? "
"port0 hd=%x tl=%x; port0pkts %llx; write"
" hd (w/intr)\n",
pd->port_head, hdrqtail,
(unsigned long long)
ipath_stats.sps_port0pkts);
ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
dd->ipath_rhdrhead_intr_off, pd->port_port);
}
dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
}
}
static void ipath_chk_errormask(struct ipath_devdata *dd)
{
static u32 fixed;
u32 ctrl;
unsigned long errormask;
unsigned long hwerrs;
if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
return;
errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
if (errormask == dd->ipath_errormask)
return;
fixed++;
hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
dd->ipath_errormask);
if ((hwerrs & dd->ipath_hwerrmask) ||
(ctrl & INFINIPATH_C_FREEZEMODE)) {
/* force re-interrupt of pending events, just in case */
ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
dev_info(&dd->pcidev->dev,
"errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
fixed, errormask, (unsigned long)dd->ipath_errormask,
ctrl, hwerrs);
} else
ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
fixed, errormask,
(unsigned long)dd->ipath_errormask);
}
/**
* ipath_get_faststats - get word counters from chip before they overflow
* @opaque - contains a pointer to the infinipath device ipath_devdata
*
* called from add_timer
*/
void ipath_get_faststats(unsigned long opaque)
{
struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
int i;
static unsigned cnt;
unsigned long flags;
u64 traffic_wds;
/*
* don't access the chip while running diags, or memory diags can
* fail
*/
if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
ipath_diag_inuse)
/* but re-arm the timer, for diags case; won't hurt other */
goto done;
/*
* We now try to maintain a "active timer", based on traffic
* exceeding a threshold, so we need to check the word-counts
* even if they are 64-bit.
*/
traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
traffic_wds -= dd->ipath_traffic_wds;
dd->ipath_traffic_wds += traffic_wds;
if (traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
atomic_add(5, &dd->ipath_active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
}
ipath_qcheck(dd);
/*
* deal with repeat error suppression. Doesn't really matter if
* last error was almost a full interval ago, or just a few usecs
* ago; still won't get more than 2 per interval. We may want
* longer intervals for this eventually, could do with mod, counter
* or separate timer. Also see code in ipath_handle_errors() and
* ipath_handle_hwerrors().
*/
if (dd->ipath_lasterror)
dd->ipath_lasterror = 0;
if (dd->ipath_lasthwerror)
dd->ipath_lasthwerror = 0;
if (dd->ipath_maskederrs
&& time_after(jiffies, dd->ipath_unmasktime)) {
char ebuf[256];
int iserr;
iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
dd->ipath_maskederrs);
if (dd->ipath_maskederrs &
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
INFINIPATH_E_PKTERRS))
ipath_dev_err(dd, "Re-enabling masked errors "
"(%s)\n", ebuf);
else {
/*
* rcvegrfull and rcvhdrqfull are "normal", for some
* types of processes (mostly benchmarks) that send
* huge numbers of messages, while not processing
* them. So only complain about these at debug
* level.
*/
if (iserr)
ipath_dbg(
"Re-enabling queue full errors (%s)\n",
ebuf);
else
ipath_cdbg(ERRPKT, "Re-enabling packet"
" problem interrupt (%s)\n", ebuf);
}
/* re-enable masked errors */
dd->ipath_errormask |= dd->ipath_maskederrs;
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
dd->ipath_errormask);
dd->ipath_maskederrs = 0;
}
/* limit qfull messages to ~one per minute per port */
if ((++cnt & 0x10)) {
for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
struct ipath_portdata *pd = dd->ipath_pd[i];
if (pd && pd->port_lastrcvhdrqtail != -1)
pd->port_lastrcvhdrqtail = -1;
}
}
ipath_chk_errormask(dd);
done:
mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
}
| gpl-2.0 |
bjzhang/linux_arm_xen | drivers/block/paride/fit3.c | 15555 | 4484 | /*
fit3.c (c) 1998 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
fit3.c is a low-level protocol driver for newer models
of the Fidelity International Technology parallel port adapter.
This adapter is used in their TransDisk 3000 portable
hard-drives, as well as CD-ROM, PD-CD and other devices.
The TD-2000 and certain older devices use a different protocol.
Try the fit2 protocol module with them.
NB: The FIT adapters do not appear to support the control
registers. So, we map ALT_STATUS to STATUS and NO-OP writes
to the device control register - this means that IDE reset
will not work on these devices.
*/
#define FIT3_VERSION "1.0"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "paride.h"
#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0))
#define w7(byte) {out_p(7,byte);}
#define r7() (in_p(7) & 0xff)
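/* Illustrative worked example (not in the original source): each status
   read carries a nibble in bits 6..3, so for a = 0x78 and b = 0x3c:
	(0x78 >> 3) & 0x0f = 0x0f	(low nibble)
	(0x3c << 1) & 0xf0 = 0x70	(high nibble)
	j44(0x78, 0x3c)    = 0x7f
*/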
/* cont = 0 - access the IDE register file
cont = 1 - access the IDE command set
*/
static void fit3_write_regr( PIA *pi, int cont, int regr, int val)
{ if (cont == 1) return;
switch (pi->mode) {
case 0:
case 1: w2(0xc); w0(regr); w2(0x8); w2(0xc);
w0(val); w2(0xd);
w0(0); w2(0xc);
break;
case 2: w2(0xc); w0(regr); w2(0x8); w2(0xc);
w4(val); w4(0);
w2(0xc);
break;
}
}
static int fit3_read_regr( PIA *pi, int cont, int regr )
{ int a, b;
if (cont) {
if (regr != 6) return 0xff;
regr = 7;
}
switch (pi->mode) {
case 0: w2(0xc); w0(regr + 0x10); w2(0x8); w2(0xc);
w2(0xd); a = r1();
w2(0xf); b = r1();
w2(0xc);
return j44(a,b);
case 1: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
w2(0xec); w2(0xee); w2(0xef); a = r0();
w2(0xc);
return a;
case 2: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
w2(0xec);
a = r4(); b = r4();
w2(0xc);
return a;
}
return -1;
}
static void fit3_read_block( PIA *pi, char * buf, int count )
{ int k, a, b, c, d;
switch (pi->mode) {
case 0: w2(0xc); w0(0x10); w2(0x8); w2(0xc);
for (k=0;k<count/2;k++) {
w2(0xd); a = r1();
w2(0xf); b = r1();
w2(0xc); c = r1();
w2(0xe); d = r1();
buf[2*k ] = j44(a,b);
buf[2*k+1] = j44(c,d);
}
w2(0xc);
break;
case 1: w2(0xc); w0(0x90); w2(0x8); w2(0xc);
w2(0xec); w2(0xee);
for (k=0;k<count/2;k++) {
w2(0xef); a = r0();
w2(0xee); b = r0();
buf[2*k ] = a;
buf[2*k+1] = b;
}
w2(0xec);
w2(0xc);
break;
case 2: w2(0xc); w0(0x90); w2(0x8); w2(0xc);
w2(0xec);
for (k=0;k<count;k++) buf[k] = r4();
w2(0xc);
break;
}
}
static void fit3_write_block( PIA *pi, char * buf, int count )
{ int k;
switch (pi->mode) {
case 0:
case 1: w2(0xc); w0(0); w2(0x8); w2(0xc);
for (k=0;k<count/2;k++) {
w0(buf[2*k ]); w2(0xd);
w0(buf[2*k+1]); w2(0xc);
}
break;
case 2: w2(0xc); w0(0); w2(0x8); w2(0xc);
for (k=0;k<count;k++) w4(buf[k]);
w2(0xc);
break;
}
}
static void fit3_connect ( PIA *pi )
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xc); w0(0); w2(0xa);
if (pi->mode == 2) {
w2(0xc); w0(0x9); w2(0x8); w2(0xc);
}
}
static void fit3_disconnect ( PIA *pi )
{ w2(0xc); w0(0xa); w2(0x8); w2(0xc);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void fit3_log_adapter( PIA *pi, char * scratch, int verbose )
{ char *mode_string[3] = {"4-bit","8-bit","EPP"};
printk("%s: fit3 %s, FIT 3000 adapter at 0x%x, "
"mode %d (%s), delay %d\n",
pi->device,FIT3_VERSION,pi->port,
pi->mode,mode_string[pi->mode],pi->delay);
}
static struct pi_protocol fit3 = {
.owner = THIS_MODULE,
.name = "fit3",
.max_mode = 3,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = fit3_write_regr,
.read_regr = fit3_read_regr,
.write_block = fit3_write_block,
.read_block = fit3_read_block,
.connect = fit3_connect,
.disconnect = fit3_disconnect,
.log_adapter = fit3_log_adapter,
};
static int __init fit3_init(void)
{
return paride_register(&fit3);
}
static void __exit fit3_exit(void)
{
paride_unregister(&fit3);
}
MODULE_LICENSE("GPL");
module_init(fit3_init)
module_exit(fit3_exit)
| gpl-2.0 |
YogeshNain/linux | sound/soc/codecs/tlv320aic32x4.c | 196 | 26505 | /*
* linux/sound/soc/codecs/tlv320aic32x4.c
*
* Copyright 2011 Vista Silicon S.L.
*
* Author: Javier Martin <javier.martin@vista-silicon.com>
*
* Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <sound/tlv320aic32x4.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "tlv320aic32x4.h"
struct aic32x4_rate_divs {
u32 mclk;
u32 rate;
u8 p_val;
u8 pll_j;
u16 pll_d;
u16 dosr;
u8 ndac;
u8 mdac;
u8 aosr;
u8 nadc;
u8 madc;
u8 blck_N;
};
struct aic32x4_priv {
struct regmap *regmap;
u32 sysclk;
u32 power_cfg;
u32 micpga_routing;
bool swapdacs;
int rstn_gpio;
struct clk *mclk;
struct regulator *supply_ldo;
struct regulator *supply_iov;
struct regulator *supply_dv;
struct regulator *supply_av;
};
/* 0dB min, 0.5dB steps */
static DECLARE_TLV_DB_SCALE(tlv_step_0_5, 0, 50, 0);
/* -63.5dB min, 0.5dB steps */
static DECLARE_TLV_DB_SCALE(tlv_pcm, -6350, 50, 0);
/* -6dB min, 1dB steps */
static DECLARE_TLV_DB_SCALE(tlv_driver_gain, -600, 100, 0);
/* -12dB min, 0.5dB steps */
static DECLARE_TLV_DB_SCALE(tlv_adc_vol, -1200, 50, 0);
static const struct snd_kcontrol_new aic32x4_snd_controls[] = {
SOC_DOUBLE_R_S_TLV("PCM Playback Volume", AIC32X4_LDACVOL,
AIC32X4_RDACVOL, 0, -0x7f, 0x30, 7, 0, tlv_pcm),
SOC_DOUBLE_R_S_TLV("HP Driver Gain Volume", AIC32X4_HPLGAIN,
AIC32X4_HPRGAIN, 0, -0x6, 0x1d, 5, 0,
tlv_driver_gain),
SOC_DOUBLE_R_S_TLV("LO Driver Gain Volume", AIC32X4_LOLGAIN,
AIC32X4_LORGAIN, 0, -0x6, 0x1d, 5, 0,
tlv_driver_gain),
SOC_DOUBLE_R("HP DAC Playback Switch", AIC32X4_HPLGAIN,
AIC32X4_HPRGAIN, 6, 0x01, 1),
SOC_DOUBLE_R("LO DAC Playback Switch", AIC32X4_LOLGAIN,
AIC32X4_LORGAIN, 6, 0x01, 1),
SOC_DOUBLE_R("Mic PGA Switch", AIC32X4_LMICPGAVOL,
AIC32X4_RMICPGAVOL, 7, 0x01, 1),
SOC_SINGLE("ADCFGA Left Mute Switch", AIC32X4_ADCFGA, 7, 1, 0),
SOC_SINGLE("ADCFGA Right Mute Switch", AIC32X4_ADCFGA, 3, 1, 0),
SOC_DOUBLE_R_S_TLV("ADC Level Volume", AIC32X4_LADCVOL,
AIC32X4_RADCVOL, 0, -0x18, 0x28, 6, 0, tlv_adc_vol),
SOC_DOUBLE_R_TLV("PGA Level Volume", AIC32X4_LMICPGAVOL,
AIC32X4_RMICPGAVOL, 0, 0x5f, 0, tlv_step_0_5),
SOC_SINGLE("Auto-mute Switch", AIC32X4_DACMUTE, 4, 7, 0),
SOC_SINGLE("AGC Left Switch", AIC32X4_LAGC1, 7, 1, 0),
SOC_SINGLE("AGC Right Switch", AIC32X4_RAGC1, 7, 1, 0),
SOC_DOUBLE_R("AGC Target Level", AIC32X4_LAGC1, AIC32X4_RAGC1,
4, 0x07, 0),
SOC_DOUBLE_R("AGC Gain Hysteresis", AIC32X4_LAGC1, AIC32X4_RAGC1,
0, 0x03, 0),
SOC_DOUBLE_R("AGC Hysteresis", AIC32X4_LAGC2, AIC32X4_RAGC2,
6, 0x03, 0),
SOC_DOUBLE_R("AGC Noise Threshold", AIC32X4_LAGC2, AIC32X4_RAGC2,
1, 0x1F, 0),
SOC_DOUBLE_R("AGC Max PGA", AIC32X4_LAGC3, AIC32X4_RAGC3,
0, 0x7F, 0),
SOC_DOUBLE_R("AGC Attack Time", AIC32X4_LAGC4, AIC32X4_RAGC4,
3, 0x1F, 0),
SOC_DOUBLE_R("AGC Decay Time", AIC32X4_LAGC5, AIC32X4_RAGC5,
3, 0x1F, 0),
SOC_DOUBLE_R("AGC Noise Debounce", AIC32X4_LAGC6, AIC32X4_RAGC6,
0, 0x1F, 0),
SOC_DOUBLE_R("AGC Signal Debounce", AIC32X4_LAGC7, AIC32X4_RAGC7,
0, 0x0F, 0),
};
static const struct aic32x4_rate_divs aic32x4_divs[] = {
/* 8k rate */
{AIC32X4_FREQ_12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
{AIC32X4_FREQ_24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
{AIC32X4_FREQ_25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
/* 11.025k rate */
{AIC32X4_FREQ_12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
{AIC32X4_FREQ_24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
/* 16k rate */
{AIC32X4_FREQ_12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
{AIC32X4_FREQ_24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
{AIC32X4_FREQ_25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
/* 22.05k rate */
{AIC32X4_FREQ_12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
{AIC32X4_FREQ_24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
{AIC32X4_FREQ_25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
/* 32k rate */
{AIC32X4_FREQ_12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
{AIC32X4_FREQ_24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
/* 44.1k rate */
{AIC32X4_FREQ_12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
{AIC32X4_FREQ_24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
{AIC32X4_FREQ_25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
/* 48k rate */
{AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
{AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
{AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4}
};
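/*
 * Illustrative sketch (hypothetical helper, not part of the driver;
 * assumes <linux/math64.h> for div_u64): every row above satisfies,
 * with the PLL R divider fixed to 1 as aic32x4_hw_params() programs it,
 *	pll_hz = mclk * (pll_j + pll_d / 10000) / p_val
 *	fs     = pll_hz / (ndac * mdac * dosr)
 * e.g. the 12 MHz / 48 kHz row: 12e6 * 8.1920 = 98.304 MHz, and
 * 98304000 / (2 * 8 * 128) = 48000.
 */
static inline u32 example_row_rate(const struct aic32x4_rate_divs *d)
{
	u64 pll = (u64)d->mclk * (d->pll_j * 10000u + d->pll_d);
	u32 pll_hz = (u32)div_u64(pll, 10000u * d->p_val);

	return pll_hz / ((u32)d->ndac * d->mdac * d->dosr);
}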
static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
SOC_DAPM_SINGLE("L_DAC Switch", AIC32X4_HPLROUTE, 3, 1, 0),
SOC_DAPM_SINGLE("IN1_L Switch", AIC32X4_HPLROUTE, 2, 1, 0),
};
static const struct snd_kcontrol_new hpr_output_mixer_controls[] = {
SOC_DAPM_SINGLE("R_DAC Switch", AIC32X4_HPRROUTE, 3, 1, 0),
SOC_DAPM_SINGLE("IN1_R Switch", AIC32X4_HPRROUTE, 2, 1, 0),
};
static const struct snd_kcontrol_new lol_output_mixer_controls[] = {
SOC_DAPM_SINGLE("L_DAC Switch", AIC32X4_LOLROUTE, 3, 1, 0),
};
static const struct snd_kcontrol_new lor_output_mixer_controls[] = {
SOC_DAPM_SINGLE("R_DAC Switch", AIC32X4_LORROUTE, 3, 1, 0),
};
static const struct snd_kcontrol_new left_input_mixer_controls[] = {
SOC_DAPM_SINGLE("IN1_L P Switch", AIC32X4_LMICPGAPIN, 6, 1, 0),
SOC_DAPM_SINGLE("IN2_L P Switch", AIC32X4_LMICPGAPIN, 4, 1, 0),
SOC_DAPM_SINGLE("IN3_L P Switch", AIC32X4_LMICPGAPIN, 2, 1, 0),
};
static const struct snd_kcontrol_new right_input_mixer_controls[] = {
SOC_DAPM_SINGLE("IN1_R P Switch", AIC32X4_RMICPGAPIN, 6, 1, 0),
SOC_DAPM_SINGLE("IN2_R P Switch", AIC32X4_RMICPGAPIN, 4, 1, 0),
SOC_DAPM_SINGLE("IN3_R P Switch", AIC32X4_RMICPGAPIN, 2, 1, 0),
};
static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left Playback", AIC32X4_DACSETUP, 7, 0),
SND_SOC_DAPM_MIXER("HPL Output Mixer", SND_SOC_NOPM, 0, 0,
&hpl_output_mixer_controls[0],
ARRAY_SIZE(hpl_output_mixer_controls)),
SND_SOC_DAPM_PGA("HPL Power", AIC32X4_OUTPWRCTL, 5, 0, NULL, 0),
SND_SOC_DAPM_MIXER("LOL Output Mixer", SND_SOC_NOPM, 0, 0,
&lol_output_mixer_controls[0],
ARRAY_SIZE(lol_output_mixer_controls)),
SND_SOC_DAPM_PGA("LOL Power", AIC32X4_OUTPWRCTL, 3, 0, NULL, 0),
SND_SOC_DAPM_DAC("Right DAC", "Right Playback", AIC32X4_DACSETUP, 6, 0),
SND_SOC_DAPM_MIXER("HPR Output Mixer", SND_SOC_NOPM, 0, 0,
&hpr_output_mixer_controls[0],
ARRAY_SIZE(hpr_output_mixer_controls)),
SND_SOC_DAPM_PGA("HPR Power", AIC32X4_OUTPWRCTL, 4, 0, NULL, 0),
SND_SOC_DAPM_MIXER("LOR Output Mixer", SND_SOC_NOPM, 0, 0,
&lor_output_mixer_controls[0],
ARRAY_SIZE(lor_output_mixer_controls)),
SND_SOC_DAPM_PGA("LOR Power", AIC32X4_OUTPWRCTL, 2, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Left Input Mixer", SND_SOC_NOPM, 0, 0,
&left_input_mixer_controls[0],
ARRAY_SIZE(left_input_mixer_controls)),
SND_SOC_DAPM_MIXER("Right Input Mixer", SND_SOC_NOPM, 0, 0,
&right_input_mixer_controls[0],
ARRAY_SIZE(right_input_mixer_controls)),
SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
SND_SOC_DAPM_ADC("Right ADC", "Right Capture", AIC32X4_ADCSETUP, 6, 0),
SND_SOC_DAPM_MICBIAS("Mic Bias", AIC32X4_MICBIAS, 6, 0),
SND_SOC_DAPM_OUTPUT("HPL"),
SND_SOC_DAPM_OUTPUT("HPR"),
SND_SOC_DAPM_OUTPUT("LOL"),
SND_SOC_DAPM_OUTPUT("LOR"),
SND_SOC_DAPM_INPUT("IN1_L"),
SND_SOC_DAPM_INPUT("IN1_R"),
SND_SOC_DAPM_INPUT("IN2_L"),
SND_SOC_DAPM_INPUT("IN2_R"),
SND_SOC_DAPM_INPUT("IN3_L"),
SND_SOC_DAPM_INPUT("IN3_R"),
};
static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
/* Left Output */
{"HPL Output Mixer", "L_DAC Switch", "Left DAC"},
{"HPL Output Mixer", "IN1_L Switch", "IN1_L"},
{"HPL Power", NULL, "HPL Output Mixer"},
{"HPL", NULL, "HPL Power"},
{"LOL Output Mixer", "L_DAC Switch", "Left DAC"},
{"LOL Power", NULL, "LOL Output Mixer"},
{"LOL", NULL, "LOL Power"},
/* Right Output */
{"HPR Output Mixer", "R_DAC Switch", "Right DAC"},
{"HPR Output Mixer", "IN1_R Switch", "IN1_R"},
{"HPR Power", NULL, "HPR Output Mixer"},
{"HPR", NULL, "HPR Power"},
{"LOR Output Mixer", "R_DAC Switch", "Right DAC"},
{"LOR Power", NULL, "LOR Output Mixer"},
{"LOR", NULL, "LOR Power"},
/* Left input */
{"Left Input Mixer", "IN1_L P Switch", "IN1_L"},
{"Left Input Mixer", "IN2_L P Switch", "IN2_L"},
{"Left Input Mixer", "IN3_L P Switch", "IN3_L"},
{"Left ADC", NULL, "Left Input Mixer"},
/* Right Input */
{"Right Input Mixer", "IN1_R P Switch", "IN1_R"},
{"Right Input Mixer", "IN2_R P Switch", "IN2_R"},
{"Right Input Mixer", "IN3_R P Switch", "IN3_R"},
{"Right ADC", NULL, "Right Input Mixer"},
};
static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
{
.selector_reg = 0,
.selector_mask = 0xff,
.window_start = 0,
.window_len = 128,
.range_min = 0,
.range_max = AIC32X4_RMICPGAVOL,
},
};
static const struct regmap_config aic32x4_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = AIC32X4_RMICPGAVOL,
.ranges = aic32x4_regmap_pages,
.num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
};
static inline int aic32x4_get_divs(int mclk, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(aic32x4_divs); i++) {
if ((aic32x4_divs[i].rate == rate)
&& (aic32x4_divs[i].mclk == mclk)) {
return i;
}
}
printk(KERN_ERR "aic32x4: master clock and sample rate is not supported\n");
return -EINVAL;
}
static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
case AIC32X4_FREQ_12000000:
case AIC32X4_FREQ_24000000:
case AIC32X4_FREQ_25000000:
aic32x4->sysclk = freq;
return 0;
}
printk(KERN_ERR "aic32x4: invalid frequency to set DAI system clock\n");
return -EINVAL;
}
static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
u8 iface_reg_1;
u8 iface_reg_2;
u8 iface_reg_3;
iface_reg_1 = snd_soc_read(codec, AIC32X4_IFACE1);
iface_reg_1 = iface_reg_1 & ~(3 << 6 | 3 << 2);
iface_reg_2 = snd_soc_read(codec, AIC32X4_IFACE2);
iface_reg_2 = 0;
iface_reg_3 = snd_soc_read(codec, AIC32X4_IFACE3);
iface_reg_3 = iface_reg_3 & ~(1 << 3);
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
iface_reg_1 |= AIC32X4_BCLKMASTER | AIC32X4_WCLKMASTER;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
printk(KERN_ERR "aic32x4: invalid DAI master/slave interface\n");
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
break;
case SND_SOC_DAIFMT_DSP_A:
iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
iface_reg_3 |= (1 << 3); /* invert bit clock */
iface_reg_2 = 0x01; /* add offset 1 */
break;
case SND_SOC_DAIFMT_DSP_B:
iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
iface_reg_3 |= (1 << 3); /* invert bit clock */
break;
case SND_SOC_DAIFMT_RIGHT_J:
iface_reg_1 |=
(AIC32X4_RIGHT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
break;
case SND_SOC_DAIFMT_LEFT_J:
iface_reg_1 |=
(AIC32X4_LEFT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
break;
default:
printk(KERN_ERR "aic32x4: invalid DAI interface format\n");
return -EINVAL;
}
snd_soc_write(codec, AIC32X4_IFACE1, iface_reg_1);
snd_soc_write(codec, AIC32X4_IFACE2, iface_reg_2);
snd_soc_write(codec, AIC32X4_IFACE3, iface_reg_3);
return 0;
}
static int aic32x4_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
u8 data;
int i;
i = aic32x4_get_divs(aic32x4->sysclk, params_rate(params));
if (i < 0) {
printk(KERN_ERR "aic32x4: sampling rate not supported\n");
return i;
}
/* Use PLL as CODEC_CLKIN and DAC_MOD_CLK as BDIV_CLKIN */
snd_soc_write(codec, AIC32X4_CLKMUX, AIC32X4_PLLCLKIN);
snd_soc_write(codec, AIC32X4_IFACE3, AIC32X4_DACMOD2BCLK);
/* We fix the R value to 1 and make P and J.D variable */
data = snd_soc_read(codec, AIC32X4_PLLPR);
data &= ~(7 << 4);
snd_soc_write(codec, AIC32X4_PLLPR,
(data | (aic32x4_divs[i].p_val << 4) | 0x01));
snd_soc_write(codec, AIC32X4_PLLJ, aic32x4_divs[i].pll_j);
snd_soc_write(codec, AIC32X4_PLLDMSB, (aic32x4_divs[i].pll_d >> 8));
snd_soc_write(codec, AIC32X4_PLLDLSB,
(aic32x4_divs[i].pll_d & 0xff));
/* NDAC divider value */
data = snd_soc_read(codec, AIC32X4_NDAC);
data &= ~(0x7f);
snd_soc_write(codec, AIC32X4_NDAC, data | aic32x4_divs[i].ndac);
/* MDAC divider value */
data = snd_soc_read(codec, AIC32X4_MDAC);
data &= ~(0x7f);
snd_soc_write(codec, AIC32X4_MDAC, data | aic32x4_divs[i].mdac);
/* DOSR MSB & LSB values */
snd_soc_write(codec, AIC32X4_DOSRMSB, aic32x4_divs[i].dosr >> 8);
snd_soc_write(codec, AIC32X4_DOSRLSB,
(aic32x4_divs[i].dosr & 0xff));
/* NADC divider value */
data = snd_soc_read(codec, AIC32X4_NADC);
data &= ~(0x7f);
snd_soc_write(codec, AIC32X4_NADC, data | aic32x4_divs[i].nadc);
/* MADC divider value */
data = snd_soc_read(codec, AIC32X4_MADC);
data &= ~(0x7f);
snd_soc_write(codec, AIC32X4_MADC, data | aic32x4_divs[i].madc);
/* AOSR value */
snd_soc_write(codec, AIC32X4_AOSR, aic32x4_divs[i].aosr);
/* BCLK N divider */
data = snd_soc_read(codec, AIC32X4_BCLKN);
data &= ~(0x7f);
snd_soc_write(codec, AIC32X4_BCLKN, data | aic32x4_divs[i].blck_N);
data = snd_soc_read(codec, AIC32X4_IFACE1);
data = data & ~(3 << 4);
switch (params_width(params)) {
case 16:
break;
case 20:
data |= (AIC32X4_WORD_LEN_20BITS << AIC32X4_DOSRMSB_SHIFT);
break;
case 24:
data |= (AIC32X4_WORD_LEN_24BITS << AIC32X4_DOSRMSB_SHIFT);
break;
case 32:
data |= (AIC32X4_WORD_LEN_32BITS << AIC32X4_DOSRMSB_SHIFT);
break;
}
snd_soc_write(codec, AIC32X4_IFACE1, data);
if (params_channels(params) == 1) {
data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
} else {
if (aic32x4->swapdacs)
data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
else
data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
}
snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
data);
return 0;
}
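/*
 * Illustrative sketch (hypothetical machine-driver fragment, not part
 * of this codec driver): the codec expects the machine driver to
 * declare MCLK via set_sysclk() before hw_params() runs, so that
 * aic32x4_get_divs() can match the clock against the divider table.
 * The 12 MHz crystal is an assumption for the example.
 */
static int example_machine_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	return snd_soc_dai_set_sysclk(rtd->codec_dai, 0,
				      AIC32X4_FREQ_12000000,
				      SND_SOC_CLOCK_IN);
}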
static int aic32x4_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
u8 dac_reg;
dac_reg = snd_soc_read(codec, AIC32X4_DACMUTE) & ~AIC32X4_MUTEON;
if (mute)
snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg | AIC32X4_MUTEON);
else
snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg);
return 0;
}
static int aic32x4_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
int ret;
switch (level) {
case SND_SOC_BIAS_ON:
/* Switch on master clock */
ret = clk_prepare_enable(aic32x4->mclk);
if (ret) {
dev_err(codec->dev, "Failed to enable master clock\n");
return ret;
}
/* Switch on PLL */
snd_soc_update_bits(codec, AIC32X4_PLLPR,
AIC32X4_PLLEN, AIC32X4_PLLEN);
/* Switch on NDAC Divider */
snd_soc_update_bits(codec, AIC32X4_NDAC,
AIC32X4_NDACEN, AIC32X4_NDACEN);
/* Switch on MDAC Divider */
snd_soc_update_bits(codec, AIC32X4_MDAC,
AIC32X4_MDACEN, AIC32X4_MDACEN);
/* Switch on NADC Divider */
snd_soc_update_bits(codec, AIC32X4_NADC,
AIC32X4_NADCEN, AIC32X4_NADCEN);
/* Switch on MADC Divider */
snd_soc_update_bits(codec, AIC32X4_MADC,
AIC32X4_MADCEN, AIC32X4_MADCEN);
/* Switch on BCLK_N Divider */
snd_soc_update_bits(codec, AIC32X4_BCLKN,
AIC32X4_BCLKEN, AIC32X4_BCLKEN);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
/* Switch off BCLK_N Divider */
snd_soc_update_bits(codec, AIC32X4_BCLKN,
AIC32X4_BCLKEN, 0);
/* Switch off MADC Divider */
snd_soc_update_bits(codec, AIC32X4_MADC,
AIC32X4_MADCEN, 0);
/* Switch off NADC Divider */
snd_soc_update_bits(codec, AIC32X4_NADC,
AIC32X4_NADCEN, 0);
/* Switch off MDAC Divider */
snd_soc_update_bits(codec, AIC32X4_MDAC,
AIC32X4_MDACEN, 0);
/* Switch off NDAC Divider */
snd_soc_update_bits(codec, AIC32X4_NDAC,
AIC32X4_NDACEN, 0);
/* Switch off PLL */
snd_soc_update_bits(codec, AIC32X4_PLLPR,
AIC32X4_PLLEN, 0);
/* Switch off master clock */
clk_disable_unprepare(aic32x4->mclk);
break;
case SND_SOC_BIAS_OFF:
break;
}
return 0;
}
#define AIC32X4_RATES SNDRV_PCM_RATE_8000_48000
#define AIC32X4_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
| SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops aic32x4_ops = {
.hw_params = aic32x4_hw_params,
.digital_mute = aic32x4_mute,
.set_fmt = aic32x4_set_dai_fmt,
.set_sysclk = aic32x4_set_dai_sysclk,
};
static struct snd_soc_dai_driver aic32x4_dai = {
.name = "tlv320aic32x4-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = AIC32X4_RATES,
.formats = AIC32X4_FORMATS,},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
.rates = AIC32X4_RATES,
.formats = AIC32X4_FORMATS,},
.ops = &aic32x4_ops,
.symmetric_rates = 1,
};
static int aic32x4_probe(struct snd_soc_codec *codec)
{
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
u32 tmp_reg;
if (gpio_is_valid(aic32x4->rstn_gpio)) {
ndelay(10);
gpio_set_value(aic32x4->rstn_gpio, 1);
}
snd_soc_write(codec, AIC32X4_RESET, 0x01);
/* Power platform configuration */
if (aic32x4->power_cfg & AIC32X4_PWR_MICBIAS_2075_LDOIN) {
snd_soc_write(codec, AIC32X4_MICBIAS, AIC32X4_MICBIAS_LDOIN |
AIC32X4_MICBIAS_2075V);
}
if (aic32x4->power_cfg & AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE)
snd_soc_write(codec, AIC32X4_PWRCFG, AIC32X4_AVDDWEAKDISABLE);
tmp_reg = (aic32x4->power_cfg & AIC32X4_PWR_AIC32X4_LDO_ENABLE) ?
AIC32X4_LDOCTLEN : 0;
snd_soc_write(codec, AIC32X4_LDOCTL, tmp_reg);
tmp_reg = snd_soc_read(codec, AIC32X4_CMMODE);
if (aic32x4->power_cfg & AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36)
tmp_reg |= AIC32X4_LDOIN_18_36;
if (aic32x4->power_cfg & AIC32X4_PWR_CMMODE_HP_LDOIN_POWERED)
tmp_reg |= AIC32X4_LDOIN2HP;
snd_soc_write(codec, AIC32X4_CMMODE, tmp_reg);
/* Mic PGA routing */
if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K)
snd_soc_write(codec, AIC32X4_LMICPGANIN,
AIC32X4_LMICPGANIN_IN2R_10K);
else
snd_soc_write(codec, AIC32X4_LMICPGANIN,
AIC32X4_LMICPGANIN_CM1L_10K);
if (aic32x4->micpga_routing & AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K)
snd_soc_write(codec, AIC32X4_RMICPGANIN,
AIC32X4_RMICPGANIN_IN1L_10K);
else
snd_soc_write(codec, AIC32X4_RMICPGANIN,
AIC32X4_RMICPGANIN_CM1R_10K);
/*
* Workaround: for an unknown reason, the ADC needs to be powered up
* and down for the first capture to work properly. It seems related to
* a HW BUG or some kind of behavior not documented in the datasheet.
*/
tmp_reg = snd_soc_read(codec, AIC32X4_ADCSETUP);
snd_soc_write(codec, AIC32X4_ADCSETUP, tmp_reg |
AIC32X4_LADC_EN | AIC32X4_RADC_EN);
snd_soc_write(codec, AIC32X4_ADCSETUP, tmp_reg);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
.probe = aic32x4_probe,
.set_bias_level = aic32x4_set_bias_level,
.suspend_bias_off = true,
.controls = aic32x4_snd_controls,
.num_controls = ARRAY_SIZE(aic32x4_snd_controls),
.dapm_widgets = aic32x4_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(aic32x4_dapm_widgets),
.dapm_routes = aic32x4_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(aic32x4_dapm_routes),
};
static int aic32x4_parse_dt(struct aic32x4_priv *aic32x4,
struct device_node *np)
{
aic32x4->swapdacs = false;
aic32x4->micpga_routing = 0;
aic32x4->rstn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
return 0;
}
static void aic32x4_disable_regulators(struct aic32x4_priv *aic32x4)
{
regulator_disable(aic32x4->supply_iov);
if (!IS_ERR(aic32x4->supply_ldo))
regulator_disable(aic32x4->supply_ldo);
if (!IS_ERR(aic32x4->supply_dv))
regulator_disable(aic32x4->supply_dv);
if (!IS_ERR(aic32x4->supply_av))
regulator_disable(aic32x4->supply_av);
}
static int aic32x4_setup_regulators(struct device *dev,
struct aic32x4_priv *aic32x4)
{
int ret = 0;
aic32x4->supply_ldo = devm_regulator_get_optional(dev, "ldoin");
aic32x4->supply_iov = devm_regulator_get(dev, "iov");
aic32x4->supply_dv = devm_regulator_get_optional(dev, "dv");
aic32x4->supply_av = devm_regulator_get_optional(dev, "av");
/* Check if the regulator requirements are fulfilled */
if (IS_ERR(aic32x4->supply_iov)) {
dev_err(dev, "Missing supply 'iov'\n");
return PTR_ERR(aic32x4->supply_iov);
}
if (IS_ERR(aic32x4->supply_ldo)) {
if (PTR_ERR(aic32x4->supply_ldo) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (IS_ERR(aic32x4->supply_dv)) {
dev_err(dev, "Missing supply 'dv' or 'ldoin'\n");
return PTR_ERR(aic32x4->supply_dv);
}
if (IS_ERR(aic32x4->supply_av)) {
dev_err(dev, "Missing supply 'av' or 'ldoin'\n");
return PTR_ERR(aic32x4->supply_av);
}
} else {
if (IS_ERR(aic32x4->supply_dv) &&
PTR_ERR(aic32x4->supply_dv) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (IS_ERR(aic32x4->supply_av) &&
PTR_ERR(aic32x4->supply_av) == -EPROBE_DEFER)
return -EPROBE_DEFER;
}
ret = regulator_enable(aic32x4->supply_iov);
if (ret) {
dev_err(dev, "Failed to enable regulator iov\n");
return ret;
}
if (!IS_ERR(aic32x4->supply_ldo)) {
ret = regulator_enable(aic32x4->supply_ldo);
if (ret) {
dev_err(dev, "Failed to enable regulator ldo\n");
goto error_ldo;
}
}
if (!IS_ERR(aic32x4->supply_dv)) {
ret = regulator_enable(aic32x4->supply_dv);
if (ret) {
dev_err(dev, "Failed to enable regulator dv\n");
goto error_dv;
}
}
if (!IS_ERR(aic32x4->supply_av)) {
ret = regulator_enable(aic32x4->supply_av);
if (ret) {
dev_err(dev, "Failed to enable regulator av\n");
goto error_av;
}
}
if (!IS_ERR(aic32x4->supply_ldo) && IS_ERR(aic32x4->supply_av))
aic32x4->power_cfg |= AIC32X4_PWR_AIC32X4_LDO_ENABLE;
return 0;
error_av:
if (!IS_ERR(aic32x4->supply_dv))
regulator_disable(aic32x4->supply_dv);
error_dv:
if (!IS_ERR(aic32x4->supply_ldo))
regulator_disable(aic32x4->supply_ldo);
error_ldo:
regulator_disable(aic32x4->supply_iov);
return ret;
}
static int aic32x4_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct aic32x4_pdata *pdata = i2c->dev.platform_data;
struct aic32x4_priv *aic32x4;
struct device_node *np = i2c->dev.of_node;
int ret;
aic32x4 = devm_kzalloc(&i2c->dev, sizeof(struct aic32x4_priv),
GFP_KERNEL);
if (aic32x4 == NULL)
return -ENOMEM;
aic32x4->regmap = devm_regmap_init_i2c(i2c, &aic32x4_regmap);
if (IS_ERR(aic32x4->regmap))
return PTR_ERR(aic32x4->regmap);
i2c_set_clientdata(i2c, aic32x4);
if (pdata) {
aic32x4->power_cfg = pdata->power_cfg;
aic32x4->swapdacs = pdata->swapdacs;
aic32x4->micpga_routing = pdata->micpga_routing;
aic32x4->rstn_gpio = pdata->rstn_gpio;
} else if (np) {
ret = aic32x4_parse_dt(aic32x4, np);
if (ret) {
dev_err(&i2c->dev, "Failed to parse DT node\n");
return ret;
}
} else {
aic32x4->power_cfg = 0;
aic32x4->swapdacs = false;
aic32x4->micpga_routing = 0;
aic32x4->rstn_gpio = -1;
}
aic32x4->mclk = devm_clk_get(&i2c->dev, "mclk");
if (IS_ERR(aic32x4->mclk)) {
dev_err(&i2c->dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n");
return PTR_ERR(aic32x4->mclk);
}
if (gpio_is_valid(aic32x4->rstn_gpio)) {
ret = devm_gpio_request_one(&i2c->dev, aic32x4->rstn_gpio,
GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
if (ret != 0)
return ret;
}
ret = aic32x4_setup_regulators(&i2c->dev, aic32x4);
if (ret) {
dev_err(&i2c->dev, "Failed to setup regulators\n");
return ret;
}
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_aic32x4, &aic32x4_dai, 1);
if (ret) {
dev_err(&i2c->dev, "Failed to register codec\n");
aic32x4_disable_regulators(aic32x4);
return ret;
}
return 0;
}
static int aic32x4_i2c_remove(struct i2c_client *client)
{
struct aic32x4_priv *aic32x4 = i2c_get_clientdata(client);
aic32x4_disable_regulators(aic32x4);
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id aic32x4_i2c_id[] = {
{ "tlv320aic32x4", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
static const struct of_device_id aic32x4_of_id[] = {
{ .compatible = "ti,tlv320aic32x4", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, aic32x4_of_id);
static struct i2c_driver aic32x4_i2c_driver = {
.driver = {
.name = "tlv320aic32x4",
.owner = THIS_MODULE,
.of_match_table = aic32x4_of_id,
},
.probe = aic32x4_i2c_probe,
.remove = aic32x4_i2c_remove,
.id_table = aic32x4_i2c_id,
};
module_i2c_driver(aic32x4_i2c_driver);
MODULE_DESCRIPTION("ASoC tlv320aic32x4 codec driver");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
siis/pfwall | drivers/usb/musb/musb_host.c | 196 | 65606 | /*
* MUSB OTG driver host support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
*
* - There's still lots of partial code duplication for fault paths, so
* they aren't handled as consistently as they need to be.
*
* - PIO mostly behaved when last tested.
* + including ep0, with all usbtest cases 9, 10
* + usbtest 14 (ep0out) doesn't seem to run at all
* + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
* configurations, but otherwise double buffering passes basic tests.
* + for 2.6.N, for N > ~10, needs API changes for hcd framework.
*
* - DMA (CPPI) ... partially behaves, not currently recommended
* + about 1/15 the speed of typical EHCI implementations (PCI)
* + RX, all too often reqpkt seems to misbehave after tx
* + TX, no known issues (other than evident silicon issue)
*
* - DMA (Mentor/OMAP) ...has at least toggle update problems
*
* - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
* starvation ... nothing yet for TX, interrupt, or bulk.
*
* - Not tested with HNP, but some SRP paths seem to behave.
*
* NOTE 24-August-2006:
*
* - Bulk traffic finally uses both sides of hardware ep1, freeing up an
* extra endpoint for periodic use enabling hub + keybd + mouse. That
* mostly works, except that with "usbnet" it's easy to trigger cases
* with "ping" where RX loses. (a) ping to davinci, even "ping -f",
* fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
* although ARP RX wins. (That test was done with a full speed link.)
*/
/*
* NOTE on endpoint usage:
*
* CONTROL transfers all go through ep0. BULK ones go through dedicated IN
* and OUT endpoints ... hardware is dedicated for those "async" queue(s).
* (Yes, bulk _could_ use more of the endpoints than that, and would even
* benefit from it.)
*
* INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
* So far that scheduling is both dumb and optimistic: the endpoint will be
* "claimed" until its software queue is no longer refilled. No multiplexing
* of transfers between endpoints, or anything clever.
*/
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len);
/*
* Clear TX fifo. Needed to avoid BABBLE errors.
*/
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
u16 csr;
u16 lastcsr = 0;
int retries = 1000;
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
if (csr != lastcsr)
dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
lastcsr = csr;
csr |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
if (WARN(retries-- < 1,
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
mdelay(1);
}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
void __iomem *epio = ep->regs;
u16 csr;
int retries = 5;
/* scrub any data left in the fifo */
do {
csr = musb_readw(epio, MUSB_TXCSR);
if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
break;
musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
csr = musb_readw(epio, MUSB_TXCSR);
udelay(10);
} while (--retries);
WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr);
/* and reset for the next transfer */
musb_writew(epio, MUSB_TXCSR, 0);
}
/*
* Start transmit. Caller is responsible for locking shared resources.
* musb must be locked.
*/
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
u16 txcsr;
/* NOTE: no locks here; caller should lock and select EP */
if (ep->epnum) {
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
} else {
txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
musb_writew(ep->regs, MUSB_CSR0, txcsr);
}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
u16 txcsr;
/* NOTE: no locks here; caller should lock and select EP */
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
if (is_cppi_enabled())
txcsr |= MUSB_TXCSR_DMAMODE;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
if (is_in != 0 || ep->is_shared_fifo)
ep->in_qh = qh;
if (is_in == 0 || ep->is_shared_fifo)
ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
return is_in ? ep->in_qh : ep->out_qh;
}
/*
* Start the URB at the front of an endpoint's queue
* end must be claimed from the caller.
*
* Context: controller locked, irqs blocked
*/
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
u16 frame;
u32 len;
void __iomem *mbase = musb->mregs;
struct urb *urb = next_urb(qh);
void *buf = urb->transfer_buffer;
u32 offset = 0;
struct musb_hw_ep *hw_ep = qh->hw_ep;
unsigned pipe = urb->pipe;
u8 address = usb_pipedevice(pipe);
int epnum = hw_ep->epnum;
/* initialize software qh state */
qh->offset = 0;
qh->segsize = 0;
/* gather right source of data */
switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL:
/* control transfers always start with SETUP */
is_in = 0;
musb->ep0_stage = MUSB_EP0_START;
buf = urb->setup_packet;
len = 8;
break;
case USB_ENDPOINT_XFER_ISOC:
qh->iso_idx = 0;
qh->frame = 0;
offset = urb->iso_frame_desc[0].offset;
len = urb->iso_frame_desc[0].length;
break;
default: /* bulk, interrupt */
/* actual_length may be nonzero on retry paths */
buf = urb->transfer_buffer + urb->actual_length;
len = urb->transfer_buffer_length - urb->actual_length;
}
dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
qh, urb, address, qh->epnum,
is_in ? "in" : "out",
({char *s; switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
default: s = "-intr"; break;
}; s; }),
epnum, buf + offset, len);
/* Configure endpoint */
musb_ep_set_qh(hw_ep, is_in, qh);
musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
/* transmit may have more work: start it when it is time */
if (is_in)
return;
/* determine if the time is right for a periodic transfer */
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
*/
if ((urb->transfer_flags & URB_ISO_ASAP)
|| (frame >= urb->start_frame)) {
/* REVISIT the SOF irq handler shouldn't duplicate
* this code; and we don't init urb->start_frame...
*/
qh->frame = 0;
goto start;
} else {
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
}
break;
default:
start:
dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
musb_h_tx_start(hw_ep);
else if (is_cppi_enabled() || tusb_dma_omap())
musb_h_tx_dma_start(hw_ep);
}
}
/* Context: caller owns controller lock, IRQs are blocked */
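/*
* Illustrative note (added, not from the original source): the lock is
* dropped around usb_hcd_giveback_urb() because completion handlers may
* legally resubmit URBs and re-enter this HCD; holding musb->lock across
* the callback would deadlock. A typical (hypothetical) completion:
*
*	static void my_complete(struct urb *urb)
*	{
*		if (!urb->status)
*			usb_submit_urb(urb, GFP_ATOMIC); // re-enters the HCD
*	}
*/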
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
dev_dbg(musb->controller,
"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
urb, urb->complete, status,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->actual_length, urb->transfer_buffer_length
);
usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
spin_unlock(&musb->lock);
usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
struct urb *urb)
{
void __iomem *epio = qh->hw_ep->regs;
u16 csr;
/*
* FIXME: the current Mentor DMA code seems to have
* problems getting toggle correct.
*/
if (is_in)
csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
else
csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
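/*
* Example (illustrative): if a bulk-IN URB moved an odd number of
* packets, the endpoint's hardware DATATOGGLE bit is left at 1; saving
* it with usb_settoggle() lets the next URB queued to this endpoint
* resume with the correct DATA1 PID instead of restarting at DATA0.
*/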
/*
* Advance this hardware endpoint's queue, completing the specified URB and
* advancing to either the next URB queued to that qh, or else invalidating
* that qh and advancing to the next qh scheduled after the current one.
*
* Context: caller owns controller lock, IRQs are blocked
*/
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *hw_ep, int is_in)
{
struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
struct musb_hw_ep *ep = qh->hw_ep;
int ready = qh->is_ready;
int status;
status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
/* save toggle eagerly, for paranoia */
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
musb_save_toggle(qh, is_in, urb);
break;
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
status = -EXDEV;
break;
}
qh->is_ready = 0;
musb_giveback(musb, urb, status);
qh->is_ready = ready;
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
if (is_in)
ep->rx_reinit = 1;
else
ep->tx_reinit = 1;
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
qh->hep->hcpriv = NULL;
switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
/* fifo policy for these lists, except that NAKing
* should rotate a qh to the end (for fairness).
*/
if (qh->mux == 1) {
head = qh->ring.prev;
list_del(&qh->ring);
kfree(qh);
qh = first_qh(head);
break;
}
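/* else FALLTHROUGH: a non-muxed control/bulk qh is freed below */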
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
/* this is where periodic bandwidth should be
* de-allocated if it's tracked and allocated;
* and where we'd update the schedule tree...
*/
kfree(qh);
qh = NULL;
break;
}
}
if (qh != NULL && qh->is_ready) {
dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
/* we don't want fifo to fill itself again;
* ignore dma (various models),
* leave toggle alone (may not have been saved yet)
*/
csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
csr &= ~(MUSB_RXCSR_H_REQPKT
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR);
/* write 2x to allow double buffering */
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
/* flush writebuffer */
return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
* PIO RX for a packet (or part of it).
*/
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
u16 rx_count;
u8 *buf;
u16 csr;
bool done = false;
u32 length;
int do_flush = 0;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
int pipe = urb->pipe;
void *buffer = urb->transfer_buffer;
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
/* unload FIFO */
if (usb_pipeisoc(pipe)) {
int status = 0;
struct usb_iso_packet_descriptor *d;
if (iso_err) {
status = -EILSEQ;
urb->error_count++;
}
d = urb->iso_frame_desc + qh->iso_idx;
buf = buffer + d->offset;
length = d->length;
if (rx_count > length) {
if (status == 0) {
status = -EOVERFLOW;
urb->error_count++;
}
dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
do_flush = 1;
} else
length = rx_count;
urb->actual_length += length;
d->actual_length = length;
d->status = status;
/* see if we are done */
done = (++qh->iso_idx >= urb->number_of_packets);
} else {
/* non-isoch */
buf = buffer + qh->offset;
length = urb->transfer_buffer_length - qh->offset;
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
do_flush = 1;
} else
length = rx_count;
urb->actual_length += length;
qh->offset += length;
/* see if we are done */
done = (urb->actual_length == urb->transfer_buffer_length)
|| (rx_count < qh->maxpacket)
|| (urb->status != -EINPROGRESS);
if (done
&& (urb->status == -EINPROGRESS)
&& (urb->transfer_flags & URB_SHORT_NOT_OK)
&& (urb->actual_length
< urb->transfer_buffer_length))
urb->status = -EREMOTEIO;
}
musb_read_fifo(hw_ep, length, buf);
csr = musb_readw(epio, MUSB_RXCSR);
csr |= MUSB_RXCSR_H_WZC_BITS;
if (unlikely(do_flush))
musb_h_flush_rxfifo(hw_ep, csr);
else {
/* REVISIT this assumes AUTOCLEAR is never set */
csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
if (!done)
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, csr);
}
return done;
}
/* we don't always need to reinit a given side of an endpoint...
* when we do, use tx/rx reinit routine and then construct a new CSR
* to address data toggle, NYET, and DMA or PIO.
*
* it's possible that driver bugs (especially for DMA) or aborting a
* transfer might have left the endpoint busier than it should be.
* the busy/not-empty tests are basically paranoia.
*/
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
u16 csr;
/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
* That always uses tx_reinit since ep0 repurposes TX register
* offsets; the initial SETUP packet is also a kind of OUT.
*/
/* if programmed for Tx, put it in RX mode */
if (ep->is_shared_fifo) {
csr = musb_readw(ep->regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_MODE) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(ep->regs, MUSB_TXCSR);
musb_writew(ep->regs, MUSB_TXCSR,
csr | MUSB_TXCSR_FRCDATATOG);
}
/*
* Clear the MODE bit (and everything else) to enable Rx.
* NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
*/
if (csr & MUSB_TXCSR_DMAMODE)
musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
musb_writew(ep->regs, MUSB_TXCSR, 0);
/* scrub all previous state, clearing toggle */
} else {
csr = musb_readw(ep->regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_RXPKTRDY)
WARNING("rx%d, packet/%d ready?\n", ep->epnum,
musb_readw(ep->regs, MUSB_RXCOUNT));
musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
}
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
} else
musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
/* protocol/endpoint, interval/NAKlimit, i/o size */
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
/* Set RXMAXP with the FIFO size of the endpoint
* to disable double buffer mode.
*/
if (musb->double_buffer_not_ok)
musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
else
musb_writew(ep->regs, MUSB_RXMAXP,
qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset, u32 length)
{
struct dma_channel *channel = hw_ep->tx_channel;
void __iomem *epio = hw_ep->regs;
u16 pkt_size = qh->maxpacket;
u16 csr;
u8 mode;
#ifdef CONFIG_USB_INVENTRA_DMA
if (length > channel->max_len)
length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
if (length > pkt_size) {
mode = 1;
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
if (qh->hb_mult == 1)
csr |= MUSB_TXCSR_AUTOSET;
} else {
mode = 0;
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
}
channel->desired_mode = mode;
musb_writew(epio, MUSB_TXCSR, csr);
#else
if (!is_cppi_enabled() && !tusb_dma_omap())
return false;
channel->actual_len = 0;
/*
* TX uses "RNDIS" mode automatically but needs help
* to identify the zero-length-final-packet case.
*/
mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif
qh->segsize = length;
/*
* Ensure the data has reached main memory before starting the
* DMA transfer
*/
wmb();
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
dma->channel_release(channel);
hw_ep->tx_channel = NULL;
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
return false;
}
return true;
}
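/*
* Worked example (illustrative) of the Inventra mode choice above: with
* qh->maxpacket = 512, a 3000-byte URB has length > pkt_size, so mode 1
* (multi-packet) DMA is programmed, with AUTOSET when hb_mult == 1; a
* 100-byte URB takes mode 0, where TXPKTRDY must be raised by software
* after each packet's DMA completes.
*/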
/*
* Program an HDRC endpoint as per the given URB
* Context: irqs blocked, controller lock held
*/
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len)
{
struct dma_controller *dma_controller;
struct dma_channel *dma_channel;
u8 dma_ok;
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
u16 packet_sz = qh->maxpacket;
dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d\n",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
qh->addr_reg, qh->epnum, is_out ? "out" : "in",
qh->h_addr_reg, qh->h_port_reg,
len);
musb_ep_select(mbase, epnum);
/* candidate for DMA? */
dma_controller = musb->dma_controller;
if (is_dma_capable() && epnum && dma_controller) {
dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
if (!dma_channel) {
dma_channel = dma_controller->channel_alloc(
dma_controller, hw_ep, is_out);
if (is_out)
hw_ep->tx_channel = dma_channel;
else
hw_ep->rx_channel = dma_channel;
}
} else
dma_channel = NULL;
/* make sure we clear DMAEnab, autoSet bits from previous run */
/* OUT/transmit/EP0 or IN/receive? */
if (is_out) {
u16 csr;
u16 int_txe;
u16 load_count;
csr = musb_readw(epio, MUSB_TXCSR);
/* disable interrupt in case we flush */
int_txe = musb_readw(mbase, MUSB_INTRTXE);
musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
/* general endpoint setup */
if (epnum) {
/* flush all old state, set default */
musb_h_tx_flush_fifo(hw_ep);
/*
* We must not clear the DMAMODE bit before or in
* the same cycle with the DMAENAB bit, so we clear
* the latter first...
*/
csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
| MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_FRCDATATOG
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_TXPKTRDY
);
csr |= MUSB_TXCSR_MODE;
if (usb_gettoggle(urb->dev, qh->epnum, 1))
csr |= MUSB_TXCSR_H_WR_DATATOGGLE
| MUSB_TXCSR_H_DATATOGGLE;
else
csr |= MUSB_TXCSR_CLRDATATOG;
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
} else {
/* endpoint 0: just flush */
musb_h_ep0_flush_fifo(hw_ep);
}
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
} else
musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
/* protocol/endpoint/interval/NAKlimit */
if (epnum) {
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
else if (can_bulk_split(musb, qh->type))
musb_writew(epio, MUSB_TXMAXP, packet_sz
| ((hw_ep->max_packet_sz_tx /
packet_sz) - 1) << 11);
else
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
((qh->hb_mult - 1) << 11));
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
} else {
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
if (musb->is_multipoint)
musb_writeb(epio, MUSB_TYPE0,
qh->type_reg);
}
if (can_bulk_split(musb, qh->type))
load_count = min((u32) hw_ep->max_packet_sz_tx,
len);
else
load_count = min((u32) packet_sz, len);
if (dma_channel && musb_tx_dma_program(dma_controller,
hw_ep, qh, urb, offset, len))
load_count = 0;
if (load_count) {
/* PIO to load FIFO */
qh->segsize = load_count;
musb_write_fifo(hw_ep, load_count, buf);
}
/* re-enable interrupt */
musb_writew(mbase, MUSB_INTRTXE, int_txe);
/* IN/receive */
} else {
u16 csr;
if (hw_ep->rx_reinit) {
musb_rx_reinit(musb, qh, hw_ep);
/* init new state: toggle and NYET, maybe DMA later */
if (usb_gettoggle(urb->dev, qh->epnum, 0))
csr = MUSB_RXCSR_H_WR_DATATOGGLE
| MUSB_RXCSR_H_DATATOGGLE;
else
csr = 0;
if (qh->type == USB_ENDPOINT_XFER_INT)
csr |= MUSB_RXCSR_DISNYET;
} else {
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
if (csr & (MUSB_RXCSR_RXPKTRDY
| MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_REQPKT))
ERR("broken !rx_reinit, ep%d csr %04x\n",
hw_ep->epnum, csr);
/* scrub any stale state, leaving toggle alone */
csr &= MUSB_RXCSR_DISNYET;
}
/* kick things off */
if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
/* Candidate for DMA */
dma_channel->actual_len = 0L;
qh->segsize = len;
/* AUTOREQ is in a DMA register */
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
/*
* Unless caller treats short RX transfers as
* errors, we dare not queue multiple transfers.
*/
dma_ok = dma_controller->channel_program(dma_channel,
packet_sz, !(urb->transfer_flags &
URB_SHORT_NOT_OK),
urb->transfer_dma + offset,
qh->segsize);
if (!dma_ok) {
dma_controller->channel_release(dma_channel);
hw_ep->rx_channel = dma_channel = NULL;
} else
csr |= MUSB_RXCSR_DMAENAB;
}
csr |= MUSB_RXCSR_H_REQPKT;
dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
}
}
/*
* Service the default endpoint (ep0) as host.
* Return true until it's time to start the status stage.
*/
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
bool more = false;
u8 *fifo_dest = NULL;
u16 fifo_count = 0;
struct musb_hw_ep *hw_ep = musb->control_ep;
struct musb_qh *qh = hw_ep->in_qh;
struct usb_ctrlrequest *request;
switch (musb->ep0_stage) {
case MUSB_EP0_IN:
fifo_dest = urb->transfer_buffer + urb->actual_length;
fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
urb->actual_length);
if (fifo_count < len)
urb->status = -EOVERFLOW;
musb_read_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
if (len < qh->maxpacket) {
/* always terminate on short read; it's
* rarely reported as an error.
*/
} else if (urb->actual_length <
urb->transfer_buffer_length)
more = true;
break;
case MUSB_EP0_START:
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
dev_dbg(musb->controller, "start no-DATA\n");
break;
} else if (request->bRequestType & USB_DIR_IN) {
dev_dbg(musb->controller, "start IN-DATA\n");
musb->ep0_stage = MUSB_EP0_IN;
more = true;
break;
} else {
dev_dbg(musb->controller, "start OUT-DATA\n");
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
/* FALLTHROUGH */
case MUSB_EP0_OUT:
fifo_count = min_t(size_t, qh->maxpacket,
urb->transfer_buffer_length -
urb->actual_length);
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
more = true;
}
break;
default:
ERR("bogus ep0 stage %d\n", musb->ep0_stage);
break;
}
return more;
}
/*
* Handle default endpoint interrupt as host. Only called in IRQ time
* from musb_interrupt().
*
* called with controller irqlocked
*/
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
struct urb *urb;
u16 csr, len;
int status = 0;
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->control_ep;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
bool complete = false;
irqreturn_t retval = IRQ_NONE;
/* ep0 only has one queue, "in" */
urb = next_urb(qh);
musb_ep_select(mbase, 0);
csr = musb_readw(epio, MUSB_CSR0);
len = (csr & MUSB_CSR0_RXPKTRDY)
? musb_readb(epio, MUSB_COUNT0)
: 0;
dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
if (MUSB_EP0_STATUS == musb->ep0_stage) {
retval = IRQ_HANDLED;
complete = true;
}
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
dev_dbg(musb->controller, "STALLING ENDPOINT\n");
status = -EPIPE;
} else if (csr & MUSB_CSR0_H_ERROR) {
dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
status = -EPROTO;
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "control NAK timeout\n");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
* ep0 is more likely to stay busy. That's already done
* for bulk RX transfers.
*
* if (qh->ring.next != &musb->control), then
* we have a candidate... NAKing is *NOT* an error
*/
musb_writew(epio, MUSB_CSR0, 0);
retval = IRQ_HANDLED;
}
if (status) {
dev_dbg(musb->controller, "aborting\n");
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
complete = true;
/* use the proper sequence to abort the transfer */
if (csr & MUSB_CSR0_H_REQPKT) {
csr &= ~MUSB_CSR0_H_REQPKT;
musb_writew(epio, MUSB_CSR0, csr);
csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
musb_writew(epio, MUSB_CSR0, csr);
} else {
musb_h_ep0_flush_fifo(hw_ep);
}
musb_writeb(epio, MUSB_NAKLIMIT0, 0);
/* clear it */
musb_writew(epio, MUSB_CSR0, 0);
}
if (unlikely(!urb)) {
/* stop endpoint since we have no place for its data, this
* SHOULD NEVER HAPPEN! */
ERR("no URB for end 0\n");
musb_h_ep0_flush_fifo(hw_ep);
goto done;
}
if (!complete) {
/* call common logic and prepare response */
if (musb_h_ep0_continue(musb, len, urb)) {
/* more packets required */
csr = (MUSB_EP0_IN == musb->ep0_stage)
? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
} else {
/* data transfer complete; perform status phase */
if (usb_pipeout(urb->pipe)
|| !urb->transfer_buffer_length)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_H_REQPKT;
else
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
}
musb_writew(epio, MUSB_CSR0, csr);
retval = IRQ_HANDLED;
} else
musb->ep0_stage = MUSB_EP0_IDLE;
/* call completion handler if done */
if (complete)
musb_advance_schedule(musb, urb, hw_ep, 1);
done:
return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Host side TX (OUT) using Mentor DMA works as follows:
submit_urb ->
- if queue was empty, Program Endpoint
- ... which starts DMA to fifo in mode 1 or 0
DMA Isr (transfer complete) -> TxAvail()
- Stop DMA (~DmaEnab) (<--- Alert ... currently happens
only in musb_cleanup_urb)
- TxPktRdy has to be set in mode 0 or for
short packets in mode 1.
*/
#endif
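/*
* Illustrative timeline (inferred from the notes above, not from the
* programmer's guide), for a 1536-byte bulk OUT in mode 1 with
* 512-byte packets:
*
*	musb_tx_dma_program(..., 1536)	-> FIFO fed by DMA, packets go out
*	DMA irq -> musb_host_tx()	-> DMAMODE still set: switch to
*					   mode 0, wait for the terminal
*					   TXPKTRDY interrupt
*	TX irq	-> musb_host_tx()	-> FIFO empty, URB given back
*/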
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
int pipe;
bool done = false;
u16 tx_csr;
size_t length = 0;
size_t offset = 0;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->out_qh;
struct urb *urb = next_urb(qh);
u32 status = 0;
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
bool transfer_pending = false;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
/* with CPPI, DMA sometimes triggers "extra" irqs */
if (!urb) {
dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
return;
}
pipe = urb->pipe;
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
dev_dbg(musb->controller, "TX end %d stall\n", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
/* NOTE: this code path would be a good place to PAUSE a
* transfer, if there's some other (nonperiodic) tx urb
* that could use this fifo. (dma complicates it...)
* That's already done for bulk RX transfers.
*
* if (bulk && qh->ring.next != &musb->out_bulk), then
* we have a candidate... NAKing is *NOT* an error
*/
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS
| MUSB_TXCSR_TXPKTRDY);
return;
}
if (status) {
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
}
/* do the proper sequence to abort the transfer in the
* usb core; the dma engine should already be stopped.
*/
musb_h_tx_flush_fifo(hw_ep);
tx_csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_NAKTIMEOUT
);
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR, tx_csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, tx_csr);
musb_writeb(epio, MUSB_TXINTERVAL, 0);
done = true;
}
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
return;
}
if (is_dma_capable() && dma && !status) {
/*
* DMA has completed. But if we're using DMA mode 1 (multi
* packet DMA), we need a terminal TXPKTRDY interrupt before
* we can consider this transfer completed, lest we trash
* its last packet when writing the next URB's data. So we
* switch back to mode 0 to get that interrupt; we'll come
* back here once it happens.
*/
if (tx_csr & MUSB_TXCSR_DMAMODE) {
/*
* We shouldn't clear DMAMODE with DMAENAB set; so
* clear them in a safe order. That should be OK
* once TXPKTRDY has been set (and I've never seen
* it being 0 at this moment -- DMA interrupt latency
* is significant) but if it hasn't been then we have
* no choice but to stop being polite and ignore the
* programmer's guide... :-)
*
* Note that we must write TXCSR with TXPKTRDY cleared
* in order not to re-trigger the packet send (this bit
* can't be cleared by CPU), and there's another caveat:
* TXPKTRDY may be set shortly and then cleared in the
* double-buffered FIFO mode, so we do an extra TXCSR
* read for debouncing...
*/
tx_csr &= musb_readw(epio, MUSB_TXCSR);
if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
tx_csr &= ~(MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR,
tx_csr | MUSB_TXCSR_H_WZC_BITS);
}
tx_csr &= ~(MUSB_TXCSR_DMAMODE |
MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR,
tx_csr | MUSB_TXCSR_H_WZC_BITS);
/*
* There is no guarantee that we'll get an interrupt
* after clearing DMAMODE as we might have done this
* too late (after TXPKTRDY was cleared by controller).
* Re-read TXCSR as we have spoiled its previous value.
*/
tx_csr = musb_readw(epio, MUSB_TXCSR);
}
/*
* We may get here from a DMA completion or TXPKTRDY interrupt.
* In any case, we must check the FIFO status here and bail out
* only if the FIFO still has data -- that should prevent the
* "missed" TXPKTRDY interrupts and deal with double-buffered
* FIFO mode too...
*/
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
"CSR %04x\n", tx_csr);
return;
}
}
if (!status || dma || usb_pipeisoc(pipe)) {
if (dma)
length = dma->actual_len;
else
length = qh->segsize;
qh->offset += length;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = length;
d->status = status;
if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
} else {
d++;
offset = d->offset;
length = d->length;
}
} else if (dma && urb->transfer_buffer_length == qh->offset) {
done = true;
} else {
/* see if we need to send more data, or ZLP */
if (qh->segsize < qh->maxpacket)
done = true;
else if (qh->offset == urb->transfer_buffer_length
&& !(urb->transfer_flags
& URB_ZERO_PACKET))
done = true;
if (!done) {
offset = qh->offset;
length = urb->transfer_buffer_length - offset;
transfer_pending = true;
}
}
}
/* urb->status != -EINPROGRESS means request has been faulted,
* so we must abort this transfer after cleanup
*/
if (urb->status != -EINPROGRESS) {
done = true;
if (status == 0)
status = urb->status;
}
if (done) {
/* set status */
urb->status = status;
urb->actual_length = qh->offset;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
return;
} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
offset, length)) {
if (is_cppi_enabled() || tusb_dma_omap())
musb_h_tx_dma_start(hw_ep);
return;
}
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
return;
}
/*
* PIO: start next packet in this URB.
*
* REVISIT: some docs say that when hw_ep->tx_double_buffered,
* (and presumably, FIFO is not half-full) we should write *two*
* packets before updating TXCSR; other docs disagree...
*/
if (length > qh->maxpacket)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
qh->segsize = length;
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Host side RX (IN) using Mentor DMA works as follows:
submit_urb ->
- if queue was empty, ProgramEndpoint
- first IN token is sent out (by setting ReqPkt)
LinuxIsr -> RxReady()
/\ => first packet is received
| - Set in mode 0 (DmaEnab, ~ReqPkt)
| -> DMA Isr (transfer complete) -> RxReady()
| - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
| - if urb not complete, send next IN token (ReqPkt)
| | else complete urb.
| |
---------------------------
*
* Nuances of mode 1:
* For short packets, no ack (+RxPktRdy) is sent automatically
* (even if AutoClear is ON)
* For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
* automatically => major problem, as collecting the next packet becomes
* difficult. Hence mode 1 is not used.
*
* REVISIT
* All we care about at this driver level is that
* (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
* (b) termination conditions are: short RX, or buffer full;
* (c) fault modes include
* - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
* (and that endpoint's dma queue stops immediately)
* - overflow (full, PLUS more bytes in the terminal packet)
*
* So for example, usb-storage sets URB_SHORT_NOT_OK, and would
* thus be a great candidate for using mode 1 ... for all but the
* last packet of one URB's transfer.
*/
#endif
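/*
* Illustrative sketch of the mode-0 RX loop charted above, assuming a
* 1024-byte bulk IN with 512-byte packets and no faults:
*
*	set ReqPkt		-> IN token; packet 1 arrives (RX irq)
*	set DmaEnab, ~ReqPkt	-> DMA drains the FIFO (DMA irq)
*	~RxPktRdy, ~DmaEnab	-> URB not complete, so set ReqPkt again
*	... repeat for packet 2, then give the URB back.
*/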
/* Schedule next QH from musb->in_bulk and move the current qh to
* the end; avoids starvation for other endpoints.
*/
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
struct dma_channel *dma;
struct urb *urb;
void __iomem *mbase = musb->mregs;
void __iomem *epio = ep->regs;
struct musb_qh *cur_qh, *next_qh;
u16 rx_csr;
musb_ep_select(mbase, ep->epnum);
dma = is_dma_capable() ? ep->rx_channel : NULL;
/* clear nak timeout bit */
rx_csr = musb_readw(epio, MUSB_RXCSR);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
cur_qh = first_qh(&musb->in_bulk);
if (cur_qh) {
urb = next_urb(cur_qh);
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
musb->dma_controller->channel_abort(dma);
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
}
musb_save_toggle(cur_qh, 1, urb);
/* move cur_qh to end of queue */
list_move_tail(&cur_qh->ring, &musb->in_bulk);
/* get the next qh from musb->in_bulk */
next_qh = first_qh(&musb->in_bulk);
/* set rx_reinit and schedule the next qh */
ep->rx_reinit = 1;
musb_start_urb(musb, 1, next_qh);
}
}
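/*
* Example (illustrative): with three bulk-IN qhs multiplexed on the
* shared bulk endpoint, musb->in_bulk holds A -> B -> C. If A's device
* NAK-times out, list_move_tail() rotates the ring to B -> C -> A and
* B's URB is started, so one permanently NAKing device cannot starve
* the others.
*/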
/*
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
* and high-bandwidth IN transfer cases.
*/
void musb_host_rx(struct musb *musb, u8 epnum)
{
struct urb *urb;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
size_t xfer_len;
void __iomem *mbase = musb->mregs;
int pipe;
u16 rx_csr, val;
bool iso_err = false;
bool done = false;
u32 status;
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
urb = next_urb(qh);
dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
status = 0;
xfer_len = 0;
rx_csr = musb_readw(epio, MUSB_RXCSR);
val = rx_csr;
if (unlikely(!urb)) {
/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
*/
dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
return;
}
pipe = urb->pipe;
dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
epnum, rx_csr, urb->actual_length,
dma ? dma->actual_len : 0);
/* check for errors, concurrent stall & unlink is not really
* handled yet! */
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
/* NOTE: NAKing is *NOT* an error, so we want to
* continue. Except ... if there's a request for
* another QH, use that instead of starving it.
*
* Devices like Ethernet and serial adapters keep
* reads posted at all times, which will starve
* other devices without this logic.
*/
if (usb_pipebulk(urb->pipe)
&& qh->mux == 1
&& !list_is_singular(&musb->in_bulk)) {
musb_bulk_rx_nak_timeout(musb, hw_ep);
return;
}
musb_ep_select(mbase, epnum);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
goto finish;
} else {
dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
/* packet error reported later */
iso_err = true;
}
} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
epnum);
status = -EPROTO;
}
/* faults abort the transfer */
if (status) {
/* clean up dma and collect transfer count */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
}
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
musb_writeb(epio, MUSB_RXINTERVAL, 0);
done = true;
goto finish;
}
if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
goto finish;
}
/* thorough shutdown for now ... given more precise fault handling
* and better queueing support, we might keep a DMA pipeline going
* while processing this irq for earlier completions.
*/
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
#ifndef CONFIG_USB_INVENTRA_DMA
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
* and also duplicates dma cleanup code above ... plus,
* shouldn't this be the "half full" double buffer case?
*/
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
done = true;
}
dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | rx_csr);
}
#endif
if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
xfer_len = dma->actual_len;
val &= ~(MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
#ifdef CONFIG_USB_INVENTRA_DMA
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = xfer_len;
/* even if there was an error, we did the dma
* for iso_frame_desc->length
*/
if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
if (++qh->iso_idx >= urb->number_of_packets)
done = true;
else
done = false;
} else {
/* done if urb buffer is full or short packet is recd */
done = (urb->actual_length + xfer_len >=
urb->transfer_buffer_length
|| dma->actual_len < qh->maxpacket);
}
/* send IN token for next packet, without AUTOREQ */
if (!done) {
val |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | val);
}
dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
#else
done = true;
#endif
} else if (urb->status == -EINPROGRESS) {
/* if no errors, be sure a packet is ready for unloading */
if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
status = -EPROTO;
ERR("Rx interrupt with no errors or packet!\n");
/* FIXME this is another "SHOULD NEVER HAPPEN" */
/* SCRUB (RX) */
/* do the proper sequence to abort the transfer */
musb_ep_select(mbase, epnum);
val &= ~MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, val);
goto finish;
}
/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
if (dma) {
struct dma_controller *c;
u16 rx_count;
int ret, length;
dma_addr_t buf;
rx_count = musb_readw(epio, MUSB_RXCOUNT);
dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
epnum, rx_count,
urb->transfer_dma
+ urb->actual_length,
qh->offset,
urb->transfer_buffer_length);
c = musb->dma_controller;
if (usb_pipeisoc(pipe)) {
int d_status = 0;
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
if (iso_err) {
d_status = -EILSEQ;
urb->error_count++;
}
if (rx_count > d->length) {
if (d_status == 0) {
d_status = -EOVERFLOW;
urb->error_count++;
}
dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
rx_count, d->length);
length = d->length;
} else
length = rx_count;
d->status = d_status;
buf = urb->transfer_dma + d->offset;
} else {
length = rx_count;
buf = urb->transfer_dma +
urb->actual_length;
}
dma->desired_mode = 0;
#ifdef USE_MODE1
/* because of the issue below, mode 1 will
* only rarely behave with correct semantics.
*/
if ((urb->transfer_flags &
URB_SHORT_NOT_OK)
&& (urb->transfer_buffer_length -
urb->actual_length)
> qh->maxpacket)
dma->desired_mode = 1;
if (rx_count < hw_ep->max_packet_sz_rx) {
length = rx_count;
dma->desired_mode = 0;
} else {
length = urb->transfer_buffer_length;
}
#endif
/* Disadvantage of using mode 1:
* It's basically usable only for mass storage class; essentially all
* other protocols also terminate transfers on short packets.
*
* Details:
* An extra IN token is sent at the end of the transfer (due to AUTOREQ).
* If you try to use mode 1 for (transfer_buffer_length - 512) and then
* use that extra IN token to grab the last packet using mode 0, the
* problem is that you cannot be sure when the device will send the
* last packet and set RxPktRdy. Sometimes the packet is received too
* soon, so it gets lost when RxCSR is reset at the end of the mode 1
* transfer; sometimes it is received just a little late, so that if
* you configure for mode 0 right after the mode 1 transfer completes,
* you will find rxcount 0. You might think to just wait for an
* interrupt when the packet is received -- but you won't get one!
*/
val = musb_readw(epio, MUSB_RXCSR);
val &= ~MUSB_RXCSR_H_REQPKT;
if (dma->desired_mode == 0)
val &= ~MUSB_RXCSR_H_AUTOREQ;
else
val |= MUSB_RXCSR_H_AUTOREQ;
val |= MUSB_RXCSR_DMAENAB;
/* autoclear shouldn't be set in high bandwidth */
if (qh->hb_mult == 1)
val |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | val);
/* REVISIT if when actual_length != 0,
* transfer_buffer_length needs to be
* adjusted first...
*/
ret = c->channel_program(
dma, qh->maxpacket,
dma->desired_mode, buf, length);
if (!ret) {
c->channel_release(dma);
hw_ep->rx_channel = NULL;
dma = NULL;
/* REVISIT reset CSR */
}
}
#endif /* Mentor DMA */
if (!dma) {
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
done = musb_host_packet_rx(musb, urb,
epnum, iso_err);
dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
}
}
finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
if (urb->status == -EINPROGRESS)
urb->status = status;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
* the software schedule associates multiple such nodes with a given
* host side hardware endpoint + direction; scheduling may activate
* that hardware endpoint.
*/
static int musb_schedule(
struct musb *musb,
struct musb_qh *qh,
int is_in)
{
int idle;
int best_diff;
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
u8 toggle;
u8 txtype;
struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
head = &musb->control;
hw_ep = musb->control_ep;
goto success;
}
/* else, periodic transfers get muxed to other endpoints */
/*
* We know this qh hasn't been scheduled, so all we need to do
* is choose which hardware endpoint to put it on ...
*
* REVISIT what we really want here is a regular schedule tree
* like e.g. OHCI uses.
*/
best_diff = 4096;
best_end = -1;
for (epnum = 1, hw_ep = musb->endpoints + 1;
epnum < musb->nr_endpoints;
epnum++, hw_ep++) {
int diff;
if (musb_ep_get_qh(hw_ep, is_in) != NULL)
continue;
if (hw_ep == musb->bulk_ep)
continue;
if (is_in)
diff = hw_ep->max_packet_sz_rx;
else
diff = hw_ep->max_packet_sz_tx;
diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
/*
* Mentor controller has a bug in that if we schedule
* a BULK Tx transfer on an endpoint that had earlier
* handled ISOC then the BULK transfer has to start on
* a zero toggle. If the BULK transfer starts on a 1
* toggle then this transfer will fail as the mentor
* controller starts the Bulk transfer on a 0 toggle
* irrespective of the programming of the toggle bits
* in the TXCSR register. Check for this condition
* while allocating the EP for a Tx Bulk transfer. If
* so skip this EP.
*/
hw_ep = musb->endpoints + epnum;
toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
>> 4) & 0x3;
if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
continue;
best_diff = diff;
best_end = epnum;
}
}
/* use bulk reserved ep1 if no other ep is free */
if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
hw_ep = musb->bulk_ep;
if (is_in)
head = &musb->in_bulk;
else
head = &musb->out_bulk;
/* Enable the bulk RX NAK timeout scheme when bulk requests are
* multiplexed. This scheme doesn't work in a high-speed-to-full-speed
* scenario, because NAK interrupts never arrive from a full speed
* device connected behind a high speed hub.
* The NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
* 4 (8 frames or 8 ms) for FS devices.
*/
if (is_in && qh->dev)
qh->intv_reg =
(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
goto success;
} else if (best_end < 0) {
return -ENOSPC;
}
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
if (head) {
idle = list_empty(head);
list_add_tail(&qh->ring, head);
qh->mux = 1;
}
qh->hw_ep = hw_ep;
qh->hep->hcpriv = qh;
if (idle)
musb_start_urb(musb, is_in, qh);
return 0;
}
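/*
* Worked example (illustrative) of the best-fit search above: for a
* periodic IN qh needing maxpacket * hb_mult = 64 bytes, an idle hw_ep
* with max_packet_sz_rx = 512 scores diff = 448 while one with 64
* scores diff = 0, so the snug 64-byte endpoint wins; endpoints that
* already own a qh in this direction are skipped outright.
*/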
static int musb_urb_enqueue(
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
struct usb_host_endpoint *hep = urb->ep;
struct musb_qh *qh;
struct usb_endpoint_descriptor *epd = &hep->desc;
int ret;
unsigned type_reg;
unsigned interval;
/* host role must be active */
if (!is_host_active(musb) || !musb->is_active)
return -ENODEV;
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
qh = ret ? NULL : hep->hcpriv;
if (qh)
urb->hcpriv = qh;
spin_unlock_irqrestore(&musb->lock, flags);
/* DMA mapping was already done, if needed, and this urb is on
* hep->urb_list now ... so we're done, unless hep wasn't yet
* scheduled onto a live qh.
*
* REVISIT best to keep hep->hcpriv valid until the endpoint gets
* disabled, testing for empty qh->ring and avoiding qh setup costs
* except for the first urb queued after a config change.
*/
if (qh || ret)
return ret;
/* Allocate and initialize qh, minimizing the work done each time
* hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
*
* REVISIT consider a dedicated qh kmem_cache, so it's harder
* for bugs in other kernel code to break this driver...
*/
qh = kzalloc(sizeof *qh, mem_flags);
if (!qh) {
spin_lock_irqsave(&musb->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&musb->lock, flags);
return -ENOMEM;
}
qh->hep = hep;
qh->dev = urb->dev;
INIT_LIST_HEAD(&qh->ring);
qh->is_ready = 1;
qh->maxpacket = usb_endpoint_maxp(epd);
qh->type = usb_endpoint_type(epd);
/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
* Some musb cores don't support high bandwidth ISO transfers; and
* we don't (yet!) support high bandwidth interrupt transfers.
*/
qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
if (qh->hb_mult > 1) {
int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
if (ok)
ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
if (!ok) {
ret = -EMSGSIZE;
goto done;
}
qh->maxpacket &= 0x7ff;
}
qh->epnum = usb_endpoint_num(epd);
/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
/* precompute rxtype/txtype/type0 register */
type_reg = (qh->type << 4) | qh->epnum;
switch (urb->dev->speed) {
case USB_SPEED_LOW:
type_reg |= 0xc0;
break;
case USB_SPEED_FULL:
type_reg |= 0x80;
break;
default:
type_reg |= 0x40;
}
qh->type_reg = type_reg;
/* Precompute RXINTERVAL/TXINTERVAL register */
switch (qh->type) {
case USB_ENDPOINT_XFER_INT:
/*
* Full/low speeds use the linear encoding,
* high speed uses the logarithmic encoding.
*/
if (urb->dev->speed <= USB_SPEED_FULL) {
interval = max_t(u8, epd->bInterval, 1);
break;
}
/* FALLTHROUGH */
case USB_ENDPOINT_XFER_ISOC:
/* ISO always uses logarithmic encoding */
interval = min_t(u8, epd->bInterval, 16);
break;
default:
/* REVISIT we actually want to use NAK limits, hinting to the
* transfer scheduling logic to try some other qh, e.g. try
* for 2 msec first:
*
* interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
*
* The downside of disabling this is that transfer scheduling
* gets VERY unfair for nonperiodic transfers; a misbehaving
* peripheral could make that hurt. That's perfectly normal
* for reads from network or serial adapters ... so we have
* partial NAKlimit support for bulk RX.
*
* The upside of disabling it is simpler transfer scheduling.
*/
interval = 0;
}
qh->intv_reg = interval;
/* precompute addressing for external hub/tt ports */
if (musb->is_multipoint) {
struct usb_device *parent = urb->dev->parent;
if (parent != hcd->self.root_hub) {
qh->h_addr_reg = (u8) parent->devnum;
/* set up tt info if needed */
if (urb->dev->tt) {
qh->h_port_reg = (u8) urb->dev->ttport;
if (urb->dev->tt->hub)
qh->h_addr_reg =
(u8) urb->dev->tt->hub->devnum;
if (urb->dev->tt->multi)
qh->h_addr_reg |= 0x80;
}
}
}
/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
* until we get real dma queues (with an entry for each urb/buffer),
* we only have work to do in the former case.
*/
spin_lock_irqsave(&musb->lock, flags);
if (hep->hcpriv) {
/* some concurrent activity submitted another urb to hep...
* odd, rare, error prone, but legal.
*/
kfree(qh);
qh = NULL;
ret = 0;
} else
ret = musb_schedule(musb, qh,
epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
if (ret == 0) {
urb->hcpriv = qh;
/* FIXME set urb->start_frame for iso/intr, it's tested in
* musb_start_urb(), but otherwise only konicawc cares ...
*/
}
spin_unlock_irqrestore(&musb->lock, flags);
done:
if (ret != 0) {
spin_lock_irqsave(&musb->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&musb->lock, flags);
kfree(qh);
}
return ret;
}
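/*
* Worked example (illustrative) of the interval encoding above: a
* high-speed interrupt endpoint with bInterval = 4 stores intv_reg = 4,
* a logarithmic value meaning 2^(4-1) = 8 microframes (1 ms); a
* full-speed interrupt endpoint with bInterval = 10 keeps the linear
* value, i.e. 10 frames (10 ms).
*/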
/*
* Abort a transfer that's at the head of a hardware queue.
* Called with controller locked, irqs blocked.
* The hardware queue then advances to the next transfer, unless prevented.
*/
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
struct musb_hw_ep *ep = qh->hw_ep;
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
int is_in = usb_pipein(urb->pipe);
int status = 0;
u16 csr;
musb_ep_select(regs, hw_end);
if (is_dma_capable()) {
struct dma_channel *dma;
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
dev_dbg(musb->controller,
"abort %cX%d DMA for urb %p --> %d\n",
is_in ? 'R' : 'T', ep->epnum,
urb, status);
urb->actual_length += dma->actual_len;
}
}
/* turn off DMA requests, discard state, stop polling ... */
if (is_in) {
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
/* REVISIT we still get an irq; should likely clear the
* endpoint's irq status here to avoid bogus irqs.
* clearing that status is platform-specific...
*/
} else if (ep->epnum) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_NAKTIMEOUT
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, csr);
/* flush cpu writebuffer */
csr = musb_readw(epio, MUSB_TXCSR);
} else {
musb_h_ep0_flush_fifo(ep);
}
if (status == 0)
musb_advance_schedule(ep->musb, urb, ep, is_in);
return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct musb *musb = hcd_to_musb(hcd);
struct musb_qh *qh;
unsigned long flags;
int is_in = usb_pipein(urb->pipe);
int ret;
dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
is_in ? "in" : "out");
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret)
goto done;
qh = urb->hcpriv;
if (!qh)
goto done;
/*
* Any URB not actively programmed into endpoint hardware can be
* immediately given back; that's any URB not at the head of an
* endpoint queue, unless someday we get real DMA queues. And even
* if it's at the head, it might not be known to the hardware...
*
* Otherwise abort current transfer, pending DMA, etc.; urb->status
* has already been updated. This is a synchronous abort; it'd be
* OK to hold off until after some IRQ, though.
*
* NOTE: qh is invalid unless !list_empty(&hep->urb_list)
*/
if (!qh->is_ready
|| urb->urb_list.prev != &qh->hep->urb_list
|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
qh->is_ready = 0;
musb_giveback(musb, urb, 0);
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
* and its URB list has emptied, recycle this qh.
*/
if (ready && list_empty(&qh->hep->urb_list)) {
qh->hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);
}
} else
ret = musb_cleanup_urb(urb, qh);
done:
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
struct musb_qh *qh;
struct urb *urb;
spin_lock_irqsave(&musb->lock, flags);
qh = hep->hcpriv;
if (qh == NULL)
goto exit;
/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
/* Kick the first URB off the hardware, if needed */
qh->is_ready = 0;
if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
urb = next_urb(qh);
/* make software (then hardware) stop ASAP */
if (!urb->unlinked)
urb->status = -ESHUTDOWN;
/* cleanup */
musb_cleanup_urb(urb, qh);
/* Then nuke all the others ... and advance the
* queue on hw_ep (e.g. bulk ring) when we're done.
*/
while (!list_empty(&hep->urb_list)) {
urb = next_urb(qh);
urb->status = -ESHUTDOWN;
musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
}
} else {
/* Just empty the queue; the hardware is busy with
* other transfers, and since !qh->is_ready nothing
* will activate any of these as it advances.
*/
while (!list_empty(&hep->urb_list))
musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
hep->hcpriv = NULL;
list_del(&qh->ring);
kfree(qh);
}
exit:
spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
return musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_h_start(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
/* NOTE: musb_start() is called when the hub driver turns
* on port power, or when (OTG) peripheral starts.
*/
hcd->state = HC_STATE_RUNNING;
musb->port1_status = 0;
return 0;
}
static void musb_h_stop(struct usb_hcd *hcd)
{
musb_stop(hcd_to_musb(hcd));
hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
if (!is_host_active(musb))
return 0;
switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
return 0;
case OTG_STATE_A_WAIT_VRISE:
/* ID could be grounded even if there's no device
* on the other end of the cable. NOTE that the
* A_WAIT_VRISE timers are messy with MUSB...
*/
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
break;
default:
break;
}
if (musb->is_active) {
WARNING("trying to suspend as %s while active\n",
otg_state_string(musb->xceiv->state));
return -EBUSY;
} else
return 0;
}
static int musb_bus_resume(struct usb_hcd *hcd)
{
/* resuming child port does the work */
return 0;
}
const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
.hcd_priv_size = sizeof(struct musb),
.flags = HCD_USB2 | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
* those must be shared with peripheral code for OTG configs
*/
.start = musb_h_start,
.stop = musb_h_stop,
.get_frame_number = musb_h_get_frame_number,
.urb_enqueue = musb_urb_enqueue,
.urb_dequeue = musb_urb_dequeue,
.endpoint_disable = musb_h_disable,
.hub_status_data = musb_hub_status_data,
.hub_control = musb_hub_control,
.bus_suspend = musb_bus_suspend,
.bus_resume = musb_bus_resume,
/* .start_port_reset = NULL, */
/* .hub_irq_enable = NULL, */
};
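/*
* Illustrative registration sketch (this lives in the platform glue,
* not in this file): musb_hc_driver is bound to a usb_hcd in the usual
* usbcore way, roughly:
*
*	struct usb_hcd *hcd = usb_create_hcd(&musb_hc_driver,
*					dev, dev_name(dev));
*	if (!hcd)
*		return -ENOMEM;
*	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);	// irq flags vary
*/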
| gpl-2.0 |