repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
deadman96385/android_kernel_asus_Z00A | drivers/staging/speakup/kobjects.c | 2087 | 24784 | /*
* Speakup kobject implementation
*
* Copyright (C) 2009 William Hubbs
*
* This code is based on kobject-example.c, which came with linux 2.6.x.
*
* Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2007 Novell Inc.
*
* Released under the GPL version 2 only.
*
*/
#include <linux/slab.h> /* For kmalloc. */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include "speakup.h"
#include "spk_priv.h"
/*
* This is called when a user reads the characters or chartab sys file.
*/
static ssize_t chars_chartab_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int i;
int len = 0;
char *cp;
char *buf_pointer = buf;
size_t bufsize = PAGE_SIZE;
unsigned long flags;
spk_lock(flags);
*buf_pointer = '\0';
for (i = 0; i < 256; i++) {
if (bufsize <= 1)
break;
if (strcmp("characters", attr->attr.name) == 0) {
len = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
i, spk_characters[i]);
} else { /* show chartab entry */
if (IS_TYPE(i, B_CTL))
cp = "B_CTL";
else if (IS_TYPE(i, WDLM))
cp = "WDLM";
else if (IS_TYPE(i, A_PUNC))
cp = "A_PUNC";
else if (IS_TYPE(i, PUNC))
cp = "PUNC";
else if (IS_TYPE(i, NUM))
cp = "NUM";
else if (IS_TYPE(i, A_CAP))
cp = "A_CAP";
else if (IS_TYPE(i, ALPHA))
cp = "ALPHA";
else if (IS_TYPE(i, B_CAPSYM))
cp = "B_CAPSYM";
else if (IS_TYPE(i, B_SYM))
cp = "B_SYM";
else
cp = "0";
len =
scnprintf(buf_pointer, bufsize, "%d\t%s\n", i, cp);
}
bufsize -= len;
buf_pointer += len;
}
spk_unlock(flags);
return buf_pointer - buf;
}
/*
* Print informational messages or warnings after updating
* character descriptions or chartab entries.
*/
static void report_char_chartab_status(int reset, int received, int used,
int rejected, int do_characters)
{
char *object_type[] = {
"character class entries",
"character descriptions",
};
int len;
char buf[80];
if (reset) {
pr_info("%s reset to defaults\n", object_type[do_characters]);
} else if (received) {
len = snprintf(buf, sizeof(buf),
" updated %d of %d %s\n",
used, received, object_type[do_characters]);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
printk("%s", buf);
}
}
/*
* This is called when a user changes the characters or chartab parameters.
*/
static ssize_t chars_chartab_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
char *cp = (char *) buf;
char *end = cp + count; /* the null at the end of the buffer */
char *linefeed = NULL;
char keyword[MAX_DESC_LEN + 1];
char *outptr = NULL; /* Will hold keyword or desc. */
char *temp = NULL;
char *desc = NULL;
ssize_t retval = count;
unsigned long flags;
unsigned long index = 0;
int charclass = 0;
int received = 0;
int used = 0;
int rejected = 0;
int reset = 0;
int do_characters = !strcmp(attr->attr.name, "characters");
size_t desc_length = 0;
int i;
spk_lock(flags);
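/* Each input line is expected to be "<index><whitespace><description>"; a lone 'd', 'D', 'r', 'R' or an empty line resets the table to its defaults. */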
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
if (cp == end)
break;
if ((*cp == '\n') || strchr("dDrR", *cp)) {
reset = 1;
break;
}
received++;
linefeed = strchr(cp, '\n');
if (!linefeed) {
rejected++;
break;
}
if (!isdigit(*cp)) {
rejected++;
cp = linefeed + 1;
continue;
}
index = simple_strtoul(cp, &temp, 10);
if (index > 255) {
rejected++;
cp = linefeed + 1;
continue;
}
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
temp++;
desc_length = linefeed - temp;
if (desc_length > MAX_DESC_LEN) {
rejected++;
cp = linefeed + 1;
continue;
}
if (do_characters) {
desc = kmalloc(desc_length + 1, GFP_ATOMIC);
if (!desc) {
retval = -ENOMEM;
reset = 1; /* just reset on error. */
break;
}
outptr = desc;
} else {
outptr = keyword;
}
for (i = 0; i < desc_length; i++)
outptr[i] = temp[i];
outptr[desc_length] = '\0';
if (do_characters) {
if (spk_characters[index] != spk_default_chars[index])
kfree(spk_characters[index]);
spk_characters[index] = desc;
used++;
} else {
charclass = spk_chartab_get_value(keyword);
if (charclass == 0) {
rejected++;
cp = linefeed + 1;
continue;
}
if (charclass != spk_chartab[index]) {
spk_chartab[index] = charclass;
used++;
}
}
cp = linefeed + 1;
}
if (reset) {
if (do_characters)
spk_reset_default_chars();
else
spk_reset_default_chartab();
}
spk_unlock(flags);
report_char_chartab_status(reset, received, used, rejected,
do_characters);
return retval;
}
/*
* This is called when a user reads the keymap parameter.
*/
static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *cp = buf;
int i;
int n;
int num_keys;
int nstates;
u_char *cp1;
u_char ch;
unsigned long flags;
spk_lock(flags);
cp1 = spk_key_buf + SHIFT_TBL_SIZE;
num_keys = (int)(*cp1);
nstates = (int)cp1[1];
cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates);
cp1 += 2; /* now pointing at shift states */
/* dump num_keys+1 as first row is shift states + flags,
* each subsequent row is key + states */
for (n = 0; n <= num_keys; n++) {
for (i = 0; i <= nstates; i++) {
ch = *cp1++;
cp += sprintf(cp, "%d,", (int)ch);
*cp++ = (i < nstates) ? SPACE : '\n';
}
}
cp += sprintf(cp, "0, %d\n", KEY_MAP_VER);
spk_unlock(flags);
return (int)(cp-buf);
}
/*
* This is called when a user changes the keymap parameter.
*/
static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int i;
ssize_t ret = count;
char *in_buff = NULL;
char *cp;
u_char *cp1;
unsigned long flags;
spk_lock(flags);
in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
spk_unlock(flags);
return -ENOMEM;
}
if (strchr("dDrR", *in_buff)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
pr_info("keymap set to default values\n");
kfree(in_buff);
spk_unlock(flags);
return count;
}
if (in_buff[count - 1] == '\n')
in_buff[count - 1] = '\0';
cp = in_buff;
cp1 = (u_char *)in_buff;
for (i = 0; i < 3; i++) {
cp = spk_s2uchar(cp, cp1);
cp1++;
}
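/* cp1 now points past the parsed header: map version, key count and state count. The body must supply (keys + 1) * (states + 1) entries plus the trailing 0 and version marker verified below. */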
i = (int)cp1[-2]+1;
i *= (int)cp1[-1]+1;
i += 2; /* 0 and last map ver */
if (cp1[-3] != KEY_MAP_VER || cp1[-1] > 10 ||
i+SHIFT_TBL_SIZE+4 >= sizeof(spk_key_buf)) {
pr_warn("i %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
kfree(in_buff);
spk_unlock(flags);
return -EINVAL;
}
while (--i >= 0) {
cp = spk_s2uchar(cp, cp1);
cp1++;
if (!(*cp))
break;
}
if (i != 0 || cp1[-1] != KEY_MAP_VER || cp1[-2] != 0) {
ret = -EINVAL;
pr_warn("end %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
} else {
if (spk_set_key_info(in_buff, spk_key_buf)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
ret = -EINVAL;
pr_warn("set key failed\n");
}
}
kfree(in_buff);
spk_unlock(flags);
return ret;
}
/*
* This is called when a user changes the value of the silent parameter.
*/
static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int len;
struct vc_data *vc = vc_cons[fg_console].d;
char ch = 0;
char shut;
unsigned long flags;
len = strlen(buf);
if (len > 0 && len < 3) {
ch = buf[0];
if (ch == '\n')
ch = '0';
}
if (ch < '0' || ch > '7') {
pr_warn("silent value '%c' not in range (0,7)\n", ch);
return -EINVAL;
}
spk_lock(flags);
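/* Treat ch as a bit mask: bit 1 selects the basic shut-up flag (and flushes the synth), bit 2 adds the 0x40 flag, and bit 0 decides whether the selected flags are set or cleared in spk_shut_up. */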
if (ch&2) {
shut = 1;
spk_do_flush();
} else {
shut = 0;
}
if (ch&4)
shut |= 0x40;
if (ch&1)
spk_shut_up |= shut;
else
spk_shut_up &= ~shut;
spk_unlock(flags);
return count;
}
/*
* This is called when a user reads the synth setting.
*/
static ssize_t synth_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int rv;
if (synth == NULL)
rv = sprintf(buf, "%s\n", "none");
else
rv = sprintf(buf, "%s\n", synth->name);
return rv;
}
/*
* This is called when a user requests to change synthesizers.
*/
static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int len;
char new_synth_name[10];
len = strlen(buf);
if (len < 2 || len > 9)
return -EINVAL;
strncpy(new_synth_name, buf, len);
if (new_synth_name[len - 1] == '\n')
len--;
new_synth_name[len] = '\0';
spk_strlwr(new_synth_name);
if ((synth != NULL) && (!strcmp(new_synth_name, synth->name))) {
pr_warn("%s already in use\n", new_synth_name);
} else if (synth_init(new_synth_name) != 0) {
pr_warn("failed to init synth %s\n", new_synth_name);
return -ENODEV;
}
return count;
}
/*
* This is called when text is sent to the synth via the synth_direct file.
*/
static ssize_t synth_direct_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
u_char tmp[256];
int len;
int bytes;
const char *ptr = buf;
if (!synth)
return -EPERM;
len = strlen(buf);
while (len > 0) {
bytes = min_t(size_t, len, 250);
strncpy(tmp, ptr, bytes);
tmp[bytes] = '\0';
string_unescape_any_inplace(tmp);
synth_printf("%s", tmp);
ptr += bytes;
len -= bytes;
}
return count;
}
/*
* This function is called when a user reads the version.
*/
static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *cp;
cp = buf;
cp += sprintf(cp, "Speakup version %s\n", SPEAKUP_VERSION);
if (synth)
cp += sprintf(cp, "%s synthesizer driver version %s\n",
synth->name, synth->version);
return cp - buf;
}
/*
* This is called when a user reads the punctuation settings.
*/
static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int i;
char *cp = buf;
struct st_var_header *p_header;
struct punc_var_t *var;
struct st_bits_data *pb;
short mask;
unsigned long flags;
p_header = spk_var_header_by_name(attr->attr.name);
if (p_header == NULL) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
var = spk_get_punc_var(p_header->var_id);
if (var == NULL) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
return -EINVAL;
}
spk_lock(flags);
pb = (struct st_bits_data *) &spk_punc_info[var->value];
mask = pb->mask;
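/* Emit every printable character whose chartab entry carries this punctuation class bit. */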
for (i = 33; i < 128; i++) {
if (!(spk_chartab[i]&mask))
continue;
*cp++ = (char)i;
}
spk_unlock(flags);
return cp-buf;
}
/*
* This is called when a user changes the punctuation settings.
*/
static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int x;
struct st_var_header *p_header;
struct punc_var_t *var;
char punc_buf[100];
unsigned long flags;
x = strlen(buf);
if (x < 1 || x > 99)
return -EINVAL;
p_header = spk_var_header_by_name(attr->attr.name);
if (p_header == NULL) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
var = spk_get_punc_var(p_header->var_id);
if (var == NULL) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
return -EINVAL;
}
strncpy(punc_buf, buf, x);
while (x && punc_buf[x - 1] == '\n')
x--;
punc_buf[x] = '\0';
spk_lock(flags);
if (*punc_buf == 'd' || *punc_buf == 'r')
x = spk_set_mask_bits(0, var->value, 3);
else
x = spk_set_mask_bits(punc_buf, var->value, 3);
spk_unlock(flags);
return count;
}
/*
* This function is called when a user reads one of the variable parameters.
*/
ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int rv = 0;
struct st_var_header *param;
struct var_t *var;
char *cp1;
char *cp;
char ch;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
if (param == NULL)
return -EINVAL;
spk_lock(flags);
var = (struct var_t *) param->data;
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
if (var)
rv = sprintf(buf, "%i\n", var->u.n.value);
else
rv = sprintf(buf, "0\n");
break;
case VAR_STRING:
if (var) {
cp1 = buf;
*cp1++ = '"';
for (cp = (char *)param->p_val; (ch = *cp); cp++) {
if (ch >= ' ' && ch < '~')
*cp1++ = ch;
else
cp1 += sprintf(cp1, "\\""x%02x", ch);
}
*cp1++ = '"';
*cp1++ = '\n';
*cp1 = '\0';
rv = cp1-buf;
} else {
rv = sprintf(buf, "\"\"\n");
}
break;
default:
rv = sprintf(buf, "Bad parameter %s, type %i\n",
param->name, param->var_type);
break;
}
spk_unlock(flags);
return rv;
}
EXPORT_SYMBOL_GPL(spk_var_show);
/*
* This function is called when a user echos a value to one of the
* variable parameters.
*/
ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct st_var_header *param;
int ret;
int len;
char *cp;
struct var_t *var_data;
int value;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
if (param == NULL)
return -EINVAL;
if (param->data == NULL)
return 0;
ret = 0;
cp = (char *)buf;
string_unescape_any_inplace(cp);
spk_lock(flags);
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
if (*cp == 'd' || *cp == 'r' || *cp == '\0')
len = E_DEFAULT;
else if (*cp == '+' || *cp == '-')
len = E_INC;
else
len = E_SET;
value = simple_strtol(cp, NULL, 10);
ret = spk_set_num_var(value, param, len);
if (ret == -ERANGE) {
var_data = param->data;
pr_warn("value for %s out of range, expect %d to %d\n",
attr->attr.name,
var_data->u.n.low, var_data->u.n.high);
}
break;
case VAR_STRING:
len = strlen(buf);
if ((len >= 1) && (buf[len - 1] == '\n'))
--len;
if ((len >= 2) && (buf[0] == '"') && (buf[len - 1] == '"')) {
++buf;
len -= 2;
}
cp = (char *) buf;
cp[len] = '\0';
ret = spk_set_string_var(buf, param, len);
if (ret == -E2BIG)
pr_warn("value too long for %s\n",
attr->attr.name);
break;
default:
pr_warn("%s unknown type %d\n",
param->name, (int)param->var_type);
break;
}
/*
* If voice was just changed, we might need to reset our default
* pitch and volume.
*/
if (strcmp(attr->attr.name, "voice") == 0) {
if (synth && synth->default_pitch) {
param = spk_var_header_by_name("pitch");
if (param) {
spk_set_num_var(synth->default_pitch[value],
param, E_NEW_DEFAULT);
spk_set_num_var(0, param, E_DEFAULT);
}
}
if (synth && synth->default_vol) {
param = spk_var_header_by_name("vol");
if (param) {
spk_set_num_var(synth->default_vol[value],
param, E_NEW_DEFAULT);
spk_set_num_var(0, param, E_DEFAULT);
}
}
}
spk_unlock(flags);
if (ret == -ERESTART)
pr_info("%s reset to default value\n", attr->attr.name);
return count;
}
EXPORT_SYMBOL_GPL(spk_var_store);
/*
* Functions for reading and writing lists of i18n messages. Incomplete.
*/
static ssize_t message_show_helper(char *buf, enum msg_index_t first,
enum msg_index_t last)
{
size_t bufsize = PAGE_SIZE;
char *buf_pointer = buf;
int printed;
enum msg_index_t cursor;
int index = 0;
*buf_pointer = '\0'; /* buf_pointer always looking at a NUL byte. */
for (cursor = first; cursor <= last; cursor++, index++) {
if (bufsize <= 1)
break;
printed = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
index, spk_msg_get(cursor));
buf_pointer += printed;
bufsize -= printed;
}
return buf_pointer - buf;
}
static void report_msg_status(int reset, int received, int used,
int rejected, char *groupname)
{
int len;
char buf[160];
if (reset) {
pr_info("i18n messages from group %s reset to defaults\n",
groupname);
} else if (received) {
len = snprintf(buf, sizeof(buf),
" updated %d of %d i18n messages from group %s\n",
used, received, groupname);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
printk("%s", buf);
}
}
static ssize_t message_store_helper(const char *buf, size_t count,
struct msg_group_t *group)
{
char *cp = (char *) buf;
char *end = cp + count;
char *linefeed = NULL;
char *temp = NULL;
ssize_t msg_stored = 0;
ssize_t retval = count;
size_t desc_length = 0;
unsigned long index = 0;
int received = 0;
int used = 0;
int rejected = 0;
int reset = 0;
enum msg_index_t firstmessage = group->start;
enum msg_index_t lastmessage = group->end;
enum msg_index_t curmessage;
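/* Lines use the same "<index><whitespace><text>" layout as the character tables; 'd', 'D', 'r' or 'R' resets the whole group to its default messages. */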
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
if (cp == end)
break;
if (strchr("dDrR", *cp)) {
reset = 1;
break;
}
received++;
linefeed = strchr(cp, '\n');
if (!linefeed) {
rejected++;
break;
}
if (!isdigit(*cp)) {
rejected++;
cp = linefeed + 1;
continue;
}
index = simple_strtoul(cp, &temp, 10);
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
temp++;
desc_length = linefeed - temp;
curmessage = firstmessage + index;
/*
* Note the check (curmessage < firstmessage). It is not
* redundant. Suppose that the user gave us an index
* equal to ULONG_MAX - 1. If firstmessage > 1, then
* firstmessage + index < firstmessage!
*/
if ((curmessage < firstmessage) || (curmessage > lastmessage)) {
rejected++;
cp = linefeed + 1;
continue;
}
msg_stored = spk_msg_set(curmessage, temp, desc_length);
if (msg_stored < 0) {
retval = msg_stored;
if (msg_stored == -ENOMEM)
reset = 1;
break;
} else {
used++;
}
cp = linefeed + 1;
}
if (reset)
spk_reset_msg_group(group);
report_msg_status(reset, received, used, rejected, group->name);
return retval;
}
static ssize_t message_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t retval = 0;
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
BUG_ON(!group);
spk_lock(flags);
retval = message_show_helper(buf, group->start, group->end);
spk_unlock(flags);
return retval;
}
static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t retval = 0;
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
BUG_ON(!group);
retval = message_store_helper(buf, count, group);
return retval;
}
/*
* Declare the attributes.
*/
static struct kobj_attribute keymap_attribute =
__ATTR(keymap, ROOT_W, keymap_show, keymap_store);
static struct kobj_attribute silent_attribute =
__ATTR(silent, USER_W, NULL, silent_store);
static struct kobj_attribute synth_attribute =
__ATTR(synth, USER_RW, synth_show, synth_store);
static struct kobj_attribute synth_direct_attribute =
__ATTR(synth_direct, USER_W, NULL, synth_direct_store);
static struct kobj_attribute version_attribute =
__ATTR_RO(version);
static struct kobj_attribute delimiters_attribute =
__ATTR(delimiters, USER_RW, punc_show, punc_store);
static struct kobj_attribute ex_num_attribute =
__ATTR(ex_num, USER_RW, punc_show, punc_store);
static struct kobj_attribute punc_all_attribute =
__ATTR(punc_all, USER_RW, punc_show, punc_store);
static struct kobj_attribute punc_most_attribute =
__ATTR(punc_most, USER_RW, punc_show, punc_store);
static struct kobj_attribute punc_some_attribute =
__ATTR(punc_some, USER_RW, punc_show, punc_store);
static struct kobj_attribute repeats_attribute =
__ATTR(repeats, USER_RW, punc_show, punc_store);
static struct kobj_attribute attrib_bleep_attribute =
__ATTR(attrib_bleep, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute bell_pos_attribute =
__ATTR(bell_pos, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute bleep_time_attribute =
__ATTR(bleep_time, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute bleeps_attribute =
__ATTR(bleeps, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute cursor_time_attribute =
__ATTR(cursor_time, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute key_echo_attribute =
__ATTR(key_echo, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute no_interrupt_attribute =
__ATTR(no_interrupt, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punc_level_attribute =
__ATTR(punc_level, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute reading_punc_attribute =
__ATTR(reading_punc, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute say_control_attribute =
__ATTR(say_control, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute say_word_ctl_attribute =
__ATTR(say_word_ctl, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute spell_delay_attribute =
__ATTR(spell_delay, USER_RW, spk_var_show, spk_var_store);
/*
* These attributes are i18n related.
*/
static struct kobj_attribute announcements_attribute =
__ATTR(announcements, USER_RW, message_show, message_store);
static struct kobj_attribute characters_attribute =
__ATTR(characters, USER_RW, chars_chartab_show, chars_chartab_store);
static struct kobj_attribute chartab_attribute =
__ATTR(chartab, USER_RW, chars_chartab_show, chars_chartab_store);
static struct kobj_attribute ctl_keys_attribute =
__ATTR(ctl_keys, USER_RW, message_show, message_store);
static struct kobj_attribute colors_attribute =
__ATTR(colors, USER_RW, message_show, message_store);
static struct kobj_attribute formatted_attribute =
__ATTR(formatted, USER_RW, message_show, message_store);
static struct kobj_attribute function_names_attribute =
__ATTR(function_names, USER_RW, message_show, message_store);
static struct kobj_attribute key_names_attribute =
__ATTR(key_names, USER_RW, message_show, message_store);
static struct kobj_attribute states_attribute =
__ATTR(states, USER_RW, message_show, message_store);
/*
* Create groups of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *main_attrs[] = {
&keymap_attribute.attr,
&silent_attribute.attr,
&synth_attribute.attr,
&synth_direct_attribute.attr,
&version_attribute.attr,
&delimiters_attribute.attr,
&ex_num_attribute.attr,
&punc_all_attribute.attr,
&punc_most_attribute.attr,
&punc_some_attribute.attr,
&repeats_attribute.attr,
&attrib_bleep_attribute.attr,
&bell_pos_attribute.attr,
&bleep_time_attribute.attr,
&bleeps_attribute.attr,
&cursor_time_attribute.attr,
&key_echo_attribute.attr,
&no_interrupt_attribute.attr,
&punc_level_attribute.attr,
&reading_punc_attribute.attr,
&say_control_attribute.attr,
&say_word_ctl_attribute.attr,
&spell_delay_attribute.attr,
NULL,
};
static struct attribute *i18n_attrs[] = {
&announcements_attribute.attr,
&characters_attribute.attr,
&chartab_attribute.attr,
&ctl_keys_attribute.attr,
&colors_attribute.attr,
&formatted_attribute.attr,
&function_names_attribute.attr,
&key_names_attribute.attr,
&states_attribute.attr,
NULL,
};
/*
* An unnamed attribute group will put all of the attributes directly in
* the kobject directory. If we specify a name, a subdirectory will be
* created for the attributes with the directory being the name of the
* attribute group.
*/
static struct attribute_group main_attr_group = {
.attrs = main_attrs,
};
static struct attribute_group i18n_attr_group = {
.attrs = i18n_attrs,
.name = "i18n",
};
static struct kobject *accessibility_kobj;
struct kobject *speakup_kobj;
int speakup_kobj_init(void)
{
int retval;
/*
* Create a simple kobject with the name of "accessibility",
* located under /sys/
*
* As this is a simple directory, no uevent will be sent to
* userspace. That is why this function should not be used for
* any type of dynamic kobjects, where the name and number are
* not known ahead of time.
*/
accessibility_kobj = kobject_create_and_add("accessibility", NULL);
if (!accessibility_kobj) {
retval = -ENOMEM;
goto out;
}
speakup_kobj = kobject_create_and_add("speakup", accessibility_kobj);
if (!speakup_kobj) {
retval = -ENOMEM;
goto err_acc;
}
/* Create the files associated with this kobject */
retval = sysfs_create_group(speakup_kobj, &main_attr_group);
if (retval)
goto err_speakup;
retval = sysfs_create_group(speakup_kobj, &i18n_attr_group);
if (retval)
goto err_group;
goto out;
err_group:
sysfs_remove_group(speakup_kobj, &main_attr_group);
err_speakup:
kobject_put(speakup_kobj);
err_acc:
kobject_put(accessibility_kobj);
out:
return retval;
}
void speakup_kobj_exit(void)
{
sysfs_remove_group(speakup_kobj, &i18n_attr_group);
sysfs_remove_group(speakup_kobj, &main_attr_group);
kobject_put(speakup_kobj);
kobject_put(accessibility_kobj);
}
| gpl-2.0 |
MoKee/android_kernel_motorola_apq8084 | arch/powerpc/kernel/ptrace32.c | 2087 | 8718 | /*
* ptrace for 32-bit processes running on a 64-bit kernel.
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/* Macros to workout the correct index for the FPR in the thread struct */
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
#define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
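/* For example, with TS_FPRWIDTH == 1, PT_FPR0 + 3 maps through FPRINDEX() to 32-bit slot 3, i.e. the second 32-bit word of FPR1. */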
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
int ret;
switch (request) {
/*
* Read 4 bytes of the other process' storage
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is a pointer in the user's storage that contains an 8 byte
* address in the other process of the 4 bytes that is to be read
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PPC_PTRACE_PEEKTEXT_3264:
case PPC_PTRACE_PEEKDATA_3264: {
u32 tmp;
int copied;
u32 __user * addrOthers;
ret = -EIO;
/* Get the addr in the other process that we want to read */
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), 0);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *)data);
break;
}
/* Read a register (specified by ADDR) out of the "user area" */
case PTRACE_PEEKUSR: {
int index;
unsigned long tmp;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || (index > PT_FPSCR32))
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_get_reg(child, index, &tmp);
if (ret)
break;
} else {
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
tmp = ((unsigned int *)child->thread.fpr)
[FPRINDEX(index)];
}
ret = put_user((unsigned int)tmp, (u32 __user *)data);
break;
}
/*
* Read 4 bytes out of the other process' pt_regs area
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is the offset into the other process' pt_regs structure
* that is to be read
* (this is run in a 32-bit process looking at a 64-bit process)
*/
case PPC_PTRACE_PEEKUSR_3264: {
u32 index;
u32 reg32bits;
u64 tmp;
u32 numReg;
u32 part;
ret = -EIO;
/* Determine which register the user wants */
index = (u64)addr >> 2;
numReg = index / 2;
/* Determine which part of the register the user wants */
if (index % 2)
part = 1; /* want the 2nd half of the register (right-most). */
else
part = 0; /* want the 1st half of the register (left-most). */
/* Validate the input - check to see if address is on the wrong boundary
* or beyond the end of the user area
*/
if ((addr & 3) || numReg > PT_FPSCR)
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
tmp = ((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)];
} else { /* register within PT_REGS struct */
unsigned long tmp2;
ret = ptrace_get_reg(child, numReg, &tmp2);
if (ret)
break;
tmp = tmp2;
}
reg32bits = ((u32*)&tmp)[part];
ret = put_user(reg32bits, (u32 __user *)data);
break;
}
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
* addr is a pointer in the user's storage that contains an
* 8 byte address in the other process where the 4 bytes
* that is to be written
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PPC_PTRACE_POKETEXT_3264:
case PPC_PTRACE_POKEDATA_3264: {
u32 tmp = data;
u32 __user * addrOthers;
/* Get the addr in the other process that we want to write into */
ret = -EIO;
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), 1) == sizeof(tmp))
break;
ret = -EIO;
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || (index > PT_FPSCR32))
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
((unsigned int *)child->thread.fpr)
[FPRINDEX(index)] = data;
ret = 0;
}
break;
}
/*
* Write 4 bytes into the other process' pt_regs area
* data is the 4 bytes that the user wants written
* addr is the offset into the other process' pt_regs structure
* that is to be written into
* (this is run in a 32-bit process looking at a 64-bit process)
*/
case PPC_PTRACE_POKEUSR_3264: {
u32 index;
u32 numReg;
ret = -EIO;
/* Determine which register the user wants */
index = (u64)addr >> 2;
numReg = index / 2;
/*
* Validate the input - check to see if address is on the
* wrong boundary or beyond the end of the user area
*/
if ((addr & 3) || (numReg > PT_FPSCR))
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
unsigned long freg;
ret = ptrace_get_reg(child, numReg, &freg);
if (ret)
break;
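/* Splice the 32-bit value into the selected half of the register: the low word for an odd index, the high word for an even one. */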
if (index % 2)
freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
else
freg = (freg & 0xfffffffful) | (data << 32);
ret = ptrace_put_reg(child, numReg, freg);
} else {
u64 *tmp;
flush_fp_to_thread(child);
/* get 64 bit FPR ... */
tmp = &(((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)]);
/* ... write the 32 bit part we want */
((u32 *)tmp)[index % 2] = data;
ret = 0;
}
break;
}
case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr_fake;
#endif
ret = -EINVAL;
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, (u32 __user *)data);
#else
dabr_fake = (
(child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
(child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
ret = put_user(dabr_fake, (u32 __user *)data);
#endif
break;
}
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(
child, task_user_regset_view(current), 0,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(
child, task_user_regset_view(current), 0,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
case PTRACE_GETVRREGS:
case PTRACE_SETVRREGS:
case PTRACE_GETVSRREGS:
case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
case PTRACE_KILL:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
case PTRACE_SET_DEBUGREG:
case PTRACE_SYSCALL:
case PTRACE_CONT:
case PPC_PTRACE_GETHWDBGINFO:
case PPC_PTRACE_SETHWDEBUG:
case PPC_PTRACE_DELHWDEBUG:
ret = arch_ptrace(child, request, addr, data);
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
return ret;
}
| gpl-2.0 |
DZB-Team/zombie_kernel_sprout | drivers/net/ethernet/sun/sunqe.c | 2087 | 25758 | /* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
* Once again I am out to prove that every ethernet
* controller out there can be most efficiently programmed
* if you make it look like a LANCE.
*
* Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include "sunqe.h"
#define DRV_NAME "sunqe"
#define DRV_VERSION "4.1"
#define DRV_RELDATE "August 27, 2008"
#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
static char version[] =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");
static struct sunqec *root_qec_dev;
static void qe_set_multicast(struct net_device *dev);
#define QEC_RESET_TRIES 200
static inline int qec_global_reset(void __iomem *gregs)
{
int tries = QEC_RESET_TRIES;
sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
while (--tries) {
u32 tmp = sbus_readl(gregs + GLOB_CTRL);
if (tmp & GLOB_CTRL_RESET) {
udelay(20);
continue;
}
break;
}
if (tries)
return 0;
printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
return -1;
}
#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES 200
static inline int qe_stop(struct sunqe *qep)
{
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
int tries;
/* Reset the MACE, then the QEC channel. */
sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
tries = MACE_RESET_RETRIES;
while (--tries) {
u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
if (tmp & MREGS_BCONFIG_RESET) {
udelay(20);
continue;
}
break;
}
if (!tries) {
printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
return -1;
}
sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
tries = QE_RESET_RETRIES;
while (--tries) {
u32 tmp = sbus_readl(cregs + CREG_CTRL);
if (tmp & CREG_CTRL_RESET) {
udelay(20);
continue;
}
break;
}
if (!tries) {
printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
return -1;
}
return 0;
}
static void qe_init_rings(struct sunqe *qep)
{
struct qe_init_block *qb = qep->qe_block;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
int i;
qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
memset(qb, 0, sizeof(struct qe_init_block));
memset(qbufs, 0, sizeof(struct sunqe_buffers));
for (i = 0; i < RX_RING_SIZE; i++) {
qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
qb->qe_rxd[i].rx_flags =
(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
}
}
static int qe_init(struct sunqe *qep, int from_irq)
{
struct sunqec *qecp = qep->parent;
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
unsigned char *e = &qep->dev->dev_addr[0];
u32 tmp;
int i;
/* Shut it up. */
if (qe_stop(qep))
return -EAGAIN;
/* Setup initial rx/tx init block pointers. */
sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
/* Enable/mask the various irq's. */
sbus_writel(0, cregs + CREG_RIMASK);
sbus_writel(1, cregs + CREG_TIMASK);
sbus_writel(0, cregs + CREG_QMASK);
sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
/* Setup the FIFO pointers into QEC local memory. */
tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
sbus_readl(gregs + GLOB_RSIZE);
sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
/* Clear the channel collision counter. */
sbus_writel(0, cregs + CREG_CCNT);
/* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
sbus_writel(0, cregs + CREG_PIPG);
/* Now dork with the AMD MACE. */
sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
sbus_writeb(0, mregs + MREGS_RXFCNTL);
/* The QEC dma's the rx'd packets from local memory out to main memory,
* and therefore it interrupts when the packet reception is "complete".
* So don't listen for the MACE talking about it.
*/
sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
mregs + MREGS_FCONFIG);
/* Only usable interface on QuadEther is twisted pair. */
sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
/* Tell MACE we are changing the ether address. */
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
mregs + MREGS_IACONFIG);
while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
sbus_writeb(e[0], mregs + MREGS_ETHADDR);
sbus_writeb(e[1], mregs + MREGS_ETHADDR);
sbus_writeb(e[2], mregs + MREGS_ETHADDR);
sbus_writeb(e[3], mregs + MREGS_ETHADDR);
sbus_writeb(e[4], mregs + MREGS_ETHADDR);
sbus_writeb(e[5], mregs + MREGS_ETHADDR);
/* Clear out the address filter. */
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
mregs + MREGS_IACONFIG);
while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
for (i = 0; i < 8; i++)
sbus_writeb(0, mregs + MREGS_FILTER);
/* Address changes are now complete. */
sbus_writeb(0, mregs + MREGS_IACONFIG);
qe_init_rings(qep);
/* Wait a little bit for the link to come up... */
mdelay(5);
if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
int tries = 50;
while (--tries) {
u8 tmp;
mdelay(5);
barrier();
tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
break;
}
if (tries == 0)
printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
}
/* Missed packet counter is cleared on a read. */
sbus_readb(mregs + MREGS_MPCNT);
/* Reload multicast information, this will enable the receiver
* and transmitter.
*/
qe_set_multicast(qep->dev);
/* QEC should now start to show interrupts. */
return 0;
}
/* Grrr, certain error conditions completely lock up the AMD MACE,
* so when we get these we _must_ reset the chip.
*/
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
struct net_device *dev = qep->dev;
int mace_hwbug_workaround = 0;
if (qe_status & CREG_STAT_EDEFER) {
printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
dev->stats.tx_errors++;
}
if (qe_status & CREG_STAT_CLOSS) {
printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
}
if (qe_status & CREG_STAT_ERETRIES) {
printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
dev->stats.tx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_LCOLL) {
printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.collisions++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_FUFLOW) {
printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
dev->stats.tx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_JERROR) {
printk(KERN_ERR "%s: Jabber error.\n", dev->name);
}
if (qe_status & CREG_STAT_BERROR) {
printk(KERN_ERR "%s: Babble error.\n", dev->name);
}
if (qe_status & CREG_STAT_CCOFLOW) {
dev->stats.tx_errors += 256;
dev->stats.collisions += 256;
}
if (qe_status & CREG_STAT_TXDERROR) {
printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_TXLERR) {
printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
dev->stats.tx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_TXPERR) {
printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_TXSERR) {
printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_RCCOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.collisions += 256;
}
if (qe_status & CREG_STAT_RUOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_over_errors += 256;
}
if (qe_status & CREG_STAT_MCOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_missed_errors += 256;
}
if (qe_status & CREG_STAT_RXFOFLOW) {
printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_over_errors++;
}
if (qe_status & CREG_STAT_RLCOLL) {
printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.collisions++;
}
if (qe_status & CREG_STAT_FCOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_frame_errors += 256;
}
if (qe_status & CREG_STAT_CECOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_crc_errors += 256;
}
if (qe_status & CREG_STAT_RXDROP) {
printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_dropped++;
dev->stats.rx_missed_errors++;
}
if (qe_status & CREG_STAT_RXSMALL) {
printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
if (qe_status & CREG_STAT_RXLERR) {
printk(KERN_ERR "%s: Receive late error.\n", dev->name);
dev->stats.rx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_RXPERR) {
printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_RXSERR) {
printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
mace_hwbug_workaround = 1;
}
if (mace_hwbug_workaround)
qe_init(qep, 1);
return mace_hwbug_workaround;
}
/* Per-QE receive interrupt service routine. Just like on the happy meal
* we receive directly into skb's with a small packet copy water mark.
*/
static void qe_rx(struct sunqe *qep)
{
struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
struct net_device *dev = qep->dev;
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
int elem = qep->rx_new;
u32 flags;
this = &rxbase[elem];
while (!((flags = this->rx_flags) & RXD_OWN)) {
struct sk_buff *skb;
unsigned char *this_qbuf =
&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
__u32 this_qbuf_dvma = qbufs_dvma +
qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
struct qe_rxd *end_rxd =
&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */
/* Check for errors. */
if (len < ETH_ZLEN) {
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
dev->stats.rx_dropped++;
} else {
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
skb_copy_to_linear_data(skb, this_qbuf,
len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
}
}
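/* Recycle the drained buffer onto the descriptor RX_RING_SIZE slots ahead and hand ownership back to the chip. */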
end_rxd->rx_addr = this_qbuf_dvma;
end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
elem = NEXT_RX(elem);
this = &rxbase[elem];
}
qep->rx_new = elem;
}
static void qe_tx_reclaim(struct sunqe *qep);
/* Interrupts for all QE's get filtered out via the QEC master controller,
* so we just run through each qe and check to see who is signaling
* and thus needs to be serviced.
*/
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
struct sunqec *qecp = dev_id;
u32 qec_status;
int channel = 0;
/* Latch the status now. */
qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
while (channel < 4) {
if (qec_status & 0xf) {
struct sunqe *qep = qecp->qes[channel];
u32 qe_status;
qe_status = sbus_readl(qep->qcregs + CREG_STAT);
if (qe_status & CREG_STAT_ERRORS) {
if (qe_is_bolixed(qep, qe_status))
goto next;
}
if (qe_status & CREG_STAT_RXIRQ)
qe_rx(qep);
if (netif_queue_stopped(qep->dev) &&
(qe_status & CREG_STAT_TXIRQ)) {
spin_lock(&qep->lock);
qe_tx_reclaim(qep);
if (TX_BUFFS_AVAIL(qep) > 0) {
/* Wake net queue and return to
* lazy tx reclaim.
*/
netif_wake_queue(qep->dev);
sbus_writel(1, qep->qcregs + CREG_TIMASK);
}
spin_unlock(&qep->lock);
}
next:
;
}
qec_status >>= 4;
channel++;
}
return IRQ_HANDLED;
}
static int qe_open(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
qep->mconfig = (MREGS_MCONFIG_TXENAB |
MREGS_MCONFIG_RXENAB |
MREGS_MCONFIG_MBAENAB);
return qe_init(qep, 0);
}
static int qe_close(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
qe_stop(qep);
return 0;
}
/* Reclaim TX'd frames from the ring. This must always run under
* the IRQ protected qep->lock.
*/
static void qe_tx_reclaim(struct sunqe *qep)
{
struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
int elem = qep->tx_old;
while (elem != qep->tx_new) {
u32 flags = txbase[elem].tx_flags;
if (flags & TXD_OWN)
break;
elem = NEXT_TX(elem);
}
qep->tx_old = elem;
}
static void qe_tx_timeout(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
int tx_full;
spin_lock_irq(&qep->lock);
/* Try to reclaim, if that frees up some tx
* entries, we're fine.
*/
qe_tx_reclaim(qep);
tx_full = TX_BUFFS_AVAIL(qep) <= 0;
spin_unlock_irq(&qep->lock);
if (! tx_full)
goto out;
printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
qe_init(qep, 1);
out:
netif_wake_queue(dev);
}
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct sunqe_buffers *qbufs = qep->buffers;
__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
unsigned char *txbuf;
int len, entry;
spin_lock_irq(&qep->lock);
qe_tx_reclaim(qep);
len = skb->len;
entry = qep->tx_new;
txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
txbuf_dvma = qbufs_dvma +
qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
/* Avoid a race... */
qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
skb_copy_from_linear_data(skb, txbuf, len);
qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
qep->qe_block->qe_txd[entry].tx_flags =
(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
qep->tx_new = NEXT_TX(entry);
/* Get it going. */
sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
if (TX_BUFFS_AVAIL(qep) <= 0) {
/* Halt the net queue and enable tx interrupts.
* When the tx queue empties the tx irq handler
* will wake up the queue and return us back to
* the lazy tx reclaim scheme.
*/
netif_stop_queue(dev);
sbus_writel(0, qep->qcregs + CREG_TIMASK);
}
spin_unlock_irq(&qep->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
int i;
u32 crc;
/* Lock out others. */
netif_stop_queue(dev);
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
for (i = 0; i < 8; i++)
sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
} else if (dev->flags & IFF_PROMISC) {
new_mconfig |= MREGS_MCONFIG_PROMISC;
} else {
u16 hash_table[4];
u8 *hbytes = (unsigned char *) &hash_table[0];
memset(hash_table, 0, sizeof(hash_table));
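/* Build the 64-bit logical address filter: the top six bits of each address's little-endian CRC select one bit in the hash table. */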
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
/* Program the qe with the new filter value. */
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
for (i = 0; i < 8; i++) {
u8 tmp = *hbytes++;
sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
}
sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
}
/* Any change of the logical address filter, the physical address,
* or enabling/disabling promiscuous mode causes the MACE to disable
* the receiver. So we must re-enable them here or else the MACE
* refuses to listen to anything on the network. Sheesh, took
* me a day or two to find this bug.
*/
qep->mconfig = new_mconfig;
sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
/* Let us get going again. */
netif_wake_queue(dev);
}
/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
const struct linux_prom_registers *regs;
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
strlcpy(info->driver, "sunqe", sizeof(info->driver));
strlcpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
regs->which_io);
}
static u32 qe_get_link(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
void __iomem *mregs = qep->mregs;
u8 phyconfig;
spin_lock_irq(&qep->lock);
phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
spin_unlock_irq(&qep->lock);
return phyconfig & MREGS_PHYCONFIG_LSTAT;
}
static const struct ethtool_ops qe_ethtool_ops = {
.get_drvinfo = qe_get_drvinfo,
.get_link = qe_get_link,
};
/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
u8 bsizes = qecp->qec_bursts;
if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
} else if (bsizes & DMA_BURST32) {
sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
} else {
sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
}
/* Packetsize only used in 100baseT BigMAC configurations,
* set it to zero just to be on the safe side.
*/
sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
/* Set the local memsize register, divided up to one piece per QE channel. */
sbus_writel((resource_size(&op->resource[1]) >> 2),
qecp->gregs + GLOB_MSIZE);
/* Divide up the local QEC memory amongst the 4 QE receiver and
* transmitter FIFOs. Basically it is (total / 2 / num_channels).
*/
sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
qecp->gregs + GLOB_TSIZE);
sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
qecp->gregs + GLOB_RSIZE);
}
static u8 qec_get_burst(struct device_node *dp)
{
u8 bsizes, bsizes_more;
/* Find and set the burst sizes for the QEC, since it
* does the actual dma for all 4 channels.
*/
bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
bsizes &= 0xff;
bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
if (bsizes_more != 0xff)
bsizes &= bsizes_more;
if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
(bsizes & DMA_BURST32)==0)
bsizes = (DMA_BURST32 - 1);
return bsizes;
}
static struct sunqec *get_qec(struct platform_device *child)
{
struct platform_device *op = to_platform_device(child->dev.parent);
struct sunqec *qecp;
qecp = dev_get_drvdata(&op->dev);
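/* One QEC controller hosts four QE channels; the shared controller state is set up on the first channel probed and reused for the rest. */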
if (!qecp) {
qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
if (qecp) {
u32 ctrl;
qecp->op = op;
qecp->gregs = of_ioremap(&op->resource[0], 0,
GLOB_REG_SIZE,
"QEC Global Registers");
if (!qecp->gregs)
goto fail;
/* Make sure the QEC is in MACE mode. */
ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
ctrl &= 0xf0000000;
if (ctrl != GLOB_CTRL_MMODE) {
printk(KERN_ERR "qec: Not in MACE mode!\n");
goto fail;
}
if (qec_global_reset(qecp->gregs))
goto fail;
qecp->qec_bursts = qec_get_burst(op->dev.of_node);
qec_init_once(qecp, op);
if (request_irq(op->archdata.irqs[0], qec_interrupt,
IRQF_SHARED, "qec", (void *) qecp)) {
printk(KERN_ERR "qec: Can't register irq.\n");
goto fail;
}
dev_set_drvdata(&op->dev, qecp);
qecp->next_module = root_qec_dev;
root_qec_dev = qecp;
}
}
return qecp;
fail:
if (qecp->gregs)
of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
kfree(qecp);
return NULL;
}
static const struct net_device_ops qec_ops = {
.ndo_open = qe_open,
.ndo_stop = qe_close,
.ndo_start_xmit = qe_start_xmit,
.ndo_set_rx_mode = qe_set_multicast,
.ndo_tx_timeout = qe_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int qec_ether_init(struct platform_device *op)
{
static unsigned version_printed;
struct net_device *dev;
struct sunqec *qecp;
struct sunqe *qe;
int i, res;
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
dev = alloc_etherdev(sizeof(struct sunqe));
if (!dev)
return -ENOMEM;
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
qe = netdev_priv(dev);
res = -ENODEV;
i = of_getintprop_default(op->dev.of_node, "channel#", -1);
if (i == -1)
goto fail;
qe->channel = i;
spin_lock_init(&qe->lock);
qecp = get_qec(op);
if (!qecp)
goto fail;
qecp->qes[qe->channel] = qe;
qe->dev = dev;
qe->parent = qecp;
qe->op = op;
res = -ENOMEM;
qe->qcregs = of_ioremap(&op->resource[0], 0,
CREG_REG_SIZE, "QEC Channel Registers");
if (!qe->qcregs) {
printk(KERN_ERR "qe: Cannot map channel registers.\n");
goto fail;
}
qe->mregs = of_ioremap(&op->resource[1], 0,
MREGS_REG_SIZE, "QE MACE Registers");
if (!qe->mregs) {
printk(KERN_ERR "qe: Cannot map MACE registers.\n");
goto fail;
}
qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
&qe->qblock_dvma, GFP_ATOMIC);
qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
&qe->buffers_dvma, GFP_ATOMIC);
if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
qe->buffers == NULL || qe->buffers_dvma == 0)
goto fail;
/* Stop this QE. */
qe_stop(qe);
SET_NETDEV_DEV(dev, &op->dev);
dev->watchdog_timeo = 5*HZ;
dev->irq = op->archdata.irqs[0];
dev->dma = 0;
dev->ethtool_ops = &qe_ethtool_ops;
dev->netdev_ops = &qec_ops;
res = register_netdev(dev);
if (res)
goto fail;
dev_set_drvdata(&op->dev, qe);
printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
dev->dev_addr);
return 0;
fail:
if (qe->qcregs)
of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
if (qe->mregs)
of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
if (qe->qe_block)
dma_free_coherent(&op->dev, PAGE_SIZE,
qe->qe_block, qe->qblock_dvma);
if (qe->buffers)
dma_free_coherent(&op->dev,
sizeof(struct sunqe_buffers),
qe->buffers,
qe->buffers_dvma);
free_netdev(dev);
return res;
}
static int qec_sbus_probe(struct platform_device *op)
{
return qec_ether_init(op);
}
static int qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = dev_get_drvdata(&op->dev);
struct net_device *net_dev = qp->dev;
unregister_netdev(net_dev);
of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
dma_free_coherent(&op->dev, PAGE_SIZE,
qp->qe_block, qp->qblock_dvma);
dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
qp->buffers, qp->buffers_dvma);
free_netdev(net_dev);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
static const struct of_device_id qec_sbus_match[] = {
{
.name = "qe",
},
{},
};
MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct platform_driver qec_sbus_driver = {
.driver = {
.name = "qec",
.owner = THIS_MODULE,
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
.remove = qec_sbus_remove,
};
static int __init qec_init(void)
{
return platform_driver_register(&qec_sbus_driver);
}
static void __exit qec_exit(void)
{
platform_driver_unregister(&qec_sbus_driver);
while (root_qec_dev) {
struct sunqec *next = root_qec_dev->next_module;
struct platform_device *op = root_qec_dev->op;
free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
of_iounmap(&op->resource[0], root_qec_dev->gregs,
GLOB_REG_SIZE);
kfree(root_qec_dev);
root_qec_dev = next;
}
}
module_init(qec_init);
module_exit(qec_exit);
| gpl-2.0 |
superac11/x500_letv_kernel_3.10_K11 | drivers/usb/misc/usb3503.c | 2087 | 7703 | /*
* Driver for SMSC USB3503 USB 2.0 hub controller driver
*
* Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb3503.h>
#define USB3503_VIDL 0x00
#define USB3503_VIDM 0x01
#define USB3503_PIDL 0x02
#define USB3503_PIDM 0x03
#define USB3503_DIDL 0x04
#define USB3503_DIDM 0x05
#define USB3503_CFG1 0x06
#define USB3503_SELF_BUS_PWR (1 << 7)
#define USB3503_CFG2 0x07
#define USB3503_CFG3 0x08
#define USB3503_NRD 0x09
#define USB3503_PDS 0x0a
#define USB3503_PORT1 (1 << 1)
#define USB3503_PORT2 (1 << 2)
#define USB3503_PORT3 (1 << 3)
#define USB3503_SP_ILOCK 0xe7
#define USB3503_SPILOCK_CONNECT (1 << 1)
#define USB3503_SPILOCK_CONFIG (1 << 0)
#define USB3503_CFGP 0xee
#define USB3503_CLKSUSP (1 << 7)
struct usb3503 {
enum usb3503_mode mode;
struct i2c_client *client;
int gpio_intn;
int gpio_reset;
int gpio_connect;
};
static int usb3503_write_register(struct i2c_client *client,
char reg, char data)
{
return i2c_smbus_write_byte_data(client, reg, data);
}
static int usb3503_read_register(struct i2c_client *client, char reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static int usb3503_set_bits(struct i2c_client *client, char reg, char req)
{
int err;
err = usb3503_read_register(client, reg);
if (err < 0)
return err;
err = usb3503_write_register(client, reg, err | req);
if (err < 0)
return err;
return 0;
}
static int usb3503_clear_bits(struct i2c_client *client, char reg, char req)
{
int err;
err = usb3503_read_register(client, reg);
if (err < 0)
return err;
err = usb3503_write_register(client, reg, err & ~req);
if (err < 0)
return err;
return 0;
}
static int usb3503_reset(int gpio_reset, int state)
{
if (gpio_is_valid(gpio_reset))
gpio_set_value(gpio_reset, state);
/* Wait RefClk when RESET_N is released, otherwise Hub will
* not transition to Hub Communication Stage.
*/
if (state)
msleep(100);
return 0;
}
static int usb3503_switch_mode(struct usb3503 *hub, enum usb3503_mode mode)
{
struct i2c_client *i2c = hub->client;
int err = 0;
switch (mode) {
case USB3503_MODE_HUB:
usb3503_reset(hub->gpio_reset, 1);
/* SP_ILOCK: set connect_n, config_n for config */
err = usb3503_write_register(i2c, USB3503_SP_ILOCK,
(USB3503_SPILOCK_CONNECT
| USB3503_SPILOCK_CONFIG));
if (err < 0) {
dev_err(&i2c->dev, "SP_ILOCK failed (%d)\n", err);
goto err_hubmode;
}
/* PDS : Port2,3 Disable For Self Powered Operation */
err = usb3503_set_bits(i2c, USB3503_PDS,
(USB3503_PORT2 | USB3503_PORT3));
if (err < 0) {
dev_err(&i2c->dev, "PDS failed (%d)\n", err);
goto err_hubmode;
}
/* CFG1 : SELF_BUS_PWR -> Self-Powered operation */
err = usb3503_set_bits(i2c, USB3503_CFG1, USB3503_SELF_BUS_PWR);
if (err < 0) {
dev_err(&i2c->dev, "CFG1 failed (%d)\n", err);
goto err_hubmode;
}
/* SP_LOCK: clear connect_n, config_n for hub connect */
err = usb3503_clear_bits(i2c, USB3503_SP_ILOCK,
(USB3503_SPILOCK_CONNECT
| USB3503_SPILOCK_CONFIG));
if (err < 0) {
dev_err(&i2c->dev, "SP_ILOCK failed (%d)\n", err);
goto err_hubmode;
}
hub->mode = mode;
dev_info(&i2c->dev, "switched to HUB mode\n");
break;
case USB3503_MODE_STANDBY:
usb3503_reset(hub->gpio_reset, 0);
hub->mode = mode;
dev_info(&i2c->dev, "switched to STANDBY mode\n");
break;
default:
dev_err(&i2c->dev, "unknown mode is request\n");
err = -EINVAL;
break;
}
err_hubmode:
return err;
}
static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct usb3503_platform_data *pdata = i2c->dev.platform_data;
struct device_node *np = i2c->dev.of_node;
struct usb3503 *hub;
int err = -ENOMEM;
u32 mode = USB3503_MODE_UNKNOWN;
hub = kzalloc(sizeof(struct usb3503), GFP_KERNEL);
if (!hub) {
dev_err(&i2c->dev, "private data alloc fail\n");
return err;
}
i2c_set_clientdata(i2c, hub);
hub->client = i2c;
if (pdata) {
hub->gpio_intn = pdata->gpio_intn;
hub->gpio_connect = pdata->gpio_connect;
hub->gpio_reset = pdata->gpio_reset;
hub->mode = pdata->initial_mode;
} else if (np) {
hub->gpio_intn = of_get_named_gpio(np, "intn-gpios", 0);
if (hub->gpio_intn == -EPROBE_DEFER)
return -EPROBE_DEFER;
hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
if (hub->gpio_connect == -EPROBE_DEFER)
return -EPROBE_DEFER;
hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
if (hub->gpio_reset == -EPROBE_DEFER)
return -EPROBE_DEFER;
of_property_read_u32(np, "initial-mode", &mode);
hub->mode = mode;
}
if (gpio_is_valid(hub->gpio_intn)) {
err = gpio_request_one(hub->gpio_intn,
GPIOF_OUT_INIT_HIGH, "usb3503 intn");
if (err) {
dev_err(&i2c->dev,
"unable to request GPIO %d as connect pin (%d)\n",
hub->gpio_intn, err);
goto err_out;
}
}
if (gpio_is_valid(hub->gpio_connect)) {
err = gpio_request_one(hub->gpio_connect,
GPIOF_OUT_INIT_HIGH, "usb3503 connect");
if (err) {
dev_err(&i2c->dev,
"unable to request GPIO %d as connect pin (%d)\n",
hub->gpio_connect, err);
goto err_gpio_connect;
}
}
if (gpio_is_valid(hub->gpio_reset)) {
err = gpio_request_one(hub->gpio_reset,
GPIOF_OUT_INIT_LOW, "usb3503 reset");
if (err) {
dev_err(&i2c->dev,
"unable to request GPIO %d as reset pin (%d)\n",
hub->gpio_reset, err);
goto err_gpio_reset;
}
}
usb3503_switch_mode(hub, hub->mode);
dev_info(&i2c->dev, "%s: probed in %s mode\n", __func__,
(hub->mode == USB3503_MODE_HUB) ? "hub" : "standby");
return 0;
err_gpio_reset:
if (gpio_is_valid(hub->gpio_connect))
gpio_free(hub->gpio_connect);
err_gpio_connect:
if (gpio_is_valid(hub->gpio_intn))
gpio_free(hub->gpio_intn);
err_out:
kfree(hub);
return err;
}
static int usb3503_remove(struct i2c_client *i2c)
{
struct usb3503 *hub = i2c_get_clientdata(i2c);
if (gpio_is_valid(hub->gpio_intn))
gpio_free(hub->gpio_intn);
if (gpio_is_valid(hub->gpio_connect))
gpio_free(hub->gpio_connect);
if (gpio_is_valid(hub->gpio_reset))
gpio_free(hub->gpio_reset);
kfree(hub);
return 0;
}
static const struct i2c_device_id usb3503_id[] = {
{ USB3503_I2C_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, usb3503_id);
#ifdef CONFIG_OF
static const struct of_device_id usb3503_of_match[] = {
{ .compatible = "smsc,usb3503", },
{},
};
MODULE_DEVICE_TABLE(of, usb3503_of_match);
#endif
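/*
 * Illustrative device tree node for this hub (a sketch, not part of the
 * driver): the property names match what usb3503_probe() parses above,
 * while the I2C address, GPIO phandles and pin numbers are made-up
 * placeholders.
 *
 *	usb3503@8 {
 *		compatible = "smsc,usb3503";
 *		reg = <0x08>;
 *		intn-gpios = <&gpx3 0 1>;
 *		connect-gpios = <&gpx3 4 1>;
 *		reset-gpios = <&gpx3 5 1>;
 *		initial-mode = <1>;	(USB3503_MODE_HUB)
 *	};
 */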
static struct i2c_driver usb3503_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_probe,
.remove = usb3503_remove,
.id_table = usb3503_id,
};
module_i2c_driver(usb3503_driver);
MODULE_AUTHOR("Dongjin Kim <tobetter@gmail.com>");
MODULE_DESCRIPTION("USB3503 USB HUB driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kozmikkick/kozmikkernel3.8 | kernel/jump_label.c | 2343 | 11079 | /*
* jump label support
*
* Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
* Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
*
*/
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#ifdef HAVE_JUMP_LABEL
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);
void jump_label_lock(void)
{
mutex_lock(&jump_label_mutex);
}
void jump_label_unlock(void)
{
mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
const struct jump_entry *jea = a;
const struct jump_entry *jeb = b;
if (jea->key < jeb->key)
return -1;
if (jea->key > jeb->key)
return 1;
return 0;
}
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
unsigned long size;
size = (((unsigned long)stop - (unsigned long)start)
/ sizeof(struct jump_entry));
sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
static void jump_label_update(struct static_key *key, int enable);
void static_key_slow_inc(struct static_key *key)
{
if (atomic_inc_not_zero(&key->enabled))
return;
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
if (!jump_label_get_branch_default(key))
jump_label_update(key, JUMP_LABEL_ENABLE);
else
jump_label_update(key, JUMP_LABEL_DISABLE);
}
atomic_inc(&key->enabled);
jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
static void __static_key_slow_dec(struct static_key *key,
unsigned long rate_limit, struct delayed_work *work)
{
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
return;
}
if (rate_limit) {
atomic_inc(&key->enabled);
schedule_delayed_work(work, rate_limit);
} else {
if (!jump_label_get_branch_default(key))
jump_label_update(key, JUMP_LABEL_DISABLE);
else
jump_label_update(key, JUMP_LABEL_ENABLE);
}
jump_label_unlock();
}
static void jump_label_update_timeout(struct work_struct *work)
{
struct static_key_deferred *key =
container_of(work, struct static_key_deferred, work.work);
__static_key_slow_dec(&key->key, 0, NULL);
}
void static_key_slow_dec(struct static_key *key)
{
__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
key->timeout = rl;
INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
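#if 0
/*
 * Illustrative usage sketch, not compiled into this file: how a caller
 * elsewhere in the kernel typically drives the slow-path helpers above
 * (static_key_slow_inc/dec, or the _deferred/rate-limited variant).  The
 * key, the functions and the feature they guard are hypothetical.
 */
static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_fast_path(void)
{
	/* Compiles to a patched nop/jump; the body is normally skipped. */
	if (static_key_false(&example_key)) {
		/* rarely enabled instrumentation would run here */
	}
}

static void example_set_feature(bool on)
{
	if (on)
		static_key_slow_inc(&example_key);	/* patch branches in */
	else
		static_key_slow_dec(&example_key);	/* patch branches out */
}
#endif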
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
if (entry->code <= (unsigned long)end &&
entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
return 1;
return 0;
}
static int __jump_label_text_reserved(struct jump_entry *iter_start,
struct jump_entry *iter_stop, void *start, void *end)
{
struct jump_entry *iter;
iter = iter_start;
while (iter < iter_stop) {
if (addr_conflict(iter, start, end))
return 1;
iter++;
}
return 0;
}
/*
* Update code which is definitely not currently executing.
* Architectures which need heavyweight synchronization to modify
* running code can override this to make the non-live update case
* cheaper.
*/
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
arch_jump_label_transform(entry, type);
}
static void __jump_label_update(struct static_key *key,
struct jump_entry *entry,
struct jump_entry *stop, int enable)
{
for (; (entry < stop) &&
(entry->key == (jump_label_t)(unsigned long)key);
entry++) {
/*
* entry->code set to 0 invalidates module init text sections.
* kernel_text_address() verifies we are not in core kernel
* init code; see jump_label_invalidate_module_init().
*/
if (entry->code && kernel_text_address(entry->code))
arch_jump_label_transform(entry, enable);
}
}
static enum jump_label_type jump_label_type(struct static_key *key)
{
bool true_branch = jump_label_get_branch_default(key);
bool state = static_key_enabled(key);
if ((!true_branch && state) || (true_branch && !state))
return JUMP_LABEL_ENABLE;
return JUMP_LABEL_DISABLE;
}
void __init jump_label_init(void)
{
struct jump_entry *iter_start = __start___jump_table;
struct jump_entry *iter_stop = __stop___jump_table;
struct static_key *key = NULL;
struct jump_entry *iter;
jump_label_lock();
jump_label_sort_entries(iter_start, iter_stop);
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
iterk = (struct static_key *)(unsigned long)iter->key;
arch_jump_label_transform_static(iter, jump_label_type(iterk));
if (iterk == key)
continue;
key = iterk;
/*
* Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
*/
*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
key->next = NULL;
#endif
}
jump_label_unlock();
}
#ifdef CONFIG_MODULES
struct static_key_mod {
struct static_key_mod *next;
struct jump_entry *entries;
struct module *mod;
};
static int __jump_label_mod_text_reserved(void *start, void *end)
{
struct module *mod;
mod = __module_text_address((unsigned long)start);
if (!mod)
return 0;
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
return __jump_label_text_reserved(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries,
start, end);
}
static void __jump_label_mod_update(struct static_key *key, int enable)
{
struct static_key_mod *mod = key->next;
while (mod) {
struct module *m = mod->mod;
__jump_label_update(key, mod->entries,
m->jump_entries + m->num_jump_entries,
enable);
mod = mod->next;
}
}
/***
* jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
* @mod: module to patch
*
* Allow for run-time selection of the optimal nops. Before the module
* loads, patch its jump label entries with arch_get_jump_label_nop(), which
* is provided by the arch-specific jump label code.
*/
void jump_label_apply_nops(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
/* if the module doesn't have jump label entries, just return */
if (iter_start == iter_stop)
return;
for (iter = iter_start; iter < iter_stop; iter++) {
arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
}
}
static int jump_label_add_module(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
struct static_key *key = NULL;
struct static_key_mod *jlm;
/* if the module doesn't have jump label entries, just return */
if (iter_start == iter_stop)
return 0;
jump_label_sort_entries(iter_start, iter_stop);
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
iterk = (struct static_key *)(unsigned long)iter->key;
if (iterk == key)
continue;
key = iterk;
if (__module_address(iter->key) == mod) {
/*
* Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
*/
*((unsigned long *)&key->entries) += (unsigned long)iter;
key->next = NULL;
continue;
}
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
if (!jlm)
return -ENOMEM;
jlm->mod = mod;
jlm->entries = iter;
jlm->next = key->next;
key->next = jlm;
if (jump_label_type(key) == JUMP_LABEL_ENABLE)
__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
}
return 0;
}
static void jump_label_del_module(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
struct static_key *key = NULL;
struct static_key_mod *jlm, **prev;
for (iter = iter_start; iter < iter_stop; iter++) {
if (iter->key == (jump_label_t)(unsigned long)key)
continue;
key = (struct static_key *)(unsigned long)iter->key;
if (__module_address(iter->key) == mod)
continue;
prev = &key->next;
jlm = key->next;
while (jlm && jlm->mod != mod) {
prev = &jlm->next;
jlm = jlm->next;
}
if (jlm) {
*prev = jlm->next;
kfree(jlm);
}
}
}
static void jump_label_invalidate_module_init(struct module *mod)
{
struct jump_entry *iter_start = mod->jump_entries;
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
struct jump_entry *iter;
for (iter = iter_start; iter < iter_stop; iter++) {
if (within_module_init(iter->code, mod))
iter->code = 0;
}
}
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
void *data)
{
struct module *mod = data;
int ret = 0;
switch (val) {
case MODULE_STATE_COMING:
jump_label_lock();
ret = jump_label_add_module(mod);
if (ret)
jump_label_del_module(mod);
jump_label_unlock();
break;
case MODULE_STATE_GOING:
jump_label_lock();
jump_label_del_module(mod);
jump_label_unlock();
break;
case MODULE_STATE_LIVE:
jump_label_lock();
jump_label_invalidate_module_init(mod);
jump_label_unlock();
break;
}
return notifier_from_errno(ret);
}
struct notifier_block jump_label_module_nb = {
.notifier_call = jump_label_module_notify,
.priority = 1, /* higher than tracepoints */
};
static __init int jump_label_init_module(void)
{
return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);
#endif /* CONFIG_MODULES */
/***
* jump_label_text_reserved - check if addr range is reserved
* @start: start text addr
* @end: end text addr
*
* checks if the text addr located between @start and @end
* overlaps with any of the jump label patch addresses. Code
* that wants to modify kernel text should first verify that
* it does not overlap with any of the jump label addresses.
* Caller must hold jump_label_mutex.
*
* returns 1 if there is an overlap, 0 otherwise
*/
int jump_label_text_reserved(void *start, void *end)
{
int ret = __jump_label_text_reserved(__start___jump_table,
__stop___jump_table, start, end);
if (ret)
return ret;
#ifdef CONFIG_MODULES
ret = __jump_label_mod_text_reserved(start, end);
#endif
return ret;
}
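#if 0
/*
 * Illustrative caller sketch, not compiled into this file: code that wants
 * to modify kernel text takes the jump label lock and rejects ranges that
 * overlap a jump label site.  The patch_text() helper named here is
 * hypothetical.
 */
static int example_patch(void *addr, size_t len)
{
	int ret;

	jump_label_lock();
	if (jump_label_text_reserved(addr, addr + len))
		ret = -EBUSY;		/* range overlaps a jump label site */
	else
		ret = patch_text(addr, len);
	jump_label_unlock();

	return ret;
}
#endif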
static void jump_label_update(struct static_key *key, int enable)
{
struct jump_entry *stop = __stop___jump_table;
struct jump_entry *entry = jump_label_get_entries(key);
#ifdef CONFIG_MODULES
struct module *mod = __module_address((unsigned long)key);
__jump_label_mod_update(key, enable);
if (mod)
stop = mod->jump_entries + mod->num_jump_entries;
#endif
/* if there are no users, entry can be NULL */
if (entry)
__jump_label_update(key, entry, stop, enable);
}
#endif
| gpl-2.0 |
shinru2004/HTC-C525c | arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 2855 | 11428 | /*
* SH7203 and SH7263 Setup
*
* Copyright (C) 2007 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
USB, LCDC, CMT0, CMT1, BSC, WDT,
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU4_ABCD, MTU2_TCI3V, MTU2_TCI4V,
ADC_ADI,
IIC30, IIC31, IIC32, IIC33,
SCIF0, SCIF1, SCIF2, SCIF3,
SSU0, SSU1,
SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
/* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
ROMDEC, FLCTL, SDHI, RTC, RCAN0, RCAN1,
SRC, IEBI,
/* interrupt groups */
PINT,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
INTC_IRQ(MTU0_ABCD, 146), INTC_IRQ(MTU0_ABCD, 147),
INTC_IRQ(MTU0_ABCD, 148), INTC_IRQ(MTU0_ABCD, 149),
INTC_IRQ(MTU0_VEF, 150),
INTC_IRQ(MTU0_VEF, 151), INTC_IRQ(MTU0_VEF, 152),
INTC_IRQ(MTU1_AB, 153), INTC_IRQ(MTU1_AB, 154),
INTC_IRQ(MTU1_VU, 155), INTC_IRQ(MTU1_VU, 156),
INTC_IRQ(MTU2_AB, 157), INTC_IRQ(MTU2_AB, 158),
INTC_IRQ(MTU2_VU, 159), INTC_IRQ(MTU2_VU, 160),
INTC_IRQ(MTU3_ABCD, 161), INTC_IRQ(MTU3_ABCD, 162),
INTC_IRQ(MTU3_ABCD, 163), INTC_IRQ(MTU3_ABCD, 164),
INTC_IRQ(MTU2_TCI3V, 165),
INTC_IRQ(MTU4_ABCD, 166), INTC_IRQ(MTU4_ABCD, 167),
INTC_IRQ(MTU4_ABCD, 168), INTC_IRQ(MTU4_ABCD, 169),
INTC_IRQ(MTU2_TCI4V, 170),
INTC_IRQ(ADC_ADI, 171),
INTC_IRQ(IIC30, 172), INTC_IRQ(IIC30, 173),
INTC_IRQ(IIC30, 174), INTC_IRQ(IIC30, 175),
INTC_IRQ(IIC30, 176),
INTC_IRQ(IIC31, 177), INTC_IRQ(IIC31, 178),
INTC_IRQ(IIC31, 179), INTC_IRQ(IIC31, 180),
INTC_IRQ(IIC31, 181),
INTC_IRQ(IIC32, 182), INTC_IRQ(IIC32, 183),
INTC_IRQ(IIC32, 184), INTC_IRQ(IIC32, 185),
INTC_IRQ(IIC32, 186),
INTC_IRQ(IIC33, 187), INTC_IRQ(IIC33, 188),
INTC_IRQ(IIC33, 189), INTC_IRQ(IIC33, 190),
INTC_IRQ(IIC33, 191),
INTC_IRQ(SCIF0, 192), INTC_IRQ(SCIF0, 193),
INTC_IRQ(SCIF0, 194), INTC_IRQ(SCIF0, 195),
INTC_IRQ(SCIF1, 196), INTC_IRQ(SCIF1, 197),
INTC_IRQ(SCIF1, 198), INTC_IRQ(SCIF1, 199),
INTC_IRQ(SCIF2, 200), INTC_IRQ(SCIF2, 201),
INTC_IRQ(SCIF2, 202), INTC_IRQ(SCIF2, 203),
INTC_IRQ(SCIF3, 204), INTC_IRQ(SCIF3, 205),
INTC_IRQ(SCIF3, 206), INTC_IRQ(SCIF3, 207),
INTC_IRQ(SSU0, 208), INTC_IRQ(SSU0, 209),
INTC_IRQ(SSU0, 210),
INTC_IRQ(SSU1, 211), INTC_IRQ(SSU1, 212),
INTC_IRQ(SSU1, 213),
INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
INTC_IRQ(FLCTL, 224), INTC_IRQ(FLCTL, 225),
INTC_IRQ(FLCTL, 226), INTC_IRQ(FLCTL, 227),
INTC_IRQ(RTC, 231), INTC_IRQ(RTC, 232),
INTC_IRQ(RTC, 233),
INTC_IRQ(RCAN0, 234), INTC_IRQ(RCAN0, 235),
INTC_IRQ(RCAN0, 236), INTC_IRQ(RCAN0, 237),
INTC_IRQ(RCAN0, 238),
INTC_IRQ(RCAN1, 239), INTC_IRQ(RCAN1, 240),
INTC_IRQ(RCAN1, 241), INTC_IRQ(RCAN1, 242),
INTC_IRQ(RCAN1, 243),
/* SH7263-specific trash */
#ifdef CONFIG_CPU_SUBTYPE_SH7263
INTC_IRQ(ROMDEC, 218), INTC_IRQ(ROMDEC, 219),
INTC_IRQ(ROMDEC, 220), INTC_IRQ(ROMDEC, 221),
INTC_IRQ(ROMDEC, 222), INTC_IRQ(ROMDEC, 223),
INTC_IRQ(SDHI, 228), INTC_IRQ(SDHI, 229),
INTC_IRQ(SDHI, 230),
INTC_IRQ(SRC, 244), INTC_IRQ(SRC, 245),
INTC_IRQ(SRC, 246),
INTC_IRQ(IEBI, 247),
#endif
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
MTU2_VU } },
{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
MTU2_TCI4V } },
{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
#ifdef CONFIG_CPU_SUBTYPE_SH7203
{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
SSI3_SSII, 0 } },
{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
#else
{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
SSI3_SSII, ROMDEC } },
{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
#endif
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe0808, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xfffe8000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 192, 192, 192, 192 },
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.mapbase = 0xfffe8800,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 196, 196, 196, 196 },
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.mapbase = 0xfffe9000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 200, 200, 200, 200 },
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.mapbase = 0xfffe9800,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 204, 204, 204, 204 },
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct sh_timer_config cmt0_platform_data = {
.channel_offset = 0x02,
.timer_bit = 0,
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt0_resources[] = {
[0] = {
.start = 0xfffec002,
.end = 0xfffec007,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 142,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cmt0_device = {
.name = "sh_cmt",
.id = 0,
.dev = {
.platform_data = &cmt0_platform_data,
},
.resource = cmt0_resources,
.num_resources = ARRAY_SIZE(cmt0_resources),
};
static struct sh_timer_config cmt1_platform_data = {
.channel_offset = 0x08,
.timer_bit = 1,
.clockevent_rating = 125,
.clocksource_rating = 0, /* disabled due to code generation issues */
};
static struct resource cmt1_resources[] = {
[0] = {
.start = 0xfffec008,
.end = 0xfffec00d,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 143,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cmt1_device = {
.name = "sh_cmt",
.id = 1,
.dev = {
.platform_data = &cmt1_platform_data,
},
.resource = cmt1_resources,
.num_resources = ARRAY_SIZE(cmt1_resources),
};
static struct sh_timer_config mtu2_0_platform_data = {
.channel_offset = -0x80,
.timer_bit = 0,
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
.start = 0xfffe4300,
.end = 0xfffe4326,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 146,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mtu2_0_device = {
.name = "sh_mtu2",
.id = 0,
.dev = {
.platform_data = &mtu2_0_platform_data,
},
.resource = mtu2_0_resources,
.num_resources = ARRAY_SIZE(mtu2_0_resources),
};
static struct sh_timer_config mtu2_1_platform_data = {
.channel_offset = -0x100,
.timer_bit = 1,
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
.start = 0xfffe4380,
.end = 0xfffe4390,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 153,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mtu2_1_device = {
.name = "sh_mtu2",
.id = 1,
.dev = {
.platform_data = &mtu2_1_platform_data,
},
.resource = mtu2_1_resources,
.num_resources = ARRAY_SIZE(mtu2_1_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffff2000,
.end = 0xffff2000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = 231,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct platform_device *sh7203_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
&mtu2_1_device,
&rtc_device,
};
static int __init sh7203_devices_setup(void)
{
return platform_add_devices(sh7203_devices,
ARRAY_SIZE(sh7203_devices));
}
arch_initcall(sh7203_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7203_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
&mtu2_1_device,
};
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
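/*
 * Standby control registers: each bit gates the clock of one on-chip
 * module, so clearing a bit below enables that module's clock.  (This is
 * a reading of the code here; see the SH7203 hardware manual for the
 * authoritative bit layout.)
 */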
void __init plat_early_device_setup(void)
{
/* enable CMT clock */
__raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
early_platform_add_devices(sh7203_early_devices,
ARRAY_SIZE(sh7203_early_devices));
}
| gpl-2.0 |
karandeepdps/ics_p690_kernel_2.35 | arch/arm/mach-pnx4008/dma.c | 4135 | 20576 | /*
* linux/arch/arm/mach-pnx4008/dma.c
*
* PNX4008 DMA registration and IRQ dispatching
*
* Author: Vitaly Wool
* Copyright: MontaVista Software Inc. (c) 2005
*
* Based on the code from Nicolas Pitre
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <asm/dma-mapping.h>
#include <mach/clock.h>
static struct dma_channel {
char *name;
void (*irq_handler) (int, int, void *);
void *data;
struct pnx4008_dma_ll *ll;
u32 ll_dma;
void *target_addr;
int target_id;
} dma_channels[MAX_DMA_CHANNELS];
static struct ll_pool {
void *vaddr;
void *cur;
dma_addr_t dma_addr;
int count;
} ll_pool;
static DEFINE_SPINLOCK(ll_lock);
struct pnx4008_dma_ll *pnx4008_alloc_ll_entry(dma_addr_t * ll_dma)
{
struct pnx4008_dma_ll *ll = NULL;
unsigned long flags;
spin_lock_irqsave(&ll_lock, flags);
if (ll_pool.count > 4) { /* can give one more */
ll = *(struct pnx4008_dma_ll **) ll_pool.cur;
*ll_dma = ll_pool.dma_addr + ((void *)ll - ll_pool.vaddr);
*(void **)ll_pool.cur = **(void ***)ll_pool.cur;
memset(ll, 0, sizeof(*ll));
ll_pool.count--;
}
spin_unlock_irqrestore(&ll_lock, flags);
return ll;
}
EXPORT_SYMBOL_GPL(pnx4008_alloc_ll_entry);
void pnx4008_free_ll_entry(struct pnx4008_dma_ll * ll, dma_addr_t ll_dma)
{
unsigned long flags;
if (ll) {
if ((unsigned long)((long)ll - (long)ll_pool.vaddr) > 0x4000) {
printk(KERN_ERR "Trying to free entry not allocated by DMA\n");
BUG();
}
if (ll->flags & DMA_BUFFER_ALLOCATED)
ll->free(ll->alloc_data);
spin_lock_irqsave(&ll_lock, flags);
*(long *)ll = *(long *)ll_pool.cur;
*(long *)ll_pool.cur = (long)ll;
ll_pool.count++;
spin_unlock_irqrestore(&ll_lock, flags);
}
}
EXPORT_SYMBOL_GPL(pnx4008_free_ll_entry);
void pnx4008_free_ll(u32 ll_dma, struct pnx4008_dma_ll * ll)
{
struct pnx4008_dma_ll *ptr;
u32 dma;
while (ll) {
dma = ll->next_dma;
ptr = ll->next;
pnx4008_free_ll_entry(ll, ll_dma);
ll_dma = dma;
ll = ptr;
}
}
EXPORT_SYMBOL_GPL(pnx4008_free_ll);
static int dma_channels_requested = 0;
static inline void dma_increment_usage(void)
{
if (!dma_channels_requested++) {
struct clk *clk = clk_get(0, "dma_ck");
if (!IS_ERR(clk)) {
clk_set_rate(clk, 1);
clk_put(clk);
}
pnx4008_config_dma(-1, -1, 1);
}
}
static inline void dma_decrement_usage(void)
{
if (!--dma_channels_requested) {
struct clk *clk = clk_get(0, "dma_ck");
if (!IS_ERR(clk)) {
clk_set_rate(clk, 0);
clk_put(clk);
}
pnx4008_config_dma(-1, -1, 0);
}
}
static DEFINE_SPINLOCK(dma_lock);
static inline void pnx4008_dma_lock(void)
{
spin_lock_irq(&dma_lock);
}
static inline void pnx4008_dma_unlock(void)
{
spin_unlock_irq(&dma_lock);
}
#define VALID_CHANNEL(c) (((c) >= 0) && ((c) < MAX_DMA_CHANNELS))
int pnx4008_request_channel(char *name, int ch,
void (*irq_handler) (int, int, void *), void *data)
{
int i, found = 0;
/* basic sanity checks */
if (!name || (ch != -1 && !VALID_CHANNEL(ch)))
return -EINVAL;
pnx4008_dma_lock();
/* try grabbing a DMA channel with the requested priority */
for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
if (!dma_channels[i].name && (ch == -1 || ch == i)) {
found = 1;
break;
}
}
if (found) {
dma_increment_usage();
dma_channels[i].name = name;
dma_channels[i].irq_handler = irq_handler;
dma_channels[i].data = data;
dma_channels[i].ll = NULL;
dma_channels[i].ll_dma = 0;
} else {
printk(KERN_WARNING "No more available DMA channels for %s\n",
name);
i = -ENODEV;
}
pnx4008_dma_unlock();
return i;
}
EXPORT_SYMBOL_GPL(pnx4008_request_channel);
void pnx4008_free_channel(int ch)
{
if (!dma_channels[ch].name) {
printk(KERN_CRIT
"%s: trying to free channel %d which is already freed\n",
__func__, ch);
return;
}
pnx4008_dma_lock();
pnx4008_free_ll(dma_channels[ch].ll_dma, dma_channels[ch].ll);
dma_channels[ch].ll = NULL;
dma_decrement_usage();
dma_channels[ch].name = NULL;
pnx4008_dma_unlock();
}
EXPORT_SYMBOL_GPL(pnx4008_free_channel);
int pnx4008_config_dma(int ahb_m1_be, int ahb_m2_be, int enable)
{
unsigned long dma_cfg = __raw_readl(DMAC_CONFIG);
switch (ahb_m1_be) {
case 0:
dma_cfg &= ~(1 << 1);
break;
case 1:
dma_cfg |= (1 << 1);
break;
default:
break;
}
switch (ahb_m2_be) {
case 0:
dma_cfg &= ~(1 << 2);
break;
case 1:
dma_cfg |= (1 << 2);
break;
default:
break;
}
switch (enable) {
case 0:
dma_cfg &= ~(1 << 0);
break;
case 1:
dma_cfg |= (1 << 0);
break;
default:
break;
}
pnx4008_dma_lock();
__raw_writel(dma_cfg, DMAC_CONFIG);
pnx4008_dma_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_config_dma);
int pnx4008_dma_pack_control(const struct pnx4008_dma_ch_ctrl * ch_ctrl,
unsigned long *ctrl)
{
int i = 0, dbsize, sbsize, err = 0;
if (!ctrl || !ch_ctrl) {
err = -EINVAL;
goto out;
}
*ctrl = 0;
switch (ch_ctrl->tc_mask) {
case 0:
break;
case 1:
*ctrl |= (1 << 31);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->cacheable) {
case 0:
break;
case 1:
*ctrl |= (1 << 30);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->bufferable) {
case 0:
break;
case 1:
*ctrl |= (1 << 29);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->priv_mode) {
case 0:
break;
case 1:
*ctrl |= (1 << 28);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->di) {
case 0:
break;
case 1:
*ctrl |= (1 << 27);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->si) {
case 0:
break;
case 1:
*ctrl |= (1 << 26);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->dest_ahb1) {
case 0:
break;
case 1:
*ctrl |= (1 << 25);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->src_ahb1) {
case 0:
break;
case 1:
*ctrl |= (1 << 24);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->dwidth) {
case WIDTH_BYTE:
*ctrl &= ~(7 << 21);
break;
case WIDTH_HWORD:
*ctrl &= ~(7 << 21);
*ctrl |= (1 << 21);
break;
case WIDTH_WORD:
*ctrl &= ~(7 << 21);
*ctrl |= (2 << 21);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_ctrl->swidth) {
case WIDTH_BYTE:
*ctrl &= ~(7 << 18);
break;
case WIDTH_HWORD:
*ctrl &= ~(7 << 18);
*ctrl |= (1 << 18);
break;
case WIDTH_WORD:
*ctrl &= ~(7 << 18);
*ctrl |= (2 << 18);
break;
default:
err = -EINVAL;
goto out;
}
dbsize = ch_ctrl->dbsize;
while (!(dbsize & 1)) {
i++;
dbsize >>= 1;
}
if (ch_ctrl->dbsize != 1 << i || i > 8 || i == 1) {
err = -EINVAL;
goto out;
} else if (i > 1)
i--;
*ctrl &= ~(7 << 15);
*ctrl |= (i << 15);
i = 0;	/* restart the power-of-two exponent for the source burst size */
sbsize = ch_ctrl->sbsize;
while (!(sbsize & 1)) {
i++;
sbsize >>= 1;
}
if (ch_ctrl->sbsize != 1 << i || i > 8 || i == 1) {
err = -EINVAL;
goto out;
} else if (i > 1)
i--;
*ctrl &= ~(7 << 12);
*ctrl |= (i << 12);
if (ch_ctrl->tr_size > 0x7ff) {
err = -E2BIG;
goto out;
}
*ctrl &= ~0x7ff;
*ctrl |= ch_ctrl->tr_size & 0x7ff;
out:
return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_pack_control);
int pnx4008_dma_parse_control(unsigned long ctrl,
struct pnx4008_dma_ch_ctrl * ch_ctrl)
{
int err = 0;
if (!ch_ctrl) {
err = -EINVAL;
goto out;
}
ch_ctrl->tr_size = ctrl & 0x7ff;
ctrl >>= 12;
ch_ctrl->sbsize = 1 << (ctrl & 7);
if (ch_ctrl->sbsize > 1)
ch_ctrl->sbsize <<= 1;
ctrl >>= 3;
ch_ctrl->dbsize = 1 << (ctrl & 7);
if (ch_ctrl->dbsize > 1)
ch_ctrl->dbsize <<= 1;
ctrl >>= 3;
switch (ctrl & 7) {
case 0:
ch_ctrl->swidth = WIDTH_BYTE;
break;
case 1:
ch_ctrl->swidth = WIDTH_HWORD;
break;
case 2:
ch_ctrl->swidth = WIDTH_WORD;
break;
default:
err = -EINVAL;
goto out;
}
ctrl >>= 3;
switch (ctrl & 7) {
case 0:
ch_ctrl->dwidth = WIDTH_BYTE;
break;
case 1:
ch_ctrl->dwidth = WIDTH_HWORD;
break;
case 2:
ch_ctrl->dwidth = WIDTH_WORD;
break;
default:
err = -EINVAL;
goto out;
}
ctrl >>= 3;
ch_ctrl->src_ahb1 = ctrl & 1;
ctrl >>= 1;
ch_ctrl->dest_ahb1 = ctrl & 1;
ctrl >>= 1;
ch_ctrl->si = ctrl & 1;
ctrl >>= 1;
ch_ctrl->di = ctrl & 1;
ctrl >>= 1;
ch_ctrl->priv_mode = ctrl & 1;
ctrl >>= 1;
ch_ctrl->bufferable = ctrl & 1;
ctrl >>= 1;
ch_ctrl->cacheable = ctrl & 1;
ctrl >>= 1;
ch_ctrl->tc_mask = ctrl & 1;
out:
return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_parse_control);
int pnx4008_dma_pack_config(const struct pnx4008_dma_ch_config * ch_cfg,
unsigned long *cfg)
{
int err = 0;
if (!cfg || !ch_cfg) {
err = -EINVAL;
goto out;
}
*cfg = 0;
switch (ch_cfg->halt) {
case 0:
break;
case 1:
*cfg |= (1 << 18);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_cfg->active) {
case 0:
break;
case 1:
*cfg |= (1 << 17);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_cfg->lock) {
case 0:
break;
case 1:
*cfg |= (1 << 16);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_cfg->itc) {
case 0:
break;
case 1:
*cfg |= (1 << 15);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_cfg->ie) {
case 0:
break;
case 1:
*cfg |= (1 << 14);
break;
default:
err = -EINVAL;
goto out;
}
switch (ch_cfg->flow_cntrl) {
case FC_MEM2MEM_DMA:
*cfg &= ~(7 << 11);
break;
case FC_MEM2PER_DMA:
*cfg &= ~(7 << 11);
*cfg |= (1 << 11);
break;
case FC_PER2MEM_DMA:
*cfg &= ~(7 << 11);
*cfg |= (2 << 11);
break;
case FC_PER2PER_DMA:
*cfg &= ~(7 << 11);
*cfg |= (3 << 11);
break;
case FC_PER2PER_DPER:
*cfg &= ~(7 << 11);
*cfg |= (4 << 11);
break;
case FC_MEM2PER_PER:
*cfg &= ~(7 << 11);
*cfg |= (5 << 11);
break;
case FC_PER2MEM_PER:
*cfg &= ~(7 << 11);
*cfg |= (6 << 11);
break;
case FC_PER2PER_SPER:
*cfg |= (7 << 11);
break;
default:
err = -EINVAL;
goto out;
}
*cfg &= ~(0x1f << 6);
*cfg |= ((ch_cfg->dest_per & 0x1f) << 6);
*cfg &= ~(0x1f << 1);
*cfg |= ((ch_cfg->src_per & 0x1f) << 1);
out:
return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_pack_config);
int pnx4008_dma_parse_config(unsigned long cfg,
struct pnx4008_dma_ch_config * ch_cfg)
{
int err = 0;
if (!ch_cfg) {
err = -EINVAL;
goto out;
}
cfg >>= 1;
ch_cfg->src_per = cfg & 0x1f;
cfg >>= 5;
ch_cfg->dest_per = cfg & 0x1f;
cfg >>= 5;
switch (cfg & 7) {
case 0:
ch_cfg->flow_cntrl = FC_MEM2MEM_DMA;
break;
case 1:
ch_cfg->flow_cntrl = FC_MEM2PER_DMA;
break;
case 2:
ch_cfg->flow_cntrl = FC_PER2MEM_DMA;
break;
case 3:
ch_cfg->flow_cntrl = FC_PER2PER_DMA;
break;
case 4:
ch_cfg->flow_cntrl = FC_PER2PER_DPER;
break;
case 5:
ch_cfg->flow_cntrl = FC_MEM2PER_PER;
break;
case 6:
ch_cfg->flow_cntrl = FC_PER2MEM_PER;
break;
case 7:
ch_cfg->flow_cntrl = FC_PER2PER_SPER;
}
cfg >>= 3;
ch_cfg->ie = cfg & 1;
cfg >>= 1;
ch_cfg->itc = cfg & 1;
cfg >>= 1;
ch_cfg->lock = cfg & 1;
cfg >>= 1;
ch_cfg->active = cfg & 1;
cfg >>= 1;
ch_cfg->halt = cfg & 1;
out:
return err;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_parse_config);
void pnx4008_dma_split_head_entry(struct pnx4008_dma_config * config,
struct pnx4008_dma_ch_ctrl * ctrl)
{
int new_len = ctrl->tr_size, num_entries = 0;
int old_len = new_len;
int src_width, dest_width, count = 1;
switch (ctrl->swidth) {
case WIDTH_BYTE:
src_width = 1;
break;
case WIDTH_HWORD:
src_width = 2;
break;
case WIDTH_WORD:
src_width = 4;
break;
default:
return;
}
switch (ctrl->dwidth) {
case WIDTH_BYTE:
dest_width = 1;
break;
case WIDTH_HWORD:
dest_width = 2;
break;
case WIDTH_WORD:
dest_width = 4;
break;
default:
return;
}
while (new_len > 0x7FF) {
num_entries++;
new_len = (ctrl->tr_size + num_entries) / (num_entries + 1);
}
if (num_entries != 0) {
struct pnx4008_dma_ll *ll = NULL;
config->ch_ctrl &= ~0x7ff;
config->ch_ctrl |= new_len;
if (!config->is_ll) {
config->is_ll = 1;
while (num_entries) {
if (!ll) {
config->ll =
pnx4008_alloc_ll_entry(&config->
ll_dma);
ll = config->ll;
} else {
ll->next =
pnx4008_alloc_ll_entry(&ll->
next_dma);
ll = ll->next;
}
if (ctrl->si)
ll->src_addr =
config->src_addr +
src_width * new_len * count;
else
ll->src_addr = config->src_addr;
if (ctrl->di)
ll->dest_addr =
config->dest_addr +
dest_width * new_len * count;
else
ll->dest_addr = config->dest_addr;
ll->ch_ctrl = config->ch_ctrl & 0x7fffffff;
ll->next_dma = 0;
ll->next = NULL;
num_entries--;
count++;
}
} else {
struct pnx4008_dma_ll *ll_old = config->ll;
unsigned long ll_dma_old = config->ll_dma;
while (num_entries) {
if (!ll) {
config->ll =
pnx4008_alloc_ll_entry(&config->
ll_dma);
ll = config->ll;
} else {
ll->next =
pnx4008_alloc_ll_entry(&ll->
next_dma);
ll = ll->next;
}
if (ctrl->si)
ll->src_addr =
config->src_addr +
src_width * new_len * count;
else
ll->src_addr = config->src_addr;
if (ctrl->di)
ll->dest_addr =
config->dest_addr +
dest_width * new_len * count;
else
ll->dest_addr = config->dest_addr;
ll->ch_ctrl = config->ch_ctrl & 0x7fffffff;
ll->next_dma = 0;
ll->next = NULL;
num_entries--;
count++;
}
ll->next_dma = ll_dma_old;
ll->next = ll_old;
}
/* adjust last length/tc */
ll->ch_ctrl = config->ch_ctrl & (~0x7ff);
ll->ch_ctrl |= old_len - new_len * (count - 1);
config->ch_ctrl &= 0x7fffffff;
}
}
EXPORT_SYMBOL_GPL(pnx4008_dma_split_head_entry);
void pnx4008_dma_split_ll_entry(struct pnx4008_dma_ll * cur_ll,
struct pnx4008_dma_ch_ctrl * ctrl)
{
int new_len = ctrl->tr_size, num_entries = 0;
int old_len = new_len;
int src_width, dest_width, count = 1;
switch (ctrl->swidth) {
case WIDTH_BYTE:
src_width = 1;
break;
case WIDTH_HWORD:
src_width = 2;
break;
case WIDTH_WORD:
src_width = 4;
break;
default:
return;
}
switch (ctrl->dwidth) {
case WIDTH_BYTE:
dest_width = 1;
break;
case WIDTH_HWORD:
dest_width = 2;
break;
case WIDTH_WORD:
dest_width = 4;
break;
default:
return;
}
while (new_len > 0x7FF) {
num_entries++;
new_len = (ctrl->tr_size + num_entries) / (num_entries + 1);
}
if (num_entries != 0) {
struct pnx4008_dma_ll *ll = NULL;
cur_ll->ch_ctrl &= ~0x7ff;
cur_ll->ch_ctrl |= new_len;
if (!cur_ll->next) {
while (num_entries) {
if (!ll) {
cur_ll->next =
pnx4008_alloc_ll_entry(&cur_ll->
next_dma);
ll = cur_ll->next;
} else {
ll->next =
pnx4008_alloc_ll_entry(&ll->
next_dma);
ll = ll->next;
}
if (ctrl->si)
ll->src_addr =
cur_ll->src_addr +
src_width * new_len * count;
else
ll->src_addr = cur_ll->src_addr;
if (ctrl->di)
ll->dest_addr =
cur_ll->dest_addr +
dest_width * new_len * count;
else
ll->dest_addr = cur_ll->dest_addr;
ll->ch_ctrl = cur_ll->ch_ctrl & 0x7fffffff;
ll->next_dma = 0;
ll->next = NULL;
num_entries--;
count++;
}
} else {
struct pnx4008_dma_ll *ll_old = cur_ll->next;
unsigned long ll_dma_old = cur_ll->next_dma;
while (num_entries) {
if (!ll) {
cur_ll->next =
pnx4008_alloc_ll_entry(&cur_ll->
next_dma);
ll = cur_ll->next;
} else {
ll->next =
pnx4008_alloc_ll_entry(&ll->
next_dma);
ll = ll->next;
}
if (ctrl->si)
ll->src_addr =
cur_ll->src_addr +
src_width * new_len * count;
else
ll->src_addr = cur_ll->src_addr;
if (ctrl->di)
ll->dest_addr =
cur_ll->dest_addr +
dest_width * new_len * count;
else
ll->dest_addr = cur_ll->dest_addr;
ll->ch_ctrl = cur_ll->ch_ctrl & 0x7fffffff;
ll->next_dma = 0;
ll->next = NULL;
num_entries--;
count++;
}
ll->next_dma = ll_dma_old;
ll->next = ll_old;
}
/* adjust last length/tc */
ll->ch_ctrl = cur_ll->ch_ctrl & (~0x7ff);
ll->ch_ctrl |= old_len - new_len * (count - 1);
cur_ll->ch_ctrl &= 0x7fffffff;
}
}
EXPORT_SYMBOL_GPL(pnx4008_dma_split_ll_entry);
int pnx4008_config_channel(int ch, struct pnx4008_dma_config * config)
{
if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
return -EINVAL;
pnx4008_dma_lock();
__raw_writel(config->src_addr, DMAC_Cx_SRC_ADDR(ch));
__raw_writel(config->dest_addr, DMAC_Cx_DEST_ADDR(ch));
if (config->is_ll)
__raw_writel(config->ll_dma, DMAC_Cx_LLI(ch));
else
__raw_writel(0, DMAC_Cx_LLI(ch));
__raw_writel(config->ch_ctrl, DMAC_Cx_CONTROL(ch));
__raw_writel(config->ch_cfg, DMAC_Cx_CONFIG(ch));
pnx4008_dma_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_config_channel);
int pnx4008_channel_get_config(int ch, struct pnx4008_dma_config * config)
{
if (!VALID_CHANNEL(ch) || !dma_channels[ch].name || !config)
return -EINVAL;
pnx4008_dma_lock();
config->ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
config->ch_ctrl = __raw_readl(DMAC_Cx_CONTROL(ch));
config->ll_dma = __raw_readl(DMAC_Cx_LLI(ch));
config->is_ll = config->ll_dma ? 1 : 0;
config->src_addr = __raw_readl(DMAC_Cx_SRC_ADDR(ch));
config->dest_addr = __raw_readl(DMAC_Cx_DEST_ADDR(ch));
pnx4008_dma_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_channel_get_config);
int pnx4008_dma_ch_enable(int ch)
{
unsigned long ch_cfg;
if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
return -EINVAL;
pnx4008_dma_lock();
ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
ch_cfg |= 1;
__raw_writel(ch_cfg, DMAC_Cx_CONFIG(ch));
pnx4008_dma_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_ch_enable);
int pnx4008_dma_ch_disable(int ch)
{
unsigned long ch_cfg;
if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
return -EINVAL;
pnx4008_dma_lock();
ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
ch_cfg &= ~1;
__raw_writel(ch_cfg, DMAC_Cx_CONFIG(ch));
pnx4008_dma_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_ch_disable);
int pnx4008_dma_ch_enabled(int ch)
{
unsigned long ch_cfg;
if (!VALID_CHANNEL(ch) || !dma_channels[ch].name)
return -EINVAL;
pnx4008_dma_lock();
ch_cfg = __raw_readl(DMAC_Cx_CONFIG(ch));
pnx4008_dma_unlock();
return ch_cfg & 1;
}
EXPORT_SYMBOL_GPL(pnx4008_dma_ch_enabled);
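#if 0
/*
 * Illustrative usage sketch, not compiled into this file: the typical call
 * sequence a peripheral driver would use with the helpers above.  The
 * channel name, handler and the memory-to-memory transfer are made up;
 * the field names follow the pack/parse helpers in this file.
 */
static void example_dma_handler(int channel, int cause, void *data)
{
	if (cause & DMA_TC_INT)
		;	/* transfer completed */
	if (cause & DMA_ERR_INT)
		;	/* transfer aborted with an error */
}

static int example_start_transfer(u32 src, u32 dst, int words)
{
	struct pnx4008_dma_ch_ctrl ctrl;
	struct pnx4008_dma_ch_config cfg;
	struct pnx4008_dma_config dma;
	int ch, err;

	ch = pnx4008_request_channel("example", -1, example_dma_handler, NULL);
	if (ch < 0)
		return ch;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.tc_mask = 1;		/* interrupt on terminal count */
	ctrl.si = 1;			/* increment source address */
	ctrl.di = 1;			/* increment destination address */
	ctrl.swidth = WIDTH_WORD;
	ctrl.dwidth = WIDTH_WORD;
	ctrl.sbsize = 1;
	ctrl.dbsize = 1;
	ctrl.tr_size = words;		/* must fit in 11 bits, see above */

	memset(&cfg, 0, sizeof(cfg));
	cfg.ie = 1;			/* unmask the error interrupt */
	cfg.itc = 1;			/* unmask the terminal count interrupt */
	cfg.flow_cntrl = FC_MEM2MEM_DMA;

	memset(&dma, 0, sizeof(dma));
	dma.src_addr = src;
	dma.dest_addr = dst;

	err = pnx4008_dma_pack_control(&ctrl, &dma.ch_ctrl);
	if (!err)
		err = pnx4008_dma_pack_config(&cfg, &dma.ch_cfg);
	if (!err)
		err = pnx4008_config_channel(ch, &dma);
	if (!err)
		err = pnx4008_dma_ch_enable(ch);
	if (err) {
		pnx4008_free_channel(ch);
		return err;
	}
	return ch;
}
#endif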
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
int i;
unsigned long dint = __raw_readl(DMAC_INT_STAT);
unsigned long tcint = __raw_readl(DMAC_INT_TC_STAT);
unsigned long eint = __raw_readl(DMAC_INT_ERR_STAT);
unsigned long i_bit;
for (i = MAX_DMA_CHANNELS - 1; i >= 0; i--) {
i_bit = 1 << i;
if (dint & i_bit) {
struct dma_channel *channel = &dma_channels[i];
if (channel->name && channel->irq_handler) {
int cause = 0;
if (eint & i_bit)
cause |= DMA_ERR_INT;
if (tcint & i_bit)
cause |= DMA_TC_INT;
channel->irq_handler(i, cause, channel->data);
} else {
/*
* IRQ for an unregistered DMA channel
*/
printk(KERN_WARNING
"spurious IRQ for DMA channel %d\n", i);
}
if (tcint & i_bit)
__raw_writel(i_bit, DMAC_INT_TC_CLEAR);
if (eint & i_bit)
__raw_writel(i_bit, DMAC_INT_ERR_CLEAR);
}
}
return IRQ_HANDLED;
}
static int __init pnx4008_dma_init(void)
{
int ret, i;
ret = request_irq(DMA_INT, dma_irq_handler, 0, "DMA", NULL);
if (ret) {
printk(KERN_CRIT "Wow! Can't register IRQ for DMA\n");
goto out;
}
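/*
 * The linked-list pool below is one 16KB DMA-coherent block carved into
 * pnx4008_dma_ll entries and threaded into a free list: the first word of
 * each free entry holds the address of the next free entry, and the last
 * entry points back at the first.
 */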
ll_pool.count = 0x4000 / sizeof(struct pnx4008_dma_ll);
ll_pool.cur = ll_pool.vaddr =
dma_alloc_coherent(NULL, ll_pool.count * sizeof(struct pnx4008_dma_ll),
&ll_pool.dma_addr, GFP_KERNEL);
if (!ll_pool.vaddr) {
ret = -ENOMEM;
free_irq(DMA_INT, NULL);
goto out;
}
for (i = 0; i < ll_pool.count - 1; i++) {
void **addr = ll_pool.vaddr + i * sizeof(struct pnx4008_dma_ll);
*addr = (void *)addr + sizeof(struct pnx4008_dma_ll);
}
*(long *)(ll_pool.vaddr +
(ll_pool.count - 1) * sizeof(struct pnx4008_dma_ll)) =
(long)ll_pool.vaddr;
__raw_writel(1, DMAC_CONFIG);
out:
return ret;
}
arch_initcall(pnx4008_dma_init);
| gpl-2.0 |
invisiblek/linux-2.6.32.26-inc | drivers/char/rio/riotable.c | 4391 | 30133 | /*
** -----------------------------------------------------------------------------
**
** Perle Specialix driver for Linux
** Ported from existing RIO Driver for SCO sources.
*
* (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
** Module : riotable.c
** SID : 1.2
** Last Modified : 11/6/98 10:33:47
** Retrieved : 11/6/98 10:33:50
**
** ident @(#)riotable.c 1.2
**
** -----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/string.h>
#include <asm/uaccess.h>
#include <linux/termios.h>
#include <linux/serial.h>
#include <linux/generic_serial.h>
#include "linux_compat.h"
#include "rio_linux.h"
#include "pkt.h"
#include "daemon.h"
#include "rio.h"
#include "riospace.h"
#include "cmdpkt.h"
#include "map.h"
#include "rup.h"
#include "port.h"
#include "riodrvr.h"
#include "rioinfo.h"
#include "func.h"
#include "errors.h"
#include "pci.h"
#include "parmmap.h"
#include "unixrup.h"
#include "board.h"
#include "host.h"
#include "phb.h"
#include "link.h"
#include "cmdblk.h"
#include "route.h"
#include "cirrus.h"
#include "rioioctl.h"
#include "param.h"
#include "protsts.h"
/*
** A configuration table has been loaded. It is now up to us
** to sort it out and use the information contained therein.
*/
int RIONewTable(struct rio_info *p)
{
int Host, Host1, Host2, NameIsUnique, Entry, SubEnt;
struct Map *MapP;
struct Map *HostMapP;
struct Host *HostP;
char *cptr;
/*
** We have been sent a new table to install. We need to break
** it down into little bits and spread it around a bit to see
** what we have got.
*/
/*
** Things to check:
** (things marked 'xx' aren't checked any more!)
** (1) That there are no booted Hosts/RTAs out there.
** (2) That the names are properly formed
** (3) That blank entries really are.
** xx (4) That hosts mentioned in the table actually exist. xx
** (5) That the IDs are unique (per host).
** (6) That host IDs are zero
** (7) That port numbers are valid
** (8) That port numbers aren't duplicated
** (9) That names aren't duplicated
** xx (10) That hosts that actually exist are mentioned in the table. xx
*/
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(1)\n");
if (p->RIOSystemUp) { /* (1) */
p->RIOError.Error = HOST_HAS_ALREADY_BEEN_BOOTED;
return -EBUSY;
}
p->RIOError.Error = NOTHING_WRONG_AT_ALL;
p->RIOError.Entry = -1;
p->RIOError.Other = -1;
for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
MapP = &p->RIOConnectTable[Entry];
if ((MapP->Flags & RTA16_SECOND_SLOT) == 0) {
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(2)\n");
cptr = MapP->Name; /* (2) */
cptr[MAX_NAME_LEN - 1] = '\0';
if (cptr[0] == '\0') {
memcpy(MapP->Name, MapP->RtaUniqueNum ? "RTA NN" : "HOST NN", 8);
MapP->Name[5] = '0' + Entry / 10;
MapP->Name[6] = '0' + Entry % 10;
}
while (*cptr) {
if (*cptr < ' ' || *cptr > '~') {
p->RIOError.Error = BAD_CHARACTER_IN_NAME;
p->RIOError.Entry = Entry;
return -ENXIO;
}
cptr++;
}
}
/*
** If the entry saved was a tentative entry then just forget
** about it.
*/
if (MapP->Flags & SLOT_TENTATIVE) {
MapP->HostUniqueNum = 0;
MapP->RtaUniqueNum = 0;
continue;
}
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(3)\n");
if (!MapP->RtaUniqueNum && !MapP->HostUniqueNum) { /* (3) */
if (MapP->ID || MapP->SysPort || MapP->Flags) {
rio_dprintk(RIO_DEBUG_TABLE, "%s pretending to be empty but isn't\n", MapP->Name);
p->RIOError.Error = TABLE_ENTRY_ISNT_PROPERLY_NULL;
p->RIOError.Entry = Entry;
return -ENXIO;
}
rio_dprintk(RIO_DEBUG_TABLE, "!RIO: Daemon: test (3) passes\n");
continue;
}
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(4)\n");
for (Host = 0; Host < p->RIONumHosts; Host++) { /* (4) */
if (p->RIOHosts[Host].UniqueNum == MapP->HostUniqueNum) {
HostP = &p->RIOHosts[Host];
/*
** having done the lookup, we don't really want to do
** it again, so hang the host number in a safe place
*/
MapP->Topology[0].Unit = Host;
break;
}
}
if (Host >= p->RIONumHosts) {
rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has unknown host unique number 0x%x\n", MapP->Name, MapP->HostUniqueNum);
MapP->HostUniqueNum = 0;
/* MapP->RtaUniqueNum = 0; */
/* MapP->ID = 0; */
/* MapP->Flags = 0; */
/* MapP->SysPort = 0; */
/* MapP->Name[0] = 0; */
continue;
}
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(5)\n");
if (MapP->RtaUniqueNum) { /* (5) */
if (!MapP->ID) {
rio_dprintk(RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an ID of zero!\n", MapP->Name);
p->RIOError.Error = ZERO_RTA_ID;
p->RIOError.Entry = Entry;
return -ENXIO;
}
if (MapP->ID > MAX_RUP) {
rio_dprintk(RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an invalid ID %d\n", MapP->Name, MapP->ID);
p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
p->RIOError.Entry = Entry;
return -ENXIO;
}
for (SubEnt = 0; SubEnt < Entry; SubEnt++) {
if (MapP->HostUniqueNum == p->RIOConnectTable[SubEnt].HostUniqueNum && MapP->ID == p->RIOConnectTable[SubEnt].ID) {
rio_dprintk(RIO_DEBUG_TABLE, "Dupl. ID number allocated to RTA %s and RTA %s\n", MapP->Name, p->RIOConnectTable[SubEnt].Name);
p->RIOError.Error = DUPLICATED_RTA_ID;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
/*
** If the RtaUniqueNum is the same, it may be looking at both
** entries for a 16 port RTA, so check the ids
*/
if ((MapP->RtaUniqueNum == p->RIOConnectTable[SubEnt].RtaUniqueNum)
&& (MapP->ID2 != p->RIOConnectTable[SubEnt].ID)) {
rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n", MapP->Name);
rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n", p->RIOConnectTable[SubEnt].Name);
p->RIOError.Error = DUPLICATE_UNIQUE_NUMBER;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
}
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(7a)\n");
/* (7a) */
if ((MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA)) {
rio_dprintk(RIO_DEBUG_TABLE, "TTY Port number %d-RTA %s is not a multiple of %d!\n", (int) MapP->SysPort, MapP->Name, PORTS_PER_RTA);
p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
p->RIOError.Entry = Entry;
return -ENXIO;
}
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(7b)\n");
/* (7b) */
if ((MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS)) {
rio_dprintk(RIO_DEBUG_TABLE, "TTY Port number %d for RTA %s is too big\n", (int) MapP->SysPort, MapP->Name);
p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
p->RIOError.Entry = Entry;
return -ENXIO;
}
for (SubEnt = 0; SubEnt < Entry; SubEnt++) {
if (p->RIOConnectTable[SubEnt].Flags & RTA16_SECOND_SLOT)
continue;
if (p->RIOConnectTable[SubEnt].RtaUniqueNum) {
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(8)\n");
/* (8) */
if ((MapP->SysPort != NO_PORT) && (MapP->SysPort == p->RIOConnectTable[SubEnt].SysPort)) {
rio_dprintk(RIO_DEBUG_TABLE, "RTA %s:same TTY port # as RTA %s (%d)\n", MapP->Name, p->RIOConnectTable[SubEnt].Name, (int) MapP->SysPort);
p->RIOError.Error = TTY_NUMBER_IN_USE;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(9)\n");
if (strcmp(MapP->Name, p->RIOConnectTable[SubEnt].Name) == 0 && !(MapP->Flags & RTA16_SECOND_SLOT)) { /* (9) */
rio_dprintk(RIO_DEBUG_TABLE, "RTA name %s used twice\n", MapP->Name);
p->RIOError.Error = NAME_USED_TWICE;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
}
}
} else { /* (6) */
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(6)\n");
if (MapP->ID) {
rio_dprintk(RIO_DEBUG_TABLE, "RIO:HOST %s has been allocated ID that isn't zero!\n", MapP->Name);
p->RIOError.Error = HOST_ID_NOT_ZERO;
p->RIOError.Entry = Entry;
return -ENXIO;
}
if (MapP->SysPort != NO_PORT) {
rio_dprintk(RIO_DEBUG_TABLE, "RIO: HOST %s has been allocated port numbers!\n", MapP->Name);
p->RIOError.Error = HOST_SYSPORT_BAD;
p->RIOError.Entry = Entry;
return -ENXIO;
}
}
}
/*
** wow! if we get here then it's a goody!
*/
/*
** Zero the (old) entries for each host...
*/
for (Host = 0; Host < RIO_HOSTS; Host++) {
for (Entry = 0; Entry < MAX_RUP; Entry++) {
memset(&p->RIOHosts[Host].Mapping[Entry], 0, sizeof(struct Map));
}
memset(&p->RIOHosts[Host].Name[0], 0, sizeof(p->RIOHosts[Host].Name));
}
/*
** Copy in the new table entries
*/
for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: Copy table for Host entry %d\n", Entry);
MapP = &p->RIOConnectTable[Entry];
/*
** Now, if it is an empty slot ignore it!
*/
if (MapP->HostUniqueNum == 0)
continue;
/*
** we saved the host number earlier, so grab it back
*/
HostP = &p->RIOHosts[MapP->Topology[0].Unit];
/*
** If it is a host, then we only need to fill in the name field.
*/
if (MapP->ID == 0) {
rio_dprintk(RIO_DEBUG_TABLE, "Host entry found. Name %s\n", MapP->Name);
memcpy(HostP->Name, MapP->Name, MAX_NAME_LEN);
continue;
}
/*
** Its an RTA entry, so fill in the host mapping entries for it
** and the port mapping entries. Notice that entry zero is for
** ID one.
*/
HostMapP = &HostP->Mapping[MapP->ID - 1];
if (MapP->Flags & SLOT_IN_USE) {
rio_dprintk(RIO_DEBUG_TABLE, "Rta entry found. Name %s\n", MapP->Name);
/*
** structure assign, then sort out the bits we shouldn't have done
*/
*HostMapP = *MapP;
HostMapP->Flags = SLOT_IN_USE;
if (MapP->Flags & RTA16_SECOND_SLOT)
HostMapP->Flags |= RTA16_SECOND_SLOT;
RIOReMapPorts(p, HostP, HostMapP);
} else {
rio_dprintk(RIO_DEBUG_TABLE, "TENTATIVE Rta entry found. Name %s\n", MapP->Name);
}
}
for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
p->RIOSavedTable[Entry] = p->RIOConnectTable[Entry];
}
for (Host = 0; Host < p->RIONumHosts; Host++) {
for (SubEnt = 0; SubEnt < LINKS_PER_UNIT; SubEnt++) {
p->RIOHosts[Host].Topology[SubEnt].Unit = ROUTE_DISCONNECT;
p->RIOHosts[Host].Topology[SubEnt].Link = NO_LINK;
}
for (Entry = 0; Entry < MAX_RUP; Entry++) {
for (SubEnt = 0; SubEnt < LINKS_PER_UNIT; SubEnt++) {
p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Unit = ROUTE_DISCONNECT;
p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Link = NO_LINK;
}
}
if (!p->RIOHosts[Host].Name[0]) {
memcpy(p->RIOHosts[Host].Name, "HOST 1", 7);
p->RIOHosts[Host].Name[5] += Host;
}
/*
** Check that default name assigned is unique.
*/
Host1 = Host;
NameIsUnique = 0;
while (!NameIsUnique) {
NameIsUnique = 1;
for (Host2 = 0; Host2 < p->RIONumHosts; Host2++) {
if (Host2 == Host)
continue;
if (strcmp(p->RIOHosts[Host].Name, p->RIOHosts[Host2].Name)
== 0) {
NameIsUnique = 0;
Host1++;
if (Host1 >= p->RIONumHosts)
Host1 = 0;
p->RIOHosts[Host].Name[5] = '1' + Host1;
}
}
}
/*
** Rename host if name already used.
*/
if (Host1 != Host) {
rio_dprintk(RIO_DEBUG_TABLE, "Default name %s already used\n", p->RIOHosts[Host].Name);
memcpy(p->RIOHosts[Host].Name, "HOST 1", 7);
p->RIOHosts[Host].Name[5] += Host1;
}
rio_dprintk(RIO_DEBUG_TABLE, "Assigning default name %s\n", p->RIOHosts[Host].Name);
}
return 0;
}
/*
** User process needs the config table - build it from first
** principles.
**
* FIXME: SMP locking
*/
int RIOApel(struct rio_info *p)
{
int Host;
int link;
int Rup;
int Next = 0;
struct Map *MapP;
struct Host *HostP;
unsigned long flags;
rio_dprintk(RIO_DEBUG_TABLE, "Generating a table to return to config.rio\n");
memset(&p->RIOConnectTable[0], 0, sizeof(struct Map) * TOTAL_MAP_ENTRIES);
for (Host = 0; Host < RIO_HOSTS; Host++) {
rio_dprintk(RIO_DEBUG_TABLE, "Processing host %d\n", Host);
HostP = &p->RIOHosts[Host];
rio_spin_lock_irqsave(&HostP->HostLock, flags);
MapP = &p->RIOConnectTable[Next++];
MapP->HostUniqueNum = HostP->UniqueNum;
if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
continue;
}
MapP->RtaUniqueNum = 0;
MapP->ID = 0;
MapP->Flags = SLOT_IN_USE;
MapP->SysPort = NO_PORT;
for (link = 0; link < LINKS_PER_UNIT; link++)
MapP->Topology[link] = HostP->Topology[link];
memcpy(MapP->Name, HostP->Name, MAX_NAME_LEN);
for (Rup = 0; Rup < MAX_RUP; Rup++) {
if (HostP->Mapping[Rup].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) {
p->RIOConnectTable[Next] = HostP->Mapping[Rup];
if (HostP->Mapping[Rup].Flags & SLOT_IN_USE)
p->RIOConnectTable[Next].Flags |= SLOT_IN_USE;
if (HostP->Mapping[Rup].Flags & SLOT_TENTATIVE)
p->RIOConnectTable[Next].Flags |= SLOT_TENTATIVE;
if (HostP->Mapping[Rup].Flags & RTA16_SECOND_SLOT)
p->RIOConnectTable[Next].Flags |= RTA16_SECOND_SLOT;
Next++;
}
}
rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
}
return 0;
}
/*
** config.rio has taken a dislike to one of the gross maps entries.
** if the entry is suitably inactive, then we can gob on it and remove
** it from the table.
*/
int RIODeleteRta(struct rio_info *p, struct Map *MapP)
{
int host, entry, port, link;
int SysPort;
struct Host *HostP;
struct Map *HostMapP;
struct Port *PortP;
int work_done = 0;
unsigned long lock_flags, sem_flags;
rio_dprintk(RIO_DEBUG_TABLE, "Delete entry on host %x, rta %x\n", MapP->HostUniqueNum, MapP->RtaUniqueNum);
for (host = 0; host < p->RIONumHosts; host++) {
HostP = &p->RIOHosts[host];
rio_spin_lock_irqsave(&HostP->HostLock, lock_flags);
if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
continue;
}
for (entry = 0; entry < MAX_RUP; entry++) {
if (MapP->RtaUniqueNum == HostP->Mapping[entry].RtaUniqueNum) {
HostMapP = &HostP->Mapping[entry];
rio_dprintk(RIO_DEBUG_TABLE, "Found entry offset %d on host %s\n", entry, HostP->Name);
/*
** Check all four links of the unit are disconnected
*/
for (link = 0; link < LINKS_PER_UNIT; link++) {
if (HostMapP->Topology[link].Unit != ROUTE_DISCONNECT) {
rio_dprintk(RIO_DEBUG_TABLE, "Entry is in use and cannot be deleted!\n");
p->RIOError.Error = UNIT_IS_IN_USE;
rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
return -EBUSY;
}
}
/*
** Slot has been allocated, BUT not booted/routed/
** connected/selected or anything else-ed
*/
SysPort = HostMapP->SysPort;
if (SysPort != NO_PORT) {
for (port = SysPort; port < SysPort + PORTS_PER_RTA; port++) {
PortP = p->RIOPortp[port];
rio_dprintk(RIO_DEBUG_TABLE, "Unmap port\n");
rio_spin_lock_irqsave(&PortP->portSem, sem_flags);
PortP->Mapped = 0;
if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
rio_dprintk(RIO_DEBUG_TABLE, "Gob on port\n");
PortP->TxBufferIn = PortP->TxBufferOut = 0;
/* What should I do
wakeup( &PortP->TxBufferIn );
wakeup( &PortP->TxBufferOut);
*/
PortP->InUse = NOT_INUSE;
/* What should I do
wakeup( &PortP->InUse );
signal(PortP->TtyP->t_pgrp,SIGKILL);
ttyflush(PortP->TtyP,(FREAD|FWRITE));
*/
PortP->State |= RIO_CLOSING | RIO_DELETED;
}
/*
** For the second slot of a 16 port RTA, the
** driver needs to reset the changes made to
** the phb to port mappings in RIORouteRup.
*/
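/*
** The code below walks the transmit ring on the card, rewriting the
** destination of each queued packet, and then patches the phb's own
** destination word (unit in the low byte, port in the high byte).
*/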
if (PortP->SecondBlock) {
u16 dest_unit = HostMapP->ID;
u16 dest_port = port - SysPort;
u16 __iomem *TxPktP;
struct PKT __iomem *Pkt;
for (TxPktP = PortP->TxStart; TxPktP <= PortP->TxEnd; TxPktP++) {
/*
** *TxPktP is the pointer to the
** transmit packet on the host card.
** This needs to be translated into
** a 32 bit pointer so it can be
** accessed from the driver.
*/
Pkt = (struct PKT __iomem *) RIO_PTR(HostP->Caddr, readw(&*TxPktP));
rio_dprintk(RIO_DEBUG_TABLE, "Tx packet (%x) destination: Old %x:%x New %x:%x\n", readw(TxPktP), readb(&Pkt->dest_unit), readb(&Pkt->dest_port), dest_unit, dest_port);
writew(dest_unit, &Pkt->dest_unit);
writew(dest_port, &Pkt->dest_port);
}
rio_dprintk(RIO_DEBUG_TABLE, "Port %d phb destination: Old %x:%x New %x:%x\n", port, readb(&PortP->PhbP->destination) & 0xff, (readb(&PortP->PhbP->destination) >> 8) & 0xff, dest_unit, dest_port);
writew(dest_unit + (dest_port << 8), &PortP->PhbP->destination);
}
rio_spin_unlock_irqrestore(&PortP->portSem, sem_flags);
}
}
rio_dprintk(RIO_DEBUG_TABLE, "Entry nulled.\n");
memset(HostMapP, 0, sizeof(struct Map));
work_done++;
}
}
rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
}
/* XXXXX lock me up */
for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
if (p->RIOSavedTable[entry].RtaUniqueNum == MapP->RtaUniqueNum) {
memset(&p->RIOSavedTable[entry], 0, sizeof(struct Map));
work_done++;
}
if (p->RIOConnectTable[entry].RtaUniqueNum == MapP->RtaUniqueNum) {
memset(&p->RIOConnectTable[entry], 0, sizeof(struct Map));
work_done++;
}
}
if (work_done)
return 0;
rio_dprintk(RIO_DEBUG_TABLE, "Couldn't find entry to be deleted\n");
p->RIOError.Error = COULDNT_FIND_ENTRY;
return -ENXIO;
}
int RIOAssignRta(struct rio_info *p, struct Map *MapP)
{
int host;
struct Map *HostMapP;
char *sptr;
int link;
rio_dprintk(RIO_DEBUG_TABLE, "Assign entry on host %x, rta %x, ID %d, Sysport %d\n", MapP->HostUniqueNum, MapP->RtaUniqueNum, MapP->ID, (int) MapP->SysPort);
if ((MapP->ID != (u16) - 1) && ((int) MapP->ID < (int) 1 || (int) MapP->ID > MAX_RUP)) {
rio_dprintk(RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
return -EINVAL;
}
if (MapP->RtaUniqueNum == 0) {
rio_dprintk(RIO_DEBUG_TABLE, "Rta Unique number zero!\n");
p->RIOError.Error = RTA_UNIQUE_NUMBER_ZERO;
return -EINVAL;
}
if ((MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA)) {
rio_dprintk(RIO_DEBUG_TABLE, "Port %d not multiple of %d!\n", (int) MapP->SysPort, PORTS_PER_RTA);
p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
return -EINVAL;
}
if ((MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS)) {
rio_dprintk(RIO_DEBUG_TABLE, "Port %d not valid!\n", (int) MapP->SysPort);
p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
return -EINVAL;
}
/*
** Copy the name across to the map entry.
*/
MapP->Name[MAX_NAME_LEN - 1] = '\0';
sptr = MapP->Name;
while (*sptr) {
if (*sptr < ' ' || *sptr > '~') {
rio_dprintk(RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
p->RIOError.Error = BAD_CHARACTER_IN_NAME;
return -EINVAL;
}
sptr++;
}
for (host = 0; host < p->RIONumHosts; host++) {
if (MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum) {
if ((p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING) {
p->RIOError.Error = HOST_NOT_RUNNING;
return -ENXIO;
}
/*
** Now we have a host we need to allocate an ID
** if the entry does not already have one.
*/
if (MapP->ID == (u16) - 1) {
int nNewID;
rio_dprintk(RIO_DEBUG_TABLE, "Attempting to get a new ID for rta \"%s\"\n", MapP->Name);
/*
** The idea here is to allow RTAs to be assigned
** before they actually appear on the network.
** This allows the addition of RTA's without having
** to plug them in.
** What we do is:
** - Find a free ID and allocate it to the RTA.
** - If this map entry is the second half of a
** 16 port entry then find the other half and
** make sure the two cross-reference each other.
*/
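/*
** Note: the cross-reference is kept in the ID2 fields of both halves
** (set up below), so either slot can later locate its partner.
*/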
if (RIOFindFreeID(p, &p->RIOHosts[host], &nNewID, NULL) != 0) {
p->RIOError.Error = COULDNT_FIND_ENTRY;
return -EBUSY;
}
MapP->ID = (u16) nNewID + 1;
rio_dprintk(RIO_DEBUG_TABLE, "Allocated ID %d for this new RTA.\n", MapP->ID);
HostMapP = &p->RIOHosts[host].Mapping[nNewID];
HostMapP->RtaUniqueNum = MapP->RtaUniqueNum;
HostMapP->HostUniqueNum = MapP->HostUniqueNum;
HostMapP->ID = MapP->ID;
for (link = 0; link < LINKS_PER_UNIT; link++) {
HostMapP->Topology[link].Unit = ROUTE_DISCONNECT;
HostMapP->Topology[link].Link = NO_LINK;
}
if (MapP->Flags & RTA16_SECOND_SLOT) {
int unit;
for (unit = 0; unit < MAX_RUP; unit++)
if (p->RIOHosts[host].Mapping[unit].RtaUniqueNum == MapP->RtaUniqueNum)
break;
if (unit == MAX_RUP) {
p->RIOError.Error = COULDNT_FIND_ENTRY;
return -EBUSY;
}
HostMapP->Flags |= RTA16_SECOND_SLOT;
HostMapP->ID2 = MapP->ID2 = p->RIOHosts[host].Mapping[unit].ID;
p->RIOHosts[host].Mapping[unit].ID2 = MapP->ID;
rio_dprintk(RIO_DEBUG_TABLE, "Cross referenced id %d to ID %d.\n", MapP->ID, p->RIOHosts[host].Mapping[unit].ID);
}
}
HostMapP = &p->RIOHosts[host].Mapping[MapP->ID - 1];
if (HostMapP->Flags & SLOT_IN_USE) {
rio_dprintk(RIO_DEBUG_TABLE, "Map table slot for ID %d is already in use.\n", MapP->ID);
p->RIOError.Error = ID_ALREADY_IN_USE;
return -EBUSY;
}
/*
** Assign the sys ports and the name, and mark the slot as
** being in use.
*/
HostMapP->SysPort = MapP->SysPort;
if ((MapP->Flags & RTA16_SECOND_SLOT) == 0)
memcpy(HostMapP->Name, MapP->Name, MAX_NAME_LEN);
HostMapP->Flags = SLOT_IN_USE | RTA_BOOTED;
#ifdef NEED_TO_FIX
RIO_SV_BROADCAST(p->RIOHosts[host].svFlags[MapP->ID - 1]);
#endif
if (MapP->Flags & RTA16_SECOND_SLOT)
HostMapP->Flags |= RTA16_SECOND_SLOT;
RIOReMapPorts(p, &p->RIOHosts[host], HostMapP);
/*
** Adjust 2nd block of 8 phbs
*/
if (MapP->Flags & RTA16_SECOND_SLOT)
RIOFixPhbs(p, &p->RIOHosts[host], HostMapP->ID - 1);
if (HostMapP->SysPort != NO_PORT) {
if (HostMapP->SysPort < p->RIOFirstPortsBooted)
p->RIOFirstPortsBooted = HostMapP->SysPort;
if (HostMapP->SysPort > p->RIOLastPortsBooted)
p->RIOLastPortsBooted = HostMapP->SysPort;
}
if (MapP->Flags & RTA16_SECOND_SLOT)
rio_dprintk(RIO_DEBUG_TABLE, "Second map of RTA %s added to configuration\n", p->RIOHosts[host].Mapping[MapP->ID2 - 1].Name);
else
rio_dprintk(RIO_DEBUG_TABLE, "RTA %s added to configuration\n", MapP->Name);
return 0;
}
}
p->RIOError.Error = UNKNOWN_HOST_NUMBER;
rio_dprintk(RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
return -ENXIO;
}
int RIOReMapPorts(struct rio_info *p, struct Host *HostP, struct Map *HostMapP)
{
struct Port *PortP;
unsigned int SubEnt;
unsigned int HostPort;
unsigned int SysPort;
u16 RtaType;
unsigned long flags;
rio_dprintk(RIO_DEBUG_TABLE, "Mapping sysport %d to id %d\n", (int) HostMapP->SysPort, HostMapP->ID);
/*
** We need to tell the UnixRups which sysport the rup corresponds to
*/
HostP->UnixRups[HostMapP->ID - 1].BaseSysPort = HostMapP->SysPort;
if (HostMapP->SysPort == NO_PORT)
return (0);
RtaType = GetUnitType(HostMapP->RtaUniqueNum);
rio_dprintk(RIO_DEBUG_TABLE, "Mapping sysport %d-%d\n", (int) HostMapP->SysPort, (int) HostMapP->SysPort + PORTS_PER_RTA - 1);
/*
** now map each of its eight ports
*/
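/*
** (PORTS_PER_RTA is presumably 8 here; a 16 port RTA is represented
** as two of these blocks, the second carrying RTA16_SECOND_SLOT.)
*/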
for (SubEnt = 0; SubEnt < PORTS_PER_RTA; SubEnt++) {
rio_dprintk(RIO_DEBUG_TABLE, "subent = %d, HostMapP->SysPort = %d\n", SubEnt, (int) HostMapP->SysPort);
SysPort = HostMapP->SysPort + SubEnt; /* portnumber within system */
/* portnumber on host */
HostPort = (HostMapP->ID - 1) * PORTS_PER_RTA + SubEnt;
rio_dprintk(RIO_DEBUG_TABLE, "c1 p = %p, p->rioPortp = %p\n", p, p->RIOPortp);
PortP = p->RIOPortp[SysPort];
rio_dprintk(RIO_DEBUG_TABLE, "Map port\n");
/*
** Point at all the real neat data structures
*/
rio_spin_lock_irqsave(&PortP->portSem, flags);
PortP->HostP = HostP;
PortP->Caddr = HostP->Caddr;
/*
** The PhbP cannot be filled in yet
** unless the host has been booted
*/
if ((HostP->Flags & RUN_STATE) == RC_RUNNING) {
struct PHB __iomem *PhbP = PortP->PhbP = &HostP->PhbP[HostPort];
PortP->TxAdd = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->tx_add));
PortP->TxStart = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->tx_start));
PortP->TxEnd = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->tx_end));
PortP->RxRemove = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->rx_remove));
PortP->RxStart = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->rx_start));
PortP->RxEnd = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->rx_end));
} else
PortP->PhbP = NULL;
/*
** port related flags
*/
PortP->HostPort = HostPort;
/*
** For each part of a 16 port RTA, RupNum is ID - 1.
*/
PortP->RupNum = HostMapP->ID - 1;
if (HostMapP->Flags & RTA16_SECOND_SLOT) {
PortP->ID2 = HostMapP->ID2 - 1;
PortP->SecondBlock = 1;
} else {
PortP->ID2 = 0;
PortP->SecondBlock = 0;
}
PortP->RtaUniqueNum = HostMapP->RtaUniqueNum;
/*
** If the port was already mapped then that's all we need to do.
*/
if (PortP->Mapped) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
continue;
} else
HostMapP->Flags &= ~RTA_NEWBOOT;
PortP->State = 0;
PortP->Config = 0;
/*
** Check out the module type - if it is special (read only etc.)
** then we need to set flags in the PortP->Config.
** Note: For 16 port RTA, all ports are of the same type.
*/
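/*
** For the non-16-port case the ModTypes byte presumably packs two
** module types: the low nibble covers the first PORTS_PER_MODULE
** ports, the high nibble the remainder.
*/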
if (RtaType == TYPE_RTA16) {
PortP->Config |= p->RIOModuleTypes[HostP->UnixRups[HostMapP->ID - 1].ModTypes].Flags[SubEnt % PORTS_PER_MODULE];
} else {
if (SubEnt < PORTS_PER_MODULE)
PortP->Config |= p->RIOModuleTypes[LONYBLE(HostP->UnixRups[HostMapP->ID - 1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
else
PortP->Config |= p->RIOModuleTypes[HINYBLE(HostP->UnixRups[HostMapP->ID - 1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
}
/*
** more port related flags
*/
PortP->PortState = 0;
PortP->ModemLines = 0;
PortP->ModemState = 0;
PortP->CookMode = COOK_WELL;
PortP->ParamSem = 0;
PortP->FlushCmdBodge = 0;
PortP->WflushFlag = 0;
PortP->MagicFlags = 0;
PortP->Lock = 0;
PortP->Store = 0;
PortP->FirstOpen = 1;
/*
** Buffers 'n things
*/
PortP->RxDataStart = 0;
PortP->Cor2Copy = 0;
PortP->Name = &HostMapP->Name[0];
PortP->statsGather = 0;
PortP->txchars = 0;
PortP->rxchars = 0;
PortP->opens = 0;
PortP->closes = 0;
PortP->ioctls = 0;
if (PortP->TxRingBuffer)
memset(PortP->TxRingBuffer, 0, p->RIOBufferSize);
else if (p->RIOBufferSize) {
PortP->TxRingBuffer = kzalloc(p->RIOBufferSize, GFP_KERNEL);
}
PortP->TxBufferOut = 0;
PortP->TxBufferIn = 0;
PortP->Debug = 0;
/*
** LastRxTgl stores the state of the rx toggle bit for this
** port, to be compared with the state of the next pkt received.
** If the same, we have received the same rx pkt from the RTA
** twice. Initialise to a value not equal to PHB_RX_TGL or 0.
*/
PortP->LastRxTgl = ~(u8) PHB_RX_TGL;
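/*
** ~PHB_RX_TGL differs from PHB_RX_TGL in every bit and (assuming
** PHB_RX_TGL is a single-bit flag) is also non-zero, so the first
** packet received is never mistaken for a duplicate.
*/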
/*
** and mark the port as usable
*/
PortP->Mapped = 1;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
}
if (HostMapP->SysPort < p->RIOFirstPortsMapped)
p->RIOFirstPortsMapped = HostMapP->SysPort;
if (HostMapP->SysPort > p->RIOLastPortsMapped)
p->RIOLastPortsMapped = HostMapP->SysPort;
return 0;
}
int RIOChangeName(struct rio_info *p, struct Map *MapP)
{
int host;
struct Map *HostMapP;
char *sptr;
rio_dprintk(RIO_DEBUG_TABLE, "Change name entry on host %x, rta %x, ID %d, Sysport %d\n", MapP->HostUniqueNum, MapP->RtaUniqueNum, MapP->ID, (int) MapP->SysPort);
if (MapP->ID > MAX_RUP) {
rio_dprintk(RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
return -EINVAL;
}
MapP->Name[MAX_NAME_LEN - 1] = '\0';
sptr = MapP->Name;
while (*sptr) {
if (*sptr < ' ' || *sptr > '~') {
rio_dprintk(RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
p->RIOError.Error = BAD_CHARACTER_IN_NAME;
return -EINVAL;
}
sptr++;
}
for (host = 0; host < p->RIONumHosts; host++) {
if (MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum) {
if ((p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING) {
p->RIOError.Error = HOST_NOT_RUNNING;
return -ENXIO;
}
if (MapP->ID == 0) {
memcpy(p->RIOHosts[host].Name, MapP->Name, MAX_NAME_LEN);
return 0;
}
HostMapP = &p->RIOHosts[host].Mapping[MapP->ID - 1];
if (HostMapP->RtaUniqueNum != MapP->RtaUniqueNum) {
p->RIOError.Error = RTA_NUMBER_WRONG;
return -ENXIO;
}
memcpy(HostMapP->Name, MapP->Name, MAX_NAME_LEN);
return 0;
}
}
p->RIOError.Error = UNKNOWN_HOST_NUMBER;
rio_dprintk(RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
return -ENXIO;
}
| gpl-2.0 |
sub77/kernel_samsung_matisse | arch/blackfin/mach-bf538/boards/ezkit.c | 4391 | 23431 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/bfin5xx_spi.h>
#include <asm/dma.h>
#include <asm/gpio.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/input.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "ADI BF538-EZKIT";
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif /* CONFIG_RTC_DRV_BFIN */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART0_CTSRTS
{ /* CTS pin */
.start = GPIO_PG7,
.end = GPIO_PG7,
.flags = IORESOURCE_IO,
},
{ /* RTS pin */
.start = GPIO_PG6,
.end = GPIO_PG6,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_UART0 */
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_UART1 */
#ifdef CONFIG_SERIAL_BFIN_UART2
static struct resource bfin_uart2_resources[] = {
{
.start = UART2_THR,
.end = UART2_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART2_TX,
.end = IRQ_UART2_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART2_RX,
.end = IRQ_UART2_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART2_ERROR,
.end = IRQ_UART2_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART2_TX,
.end = CH_UART2_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART2_RX,
.end = CH_UART2_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart2_peripherals[] = {
P_UART2_TX, P_UART2_RX, 0
};
static struct platform_device bfin_uart2_device = {
.name = "bfin-uart",
.id = 2,
.num_resources = ARRAY_SIZE(bfin_uart2_resources),
.resource = bfin_uart2_resources,
.dev = {
.platform_data = &bfin_uart2_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_UART2 */
#endif /* CONFIG_SERIAL_BFIN */
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif /* CONFIG_BFIN_SIR0 */
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif /* CONFIG_BFIN_SIR1 */
#ifdef CONFIG_BFIN_SIR2
static struct resource bfin_sir2_resources[] = {
{
.start = 0xFFC02100,
.end = 0xFFC021FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART2_RX,
.end = IRQ_UART2_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART2_RX,
.end = CH_UART2_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir2_device = {
.name = "bfin_sir",
.id = 2,
.num_resources = ARRAY_SIZE(bfin_sir2_resources),
.resource = bfin_sir2_resources,
};
#endif /* CONFIG_BFIN_SIR2 */
#endif /* CONFIG_BFIN_SIR */
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_SPORT0_UART */
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_SPORT1_UART */
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
static struct resource bfin_sport2_uart_resources[] = {
{
.start = SPORT2_TCR1,
.end = SPORT2_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT2_RX,
.end = IRQ_SPORT2_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT2_ERROR,
.end = IRQ_SPORT2_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport2_peripherals[] = {
P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
};
static struct platform_device bfin_sport2_uart_device = {
.name = "bfin-sport-uart",
.id = 2,
.num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
.resource = bfin_sport2_uart_resources,
.dev = {
.platform_data = &bfin_sport2_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_SPORT2_UART */
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
static struct resource bfin_sport3_uart_resources[] = {
{
.start = SPORT3_TCR1,
.end = SPORT3_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT3_RX,
.end = IRQ_SPORT3_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT3_ERROR,
.end = IRQ_SPORT3_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport3_peripherals[] = {
P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
};
static struct platform_device bfin_sport3_uart_device = {
.name = "bfin-sport-uart",
.id = 3,
.num_resources = ARRAY_SIZE(bfin_sport3_uart_resources),
.resource = bfin_sport3_uart_resources,
.dev = {
.platform_data = &bfin_sport3_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */
#endif /* CONFIG_SERIAL_BFIN_SPORT */
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
static struct resource bfin_can_resources[] = {
{
.start = 0xFFC02A00,
.end = 0xFFC02FFF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_CAN_RX,
.end = IRQ_CAN_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_CAN_TX,
.end = IRQ_CAN_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_CAN_ERROR,
.end = IRQ_CAN_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_can_device = {
.name = "bfin_can",
.num_resources = ARRAY_SIZE(bfin_can_resources),
.resource = bfin_can_resources,
.dev = {
.platform_data = &bfin_can_peripherals, /* Passed to driver */
},
};
#endif /* CONFIG_CAN_BFIN */
/*
* USB-LAN EzExtender board
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.name = "smc91x-regs",
.start = 0x20310300,
.end = 0x20310300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF0,
.end = IRQ_PF0,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif /* CONFIG_SMC91X */
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
/* SPI flash chip (m25p16) */
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0x1c0000,
.offset = 0x40000
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p16",
};
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* do not use dma transfer with this chip */
};
#endif /* CONFIG_MTD_M25P80 */
#endif /* CONFIG_SPI_BFIN5XX */
#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
.model = 7879, /* Model = AD7879 */
.x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */
.pressure_max = 10000,
.pressure_min = 0,
.first_conversion_delay = 3, /* wait 512us before doing the first conversion */
.acquisition_time = 1, /* 4us acquisition time per sample */
.median = 2, /* do 8 measurements */
.averaging = 1, /* take the average of 4 middle samples */
.pen_down_acc_interval = 255, /* 9.4 ms */
.gpio_export = 1, /* Export GPIO to gpiolib */
.gpio_base = -1, /* Dynamic allocation */
};
#endif /* CONFIG_TOUCHSCREEN_AD7879 */
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
.mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
.ppi_mode = USE_RGB565_16_BIT_PPI,
.use_bl = 0, /* let something else control the LCD Backlight */
.gpio_bl = GPIO_PF7,
};
static struct resource bfin_lq035q1_resources[] = {
{
.start = IRQ_PPI_ERROR,
.end = IRQ_PPI_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_lq035q1_device = {
.name = "bfin-lq035q1",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
.resource = bfin_lq035q1_resources,
.dev = {
.platform_data = &bfin_lq035q1_data,
},
};
#endif /* CONFIG_FB_BFIN_LQ035Q1 */
static struct spi_board_info bf538_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* SPI_SSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif /* CONFIG_MTD_M25P80 */
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
{
.modalias = "ad7879",
.platform_data = &bfin_ad7879_ts_info,
.irq = IRQ_PF3,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
{
.modalias = "bfin-lq035q1-spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif /* CONFIG_FB_BFIN_LQ035Q1 */
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
},
#endif /* CONFIG_SPI_SPIDEV */
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI0,
.end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
/* SPI (1) */
static struct resource bfin_spi1_resource[] = {
[0] = {
.start = SPI1_REGBASE,
.end = SPI1_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI1,
.end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
/* SPI (2) */
static struct resource bfin_spi2_resource[] = {
[0] = {
.start = SPI2_REGBASE,
.end = SPI2_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI2,
.end = CH_SPI2,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI2,
.end = IRQ_SPI2,
.flags = IORESOURCE_IRQ,
}
};
/* SPI controller data */
static struct bfin5xx_spi_master bf538_spi_master_info0 = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
static struct platform_device bf538_spi_master0 = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bf538_spi_master_info0, /* Passed to driver */
},
};
static struct bfin5xx_spi_master bf538_spi_master_info1 = {
.num_chipselect = 2,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
};
static struct platform_device bf538_spi_master1 = {
.name = "bfin-spi",
.id = 1, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi1_resource),
.resource = bfin_spi1_resource,
.dev = {
.platform_data = &bf538_spi_master_info1, /* Passed to driver */
},
};
static struct bfin5xx_spi_master bf538_spi_master_info2 = {
.num_chipselect = 2,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI2_SCK, P_SPI2_MISO, P_SPI2_MOSI, 0},
};
static struct platform_device bf538_spi_master2 = {
.name = "bfin-spi",
.id = 2, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi2_resource),
.resource = bfin_spi2_resource,
.dev = {
.platform_data = &bf538_spi_master_info2, /* Passed to driver */
},
};
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI0,
.end = IRQ_TWI0,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi0_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */
static struct resource bfin_twi1_resource[] = {
[0] = {
.start = TWI1_REGBASE,
.end = TWI1_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI1,
.end = IRQ_TWI1,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi1_device = {
.name = "i2c-bfin-twi",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_twi1_resource),
.resource = bfin_twi1_resource,
};
#endif /* CONFIG_BF542 */
#endif /* CONFIG_I2C_BLACKFIN_TWI */
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>
static struct gpio_keys_button bfin_gpio_keys_table[] = {
{BTN_0, GPIO_PC7, 1, "gpio-keys: BTN0"},
};
static struct gpio_keys_platform_data bfin_gpio_keys_data = {
.buttons = bfin_gpio_keys_table,
.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};
static struct platform_device bfin_device_gpiokeys = {
.name = "gpio-keys",
.dev = {
.platform_data = &bfin_gpio_keys_data,
},
};
#endif
static const unsigned int cclk_vlev_datasheet[] =
{
/*
* Internal VLEV BF538SBBC1533
****temporarily using these values until data sheet is updated
*/
VRPAIR(VLEV_100, 150000000),
VRPAIR(VLEV_100, 250000000),
VRPAIR(VLEV_110, 276000000),
VRPAIR(VLEV_115, 301000000),
VRPAIR(VLEV_120, 525000000),
VRPAIR(VLEV_125, 550000000),
VRPAIR(VLEV_130, 600000000),
};
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition ezkit_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x180000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct physmap_flash_data ezkit_flash_data = {
.width = 2,
.parts = ezkit_partitions,
.nr_parts = ARRAY_SIZE(ezkit_partitions),
};
static struct resource ezkit_flash_resource = {
.start = 0x20000000,
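/* The end address below is an assumption tied to the board wiring: with
 * the SMC91X Ethernet enabled it presumably shares this async bank, so
 * the flash window is shrunk accordingly. */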
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
.end = 0x202fffff,
#else
.end = 0x203fffff,
#endif
.flags = IORESOURCE_MEM,
};
static struct platform_device ezkit_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &ezkit_flash_data,
},
.num_resources = 1,
.resource = &ezkit_flash_resource,
};
#endif
static struct platform_device *cm_bf538_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
&bfin_uart2_device,
#endif
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bf538_spi_master0,
&bf538_spi_master1,
&bf538_spi_master2,
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi0_device,
&i2c_bfin_twi1_device,
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#ifdef CONFIG_BFIN_SIR2
&bfin_sir2_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
&bfin_sport2_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
&bfin_sport3_uart_device,
#endif
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
&bfin_can_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
&bfin_lq035q1_device,
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
&ezkit_flash_device,
#endif
};
static int __init ezkit_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices));
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bf538_spi_board_info,
ARRAY_SIZE(bf538_spi_board_info));
#endif
return 0;
}
arch_initcall(ezkit_init);
static struct platform_device *ezkit_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART2
&bfin_uart2_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
&bfin_sport2_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
&bfin_sport3_uart_device,
#endif
#endif
};
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(ezkit_early_devices,
ARRAY_SIZE(ezkit_early_devices));
}
| gpl-2.0 |
vetzki/kernel_msm | net/ipv4/netfilter/nf_defrag_ipv4.c | 4391 | 3280 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/netfilter/nf_conntrack_zones.h>
/* Returns 0 when the skb holds the complete, re-checksummed datagram; non-zero otherwise */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
{
int err;
skb_orphan(skb);
local_bh_disable();
err = ip_defrag(skb, user);
local_bh_enable();
if (!err)
ip_send_check(ip_hdr(skb));
return err;
}
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
u16 zone = NF_CT_DEFAULT_ZONE;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
if (skb->nfct)
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge &&
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
if (hooknum == NF_INET_PRE_ROUTING)
return IP_DEFRAG_CONNTRACK_IN + zone;
else
return IP_DEFRAG_CONNTRACK_OUT + zone;
}
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);
if (sk && (sk->sk_family == PF_INET) &&
inet->nodefrag)
return NF_ACCEPT;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
/* Previously seen (loopback)? Ignore. Do this before
fragment check. */
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
#endif
#endif
/* Gather fragments. */
if (ip_is_fragment(ip_hdr(skb))) {
enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
if (nf_ct_ipv4_gather_frags(skb, user))
return NF_STOLEN;
}
return NF_ACCEPT;
}
static struct nf_hook_ops ipv4_defrag_ops[] = {
{
.hook = ipv4_conntrack_defrag,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
{
.hook = ipv4_conntrack_defrag,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
};
static int __init nf_defrag_init(void)
{
return nf_register_hooks(ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops));
}
static void __exit nf_defrag_fini(void)
{
nf_unregister_hooks(ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops));
}
void nf_defrag_ipv4_enable(void)
{
}
EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable);
module_init(nf_defrag_init);
module_exit(nf_defrag_fini);
MODULE_LICENSE("GPL");
| gpl-2.0 |
thanhphat11/Kernel-Caf-Msm8974_Ef63 | net/dccp/ackvec.c | 8487 | 12733 | /*
* net/dccp/ackvec.c
*
* An implementation of Ack Vectors for the DCCP protocol
* Copyright (c) 2007 University of Aberdeen, Scotland, UK
* Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License;
*/
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;
struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
if (av != NULL) {
av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
INIT_LIST_HEAD(&av->av_records);
}
return av;
}
static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
struct dccp_ackvec_record *cur, *next;
list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
kmem_cache_free(dccp_ackvec_record_slab, cur);
INIT_LIST_HEAD(&av->av_records);
}
void dccp_ackvec_free(struct dccp_ackvec *av)
{
if (likely(av != NULL)) {
dccp_ackvec_purge_records(av);
kmem_cache_free(dccp_ackvec_slab, av);
}
}
/**
* dccp_ackvec_update_records - Record information about sent Ack Vectors
* @av: Ack Vector records to update
* @seqno: Sequence number of the packet carrying the Ack Vector just sent
* @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
*/
int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
struct dccp_ackvec_record *avr;
avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
if (avr == NULL)
return -ENOBUFS;
avr->avr_ack_seqno = seqno;
avr->avr_ack_ptr = av->av_buf_head;
avr->avr_ack_ackno = av->av_buf_ackno;
avr->avr_ack_nonce = nonce_sum;
avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
/*
* When the buffer overflows, we keep no more than one record. This is
* the simplest way of disambiguating sender-Acks dating from before the
* overflow from sender-Acks which refer to after the overflow; a simple
* solution is preferable here since we are handling an exception.
*/
if (av->av_overflow)
dccp_ackvec_purge_records(av);
/*
* Since GSS is incremented for each packet, the list is automatically
* arranged in descending order of @ack_seqno.
*/
list_add(&avr->avr_node, &av->av_records);
dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
(unsigned long long)avr->avr_ack_seqno,
(unsigned long long)avr->avr_ack_ackno,
avr->avr_ack_runlen);
return 0;
}
static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
const u64 ackno)
{
struct dccp_ackvec_record *avr;
/*
* Exploit that records are inserted in descending order of sequence
* number, start with the oldest record first. If @ackno is `before'
* the earliest ack_ackno, the packet is too old to be considered.
*/
list_for_each_entry_reverse(avr, av_list, avr_node) {
if (avr->avr_ack_seqno == ackno)
return avr;
if (before48(ackno, avr->avr_ack_seqno))
break;
}
return NULL;
}
/*
* Buffer index and length computation using modulo-buffersize arithmetic.
* Note that, as pointers move from right to left, head is `before' tail.
*/
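/*
 * Worked example (illustrative only, using a hypothetical buffer size
 * of 8 instead of DCCPAV_MAX_ACKVEC_LEN): with av_buf_head == 6 and
 * av_buf_tail == 2, __ackvec_idx_sub(2, 6) == (2 + 8 - 6) % 8 == 4,
 * i.e. four cells are currently in use.
 */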
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}
static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}
u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
if (unlikely(av->av_overflow))
return DCCPAV_MAX_ACKVEC_LEN;
return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}
/**
* dccp_ackvec_update_old - Update previous state as per RFC 4340, 11.4.1
* @av: non-empty buffer to update
* @distance: negative or zero distance of @seqno from buf_ackno downward
* @seqno: the (old) sequence number whose record is to be updated
* @state: state in which packet carrying @seqno was received
*/
static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
u64 seqno, enum dccp_ackvec_states state)
{
u16 ptr = av->av_buf_head;
BUG_ON(distance > 0);
if (unlikely(dccp_ackvec_is_empty(av)))
return;
do {
u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);
if (distance + runlen >= 0) {
/*
* Only update the state if packet has not been received
* yet. This is OK as per the second table in RFC 4340,
* 11.4.1; i.e. here we are using the following table:
* RECEIVED
* 0 1 3
* S +---+---+---+
* T 0 | 0 | 0 | 0 |
* O +---+---+---+
* R 1 | 1 | 1 | 1 |
* E +---+---+---+
* D 3 | 0 | 1 | 3 |
* +---+---+---+
* The "Not Received" state was set by reserve_seats().
*/
if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
av->av_buf[ptr] = state;
else
dccp_pr_debug("Not changing %llu state to %u\n",
(unsigned long long)seqno, state);
break;
}
distance += runlen + 1;
ptr = __ackvec_idx_add(ptr, 1);
} while (ptr != av->av_buf_tail);
}
/* Mark @num entries after buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
u16 start = __ackvec_idx_add(av->av_buf_head, 1),
len = DCCPAV_MAX_ACKVEC_LEN - start;
/* check for buffer wrap-around */
if (num > len) {
memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
start = 0;
num -= len;
}
if (num)
memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}
/**
* dccp_ackvec_add_new - Record one or more new entries in Ack Vector buffer
* @av: container of buffer to update (can be empty or non-empty)
* @num_packets: number of packets to register (must be >= 1)
* @seqno: sequence number of the first packet in @num_packets
* @state: state in which packet carrying @seqno was received
*/
static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
u64 seqno, enum dccp_ackvec_states state)
{
u32 num_cells = num_packets;
if (num_packets > DCCPAV_BURST_THRESH) {
u32 lost_packets = num_packets - 1;
DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
/*
* We received 1 packet and have a loss of size "num_packets-1"
* which we squeeze into num_cells-1 rather than reserving an
* entire byte for each lost packet.
* The reason is that the vector grows in O(burst_length); when
* it grows too large there will be no room left for the payload.
* This is a trade-off: if a few packets out of the burst show
* up later, their state will not be changed; it is simply too
* costly to reshuffle/reallocate/copy the buffer each time.
* Should such problems persist, we will need to switch to a
* different underlying data structure.
*/
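/*
 * Each extra cell written below records up to DCCPAV_MAX_RUNLEN lost
 * packets in its run-length field, so a long burst costs roughly
 * lost_packets / DCCPAV_MAX_RUNLEN cells rather than one cell per
 * packet.
 */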
for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);
av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;
lost_packets -= len;
}
}
if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
av->av_overflow = true;
}
av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
if (av->av_overflow)
av->av_buf_tail = av->av_buf_head;
av->av_buf[av->av_buf_head] = state;
av->av_buf_ackno = seqno;
if (num_packets > 1)
dccp_ackvec_reserve_seats(av, num_packets - 1);
}
/**
* dccp_ackvec_input - Register incoming packet in the buffer
*/
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
enum dccp_ackvec_states state = DCCPAV_RECEIVED;
if (dccp_ackvec_is_empty(av)) {
dccp_ackvec_add_new(av, 1, seqno, state);
av->av_tail_ackno = seqno;
} else {
s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
u8 *current_head = av->av_buf + av->av_buf_head;
if (num_packets == 1 &&
dccp_ackvec_state(current_head) == state &&
dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {
*current_head += 1;
av->av_buf_ackno = seqno;
} else if (num_packets > 0) {
dccp_ackvec_add_new(av, num_packets, seqno, state);
} else {
dccp_ackvec_update_old(av, num_packets, seqno, state);
}
}
}
/**
* dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
* This routine is called when the peer acknowledges the receipt of Ack Vectors
* up to and including @ackno. While based on section A.3 of RFC 4340, here
* are additional precautions to prevent corrupted buffer state. In particular,
* we use tail_ackno to identify outdated records; it always marks the earliest
* packet of group (2) in 11.4.2.
*/
void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
struct dccp_ackvec_record *avr, *next;
u8 runlen_now, eff_runlen;
s64 delta;
avr = dccp_ackvec_lookup(&av->av_records, ackno);
if (avr == NULL)
return;
/*
* Deal with outdated acknowledgments: this arises when e.g. there are
* several old records and the acks from the peer come in slowly. In
* that case we may still have records that pre-date tail_ackno.
*/
delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
if (delta < 0)
goto free_records;
/*
* Deal with overlapping Ack Vectors: don't subtract more than the
* number of packets between tail_ackno and ack_ackno.
*/
eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;
runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
/*
* The run length of Ack Vector cells does not decrease over time. If
* the run length is the same as at the time the Ack Vector was sent, we
* free the ack_ptr cell. That cell can however not be freed if the run
* length has increased: in this case we need to move the tail pointer
* backwards (towards higher indices), to its next-oldest neighbour.
*/
if (runlen_now > eff_runlen) {
av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);
/* This move may not have cleared the overflow flag. */
if (av->av_overflow)
av->av_overflow = (av->av_buf_head == av->av_buf_tail);
} else {
av->av_buf_tail = avr->avr_ack_ptr;
/*
* We have made sure that avr points to a valid cell within the
* buffer. This cell is either older than head, or equals head
* (empty buffer): in both cases we no longer have any overflow.
*/
av->av_overflow = 0;
}
/*
* The peer has acknowledged up to and including ack_ackno. Hence the
* first packet in group (2) of 11.4.2 is the successor of ack_ackno.
*/
av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);
free_records:
list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
list_del(&avr->avr_node);
kmem_cache_free(dccp_ackvec_record_slab, avr);
}
}
/*
* Routines to keep track of Ack Vectors received in an skb
*/
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (new == NULL)
return -ENOBUFS;
new->vec = vec;
new->len = len;
new->nonce = nonce;
list_add_tail(&new->node, head);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);
void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
struct dccp_ackvec_parsed *cur, *next;
list_for_each_entry_safe(cur, next, parsed_chunks, node)
kfree(cur);
INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
int __init dccp_ackvec_init(void)
{
dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
sizeof(struct dccp_ackvec), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (dccp_ackvec_slab == NULL)
goto out_err;
dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
sizeof(struct dccp_ackvec_record),
0, SLAB_HWCACHE_ALIGN, NULL);
if (dccp_ackvec_record_slab == NULL)
goto out_destroy_slab;
return 0;
out_destroy_slab:
kmem_cache_destroy(dccp_ackvec_slab);
dccp_ackvec_slab = NULL;
out_err:
DCCP_CRIT("Unable to create Ack Vector slab cache");
return -ENOBUFS;
}
void dccp_ackvec_exit(void)
{
if (dccp_ackvec_slab != NULL) {
kmem_cache_destroy(dccp_ackvec_slab);
dccp_ackvec_slab = NULL;
}
if (dccp_ackvec_record_slab != NULL) {
kmem_cache_destroy(dccp_ackvec_record_slab);
dccp_ackvec_record_slab = NULL;
}
}
| gpl-2.0 |
MinimalOS/android_kernel_lge_mako | arch/arm/nwfpe/fpa11.c | 9767 | 3118 | /*
NetWinder Floating Point Emulator
(c) Rebel.COM, 1998,1999
(c) Philip Blundell, 2001
Direct questions, comments to Scott Bambrough <scottb@netwinder.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "fpa11.h"
#include "fpopcode.h"
#include "fpmodule.h"
#include "fpmodule.inl"
#include <linux/compiler.h>
#include <linux/string.h>
/* Reset the FPA11 chip. Called to initialize and reset the emulator. */
static void resetFPA11(void)
{
int i;
FPA11 *fpa11 = GET_FPA11();
/* initialize the register type array */
for (i = 0; i <= 7; i++) {
fpa11->fType[i] = typeNone;
}
/* FPSR: set system id to FP_EMULATOR, set AC, clear all other bits */
fpa11->fpsr = FP_EMULATOR | BIT_AC;
}
int8 SetRoundingMode(const unsigned int opcode)
{
switch (opcode & MASK_ROUNDING_MODE) {
default:
case ROUND_TO_NEAREST:
return float_round_nearest_even;
case ROUND_TO_PLUS_INFINITY:
return float_round_up;
case ROUND_TO_MINUS_INFINITY:
return float_round_down;
case ROUND_TO_ZERO:
return float_round_to_zero;
}
}
int8 SetRoundingPrecision(const unsigned int opcode)
{
#ifdef CONFIG_FPE_NWFPE_XP
switch (opcode & MASK_ROUNDING_PRECISION) {
case ROUND_SINGLE:
return 32;
case ROUND_DOUBLE:
return 64;
case ROUND_EXTENDED:
return 80;
default:
return 80;
}
#endif
return 80;
}
void nwfpe_init_fpa(union fp_state *fp)
{
FPA11 *fpa11 = (FPA11 *)fp;
#ifdef NWFPE_DEBUG
printk("NWFPE: setting up state.\n");
#endif
memset(fpa11, 0, sizeof(FPA11));
resetFPA11();
fpa11->initflag = 1;
}
/* Emulate the instruction in the opcode. */
unsigned int EmulateAll(unsigned int opcode)
{
unsigned int code;
#ifdef NWFPE_DEBUG
printk("NWFPE: emulating opcode %08x\n", opcode);
#endif
code = opcode & 0x00000f00;
if (code == 0x00000100 || code == 0x00000200) {
/* For coprocessor 1 or 2 (FPA11) */
code = opcode & 0x0e000000;
if (code == 0x0e000000) {
if (opcode & 0x00000010) {
/* Emulate conversion opcodes. */
/* Emulate register transfer opcodes. */
/* Emulate comparison opcodes. */
return EmulateCPRT(opcode);
} else {
/* Emulate monadic arithmetic opcodes. */
/* Emulate dyadic arithmetic opcodes. */
return EmulateCPDO(opcode);
}
} else if (code == 0x0c000000) {
/* Emulate load/store opcodes. */
/* Emulate load/store multiple opcodes. */
return EmulateCPDT(opcode);
}
}
/* Invalid instruction detected. Return FALSE. */
return 0;
}
| gpl-2.0 |
linuxsky/linux-80211n-csitool | arch/powerpc/platforms/chrp/nvram.c | 10023 | 2253 | /*
* c 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* /dev/nvram driver for PPC
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "chrp.h"
static unsigned int nvram_size;
static unsigned char nvram_buf[4];
static DEFINE_SPINLOCK(nvram_lock);
static unsigned char chrp_nvram_read(int addr)
{
unsigned int done;
unsigned long flags;
unsigned char ret;
if (addr >= nvram_size) {
printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
current->comm, addr, nvram_size);
return 0xff;
}
spin_lock_irqsave(&nvram_lock, flags);
if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr,
__pa(nvram_buf), 1) != 0) || 1 != done)
ret = 0xff;
else
ret = nvram_buf[0];
spin_unlock_irqrestore(&nvram_lock, flags);
return ret;
}
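/*
 * Editorial note (not part of the original source): as read from the code
 * above, the RTAS "nvram-fetch" call is issued with three inputs (offset,
 * physical address of nvram_buf, length 1) and two return values; the first
 * (status) is rtas_call()'s return and the second (bytes transferred) lands
 * in "done". A non-zero status or a count other than 1 is treated as an I/O
 * error and the caller sees 0xff.
 */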
static void chrp_nvram_write(int addr, unsigned char val)
{
unsigned int done;
unsigned long flags;
if (addr >= nvram_size) {
printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
current->comm, addr, nvram_size);
return;
}
spin_lock_irqsave(&nvram_lock, flags);
nvram_buf[0] = val;
if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr,
__pa(nvram_buf), 1) != 0) || 1 != done)
printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
spin_unlock_irqrestore(&nvram_lock, flags);
}
void __init chrp_nvram_init(void)
{
struct device_node *nvram;
const unsigned int *nbytes_p;
unsigned int proplen;
nvram = of_find_node_by_type(NULL, "nvram");
if (nvram == NULL)
return;
nbytes_p = of_get_property(nvram, "#bytes", &proplen);
if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
of_node_put(nvram);
return;
}
nvram_size = *nbytes_p;
printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
of_node_put(nvram);
ppc_md.nvram_read_val = chrp_nvram_read;
ppc_md.nvram_write_val = chrp_nvram_write;
return;
}
| gpl-2.0 |
ligfx/dolphin | Externals/wxWidgets3/src/msw/datetimectrl.cpp | 40 | 5654 | ///////////////////////////////////////////////////////////////////////////////
// Name: src/msw/datetimectrl.cpp
// Purpose: Implementation of wxDateTimePickerCtrl for MSW.
// Author: Vadim Zeitlin
// Created: 2011-09-22 (extracted from src/msw/datectrl.cpp)
// Copyright: (c) 2005-2011 Vadim Zeitlin <vadim@wxwidgets.org>
// Licence: wxWindows licence
///////////////////////////////////////////////////////////////////////////////
// ============================================================================
// declarations
// ============================================================================
// ----------------------------------------------------------------------------
// headers
// ----------------------------------------------------------------------------
// for compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#ifdef __BORLANDC__
#pragma hdrstop
#endif
#include "wx/datetimectrl.h"
#ifdef wxNEEDS_DATETIMEPICKCTRL
#ifndef WX_PRECOMP
#include "wx/msw/wrapwin.h"
#include "wx/msw/wrapcctl.h" // include <commctrl.h> "properly"
#include "wx/msw/private.h"
#include "wx/dcclient.h"
#endif // WX_PRECOMP
#include "wx/msw/private/datecontrols.h"
// apparently some versions of mingw define these macros erroneously
#ifndef DateTime_GetSystemtime
#define DateTime_GetSystemtime DateTime_GetSystemTime
#endif
#ifndef DateTime_SetSystemtime
#define DateTime_SetSystemtime DateTime_SetSystemTime
#endif
#ifndef DTM_GETIDEALSIZE
#define DTM_GETIDEALSIZE 0x100f
#endif
// ============================================================================
// wxDateTimePickerCtrl implementation
// ============================================================================
bool
wxDateTimePickerCtrl::MSWCreateDateTimePicker(wxWindow *parent,
wxWindowID id,
const wxDateTime& dt,
const wxPoint& pos,
const wxSize& size,
long style,
const wxValidator& validator,
const wxString& name)
{
if ( !wxMSWDateControls::CheckInitialization() )
return false;
// initialize the base class
if ( !CreateControl(parent, id, pos, size, style, validator, name) )
return false;
// create the native control
if ( !MSWCreateControl(DATETIMEPICK_CLASS, wxString(), pos, size) )
return false;
if ( dt.IsValid() || MSWAllowsNone() )
SetValue(dt);
else
SetValue(wxDateTime::Now());
return true;
}
void wxDateTimePickerCtrl::SetValue(const wxDateTime& dt)
{
wxCHECK_RET( dt.IsValid() || MSWAllowsNone(),
wxT("this control requires a valid date") );
SYSTEMTIME st;
if ( dt.IsValid() )
dt.GetAsMSWSysTime(&st);
if ( !DateTime_SetSystemtime(GetHwnd(),
dt.IsValid() ? GDT_VALID : GDT_NONE,
&st) )
{
// The only expected failure is when the date is out of range but we
// already checked for this above.
wxFAIL_MSG( wxT("Setting the calendar date unexpectedly failed.") );
// In any case, skip updating m_date below.
return;
}
m_date = dt;
}
wxDateTime wxDateTimePickerCtrl::GetValue() const
{
return m_date;
}
wxSize wxDateTimePickerCtrl::DoGetBestSize() const
{
// Since Vista, the control can compute its best size itself, just ask it.
wxSize size;
if ( wxGetWinVersion() >= wxWinVersion_Vista )
{
SIZE idealSize;
::SendMessage(m_hWnd, DTM_GETIDEALSIZE, 0, (LPARAM)&idealSize);
size = wxSize(idealSize.cx, idealSize.cy);
}
else // Windows XP
{
wxClientDC dc(const_cast<wxDateTimePickerCtrl *>(this));
// Use the same native format as the underlying native control.
#if wxUSE_INTL
wxString s = wxDateTime::Now().Format(wxLocale::GetOSInfo(MSWGetFormat()));
#else // !wxUSE_INTL
wxString s("XXX-YYY-ZZZZ");
#endif // wxUSE_INTL/!wxUSE_INTL
// the best size for the control is bigger than just the string
// representation of the current value because the control must accommodate
// any date and while the widths of all digits are usually about the same,
// the width of the month string varies a lot, so try to account for it
s += wxS("W");
size = dc.GetTextExtent(s);
// account for the drop-down arrow or spin arrows
size.x += wxSystemSettings::GetMetric(wxSYS_HSCROLL_ARROW_X);
}
// We need to account for the checkbox, if we have one, ourselves as
// DTM_GETIDEALSIZE doesn't seem to take it into account, at least under
// Windows 7.
if ( MSWAllowsNone() )
size.x += 3*GetCharWidth();
// In any case, adjust the height to be the same as for the text controls.
size.y = EDIT_HEIGHT_FROM_CHAR_HEIGHT(size.y);
return size;
}
bool
wxDateTimePickerCtrl::MSWOnNotify(int idCtrl, WXLPARAM lParam, WXLPARAM *result)
{
NMHDR* hdr = (NMHDR *)lParam;
switch ( hdr->code )
{
case DTN_DATETIMECHANGE:
if ( MSWOnDateTimeChange(*(NMDATETIMECHANGE*)(hdr)) )
{
*result = 0;
return true;
}
break;
}
return wxDateTimePickerCtrlBase::MSWOnNotify(idCtrl, lParam, result);
}
#endif // wxNEEDS_DATETIMEPICKCTRL
| gpl-2.0 |
HaveF/papercrop | luabind-0.9/src/inheritance.cpp | 40 | 6325 | // Copyright Daniel Wallin 2009. Use, modification and distribution is
// subject to the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#define LUABIND_BUILDING
#include <limits>
#include <map>
#include <vector>
#include <queue>
#include <boost/dynamic_bitset.hpp>
#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/tuple/tuple_comparison.hpp>
#include <luabind/typeid.hpp>
#include <luabind/detail/inheritance.hpp>
namespace luabind { namespace detail {
class_id const class_id_map::local_id_base =
std::numeric_limits<class_id>::max() / 2;
namespace
{
struct edge
{
edge(class_id target, cast_function cast)
: target(target)
, cast(cast)
{}
class_id target;
cast_function cast;
};
bool operator<(edge const& x, edge const& y)
{
return x.target < y.target;
}
struct vertex
{
vertex(class_id id)
: id(id)
{}
class_id id;
std::vector<edge> edges;
};
typedef std::pair<std::ptrdiff_t, int> cache_entry;
class cache
{
public:
static std::ptrdiff_t const unknown;
static std::ptrdiff_t const invalid;
cache_entry get(
class_id src, class_id target, class_id dynamic_id
, std::ptrdiff_t object_offset) const;
void put(
class_id src, class_id target, class_id dynamic_id
, std::ptrdiff_t object_offset
, std::size_t distance, std::ptrdiff_t offset);
void invalidate();
private:
typedef boost::tuple<
class_id, class_id, class_id, std::ptrdiff_t> key_type;
typedef std::map<key_type, cache_entry> map_type;
map_type m_cache;
};
std::ptrdiff_t const cache::unknown =
std::numeric_limits<std::ptrdiff_t>::max();
std::ptrdiff_t const cache::invalid = cache::unknown - 1;
cache_entry cache::get(
class_id src, class_id target, class_id dynamic_id
, std::ptrdiff_t object_offset) const
{
map_type::const_iterator i = m_cache.find(
key_type(src, target, dynamic_id, object_offset));
return i != m_cache.end() ? i->second : cache_entry(unknown, -1);
}
void cache::put(
class_id src, class_id target, class_id dynamic_id
, std::ptrdiff_t object_offset, std::size_t distance, std::ptrdiff_t offset)
{
m_cache.insert(std::make_pair(
key_type(src, target, dynamic_id, object_offset)
, cache_entry(offset, distance)
));
}
void cache::invalidate()
{
m_cache.clear();
}
} // namespace unnamed
class cast_graph::impl
{
public:
std::pair<void*, int> cast(
void* p, class_id src, class_id target
, class_id dynamic_id, void const* dynamic_ptr) const;
void insert(class_id src, class_id target, cast_function cast);
private:
std::vector<vertex> m_vertices;
mutable cache m_cache;
};
namespace
{
struct queue_entry
{
queue_entry(void* p, class_id vertex_id, int distance)
: p(p)
, vertex_id(vertex_id)
, distance(distance)
{}
void* p;
class_id vertex_id;
int distance;
};
} // namespace unnamed
std::pair<void*, int> cast_graph::impl::cast(
void* const p, class_id src, class_id target
, class_id dynamic_id, void const* dynamic_ptr) const
{
if (src == target)
return std::make_pair(p, 0);
if (src >= m_vertices.size() || target >= m_vertices.size())
return std::pair<void*, int>((void*)0, -1);
std::ptrdiff_t const object_offset =
(char const*)dynamic_ptr - (char const*)p;
cache_entry cached = m_cache.get(src, target, dynamic_id, object_offset);
if (cached.first != cache::unknown)
{
if (cached.first == cache::invalid)
return std::pair<void*, int>((void*)0, -1);
return std::make_pair((char*)p + cached.first, cached.second);
}
std::queue<queue_entry> q;
q.push(queue_entry(p, src, 0));
boost::dynamic_bitset<> visited(m_vertices.size());
while (!q.empty())
{
queue_entry const qe = q.front();
q.pop();
visited[qe.vertex_id] = true;
vertex const& v = m_vertices[qe.vertex_id];
if (v.id == target)
{
m_cache.put(
src, target, dynamic_id, object_offset
, qe.distance, (char*)qe.p - (char*)p
);
return std::make_pair(qe.p, qe.distance);
}
BOOST_FOREACH(edge const& e, v.edges)
{
if (visited[e.target])
continue;
if (void* casted = e.cast(qe.p))
q.push(queue_entry(casted, e.target, qe.distance + 1));
}
}
m_cache.put(src, target, dynamic_id, object_offset, cache::invalid, -1);
return std::pair<void*, int>((void*)0, -1);
}
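// Editorial note (not part of the original source): the function above is a
// plain breadth-first search over the registered cast edges, starting from
// the static source class and applying each edge's cast_function to the
// pointer along the way. The first time the target class is reached, the
// pointer adjustment and hop count are memoised in m_cache keyed by
// (src, target, dynamic_id, object_offset); unreachable targets are cached
// as cache::invalid so the search is not repeated.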
void cast_graph::impl::insert(
class_id src, class_id target, cast_function cast)
{
class_id const max_id = std::max(src, target);
if (max_id >= m_vertices.size())
{
m_vertices.reserve(max_id + 1);
for (class_id i = m_vertices.size(); i < max_id + 1; ++i)
m_vertices.push_back(vertex(i));
}
std::vector<edge>& edges = m_vertices[src].edges;
std::vector<edge>::iterator i = std::lower_bound(
edges.begin(), edges.end(), edge(target, 0)
);
if (i == edges.end() || i->target != target)
{
edges.insert(i, edge(target, cast));
m_cache.invalidate();
}
}
std::pair<void*, int> cast_graph::cast(
void* p, class_id src, class_id target
, class_id dynamic_id, void const* dynamic_ptr) const
{
return m_impl->cast(p, src, target, dynamic_id, dynamic_ptr);
}
void cast_graph::insert(class_id src, class_id target, cast_function cast)
{
m_impl->insert(src, target, cast);
}
cast_graph::cast_graph()
: m_impl(new impl)
{}
cast_graph::~cast_graph()
{}
LUABIND_API class_id allocate_class_id(type_id const& cls)
{
typedef std::map<type_id, class_id> map_type;
static map_type registered;
static class_id id = 0;
std::pair<map_type::iterator, bool> inserted = registered.insert(
std::make_pair(cls, id));
if (inserted.second)
++id;
return inserted.first->second;
}
}} // namespace luabind::detail
| gpl-2.0 |
yiancar/qmk_firmware | keyboards/yiancardesigns/seigaiha/matrix.c | 40 | 3723 | /*
Copyright 2012-2020 Jun Wako, Jack Humbert, Yiancar-Designs
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdint.h>
#include <stdbool.h>
#include "wait.h"
#include "quantum.h"
#include "i2c_master.h"
static const pin_t row_pins[MATRIX_ROWS] = MATRIX_ROW_PINS;
static const pin_t col_pins[MATRIX_COLS] = MATRIX_COL_PINS;
static void unselect_rows(void) {
for(uint8_t x = 0; x < MATRIX_ROWS; x++) {
setPinInputHigh(row_pins[x]);
}
}
static void select_row(uint8_t row) {
setPinOutput(row_pins[row]);
writePinLow(row_pins[row]);
}
static void unselect_row(uint8_t row) {
setPinInputHigh(row_pins[row]);
}
static void init_pins(void) {
unselect_rows();
// Set I/O
uint8_t send_data = 0x1F;
i2c_writeReg((PORT_EXPANDER_ADDRESS << 1), 0x00, &send_data, 1, 20);
// Set Pull-up
i2c_writeReg((PORT_EXPANDER_ADDRESS << 1), 0x06, &send_data, 1, 20);
for (uint8_t x = 0; x < MATRIX_COLS; x++) {
if ( x < 10 ) {
setPinInputHigh(col_pins[x]);
}
}
}
void matrix_init_custom(void) {
// TODO: initialize hardware here
// Initialize I2C
i2c_init();
// initialize key pins
init_pins();
wait_ms(50);
}
static bool read_cols_on_row(matrix_row_t current_matrix[], uint8_t current_row) {
// Store last value of row prior to reading
matrix_row_t last_row_value = current_matrix[current_row];
// Clear data in matrix row
current_matrix[current_row] = 0;
// Select row and wait for row selection to stabilize

select_row(current_row);
matrix_io_delay();
uint8_t port_expander_col_buffer;
i2c_readReg((PORT_EXPANDER_ADDRESS << 1), 0x09, &port_expander_col_buffer, 1, 20);
// For each col...
for(uint8_t col_index = 0; col_index < MATRIX_COLS; col_index++) {
uint8_t pin_state;
// Select the col pin to read (active low)
switch (col_index) {
case 10 :
pin_state = port_expander_col_buffer & (1 << 0);
break;
case 11 :
pin_state = port_expander_col_buffer & (1 << 1);
break;
case 12 :
pin_state = port_expander_col_buffer & (1 << 2);
break;
case 13 :
pin_state = port_expander_col_buffer & (1 << 3);
break;
case 14 :
pin_state = port_expander_col_buffer & (1 << 4);
break;
default :
pin_state = readPin(col_pins[col_index]);
}
// Populate the matrix row with the state of the col pin
current_matrix[current_row] |= pin_state ? 0 : (MATRIX_ROW_SHIFTER << col_index);
}
// Unselect row
unselect_row(current_row);
return (last_row_value != current_matrix[current_row]);
}
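/*
 * Editorial note (not part of the original source): in the loop above,
 * columns 0-9 are read directly from MCU pins while columns 10-14 come from
 * bits 0-4 of the port expander register fetched over I2C (register 0x09).
 * The matrix is wired active-low, so a low reading means "key pressed",
 * which is why the row bit is set only when pin_state is zero.
 */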
bool matrix_scan_custom(matrix_row_t current_matrix[]) {
bool matrix_has_changed = false;
// Set row, read cols
for (uint8_t current_row = 0; current_row < MATRIX_ROWS; current_row++) {
matrix_has_changed |= read_cols_on_row(current_matrix, current_row);
}
return matrix_has_changed;
}
| gpl-2.0 |
intdes/linux-MPC8314 | drivers/platform/x86/amilo-rfkill.c | 40 | 4217 | /*
* Support for rfkill on some Fujitsu-Siemens Amilo laptops.
* Copyright 2011 Ben Hutchings.
*
* Based in part on the fsam7440 driver, which is:
* Copyright 2005 Alejandro Vidal Mata & Javier Vidal Mata.
* and on the fsaa1655g driver, which is:
* Copyright 2006 Martin Večeřa.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/i8042.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
/*
* These values were obtained from disassembling and debugging the
* PM.exe program installed in the Fujitsu-Siemens AMILO A1655G
*/
#define A1655_WIFI_COMMAND 0x10C5
#define A1655_WIFI_ON 0x25
#define A1655_WIFI_OFF 0x45
static int amilo_a1655_rfkill_set_block(void *data, bool blocked)
{
u8 param = blocked ? A1655_WIFI_OFF : A1655_WIFI_ON;
int rc;
i8042_lock_chip();
rc = i8042_command(&param, A1655_WIFI_COMMAND);
i8042_unlock_chip();
return rc;
}
static const struct rfkill_ops amilo_a1655_rfkill_ops = {
.set_block = amilo_a1655_rfkill_set_block
};
/*
* These values were obtained from disassembling the PM.exe program
* installed in the Fujitsu-Siemens AMILO M 7440
*/
#define M7440_PORT1 0x118f
#define M7440_PORT2 0x118e
#define M7440_RADIO_ON1 0x12
#define M7440_RADIO_ON2 0x80
#define M7440_RADIO_OFF1 0x10
#define M7440_RADIO_OFF2 0x00
static int amilo_m7440_rfkill_set_block(void *data, bool blocked)
{
u8 val1 = blocked ? M7440_RADIO_OFF1 : M7440_RADIO_ON1;
u8 val2 = blocked ? M7440_RADIO_OFF2 : M7440_RADIO_ON2;
outb(val1, M7440_PORT1);
outb(val2, M7440_PORT2);
/* Check whether the state has changed correctly */
if (inb(M7440_PORT1) != val1 || inb(M7440_PORT2) != val2)
return -EIO;
return 0;
}
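/*
 * Editorial note (not part of the original source): as a concrete example of
 * the above, blocking the radio writes 0x10 to port 0x118f and 0x00 to port
 * 0x118e, then reads both ports back; if either readback differs from what
 * was written, the function reports -EIO.
 */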
static const struct rfkill_ops amilo_m7440_rfkill_ops = {
.set_block = amilo_m7440_rfkill_set_block
};
static const struct dmi_system_id __devinitdata amilo_rfkill_id_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_BOARD_NAME, "AMILO A1655"),
},
.driver_data = (void *)&amilo_a1655_rfkill_ops
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
},
.driver_data = (void *)&amilo_m7440_rfkill_ops
},
{}
};
static struct platform_device *amilo_rfkill_pdev;
static struct rfkill *amilo_rfkill_dev;
static int __devinit amilo_rfkill_probe(struct platform_device *device)
{
const struct dmi_system_id *system_id =
dmi_first_match(amilo_rfkill_id_table);
int rc;
amilo_rfkill_dev = rfkill_alloc(KBUILD_MODNAME, &device->dev,
RFKILL_TYPE_WLAN,
system_id->driver_data, NULL);
if (!amilo_rfkill_dev)
return -ENOMEM;
rc = rfkill_register(amilo_rfkill_dev);
if (rc)
goto fail;
return 0;
fail:
rfkill_destroy(amilo_rfkill_dev);
return rc;
}
static int amilo_rfkill_remove(struct platform_device *device)
{
rfkill_unregister(amilo_rfkill_dev);
rfkill_destroy(amilo_rfkill_dev);
return 0;
}
static struct platform_driver amilo_rfkill_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = amilo_rfkill_probe,
.remove = amilo_rfkill_remove,
};
static int __init amilo_rfkill_init(void)
{
int rc;
if (dmi_first_match(amilo_rfkill_id_table) == NULL)
return -ENODEV;
rc = platform_driver_register(&amilo_rfkill_driver);
if (rc)
return rc;
amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
NULL, 0);
if (IS_ERR(amilo_rfkill_pdev)) {
rc = PTR_ERR(amilo_rfkill_pdev);
goto fail;
}
return 0;
fail:
platform_driver_unregister(&amilo_rfkill_driver);
return rc;
}
static void __exit amilo_rfkill_exit(void)
{
platform_device_unregister(amilo_rfkill_pdev);
platform_driver_unregister(&amilo_rfkill_driver);
}
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
module_init(amilo_rfkill_init);
module_exit(amilo_rfkill_exit);
| gpl-2.0 |
djbw/linux | sound/aoa/core/gpio-feature.c | 296 | 10928 | /*
* Apple Onboard Audio feature call GPIO control
*
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*
* GPL v2, can be found in COPYING.
*
* This file contains the GPIO control routines for
* direct (through feature calls) access to the GPIO
* registers.
*/
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <asm/pmac_feature.h>
#include "../aoa.h"
/* TODO: these are lots of global variables
* that aren't used on most machines...
* Move them into a dynamically allocated
* structure and use that.
*/
/* these are the GPIO numbers (register addresses as offsets into
* the GPIO space) */
static int headphone_mute_gpio;
static int master_mute_gpio;
static int amp_mute_gpio;
static int lineout_mute_gpio;
static int hw_reset_gpio;
static int lineout_detect_gpio;
static int headphone_detect_gpio;
static int linein_detect_gpio;
/* see the SWITCH_GPIO macro */
static int headphone_mute_gpio_activestate;
static int master_mute_gpio_activestate;
static int amp_mute_gpio_activestate;
static int lineout_mute_gpio_activestate;
static int hw_reset_gpio_activestate;
static int lineout_detect_gpio_activestate;
static int headphone_detect_gpio_activestate;
static int linein_detect_gpio_activestate;
/* node pointers that we save when getting the GPIO number
* to get the interrupt later */
static struct device_node *lineout_detect_node;
static struct device_node *linein_detect_node;
static struct device_node *headphone_detect_node;
static int lineout_detect_irq;
static int linein_detect_irq;
static int headphone_detect_irq;
static struct device_node *get_gpio(char *name,
char *altname,
int *gpioptr,
int *gpioactiveptr)
{
struct device_node *np, *gpio;
const u32 *reg;
const char *audio_gpio;
*gpioptr = -1;
/* check if we can get it the easy way ... */
np = of_find_node_by_name(NULL, name);
if (!np) {
/* some machines have only gpioX/extint-gpioX nodes,
* and an audio-gpio property saying what it is ...
* So what we have to do is enumerate all children
* of the gpio node and check them all. */
gpio = of_find_node_by_name(NULL, "gpio");
if (!gpio)
return NULL;
while ((np = of_get_next_child(gpio, np))) {
audio_gpio = of_get_property(np, "audio-gpio", NULL);
if (!audio_gpio)
continue;
if (strcmp(audio_gpio, name) == 0)
break;
if (altname && (strcmp(audio_gpio, altname) == 0))
break;
}
/* still not found, assume not there */
if (!np)
return NULL;
}
reg = of_get_property(np, "reg", NULL);
if (!reg)
return NULL;
*gpioptr = *reg;
/* this is a hack, usually the GPIOs 'reg' property
* should have the offset based from the GPIO space
* which is at 0x50, but apparently not always... */
if (*gpioptr < 0x50)
*gpioptr += 0x50;
reg = of_get_property(np, "audio-gpio-active-state", NULL);
if (!reg)
/* Apple seems to default to 1, but
* that doesn't seem right at least on most
* machines. So until proven that the opposite
* is necessary, we default to 0
* (which, incidentally, snd-powermac also does...) */
*gpioactiveptr = 0;
else
*gpioactiveptr = *reg;
return np;
}
static void get_irq(struct device_node * np, int *irqptr)
{
if (np)
*irqptr = irq_of_parse_and_map(np, 0);
else
*irqptr = 0;
}
/* 0x4 is outenable, 0x1 is out, thus 4 or 5 */
#define SWITCH_GPIO(name, v, on) \
(((v)&~1) | ((on)? \
(name##_gpio_activestate==0?4:5): \
(name##_gpio_activestate==0?5:4)))
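/*
 * Editorial note (not part of the original source): a worked example of the
 * macro above. With amp_mute_gpio_activestate == 0 (active-low mute),
 * asserting the mute via SWITCH_GPIO(amp_mute, v, 1) evaluates to
 * ((v & ~1) | 4): output-enable (0x4) set with the data bit (0x1) cleared,
 * driving the line to its active level. Deasserting it instead yields
 * ((v & ~1) | 5), driving the line high.
 */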
#define FTR_GPIO(name, bit) \
static void ftr_gpio_set_##name(struct gpio_runtime *rt, int on)\
{ \
int v; \
\
if (unlikely(!rt)) return; \
\
if (name##_mute_gpio < 0) \
return; \
\
v = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, \
name##_mute_gpio, \
0); \
\
/* muted = !on... */ \
v = SWITCH_GPIO(name##_mute, v, !on); \
\
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, \
name##_mute_gpio, v); \
\
rt->implementation_private &= ~(1<<bit); \
rt->implementation_private |= (!!on << bit); \
} \
static int ftr_gpio_get_##name(struct gpio_runtime *rt) \
{ \
if (unlikely(!rt)) return 0; \
return (rt->implementation_private>>bit)&1; \
}
FTR_GPIO(headphone, 0);
FTR_GPIO(amp, 1);
FTR_GPIO(lineout, 2);
FTR_GPIO(master, 3);
static void ftr_gpio_set_hw_reset(struct gpio_runtime *rt, int on)
{
int v;
if (unlikely(!rt)) return;
if (hw_reset_gpio < 0)
return;
v = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL,
hw_reset_gpio, 0);
v = SWITCH_GPIO(hw_reset, v, on);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL,
hw_reset_gpio, v);
}
static struct gpio_methods methods;
static void ftr_gpio_all_amps_off(struct gpio_runtime *rt)
{
int saved;
if (unlikely(!rt)) return;
saved = rt->implementation_private;
ftr_gpio_set_headphone(rt, 0);
ftr_gpio_set_amp(rt, 0);
ftr_gpio_set_lineout(rt, 0);
if (methods.set_master)
ftr_gpio_set_master(rt, 0);
rt->implementation_private = saved;
}
static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt)
{
int s;
if (unlikely(!rt)) return;
s = rt->implementation_private;
ftr_gpio_set_headphone(rt, (s>>0)&1);
ftr_gpio_set_amp(rt, (s>>1)&1);
ftr_gpio_set_lineout(rt, (s>>2)&1);
if (methods.set_master)
ftr_gpio_set_master(rt, (s>>3)&1);
}
static void ftr_handle_notify(struct work_struct *work)
{
struct gpio_notification *notif =
container_of(work, struct gpio_notification, work.work);
mutex_lock(&notif->mutex);
if (notif->notify)
notif->notify(notif->data);
mutex_unlock(&notif->mutex);
}
static void gpio_enable_dual_edge(int gpio)
{
int v;
if (gpio == -1)
return;
v = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio, 0);
v |= 0x80; /* enable dual edge */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio, v);
}
static void ftr_gpio_init(struct gpio_runtime *rt)
{
get_gpio("headphone-mute", NULL,
&headphone_mute_gpio,
&headphone_mute_gpio_activestate);
get_gpio("amp-mute", NULL,
&amp_mute_gpio,
&amp_mute_gpio_activestate);
get_gpio("lineout-mute", NULL,
&lineout_mute_gpio,
&lineout_mute_gpio_activestate);
get_gpio("hw-reset", "audio-hw-reset",
&hw_reset_gpio,
&hw_reset_gpio_activestate);
if (get_gpio("master-mute", NULL,
&master_mute_gpio,
&master_mute_gpio_activestate)) {
methods.set_master = ftr_gpio_set_master;
methods.get_master = ftr_gpio_get_master;
}
headphone_detect_node = get_gpio("headphone-detect", NULL,
&headphone_detect_gpio,
&headphone_detect_gpio_activestate);
/* go Apple, and thanks for giving these different names
* across the board... */
lineout_detect_node = get_gpio("lineout-detect", "line-output-detect",
&lineout_detect_gpio,
&lineout_detect_gpio_activestate);
linein_detect_node = get_gpio("linein-detect", "line-input-detect",
&linein_detect_gpio,
&linein_detect_gpio_activestate);
gpio_enable_dual_edge(headphone_detect_gpio);
gpio_enable_dual_edge(lineout_detect_gpio);
gpio_enable_dual_edge(linein_detect_gpio);
get_irq(headphone_detect_node, &headphone_detect_irq);
get_irq(lineout_detect_node, &lineout_detect_irq);
get_irq(linein_detect_node, &linein_detect_irq);
ftr_gpio_all_amps_off(rt);
rt->implementation_private = 0;
INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
mutex_init(&rt->headphone_notify.mutex);
mutex_init(&rt->line_in_notify.mutex);
mutex_init(&rt->line_out_notify.mutex);
}
static void ftr_gpio_exit(struct gpio_runtime *rt)
{
ftr_gpio_all_amps_off(rt);
rt->implementation_private = 0;
if (rt->headphone_notify.notify)
free_irq(headphone_detect_irq, &rt->headphone_notify);
if (rt->line_in_notify.gpio_private)
free_irq(linein_detect_irq, &rt->line_in_notify);
if (rt->line_out_notify.gpio_private)
free_irq(lineout_detect_irq, &rt->line_out_notify);
cancel_delayed_work_sync(&rt->headphone_notify.work);
cancel_delayed_work_sync(&rt->line_in_notify.work);
cancel_delayed_work_sync(&rt->line_out_notify.work);
mutex_destroy(&rt->headphone_notify.mutex);
mutex_destroy(&rt->line_in_notify.mutex);
mutex_destroy(&rt->line_out_notify.mutex);
}
static irqreturn_t ftr_handle_notify_irq(int xx, void *data)
{
struct gpio_notification *notif = data;
schedule_delayed_work(&notif->work, 0);
return IRQ_HANDLED;
}
static int ftr_set_notify(struct gpio_runtime *rt,
enum notify_type type,
notify_func_t notify,
void *data)
{
struct gpio_notification *notif;
notify_func_t old;
int irq;
char *name;
int err = -EBUSY;
switch (type) {
case AOA_NOTIFY_HEADPHONE:
notif = &rt->headphone_notify;
name = "headphone-detect";
irq = headphone_detect_irq;
break;
case AOA_NOTIFY_LINE_IN:
notif = &rt->line_in_notify;
name = "linein-detect";
irq = linein_detect_irq;
break;
case AOA_NOTIFY_LINE_OUT:
notif = &rt->line_out_notify;
name = "lineout-detect";
irq = lineout_detect_irq;
break;
default:
return -EINVAL;
}
if (!irq)
return -ENODEV;
mutex_lock(&notif->mutex);
old = notif->notify;
if (!old && !notify) {
err = 0;
goto out_unlock;
}
if (old && notify) {
if (old == notify && notif->data == data)
err = 0;
goto out_unlock;
}
if (old && !notify)
free_irq(irq, notif);
if (!old && notify) {
err = request_irq(irq, ftr_handle_notify_irq, 0, name, notif);
if (err)
goto out_unlock;
}
notif->notify = notify;
notif->data = data;
err = 0;
out_unlock:
mutex_unlock(&notif->mutex);
return err;
}
static int ftr_get_detect(struct gpio_runtime *rt,
enum notify_type type)
{
int gpio, ret, active;
switch (type) {
case AOA_NOTIFY_HEADPHONE:
gpio = headphone_detect_gpio;
active = headphone_detect_gpio_activestate;
break;
case AOA_NOTIFY_LINE_IN:
gpio = linein_detect_gpio;
active = linein_detect_gpio_activestate;
break;
case AOA_NOTIFY_LINE_OUT:
gpio = lineout_detect_gpio;
active = lineout_detect_gpio_activestate;
break;
default:
return -EINVAL;
}
if (gpio == -1)
return -ENODEV;
ret = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio, 0);
if (ret < 0)
return ret;
return ((ret >> 1) & 1) == active;
}
static struct gpio_methods methods = {
.init = ftr_gpio_init,
.exit = ftr_gpio_exit,
.all_amps_off = ftr_gpio_all_amps_off,
.all_amps_restore = ftr_gpio_all_amps_restore,
.set_headphone = ftr_gpio_set_headphone,
.set_speakers = ftr_gpio_set_amp,
.set_lineout = ftr_gpio_set_lineout,
.set_hw_reset = ftr_gpio_set_hw_reset,
.get_headphone = ftr_gpio_get_headphone,
.get_speakers = ftr_gpio_get_amp,
.get_lineout = ftr_gpio_get_lineout,
.set_notify = ftr_set_notify,
.get_detect = ftr_get_detect,
};
struct gpio_methods *ftr_gpio_methods = &methods;
EXPORT_SYMBOL_GPL(ftr_gpio_methods);
| gpl-2.0 |
mpokwsths/mpokang_kernel | net/wireless/reg.c | 296 | 61273 | /*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/**
* DOC: Wireless regulatory infrastructure
*
* The usual implementation is for a driver to read a device EEPROM to
* determine which regulatory domain it should be operating under, then
* looking up the allowable channels in a driver-local table and finally
* registering those channels in the wiphy structure.
*
* Another set of compliance enforcement is for drivers to use their
* own compliance limits which can be stored on the EEPROM. The host
* driver or firmware may ensure these are used.
*
* In addition to all this we provide an extra layer of regulatory
* conformance. For drivers which do not have any regulatory
* information CRDA provides the complete regulatory solution.
* For others it provides a community effort on further restrictions
* to enhance compliance.
*
* Note: When number of rules --> infinity we will not be able to
* index on alpha2 any more, instead we'll probably have to
* rely on some SHA1 checksum of the regdomain for example.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/ctype.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
#include "regdb.h"
#include "nl80211.h"
#ifdef CONFIG_CFG80211_REG_DEBUG
#define REG_DBG_PRINT(format, args...) \
do { \
printk(KERN_DEBUG pr_fmt(format), ##args); \
} while (0)
#else
#define REG_DBG_PRINT(args...)
#endif
static struct regulatory_request core_request_world = {
.initiator = NL80211_REGDOM_SET_BY_CORE,
.alpha2[0] = '0',
.alpha2[1] = '0',
.intersect = false,
.processed = true,
.country_ie_env = ENVIRON_ANY,
};
/* Receipt of information from last regulatory request */
static struct regulatory_request *last_request = &core_request_world;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
static struct device_type reg_device_type = {
.uevent = reg_device_uevent,
};
/*
* Central wireless core regulatory domains, we only need two,
* the current one and a world regulatory domain in case we have no
* information to give us an alpha2
*/
const struct ieee80211_regdomain *cfg80211_regdomain;
/*
* Protects static reg.c components:
* - cfg80211_world_regdom
* - cfg80211_regdom
* - last_request
*/
static DEFINE_MUTEX(reg_mutex);
static inline void assert_reg_lock(void)
{
lockdep_assert_held(&reg_mutex);
}
/* Used to queue up regulatory hints */
static LIST_HEAD(reg_requests_list);
static spinlock_t reg_requests_lock;
/* Used to queue up beacon hints for review */
static LIST_HEAD(reg_pending_beacons);
static spinlock_t reg_pending_beacons_lock;
/* Used to keep track of processed beacon hints */
static LIST_HEAD(reg_beacon_list);
struct reg_beacon {
struct list_head list;
struct ieee80211_channel chan;
};
static void reg_todo(struct work_struct *work);
static DECLARE_WORK(reg_work, reg_todo);
static void reg_timeout_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
.n_reg_rules = 5,
.alpha2 = "00",
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
/* IEEE 802.11b/g, channels 12..13. */
REG_RULE(2467-10, 2472+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
/* IEEE 802.11 channel 14 - Only JP enables
* this and for 802.11b only */
REG_RULE(2484-10, 2484+10, 20, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS |
NL80211_RRF_NO_OFDM),
/* IEEE 802.11a, channel 36..48 */
REG_RULE(5180-10, 5240+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
/* NB: 5260 MHz - 5700 MHz requires DFS */
/* IEEE 802.11a, channel 149..165 */
REG_RULE(5745-10, 5825+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
}
};
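/*
 * Editorial note (not part of the original source): each REG_RULE() above
 * reads as (start MHz, end MHz, max bandwidth MHz, max antenna gain dBi,
 * max EIRP dBm, flags). The first rule, REG_RULE(2412-10, 2462+10, 40, 6,
 * 20, 0), therefore permits 2402-2472 MHz with channels up to 40 MHz wide,
 * 6 dBi antenna gain and 20 dBm EIRP, with no extra restrictions.
 */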
static const struct ieee80211_regdomain *cfg80211_world_regdom =
&world_regdom;
static char *ieee80211_regdom = "00";
static char user_alpha2[2];
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
static void reset_regdomains(bool full_reset)
{
/* avoid freeing static information or freeing something twice */
if (cfg80211_regdomain == cfg80211_world_regdom)
cfg80211_regdomain = NULL;
if (cfg80211_world_regdom == &world_regdom)
cfg80211_world_regdom = NULL;
if (cfg80211_regdomain == &world_regdom)
cfg80211_regdomain = NULL;
kfree(cfg80211_regdomain);
kfree(cfg80211_world_regdom);
cfg80211_world_regdom = &world_regdom;
cfg80211_regdomain = NULL;
if (!full_reset)
return;
if (last_request != &core_request_world)
kfree(last_request);
last_request = &core_request_world;
}
/*
* Dynamic world regulatory domain requested by the wireless
* core upon initialization
*/
static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
BUG_ON(!last_request);
reset_regdomains(false);
cfg80211_world_regdom = rd;
cfg80211_regdomain = rd;
}
bool is_world_regdom(const char *alpha2)
{
if (!alpha2)
return false;
if (alpha2[0] == '0' && alpha2[1] == '0')
return true;
return false;
}
static bool is_alpha2_set(const char *alpha2)
{
if (!alpha2)
return false;
if (alpha2[0] != 0 && alpha2[1] != 0)
return true;
return false;
}
static bool is_unknown_alpha2(const char *alpha2)
{
if (!alpha2)
return false;
/*
* Special case where regulatory domain was built by driver
* but a specific alpha2 cannot be determined
*/
if (alpha2[0] == '9' && alpha2[1] == '9')
return true;
return false;
}
static bool is_intersected_alpha2(const char *alpha2)
{
if (!alpha2)
return false;
/*
* Special case where regulatory domain is the
* result of an intersection between two regulatory domain
* structures
*/
if (alpha2[0] == '9' && alpha2[1] == '8')
return true;
return false;
}
static bool is_an_alpha2(const char *alpha2)
{
if (!alpha2)
return false;
if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
return true;
return false;
}
static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
{
if (!alpha2_x || !alpha2_y)
return false;
if (alpha2_x[0] == alpha2_y[0] &&
alpha2_x[1] == alpha2_y[1])
return true;
return false;
}
static bool regdom_changes(const char *alpha2)
{
assert_cfg80211_lock();
if (!cfg80211_regdomain)
return true;
if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
return false;
return true;
}
/*
* The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
* you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
* has ever been issued.
*/
static bool is_user_regdom_saved(void)
{
if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
return false;
/* This would indicate a mistake on the design */
if (WARN((!is_world_regdom(user_alpha2) &&
!is_an_alpha2(user_alpha2)),
"Unexpected user alpha2: %c%c\n",
user_alpha2[0],
user_alpha2[1]))
return false;
return true;
}
static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
const struct ieee80211_regdomain *src_regd)
{
struct ieee80211_regdomain *regd;
int size_of_regd = 0;
unsigned int i;
size_of_regd = sizeof(struct ieee80211_regdomain) +
((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
regd = kzalloc(size_of_regd, GFP_KERNEL);
if (!regd)
return -ENOMEM;
memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
for (i = 0; i < src_regd->n_reg_rules; i++)
memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
*dst_regd = regd;
return 0;
}
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
struct reg_regdb_search_request {
char alpha2[2];
struct list_head list;
};
static LIST_HEAD(reg_regdb_search_list);
static DEFINE_MUTEX(reg_regdb_search_mutex);
static void reg_regdb_search(struct work_struct *work)
{
struct reg_regdb_search_request *request;
const struct ieee80211_regdomain *curdom, *regdom;
int i, r;
bool set_reg = false;
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_regdb_search_mutex);
while (!list_empty(&reg_regdb_search_list)) {
request = list_first_entry(&reg_regdb_search_list,
struct reg_regdb_search_request,
list);
list_del(&request->list);
for (i=0; i<reg_regdb_size; i++) {
curdom = reg_regdb[i];
if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
r = reg_copy_regd(&regdom, curdom);
if (r)
break;
set_reg = true;
break;
}
}
kfree(request);
}
mutex_unlock(&reg_regdb_search_mutex);
if (set_reg)
set_regdom(regdom);
mutex_unlock(&cfg80211_mutex);
}
static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
static void reg_regdb_query(const char *alpha2)
{
struct reg_regdb_search_request *request;
if (!alpha2)
return;
request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
if (!request)
return;
memcpy(request->alpha2, alpha2, 2);
mutex_lock(&reg_regdb_search_mutex);
list_add_tail(&request->list, &reg_regdb_search_list);
mutex_unlock(&reg_regdb_search_mutex);
schedule_work(&reg_regdb_work);
}
/* Feel free to add any other sanity checks here */
static void reg_regdb_size_check(void)
{
/* We should ideally BUILD_BUG_ON() but then random builds would fail */
WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
}
#else
static inline void reg_regdb_size_check(void) {}
static inline void reg_regdb_query(const char *alpha2) {}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace. Country information is filled in by
* reg_device_uevent
*/
static int call_crda(const char *alpha2)
{
if (!is_world_regdom((char *) alpha2))
pr_info("Calling CRDA for country: %c%c\n",
alpha2[0], alpha2[1]);
else
pr_info("Calling CRDA to update world regulatory domain\n");
/* query internal regulatory database (if it exists) */
reg_regdb_query(alpha2);
return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE);
}
/* Used by nl80211 before kmalloc'ing our regulatory domain */
bool reg_is_valid_request(const char *alpha2)
{
assert_cfg80211_lock();
if (!last_request)
return false;
return alpha2_equal(last_request->alpha2, alpha2);
}
/* Sanity check on a regulatory rule */
static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
{
const struct ieee80211_freq_range *freq_range = &rule->freq_range;
u32 freq_diff;
if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0)
return false;
if (freq_range->start_freq_khz > freq_range->end_freq_khz)
return false;
freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
if (freq_range->end_freq_khz <= freq_range->start_freq_khz ||
freq_range->max_bandwidth_khz > freq_diff)
return false;
return true;
}
static bool is_valid_rd(const struct ieee80211_regdomain *rd)
{
const struct ieee80211_reg_rule *reg_rule = NULL;
unsigned int i;
if (!rd->n_reg_rules)
return false;
if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
return false;
for (i = 0; i < rd->n_reg_rules; i++) {
reg_rule = &rd->reg_rules[i];
if (!is_valid_reg_rule(reg_rule))
return false;
}
return true;
}
static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
u32 center_freq_khz,
u32 bw_khz)
{
u32 start_freq_khz, end_freq_khz;
start_freq_khz = center_freq_khz - (bw_khz/2);
end_freq_khz = center_freq_khz + (bw_khz/2);
if (start_freq_khz >= freq_range->start_freq_khz &&
end_freq_khz <= freq_range->end_freq_khz)
return true;
return false;
}
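/*
 * Editorial note (not part of the original source): for example, a 20 MHz
 * request centred on 2412 MHz spans 2402000-2422000 kHz, which fits inside a
 * rule covering 2402000-2472000 kHz; centring the same request on 2484 MHz
 * (2474000-2494000 kHz) would fall outside the rule and the helper returns
 * false.
 */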
/**
* freq_in_rule_band - tells us if a frequency is in a frequency band
* @freq_range: frequency rule we want to query
* @freq_khz: frequency we are inquiring about
*
* This lets us know if a specific frequency rule is or is not relevant to
* a specific frequency's band. Bands are device specific and artificial
* definitions (the "2.4 GHz band" and the "5 GHz band"), however it is
* safe for now to assume that a frequency rule should not be part of a
* frequency's band if the start freq or end freq are off by more than 2 GHz.
* This resolution can be lowered and should be considered as we add
* regulatory rule support for other "bands".
**/
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
u32 freq_khz)
{
#define ONE_GHZ_IN_KHZ 1000000
if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ))
return true;
return false;
#undef ONE_GHZ_IN_KHZ
}
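/*
 * Editorial note (not part of the original source): with the 2 GHz tolerance
 * above, a rule spanning 2402000-2482000 kHz is considered in-band for
 * 2484 MHz (2484000 kHz is within 2 GHz of either edge) but not for
 * 5180 MHz, which lies more than 2 GHz from both edges, so the 2.4 GHz and
 * 5 GHz rules never bleed into each other's band.
 */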
/*
* Helper for regdom_intersect(), this does the real
* mathematical intersection fun
*/
static int reg_rules_intersect(
const struct ieee80211_reg_rule *rule1,
const struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *intersected_rule)
{
const struct ieee80211_freq_range *freq_range1, *freq_range2;
struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule1, *power_rule2;
struct ieee80211_power_rule *power_rule;
u32 freq_diff;
freq_range1 = &rule1->freq_range;
freq_range2 = &rule2->freq_range;
freq_range = &intersected_rule->freq_range;
power_rule1 = &rule1->power_rule;
power_rule2 = &rule2->power_rule;
power_rule = &intersected_rule->power_rule;
freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
freq_range2->start_freq_khz);
freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
freq_range2->end_freq_khz);
freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
freq_range2->max_bandwidth_khz);
freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
if (freq_range->max_bandwidth_khz > freq_diff)
freq_range->max_bandwidth_khz = freq_diff;
power_rule->max_eirp = min(power_rule1->max_eirp,
power_rule2->max_eirp);
power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain,
power_rule2->max_antenna_gain);
intersected_rule->flags = (rule1->flags | rule2->flags);
if (!is_valid_reg_rule(intersected_rule))
return -EINVAL;
return 0;
}
/**
* regdom_intersect - do the intersection between two regulatory domains
* @rd1: first regulatory domain
* @rd2: second regulatory domain
*
* Use this function to get the intersection between two regulatory domains.
* Once completed we will mark the alpha2 for the rd as intersected, "98",
* as no one single alpha2 can represent this regulatory domain.
*
* Returns a pointer to the regulatory domain structure which will hold the
* resulting intersection of rules between rd1 and rd2. We will
* kzalloc() this structure for you.
*/
static struct ieee80211_regdomain *regdom_intersect(
const struct ieee80211_regdomain *rd1,
const struct ieee80211_regdomain *rd2)
{
int r, size_of_regd;
unsigned int x, y;
unsigned int num_rules = 0, rule_idx = 0;
const struct ieee80211_reg_rule *rule1, *rule2;
struct ieee80211_reg_rule *intersected_rule;
struct ieee80211_regdomain *rd;
/* This is just a dummy holder to help us count */
struct ieee80211_reg_rule irule;
/* Uses the stack temporarily for counter arithmetic */
intersected_rule = &irule;
memset(intersected_rule, 0, sizeof(struct ieee80211_reg_rule));
if (!rd1 || !rd2)
return NULL;
/*
* First we get a count of the rules we'll need, then we actually
* build them. This is so we can malloc() and free() a
* regdomain once. The reason we use reg_rules_intersect() here
* is it will return -EINVAL if the rule computed makes no sense.
* All rules that do check out OK are valid.
*/
for (x = 0; x < rd1->n_reg_rules; x++) {
rule1 = &rd1->reg_rules[x];
for (y = 0; y < rd2->n_reg_rules; y++) {
rule2 = &rd2->reg_rules[y];
if (!reg_rules_intersect(rule1, rule2,
intersected_rule))
num_rules++;
memset(intersected_rule, 0,
sizeof(struct ieee80211_reg_rule));
}
}
if (!num_rules)
return NULL;
size_of_regd = sizeof(struct ieee80211_regdomain) +
((num_rules + 1) * sizeof(struct ieee80211_reg_rule));
rd = kzalloc(size_of_regd, GFP_KERNEL);
if (!rd)
return NULL;
for (x = 0; x < rd1->n_reg_rules; x++) {
rule1 = &rd1->reg_rules[x];
for (y = 0; y < rd2->n_reg_rules; y++) {
rule2 = &rd2->reg_rules[y];
/*
* This time around, instead of using the stack, let's
* write to the target rule directly saving ourselves
* a memcpy()
*/
intersected_rule = &rd->reg_rules[rule_idx];
r = reg_rules_intersect(rule1, rule2,
intersected_rule);
/*
* No need to memset the intersected rule here as
* we're not using the stack anymore
*/
if (r)
continue;
rule_idx++;
}
}
if (rule_idx != num_rules) {
kfree(rd);
return NULL;
}
rd->n_reg_rules = num_rules;
rd->alpha2[0] = '9';
rd->alpha2[1] = '8';
return rd;
}
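/*
 * Editorial note (not part of the original source): as an illustration of
 * the helper above, intersecting a rule of 2402-2472 MHz @ 40 MHz, 20 dBm
 * with one of 2400-2483.5 MHz @ 40 MHz, 30 dBm yields 2402-2472 MHz @ 40 MHz
 * with the stricter 20 dBm limit, and the resulting regdomain is tagged with
 * the synthetic alpha2 "98".
 */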
/*
* XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
* want to just have the channel structure use these
*/
static u32 map_regdom_flags(u32 rd_flags)
{
u32 channel_flags = 0;
if (rd_flags & NL80211_RRF_PASSIVE_SCAN)
channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN;
if (rd_flags & NL80211_RRF_NO_IBSS)
channel_flags |= IEEE80211_CHAN_NO_IBSS;
if (rd_flags & NL80211_RRF_DFS)
channel_flags |= IEEE80211_CHAN_RADAR;
return channel_flags;
}
static int freq_reg_info_regd(struct wiphy *wiphy,
u32 center_freq,
u32 desired_bw_khz,
const struct ieee80211_reg_rule **reg_rule,
const struct ieee80211_regdomain *custom_regd)
{
int i;
bool band_rule_found = false;
const struct ieee80211_regdomain *regd;
bool bw_fits = false;
if (!desired_bw_khz)
desired_bw_khz = MHZ_TO_KHZ(20);
regd = custom_regd ? custom_regd : cfg80211_regdomain;
/*
* Follow the driver's regulatory domain, if present, unless a country
* IE has been processed or a user wants to help compliance further
*/
if (!custom_regd &&
last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
last_request->initiator != NL80211_REGDOM_SET_BY_USER &&
wiphy->regd)
regd = wiphy->regd;
if (!regd)
return -EINVAL;
for (i = 0; i < regd->n_reg_rules; i++) {
const struct ieee80211_reg_rule *rr;
const struct ieee80211_freq_range *fr = NULL;
rr = &regd->reg_rules[i];
fr = &rr->freq_range;
/*
* We only need to know if one frequency rule was
* was in center_freq's band, that's enough, so lets
* not overwrite it once found
*/
if (!band_rule_found)
band_rule_found = freq_in_rule_band(fr, center_freq);
bw_fits = reg_does_bw_fit(fr,
center_freq,
desired_bw_khz);
if (band_rule_found && bw_fits) {
*reg_rule = rr;
return 0;
}
}
if (!band_rule_found)
return -ERANGE;
return -EINVAL;
}
int freq_reg_info(struct wiphy *wiphy,
u32 center_freq,
u32 desired_bw_khz,
const struct ieee80211_reg_rule **reg_rule)
{
assert_cfg80211_lock();
return freq_reg_info_regd(wiphy,
center_freq,
desired_bw_khz,
reg_rule,
NULL);
}
EXPORT_SYMBOL(freq_reg_info);
#ifdef CONFIG_CFG80211_REG_DEBUG
static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
{
switch (initiator) {
case NL80211_REGDOM_SET_BY_CORE:
return "Set by core";
case NL80211_REGDOM_SET_BY_USER:
return "Set by user";
case NL80211_REGDOM_SET_BY_DRIVER:
return "Set by driver";
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
return "Set by country IE";
default:
WARN_ON(1);
return "Set by bug";
}
}
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
u32 desired_bw_khz,
const struct ieee80211_reg_rule *reg_rule)
{
const struct ieee80211_power_rule *power_rule;
const struct ieee80211_freq_range *freq_range;
char max_antenna_gain[32];
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
if (!power_rule->max_antenna_gain)
snprintf(max_antenna_gain, 32, "N/A");
else
snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
REG_DBG_PRINT("Updating information on frequency %d MHz "
"for a %d MHz width channel with regulatory rule:\n",
chan->center_freq,
KHZ_TO_MHZ(desired_bw_khz));
REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
max_antenna_gain,
power_rule->max_eirp);
}
#else
static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
u32 desired_bw_khz,
const struct ieee80211_reg_rule *reg_rule)
{
return;
}
#endif
/*
* Note that right now we assume the desired channel bandwidth
* is always 20 MHz for each individual channel (HT40 uses 20 MHz
* per channel, the primary and the extension channel). To support
* smaller custom bandwidths such as 5 MHz or 10 MHz we'll need a
* new ieee80211_channel.target_bw and re-run the regulatory check
* on the wiphy with the target_bw specified. Then we can simply use
* that for the desired_bw_khz below.
*/
static void handle_channel(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
enum ieee80211_band band,
unsigned int chan_idx)
{
int r;
u32 flags, bw_flags = 0;
u32 desired_bw_khz = MHZ_TO_KHZ(20);
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct wiphy *request_wiphy = NULL;
assert_cfg80211_lock();
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
sband = wiphy->bands[band];
BUG_ON(chan_idx >= sband->n_channels);
chan = &sband->channels[chan_idx];
flags = chan->orig_flags;
r = freq_reg_info(wiphy,
MHZ_TO_KHZ(chan->center_freq),
desired_bw_khz,
&reg_rule);
if (r) {
/*
* We will disable all channels that do not match our
* received regulatory rule unless the hint is coming
* from a Country IE and the Country IE had no information
* about a band. The IEEE 802.11 spec allows for an AP
* to send only a subset of the regulatory rules allowed,
* so an AP in the US that only supports 2.4 GHz may only send
* a country IE with information for the 2.4 GHz band
* while 5 GHz is still supported.
*/
if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
r == -ERANGE)
return;
REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
chan->flags |= IEEE80211_CHAN_DISABLED;
return;
}
chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
bw_flags = IEEE80211_CHAN_NO_HT40;
if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
request_wiphy && request_wiphy == wiphy &&
request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
/*
* This guarantees the driver's requested regulatory domain
* will always be used as a base for further regulatory
* settings
*/
chan->flags = chan->orig_flags =
map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = chan->orig_mag =
(int) MBI_TO_DBI(power_rule->max_antenna_gain);
chan->max_power = chan->orig_mpwr =
(int) MBM_TO_DBM(power_rule->max_eirp);
return;
}
chan->beacon_found = false;
chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
if (chan->orig_mpwr)
chan->max_power = min(chan->orig_mpwr,
(int) MBM_TO_DBM(power_rule->max_eirp));
else
chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
static void handle_band(struct wiphy *wiphy,
enum ieee80211_band band,
enum nl80211_reg_initiator initiator)
{
unsigned int i;
struct ieee80211_supported_band *sband;
BUG_ON(!wiphy->bands[band]);
sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++)
handle_channel(wiphy, initiator, band, i);
}
static bool ignore_reg_update(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
if (!last_request) {
REG_DBG_PRINT("Ignoring regulatory request %s since "
"last_request is not set\n",
reg_initiator_name(initiator));
return true;
}
if (initiator == NL80211_REGDOM_SET_BY_CORE &&
wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
REG_DBG_PRINT("Ignoring regulatory request %s "
"since the driver uses its own custom "
"regulatory domain ",
reg_initiator_name(initiator));
return true;
}
/*
* wiphy->regd will be set once the device has its own
* desired regulatory domain set
*/
if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
!is_world_regdom(last_request->alpha2)) {
REG_DBG_PRINT("Ignoring regulatory request %s "
"since the driver requires its own regulaotry "
"domain to be set first",
reg_initiator_name(initiator));
return true;
}
return false;
}
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
struct cfg80211_registered_device *rdev;
list_for_each_entry(rdev, &cfg80211_rdev_list, list)
wiphy_update_regulatory(&rdev->wiphy, initiator);
}
static void handle_reg_beacon(struct wiphy *wiphy,
unsigned int chan_idx,
struct reg_beacon *reg_beacon)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
bool channel_changed = false;
struct ieee80211_channel chan_before;
assert_cfg80211_lock();
sband = wiphy->bands[reg_beacon->chan.band];
chan = &sband->channels[chan_idx];
if (likely(chan->center_freq != reg_beacon->chan.center_freq))
return;
if (chan->beacon_found)
return;
chan->beacon_found = true;
if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
return;
chan_before.center_freq = chan->center_freq;
chan_before.flags = chan->flags;
if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
channel_changed = true;
}
if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
channel_changed = true;
}
if (channel_changed)
nl80211_send_beacon_hint_event(wiphy, &chan_before, chan);
}
/*
* Called when a scan on a wiphy finds a beacon on
* new channel
*/
static void wiphy_update_new_beacon(struct wiphy *wiphy,
struct reg_beacon *reg_beacon)
{
unsigned int i;
struct ieee80211_supported_band *sband;
assert_cfg80211_lock();
if (!wiphy->bands[reg_beacon->chan.band])
return;
sband = wiphy->bands[reg_beacon->chan.band];
for (i = 0; i < sband->n_channels; i++)
handle_reg_beacon(wiphy, i, reg_beacon);
}
/*
* Called upon reg changes or when a new wiphy is added
*/
static void wiphy_update_beacon_reg(struct wiphy *wiphy)
{
unsigned int i;
struct ieee80211_supported_band *sband;
struct reg_beacon *reg_beacon;
assert_cfg80211_lock();
if (list_empty(&reg_beacon_list))
return;
list_for_each_entry(reg_beacon, &reg_beacon_list, list) {
if (!wiphy->bands[reg_beacon->chan.band])
continue;
sband = wiphy->bands[reg_beacon->chan.band];
for (i = 0; i < sband->n_channels; i++)
handle_reg_beacon(wiphy, i, reg_beacon);
}
}
static bool reg_is_world_roaming(struct wiphy *wiphy)
{
if (is_world_regdom(cfg80211_regdomain->alpha2) ||
(wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
return true;
if (last_request &&
last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
return true;
return false;
}
/* Reap the advantages of previously found beacons */
static void reg_process_beacons(struct wiphy *wiphy)
{
/*
* Means we are just firing up cfg80211, so no beacons would
* have been processed yet.
*/
if (!last_request)
return;
if (!reg_is_world_roaming(wiphy))
return;
wiphy_update_beacon_reg(wiphy);
}
static bool is_ht40_not_allowed(struct ieee80211_channel *chan)
{
if (!chan)
return true;
if (chan->flags & IEEE80211_CHAN_DISABLED)
return true;
/* This would happen when regulatory rules disallow HT40 completely */
if (IEEE80211_CHAN_NO_HT40 == (chan->flags & (IEEE80211_CHAN_NO_HT40)))
return true;
return false;
}
static void reg_process_ht_flags_channel(struct wiphy *wiphy,
enum ieee80211_band band,
unsigned int chan_idx)
{
struct ieee80211_supported_band *sband;
struct ieee80211_channel *channel;
struct ieee80211_channel *channel_before = NULL, *channel_after = NULL;
unsigned int i;
assert_cfg80211_lock();
sband = wiphy->bands[band];
BUG_ON(chan_idx >= sband->n_channels);
channel = &sband->channels[chan_idx];
if (is_ht40_not_allowed(channel)) {
channel->flags |= IEEE80211_CHAN_NO_HT40;
return;
}
/*
* We need to ensure the extension channels exist to
* be able to use HT40- or HT40+, this finds them (or not)
*/
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *c = &sband->channels[i];
if (c->center_freq == (channel->center_freq - 20))
channel_before = c;
if (c->center_freq == (channel->center_freq + 20))
channel_after = c;
}
/*
* Please note that this assumes target bandwidth is 20 MHz,
* if that ever changes we also need to change the below logic
* to include that as well.
*/
if (is_ht40_not_allowed(channel_before))
channel->flags |= IEEE80211_CHAN_NO_HT40MINUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
if (is_ht40_not_allowed(channel_after))
channel->flags |= IEEE80211_CHAN_NO_HT40PLUS;
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
}
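/*
 * Illustrative example (editorial note, hypothetical channel layout): on
 * 2.4 GHz, a channel centred at 2437 MHz (channel 6) needs 2417 MHz
 * (channel 2) as its HT40- secondary and 2457 MHz (channel 10) as its
 * HT40+ secondary; if either of those is missing, disabled or itself
 * barred from HT40, the corresponding IEEE80211_CHAN_NO_HT40MINUS or
 * IEEE80211_CHAN_NO_HT40PLUS flag is set on channel 6 by the loop above.
 */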
static void reg_process_ht_flags_band(struct wiphy *wiphy,
enum ieee80211_band band)
{
unsigned int i;
struct ieee80211_supported_band *sband;
BUG_ON(!wiphy->bands[band]);
sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++)
reg_process_ht_flags_channel(wiphy, band, i);
}
static void reg_process_ht_flags(struct wiphy *wiphy)
{
enum ieee80211_band band;
if (!wiphy)
return;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
reg_process_ht_flags_band(wiphy, band);
}
}
void wiphy_update_regulatory(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
enum ieee80211_band band;
if (ignore_reg_update(wiphy, initiator))
return;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
handle_band(wiphy, band, initiator);
}
reg_process_beacons(wiphy);
reg_process_ht_flags(wiphy);
if (wiphy->reg_notifier)
wiphy->reg_notifier(wiphy, last_request);
}
static void handle_channel_custom(struct wiphy *wiphy,
enum ieee80211_band band,
unsigned int chan_idx,
const struct ieee80211_regdomain *regd)
{
int r;
u32 desired_bw_khz = MHZ_TO_KHZ(20);
u32 bw_flags = 0;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
assert_reg_lock();
sband = wiphy->bands[band];
BUG_ON(chan_idx >= sband->n_channels);
chan = &sband->channels[chan_idx];
r = freq_reg_info_regd(wiphy,
MHZ_TO_KHZ(chan->center_freq),
desired_bw_khz,
&reg_rule,
regd);
if (r) {
REG_DBG_PRINT("Disabling freq %d MHz as custom "
"regd has no rule that fits a %d MHz "
"wide channel\n",
chan->center_freq,
KHZ_TO_MHZ(desired_bw_khz));
chan->flags = IEEE80211_CHAN_DISABLED;
return;
}
chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
bw_flags = IEEE80211_CHAN_NO_HT40;
chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
static void handle_band_custom(struct wiphy *wiphy, enum ieee80211_band band,
const struct ieee80211_regdomain *regd)
{
unsigned int i;
struct ieee80211_supported_band *sband;
BUG_ON(!wiphy->bands[band]);
sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++)
handle_channel_custom(wiphy, band, i, regd);
}
/* Used by drivers prior to wiphy registration */
void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
const struct ieee80211_regdomain *regd)
{
enum ieee80211_band band;
unsigned int bands_set = 0;
mutex_lock(&reg_mutex);
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!wiphy->bands[band])
continue;
handle_band_custom(wiphy, band, regd);
bands_set++;
}
mutex_unlock(&reg_mutex);
/*
* no point in calling this if it won't have any effect
* on your device's supported bands.
*/
WARN_ON(!bands_set);
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
/*
* Return value which can be used by ignore_request() to indicate
* it has been determined we should intersect two regulatory domains
*/
#define REG_INTERSECT 1
/* This has the logic which determines when a new request
* should be ignored. */
static int ignore_request(struct wiphy *wiphy,
struct regulatory_request *pending_request)
{
struct wiphy *last_wiphy = NULL;
assert_cfg80211_lock();
/* All initial requests are respected */
if (!last_request)
return 0;
switch (pending_request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
return 0;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (unlikely(!is_an_alpha2(pending_request->alpha2)))
return -EINVAL;
if (last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE) {
if (last_wiphy != wiphy) {
/*
* Two cards with two APs claiming different
* Country IE alpha2s. We could
* intersect them, but that seems unlikely
* to be correct. Reject second one for now.
*/
if (regdom_changes(pending_request->alpha2))
return -EOPNOTSUPP;
return -EALREADY;
}
/*
* Two consecutive Country IE hints on the same wiphy.
* This should be picked up early by the driver/stack
*/
if (WARN_ON(regdom_changes(pending_request->alpha2)))
return 0;
return -EALREADY;
}
return 0;
case NL80211_REGDOM_SET_BY_DRIVER:
if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
if (regdom_changes(pending_request->alpha2))
return 0;
return -EALREADY;
}
/*
* This would happen if you unplug and plug your card
* back in or if you add a new device for which the previously
* loaded card also agrees on the regulatory domain.
*/
if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
!regdom_changes(pending_request->alpha2))
return -EALREADY;
return REG_INTERSECT;
case NL80211_REGDOM_SET_BY_USER:
if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
return REG_INTERSECT;
/*
* If the user knows better the user should set the regdom
* to their country before the IE is picked up
*/
if (last_request->initiator == NL80211_REGDOM_SET_BY_USER &&
last_request->intersect)
return -EOPNOTSUPP;
/*
* Process user requests only after previous user/driver/core
* requests have been processed
*/
if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE ||
last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
if (regdom_changes(last_request->alpha2))
return -EAGAIN;
}
if (!regdom_changes(pending_request->alpha2))
return -EALREADY;
return 0;
}
return -EINVAL;
}
static void reg_set_request_processed(void)
{
bool need_more_processing = false;
last_request->processed = true;
spin_lock(&reg_requests_lock);
if (!list_empty(&reg_requests_list))
need_more_processing = true;
spin_unlock(&reg_requests_lock);
if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
cancel_delayed_work(&reg_timeout);
if (need_more_processing)
schedule_work(&reg_work);
}
/**
* __regulatory_hint - hint to the wireless core a regulatory domain
* @wiphy: if the hint comes from country information from an AP, this
* is required to be set to the wiphy that received the information
* @pending_request: the regulatory request currently being processed
*
* The Wireless subsystem can use this function to hint to the wireless core
* what it believes should be the current regulatory domain.
*
* Returns zero if all went fine, %-EALREADY if a regulatory domain had
* already been set or other standard error codes.
*
* Caller must hold &cfg80211_mutex and &reg_mutex
*/
static int __regulatory_hint(struct wiphy *wiphy,
struct regulatory_request *pending_request)
{
bool intersect = false;
int r = 0;
assert_cfg80211_lock();
r = ignore_request(wiphy, pending_request);
if (r == REG_INTERSECT) {
if (pending_request->initiator ==
NL80211_REGDOM_SET_BY_DRIVER) {
r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
if (r) {
kfree(pending_request);
return r;
}
}
intersect = true;
} else if (r) {
/*
* If the regulatory domain being requested by the
* driver has already been set just copy it to the
* wiphy
*/
if (r == -EALREADY &&
pending_request->initiator ==
NL80211_REGDOM_SET_BY_DRIVER) {
r = reg_copy_regd(&wiphy->regd, cfg80211_regdomain);
if (r) {
kfree(pending_request);
return r;
}
r = -EALREADY;
goto new_request;
}
kfree(pending_request);
return r;
}
new_request:
if (last_request != &core_request_world)
kfree(last_request);
last_request = pending_request;
last_request->intersect = intersect;
pending_request = NULL;
if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
user_alpha2[0] = last_request->alpha2[0];
user_alpha2[1] = last_request->alpha2[1];
}
/* When r == REG_INTERSECT we do need to call CRDA */
if (r < 0) {
/*
* Since CRDA will not be called in this case as we already
* have applied the requested regulatory domain before we just
* inform userspace we have processed the request
*/
if (r == -EALREADY) {
nl80211_send_reg_change_event(last_request);
reg_set_request_processed();
}
return r;
}
return call_crda(last_request->alpha2);
}
/* This processes *all* regulatory hints */
static void reg_process_hint(struct regulatory_request *reg_request)
{
int r = 0;
struct wiphy *wiphy = NULL;
enum nl80211_reg_initiator initiator = reg_request->initiator;
BUG_ON(!reg_request->alpha2);
if (wiphy_idx_valid(reg_request->wiphy_idx))
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
!wiphy) {
kfree(reg_request);
return;
}
r = __regulatory_hint(wiphy, reg_request);
/* This is required so that the orig_* parameters are saved */
if (r == -EALREADY && wiphy &&
wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
wiphy_update_regulatory(wiphy, initiator);
return;
}
/*
* We only time out user hints, given that they should be the only
* source of bogus requests.
*/
if (r != -EALREADY &&
reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
}
/*
* Processes regulatory hints, this covers all the NL80211_REGDOM_SET_BY_*
* initiators. Regulatory hints come on a first-come, first-served basis
* and we must process each one atomically.
*/
static void reg_process_pending_hints(void)
{
struct regulatory_request *reg_request;
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
/* When last_request->processed becomes true this will be rescheduled */
if (last_request && !last_request->processed) {
REG_DBG_PRINT("Pending regulatory request, waiting "
"for it to be processed...");
goto out;
}
spin_lock(&reg_requests_lock);
if (list_empty(&reg_requests_list)) {
spin_unlock(&reg_requests_lock);
goto out;
}
reg_request = list_first_entry(&reg_requests_list,
struct regulatory_request,
list);
list_del_init(&reg_request->list);
spin_unlock(&reg_requests_lock);
reg_process_hint(reg_request);
out:
mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
}
/* Processes beacon hints -- this has nothing to do with country IEs */
static void reg_process_pending_beacon_hints(void)
{
struct cfg80211_registered_device *rdev;
struct reg_beacon *pending_beacon, *tmp;
/*
* No need to hold the reg_mutex here as we just touch wiphys
* and do not read or access regulatory variables.
*/
mutex_lock(&cfg80211_mutex);
/* This goes through the _pending_ beacon list */
spin_lock_bh(&reg_pending_beacons_lock);
if (list_empty(&reg_pending_beacons)) {
spin_unlock_bh(&reg_pending_beacons_lock);
goto out;
}
list_for_each_entry_safe(pending_beacon, tmp,
&reg_pending_beacons, list) {
list_del_init(&pending_beacon->list);
/* Applies the beacon hint to current wiphys */
list_for_each_entry(rdev, &cfg80211_rdev_list, list)
wiphy_update_new_beacon(&rdev->wiphy, pending_beacon);
/* Remembers the beacon hint for new wiphys or reg changes */
list_add_tail(&pending_beacon->list, &reg_beacon_list);
}
spin_unlock_bh(&reg_pending_beacons_lock);
out:
mutex_unlock(&cfg80211_mutex);
}
static void reg_todo(struct work_struct *work)
{
reg_process_pending_hints();
reg_process_pending_beacon_hints();
}
static void queue_regulatory_request(struct regulatory_request *request)
{
if (isalpha(request->alpha2[0]))
request->alpha2[0] = toupper(request->alpha2[0]);
if (isalpha(request->alpha2[1]))
request->alpha2[1] = toupper(request->alpha2[1]);
spin_lock(&reg_requests_lock);
list_add_tail(&request->list, &reg_requests_list);
spin_unlock(&reg_requests_lock);
schedule_work(&reg_work);
}
/*
* Core regulatory hint -- happens during cfg80211_init()
* and when we restore regulatory settings.
*/
static int regulatory_hint_core(const char *alpha2)
{
struct regulatory_request *request;
request = kzalloc(sizeof(struct regulatory_request),
GFP_KERNEL);
if (!request)
return -ENOMEM;
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_CORE;
queue_regulatory_request(request);
return 0;
}
/* User hints */
int regulatory_hint_user(const char *alpha2)
{
struct regulatory_request *request;
BUG_ON(!alpha2);
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
return -ENOMEM;
request->wiphy_idx = WIPHY_IDX_STALE;
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_USER;
queue_regulatory_request(request);
return 0;
}
/* Driver hints */
int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
{
struct regulatory_request *request;
BUG_ON(!alpha2);
BUG_ON(!wiphy);
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
return -ENOMEM;
request->wiphy_idx = get_wiphy_idx(wiphy);
/* Must have registered wiphy first */
BUG_ON(!wiphy_idx_valid(request->wiphy_idx));
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
queue_regulatory_request(request);
return 0;
}
EXPORT_SYMBOL(regulatory_hint);
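/*
 * Illustrative usage sketch (editorial addition, hypothetical driver
 * code, not part of the original file): a driver that reads its
 * programmed country code from EEPROM would typically hint it once,
 * after its wiphy has been registered.
 */
#if 0
/* hypothetical driver code; a non-zero return is usually non-fatal */
err = regulatory_hint(wiphy, "US");
#endif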
/*
* We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
* therefore cannot iterate over the rdev list here.
*/
void regulatory_hint_11d(struct wiphy *wiphy,
enum ieee80211_band band,
u8 *country_ie,
u8 country_ie_len)
{
char alpha2[2];
enum environment_cap env = ENVIRON_ANY;
struct regulatory_request *request;
mutex_lock(&reg_mutex);
if (unlikely(!last_request))
goto out;
/* IE len must be evenly divisible by 2 */
if (country_ie_len & 0x01)
goto out;
if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
goto out;
alpha2[0] = country_ie[0];
alpha2[1] = country_ie[1];
if (country_ie[2] == 'I')
env = ENVIRON_INDOOR;
else if (country_ie[2] == 'O')
env = ENVIRON_OUTDOOR;
/*
* We will run this only upon a successful connection on cfg80211.
* We leave conflict resolution to the workqueue, where we can hold
* cfg80211_mutex.
*/
if (likely(last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE &&
wiphy_idx_valid(last_request->wiphy_idx)))
goto out;
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
goto out;
request->wiphy_idx = get_wiphy_idx(wiphy);
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
request->country_ie_env = env;
mutex_unlock(&reg_mutex);
queue_regulatory_request(request);
return;
out:
mutex_unlock(&reg_mutex);
}
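/*
 * Illustrative example (editorial note, values hypothetical): a country
 * IE whose first three payload bytes are 'U', 'S', 'O' yields alpha2
 * "US" with env = ENVIRON_OUTDOOR; a third byte of 'I' selects
 * ENVIRON_INDOOR, and anything else leaves ENVIRON_ANY.
 */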
static void restore_alpha2(char *alpha2, bool reset_user)
{
/* indicates there is no alpha2 to consider for restoration */
alpha2[0] = '9';
alpha2[1] = '7';
/* The user setting has precedence over the module parameter */
if (is_user_regdom_saved()) {
/* Unless we're asked to ignore it and reset it */
if (reset_user) {
REG_DBG_PRINT("Restoring regulatory settings "
"including user preference\n");
user_alpha2[0] = '9';
user_alpha2[1] = '7';
/*
* If we're ignoring user settings, we still need to
* check the module parameter to ensure we put things
* back as they were for a full restore.
*/
if (!is_world_regdom(ieee80211_regdom)) {
REG_DBG_PRINT("Keeping preference on "
"module parameter ieee80211_regdom: %c%c\n",
ieee80211_regdom[0],
ieee80211_regdom[1]);
alpha2[0] = ieee80211_regdom[0];
alpha2[1] = ieee80211_regdom[1];
}
} else {
REG_DBG_PRINT("Restoring regulatory settings "
"while preserving user preference for: %c%c\n",
user_alpha2[0],
user_alpha2[1]);
alpha2[0] = user_alpha2[0];
alpha2[1] = user_alpha2[1];
}
} else if (!is_world_regdom(ieee80211_regdom)) {
REG_DBG_PRINT("Keeping preference on "
"module parameter ieee80211_regdom: %c%c\n",
ieee80211_regdom[0],
ieee80211_regdom[1]);
alpha2[0] = ieee80211_regdom[0];
alpha2[1] = ieee80211_regdom[1];
} else
REG_DBG_PRINT("Restoring regulatory settings\n");
}
/*
* Restoring regulatory settings involves ignoring any
* possibly stale country IE information and user regulatory
* settings if so desired, this includes any beacon hints
* learned as we could have traveled outside to another country
* after disconnection. To restore regulatory settings we do
* exactly what we did at bootup:
*
* - send a core regulatory hint
* - send a user regulatory hint if applicable
*
* Device drivers that send a regulatory hint for a specific country
* keep their own regulatory domain on wiphy->regd so that it does
* not need to be remembered.
*/
static void restore_regulatory_settings(bool reset_user)
{
char alpha2[2];
char world_alpha2[2];
struct reg_beacon *reg_beacon, *btmp;
struct regulatory_request *reg_request, *tmp;
LIST_HEAD(tmp_reg_req_list);
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
reset_regdomains(true);
restore_alpha2(alpha2, reset_user);
/*
* If there are any pending requests we simply
* stash them to a temporary pending queue and
* add them back after we've restored regulatory
* settings.
*/
spin_lock(&reg_requests_lock);
if (!list_empty(&reg_requests_list)) {
list_for_each_entry_safe(reg_request, tmp,
&reg_requests_list, list) {
if (reg_request->initiator !=
NL80211_REGDOM_SET_BY_USER)
continue;
list_del(&reg_request->list);
list_add_tail(&reg_request->list, &tmp_reg_req_list);
}
}
spin_unlock(&reg_requests_lock);
/* Clear beacon hints */
spin_lock_bh(&reg_pending_beacons_lock);
if (!list_empty(&reg_pending_beacons)) {
list_for_each_entry_safe(reg_beacon, btmp,
&reg_pending_beacons, list) {
list_del(&reg_beacon->list);
kfree(reg_beacon);
}
}
spin_unlock_bh(&reg_pending_beacons_lock);
if (!list_empty(&reg_beacon_list)) {
list_for_each_entry_safe(reg_beacon, btmp,
&reg_beacon_list, list) {
list_del(&reg_beacon->list);
kfree(reg_beacon);
}
}
/* First restore to the basic regulatory settings */
cfg80211_regdomain = cfg80211_world_regdom;
world_alpha2[0] = cfg80211_regdomain->alpha2[0];
world_alpha2[1] = cfg80211_regdomain->alpha2[1];
mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
regulatory_hint_core(world_alpha2);
/*
* This restores the ieee80211_regdom module parameter
* preference or the last user requested regulatory
* settings; user regulatory settings take precedence.
*/
if (is_an_alpha2(alpha2))
regulatory_hint_user(user_alpha2);
if (list_empty(&tmp_reg_req_list))
return;
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
spin_lock(&reg_requests_lock);
list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
REG_DBG_PRINT("Adding request for country %c%c back "
"into the queue\n",
reg_request->alpha2[0],
reg_request->alpha2[1]);
list_del(&reg_request->list);
list_add_tail(&reg_request->list, &reg_requests_list);
}
spin_unlock(&reg_requests_lock);
mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
REG_DBG_PRINT("Kicking the queue\n");
schedule_work(&reg_work);
}
void regulatory_hint_disconnect(void)
{
REG_DBG_PRINT("All devices are disconnected, going to "
"restore regulatory settings\n");
restore_regulatory_settings(false);
}
static bool freq_is_chan_12_13_14(u16 freq)
{
if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
return true;
return false;
}
int regulatory_hint_found_beacon(struct wiphy *wiphy,
struct ieee80211_channel *beacon_chan,
gfp_t gfp)
{
struct reg_beacon *reg_beacon;
if (likely((beacon_chan->beacon_found ||
(beacon_chan->flags & IEEE80211_CHAN_RADAR) ||
(beacon_chan->band == IEEE80211_BAND_2GHZ &&
!freq_is_chan_12_13_14(beacon_chan->center_freq)))))
return 0;
reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
if (!reg_beacon)
return -ENOMEM;
REG_DBG_PRINT("Found new beacon on "
"frequency: %d MHz (Ch %d) on %s\n",
beacon_chan->center_freq,
ieee80211_frequency_to_channel(beacon_chan->center_freq),
wiphy_name(wiphy));
memcpy(&reg_beacon->chan, beacon_chan,
sizeof(struct ieee80211_channel));
/*
* Since we can be called from BH or non-BH context
* we must use spin_lock_bh()
*/
spin_lock_bh(&reg_pending_beacons_lock);
list_add_tail(&reg_beacon->list, &reg_pending_beacons);
spin_unlock_bh(&reg_pending_beacons_lock);
schedule_work(&reg_work);
return 0;
}
static void print_rd_rules(const struct ieee80211_regdomain *rd)
{
unsigned int i;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
for (i = 0; i < rd->n_reg_rules; i++) {
reg_rule = &rd->reg_rules[i];
freq_range = &reg_rule->freq_range;
power_rule = &reg_rule->power_rule;
/*
* There may not be documentation for max antenna gain
* in certain regions
*/
if (power_rule->max_antenna_gain)
pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
freq_range->max_bandwidth_khz,
power_rule->max_antenna_gain,
power_rule->max_eirp);
else
pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
freq_range->max_bandwidth_khz,
power_rule->max_eirp);
}
}
static void print_regdomain(const struct ieee80211_regdomain *rd)
{
if (is_intersected_alpha2(rd->alpha2)) {
if (last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE) {
struct cfg80211_registered_device *rdev;
rdev = cfg80211_rdev_by_wiphy_idx(
last_request->wiphy_idx);
if (rdev) {
pr_info("Current regulatory domain updated by AP to: %c%c\n",
rdev->country_ie_alpha2[0],
rdev->country_ie_alpha2[1]);
} else
pr_info("Current regulatory domain intersected:\n");
} else
pr_info("Current regulatory domain intersected:\n");
} else if (is_world_regdom(rd->alpha2))
pr_info("World regulatory domain updated:\n");
else {
if (is_unknown_alpha2(rd->alpha2))
pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
else
pr_info("Regulatory domain changed to country: %c%c\n",
rd->alpha2[0], rd->alpha2[1]);
}
print_rd_rules(rd);
}
static void print_regdomain_info(const struct ieee80211_regdomain *rd)
{
pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
print_rd_rules(rd);
}
/* Takes ownership of rd only if it doesn't fail */
static int __set_regdom(const struct ieee80211_regdomain *rd)
{
const struct ieee80211_regdomain *intersected_rd = NULL;
struct cfg80211_registered_device *rdev = NULL;
struct wiphy *request_wiphy;
/* Some basic sanity checks first */
if (is_world_regdom(rd->alpha2)) {
if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
return -EINVAL;
update_world_regdomain(rd);
return 0;
}
if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
!is_unknown_alpha2(rd->alpha2))
return -EINVAL;
if (!last_request)
return -EINVAL;
/*
* Lets only bother proceeding on the same alpha2 if the current
* rd is non static (it means CRDA was present and was used last)
* and the pending request came in from a country IE
*/
if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
/*
* If someone else asked us to change the rd lets only bother
* checking if the alpha2 changes if CRDA was already called
*/
if (!regdom_changes(rd->alpha2))
return -EINVAL;
}
/*
* Now lets set the regulatory domain, update all driver channels
* and finally inform them of what we have done, in case they want
* to review or adjust their own settings based on their own
* internal EEPROM data
*/
if (WARN_ON(!reg_is_valid_request(rd->alpha2)))
return -EINVAL;
if (!is_valid_rd(rd)) {
pr_err("Invalid regulatory domain detected:\n");
print_regdomain_info(rd);
return -EINVAL;
}
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (!request_wiphy &&
(last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
schedule_delayed_work(&reg_timeout, 0);
return -ENODEV;
}
if (!last_request->intersect) {
int r;
if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
/*
* For a driver hint, lets copy the regulatory domain the
* driver wanted to the wiphy to deal with conflicts
*/
/*
* Userspace could have sent two replies with only
* one kernel request.
*/
if (request_wiphy->regd)
return -EALREADY;
r = reg_copy_regd(&request_wiphy->regd, rd);
if (r)
return r;
reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
/* Intersection requires a bit more work */
if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
intersected_rd = regdom_intersect(rd, cfg80211_regdomain);
if (!intersected_rd)
return -EINVAL;
/*
* We can trash what CRDA provided now.
* However if a driver requested this specific regulatory
* domain we keep it for its private use
*/
if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER)
request_wiphy->regd = rd;
else
kfree(rd);
rd = NULL;
reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
}
if (!intersected_rd)
return -EINVAL;
rdev = wiphy_to_dev(request_wiphy);
rdev->country_ie_alpha2[0] = rd->alpha2[0];
rdev->country_ie_alpha2[1] = rd->alpha2[1];
rdev->env = last_request->country_ie_env;
BUG_ON(intersected_rd == rd);
kfree(rd);
rd = NULL;
reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
}
/*
* Use this call to set the current regulatory domain. Conflicts with
* multiple drivers can be ironed out later. Caller must've already
* kmalloc'd the rd structure. Caller must hold cfg80211_mutex
*/
int set_regdom(const struct ieee80211_regdomain *rd)
{
int r;
assert_cfg80211_lock();
mutex_lock(&reg_mutex);
/* Note that this doesn't update the wiphys, this is done below */
r = __set_regdom(rd);
if (r) {
kfree(rd);
mutex_unlock(&reg_mutex);
return r;
}
/* This would make this whole thing pointless */
if (!last_request->intersect)
BUG_ON(rd != cfg80211_regdomain);
/* update all wiphys now with the new established regulatory domain */
update_all_wiphy_regulatory(last_request->initiator);
print_regdomain(cfg80211_regdomain);
nl80211_send_reg_change_event(last_request);
reg_set_request_processed();
mutex_unlock(&reg_mutex);
return r;
}
#ifdef CONFIG_HOTPLUG
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
if (last_request && !last_request->processed) {
if (add_uevent_var(env, "COUNTRY=%c%c",
last_request->alpha2[0],
last_request->alpha2[1]))
return -ENOMEM;
}
return 0;
}
#else
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
return -ENODEV;
}
#endif /* CONFIG_HOTPLUG */
/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
struct wiphy *request_wiphy = NULL;
assert_cfg80211_lock();
mutex_lock(&reg_mutex);
kfree(wiphy->regd);
if (last_request)
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (!request_wiphy || request_wiphy != wiphy)
goto out;
last_request->wiphy_idx = WIPHY_IDX_STALE;
last_request->country_ie_env = ENVIRON_ANY;
out:
mutex_unlock(&reg_mutex);
}
static void reg_timeout_work(struct work_struct *work)
{
REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
"restoring regulatory settings");
restore_regulatory_settings(true);
}
int __init regulatory_init(void)
{
int err = 0;
reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
if (IS_ERR(reg_pdev))
return PTR_ERR(reg_pdev);
reg_pdev->dev.type = &reg_device_type;
spin_lock_init(&reg_requests_lock);
spin_lock_init(&reg_pending_beacons_lock);
reg_regdb_size_check();
cfg80211_regdomain = cfg80211_world_regdom;
user_alpha2[0] = '9';
user_alpha2[1] = '7';
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_regdomain->alpha2);
if (err) {
if (err == -ENOMEM)
return err;
/*
* N.B. kobject_uevent_env() can fail mainly when we're out of
* memory, which is handled and propagated appropriately above,
* but it can also fail during a netlink_broadcast() or during
* early boot for call_usermodehelper(). For now treat these
* errors as non-fatal.
*/
pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
#ifdef CONFIG_CFG80211_REG_DEBUG
/* We want to find out exactly why when debugging */
WARN_ON(err);
#endif
}
/*
* Finally, if the user set the module parameter treat it
* as a user hint.
*/
if (!is_world_regdom(ieee80211_regdom))
regulatory_hint_user(ieee80211_regdom);
return 0;
}
void /* __init_or_exit */ regulatory_exit(void)
{
struct regulatory_request *reg_request, *tmp;
struct reg_beacon *reg_beacon, *btmp;
cancel_work_sync(&reg_work);
cancel_delayed_work_sync(&reg_timeout);
mutex_lock(&cfg80211_mutex);
mutex_lock(&reg_mutex);
reset_regdomains(true);
dev_set_uevent_suppress(&reg_pdev->dev, true);
platform_device_unregister(reg_pdev);
spin_lock_bh(&reg_pending_beacons_lock);
if (!list_empty(&reg_pending_beacons)) {
list_for_each_entry_safe(reg_beacon, btmp,
&reg_pending_beacons, list) {
list_del(&reg_beacon->list);
kfree(reg_beacon);
}
}
spin_unlock_bh(&reg_pending_beacons_lock);
if (!list_empty(&reg_beacon_list)) {
list_for_each_entry_safe(reg_beacon, btmp,
&reg_beacon_list, list) {
list_del(&reg_beacon->list);
kfree(reg_beacon);
}
}
spin_lock(&reg_requests_lock);
if (!list_empty(&reg_requests_list)) {
list_for_each_entry_safe(reg_request, tmp,
&reg_requests_list, list) {
list_del(&reg_request->list);
kfree(reg_request);
}
}
spin_unlock(&reg_requests_lock);
mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
}
| gpl-2.0 |
HinTak/linux | tools/testing/selftests/bpf/prog_tests/cpu_mask.c | 552 | 1876 | // SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <bpf/btf.h>
#include "bpf/libbpf_internal.h"
static int duration = 0;
static void validate_mask(int case_nr, const char *exp, bool *mask, int n)
{
int i;
for (i = 0; exp[i]; i++) {
if (exp[i] == '1') {
if (CHECK(i + 1 > n, "mask_short",
"case #%d: mask too short, got n=%d, need at least %d\n",
case_nr, n, i + 1))
return;
CHECK(!mask[i], "cpu_not_set",
"case #%d: mask differs, expected cpu#%d SET\n",
case_nr, i);
} else {
CHECK(i < n && mask[i], "cpu_set",
"case #%d: mask differs, expected cpu#%d UNSET\n",
case_nr, i);
}
}
CHECK(i < n, "mask_long",
"case #%d: mask too long, got n=%d, expected at most %d\n",
case_nr, n, i);
}
static struct {
const char *cpu_mask;
const char *expect;
bool fails;
} test_cases[] = {
{ "0\n", "1", false },
{ "0,2\n", "101", false },
{ "0-2\n", "111", false },
{ "0-2,3-4\n", "11111", false },
{ "0", "1", false },
{ "0-2", "111", false },
{ "0,2", "101", false },
{ "0,1-3", "1111", false },
{ "0,1,2,3", "1111", false },
{ "0,2-3,5", "101101", false },
{ "3-3", "0001", false },
{ "2-4,6,9-10", "00111010011", false },
/* failure cases */
{ "", "", true },
{ "0-", "", true },
{ "0 ", "", true },
{ "0_1", "", true },
{ "1-0", "", true },
{ "-1", "", true },
};
void test_cpu_mask()
{
int i, err, n;
bool *mask;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
mask = NULL;
err = parse_cpu_mask_str(test_cases[i].cpu_mask, &mask, &n);
if (test_cases[i].fails) {
CHECK(!err, "should_fail",
"case #%d: parsing should fail!\n", i + 1);
} else {
if (CHECK(err, "parse_err",
"case #%d: cpu mask parsing failed: %d\n",
i + 1, err))
continue;
validate_mask(i + 1, test_cases[i].expect, mask, n);
}
free(mask);
}
}
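/*
 * Editorial note: as the table above encodes, a successful call such as
 * parse_cpu_mask_str("0,2", &mask, &n) is expected to yield n == 3 with
 * mask[0] == true, mask[1] == false and mask[2] == true (the "101"
 * case), while malformed strings such as "1-0" must return an error.
 */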
| gpl-2.0 |
dbussink/linux | drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c | 552 | 2482 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
#include <core/device.h>
const struct nvkm_mc_intr
nv50_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
{ 0x00000001, NVDEV_ENGINE_MPEG },
{ 0x00000100, NVDEV_ENGINE_FIFO },
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00004000, NVDEV_ENGINE_CIPHER }, /* NV84- */
{ 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
{ 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
{ 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
{ 0x0002d101, NVDEV_SUBDEV_FB },
{},
};
static void
nv50_mc_msi_rearm(struct nvkm_mc *pmc)
{
struct nvkm_device *device = nv_device(pmc);
pci_write_config_byte(device->pdev, 0x68, 0xff);
}
int
nv50_mc_init(struct nvkm_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
return nvkm_mc_init(&priv->base);
}
struct nvkm_oclass *
nv50_mc_oclass = &(struct nvkm_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x50),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_mc_ctor,
.dtor = _nvkm_mc_dtor,
.init = nv50_mc_init,
.fini = _nvkm_mc_fini,
},
.intr = nv50_mc_intr,
.msi_rearm = nv50_mc_msi_rearm,
}.base;
| gpl-2.0 |
zeferot/test | drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c | 552 | 4362 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
#include <core/ramht.h>
#include <engine/gr/nv40.h>
/******************************************************************************
* instmem subdev implementation
*****************************************************************************/
static u32
nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
{
struct nv04_instmem_priv *priv = (void *)object;
return ioread32_native(priv->iomem + addr);
}
static void
nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
struct nv04_instmem_priv *priv = (void *)object;
iowrite32_native(data, priv->iomem + addr);
}
static int
nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nvkm_device *device = nv_device(parent);
struct nv04_instmem_priv *priv;
int ret, bar, vs;
ret = nvkm_instmem_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
/* map bar */
if (nv_device_resource_len(device, 2))
bar = 2;
else
bar = 3;
priv->iomem = ioremap(nv_device_resource_start(device, bar),
nv_device_resource_len(device, bar));
if (!priv->iomem) {
nv_error(priv, "unable to map PRAMIN BAR\n");
return -EFAULT;
}
/* PRAMIN aperture maps over the end of vram, reserve enough space
* to fit graphics contexts for every channel, the magics come
* from engine/gr/nv40.c
*/
vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
else if (nv44_gr_class(priv)) priv->base.reserved = 0x4980 * vs;
else priv->base.reserved = 0x4a40 * vs;
priv->base.reserved += 16 * 1024;
priv->base.reserved *= 32; /* per-channel */
priv->base.reserved += 512 * 1024; /* pci(e)gart table */
priv->base.reserved += 512 * 1024; /* object storage */
priv->base.reserved = round_up(priv->base.reserved, 4096);
ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
if (ret)
return ret;
/* 0x00000-0x10000: reserve for probable vbios image */
ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
&priv->vbios);
if (ret)
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
/* 0x18000-0x18200: reserve for RAMRO
* 0x18200-0x20000: padding
*/
ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
&priv->ramro);
if (ret)
return ret;
/* 0x20000-0x21000: reserve for RAMFC
* 0x21000-0x40000: padding and some unknown crap
*/
ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
if (ret)
return ret;
return 0;
}
struct nvkm_oclass *
nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
.base.handle = NV_SUBDEV(INSTMEM, 0x40),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_instmem_ctor,
.dtor = nv04_instmem_dtor,
.init = _nvkm_instmem_init,
.fini = _nvkm_instmem_fini,
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
},
.instobj = &nv04_instobj_oclass.base,
}.base;
| gpl-2.0 |
remarkableno/linux | drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c | 552 | 2249 | /*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv50.h"
#include <subdev/bios.h>
#include <subdev/bios/init.h>
static u64
g84_devinit_disable(struct nvkm_devinit *devinit)
{
struct nv50_devinit_priv *priv = (void *)devinit;
u32 r001540 = nv_rd32(priv, 0x001540);
u32 r00154c = nv_rd32(priv, 0x00154c);
u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
disable |= (1ULL << NVDEV_ENGINE_MPEG);
disable |= (1ULL << NVDEV_ENGINE_VP);
disable |= (1ULL << NVDEV_ENGINE_BSP);
disable |= (1ULL << NVDEV_ENGINE_CIPHER);
}
if (!(r00154c & 0x00000004))
disable |= (1ULL << NVDEV_ENGINE_DISP);
if (!(r00154c & 0x00000020))
disable |= (1ULL << NVDEV_ENGINE_BSP);
if (!(r00154c & 0x00000040))
disable |= (1ULL << NVDEV_ENGINE_CIPHER);
return disable;
}
struct nvkm_oclass *
g84_devinit_oclass = &(struct nvkm_devinit_impl) {
.base.handle = NV_SUBDEV(DEVINIT, 0x84),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_devinit_ctor,
.dtor = _nvkm_devinit_dtor,
.init = nv50_devinit_init,
.fini = _nvkm_devinit_fini,
},
.pll_set = nv50_devinit_pll_set,
.disable = g84_devinit_disable,
.post = nvbios_init,
}.base;
| gpl-2.0 |
invisiblek/linux-2.6.32.26-inc | arch/arm/common/vic.c | 552 | 10445 | /*
* linux/arch/arm/common/vic.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
static void vic_ack_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
irq &= 31;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
/* moreover, clear the soft-triggered, in case it was the reason */
writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
}
static void vic_mask_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
irq &= 31;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
}
static void vic_unmask_irq(unsigned int irq)
{
void __iomem *base = get_irq_chip_data(irq);
irq &= 31;
writel(1 << irq, base + VIC_INT_ENABLE);
}
/**
* vic_init2 - common initialisation code
* @base: Base of the VIC.
*
* Common initialisation code for registration
* and resume.
*/
static void vic_init2(void __iomem *base)
{
int i;
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
writel(VIC_VECT_CNTL_ENABLE | i, reg);
}
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
#if defined(CONFIG_PM)
/**
* struct vic_device - VIC PM device
* @sysdev: The system device which is registered.
* @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
* @int_select: Save for VIC_INT_SELECT.
* @int_enable: Save for VIC_INT_ENABLE.
* @soft_int: Save for VIC_INT_SOFT.
* @protect: Save for VIC_PROTECT.
*/
struct vic_device {
struct sys_device sysdev;
void __iomem *base;
int irq;
u32 resume_sources;
u32 resume_irqs;
u32 int_select;
u32 int_enable;
u32 soft_int;
u32 protect;
};
/* we cannot allocate memory when VICs are initially registered */
static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
static inline struct vic_device *to_vic(struct sys_device *sys)
{
return container_of(sys, struct vic_device, sysdev);
}
static int vic_id;
static int vic_class_resume(struct sys_device *dev)
{
struct vic_device *vic = to_vic(dev);
void __iomem *base = vic->base;
printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
/* re-initialise static settings */
vic_init2(base);
writel(vic->int_select, base + VIC_INT_SELECT);
writel(vic->protect, base + VIC_PROTECT);
/* set the enabled ints and then clear the non-enabled */
writel(vic->int_enable, base + VIC_INT_ENABLE);
writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
/* and the same for the soft-int register */
writel(vic->soft_int, base + VIC_INT_SOFT);
writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
return 0;
}
static int vic_class_suspend(struct sys_device *dev, pm_message_t state)
{
struct vic_device *vic = to_vic(dev);
void __iomem *base = vic->base;
printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
vic->int_select = readl(base + VIC_INT_SELECT);
vic->int_enable = readl(base + VIC_INT_ENABLE);
vic->soft_int = readl(base + VIC_INT_SOFT);
vic->protect = readl(base + VIC_PROTECT);
/* set the interrupts (if any) that are used for
* resuming the system */
writel(vic->resume_irqs, base + VIC_INT_ENABLE);
writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
return 0;
}
struct sysdev_class vic_class = {
.name = "vic",
.suspend = vic_class_suspend,
.resume = vic_class_resume,
};
/**
* vic_pm_register - Register a VIC for later power management control
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
* @resume_sources: bitmask of interrupts allowed for resume sources.
*
* Register the VIC with the system device tree so that it can be notified
* of suspend and resume requests and ensure that the correct actions are
* taken to re-instate the settings on resume.
*/
static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
{
struct vic_device *v;
if (vic_id >= ARRAY_SIZE(vic_devices))
printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
else {
v = &vic_devices[vic_id];
v->base = base;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
}
}
/**
* vic_pm_init - initicall to register VIC pm
*
* This is called via late_initcall() to register
* the resources for the VICs due to the early
* nature of the VIC's registration.
*/
static int __init vic_pm_init(void)
{
struct vic_device *dev = vic_devices;
int err;
int id;
if (vic_id == 0)
return 0;
err = sysdev_class_register(&vic_class);
if (err) {
printk(KERN_ERR "%s: cannot register class\n", __func__);
return err;
}
for (id = 0; id < vic_id; id++, dev++) {
dev->sysdev.id = id;
dev->sysdev.cls = &vic_class;
err = sysdev_register(&dev->sysdev);
if (err) {
printk(KERN_ERR "%s: failed to register device\n",
__func__);
return err;
}
}
return 0;
}
late_initcall(vic_pm_init);
static struct vic_device *vic_from_irq(unsigned int irq)
{
struct vic_device *v = vic_devices;
unsigned int base_irq = irq & ~31;
int id;
for (id = 0; id < vic_id; id++, v++) {
if (v->irq == base_irq)
return v;
}
return NULL;
}
static int vic_set_wake(unsigned int irq, unsigned int on)
{
struct vic_device *v = vic_from_irq(irq);
unsigned int off = irq & 31;
u32 bit = 1 << off;
if (!v)
return -EINVAL;
if (!(bit & v->resume_sources))
return -EINVAL;
if (on)
v->resume_irqs |= bit;
else
v->resume_irqs &= ~bit;
return 0;
}
#else
static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
#define vic_set_wake NULL
#endif /* CONFIG_PM */
static struct irq_chip vic_chip = {
.name = "VIC",
.ack = vic_ack_irq,
.mask = vic_mask_irq,
.unmask = vic_unmask_irq,
.set_wake = vic_set_wake,
};
/* The PL190 cell from ARM has been modified by ST, so handle both here */
static void vik_init_st(void __iomem *base, unsigned int irq_start,
u32 vic_sources);
/**
* vic_init - initialise a vectored interrupt controller
* @base: iomem base address
* @irq_start: starting interrupt number, must be muliple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*/
void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
{
unsigned int i;
u32 cellid = 0;
enum amba_vendor vendor;
/* Identify which VIC cell this one is, by reading the ID */
for (i = 0; i < 4; i++) {
u32 addr = ((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
cellid |= (readl(addr) & 0xff) << (8 * i);
}
vendor = (cellid >> 12) & 0xff;
printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
base, cellid, vendor);
switch(vendor) {
case AMBA_VENDOR_ST:
vik_init_st(base, irq_start, vic_sources);
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
/* fall through */
case AMBA_VENDOR_ARM:
break;
}
/* Disable all interrupts initially. */
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
/*
* Make sure we clear all existing interrupts
*/
writel(0, base + VIC_PL190_VECT_ADDR);
for (i = 0; i < 19; i++) {
unsigned int value;
value = readl(base + VIC_PL190_VECT_ADDR);
writel(value, base + VIC_PL190_VECT_ADDR);
}
vic_init2(base);
for (i = 0; i < 32; i++) {
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
set_irq_chip(irq, &vic_chip);
set_irq_chip_data(irq, base);
set_irq_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
vic_pm_register(base, irq_start, resume_sources);
}
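/*
 * Illustrative usage sketch (editorial addition; VA_VIC0_BASE and
 * IRQ_VIC0_START are hypothetical platform constants): a board's IRQ
 * init hook would typically register a controller like this, exposing
 * all 32 sources and allowing none of them as wakeup sources.
 */
#if 0
vic_init(VA_VIC0_BASE, IRQ_VIC0_START, ~0, 0);
#endif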
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
* replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
* the probe function is called twice, with base set to offset 000
* and 020 within the page. We call this "second block".
*/
static void __init vik_init_st(void __iomem *base, unsigned int irq_start,
u32 vic_sources)
{
unsigned int i;
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
/* Disable all interrupts initially. */
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
/*
* Make sure we clear all existing interrupts. The vector registers
* in this cell are after the second block of general registers,
* so we can address them using standard offsets, but only from
* the second base address, which is 0x20 in the page
*/
if (vic_2nd_block) {
writel(0, base + VIC_PL190_VECT_ADDR);
for (i = 0; i < 19; i++) {
unsigned int value;
value = readl(base + VIC_PL190_VECT_ADDR);
writel(value, base + VIC_PL190_VECT_ADDR);
}
/* ST has 16 vectors as well, but we don't enable them by now */
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
writel(0, reg);
}
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
for (i = 0; i < 32; i++) {
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
set_irq_chip(irq, &vic_chip);
set_irq_chip_data(irq, base);
set_irq_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
}
| gpl-2.0 |
tifler/linux-mainline | drivers/video/fbdev/core/syscopyarea.c | 2088 | 8730 | /*
* Generic Bit Block Transfer for frame buffers located in system RAM with
* packed pixels of any depth.
*
* Based almost entirely from cfbcopyarea.c (which is based almost entirely
* on Geert Uytterhoeven's copyarea routine)
*
* Copyright (C) 2007 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#include <asm/io.h>
#include "fb_draw.h"
/*
* Generic bitwise copy algorithm
*/
static void
bitcpy(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
const unsigned long *src, unsigned src_idx, int bits, unsigned n)
{
unsigned long first, last;
int const shift = dst_idx-src_idx;
int left, right;
first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
if (!shift) {
/* Same alignment for source and dest */
if (dst_idx+n <= bits) {
/* Single word */
if (last)
first &= last;
*dst = comp(*src, *dst, first);
} else {
/* Multiple destination words */
/* Leading bits */
if (first != ~0UL) {
*dst = comp(*src, *dst, first);
dst++;
src++;
n -= bits - dst_idx;
}
/* Main chunk */
n /= bits;
while (n >= 8) {
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
n -= 8;
}
while (n--)
*dst++ = *src++;
/* Trailing bits */
if (last)
*dst = comp(*src, *dst, last);
}
} else {
unsigned long d0, d1;
int m;
/* Different alignment for source and dest */
right = shift & (bits - 1);
left = -shift & (bits - 1);
if (dst_idx+n <= bits) {
/* Single destination word */
if (last)
first &= last;
if (shift > 0) {
/* Single source word */
*dst = comp(*src << left, *dst, first);
} else if (src_idx+n <= bits) {
/* Single source word */
*dst = comp(*src >> right, *dst, first);
} else {
/* 2 source words */
d0 = *src++;
d1 = *src;
*dst = comp(d0 >> right | d1 << left, *dst,
first);
}
} else {
/* Multiple destination words */
/** We must always remember the last value read,
because in case SRC and DST overlap bitwise (e.g.
when moving just one pixel in 1bpp), we always
collect one full long for DST and that might
overlap with the current long from SRC. We store
this value in 'd0'. */
d0 = *src++;
/* Leading bits */
if (shift > 0) {
/* Single source word */
*dst = comp(d0 << left, *dst, first);
dst++;
n -= bits - dst_idx;
} else {
/* 2 source words */
d1 = *src++;
*dst = comp(d0 >> right | d1 << left, *dst,
first);
d0 = d1;
dst++;
n -= bits - dst_idx;
}
/* Main chunk */
m = n % bits;
n /= bits;
while (n >= 4) {
d1 = *src++;
*dst++ = d0 >> right | d1 << left;
d0 = d1;
d1 = *src++;
*dst++ = d0 >> right | d1 << left;
d0 = d1;
d1 = *src++;
*dst++ = d0 >> right | d1 << left;
d0 = d1;
d1 = *src++;
*dst++ = d0 >> right | d1 << left;
d0 = d1;
n -= 4;
}
while (n--) {
d1 = *src++;
*dst++ = d0 >> right | d1 << left;
d0 = d1;
}
/* Trailing bits */
if (m) {
if (m <= bits - right) {
/* Single source word */
d0 >>= right;
} else {
/* 2 source words */
d1 = *src;
d0 = d0 >> right | d1 << left;
}
*dst = comp(d0, *dst, last);
}
}
}
}
/*
* Generic bitwise copy algorithm, operating backward
*/
static void
bitcpy_rev(struct fb_info *p, unsigned long *dst, unsigned dst_idx,
const unsigned long *src, unsigned src_idx, unsigned bits,
unsigned n)
{
unsigned long first, last;
int shift;
dst += (dst_idx + n - 1) / bits;
src += (src_idx + n - 1) / bits;
dst_idx = (dst_idx + n - 1) % bits;
src_idx = (src_idx + n - 1) % bits;
shift = dst_idx-src_idx;
first = ~FB_SHIFT_HIGH(p, ~0UL, (dst_idx + 1) % bits);
last = FB_SHIFT_HIGH(p, ~0UL, (bits + dst_idx + 1 - n) % bits);
if (!shift) {
/* Same alignment for source and dest */
if ((unsigned long)dst_idx+1 >= n) {
/* Single word */
if (first)
last &= first;
*dst = comp(*src, *dst, last);
} else {
/* Multiple destination words */
/* Leading bits */
if (first) {
*dst = comp(*src, *dst, first);
dst--;
src--;
n -= dst_idx+1;
}
/* Main chunk */
n /= bits;
while (n >= 8) {
*dst-- = *src--;
*dst-- = *src--;
*dst-- = *src--;
*dst-- = *src--;
*dst-- = *src--;
*dst-- = *src--;
*dst-- = *src--;
*dst-- = *src--;
n -= 8;
}
while (n--)
*dst-- = *src--;
/* Trailing bits */
if (last != -1UL)
*dst = comp(*src, *dst, last);
}
} else {
/* Different alignment for source and dest */
int const left = shift & (bits-1);
int const right = -shift & (bits-1);
if ((unsigned long)dst_idx+1 >= n) {
/* Single destination word */
if (first)
last &= first;
if (shift < 0) {
/* Single source word */
*dst = comp(*src >> right, *dst, last);
} else if (1+(unsigned long)src_idx >= n) {
/* Single source word */
*dst = comp(*src << left, *dst, last);
} else {
/* 2 source words */
*dst = comp(*src << left | *(src-1) >> right,
*dst, last);
}
} else {
/* Multiple destination words */
/** We must always remember the last value read,
because in case SRC and DST overlap bitwise (e.g.
when moving just one pixel in 1bpp), we always
collect one full long for DST and that might
overlap with the current long from SRC. We store
this value in 'd0'. */
unsigned long d0, d1;
int m;
d0 = *src--;
/* Leading bits */
if (shift < 0) {
/* Single source word */
d1 = d0;
d0 >>= right;
} else {
/* 2 source words */
d1 = *src--;
d0 = d0 << left | d1 >> right;
}
if (!first)
*dst = d0;
else
*dst = comp(d0, *dst, first);
d0 = d1;
dst--;
n -= dst_idx+1;
/* Main chunk */
m = n % bits;
n /= bits;
while (n >= 4) {
d1 = *src--;
*dst-- = d0 << left | d1 >> right;
d0 = d1;
d1 = *src--;
*dst-- = d0 << left | d1 >> right;
d0 = d1;
d1 = *src--;
*dst-- = d0 << left | d1 >> right;
d0 = d1;
d1 = *src--;
*dst-- = d0 << left | d1 >> right;
d0 = d1;
n -= 4;
}
while (n--) {
d1 = *src--;
*dst-- = d0 << left | d1 >> right;
d0 = d1;
}
/* Trailing bits */
if (m) {
if (m <= bits - left) {
/* Single source word */
d0 <<= left;
} else {
/* 2 source words */
d1 = *src;
d0 = d0 << left | d1 >> right;
}
*dst = comp(d0, *dst, last);
}
}
}
}
void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
unsigned long const bits_per_line = p->fix.line_length*8u;
unsigned long *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
if (p->state != FBINFO_STATE_RUNNING)
return;
/* if the beginning of the target area might overlap with the end of
the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
dy += height;
sy += height;
rev_copy = 1;
}
/* split the base of the framebuffer into a long-aligned address and
the index of the first bit */
base = (unsigned long *)((unsigned long)p->screen_base & ~(bytes-1));
dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
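/*
* Example: with 64-bit longs (bytes == 8) and a screen_base ending in
* ...6, 'base' is rounded down to the address ending in ...0 and the
* initial bit index becomes 8 * 6 == 48.
*/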
/* add offset of source and target area */
dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel;
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
if (rev_copy) {
while (height--) {
dst_idx -= bits_per_line;
src_idx -= bits_per_line;
bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
base + (src_idx / bits), src_idx % bits, bits,
width*p->var.bits_per_pixel);
}
} else {
while (height--) {
bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
base + (src_idx / bits), src_idx % bits, bits,
width*p->var.bits_per_pixel);
dst_idx += bits_per_line;
src_idx += bits_per_line;
}
}
}
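/*
* Illustrative (hypothetical) use: scrolling the display content up by
* one 16-pixel character row could be expressed as
*
*	struct fb_copyarea area = {
*		.dx = 0, .dy = 0, .sx = 0, .sy = 16,
*		.width = info->var.xres, .height = info->var.yres - 16,
*	};
*	sys_copyarea(info, &area);
*
* where 'info' is the driver's struct fb_info.
*/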
EXPORT_SYMBOL(sys_copyarea);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Generic copyarea (sys-to-sys)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jfdsmabalot/kernel_moto-g | drivers/power/battery_current_limit.c | 2088 | 13269 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/power_supply.h>
#include <linux/delay.h>
#include <linux/slab.h>
#define BCL_DEV_NAME "battery_current_limit"
#define BCL_NAME_LENGTH 20
/*
* Default BCL poll interval 1000 msec
*/
#define BCL_POLL_INTERVAL 1000
/*
* Minimum BCL poll interval 10 msec
*/
#define MIN_BCL_POLL_INTERVAL 10
#define BATTERY_VOLTAGE_MIN 3400
static const char bcl_type[] = "bcl";
/*
* Battery Current Limit Enable or Not
*/
enum bcl_device_mode {
BCL_DEVICE_DISABLED = 0,
BCL_DEVICE_ENABLED,
};
/*
* Battery Current Limit Iavail Threshold Mode set
*/
enum bcl_iavail_threshold_mode {
BCL_IAVAIL_THRESHOLD_DISABLED = 0,
BCL_IAVAIL_THRESHOLD_ENABLED,
};
/*
* Battery Current Limit Iavail Threshold Mode
*/
enum bcl_iavail_threshold_type {
BCL_IAVAIL_LOW_THRESHOLD_TYPE = 0,
BCL_IAVAIL_HIGH_THRESHOLD_TYPE,
BCL_IAVAIL_THRESHOLD_TYPE_MAX,
};
/**
* BCL control block
*
*/
struct bcl_context {
/* BCL device */
struct device *dev;
/* BCL related config parameter */
/* BCL mode enable or not */
enum bcl_device_mode bcl_mode;
/* BCL Iavail Threshold Activate or Not */
enum bcl_iavail_threshold_mode
bcl_threshold_mode[BCL_IAVAIL_THRESHOLD_TYPE_MAX];
/* BCL Iavail Threshold value in milli Amp */
int bcl_threshold_value_ma[BCL_IAVAIL_THRESHOLD_TYPE_MAX];
/* BCL Type */
char bcl_type[BCL_NAME_LENGTH];
/* BCL poll in msec */
int bcl_poll_interval_msec;
/* BCL realtime value based on poll */
/* BCL realtime vbat in mV*/
int bcl_vbat_mv;
/* BCL realtime rbat in mOhms*/
int bcl_rbat_mohm;
/*BCL realtime iavail in milli Amp*/
int bcl_iavail;
/*BCL vbatt min in mV*/
int bcl_vbat_min;
/* BCL period poll delay work structure */
struct delayed_work bcl_iavail_work;
};
static struct bcl_context *gbcl;
static int bcl_get_battery_voltage(int *vbatt_mv)
{
static struct power_supply *psy;
union power_supply_propval ret = {0,};
if (psy == NULL) {
psy = power_supply_get_by_name("battery");
if (psy == NULL) {
pr_err("failed to get ps battery\n");
return -EINVAL;
}
}
if (psy->get_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &ret))
return -EINVAL;
if (ret.intval <= 0)
return -EINVAL;
*vbatt_mv = ret.intval / 1000;
return 0;
}
static int bcl_get_resistance(int *rbatt_mohm)
{
static struct power_supply *psy;
union power_supply_propval ret = {0,};
if (psy == NULL) {
psy = power_supply_get_by_name("bms");
if (psy == NULL) {
pr_err("failed to get ps bms\n");
return -EINVAL;
}
}
if (psy->get_property(psy, POWER_SUPPLY_PROP_RESISTANCE, &ret))
return -EINVAL;
if (ret.intval <= 0)
return -EINVAL;
*rbatt_mohm = ret.intval / 1000;
return 0;
}
/*
* BCL iavail calculation and trigger notification to user space
* if iavail cross threshold
*/
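/*
* Example: with vbatt at 3800 mV, vbat_min at the 3400 mV default and
* rbatt at 200 mOhm, iavail = (3800 - 3400) * 1000 / 200 = 2000 mA.
*/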
static void bcl_calculate_iavail_trigger(void)
{
int iavail_ma = 0;
int vbatt_mv;
int rbatt_mohm;
bool threshold_cross = false;
if (!gbcl) {
pr_err("called before initialization\n");
return;
}
if (bcl_get_battery_voltage(&vbatt_mv))
return;
if (bcl_get_resistance(&rbatt_mohm))
return;
iavail_ma = (vbatt_mv - gbcl->bcl_vbat_min) * 1000 / rbatt_mohm;
gbcl->bcl_rbat_mohm = rbatt_mohm;
gbcl->bcl_vbat_mv = vbatt_mv;
gbcl->bcl_iavail = iavail_ma;
pr_debug("iavail %d, vbatt %d rbatt %d\n", iavail_ma, vbatt_mv,
rbatt_mohm);
if ((gbcl->bcl_threshold_mode[BCL_IAVAIL_HIGH_THRESHOLD_TYPE] ==
BCL_IAVAIL_THRESHOLD_ENABLED)
&& (iavail_ma >=
gbcl->bcl_threshold_value_ma[BCL_IAVAIL_HIGH_THRESHOLD_TYPE]))
threshold_cross = true;
else if ((gbcl->bcl_threshold_mode[BCL_IAVAIL_LOW_THRESHOLD_TYPE]
== BCL_IAVAIL_THRESHOLD_ENABLED)
&& (iavail_ma <=
gbcl->bcl_threshold_value_ma[BCL_IAVAIL_LOW_THRESHOLD_TYPE]))
threshold_cross = true;
if (threshold_cross)
sysfs_notify(&gbcl->dev->kobj, NULL, "type");
}
/*
* BCL iavail work
*/
static void bcl_iavail_work(struct work_struct *work)
{
struct bcl_context *bcl = container_of(work,
struct bcl_context, bcl_iavail_work.work);
if (gbcl->bcl_mode == BCL_DEVICE_ENABLED) {
bcl_calculate_iavail_trigger();
/* restart the delayed work for calculating iavail */
schedule_delayed_work(&bcl->bcl_iavail_work,
msecs_to_jiffies(bcl->bcl_poll_interval_msec));
}
}
/*
* Set BCL mode
*/
static void bcl_mode_set(enum bcl_device_mode mode)
{
if (!gbcl)
return;
if (gbcl->bcl_mode == mode)
return;
if (gbcl->bcl_mode == BCL_DEVICE_DISABLED
&& mode == BCL_DEVICE_ENABLED) {
gbcl->bcl_mode = mode;
bcl_iavail_work(&(gbcl->bcl_iavail_work.work));
return;
} else if (gbcl->bcl_mode == BCL_DEVICE_ENABLED
&& mode == BCL_DEVICE_DISABLED) {
gbcl->bcl_mode = mode;
cancel_delayed_work_sync(&(gbcl->bcl_iavail_work));
return;
}
return;
}
#define show_bcl(name, variable, format) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
if (gbcl) \
return snprintf(buf, PAGE_SIZE, format, gbcl->variable); \
else \
return -EPERM; \
}
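/*
* Each show_bcl() invocation below expands to a sysfs show handler;
* e.g. show_bcl(vbat, bcl_vbat_mv, "%d\n") defines vbat_show(), which
* prints gbcl->bcl_vbat_mv.
*/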
show_bcl(type, bcl_type, "%s\n")
show_bcl(vbat, bcl_vbat_mv, "%d\n")
show_bcl(rbat, bcl_rbat_mohm, "%d\n")
show_bcl(iavail, bcl_iavail, "%d\n")
show_bcl(vbat_min, bcl_vbat_min, "%d\n")
show_bcl(poll_interval, bcl_poll_interval_msec, "%d\n")
static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
if (!gbcl)
return -EPERM;
return snprintf(buf, PAGE_SIZE, "%s\n",
gbcl->bcl_mode == BCL_DEVICE_ENABLED ? "enabled"
: "disabled");
}
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
if (!gbcl)
return -EPERM;
if (!strncmp(buf, "enabled", 7))
bcl_mode_set(BCL_DEVICE_ENABLED);
else if (!strncmp(buf, "disabled", 8))
bcl_mode_set(BCL_DEVICE_DISABLED);
else
return -EINVAL;
return count;
}
static ssize_t
poll_interval_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int value;
if (!gbcl)
return -EPERM;
if (!sscanf(buf, "%d", &value))
return -EINVAL;
if (value < MIN_BCL_POLL_INTERVAL)
return -EINVAL;
gbcl->bcl_poll_interval_msec = value;
return count;
}
static ssize_t vbat_min_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int value;
int ret;
if (!gbcl)
return -EPERM;
ret = kstrtoint(buf, 10, &value);
if (ret || (value < 0)) {
pr_err("Incorrect vbatt min value\n");
return -EINVAL;
}
gbcl->bcl_vbat_min = value;
return count;
}
static ssize_t iavail_low_threshold_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!gbcl)
return -EPERM;
return snprintf(buf, PAGE_SIZE, "%s\n",
gbcl->bcl_threshold_mode[BCL_IAVAIL_LOW_THRESHOLD_TYPE]
== BCL_IAVAIL_THRESHOLD_ENABLED ? "enabled" : "disabled");
}
static ssize_t iavail_low_threshold_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
if (!gbcl)
return -EPERM;
if (!strncmp(buf, "enabled", 7))
gbcl->bcl_threshold_mode[BCL_IAVAIL_LOW_THRESHOLD_TYPE]
= BCL_IAVAIL_THRESHOLD_ENABLED;
else if (!strncmp(buf, "disabled", 8))
gbcl->bcl_threshold_mode[BCL_IAVAIL_LOW_THRESHOLD_TYPE]
= BCL_IAVAIL_THRESHOLD_DISABLED;
else
return -EINVAL;
return count;
}
static ssize_t iavail_high_threshold_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!gbcl)
return -EPERM;
return snprintf(buf, PAGE_SIZE, "%s\n",
gbcl->bcl_threshold_mode[BCL_IAVAIL_HIGH_THRESHOLD_TYPE]
== BCL_IAVAIL_THRESHOLD_ENABLED ? "enabled" : "disabled");
}
static ssize_t iavail_high_threshold_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
if (!gbcl)
return -EPERM;
if (!strncmp(buf, "enabled", 7))
gbcl->bcl_threshold_mode[BCL_IAVAIL_HIGH_THRESHOLD_TYPE]
= BCL_IAVAIL_THRESHOLD_ENABLED;
else if (!strncmp(buf, "disabled", 8))
gbcl->bcl_threshold_mode[BCL_IAVAIL_HIGH_THRESHOLD_TYPE]
= BCL_IAVAIL_THRESHOLD_DISABLED;
else
return -EINVAL;
return count;
}
static ssize_t iavail_low_threshold_value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!gbcl)
return -EPERM;
return snprintf(buf, PAGE_SIZE, "%d\n",
gbcl->bcl_threshold_value_ma[BCL_IAVAIL_LOW_THRESHOLD_TYPE]);
}
static ssize_t iavail_low_threshold_value_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int val;
int ret;
ret = kstrtoint(buf, 10, &val);
if (ret || (val < 0)) {
pr_err("Incorrect available current threshold value\n");
return -EINVAL;
}
gbcl->bcl_threshold_value_ma[BCL_IAVAIL_LOW_THRESHOLD_TYPE] = val;
return count;
}
static ssize_t iavail_high_threshold_value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!gbcl)
return -EPERM;
return snprintf(buf, PAGE_SIZE, "%d\n",
gbcl->bcl_threshold_value_ma[BCL_IAVAIL_HIGH_THRESHOLD_TYPE]);
}
static ssize_t iavail_high_threshold_value_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int val;
int ret;
ret = kstrtoint(buf, 10, &val);
if (ret || (val < 0)) {
pr_err("Incorrect available current threshold value\n");
return -EINVAL;
}
gbcl->bcl_threshold_value_ma[BCL_IAVAIL_HIGH_THRESHOLD_TYPE] = val;
return count;
}
/*
* BCL device attributes
*/
static struct device_attribute bcl_dev_attr[] = {
__ATTR(type, 0444, type_show, NULL),
__ATTR(iavail, 0444, iavail_show, NULL),
__ATTR(vbat_min, 0644, vbat_min_show, vbat_min_store),
__ATTR(vbat, 0444, vbat_show, NULL),
__ATTR(rbat, 0444, rbat_show, NULL),
__ATTR(mode, 0644, mode_show, mode_store),
__ATTR(poll_interval, 0644,
poll_interval_show, poll_interval_store),
__ATTR(iavail_low_threshold_mode, 0644,
iavail_low_threshold_mode_show,
iavail_low_threshold_mode_store),
__ATTR(iavail_high_threshold_mode, 0644,
iavail_high_threshold_mode_show,
iavail_high_threshold_mode_store),
__ATTR(iavail_low_threshold_value, 0644,
iavail_low_threshold_value_show,
iavail_low_threshold_value_store),
__ATTR(iavail_high_threshold_value, 0644,
iavail_high_threshold_value_show,
iavail_high_threshold_value_store),
};
static int create_bcl_sysfs(struct bcl_context *bcl)
{
int result = 0;
int num_attr = ARRAY_SIZE(bcl_dev_attr);
int i;
for (i = 0; i < num_attr; i++) {
result = device_create_file(bcl->dev, &bcl_dev_attr[i]);
if (result < 0)
return result;
}
return 0;
}
static void remove_bcl_sysfs(struct bcl_context *bcl)
{
int num_attr = ARRAY_SIZE(bcl_dev_attr);
int i;
for (i = 0; i < num_attr; i++)
device_remove_file(bcl->dev, &bcl_dev_attr[i]);
return;
}
static int __devinit bcl_probe(struct platform_device *pdev)
{
struct bcl_context *bcl;
int ret = 0;
bcl = kzalloc(sizeof(struct bcl_context), GFP_KERNEL);
if (!bcl) {
pr_err("Cannot allocate bcl_context\n");
return -ENOMEM;
}
gbcl = bcl;
/* For BCL */
/* Init default BCL params */
bcl->dev = &pdev->dev;
bcl->bcl_mode = BCL_DEVICE_DISABLED;
bcl->bcl_threshold_mode[BCL_IAVAIL_LOW_THRESHOLD_TYPE] =
BCL_IAVAIL_THRESHOLD_DISABLED;
bcl->bcl_threshold_mode[BCL_IAVAIL_HIGH_THRESHOLD_TYPE] =
BCL_IAVAIL_THRESHOLD_DISABLED;
bcl->bcl_threshold_value_ma[BCL_IAVAIL_LOW_THRESHOLD_TYPE] = 0;
bcl->bcl_threshold_value_ma[BCL_IAVAIL_HIGH_THRESHOLD_TYPE] = 0;
bcl->bcl_vbat_min = BATTERY_VOLTAGE_MIN;
snprintf(bcl->bcl_type, BCL_NAME_LENGTH, "%s", bcl_type);
bcl->bcl_poll_interval_msec = BCL_POLL_INTERVAL;
ret = create_bcl_sysfs(bcl);
if (ret < 0) {
pr_err("Cannot create bcl sysfs\n");
kfree(bcl);
return ret;
}
platform_set_drvdata(pdev, bcl);
INIT_DELAYED_WORK_DEFERRABLE(&bcl->bcl_iavail_work, bcl_iavail_work);
return 0;
}
static int __devexit bcl_remove(struct platform_device *pdev)
{
remove_bcl_sysfs(gbcl);
kfree(gbcl);
gbcl = NULL;
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct of_device_id bcl_match_table[] = {
{.compatible = "qcom,bcl"},
{},
};
static struct platform_driver bcl_driver = {
.probe = bcl_probe,
.remove = __devexit_p(bcl_remove),
.driver = {
.name = BCL_DEV_NAME,
.owner = THIS_MODULE,
.of_match_table = bcl_match_table,
},
};
static int __init bcl_init(void)
{
return platform_driver_register(&bcl_driver);
}
static void __exit bcl_exit(void)
{
platform_driver_unregister(&bcl_driver);
}
late_initcall(bcl_init);
module_exit(bcl_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("battery current limit driver");
MODULE_ALIAS("platform:" BCL_DEV_NAME);
| gpl-2.0 |
zarboz/aozp-ville | fs/lockd/svclock.c | 2088 | 26202 | /*
* linux/fs/lockd/svclock.c
*
* Handling of server-side locks, mostly of the blocked variety.
* This is the ugliest part of lockd because we tread on very thin ice.
* GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
* IMNSHO introducing the grant callback into the NLM protocol was one
* of the worst ideas Sun ever had. Except maybe for the idea of doing
* NFS file locking at all.
*
* I'm trying hard to avoid race conditions by protecting most accesses
* to a file's list of blocked locks through a semaphore. The global
* list of blocked locks is not protected in this fashion however.
* Therefore, some functions (such as the RPC callback for the async grant
* call) move blocked locks towards the head of the list *while some other
* process might be traversing it*. This should not be a problem in
* practice, because this will only cause functions traversing the list
* to visit some blocks twice.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock nlm4_deadlock
#else
#define nlm_deadlock nlm_lck_denied
#endif
static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie);
/*
* The list of blocked locks to retry
*/
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
/*
* Insert a blocked lock into the global list
*/
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
struct nlm_block *b;
struct list_head *pos;
dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
if (list_empty(&block->b_list)) {
kref_get(&block->b_count);
} else {
list_del_init(&block->b_list);
}
pos = &nlm_blocked;
if (when != NLM_NEVER) {
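/*
* Convert the relative timeout into an absolute expiry time; if
* the sum happens to hit the NLM_NEVER sentinel, nudge it by one
* jiffy so the block is not mistaken for "never retry".
*/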
if ((when += jiffies) == NLM_NEVER)
when++;
list_for_each(pos, &nlm_blocked) {
b = list_entry(pos, struct nlm_block, b_list);
if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
break;
}
/* On normal exit from the loop, pos == &nlm_blocked,
* so we will be adding to the end of the list - good
*/
}
list_add_tail(&block->b_list, pos);
block->b_when = when;
}
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
spin_lock(&nlm_blocked_lock);
nlmsvc_insert_block_locked(block, when);
spin_unlock(&nlm_blocked_lock);
}
/*
* Remove a block from the global list
*/
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
if (!list_empty(&block->b_list)) {
spin_lock(&nlm_blocked_lock);
list_del_init(&block->b_list);
spin_unlock(&nlm_blocked_lock);
nlmsvc_release_block(block);
}
}
/*
* Find a block for a given lock
*/
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
struct file_lock *fl;
dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
file, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end, lock->fl.fl_type);
list_for_each_entry(block, &nlm_blocked, b_list) {
fl = &block->b_call->a_args.lock.fl;
dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
block->b_file, fl->fl_pid,
(long long)fl->fl_start,
(long long)fl->fl_end, fl->fl_type,
nlmdbg_cookie2a(&block->b_call->a_args.cookie));
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
kref_get(&block->b_count);
return block;
}
}
return NULL;
}
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
if (a->len != b->len)
return 0;
if (memcmp(a->data, b->data, a->len))
return 0;
return 1;
}
/*
* Find a block with a given NLM cookie.
*/
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
struct nlm_block *block;
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
goto found;
}
return NULL;
found:
dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
kref_get(&block->b_count);
return block;
}
/*
* Create a block and initialize it.
*
* Note: we explicitly set the cookie of the grant reply to that of
* the blocked lock request. The spec explicitly mentions that the client
* should _not_ rely on the callback containing the same cookie as the
* request, but (as I found out later) that's because some implementations
* do just this. Never mind the standards committees, they support our
* logging industries.
*
* 10 years later: I hope we can safely ignore these old and broken
* clients by now. Let's fix this so we can uniquely identify an incoming
* GRANTED_RES message by cookie, without having to rely on the client's IP
* address. --okir
*/
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
struct nlm_file *file, struct nlm_lock *lock,
struct nlm_cookie *cookie)
{
struct nlm_block *block;
struct nlm_rqst *call = NULL;
nlm_get_host(host);
call = nlm_alloc_call(host);
if (call == NULL)
return NULL;
/* Allocate memory for block, and initialize arguments */
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (block == NULL)
goto failed;
kref_init(&block->b_count);
INIT_LIST_HEAD(&block->b_list);
INIT_LIST_HEAD(&block->b_flist);
if (!nlmsvc_setgrantargs(call, lock))
goto failed_free;
/* Set notifier function for VFS, and init args */
call->a_args.lock.fl.fl_flags |= FL_SLEEP;
call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
nlmclnt_next_cookie(&call->a_args.cookie);
dprintk("lockd: created block %p...\n", block);
/* Create and initialize the block */
block->b_daemon = rqstp->rq_server;
block->b_host = host;
block->b_file = file;
block->b_fl = NULL;
file->f_count++;
/* Add to file's list of blocks */
list_add(&block->b_flist, &file->f_blocks);
/* Set up RPC arguments for callback */
block->b_call = call;
call->a_flags = RPC_TASK_ASYNC;
call->a_block = block;
return block;
failed_free:
kfree(block);
failed:
nlmsvc_release_call(call);
return NULL;
}
/*
* Delete a block.
* It is the caller's responsibility to check whether the file
* can be closed hereafter.
*/
static int nlmsvc_unlink_block(struct nlm_block *block)
{
int status;
dprintk("lockd: unlinking block %p...\n", block);
/* Remove block from list */
status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl);
nlmsvc_remove_block(block);
return status;
}
static void nlmsvc_free_block(struct kref *kref)
{
struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
struct nlm_file *file = block->b_file;
dprintk("lockd: freeing block %p...\n", block);
/* Remove block from file's list of blocks */
mutex_lock(&file->f_mutex);
list_del_init(&block->b_flist);
mutex_unlock(&file->f_mutex);
nlmsvc_freegrantargs(block->b_call);
nlmsvc_release_call(block->b_call);
nlm_release_file(block->b_file);
kfree(block->b_fl);
kfree(block);
}
static void nlmsvc_release_block(struct nlm_block *block)
{
if (block != NULL)
kref_put(&block->b_count, nlmsvc_free_block);
}
/*
* Loop over all blocks and delete blocks held by
* a matching host.
*/
void nlmsvc_traverse_blocks(struct nlm_host *host,
struct nlm_file *file,
nlm_host_match_fn_t match)
{
struct nlm_block *block, *next;
restart:
mutex_lock(&file->f_mutex);
list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
if (!match(block->b_host, host))
continue;
/* Do not destroy blocks that are not on
* the global retry list - why? */
if (list_empty(&block->b_list))
continue;
kref_get(&block->b_count);
mutex_unlock(&file->f_mutex);
nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
goto restart;
}
mutex_unlock(&file->f_mutex);
}
/*
* Initialize arguments for GRANTED call. The nlm_rqst structure
* has been cleared already.
*/
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
call->a_args.lock.caller = utsname()->nodename;
call->a_args.lock.oh.len = lock->oh.len;
/* set default data area */
call->a_args.lock.oh.data = call->a_owner;
call->a_args.lock.svid = lock->fl.fl_pid;
if (lock->oh.len > NLMCLNT_OHSIZE) {
void *data = kmalloc(lock->oh.len, GFP_KERNEL);
if (!data)
return 0;
call->a_args.lock.oh.data = (u8 *) data;
}
memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
return 1;
}
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
if (call->a_args.lock.oh.data != call->a_owner)
kfree(call->a_args.lock.oh.data);
locks_release_private(&call->a_args.lock.fl);
}
/*
* Deferred lock request handling for non-blocking lock
*/
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
__be32 status = nlm_lck_denied_nolocks;
block->b_flags |= B_QUEUED;
nlmsvc_insert_block(block, NLM_TIMEOUT);
block->b_cache_req = &rqstp->rq_chandle;
if (rqstp->rq_chandle.defer) {
block->b_deferred_req =
rqstp->rq_chandle.defer(block->b_cache_req);
if (block->b_deferred_req != NULL)
status = nlm_drop_reply;
}
dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
block, block->b_flags, ntohl(status));
return status;
}
/*
* Attempt to establish a lock, and if it can't be granted, block it
* if required.
*/
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock, int wait,
struct nlm_cookie *cookie, int reclaim)
{
struct nlm_block *block = NULL;
int error;
__be32 ret;
dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
file->f_file->f_path.dentry->d_inode->i_sb->s_id,
file->f_file->f_path.dentry->d_inode->i_ino,
lock->fl.fl_type, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end,
wait);
/* Lock file against concurrent access */
mutex_lock(&file->f_mutex);
/* Get existing block (in case client is busy-waiting)
* or create new block
*/
block = nlmsvc_lookup_block(file, lock);
if (block == NULL) {
block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
ret = nlm_lck_denied_nolocks;
if (block == NULL)
goto out;
lock = &block->b_call->a_args.lock;
} else
lock->fl.fl_flags &= ~FL_SLEEP;
if (block->b_flags & B_QUEUED) {
dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
block, block->b_flags);
if (block->b_granted) {
nlmsvc_unlink_block(block);
ret = nlm_granted;
goto out;
}
if (block->b_flags & B_TIMED_OUT) {
nlmsvc_unlink_block(block);
ret = nlm_lck_denied;
goto out;
}
ret = nlm_drop_reply;
goto out;
}
if (locks_in_grace() && !reclaim) {
ret = nlm_lck_denied_grace_period;
goto out;
}
if (reclaim && !locks_in_grace()) {
ret = nlm_lck_denied_grace_period;
goto out;
}
if (!wait)
lock->fl.fl_flags &= ~FL_SLEEP;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
dprintk("lockd: vfs_lock_file returned %d\n", error);
switch (error) {
case 0:
ret = nlm_granted;
goto out;
case -EAGAIN:
/*
* If this is a blocking request for an
* already pending lock request then we need
* to put it back on lockd's block list
*/
if (wait)
break;
ret = nlm_lck_denied;
goto out;
case FILE_LOCK_DEFERRED:
if (wait)
break;
/* Filesystem lock operation is in progress;
add it to the queue waiting for the callback */
ret = nlmsvc_defer_lock_rqst(rqstp, block);
goto out;
case -EDEADLK:
ret = nlm_deadlock;
goto out;
default: /* includes ENOLCK */
ret = nlm_lck_denied_nolocks;
goto out;
}
ret = nlm_lck_blocked;
/* Append to list of blocked */
nlmsvc_insert_block(block, NLM_NEVER);
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
dprintk("lockd: nlmsvc_lock returned %u\n", ret);
return ret;
}
/*
* Test for presence of a conflicting lock.
*/
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock,
struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
struct nlm_block *block = NULL;
int error;
__be32 ret;
dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
file->f_file->f_path.dentry->d_inode->i_sb->s_id,
file->f_file->f_path.dentry->d_inode->i_ino,
lock->fl.fl_type,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
/* Get existing block (in case client is busy-waiting) */
block = nlmsvc_lookup_block(file, lock);
if (block == NULL) {
struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
if (conf == NULL)
return nlm_granted;
block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
if (block == NULL) {
kfree(conf);
return nlm_granted;
}
block->b_fl = conf;
}
if (block->b_flags & B_QUEUED) {
dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n",
block, block->b_flags, block->b_fl);
if (block->b_flags & B_TIMED_OUT) {
nlmsvc_unlink_block(block);
ret = nlm_lck_denied;
goto out;
}
if (block->b_flags & B_GOT_CALLBACK) {
nlmsvc_unlink_block(block);
if (block->b_fl != NULL
&& block->b_fl->fl_type != F_UNLCK) {
lock->fl = *block->b_fl;
goto conf_lock;
} else {
ret = nlm_granted;
goto out;
}
}
ret = nlm_drop_reply;
goto out;
}
if (locks_in_grace()) {
ret = nlm_lck_denied_grace_period;
goto out;
}
error = vfs_test_lock(file->f_file, &lock->fl);
if (error == FILE_LOCK_DEFERRED) {
ret = nlmsvc_defer_lock_rqst(rqstp, block);
goto out;
}
if (error) {
ret = nlm_lck_denied_nolocks;
goto out;
}
if (lock->fl.fl_type == F_UNLCK) {
ret = nlm_granted;
goto out;
}
conf_lock:
dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
lock->fl.fl_type, (long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
conflock->caller = "somehost"; /* FIXME */
conflock->len = strlen(conflock->caller);
conflock->oh.len = 0; /* don't return OH info */
conflock->svid = lock->fl.fl_pid;
conflock->fl.fl_type = lock->fl.fl_type;
conflock->fl.fl_start = lock->fl.fl_start;
conflock->fl.fl_end = lock->fl.fl_end;
ret = nlm_lck_denied;
out:
if (block)
nlmsvc_release_block(block);
return ret;
}
/*
* Remove a lock.
* This implies a CANCEL call: We send a GRANT_MSG, the client replies
* with a GRANT_RES call which gets lost, and calls UNLOCK immediately
* afterwards. In this case the block will still be there, and hence
* must be removed.
*/
__be32
nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
{
int error;
dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
file->f_file->f_path.dentry->d_inode->i_sb->s_id,
file->f_file->f_path.dentry->d_inode->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
/* First, cancel any lock that might be there */
nlmsvc_cancel_blocked(file, lock);
lock->fl.fl_type = F_UNLCK;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
}
/*
* Cancel a previously blocked request.
*
* A cancel request always overrides any grant that may currently
* be in progress.
* The calling procedure must check whether the file can be closed.
*/
__be32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
int status = 0;
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
file->f_file->f_path.dentry->d_inode->i_sb->s_id,
file->f_file->f_path.dentry->d_inode->i_ino,
lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
if (locks_in_grace())
return nlm_lck_denied_grace_period;
mutex_lock(&file->f_mutex);
block = nlmsvc_lookup_block(file, lock);
mutex_unlock(&file->f_mutex);
if (block != NULL) {
vfs_cancel_lock(block->b_file->f_file,
&block->b_call->a_args.lock.fl);
status = nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
}
return status ? nlm_lck_denied : nlm_granted;
}
/*
* This is a callback from the filesystem for VFS file lock requests.
* It will be used if fl_grant is defined and the filesystem can not
* respond to the request immediately.
* For GETLK request it will copy the reply to the nlm_block.
* For SETLK or SETLKW request it will get the local posix lock.
* In all cases it will move the block to the head of nlm_blocked q where
* nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
* deferred rpc for GETLK and SETLK.
*/
static void
nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
int result)
{
block->b_flags |= B_GOT_CALLBACK;
if (result == 0)
block->b_granted = 1;
else
block->b_flags |= B_TIMED_OUT;
if (conf) {
if (block->b_fl)
__locks_copy_lock(block->b_fl, conf);
}
}
static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
int result)
{
struct nlm_block *block;
int rc = -ENOENT;
spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
block, block->b_flags);
if (block->b_flags & B_QUEUED) {
if (block->b_flags & B_TIMED_OUT) {
rc = -ENOLCK;
break;
}
nlmsvc_update_deferred_block(block, conf, result);
} else if (result == 0)
block->b_granted = 1;
nlmsvc_insert_block_locked(block, 0);
svc_wake_up(block->b_daemon);
rc = 0;
break;
}
}
spin_unlock(&nlm_blocked_lock);
if (rc == -ENOENT)
printk(KERN_WARNING "lockd: grant for unknown block\n");
return rc;
}
/*
* Unblock a blocked lock request. This is a callback invoked from the
* VFS layer when a lock on which we blocked is removed.
*
* This function doesn't grant the blocked lock instantly, but rather moves
* the block to the head of nlm_blocked where it can be picked up by lockd.
*/
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
struct nlm_block *block;
dprintk("lockd: VFS unblock notification for block %p\n", fl);
spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
nlmsvc_insert_block_locked(block, 0);
spin_unlock(&nlm_blocked_lock);
svc_wake_up(block->b_daemon);
return;
}
}
spin_unlock(&nlm_blocked_lock);
printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
}
const struct lock_manager_operations nlmsvc_lock_operations = {
.fl_compare_owner = nlmsvc_same_owner,
.fl_notify = nlmsvc_notify_blocked,
.fl_grant = nlmsvc_grant_deferred,
};
/*
* Try to claim a lock that was previously blocked.
*
* Note that we use both the RPC_GRANTED_MSG call _and_ an async
* RPC thread when notifying the client. This seems like overkill...
* Here's why:
* - we don't want to use a synchronous RPC thread, otherwise
* we might find ourselves hanging on a dead portmapper.
* - Some lockd implementations (e.g. HP) don't react to
* RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
*/
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
dprintk("lockd: grant blocked lock %p\n", block);
kref_get(&block->b_count);
/* Unlink block request from list */
nlmsvc_unlink_block(block);
/* If b_granted is true this means we've been here before.
* Just retry the grant callback, possibly refreshing the RPC
* binding */
if (block->b_granted) {
nlm_rebind_host(block->b_host);
goto callback;
}
/* Try the lock operation again */
lock->fl.fl_flags |= FL_SLEEP;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
switch (error) {
case 0:
break;
case FILE_LOCK_DEFERRED:
dprintk("lockd: lock still blocked error %d\n", error);
nlmsvc_insert_block(block, NLM_NEVER);
nlmsvc_release_block(block);
return;
default:
printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
-error, __func__);
nlmsvc_insert_block(block, 10 * HZ);
nlmsvc_release_block(block);
return;
}
callback:
/* Lock was granted by VFS. */
dprintk("lockd: GRANTing blocked lock.\n");
block->b_granted = 1;
/* keep block on the list, but don't reattempt until the RPC
* completes or the submission fails
*/
nlmsvc_insert_block(block, NLM_NEVER);
/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
* will queue up a new one if this one times out
*/
error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
&nlmsvc_grant_ops);
/* RPC submission failed, wait a bit and retry */
if (error < 0)
nlmsvc_insert_block(block, 10 * HZ);
}
/*
* This is the callback from the RPC layer when the NLM_GRANTED_MSG
* RPC call has succeeded or timed out.
* Like all RPC callbacks, it is invoked by the rpciod process, so it
* better not sleep. Therefore, we put the blocked lock on the nlm_blocked
* chain once more in order to have it removed by lockd itself (which can
* then sleep on the file semaphore without disrupting e.g. the nfs client).
*/
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = data;
struct nlm_block *block = call->a_block;
unsigned long timeout;
dprintk("lockd: GRANT_MSG RPC callback\n");
spin_lock(&nlm_blocked_lock);
/* if the block is not on a list at this point then it has
* been invalidated. Don't try to requeue it.
*
* FIXME: it's possible that the block is removed from the list
* after this check but before the nlmsvc_insert_block. In that
* case it will be added back. Perhaps we need better locking
* for nlm_blocked?
*/
if (list_empty(&block->b_list))
goto out;
/* Technically, we should down the file semaphore here. Since we
* move the block towards the head of the queue only, no harm
* can be done, though. */
if (task->tk_status < 0) {
/* RPC error: Re-insert for retransmission */
timeout = 10 * HZ;
} else {
/* Call was successful, now wait for client callback */
timeout = 60 * HZ;
}
nlmsvc_insert_block_locked(block, timeout);
svc_wake_up(block->b_daemon);
out:
spin_unlock(&nlm_blocked_lock);
}
/*
* FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
* .rpc_release rpc_call_op
*/
static void nlmsvc_grant_release(void *data)
{
struct nlm_rqst *call = data;
nlmsvc_release_block(call->a_block);
}
static const struct rpc_call_ops nlmsvc_grant_ops = {
.rpc_call_done = nlmsvc_grant_callback,
.rpc_release = nlmsvc_grant_release,
};
/*
* We received a GRANT_RES callback. Try to find the corresponding
* block.
*/
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
struct nlm_block *block;
dprintk("grant_reply: looking for cookie %x, s=%d \n",
*(unsigned int *)(cookie->data), status);
if (!(block = nlmsvc_find_block(cookie)))
return;
if (block) {
if (status == nlm_lck_denied_grace_period) {
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
} else {
/* Lock is now held by client, or has been rejected.
* In both cases, the block should be removed. */
nlmsvc_unlink_block(block);
}
}
nlmsvc_release_block(block);
}
/* Helper function to handle retry of a deferred block.
* If it is a blocking lock, call grant_blocked.
* For a non-blocking lock or test lock, revisit the request.
*/
static void
retry_deferred_block(struct nlm_block *block)
{
if (!(block->b_flags & B_GOT_CALLBACK))
block->b_flags |= B_TIMED_OUT;
nlmsvc_insert_block(block, NLM_TIMEOUT);
dprintk("revisit block %p flags %d\n", block, block->b_flags);
if (block->b_deferred_req) {
block->b_deferred_req->revisit(block->b_deferred_req, 0);
block->b_deferred_req = NULL;
}
}
/*
* Retry all blocked locks that have been notified. This is where lockd
* picks up locks that can be granted, or grant notifications that must
* be retransmitted.
*/
unsigned long
nlmsvc_retry_blocked(void)
{
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block;
while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER)
break;
if (time_after(block->b_when, jiffies)) {
timeout = block->b_when - jiffies;
break;
}
dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
block, block->b_when);
if (block->b_flags & B_QUEUED) {
dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
block, block->b_granted, block->b_flags);
retry_deferred_block(block);
} else
nlmsvc_grant_blocked(block);
}
return timeout;
}
#ifdef RPC_DEBUG
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
/*
* We can get away with a static buffer because we're only
* called with BKL held.
*/
static char buf[2*NLM_MAXCOOKIELEN+1];
unsigned int i, len = sizeof(buf);
char *p = buf;
len--; /* allow for trailing \0 */
if (len < 3)
return "???";
for (i = 0 ; i < cookie->len ; i++) {
if (len < 2) {
strcpy(p-3, "...");
break;
}
sprintf(p, "%02x", cookie->data[i]);
p += 2;
len -= 2;
}
*p = '\0';
return buf;
}
#endif
| gpl-2.0 |
CyanogenMod/android_kernel_motorola_apq8084 | drivers/usb/gadget/atmel_usba_udc.c | 2088 | 49889 | /*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/atmel_usba_udc.h>
#include <linux/delay.h>
#include <linux/platform_data/atmel.h>
#include <asm/gpio.h>
#include "atmel_usba_udc.h"
static struct usba_udc the_udc;
static struct usba_ep *usba_ep;
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
static int queue_dbg_open(struct inode *inode, struct file *file)
{
struct usba_ep *ep = inode->i_private;
struct usba_request *req, *req_copy;
struct list_head *queue_data;
queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
if (!queue_data)
return -ENOMEM;
INIT_LIST_HEAD(queue_data);
spin_lock_irq(&ep->udc->lock);
list_for_each_entry(req, &ep->queue, queue) {
req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
if (!req_copy)
goto fail;
list_add_tail(&req_copy->queue, queue_data);
}
spin_unlock_irq(&ep->udc->lock);
file->private_data = queue_data;
return 0;
fail:
spin_unlock_irq(&ep->udc->lock);
list_for_each_entry_safe(req, req_copy, queue_data, queue) {
list_del(&req->queue);
kfree(req);
}
kfree(queue_data);
return -ENOMEM;
}
/*
* bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
*
* b: buffer address
* l: buffer length
* I/i: interrupt/no interrupt
* Z/z: zero/no zero
* S/s: short ok/short not ok
* s: status
* n: nr_packets
* F/f: submitted/not submitted to FIFO
* D/d: using/not using DMA
* L/l: last transaction/not last transaction
*/
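/*
* A line produced by the snprintf() below might look like (values are
* illustrative only):
*
*	c1a3f000 00000200 IzS  -115 fdl
*/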
static ssize_t queue_dbg_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct list_head *queue = file->private_data;
struct usba_request *req, *tmp_req;
size_t len, remaining, actual = 0;
char tmpbuf[38];
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EFAULT;
mutex_lock(&file_inode(file)->i_mutex);
list_for_each_entry_safe(req, tmp_req, queue, queue) {
len = snprintf(tmpbuf, sizeof(tmpbuf),
"%8p %08x %c%c%c %5d %c%c%c\n",
req->req.buf, req->req.length,
req->req.no_interrupt ? 'i' : 'I',
req->req.zero ? 'Z' : 'z',
req->req.short_not_ok ? 's' : 'S',
req->req.status,
req->submitted ? 'F' : 'f',
req->using_dma ? 'D' : 'd',
req->last_transaction ? 'L' : 'l');
len = min(len, sizeof(tmpbuf));
if (len > nbytes)
break;
list_del(&req->queue);
kfree(req);
remaining = __copy_to_user(buf, tmpbuf, len);
actual += len - remaining;
if (remaining)
break;
nbytes -= len;
buf += len;
}
mutex_unlock(&file_inode(file)->i_mutex);
return actual;
}
static int queue_dbg_release(struct inode *inode, struct file *file)
{
struct list_head *queue_data = file->private_data;
struct usba_request *req, *tmp_req;
list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
list_del(&req->queue);
kfree(req);
}
kfree(queue_data);
return 0;
}
static int regs_dbg_open(struct inode *inode, struct file *file)
{
struct usba_udc *udc;
unsigned int i;
u32 *data;
int ret = -ENOMEM;
mutex_lock(&inode->i_mutex);
udc = inode->i_private;
data = kmalloc(inode->i_size, GFP_KERNEL);
if (!data)
goto out;
spin_lock_irq(&udc->lock);
for (i = 0; i < inode->i_size / 4; i++)
data[i] = __raw_readl(udc->regs + i * 4);
spin_unlock_irq(&udc->lock);
file->private_data = data;
ret = 0;
out:
mutex_unlock(&inode->i_mutex);
return ret;
}
static ssize_t regs_dbg_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct inode *inode = file_inode(file);
int ret;
mutex_lock(&inode->i_mutex);
ret = simple_read_from_buffer(buf, nbytes, ppos,
file->private_data,
file_inode(file)->i_size);
mutex_unlock(&inode->i_mutex);
return ret;
}
static int regs_dbg_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
const struct file_operations queue_dbg_fops = {
.owner = THIS_MODULE,
.open = queue_dbg_open,
.llseek = no_llseek,
.read = queue_dbg_read,
.release = queue_dbg_release,
};
const struct file_operations regs_dbg_fops = {
.owner = THIS_MODULE,
.open = regs_dbg_open,
.llseek = generic_file_llseek,
.read = regs_dbg_read,
.release = regs_dbg_release,
};
static void usba_ep_init_debugfs(struct usba_udc *udc,
struct usba_ep *ep)
{
struct dentry *ep_root;
ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
if (!ep_root)
goto err_root;
ep->debugfs_dir = ep_root;
ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
ep, &queue_dbg_fops);
if (!ep->debugfs_queue)
goto err_queue;
if (ep->can_dma) {
ep->debugfs_dma_status
= debugfs_create_u32("dma_status", 0400, ep_root,
&ep->last_dma_status);
if (!ep->debugfs_dma_status)
goto err_dma_status;
}
if (ep_is_control(ep)) {
ep->debugfs_state
= debugfs_create_u32("state", 0400, ep_root,
&ep->state);
if (!ep->debugfs_state)
goto err_state;
}
return;
err_state:
if (ep->can_dma)
debugfs_remove(ep->debugfs_dma_status);
err_dma_status:
debugfs_remove(ep->debugfs_queue);
err_queue:
debugfs_remove(ep_root);
err_root:
dev_err(&ep->udc->pdev->dev,
"failed to create debugfs directory for %s\n", ep->ep.name);
}
static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{
debugfs_remove(ep->debugfs_queue);
debugfs_remove(ep->debugfs_dma_status);
debugfs_remove(ep->debugfs_state);
debugfs_remove(ep->debugfs_dir);
ep->debugfs_dma_status = NULL;
ep->debugfs_dir = NULL;
}
static void usba_init_debugfs(struct usba_udc *udc)
{
struct dentry *root, *regs;
struct resource *regs_resource;
root = debugfs_create_dir(udc->gadget.name, NULL);
if (IS_ERR(root) || !root)
goto err_root;
udc->debugfs_root = root;
regs = debugfs_create_file("regs", 0400, root, udc, ®s_dbg_fops);
if (!regs)
goto err_regs;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
CTRL_IOMEM_ID);
regs->d_inode->i_size = resource_size(regs_resource);
udc->debugfs_regs = regs;
usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
return;
err_regs:
debugfs_remove(root);
err_root:
udc->debugfs_root = NULL;
dev_err(&udc->pdev->dev, "debugfs is not available\n");
}
static void usba_cleanup_debugfs(struct usba_udc *udc)
{
usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
debugfs_remove(udc->debugfs_regs);
debugfs_remove(udc->debugfs_root);
udc->debugfs_regs = NULL;
udc->debugfs_root = NULL;
}
#else
static inline void usba_ep_init_debugfs(struct usba_udc *udc,
struct usba_ep *ep)
{
}
static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
{
}
static inline void usba_init_debugfs(struct usba_udc *udc)
{
}
static inline void usba_cleanup_debugfs(struct usba_udc *udc)
{
}
#endif
static int vbus_is_present(struct usba_udc *udc)
{
if (gpio_is_valid(udc->vbus_pin))
return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
/* No Vbus detection: Assume always present */
return 1;
}
#if defined(CONFIG_ARCH_AT91SAM9RL)
#include <mach/at91_pmc.h>
static void toggle_bias(int is_on)
{
unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
if (is_on)
at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
else
at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
}
#else
static void toggle_bias(int is_on)
{
}
#endif /* CONFIG_ARCH_AT91SAM9RL */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
unsigned int transaction_len;
transaction_len = req->req.length - req->req.actual;
req->last_transaction = 1;
if (transaction_len > ep->ep.maxpacket) {
transaction_len = ep->ep.maxpacket;
req->last_transaction = 0;
} else if (transaction_len == ep->ep.maxpacket && req->req.zero)
req->last_transaction = 0;
DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
ep->ep.name, req, transaction_len,
req->last_transaction ? ", done" : "");
memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
req->req.actual += transaction_len;
}
static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
ep->ep.name, req, req->req.length);
req->req.actual = 0;
req->submitted = 1;
if (req->using_dma) {
if (req->req.length == 0) {
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
return;
}
if (req->req.zero)
usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
else
usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
usba_dma_writel(ep, ADDRESS, req->req.dma);
usba_dma_writel(ep, CONTROL, req->ctrl);
} else {
next_fifo_transaction(ep, req);
if (req->last_transaction) {
usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
} else {
usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
}
}
}
static void submit_next_request(struct usba_ep *ep)
{
struct usba_request *req;
if (list_empty(&ep->queue)) {
usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
return;
}
req = list_entry(ep->queue.next, struct usba_request, queue);
if (!req->submitted)
submit_request(ep, req);
}
static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
ep->state = STATUS_STAGE_IN;
usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
static void receive_data(struct usba_ep *ep)
{
struct usba_udc *udc = ep->udc;
struct usba_request *req;
unsigned long status;
unsigned int bytecount, nr_busy;
int is_complete = 0;
status = usba_ep_readl(ep, STA);
nr_busy = USBA_BFEXT(BUSY_BANKS, status);
DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
while (nr_busy > 0) {
if (list_empty(&ep->queue)) {
usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
break;
}
req = list_entry(ep->queue.next,
struct usba_request, queue);
bytecount = USBA_BFEXT(BYTE_COUNT, status);
if (status & (1 << 31))
is_complete = 1;
if (req->req.actual + bytecount >= req->req.length) {
is_complete = 1;
bytecount = req->req.length - req->req.actual;
}
memcpy_fromio(req->req.buf + req->req.actual,
ep->fifo, bytecount);
req->req.actual += bytecount;
usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
if (is_complete) {
DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
req->req.status = 0;
list_del_init(&req->queue);
usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
spin_unlock(&udc->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&udc->lock);
}
status = usba_ep_readl(ep, STA);
nr_busy = USBA_BFEXT(BUSY_BANKS, status);
if (is_complete && ep_is_control(ep)) {
send_status(udc, ep);
break;
}
}
}
static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
struct usba_udc *udc = ep->udc;
WARN_ON(!list_empty(&req->queue));
if (req->req.status == -EINPROGRESS)
req->req.status = status;
if (req->using_dma)
usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in);
DBG(DBG_GADGET | DBG_REQ,
"%s: req %p complete: status %d, actual %u\n",
ep->ep.name, req, req->req.status, req->req.actual);
spin_unlock(&udc->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&udc->lock);
}
static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
struct usba_request *req, *tmp_req;
list_for_each_entry_safe(req, tmp_req, list, queue) {
list_del_init(&req->queue);
request_complete(ep, req, status);
}
}
static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
unsigned long flags, ept_cfg, maxpacket;
unsigned int nr_trans;
DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
|| ep->index == 0
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| maxpacket == 0
|| maxpacket > ep->fifo_size) {
DBG(DBG_ERR, "ep_enable: Invalid argument");
return -EINVAL;
}
ep->is_isoc = 0;
ep->is_in = 0;
if (maxpacket <= 8)
ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
else
/* LSB is bit 1, not 0 */
ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
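/*
* e.g. a 512-byte bulk endpoint: fls(511) - 3 == 9 - 3 == 6, and the
* controller presumably decodes EPT_SIZE value 6 as 8 << 6 == 512.
*/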
DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
ep->ep.name, ept_cfg, maxpacket);
if (usb_endpoint_dir_in(desc)) {
ep->is_in = 1;
ept_cfg |= USBA_EPT_DIR_IN;
}
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
break;
case USB_ENDPOINT_XFER_ISOC:
if (!ep->can_isoc) {
DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
ep->ep.name);
return -EINVAL;
}
/*
* Bits 11:12 specify number of _additional_
* transactions per microframe.
*/
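/*
* Example: a high-bandwidth iso endpoint advertising wMaxPacketSize
* 0x1400 encodes 2 additional transactions, so the computation below
* yields ((0x1400 >> 11) & 3) + 1 == 3.
*/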
nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
if (nr_trans > 3)
return -EINVAL;
ep->is_isoc = 1;
ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
/*
* Do triple-buffering on high-bandwidth iso endpoints.
*/
if (nr_trans > 1 && ep->nr_banks == 3)
ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
else
ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
break;
case USB_ENDPOINT_XFER_BULK:
ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
break;
case USB_ENDPOINT_XFER_INT:
ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
break;
}
spin_lock_irqsave(&ep->udc->lock, flags);
ep->ep.desc = desc;
ep->ep.maxpacket = maxpacket;
usba_ep_writel(ep, CFG, ept_cfg);
usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
if (ep->can_dma) {
u32 ctrl;
usba_writel(udc, INT_ENB,
(usba_readl(udc, INT_ENB)
| USBA_BF(EPT_INT, 1 << ep->index)
| USBA_BF(DMA_INT, 1 << ep->index)));
ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
usba_ep_writel(ep, CTL_ENB, ctrl);
} else {
usba_writel(udc, INT_ENB,
(usba_readl(udc, INT_ENB)
| USBA_BF(EPT_INT, 1 << ep->index)));
}
spin_unlock_irqrestore(&udc->lock, flags);
DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
(unsigned long)usba_ep_readl(ep, CFG));
DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
(unsigned long)usba_readl(udc, INT_ENB));
return 0;
}
static int usba_ep_disable(struct usb_ep *_ep)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
LIST_HEAD(req_list);
unsigned long flags;
DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
spin_lock_irqsave(&udc->lock, flags);
if (!ep->ep.desc) {
spin_unlock_irqrestore(&udc->lock, flags);
/* REVISIT because this driver disables endpoints in
* reset_all_endpoints() before calling disconnect(),
* most gadget drivers would trigger this non-error ...
*/
if (udc->gadget.speed != USB_SPEED_UNKNOWN)
DBG(DBG_ERR, "ep_disable: %s not enabled\n",
ep->ep.name);
return -EINVAL;
}
ep->ep.desc = NULL;
list_splice_init(&ep->queue, &req_list);
if (ep->can_dma) {
usba_dma_writel(ep, CONTROL, 0);
usba_dma_writel(ep, ADDRESS, 0);
usba_dma_readl(ep, STATUS);
}
usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
usba_writel(udc, INT_ENB,
usba_readl(udc, INT_ENB)
& ~USBA_BF(EPT_INT, 1 << ep->index));
request_complete_list(ep, &req_list, -ESHUTDOWN);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static struct usb_request *
usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
struct usba_request *req;
DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
return NULL;
INIT_LIST_HEAD(&req->queue);
return &req->req;
}
static void
usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct usba_request *req = to_usba_req(_req);
DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
kfree(req);
}
static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
struct usba_request *req, gfp_t gfp_flags)
{
unsigned long flags;
int ret;
DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
ep->ep.name, req->req.length, req->req.dma,
req->req.zero ? 'Z' : 'z',
req->req.short_not_ok ? 'S' : 's',
req->req.no_interrupt ? 'I' : 'i');
if (req->req.length > 0x10000) {
/* Lengths from 0 to 65536 (inclusive) are supported */
DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
return -EINVAL;
}
ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in);
if (ret)
return ret;
req->using_dma = 1;
req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
| USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
if (ep->is_in)
req->ctrl |= USBA_DMA_END_BUF_EN;
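/*
 * Pre-compute the DMA control word: transfer length, channel enable
 * and end-of-buffer/end-of-transfer interrupt enables.  END_BUF_EN
 * is only set for IN endpoints, where exhausting the buffer should
 * terminate the transfer.
 */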
/*
* Add this request to the queue and submit for DMA if
* possible. Check if we're still alive first -- we may have
* received a reset since last time we checked.
*/
ret = -ESHUTDOWN;
spin_lock_irqsave(&udc->lock, flags);
if (ep->ep.desc) {
if (list_empty(&ep->queue))
submit_request(ep, req);
list_add_tail(&req->queue, &ep->queue);
ret = 0;
}
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
struct usba_request *req = to_usba_req(_req);
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
unsigned long flags;
int ret;
DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
ep->ep.name, req, _req->length);
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
!ep->ep.desc)
return -ESHUTDOWN;
req->submitted = 0;
req->using_dma = 0;
req->last_transaction = 0;
_req->status = -EINPROGRESS;
_req->actual = 0;
if (ep->can_dma)
return queue_dma(udc, ep, req, gfp_flags);
/* May have received a reset since last time we checked */
ret = -ESHUTDOWN;
spin_lock_irqsave(&udc->lock, flags);
if (ep->ep.desc) {
list_add_tail(&req->queue, &ep->queue);
if ((!ep_is_control(ep) && ep->is_in) ||
(ep_is_control(ep)
&& (ep->state == DATA_STAGE_IN
|| ep->state == STATUS_STAGE_IN)))
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
else
usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
ret = 0;
}
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
static void
usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
{
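/*
 * The BUF_LEN field of the DMA status register holds the number of
 * bytes still left in the buffer, so the difference from the
 * requested length is the amount actually transferred.
 */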
req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
}
static int stop_dma(struct usba_ep *ep, u32 *pstatus)
{
unsigned int timeout;
u32 status;
/*
* Stop the DMA controller. When writing both CH_EN
* and LINK to 0, the other bits are not affected.
*/
usba_dma_writel(ep, CONTROL, 0);
/* Wait for the FIFO to empty */
for (timeout = 40; timeout; --timeout) {
status = usba_dma_readl(ep, STATUS);
if (!(status & USBA_DMA_CH_EN))
break;
udelay(1);
}
if (pstatus)
*pstatus = status;
if (timeout == 0) {
dev_err(&ep->udc->pdev->dev,
"%s: timed out waiting for DMA FIFO to empty\n",
ep->ep.name);
return -ETIMEDOUT;
}
return 0;
}
static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
struct usba_request *req = to_usba_req(_req);
unsigned long flags;
u32 status;
DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
ep->ep.name, req);
spin_lock_irqsave(&udc->lock, flags);
if (req->using_dma) {
/*
* If this request is currently being transferred,
* stop the DMA controller and reset the FIFO.
*/
if (ep->queue.next == &req->queue) {
status = usba_dma_readl(ep, STATUS);
if (status & USBA_DMA_CH_EN)
stop_dma(ep, &status);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
ep->last_dma_status = status;
#endif
usba_writel(udc, EPT_RST, 1 << ep->index);
usba_update_req(ep, req, status);
}
}
/*
* Errors should stop the queue from advancing until the
* completion function returns.
*/
list_del_init(&req->queue);
request_complete(ep, req, -ECONNRESET);
/* Process the next request if any */
submit_next_request(ep);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
unsigned long flags;
int ret = 0;
DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
value ? "set" : "clear");
if (!ep->ep.desc) {
DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
ep->ep.name);
return -ENODEV;
}
if (ep->is_isoc) {
DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
ep->ep.name);
return -ENOTTY;
}
spin_lock_irqsave(&udc->lock, flags);
/*
* We can't halt IN endpoints while there is still data to be
* transferred
*/
if (!list_empty(&ep->queue)
|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
& USBA_BF(BUSY_BANKS, -1L))))) {
ret = -EAGAIN;
} else {
if (value)
usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
else
usba_ep_writel(ep, CLR_STA,
USBA_FORCE_STALL | USBA_TOGGLE_CLR);
usba_ep_readl(ep, STA);
}
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
static int usba_ep_fifo_status(struct usb_ep *_ep)
{
struct usba_ep *ep = to_usba_ep(_ep);
return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
}
static void usba_ep_fifo_flush(struct usb_ep *_ep)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
usba_writel(udc, EPT_RST, 1 << ep->index);
}
static const struct usb_ep_ops usba_ep_ops = {
.enable = usba_ep_enable,
.disable = usba_ep_disable,
.alloc_request = usba_ep_alloc_request,
.free_request = usba_ep_free_request,
.queue = usba_ep_queue,
.dequeue = usba_ep_dequeue,
.set_halt = usba_ep_set_halt,
.fifo_status = usba_ep_fifo_status,
.fifo_flush = usba_ep_fifo_flush,
};
static int usba_udc_get_frame(struct usb_gadget *gadget)
{
struct usba_udc *udc = to_usba_udc(gadget);
return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
}
static int usba_udc_wakeup(struct usb_gadget *gadget)
{
struct usba_udc *udc = to_usba_udc(gadget);
unsigned long flags;
u32 ctrl;
int ret = -EINVAL;
spin_lock_irqsave(&udc->lock, flags);
if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
ctrl = usba_readl(udc, CTRL);
usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
ret = 0;
}
spin_unlock_irqrestore(&udc->lock, flags);
return ret;
}
static int
usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
struct usba_udc *udc = to_usba_udc(gadget);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
if (is_selfpowered)
udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
else
udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int atmel_usba_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static const struct usb_gadget_ops usba_udc_ops = {
.get_frame = usba_udc_get_frame,
.wakeup = usba_udc_wakeup,
.set_selfpowered = usba_udc_set_selfpowered,
.udc_start = atmel_usba_start,
.udc_stop = atmel_usba_stop,
};
static struct usb_endpoint_descriptor usba_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = 0,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
.wMaxPacketSize = cpu_to_le16(64),
/* FIXME: I have no idea what to put here */
.bInterval = 1,
};
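/*
 * The gadget device is embedded in the statically allocated
 * struct usba_udc, so there is nothing to free here; the driver
 * core merely requires a release callback to be present.
 */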
static void nop_release(struct device *dev)
{
}
static struct usba_udc the_udc = {
.gadget = {
.ops = &usba_udc_ops,
.ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
.max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
.dev = {
.init_name = "gadget",
.release = nop_release,
},
},
};
/*
* Called with interrupts disabled and udc->lock held.
*/
static void reset_all_endpoints(struct usba_udc *udc)
{
struct usba_ep *ep;
struct usba_request *req, *tmp_req;
usba_writel(udc, EPT_RST, ~0UL);
ep = to_usba_ep(udc->gadget.ep0);
list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
list_del_init(&req->queue);
request_complete(ep, req, -ECONNRESET);
}
/* NOTE: normally, the next call to the gadget driver is in
* charge of disabling endpoints... usually disconnect().
* The exception would be entering a high speed test mode.
*
* FIXME remove this code ... and retest thoroughly.
*/
list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
if (ep->ep.desc) {
spin_unlock(&udc->lock);
usba_ep_disable(&ep->ep);
spin_lock(&udc->lock);
}
}
}
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
{
struct usba_ep *ep;
if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
return to_usba_ep(udc->gadget.ep0);
list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
u8 bEndpointAddress;
if (!ep->ep.desc)
continue;
bEndpointAddress = ep->ep.desc->bEndpointAddress;
if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
continue;
if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
== (wIndex & USB_ENDPOINT_NUMBER_MASK))
return ep;
}
return NULL;
}
/* Called with interrupts disabled and udc->lock held */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
ep->state = WAIT_FOR_SETUP;
}
static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
{
if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
return 1;
return 0;
}
static inline void set_address(struct usba_udc *udc, unsigned int addr)
{
u32 regval;
DBG(DBG_BUS, "setting address %u...\n", addr);
regval = usba_readl(udc, CTRL);
regval = USBA_BFINS(DEV_ADDR, addr, regval);
usba_writel(udc, CTRL, regval);
}
static int do_test_mode(struct usba_udc *udc)
{
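/*
 * Standard 53-byte Test_Packet pattern defined by the USB 2.0
 * specification; the J/K comments below describe the line states
 * the pattern is meant to produce on the bus.
 */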
static const char test_packet_buffer[] = {
/* JKJKJKJK * 9 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* JJKKJJKK * 8 */
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
/* JJJJKKKK * 8 */
0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
/* JJJJJJJKKKKKKK * 8 */
0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
/* JJJJJJJK * 8 */
0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
/* {JKKKKKKK * 10}, JK */
0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
};
struct usba_ep *ep;
struct device *dev = &udc->pdev->dev;
int test_mode;
test_mode = udc->test_mode;
/* Start from a clean slate */
reset_all_endpoints(udc);
switch (test_mode) {
case 0x0100:
/* Test_J */
usba_writel(udc, TST, USBA_TST_J_MODE);
dev_info(dev, "Entering Test_J mode...\n");
break;
case 0x0200:
/* Test_K */
usba_writel(udc, TST, USBA_TST_K_MODE);
dev_info(dev, "Entering Test_K mode...\n");
break;
case 0x0300:
/*
* Test_SE0_NAK: Force high-speed mode and set up ep0
* for Bulk IN transfers
*/
ep = &usba_ep[0];
usba_writel(udc, TST,
USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
usba_ep_writel(ep, CFG,
USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
| USBA_EPT_DIR_IN
| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
| USBA_BF(BK_NUMBER, 1));
if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
set_protocol_stall(udc, ep);
dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
} else {
usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
dev_info(dev, "Entering Test_SE0_NAK mode...\n");
}
break;
case 0x0400:
/* Test_Packet */
ep = &usba_ep[0];
usba_ep_writel(ep, CFG,
USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
| USBA_EPT_DIR_IN
| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
| USBA_BF(BK_NUMBER, 1));
if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
set_protocol_stall(udc, ep);
dev_err(dev, "Test_Packet: ep0 not mapped\n");
} else {
usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
usba_writel(udc, TST, USBA_TST_PKT_MODE);
memcpy_toio(ep->fifo, test_packet_buffer,
sizeof(test_packet_buffer));
usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
dev_info(dev, "Entering Test_Packet mode...\n");
}
break;
default:
dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
return -EINVAL;
}
return 0;
}
/* Avoid overly long expressions */
static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
{
if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
return true;
return false;
}
static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
{
if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
return true;
return false;
}
static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
{
if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
return true;
return false;
}
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
struct usb_ctrlrequest *crq)
{
int retval = 0;
switch (crq->bRequest) {
case USB_REQ_GET_STATUS: {
u16 status;
if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
status = cpu_to_le16(udc->devstatus);
} else if (crq->bRequestType
== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
status = cpu_to_le16(0);
} else if (crq->bRequestType
== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
struct usba_ep *target;
target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
if (!target)
goto stall;
status = 0;
if (is_stalled(udc, target))
status |= cpu_to_le16(1);
} else
goto delegate;
/* Write directly to the FIFO. No queueing is done. */
if (crq->wLength != cpu_to_le16(sizeof(status)))
goto stall;
ep->state = DATA_STAGE_IN;
__raw_writew(status, ep->fifo);
usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
break;
}
case USB_REQ_CLEAR_FEATURE: {
if (crq->bRequestType == USB_RECIP_DEVICE) {
if (feature_is_dev_remote_wakeup(crq))
udc->devstatus
&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
else
/* Can't CLEAR_FEATURE TEST_MODE */
goto stall;
} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
struct usba_ep *target;
if (crq->wLength != cpu_to_le16(0)
|| !feature_is_ep_halt(crq))
goto stall;
target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
if (!target)
goto stall;
usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
if (target->index != 0)
usba_ep_writel(target, CLR_STA,
USBA_TOGGLE_CLR);
} else {
goto delegate;
}
send_status(udc, ep);
break;
}
case USB_REQ_SET_FEATURE: {
if (crq->bRequestType == USB_RECIP_DEVICE) {
if (feature_is_dev_test_mode(crq)) {
send_status(udc, ep);
ep->state = STATUS_STAGE_TEST;
udc->test_mode = le16_to_cpu(crq->wIndex);
return 0;
} else if (feature_is_dev_remote_wakeup(crq)) {
udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
} else {
goto stall;
}
} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
struct usba_ep *target;
if (crq->wLength != cpu_to_le16(0)
|| !feature_is_ep_halt(crq))
goto stall;
target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
if (!target)
goto stall;
usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
} else
goto delegate;
send_status(udc, ep);
break;
}
case USB_REQ_SET_ADDRESS:
if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
goto delegate;
set_address(udc, le16_to_cpu(crq->wValue));
send_status(udc, ep);
ep->state = STATUS_STAGE_ADDR;
break;
default:
delegate:
spin_unlock(&udc->lock);
retval = udc->driver->setup(&udc->gadget, crq);
spin_lock(&udc->lock);
}
return retval;
stall:
pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
"halting endpoint...\n",
ep->ep.name, crq->bRequestType, crq->bRequest,
le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
le16_to_cpu(crq->wLength));
set_protocol_stall(udc, ep);
return -1;
}
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
struct usba_request *req;
u32 epstatus;
u32 epctrl;
restart:
epstatus = usba_ep_readl(ep, STA);
epctrl = usba_ep_readl(ep, CTL);
DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
ep->ep.name, ep->state, epstatus, epctrl);
req = NULL;
if (!list_empty(&ep->queue))
req = list_entry(ep->queue.next,
struct usba_request, queue);
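/*
 * TX_PK_RDY interrupts are only enabled while a request is queued,
 * so req is assumed to be non-NULL in the branch below.
 */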
if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
if (req->submitted)
next_fifo_transaction(ep, req);
else
submit_request(ep, req);
if (req->last_transaction) {
usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
goto restart;
}
if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);
switch (ep->state) {
case DATA_STAGE_IN:
usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
ep->state = STATUS_STAGE_OUT;
break;
case STATUS_STAGE_ADDR:
/* Activate our new address */
usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
| USBA_FADDR_EN));
usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
ep->state = WAIT_FOR_SETUP;
break;
case STATUS_STAGE_IN:
if (req) {
list_del_init(&req->queue);
request_complete(ep, req, 0);
submit_next_request(ep);
}
usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
ep->state = WAIT_FOR_SETUP;
break;
case STATUS_STAGE_TEST:
usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
ep->state = WAIT_FOR_SETUP;
if (do_test_mode(udc))
set_protocol_stall(udc, ep);
break;
default:
pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
"halting endpoint...\n",
ep->ep.name, ep->state);
set_protocol_stall(udc, ep);
break;
}
goto restart;
}
if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
switch (ep->state) {
case STATUS_STAGE_OUT:
usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
if (req) {
list_del_init(&req->queue);
request_complete(ep, req, 0);
}
ep->state = WAIT_FOR_SETUP;
break;
case DATA_STAGE_OUT:
receive_data(ep);
break;
default:
usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
"halting endpoint...\n",
ep->ep.name, ep->state);
set_protocol_stall(udc, ep);
break;
}
goto restart;
}
if (epstatus & USBA_RX_SETUP) {
union {
struct usb_ctrlrequest crq;
unsigned long data[2];
} crq;
unsigned int pkt_len;
int ret;
if (ep->state != WAIT_FOR_SETUP) {
/*
* Didn't expect a SETUP packet at this
* point. Clean up any pending requests (which
* may be successful).
*/
int status = -EPROTO;
/*
* RXRDY and TXCOMP are dropped when SETUP
* packets arrive. Just pretend we received
* the status packet.
*/
if (ep->state == STATUS_STAGE_OUT
|| ep->state == STATUS_STAGE_IN) {
usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
status = 0;
}
if (req) {
list_del_init(&req->queue);
request_complete(ep, req, status);
}
}
pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
DBG(DBG_HW, "Packet length: %u\n", pkt_len);
if (pkt_len != sizeof(crq)) {
pr_warning("udc: Invalid packet length %u "
"(expected %zu)\n", pkt_len, sizeof(crq));
set_protocol_stall(udc, ep);
return;
}
DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
memcpy_fromio(crq.data, ep->fifo, sizeof(crq));
/* Free up one bank in the FIFO so that we can
* generate or receive a reply right away. */
usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);
/* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
ep->state, crq.crq.bRequestType,
crq.crq.bRequest); */
if (crq.crq.bRequestType & USB_DIR_IN) {
/*
* The USB 2.0 spec states that "if wLength is
* zero, there is no data transfer phase."
* However, testusb #14 seems to actually
* expect a data phase even if wLength = 0...
*/
ep->state = DATA_STAGE_IN;
} else {
if (crq.crq.wLength != cpu_to_le16(0))
ep->state = DATA_STAGE_OUT;
else
ep->state = STATUS_STAGE_IN;
}
ret = -1;
if (ep->index == 0)
ret = handle_ep0_setup(udc, ep, &crq.crq);
else {
spin_unlock(&udc->lock);
ret = udc->driver->setup(&udc->gadget, &crq.crq);
spin_lock(&udc->lock);
}
DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
crq.crq.bRequestType, crq.crq.bRequest,
le16_to_cpu(crq.crq.wLength), ep->state, ret);
if (ret < 0) {
/* Let the host know that we failed */
set_protocol_stall(udc, ep);
}
}
}
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
struct usba_request *req;
u32 epstatus;
u32 epctrl;
epstatus = usba_ep_readl(ep, STA);
epctrl = usba_ep_readl(ep, CTL);
DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);
while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);
if (list_empty(&ep->queue)) {
dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
return;
}
req = list_entry(ep->queue.next, struct usba_request, queue);
if (req->using_dma) {
/* Send a zero-length packet */
usba_ep_writel(ep, SET_STA,
USBA_TX_PK_RDY);
usba_ep_writel(ep, CTL_DIS,
USBA_TX_PK_RDY);
list_del_init(&req->queue);
submit_next_request(ep);
request_complete(ep, req, 0);
} else {
if (req->submitted)
next_fifo_transaction(ep, req);
else
submit_request(ep, req);
if (req->last_transaction) {
list_del_init(&req->queue);
submit_next_request(ep);
request_complete(ep, req, 0);
}
}
epstatus = usba_ep_readl(ep, STA);
epctrl = usba_ep_readl(ep, CTL);
}
if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
receive_data(ep);
usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
}
}
static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
{
struct usba_request *req;
u32 status, control, pending;
status = usba_dma_readl(ep, STATUS);
control = usba_dma_readl(ep, CONTROL);
#ifdef CONFIG_USB_GADGET_DEBUG_FS
ep->last_dma_status = status;
#endif
pending = status & control;
DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);
if (status & USBA_DMA_CH_EN) {
dev_err(&udc->pdev->dev,
"DMA_CH_EN is set after transfer is finished!\n");
dev_err(&udc->pdev->dev,
"status=%#08x, pending=%#08x, control=%#08x\n",
status, pending, control);
/*
* try to pretend nothing happened. We might have to
* do something here...
*/
}
if (list_empty(&ep->queue))
/* Might happen if a reset comes along at the right moment */
return;
if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
req = list_entry(ep->queue.next, struct usba_request, queue);
usba_update_req(ep, req, status);
list_del_init(&req->queue);
submit_next_request(ep);
request_complete(ep, req, 0);
}
}
static irqreturn_t usba_udc_irq(int irq, void *devid)
{
struct usba_udc *udc = devid;
u32 status;
u32 dma_status;
u32 ep_status;
spin_lock(&udc->lock);
status = usba_readl(udc, INT_STA);
DBG(DBG_INT, "irq, status=%#08x\n", status);
if (status & USBA_DET_SUSPEND) {
toggle_bias(0);
usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
DBG(DBG_BUS, "Suspend detected\n");
if (udc->gadget.speed != USB_SPEED_UNKNOWN
&& udc->driver && udc->driver->suspend) {
spin_unlock(&udc->lock);
udc->driver->suspend(&udc->gadget);
spin_lock(&udc->lock);
}
}
if (status & USBA_WAKE_UP) {
toggle_bias(1);
usba_writel(udc, INT_CLR, USBA_WAKE_UP);
DBG(DBG_BUS, "Wake Up CPU detected\n");
}
if (status & USBA_END_OF_RESUME) {
usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
DBG(DBG_BUS, "Resume detected\n");
if (udc->gadget.speed != USB_SPEED_UNKNOWN
&& udc->driver && udc->driver->resume) {
spin_unlock(&udc->lock);
udc->driver->resume(&udc->gadget);
spin_lock(&udc->lock);
}
}
dma_status = USBA_BFEXT(DMA_INT, status);
if (dma_status) {
int i;
for (i = 1; i < USBA_NR_ENDPOINTS; i++)
if (dma_status & (1 << i))
usba_dma_irq(udc, &usba_ep[i]);
}
ep_status = USBA_BFEXT(EPT_INT, status);
if (ep_status) {
int i;
for (i = 0; i < USBA_NR_ENDPOINTS; i++)
if (ep_status & (1 << i)) {
if (ep_is_control(&usba_ep[i]))
usba_control_irq(udc, &usba_ep[i]);
else
usba_ep_irq(udc, &usba_ep[i]);
}
}
if (status & USBA_END_OF_RESET) {
struct usba_ep *ep0;
usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
reset_all_endpoints(udc);
if (udc->gadget.speed != USB_SPEED_UNKNOWN
&& udc->driver->disconnect) {
udc->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock(&udc->lock);
udc->driver->disconnect(&udc->gadget);
spin_lock(&udc->lock);
}
if (status & USBA_HIGH_SPEED)
udc->gadget.speed = USB_SPEED_HIGH;
else
udc->gadget.speed = USB_SPEED_FULL;
DBG(DBG_BUS, "%s bus reset detected\n",
usb_speed_string(udc->gadget.speed));
ep0 = &usba_ep[0];
ep0->ep.desc = &usba_ep0_desc;
ep0->state = WAIT_FOR_SETUP;
usba_ep_writel(ep0, CFG,
(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
usba_ep_writel(ep0, CTL_ENB,
USBA_EPT_ENABLE | USBA_RX_SETUP);
usba_writel(udc, INT_ENB,
(usba_readl(udc, INT_ENB)
| USBA_BF(EPT_INT, 1)
| USBA_DET_SUSPEND
| USBA_END_OF_RESUME));
/*
* Unclear why we hit this irregularly, e.g. in usbtest,
* but it's clearly harmless...
*/
if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
dev_dbg(&udc->pdev->dev,
"ODD: EP0 configuration is invalid!\n");
}
spin_unlock(&udc->lock);
return IRQ_HANDLED;
}
static irqreturn_t usba_vbus_irq(int irq, void *devid)
{
struct usba_udc *udc = devid;
int vbus;
/* debounce */
udelay(10);
spin_lock(&udc->lock);
/* May happen if Vbus pin toggles during probe() */
if (!udc->driver)
goto out;
vbus = vbus_is_present(udc);
if (vbus != udc->vbus_prev) {
if (vbus) {
toggle_bias(1);
usba_writel(udc, CTRL, USBA_ENABLE_MASK);
usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
} else {
udc->gadget.speed = USB_SPEED_UNKNOWN;
reset_all_endpoints(udc);
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
if (udc->driver->disconnect) {
spin_unlock(&udc->lock);
udc->driver->disconnect(&udc->gadget);
spin_lock(&udc->lock);
}
}
udc->vbus_prev = vbus;
}
out:
spin_unlock(&udc->lock);
return IRQ_HANDLED;
}
static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
udc->driver = driver;
spin_unlock_irqrestore(&udc->lock, flags);
clk_enable(udc->pclk);
clk_enable(udc->hclk);
DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
udc->vbus_prev = 0;
if (gpio_is_valid(udc->vbus_pin))
enable_irq(gpio_to_irq(udc->vbus_pin));
/* If Vbus is present, enable the controller and wait for reset */
spin_lock_irqsave(&udc->lock, flags);
if (vbus_is_present(udc) && udc->vbus_prev == 0) {
toggle_bias(1);
usba_writel(udc, CTRL, USBA_ENABLE_MASK);
usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
}
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int atmel_usba_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
unsigned long flags;
if (gpio_is_valid(udc->vbus_pin))
disable_irq(gpio_to_irq(udc->vbus_pin));
spin_lock_irqsave(&udc->lock, flags);
udc->gadget.speed = USB_SPEED_UNKNOWN;
reset_all_endpoints(udc);
spin_unlock_irqrestore(&udc->lock, flags);
/* This will also disable the DP pullup */
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
udc->driver = NULL;
clk_disable(udc->hclk);
clk_disable(udc->pclk);
DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
return 0;
}
static int __init usba_udc_probe(struct platform_device *pdev)
{
struct usba_platform_data *pdata = pdev->dev.platform_data;
struct resource *regs, *fifo;
struct clk *pclk, *hclk;
struct usba_udc *udc = &the_udc;
int irq, ret, i;
regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
if (!regs || !fifo || !pdata)
return -ENXIO;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(pclk))
return PTR_ERR(pclk);
hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(hclk)) {
ret = PTR_ERR(hclk);
goto err_get_hclk;
}
spin_lock_init(&udc->lock);
udc->pdev = pdev;
udc->pclk = pclk;
udc->hclk = hclk;
udc->vbus_pin = -ENODEV;
ret = -ENOMEM;
udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
goto err_map_regs;
}
dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
(unsigned long)regs->start, udc->regs);
udc->fifo = ioremap(fifo->start, resource_size(fifo));
if (!udc->fifo) {
dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
goto err_map_fifo;
}
dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
(unsigned long)fifo->start, udc->fifo);
platform_set_drvdata(pdev, udc);
/* Make sure we start from a clean slate */
clk_enable(pclk);
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
clk_disable(pclk);
usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
GFP_KERNEL);
if (!usba_ep)
goto err_alloc_ep;
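/*
 * Endpoint 0 is initialized by hand and kept off the gadget's
 * ep_list; the remaining endpoints are added to the list in the
 * loop below.
 */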
the_udc.gadget.ep0 = &usba_ep[0].ep;
INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
usba_ep[0].ep.ops = &usba_ep_ops;
usba_ep[0].ep.name = pdata->ep[0].name;
usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size;
usba_ep[0].udc = &the_udc;
INIT_LIST_HEAD(&usba_ep[0].queue);
usba_ep[0].fifo_size = pdata->ep[0].fifo_size;
usba_ep[0].nr_banks = pdata->ep[0].nr_banks;
usba_ep[0].index = pdata->ep[0].index;
usba_ep[0].can_dma = pdata->ep[0].can_dma;
usba_ep[0].can_isoc = pdata->ep[0].can_isoc;
for (i = 1; i < pdata->num_ep; i++) {
struct usba_ep *ep = &usba_ep[i];
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
ep->ep.ops = &usba_ep_ops;
ep->ep.name = pdata->ep[i].name;
ep->ep.maxpacket = pdata->ep[i].fifo_size;
ep->udc = &the_udc;
INIT_LIST_HEAD(&ep->queue);
ep->fifo_size = pdata->ep[i].fifo_size;
ep->nr_banks = pdata->ep[i].nr_banks;
ep->index = pdata->ep[i].index;
ep->can_dma = pdata->ep[i].can_dma;
ep->can_isoc = pdata->ep[i].can_isoc;
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
}
ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
if (ret) {
dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
irq, ret);
goto err_request_irq;
}
udc->irq = irq;
if (gpio_is_valid(pdata->vbus_pin)) {
if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
udc->vbus_pin = pdata->vbus_pin;
udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
ret = request_irq(gpio_to_irq(udc->vbus_pin),
usba_vbus_irq, 0,
"atmel_usba_udc", udc);
if (ret) {
gpio_free(udc->vbus_pin);
udc->vbus_pin = -ENODEV;
dev_warn(&udc->pdev->dev,
"failed to request vbus irq; "
"assuming always on\n");
} else {
disable_irq(gpio_to_irq(udc->vbus_pin));
}
} else {
/* gpio_request failed, so use -EINVAL to make gpio_is_valid() return false */
udc->vbus_pin = -EINVAL;
}
}
ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (ret)
goto err_add_udc;
usba_init_debugfs(udc);
for (i = 1; i < pdata->num_ep; i++)
usba_ep_init_debugfs(udc, &usba_ep[i]);
return 0;
err_add_udc:
if (gpio_is_valid(pdata->vbus_pin)) {
free_irq(gpio_to_irq(udc->vbus_pin), udc);
gpio_free(udc->vbus_pin);
}
free_irq(irq, udc);
err_request_irq:
kfree(usba_ep);
err_alloc_ep:
iounmap(udc->fifo);
err_map_fifo:
iounmap(udc->regs);
err_map_regs:
clk_put(hclk);
err_get_hclk:
clk_put(pclk);
return ret;
}
static int __exit usba_udc_remove(struct platform_device *pdev)
{
struct usba_udc *udc;
int i;
struct usba_platform_data *pdata = pdev->dev.platform_data;
udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
for (i = 1; i < pdata->num_ep; i++)
usba_ep_cleanup_debugfs(&usba_ep[i]);
usba_cleanup_debugfs(udc);
if (gpio_is_valid(udc->vbus_pin)) {
free_irq(gpio_to_irq(udc->vbus_pin), udc);
gpio_free(udc->vbus_pin);
}
free_irq(udc->irq, udc);
kfree(usba_ep);
iounmap(udc->fifo);
iounmap(udc->regs);
clk_put(udc->hclk);
clk_put(udc->pclk);
return 0;
}
static struct platform_driver udc_driver = {
.remove = __exit_p(usba_udc_remove),
.driver = {
.name = "atmel_usba_udc",
.owner = THIS_MODULE,
},
};
module_platform_driver_probe(udc_driver, usba_udc_probe);
MODULE_DESCRIPTION("Atmel USBA UDC driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usba_udc");
| gpl-2.0 |
AdiPat/android_kernel_samsung_janice | arch/x86/platform/efi/efi_64.c | 2344 | 2917 | /*
* x86_64 specific EFI support functions
* Based on Extensible Firmware Interface Specification version 1.0
*
* Copyright (C) 2005-2008 Intel Co.
* Fenghua Yu <fenghua.yu@intel.com>
* Bibo Mao <bibo.mao@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
* Huang Ying <ying.huang@intel.com>
*
* Code to convert EFI to E820 map has been implemented in elilo bootloader
* based on an EFI patch by Edgar Hucek. Based on the E820 map, the page table
* is set up appropriately for EFI runtime code.
* - mouli 06/14/2007.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
void *p;
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI service code area executable */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
if (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
}
}
void __init efi_call_phys_prelog(void)
{
unsigned long vaddress;
int pgd;
int n_pgds;
early_code_mapping_set_exec(1);
local_irq_save(efi_flags);
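/*
 * Build a temporary 1:1 (virtual == physical) mapping of low memory
 * by copying each direct-map PGD entry into the corresponding low
 * PGD slot, saving the originals so efi_call_phys_epilog() can
 * restore them.  This lets the EFI runtime be called in physical
 * mode.
 */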
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
for (pgd = 0; pgd < n_pgds; pgd++) {
save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
}
__flush_tlb_all();
}
void __init efi_call_phys_epilog(void)
{
/*
* After the lock is released, the original page table is restored.
*/
int pgd;
int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
for (pgd = 0; pgd < n_pgds; pgd++)
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
kfree(save_pgd);
__flush_tlb_all();
local_irq_restore(efi_flags);
early_code_mapping_set_exec(0);
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type)
{
unsigned long last_map_pfn;
if (type == EFI_MEMORY_MAPPED_IO)
return ioremap(phys_addr, size);
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
unsigned long top = last_map_pfn << PAGE_SHIFT;
efi_ioremap(top, size - (top - phys_addr), type);
}
return (void __iomem *)__va(phys_addr);
}
| gpl-2.0 |
windyyuan/linux | net/netrom/nr_dev.c | 3112 | 4407 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/if_ether.h> /* For the statistics structure. */
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/arp.h>
#include <net/ax25.h>
#include <net/netrom.h>
/*
* Only allow IP over NET/ROM frames through if the netrom device is up.
*/
int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
if (!netif_running(dev)) {
stats->rx_dropped++;
return 0;
}
stats->rx_packets++;
stats->rx_bytes += skb->len;
skb->protocol = htons(ETH_P_IP);
/* Spoof incoming device */
skb->dev = dev;
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
netif_rx(skb);
return 1;
}
#ifdef CONFIG_INET
static int nr_rebuild_header(struct sk_buff *skb)
{
unsigned char *bp = skb->data;
if (arp_find(bp + 7, skb))
return 1;
bp[6] &= ~AX25_CBIT;
bp[6] &= ~AX25_EBIT;
bp[6] |= AX25_SSSID_SPARE;
bp += AX25_ADDR_LEN;
bp[6] &= ~AX25_CBIT;
bp[6] |= AX25_EBIT;
bp[6] |= AX25_SSSID_SPARE;
return 0;
}
#else
static int nr_rebuild_header(struct sk_buff *skb)
{
return 1;
}
#endif
static int nr_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned int len)
{
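/*
 * Build the NET/ROM network header in place: origin callsign
 * (7 bytes), destination callsign (7 bytes) and TTL, followed by a
 * 5-byte transport header carrying the IP protocol identifier.
 */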
unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
memcpy(buff, (saddr != NULL) ? saddr : dev->dev_addr, dev->addr_len);
buff[6] &= ~AX25_CBIT;
buff[6] &= ~AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
if (daddr != NULL)
memcpy(buff, daddr, dev->addr_len);
buff[6] &= ~AX25_CBIT;
buff[6] |= AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
*buff++ = sysctl_netrom_network_ttl_initialiser;
*buff++ = NR_PROTO_IP;
*buff++ = NR_PROTO_IP;
*buff++ = 0;
*buff++ = 0;
*buff++ = NR_PROTOEXT;
if (daddr != NULL)
return 37;
return -37;
}
static int __must_check nr_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
int err;
if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
return 0;
if (dev->flags & IFF_UP) {
err = ax25_listen_register((ax25_address *)sa->sa_data, NULL);
if (err)
return err;
ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
}
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
return 0;
}
static int nr_open(struct net_device *dev)
{
int err;
err = ax25_listen_register((ax25_address *)dev->dev_addr, NULL);
if (err)
return err;
netif_start_queue(dev);
return 0;
}
static int nr_close(struct net_device *dev)
{
ax25_listen_release((ax25_address *)dev->dev_addr, NULL);
netif_stop_queue(dev);
return 0;
}
static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
unsigned int len = skb->len;
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);
stats->tx_errors++;
return NETDEV_TX_OK;
}
stats->tx_packets++;
stats->tx_bytes += len;
return NETDEV_TX_OK;
}
static const struct header_ops nr_header_ops = {
.create = nr_header,
.rebuild= nr_rebuild_header,
};
static const struct net_device_ops nr_netdev_ops = {
.ndo_open = nr_open,
.ndo_stop = nr_close,
.ndo_start_xmit = nr_xmit,
.ndo_set_mac_address = nr_set_mac_address,
};
void nr_setup(struct net_device *dev)
{
dev->mtu = NR_MAX_PACKET_SIZE;
dev->netdev_ops = &nr_netdev_ops;
dev->header_ops = &nr_header_ops;
dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_NETROM;
/* New-style flags. */
dev->flags = IFF_NOARP;
}
| gpl-2.0 |
RR-msm7x30/samsung-kernel-msm7x30-common | tools/perf/builtin-annotate.c | 4904 | 8369 | /*
* builtin-annotate.c
*
* Builtin annotate command: Analyze the perf.data input file,
* look up and read DSOs and symbol information and display
* a histogram of results, along various sorting keys.
*/
#include "builtin.h"
#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/annotate.h"
#include "util/event.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/session.h"
#include "util/tool.h"
#include <linux/bitmap.h>
struct perf_annotate {
struct perf_tool tool;
char const *input_name;
bool force, use_tui, use_stdio;
bool full_paths;
bool print_line;
const char *sym_hist_filter;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
static int perf_evsel__add_sample(struct perf_evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct perf_annotate *ann)
{
struct hist_entry *he;
int ret;
if (ann->sym_hist_filter != NULL &&
(al->sym == NULL ||
strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
/* We're only interested in a symbol named sym_hist_filter */
if (al->sym != NULL) {
rb_erase(&al->sym->rb_node,
&al->map->dso->symbols[al->map->type]);
symbol__delete(al->sym);
}
return 0;
}
he = __hists__add_entry(&evsel->hists, al, NULL, 1);
if (he == NULL)
return -ENOMEM;
ret = 0;
if (he->ms.sym != NULL) {
struct annotation *notes = symbol__annotation(he->ms.sym);
if (notes->src == NULL && symbol__alloc_hist(he->ms.sym) < 0)
return -ENOMEM;
ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
}
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
return ret;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
if (perf_event__preprocess_sample(event, machine, &al, sample,
symbol__annotate_init) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
return 0;
if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
return -1;
}
return 0;
}
static int hist_entry__tty_annotate(struct hist_entry *he, int evidx,
struct perf_annotate *ann)
{
return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
ann->print_line, ann->full_paths, 0, 0);
}
static void hists__find_annotations(struct hists *self, int evidx,
struct perf_annotate *ann)
{
struct rb_node *nd = rb_first(&self->entries), *next;
int key = K_RIGHT;
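/*
 * In TUI mode the key returned by the annotate browser selects the
 * next (K_RIGHT) or previous (K_LEFT) symbol; in stdio mode every
 * entry is simply annotated in order.
 */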
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct annotation *notes;
if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
goto find_next;
notes = symbol__annotation(he->ms.sym);
if (notes->src == NULL) {
find_next:
if (key == K_LEFT)
nd = rb_prev(nd);
else
nd = rb_next(nd);
continue;
}
if (use_browser > 0) {
key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0);
switch (key) {
case K_RIGHT:
next = rb_next(nd);
break;
case K_LEFT:
next = rb_prev(nd);
break;
default:
return;
}
if (next != NULL)
nd = next;
} else {
hist_entry__tty_annotate(he, evidx, ann);
nd = rb_next(nd);
/*
* Since we have a hist_entry per IP for the same
* symbol, free he->ms.sym->src to signal we already
* processed this symbol.
*/
free(notes->src);
notes->src = NULL;
}
}
}
static int __cmd_annotate(struct perf_annotate *ann)
{
int ret;
struct perf_session *session;
struct perf_evsel *pos;
u64 total_nr_samples;
session = perf_session__new(ann->input_name, O_RDONLY,
ann->force, false, &ann->tool);
if (session == NULL)
return -ENOMEM;
if (ann->cpu_list) {
ret = perf_session__cpu_bitmap(session, ann->cpu_list,
ann->cpu_bitmap);
if (ret)
goto out_delete;
}
ret = perf_session__process_events(session, &ann->tool);
if (ret)
goto out_delete;
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout);
goto out_delete;
}
if (verbose > 3)
perf_session__fprintf(session, stdout);
if (verbose > 2)
perf_session__fprintf_dsos(session, stdout);
total_nr_samples = 0;
list_for_each_entry(pos, &session->evlist->entries, node) {
struct hists *hists = &pos->hists;
u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
if (nr_samples > 0) {
total_nr_samples += nr_samples;
hists__collapse_resort(hists);
hists__output_resort(hists);
hists__find_annotations(hists, pos->idx, ann);
}
}
if (total_nr_samples == 0) {
ui__warning("The %s file has no samples!\n", session->filename);
goto out_delete;
}
out_delete:
/*
* Speed up the exit process, for large files this can
* take quite a while.
*
* XXX Enable this when using valgrind or if we ever
* librarize this command.
*
* Also experiment with obstacks to see how much speed
* up we'll get here.
*
* perf_session__delete(session);
*/
return ret;
}
static const char * const annotate_usage[] = {
"perf annotate [<options>]",
NULL
};
int cmd_annotate(int argc, const char **argv, const char *prefix __used)
{
struct perf_annotate annotate = {
.tool = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.fork = perf_event__process_task,
.ordered_samples = true,
.ordering_requires_timestamps = true,
},
};
const struct option options[] = {
OPT_STRING('i', "input", &annotate.input_name, "file",
"input file name"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_BOOLEAN('l', "print-line", &annotate.print_line,
"print matching source lines (may be slow)"),
OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
"Don't shorten the displayed pathnames"),
OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_END()
};
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (annotate.use_stdio)
use_browser = 0;
else if (annotate.use_tui)
use_browser = 1;
setup_browser(true);
symbol_conf.priv_size = sizeof(struct annotation);
symbol_conf.try_vmlinux_path = true;
if (symbol__init() < 0)
return -1;
setup_sorting(annotate_usage, options);
if (argc) {
/*
* Special case: if there's an argument left then assume that
* it's a symbol filter:
*/
if (argc > 1)
usage_with_options(annotate_usage, options);
annotate.sym_hist_filter = argv[0];
}
return __cmd_annotate(&annotate);
}
| gpl-2.0 |
yoAeroA00/android_kernel_nokia_msm8610 | drivers/ide/piix.c | 5160 | 14392 | /*
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat
* Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
* Documentation:
*
* Publicly available from the Intel web site. Errata documentation
* is also publicly available. As an aid to anyone hacking on this
* driver, the list of errata that are relevant is below, going back to
* the PIIX4. Older device documentation is now a bit tricky to find.
*
* Errata of note:
*
* Unfixable
* PIIX4 errata #9 - Only on ultra obscure hw
* ICH3 errata #13 - Not observed to affect real hw
* by Intel
*
* Things we must deal with
* PIIX4 errata #10 - BM IDE hang with non UDMA
* (must stop/start dma to recover)
* 440MX errata #15 - As PIIX4 errata #10
* PIIX4 errata #15 - Must not read control registers
* during a PIO transfer
* 440MX errata #13 - As PIIX4 errata #15
* ICH2 errata #21 - DMA mode 0 doesn't work right
* ICH0/1 errata #55 - As ICH2 errata #21
* ICH2 spec c #9 - Extra operations needed to handle
* drive hotswap [NOT YET SUPPORTED]
* ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
* and must be dword aligned
* ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
*
* Should have been BIOS fixed:
* 450NX: errata #19 - DMA hangs on old 450NX
* 450NX: errata #20 - DMA hangs on old 450NX
* 450NX: errata #25 - Corruption with DMA on old 450NX
* ICH3 errata #15 - IDE deadlock under high load
* (BIOS must set dev 31 fn 0 bit 23)
* ICH3 errata #18 - Don't use native mode
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <asm/io.h>
#define DRV_NAME "piix"
static int no_piix_dma;
/**
* piix_set_pio_mode - set host controller for PIO mode
* @hwif: port
* @drive: drive
*
* Set the interface PIO mode based upon the settings done by AMI BIOS.
*/
static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = hwif->channel ? 0x42 : 0x40;
int slave_port = 0x44;
unsigned long flags;
u16 master_data;
u8 slave_data;
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
const u8 pio = drive->pio_mode - XFER_PIO_0;
/* ISP RTC */
static const u8 timings[][2]= {
{ 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
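/*
 * (The two columns are presumably the IORDY sample point and
 * recovery time fields of the IDETIM register, per the "ISP RTC"
 * note above.)
 */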
/*
* Master vs slave is synchronized above us but the slave register is
* shared by the two hwifs so the corner case of two slave timeouts in
* parallel must be locked.
*/
spin_lock_irqsave(&tune_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
if (pio > 1)
control |= 1; /* Programmable timing on */
if (drive->media == ide_disk)
control |= 4; /* Prefetch, post write */
if (ide_pio_need_iordy(drive, pio))
control |= 2; /* IORDY */
if (is_slave) {
master_data |= 0x4000;
master_data &= ~0x0070;
if (pio > 1) {
/* Set PPE, IE and TIME */
master_data |= control << 4;
}
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= hwif->channel ? 0x0f : 0xf0;
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
(hwif->channel ? 4 : 0);
} else {
master_data &= ~0x3307;
if (pio > 1) {
/* enable PPE, IE and TIME */
master_data |= control;
}
master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
spin_unlock_irqrestore(&tune_lock, flags);
}
/**
* piix_set_dma_mode - set host controller for DMA mode
* @hwif: port
* @drive: drive
*
* Set a PIIX host controller to the desired DMA mode. This involves
* programming the right timing data into the PCI configuration space.
*/
static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = hwif->channel ? 0x42 : 0x40;
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int v_flag = 0x01 << drive->dn;
int w_flag = 0x10 << drive->dn;
int u_speed = 0;
int sitre;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, ®4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
pci_read_config_byte(dev, 0x48, ®48);
pci_read_config_word(dev, 0x4a, ®4a);
pci_read_config_byte(dev, 0x54, ®54);
pci_read_config_byte(dev, 0x55, ®55);
if (speed >= XFER_UDMA_0) {
u8 udma = speed - XFER_UDMA_0;
u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
if (!(reg48 & u_flag))
pci_write_config_byte(dev, 0x48, reg48 | u_flag);
if (speed == XFER_UDMA_5) {
pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
} else {
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
}
if ((reg4a & a_speed) != u_speed)
pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
if (speed > XFER_UDMA_2) {
if (!(reg54 & v_flag))
pci_write_config_byte(dev, 0x54, reg54 | v_flag);
} else
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
if (reg4a & a_speed)
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
if (reg54 & v_flag)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
if (reg55 & w_flag)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
drive->pio_mode =
mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
piix_set_pio_mode(hwif, drive);
}
}
/**
* init_chipset_ich - set up the ICH chipset
* @dev: PCI device to set up
*
* Initialize the PCI device as required. For the ICH this turns
* out to be nice and simple.
*/
static int init_chipset_ich(struct pci_dev *dev)
{
u32 extra = 0;
pci_read_config_dword(dev, 0x54, &extra);
pci_write_config_dword(dev, 0x54, extra | 0x400);
return 0;
}
/**
* ich_clear_irq - clear BMDMA status
* @drive: IDE drive
*
* ICHx controllers set DMA INTR no matter whether the transfer was DMA or PIO.
* BMDMA status might need to be cleared even for
* PIO interrupts to prevent spurious/lost IRQ.
*/
static void ich_clear_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
u8 dma_stat;
/*
* ide_dma_end() needs BMDMA status for error checking.
* So, skip clearing BMDMA status here and leave it
* to ide_dma_end() if this is DMA interrupt.
*/
if (drive->waiting_for_dma || hwif->dma_base == 0)
return;
/* clear the INTR & ERROR bits */
dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
/* Should we force the bit as well ? */
outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}
struct ich_laptop {
u16 device;
u16 subvendor;
u16 subdevice;
};
/*
* List of laptops that use short cables rather than 80 wire
*/
static const struct ich_laptop ich_laptop[] = {
/* devid, subvendor, subdev */
{ 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
{ 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
{ 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
{ 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
{ 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
{ 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
{ 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
{ 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
/* end marker */
{ 0, }
};
static u8 piix_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *pdev = to_pci_dev(hwif->dev);
const struct ich_laptop *lap = &ich_laptop[0];
u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
/* check for specials */
while (lap->device) {
if (lap->device == pdev->device &&
lap->subvendor == pdev->subsystem_vendor &&
lap->subdevice == pdev->subsystem_device) {
return ATA_CBL_PATA40_SHORT;
}
lap++;
}
pci_read_config_byte(pdev, 0x54, ®54h);
return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}
/**
* init_hwif_piix - fill in the hwif for the PIIX
* @hwif: IDE interface
*
* Set up the ide_hwif_t for the PIIX interface according to the
* capabilities of the hardware.
*/
static void __devinit init_hwif_piix(ide_hwif_t *hwif)
{
if (!hwif->dma_base)
return;
if (no_piix_dma)
hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0;
}
static const struct ide_port_ops piix_port_ops = {
.set_pio_mode = piix_set_pio_mode,
.set_dma_mode = piix_set_dma_mode,
.cable_detect = piix_cable_detect,
};
static const struct ide_port_ops ich_port_ops = {
.set_pio_mode = piix_set_pio_mode,
.set_dma_mode = piix_set_dma_mode,
.clear_irq = ich_clear_irq,
.cable_detect = piix_cable_detect,
};
#define DECLARE_PIIX_DEV(udma) \
{ \
.name = DRV_NAME, \
.init_hwif = init_hwif_piix, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
.port_ops = &piix_port_ops, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
.mwdma_mask = ATA_MWDMA12_ONLY, \
.udma_mask = udma, \
}
#define DECLARE_ICH_DEV(mwdma, udma) \
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_ich, \
.init_hwif = init_hwif_piix, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
.port_ops = &ich_port_ops, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
.mwdma_mask = mwdma, \
.udma_mask = udma, \
}
static const struct ide_port_info piix_pci_info[] __devinitdata = {
/* 0: MPIIX */
{ /*
* MPIIX actually has only a single IDE channel mapped to
* the primary or secondary ports depending on the value
* of the bit 14 of the IDETIM register at offset 0x6c
*/
.name = DRV_NAME,
.enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
.host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA,
.pio_mask = ATA_PIO4,
/* This is a painful system best to let it self tune for now */
},
/* 1: PIIXa/PIIXb/PIIX3 */
DECLARE_PIIX_DEV(0x00), /* no udma */
/* 2: PIIX4 */
DECLARE_PIIX_DEV(ATA_UDMA2),
/* 3: ICH0 */
DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
/* 4: ICH */
DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
/* 5: PIIX4 */
DECLARE_PIIX_DEV(ATA_UDMA4),
/* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
/* 7: ICH7/7-R, no MWDMA1 */
DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
};
/**
* piix_init_one - called when a PIIX is found
* @dev: the piix device
* @id: the matching pci id
*
* Called when the PCI registration layer (or the IDE initialization)
* finds a device matching our IDE device tables.
*/
static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
}
/**
* piix_check_450nx - Check for problem 450NX setup
*
* Check for the presence of 450NX errata #19 and errata #25. If
* they are found, disable use of DMA IDE
*/
static void __devinit piix_check_450nx(void)
{
struct pci_dev *pdev = NULL;
u16 cfg;
while((pdev=pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL)
{
/* Look for 450NX PXB. Check for problem configurations
A PCI quirk checks bit 6 already */
pci_read_config_word(pdev, 0x41, &cfg);
/* Only on the original revision: IDE DMA can hang */
if (pdev->revision == 0x00)
no_piix_dma = 1;
/* On all revisions below 5 PXB bus lock must be disabled for IDE */
else if (cfg & (1<<14) && pdev->revision < 5)
no_piix_dma = 2;
}
if(no_piix_dma)
printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
if(no_piix_dma == 2)
printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
}
static const struct pci_device_id piix_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
#ifdef CONFIG_BLK_DEV_IDE_SATA
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 },
#endif
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
static struct pci_driver piix_pci_driver = {
.name = "PIIX_IDE",
.id_table = piix_pci_tbl,
.probe = piix_init_one,
.remove = ide_pci_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
static int __init piix_ide_init(void)
{
piix_check_450nx();
return ide_pci_register_driver(&piix_pci_driver);
}
static void __exit piix_ide_exit(void)
{
pci_unregister_driver(&piix_pci_driver);
}
module_init(piix_ide_init);
module_exit(piix_ide_exit);
MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
faux123/msm8660-aosp-ics | arch/powerpc/platforms/powermac/pfunc_core.c | 7720 | 25647 | /*
*
* FIXME: Properly make this race free with refcounting etc...
*
* FIXME: LOCKING !!!
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <asm/prom.h>
#include <asm/pmac_pfunc.h>
/* Debug */
#define LOG_PARSE(fmt...)
#define LOG_ERROR(fmt...) printk(fmt)
#define LOG_BLOB(t,b,c)
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
/* Command numbers */
#define PMF_CMD_LIST 0
#define PMF_CMD_WRITE_GPIO 1
#define PMF_CMD_READ_GPIO 2
#define PMF_CMD_WRITE_REG32 3
#define PMF_CMD_READ_REG32 4
#define PMF_CMD_WRITE_REG16 5
#define PMF_CMD_READ_REG16 6
#define PMF_CMD_WRITE_REG8 7
#define PMF_CMD_READ_REG8 8
#define PMF_CMD_DELAY 9
#define PMF_CMD_WAIT_REG32 10
#define PMF_CMD_WAIT_REG16 11
#define PMF_CMD_WAIT_REG8 12
#define PMF_CMD_READ_I2C 13
#define PMF_CMD_WRITE_I2C 14
#define PMF_CMD_RMW_I2C 15
#define PMF_CMD_GEN_I2C 16
#define PMF_CMD_SHIFT_BYTES_RIGHT 17
#define PMF_CMD_SHIFT_BYTES_LEFT 18
#define PMF_CMD_READ_CFG 19
#define PMF_CMD_WRITE_CFG 20
#define PMF_CMD_RMW_CFG 21
#define PMF_CMD_READ_I2C_SUBADDR 22
#define PMF_CMD_WRITE_I2C_SUBADDR 23
#define PMF_CMD_SET_I2C_MODE 24
#define PMF_CMD_RMW_I2C_SUBADDR 25
#define PMF_CMD_READ_REG32_MASK_SHR_XOR 26
#define PMF_CMD_READ_REG16_MASK_SHR_XOR 27
#define PMF_CMD_READ_REG8_MASK_SHR_XOR 28
#define PMF_CMD_WRITE_REG32_SHL_MASK 29
#define PMF_CMD_WRITE_REG16_SHL_MASK 30
#define PMF_CMD_WRITE_REG8_SHL_MASK 31
#define PMF_CMD_MASK_AND_COMPARE 32
#define PMF_CMD_COUNT 33
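/* The command numbers above index the pmf_parsers[] dispatch table below. */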
/* This structure holds the state of the parser while walking through
* a function definition
*/
struct pmf_cmd {
const void *cmdptr;
const void *cmdend;
struct pmf_function *func;
void *instdata;
struct pmf_args *args;
int error;
};
#if 0
/* Debug output */
static void print_blob(const char *title, const void *blob, int bytes)
{
printk("%s", title);
while(bytes--) {
printk("%02x ", *((u8 *)blob));
blob += 1;
}
printk("\n");
}
#endif
/*
* Parser helpers
*/
static u32 pmf_next32(struct pmf_cmd *cmd)
{
u32 value;
if ((cmd->cmdend - cmd->cmdptr) < 4) {
cmd->error = 1;
return 0;
}
value = *((u32 *)cmd->cmdptr);
cmd->cmdptr += 4;
return value;
}
static const void* pmf_next_blob(struct pmf_cmd *cmd, int count)
{
const void *value;
if ((cmd->cmdend - cmd->cmdptr) < count) {
cmd->error = 1;
return NULL;
}
value = cmd->cmdptr;
cmd->cmdptr += count;
return value;
}
/*
* Individual command parsers
*/
#define PMF_PARSE_CALL(name, cmd, handlers, p...) \
do { \
if (cmd->error) \
return -ENXIO; \
if (handlers == NULL) \
return 0; \
if (handlers->name) \
return handlers->name(cmd->func, cmd->instdata, \
cmd->args, p); \
return -1; \
	} while(0)
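/*
 * PMF_PARSE_CALL dispatches to the matching handler once a command's
 * arguments have been decoded: a prior decode error aborts with -ENXIO,
 * a NULL handler table (the initial sizing pass) succeeds without doing
 * anything, and a missing per-command handler returns -1.
 */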
static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 value = (u8)pmf_next32(cmd);
u8 mask = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask);
PMF_PARSE_CALL(write_gpio, cmd, h, value, mask);
}
static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 mask = (u8)pmf_next32(cmd);
int rshift = (int)pmf_next32(cmd);
u8 xor = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n",
mask, rshift, xor);
PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor);
}
static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 value = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n",
offset, value, mask);
PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask);
}
static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset);
PMF_PARSE_CALL(read_reg32, cmd, h, offset);
}
static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u16 value = (u16)pmf_next32(cmd);
u16 mask = (u16)pmf_next32(cmd);
LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n",
offset, value, mask);
PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask);
}
static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset);
PMF_PARSE_CALL(read_reg16, cmd, h, offset);
}
static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u8 value = (u16)pmf_next32(cmd);
u8 mask = (u16)pmf_next32(cmd);
LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n",
offset, value, mask);
PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask);
}
static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset);
PMF_PARSE_CALL(read_reg8, cmd, h, offset);
}
static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 duration = pmf_next32(cmd);
LOG_PARSE("pmf: delay(duration: %d us)\n", duration);
PMF_PARSE_CALL(delay, cmd, h, duration);
}
static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 value = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x,mask: %08x)\n",
offset, value, mask);
PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask);
}
static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u16 value = (u16)pmf_next32(cmd);
u16 mask = (u16)pmf_next32(cmd);
LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x,mask: %04x)\n",
offset, value, mask);
PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask);
}
static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u8 value = (u8)pmf_next32(cmd);
u8 mask = (u8)pmf_next32(cmd);
LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x,mask: %02x)\n",
offset, value, mask);
PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask);
}
static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 bytes = pmf_next32(cmd);
LOG_PARSE("pmf: read_i2c(bytes: %ud)\n", bytes);
PMF_PARSE_CALL(read_i2c, cmd, h, bytes);
}
static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 bytes = pmf_next32(cmd);
const void *blob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: write_i2c(bytes: %ud) ...\n", bytes);
LOG_BLOB("pmf: data: \n", blob, bytes);
PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob);
}
static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 maskbytes = pmf_next32(cmd);
u32 valuesbytes = pmf_next32(cmd);
u32 totalbytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, maskbytes);
const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
LOG_PARSE("pmf: rmw_i2c(maskbytes: %ud, valuebytes: %ud, "
"totalbytes: %d) ...\n",
maskbytes, valuesbytes, totalbytes);
LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes,
maskblob, valuesblob);
}
static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes);
}
static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
const void *blob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
LOG_BLOB("pmf: data: \n", blob, bytes);
PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob);
}
static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 maskbytes = pmf_next32(cmd);
u32 valuesbytes = pmf_next32(cmd);
u32 totalbytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, maskbytes);
const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
LOG_PARSE("pmf: rmw_cfg(maskbytes: %ud, valuebytes: %ud,"
" totalbytes: %d) ...\n",
maskbytes, valuesbytes, totalbytes);
LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes,
totalbytes, maskblob, valuesblob);
}
static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 subaddr = (u8)pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %ud)\n",
subaddr, bytes);
PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes);
}
static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 subaddr = (u8)pmf_next32(cmd);
u32 bytes = pmf_next32(cmd);
const void *blob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %ud) ...\n",
subaddr, bytes);
LOG_BLOB("pmf: data: \n", blob, bytes);
PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob);
}
static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u32 mode = pmf_next32(cmd);
LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode);
PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode);
}
static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
u8 subaddr = (u8)pmf_next32(cmd);
u32 maskbytes = pmf_next32(cmd);
u32 valuesbytes = pmf_next32(cmd);
u32 totalbytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, maskbytes);
const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %ud, valuebytes: %ud"
", totalbytes: %d) ...\n",
subaddr, maskbytes, valuesbytes, totalbytes);
LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes,
totalbytes, maskblob, valuesblob);
}
static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 xor = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x,"
" xor: %x\n", offset, mask, shift, xor);
PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor);
}
static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 xor = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x,"
" xor: %x\n", offset, mask, shift, xor);
PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor);
}
static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 xor = pmf_next32(cmd);
LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x,"
" xor: %x\n", offset, mask, shift, xor);
PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor);
}
static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x\n",
offset, shift, mask);
PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask);
}
static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x\n",
offset, shift, mask);
PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask);
}
static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 offset = pmf_next32(cmd);
u32 shift = pmf_next32(cmd);
u32 mask = pmf_next32(cmd);
LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x\n",
offset, shift, mask);
PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask);
}
static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd,
struct pmf_handlers *h)
{
u32 bytes = pmf_next32(cmd);
const void *maskblob = pmf_next_blob(cmd, bytes);
const void *valuesblob = pmf_next_blob(cmd, bytes);
LOG_PARSE("pmf: mask_and_compare(length: %ud ...\n", bytes);
LOG_BLOB("pmf: mask data: \n", maskblob, bytes);
LOG_BLOB("pmf: values data: \n", valuesblob, bytes);
PMF_PARSE_CALL(mask_and_compare, cmd, h,
bytes, maskblob, valuesblob);
}
typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h);
static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] =
{
NULL,
pmf_parser_write_gpio,
pmf_parser_read_gpio,
pmf_parser_write_reg32,
pmf_parser_read_reg32,
pmf_parser_write_reg16,
pmf_parser_read_reg16,
pmf_parser_write_reg8,
pmf_parser_read_reg8,
pmf_parser_delay,
pmf_parser_wait_reg32,
pmf_parser_wait_reg16,
pmf_parser_wait_reg8,
pmf_parser_read_i2c,
pmf_parser_write_i2c,
pmf_parser_rmw_i2c,
NULL, /* Bogus command */
NULL, /* Shift bytes right: NYI */
NULL, /* Shift bytes left: NYI */
pmf_parser_read_cfg,
pmf_parser_write_cfg,
pmf_parser_rmw_cfg,
pmf_parser_read_i2c_sub,
pmf_parser_write_i2c_sub,
pmf_parser_set_i2c_mode,
pmf_parser_rmw_i2c_sub,
pmf_parser_read_reg32_msrx,
pmf_parser_read_reg16_msrx,
pmf_parser_read_reg8_msrx,
pmf_parser_write_reg32_slm,
pmf_parser_write_reg16_slm,
pmf_parser_write_reg8_slm,
pmf_parser_mask_and_compare,
};
struct pmf_device {
struct list_head link;
struct device_node *node;
struct pmf_handlers *handlers;
struct list_head functions;
struct kref ref;
};
static LIST_HEAD(pmf_devices);
static DEFINE_SPINLOCK(pmf_lock);
static DEFINE_MUTEX(pmf_irq_mutex);
static void pmf_release_device(struct kref *kref)
{
struct pmf_device *dev = container_of(kref, struct pmf_device, ref);
kfree(dev);
}
static inline void pmf_put_device(struct pmf_device *dev)
{
kref_put(&dev->ref, pmf_release_device);
}
static inline struct pmf_device *pmf_get_device(struct pmf_device *dev)
{
kref_get(&dev->ref);
return dev;
}
static inline struct pmf_device *pmf_find_device(struct device_node *np)
{
struct pmf_device *dev;
list_for_each_entry(dev, &pmf_devices, link) {
if (dev->node == np)
return pmf_get_device(dev);
}
return NULL;
}
static int pmf_parse_one(struct pmf_function *func,
struct pmf_handlers *handlers,
void *instdata, struct pmf_args *args)
{
struct pmf_cmd cmd;
u32 ccode;
int count, rc;
cmd.cmdptr = func->data;
cmd.cmdend = func->data + func->length;
cmd.func = func;
cmd.instdata = instdata;
cmd.args = args;
cmd.error = 0;
LOG_PARSE("pmf: func %s, %d bytes, %s...\n",
func->name, func->length,
handlers ? "executing" : "parsing");
/* One subcommand to parse for now */
count = 1;
while(count-- && cmd.cmdptr < cmd.cmdend) {
/* Get opcode */
ccode = pmf_next32(&cmd);
/* Check if we are hitting a command list, fetch new count */
if (ccode == 0) {
count = pmf_next32(&cmd) - 1;
ccode = pmf_next32(&cmd);
}
if (cmd.error) {
LOG_ERROR("pmf: parse error, not enough data\n");
return -ENXIO;
}
if (ccode >= PMF_CMD_COUNT) {
LOG_ERROR("pmf: command code %d unknown !\n", ccode);
return -ENXIO;
}
if (pmf_parsers[ccode] == NULL) {
LOG_ERROR("pmf: no parser for command %d !\n", ccode);
return -ENXIO;
}
rc = pmf_parsers[ccode](&cmd, handlers);
if (rc != 0) {
LOG_ERROR("pmf: parser for command %d returned"
" error %d\n", ccode, rc);
return rc;
}
}
/* We are doing an initial parse pass, we need to adjust the size */
if (handlers == NULL)
func->length = cmd.cmdptr - func->data;
return 0;
}
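/*
 * Illustrative encoding (not taken from any particular device tree):
 * a lone write_gpio function is three 32-bit words
 * { PMF_CMD_WRITE_GPIO, value, mask }, while a list of N commands is
 * prefixed with { PMF_CMD_LIST, N } before the first command word,
 * which is what the ccode == 0 branch above handles.
 */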
static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata,
const char *name, u32 *data,
unsigned int length)
{
int count = 0;
struct pmf_function *func = NULL;
DBG("pmf: Adding functions for platform-do-%s\n", name);
while (length >= 12) {
/* Allocate a structure */
func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL);
if (func == NULL)
goto bail;
kref_init(&func->ref);
INIT_LIST_HEAD(&func->irq_clients);
func->node = dev->node;
func->driver_data = driverdata;
func->name = name;
func->phandle = data[0];
func->flags = data[1];
data += 2;
length -= 8;
func->data = data;
func->length = length;
func->dev = dev;
DBG("pmf: idx %d: flags=%08x, phandle=%08x "
" %d bytes remaining, parsing...\n",
count+1, func->flags, func->phandle, length);
if (pmf_parse_one(func, NULL, NULL, NULL)) {
kfree(func);
goto bail;
}
length -= func->length;
data = (u32 *)(((u8 *)data) + func->length);
list_add(&func->link, &dev->functions);
pmf_get_device(dev);
count++;
}
bail:
DBG("pmf: Added %d functions\n", count);
return count;
}
static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
{
struct property *pp;
#define PP_PREFIX "platform-do-"
const int plen = strlen(PP_PREFIX);
int count = 0;
for (pp = dev->node->properties; pp != 0; pp = pp->next) {
char *name;
if (strncmp(pp->name, PP_PREFIX, plen) != 0)
continue;
name = pp->name + plen;
if (strlen(name) && pp->length >= 12)
count += pmf_add_function_prop(dev, driverdata, name,
pp->value, pp->length);
}
return count;
}
int pmf_register_driver(struct device_node *np,
struct pmf_handlers *handlers,
void *driverdata)
{
struct pmf_device *dev;
unsigned long flags;
int rc = 0;
if (handlers == NULL)
return -EINVAL;
DBG("pmf: registering driver for node %s\n", np->full_name);
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
spin_unlock_irqrestore(&pmf_lock, flags);
if (dev != NULL) {
DBG("pmf: already there !\n");
pmf_put_device(dev);
return -EBUSY;
}
dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL);
if (dev == NULL) {
DBG("pmf: no memory !\n");
return -ENOMEM;
}
kref_init(&dev->ref);
dev->node = of_node_get(np);
dev->handlers = handlers;
INIT_LIST_HEAD(&dev->functions);
rc = pmf_add_functions(dev, driverdata);
if (rc == 0) {
DBG("pmf: no functions, disposing.. \n");
of_node_put(np);
kfree(dev);
return -ENODEV;
}
spin_lock_irqsave(&pmf_lock, flags);
list_add(&dev->link, &pmf_devices);
spin_unlock_irqrestore(&pmf_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(pmf_register_driver);
struct pmf_function *pmf_get_function(struct pmf_function *func)
{
if (!try_module_get(func->dev->handlers->owner))
return NULL;
kref_get(&func->ref);
return func;
}
EXPORT_SYMBOL_GPL(pmf_get_function);
static void pmf_release_function(struct kref *kref)
{
struct pmf_function *func =
container_of(kref, struct pmf_function, ref);
pmf_put_device(func->dev);
kfree(func);
}
static inline void __pmf_put_function(struct pmf_function *func)
{
kref_put(&func->ref, pmf_release_function);
}
void pmf_put_function(struct pmf_function *func)
{
if (func == NULL)
return;
module_put(func->dev->handlers->owner);
__pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_put_function);
void pmf_unregister_driver(struct device_node *np)
{
struct pmf_device *dev;
unsigned long flags;
DBG("pmf: unregistering driver for node %s\n", np->full_name);
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
if (dev == NULL) {
DBG("pmf: not such driver !\n");
spin_unlock_irqrestore(&pmf_lock, flags);
return;
}
list_del(&dev->link);
while(!list_empty(&dev->functions)) {
struct pmf_function *func =
list_entry(dev->functions.next, typeof(*func), link);
list_del(&func->link);
__pmf_put_function(func);
}
pmf_put_device(dev);
spin_unlock_irqrestore(&pmf_lock, flags);
}
EXPORT_SYMBOL_GPL(pmf_unregister_driver);
struct pmf_function *__pmf_find_function(struct device_node *target,
const char *name, u32 flags)
{
struct device_node *actor = of_node_get(target);
struct pmf_device *dev;
struct pmf_function *func, *result = NULL;
char fname[64];
const u32 *prop;
u32 ph;
/*
* Look for a "platform-*" function reference. If we can't find
* one, then we fallback to a direct call attempt
*/
snprintf(fname, 63, "platform-%s", name);
prop = of_get_property(target, fname, NULL);
if (prop == NULL)
goto find_it;
ph = *prop;
if (ph == 0)
goto find_it;
/*
* Ok, now try to find the actor. If we can't find it, we fail,
* there is no point in falling back there
*/
of_node_put(actor);
actor = of_find_node_by_phandle(ph);
if (actor == NULL)
return NULL;
find_it:
dev = pmf_find_device(actor);
if (dev == NULL) {
result = NULL;
goto out;
}
list_for_each_entry(func, &dev->functions, link) {
if (name && strcmp(name, func->name))
continue;
if (func->phandle && target->phandle != func->phandle)
continue;
if ((func->flags & flags) == 0)
continue;
result = func;
break;
}
pmf_put_device(dev);
out:
of_node_put(actor);
return result;
}
int pmf_register_irq_client(struct device_node *target,
const char *name,
struct pmf_irq_client *client)
{
struct pmf_function *func;
unsigned long flags;
spin_lock_irqsave(&pmf_lock, flags);
func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
if (func)
func = pmf_get_function(func);
spin_unlock_irqrestore(&pmf_lock, flags);
if (func == NULL)
return -ENODEV;
/* guard against manipulations of list */
mutex_lock(&pmf_irq_mutex);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_enable(func);
/* guard against pmf_do_irq while changing list */
spin_lock_irqsave(&pmf_lock, flags);
list_add(&client->link, &func->irq_clients);
spin_unlock_irqrestore(&pmf_lock, flags);
client->func = func;
mutex_unlock(&pmf_irq_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pmf_register_irq_client);
void pmf_unregister_irq_client(struct pmf_irq_client *client)
{
struct pmf_function *func = client->func;
unsigned long flags;
BUG_ON(func == NULL);
/* guard against manipulations of list */
mutex_lock(&pmf_irq_mutex);
client->func = NULL;
/* guard against pmf_do_irq while changing list */
spin_lock_irqsave(&pmf_lock, flags);
list_del(&client->link);
spin_unlock_irqrestore(&pmf_lock, flags);
if (list_empty(&func->irq_clients))
func->dev->handlers->irq_disable(func);
mutex_unlock(&pmf_irq_mutex);
pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
void pmf_do_irq(struct pmf_function *func)
{
unsigned long flags;
struct pmf_irq_client *client;
/* For now, using a spinlock over the whole function. Can be made
* to drop the lock using 2 lists if necessary
*/
spin_lock_irqsave(&pmf_lock, flags);
list_for_each_entry(client, &func->irq_clients, link) {
if (!try_module_get(client->owner))
continue;
client->handler(client->data);
module_put(client->owner);
}
spin_unlock_irqrestore(&pmf_lock, flags);
}
EXPORT_SYMBOL_GPL(pmf_do_irq);
int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
{
struct pmf_device *dev = func->dev;
void *instdata = NULL;
int rc = 0;
DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name);
if (dev->handlers->begin)
instdata = dev->handlers->begin(func, args);
rc = pmf_parse_one(func, dev->handlers, instdata, args);
if (dev->handlers->end)
dev->handlers->end(func, instdata);
return rc;
}
EXPORT_SYMBOL_GPL(pmf_call_one);
int pmf_do_functions(struct device_node *np, const char *name,
u32 phandle, u32 fflags, struct pmf_args *args)
{
struct pmf_device *dev;
struct pmf_function *func, *tmp;
unsigned long flags;
int rc = -ENODEV;
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
if (dev == NULL) {
spin_unlock_irqrestore(&pmf_lock, flags);
return -ENODEV;
}
list_for_each_entry_safe(func, tmp, &dev->functions, link) {
if (name && strcmp(name, func->name))
continue;
if (phandle && func->phandle && phandle != func->phandle)
continue;
if ((func->flags & fflags) == 0)
continue;
if (pmf_get_function(func) == NULL)
continue;
spin_unlock_irqrestore(&pmf_lock, flags);
rc = pmf_call_one(func, args);
pmf_put_function(func);
spin_lock_irqsave(&pmf_lock, flags);
}
pmf_put_device(dev);
spin_unlock_irqrestore(&pmf_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(pmf_do_functions);
struct pmf_function *pmf_find_function(struct device_node *target,
const char *name)
{
struct pmf_function *func;
unsigned long flags;
spin_lock_irqsave(&pmf_lock, flags);
func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND);
if (func)
func = pmf_get_function(func);
spin_unlock_irqrestore(&pmf_lock, flags);
return func;
}
EXPORT_SYMBOL_GPL(pmf_find_function);
int pmf_call_function(struct device_node *target, const char *name,
struct pmf_args *args)
{
struct pmf_function *func = pmf_find_function(target, name);
int rc;
if (func == NULL)
return -ENODEV;
rc = pmf_call_one(func, args);
pmf_put_function(func);
return rc;
}
EXPORT_SYMBOL_GPL(pmf_call_function);
| gpl-2.0 |
MoKee/android_kernel_goldfish | drivers/staging/vt6656/wpa2.c | 8232 | 12316 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: wpa2.c
*
 * Purpose: Handles the WPA2 (RSN IE) related functions
*
* Functions:
*
* Revision History:
*
* Author: Yiching Chen
*
* Date: Oct. 4, 2004
*
*/
#include "wpa2.h"
#include "device.h"
/*--------------------- Static Definitions -------------------------*/
static int msglevel = MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
const BYTE abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };
const BYTE abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };
const BYTE abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 };
const BYTE abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };
const BYTE abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };
const BYTE abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };
const BYTE abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*+
*
* Description:
* Clear RSN information in BSSList.
*
* Parameters:
* In:
* pBSSNode - BSS list.
* Out:
* none
*
* Return Value: none.
*
-*/
void
WPA2_ClearRSN (
PKnownBSS pBSSNode
)
{
int ii;
pBSSNode->bWPA2Valid = FALSE;
pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
for (ii=0; ii < 4; ii ++)
pBSSNode->abyCSSPK[ii] = WLAN_11i_CSS_CCMP;
pBSSNode->wCSSPKCount = 1;
for (ii=0; ii < 4; ii ++)
pBSSNode->abyAKMSSAuthType[ii] = WLAN_11i_AKMSS_802_1X;
pBSSNode->wAKMSSAuthCount = 1;
pBSSNode->sRSNCapObj.bRSNCapExist = FALSE;
pBSSNode->sRSNCapObj.wRSNCap = 0;
}
/*+
*
* Description:
* Parse RSN IE.
*
* Parameters:
* In:
* pBSSNode - BSS list.
* pRSN - Pointer to the RSN IE.
* Out:
* none
*
* Return Value: none.
*
-*/
void
WPA2vParseRSN (
PKnownBSS pBSSNode,
PWLAN_IE_RSN pRSN
)
{
int i, j;
WORD m = 0, n = 0;
PBYTE pbyOUI;
BOOL bUseGK = FALSE;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WPA2_ParseRSN: [%d]\n", pRSN->len);
WPA2_ClearRSN(pBSSNode);
if (pRSN->len == 2) { // ver(2)
if ((pRSN->byElementID == WLAN_EID_RSN) && (pRSN->wVersion == 1)) {
pBSSNode->bWPA2Valid = TRUE;
}
return;
}
if (pRSN->len < 6) { // ver(2) + GK(4)
// invalid CSS, P802.11i/D10.0, p31
return;
}
// information element header makes sense
if ((pRSN->byElementID == WLAN_EID_RSN) &&
(pRSN->wVersion == 1)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Legal 802.11i RSN\n");
pbyOUI = &(pRSN->abyRSN[0]);
if ( !memcmp(pbyOUI, abyOUIWEP40, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_WEP40;
else if ( !memcmp(pbyOUI, abyOUITKIP, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_TKIP;
else if ( !memcmp(pbyOUI, abyOUICCMP, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
else if ( !memcmp(pbyOUI, abyOUIWEP104, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_WEP104;
else if ( !memcmp(pbyOUI, abyOUIGK, 4)) {
// invalid CSS, P802.11i/D10.0, p32
return;
} else
// any vendor checks here
pBSSNode->byCSSGK = WLAN_11i_CSS_UNKNOWN;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"802.11i CSS: %X\n", pBSSNode->byCSSGK);
if (pRSN->len == 6) {
pBSSNode->bWPA2Valid = TRUE;
return;
}
if (pRSN->len >= 8) { // ver(2) + GK(4) + PK count(2)
pBSSNode->wCSSPKCount = *((PWORD) &(pRSN->abyRSN[4]));
j = 0;
pbyOUI = &(pRSN->abyRSN[6]);
for (i = 0; (i < pBSSNode->wCSSPKCount) && (j < sizeof(pBSSNode->abyCSSPK)/sizeof(BYTE)); i++) {
if (pRSN->len >= 8+i*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*i)
if ( !memcmp(pbyOUI, abyOUIGK, 4)) {
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_USE_GROUP;
bUseGK = TRUE;
} else if ( !memcmp(pbyOUI, abyOUIWEP40, 4)) {
				// Invalid CSS, continue parsing
} else if ( !memcmp(pbyOUI, abyOUITKIP, 4)) {
if (pBSSNode->byCSSGK != WLAN_11i_CSS_CCMP)
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_TKIP;
else
					; // Invalid CSS, continue parsing
} else if ( !memcmp(pbyOUI, abyOUICCMP, 4)) {
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_CCMP;
} else if ( !memcmp(pbyOUI, abyOUIWEP104, 4)) {
				// Invalid CSS, continue parsing
} else {
// any vendor checks here
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_UNKNOWN;
}
pbyOUI += 4;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyCSSPK[%d]: %X\n", j-1, pBSSNode->abyCSSPK[j-1]);
} else
break;
} //for
if (bUseGK == TRUE) {
if (j != 1) {
				// invalid CSS, this should be the only PK CSS.
return;
}
if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) {
				// invalid CSS, if the group CSS is CCMP the PK can't be use-group.
return;
}
}
if ((pBSSNode->wCSSPKCount != 0) && (j == 0)) {
// invalid CSS, No valid PK.
return;
}
pBSSNode->wCSSPKCount = (WORD)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wCSSPKCount: %d\n", pBSSNode->wCSSPKCount);
}
m = *((PWORD) &(pRSN->abyRSN[4]));
if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
pBSSNode->wAKMSSAuthCount = *((PWORD) &(pRSN->abyRSN[6+4*m]));
j = 0;
pbyOUI = &(pRSN->abyRSN[8+4*m]);
for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(BYTE)); i++) {
if (pRSN->len >= 10+(m+i)*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSS(2)+AKS(4*i)
if ( !memcmp(pbyOUI, abyOUI8021X, 4))
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_802_1X;
else if ( !memcmp(pbyOUI, abyOUIPSK, 4))
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_PSK;
else
// any vendor checks here
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_UNKNOWN;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyAKMSSAuthType[%d]: %X\n", j-1, pBSSNode->abyAKMSSAuthType[j-1]);
} else
break;
}
pBSSNode->wAKMSSAuthCount = (WORD)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
n = *((PWORD) &(pRSN->abyRSN[6+4*m]));
if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
pBSSNode->sRSNCapObj.bRSNCapExist = TRUE;
pBSSNode->sRSNCapObj.wRSNCap = *((PWORD) &(pRSN->abyRSN[8+4*m+4*n]));
}
}
		// ignore PMKID lists because only the (Re)Association Request has this field
pBSSNode->bWPA2Valid = TRUE;
}
}
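/*
 * For reference, the RSN IE layout walked above is:
 *   version(2) + group cipher(4) + pairwise count(2) + pairwise suites(4*m) +
 *   AKM count(2) + AKM suites(4*n) + RSN capabilities(2)
 *   [+ PMKID count(2) + PMKID list(16 each), which this parser ignores].
 */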
/*+
*
* Description:
* Set WPA IEs
*
* Parameters:
* In:
* pMgmtHandle - Pointer to management object
* Out:
* pRSNIEs - Pointer to the RSN IE to set.
*
* Return Value: length of IEs.
*
-*/
unsigned int
WPA2uSetIEs(void *pMgmtHandle,
PWLAN_IE_RSN pRSNIEs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
PBYTE pbyBuffer = NULL;
unsigned int ii = 0;
PWORD pwPMKID = NULL;
if (pRSNIEs == NULL) {
return(0);
}
if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
/* WPA2 IE */
pbyBuffer = (PBYTE) pRSNIEs;
pRSNIEs->byElementID = WLAN_EID_RSN;
pRSNIEs->len = 6; //Version(2)+GK(4)
pRSNIEs->wVersion = 1;
//Group Key Cipher Suite
pRSNIEs->abyRSN[0] = 0x00;
pRSNIEs->abyRSN[1] = 0x0F;
pRSNIEs->abyRSN[2] = 0xAC;
if (pMgmt->byCSSGK == KEY_CTL_WEP) {
pRSNIEs->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
} else if (pMgmt->byCSSGK == KEY_CTL_TKIP) {
pRSNIEs->abyRSN[3] = WLAN_11i_CSS_TKIP;
} else if (pMgmt->byCSSGK == KEY_CTL_CCMP) {
pRSNIEs->abyRSN[3] = WLAN_11i_CSS_CCMP;
} else {
pRSNIEs->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
}
// Pairwise Key Cipher Suite
pRSNIEs->abyRSN[4] = 1;
pRSNIEs->abyRSN[5] = 0;
pRSNIEs->abyRSN[6] = 0x00;
pRSNIEs->abyRSN[7] = 0x0F;
pRSNIEs->abyRSN[8] = 0xAC;
if (pMgmt->byCSSPK == KEY_CTL_TKIP) {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_TKIP;
} else if (pMgmt->byCSSPK == KEY_CTL_CCMP) {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_CCMP;
} else if (pMgmt->byCSSPK == KEY_CTL_NONE) {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
} else {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
}
pRSNIEs->len += 6;
// Auth Key Management Suite
pRSNIEs->abyRSN[10] = 1;
pRSNIEs->abyRSN[11] = 0;
pRSNIEs->abyRSN[12] = 0x00;
pRSNIEs->abyRSN[13] = 0x0F;
pRSNIEs->abyRSN[14] = 0xAC;
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK) {
pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_PSK;
} else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
} else {
pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
}
pRSNIEs->len +=6;
// RSN Capabilites
if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == TRUE) {
memcpy(&pRSNIEs->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
pRSNIEs->abyRSN[16] = 0;
pRSNIEs->abyRSN[17] = 0;
}
pRSNIEs->len +=2;
if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
(pMgmt->bRoaming == TRUE) &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
/* RSN PMKID, pointer to PMKID count */
pwPMKID = (PWORD)(&pRSNIEs->abyRSN[18]);
*pwPMKID = 0; /* Initialize PMKID count */
pbyBuffer = &pRSNIEs->abyRSN[20]; /* Point to PMKID list */
for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
if (!memcmp(&pMgmt->
gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0],
pMgmt->abyCurrBSSID,
ETH_ALEN)) {
(*pwPMKID)++;
memcpy(pbyBuffer,
pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID,
16);
pbyBuffer += 16;
}
}
if (*pwPMKID != 0) {
pRSNIEs->len += (2 + (*pwPMKID)*16);
} else {
pbyBuffer = &pRSNIEs->abyRSN[18];
}
}
return(pRSNIEs->len + WLAN_IEHDR_LEN);
}
return(0);
}
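/*
 * Illustrative result (assuming CCMP group, CCMP pairwise, PSK auth and
 * an empty PMKID cache): the element built above should be the 22 bytes
 * 30 14 01 00 00 0F AC 04 01 00 00 0F AC 04 01 00 00 0F AC 02 00 00.
 */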
| gpl-2.0 |
emercs/BeagleBone-linux | arch/powerpc/boot/treeboot-bamboo.c | 14120 | 1041 | /*
* Copyright IBM Corporation, 2007
* Josh Boyer <jwboyer@linux.vnet.ibm.com>
*
* Based on ebony wrapper:
* Copyright 2007 David Gibson, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2 of the License
*/
#include "ops.h"
#include "stdio.h"
#include "44x.h"
#include "stdlib.h"
BSS_STACK(4096);
#define PIBS_MAC0 0xfffc0400
#define PIBS_MAC1 0xfffc0500
char pibs_mac0[6];
char pibs_mac1[6];
static void read_pibs_mac(void)
{
unsigned long long mac64;
mac64 = strtoull((char *)PIBS_MAC0, 0, 16);
memcpy(&pibs_mac0, (char *)&mac64+2, 6);
mac64 = strtoull((char *)PIBS_MAC1, 0, 16);
memcpy(&pibs_mac1, (char *)&mac64+2, 6);
}
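/*
 * The PIBS firmware is assumed to store each MAC address as an ASCII hex
 * string at the fixed addresses above; strtoull() turns it into a 64-bit
 * value and, the layout being big-endian, the last six bytes of that
 * value hold the 48-bit MAC copied out here.
 */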
void platform_init(void)
{
unsigned long end_of_ram = 0x8000000;
unsigned long avail_ram = end_of_ram - (unsigned long)_end;
simple_alloc_init(_end, avail_ram, 32, 64);
read_pibs_mac();
bamboo_init((u8 *)&pibs_mac0, (u8 *)&pibs_mac1);
}
| gpl-2.0 |
megraf/asuswrt-merlin | release/src-rt-7.x.main/src/linux/linux-2.6.36/fs/gfs2/xattr.c | 41 | 34378 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>
#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "xattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * ea_calc_size - returns the actual number of bytes the request will take up
* (not counting any unstuffed data blocks)
* @sdp:
* @er:
* @size:
*
* Returns: 1 if the EA should be stuffed
*/
static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
unsigned int *size)
{
unsigned int jbsize = sdp->sd_jbsize;
/* Stuffed */
*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);
if (*size <= jbsize)
return 1;
/* Unstuffed */
*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
(sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);
return 0;
}
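/*
 * Worked example (sizes are illustrative): with a 16-byte gfs2_ea_header,
 * a 10-byte name and 100 bytes of data the stuffed size is
 * ALIGN(16 + 10 + 100, 8) = 128, which fits in one journaled block, so
 * the xattr is stored stuffed; larger requests fall through to the
 * unstuffed size, which only counts the block pointers, not the data
 * blocks themselves.
 */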
static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
{
unsigned int size;
if (dsize > GFS2_EA_MAX_DATA_LEN)
return -ERANGE;
ea_calc_size(sdp, nsize, dsize, &size);
/* This can only happen with 512 byte blocks */
if (size > sdp->sd_jbsize)
return -ERANGE;
return 0;
}
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, void *private);
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
ea_call_t ea_call, void *data)
{
struct gfs2_ea_header *ea, *prev = NULL;
int error = 0;
if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
return -EIO;
for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
if (!GFS2_EA_REC_LEN(ea))
goto fail;
if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
bh->b_data + bh->b_size))
goto fail;
if (!GFS2_EATYPE_VALID(ea->ea_type))
goto fail;
error = ea_call(ip, bh, ea, prev, data);
if (error)
return error;
if (GFS2_EA_IS_LAST(ea)) {
if ((char *)GFS2_EA2NEXT(ea) !=
bh->b_data + bh->b_size)
goto fail;
break;
}
}
return error;
fail:
gfs2_consist_inode(ip);
return -EIO;
}
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
struct buffer_head *bh, *eabh;
__be64 *eablk, *end;
int error;
error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
if (error)
return error;
if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
error = ea_foreach_i(ip, bh, ea_call, data);
goto out;
}
if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
error = -EIO;
goto out;
}
eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
for (; eablk < end; eablk++) {
u64 bn;
if (!*eablk)
break;
bn = be64_to_cpu(*eablk);
error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
if (error)
break;
error = ea_foreach_i(ip, eabh, ea_call, data);
brelse(eabh);
if (error)
break;
}
out:
brelse(bh);
return error;
}
struct ea_find {
int type;
const char *name;
size_t namel;
struct gfs2_ea_location *ef_el;
};
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private)
{
struct ea_find *ef = private;
if (ea->ea_type == GFS2_EATYPE_UNUSED)
return 0;
if (ea->ea_type == ef->type) {
if (ea->ea_name_len == ef->namel &&
!memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
struct gfs2_ea_location *el = ef->ef_el;
get_bh(bh);
el->el_bh = bh;
el->el_ea = ea;
el->el_prev = prev;
return 1;
}
}
return 0;
}
static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
struct gfs2_ea_location *el)
{
struct ea_find ef;
int error;
ef.type = type;
ef.name = name;
ef.namel = strlen(name);
ef.ef_el = el;
memset(el, 0, sizeof(struct gfs2_ea_location));
error = ea_foreach(ip, ea_find_i, &ef);
if (error > 0)
return 0;
return error;
}
/**
* ea_dealloc_unstuffed -
* @ip:
* @bh:
* @ea:
* @prev:
* @private:
*
* Take advantage of the fact that all unstuffed blocks are
* allocated from the same RG. But watch, this may not always
* be true.
*
* Returns: errno
*/
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, void *private)
{
int *leave = private;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
struct gfs2_holder rg_gh;
struct buffer_head *dibh;
__be64 *dataptrs;
u64 bn = 0;
u64 bstart = 0;
unsigned int blen = 0;
unsigned int blks = 0;
unsigned int x;
int error;
if (GFS2_EA_IS_STUFFED(ea))
return 0;
dataptrs = GFS2_EA2DATAPTRS(ea);
for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
if (*dataptrs) {
blks++;
bn = be64_to_cpu(*dataptrs);
}
}
if (!blks)
return 0;
rgd = gfs2_blk2rgrpd(sdp, bn);
if (!rgd) {
gfs2_consist_inode(ip);
return -EIO;
}
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
if (error)
return error;
error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
RES_EATTR + RES_STATFS + RES_QUOTA, blks);
if (error)
goto out_gunlock;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
dataptrs = GFS2_EA2DATAPTRS(ea);
for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
if (!*dataptrs)
break;
bn = be64_to_cpu(*dataptrs);
if (bstart + blen == bn)
blen++;
else {
if (bstart)
gfs2_free_meta(ip, bstart, blen);
bstart = bn;
blen = 1;
}
*dataptrs = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
gfs2_free_meta(ip, bstart, blen);
if (prev && !leave) {
u32 len;
len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
prev->ea_rec_len = cpu_to_be32(len);
if (GFS2_EA_IS_LAST(ea))
prev->ea_flags |= GFS2_EAFLAG_LAST;
} else {
ea->ea_type = GFS2_EATYPE_UNUSED;
ea->ea_num_ptrs = 0;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
gfs2_trans_end(sdp);
out_gunlock:
gfs2_glock_dq_uninit(&rg_gh);
return error;
}
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, int leave)
{
struct gfs2_alloc *al;
int error;
al = gfs2_alloc_get(ip);
if (!al)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
goto out_alloc;
error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
if (error)
goto out_quota;
error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);
gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
gfs2_quota_unhold(ip);
out_alloc:
gfs2_alloc_put(ip);
return error;
}
struct ea_list {
struct gfs2_ea_request *ei_er;
unsigned int ei_size;
};
static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
{
switch (ea->ea_type) {
case GFS2_EATYPE_USR:
return 5 + ea->ea_name_len + 1;
case GFS2_EATYPE_SYS:
return 7 + ea->ea_name_len + 1;
case GFS2_EATYPE_SECURITY:
return 9 + ea->ea_name_len + 1;
default:
return 0;
}
}
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private)
{
struct ea_list *ei = private;
struct gfs2_ea_request *er = ei->ei_er;
unsigned int ea_size = gfs2_ea_strlen(ea);
if (ea->ea_type == GFS2_EATYPE_UNUSED)
return 0;
if (er->er_data_len) {
char *prefix = NULL;
unsigned int l = 0;
char c = 0;
if (ei->ei_size + ea_size > er->er_data_len)
return -ERANGE;
switch (ea->ea_type) {
case GFS2_EATYPE_USR:
prefix = "user.";
l = 5;
break;
case GFS2_EATYPE_SYS:
prefix = "system.";
l = 7;
break;
case GFS2_EATYPE_SECURITY:
prefix = "security.";
l = 9;
break;
}
BUG_ON(l == 0);
memcpy(er->er_data + ei->ei_size, prefix, l);
memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
ea->ea_name_len);
memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
}
ei->ei_size += ea_size;
return 0;
}
/**
* gfs2_listxattr - List gfs2 extended attributes
* @dentry: The dentry whose inode we are interested in
* @buffer: The buffer to write the results
* @size: The size of the buffer
*
* Returns: actual size of data on success, -errno on error
*/
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_ea_request er;
struct gfs2_holder i_gh;
int error;
memset(&er, 0, sizeof(struct gfs2_ea_request));
if (size) {
er.er_data = buffer;
er.er_data_len = size;
}
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return error;
if (ip->i_eattr) {
struct ea_list ei = { .ei_er = &er, .ei_size = 0 };
error = ea_foreach(ip, ea_list_i, &ei);
if (!error)
error = ei.ei_size;
}
gfs2_glock_dq_uninit(&i_gh);
return error;
}
/**
* ea_get_unstuffed - actually copies the unstuffed data into the
* request buffer
* @ip: The GFS2 inode
* @ea: The extended attribute header structure
* @data: The data to be copied
*
* Returns: errno
*/
static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
char *data)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head **bh;
unsigned int amount = GFS2_EA_DATA_LEN(ea);
unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
unsigned int x;
int error = 0;
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
if (!bh)
return -ENOMEM;
for (x = 0; x < nptrs; x++) {
error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
bh + x);
if (error) {
while (x--)
brelse(bh[x]);
goto out;
}
dataptrs++;
}
for (x = 0; x < nptrs; x++) {
error = gfs2_meta_wait(sdp, bh[x]);
if (error) {
for (; x < nptrs; x++)
brelse(bh[x]);
goto out;
}
if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
for (; x < nptrs; x++)
brelse(bh[x]);
error = -EIO;
goto out;
}
memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
(sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
amount -= sdp->sd_jbsize;
data += sdp->sd_jbsize;
brelse(bh[x]);
}
out:
kfree(bh);
return error;
}
static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
char *data, size_t size)
{
int ret;
size_t len = GFS2_EA_DATA_LEN(el->el_ea);
if (len > size)
return -ERANGE;
if (GFS2_EA_IS_STUFFED(el->el_ea)) {
memcpy(data, GFS2_EA2DATA(el->el_ea), len);
return len;
}
ret = ea_get_unstuffed(ip, el->el_ea, data);
if (ret < 0)
return ret;
return len;
}
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
struct gfs2_ea_location el;
int error;
int len;
char *data;
error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
if (error)
return error;
if (!el.el_ea)
goto out;
if (!GFS2_EA_DATA_LEN(el.el_ea))
goto out;
len = GFS2_EA_DATA_LEN(el.el_ea);
data = kmalloc(len, GFP_NOFS);
error = -ENOMEM;
if (data == NULL)
goto out;
error = gfs2_ea_get_copy(ip, &el, data, len);
if (error == 0)
error = len;
*ppdata = data;
out:
brelse(el.el_bh);
return error;
}
/**
* gfs2_xattr_get - Get a GFS2 extended attribute
* @inode: The inode
* @name: The name of the extended attribute
* @buffer: The buffer to write the result into
* @size: The size of the buffer
* @type: The type of extended attribute
*
* Returns: actual size of data on success, -errno on error
*/
static int gfs2_xattr_get(struct dentry *dentry, const char *name,
void *buffer, size_t size, int type)
{
struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_ea_location el;
int error;
if (!ip->i_eattr)
return -ENODATA;
if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
return -EINVAL;
error = gfs2_ea_find(ip, type, name, &el);
if (error)
return error;
if (!el.el_ea)
return -ENODATA;
if (size)
error = gfs2_ea_get_copy(ip, &el, buffer, size);
else
error = GFS2_EA_DATA_LEN(el.el_ea);
brelse(el.el_bh);
return error;
}
/**
* ea_alloc_blk - allocates a new block for extended attributes.
* @ip: A pointer to the inode that's getting extended attributes
* @bhp: Pointer to pointer to a struct buffer_head
*
* Returns: errno
*/
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_header *ea;
unsigned int n = 1;
u64 block;
int error;
error = gfs2_alloc_block(ip, &block, &n);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
*bhp = gfs2_meta_new(ip->i_gl, block);
gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
ea = GFS2_EA_BH2FIRST(*bhp);
ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
ea->ea_type = GFS2_EATYPE_UNUSED;
ea->ea_flags = GFS2_EAFLAG_LAST;
ea->ea_num_ptrs = 0;
gfs2_add_inode_blocks(&ip->i_inode, 1);
return 0;
}
/**
* ea_write - writes the request info to an ea, creating new blocks if
* necessary
* @ip: inode that is being modified
* @ea: the location of the new ea in a block
* @er: the write request
*
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
*
* returns : errno
*/
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
struct gfs2_ea_request *er)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
int error;
ea->ea_data_len = cpu_to_be32(er->er_data_len);
ea->ea_name_len = er->er_name_len;
ea->ea_type = er->er_type;
ea->__pad = 0;
memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
ea->ea_num_ptrs = 0;
memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
} else {
__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
const char *data = er->er_data;
unsigned int data_len = er->er_data_len;
unsigned int copy;
unsigned int x;
ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
for (x = 0; x < ea->ea_num_ptrs; x++) {
struct buffer_head *bh;
u64 block;
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
error = gfs2_alloc_block(ip, &block, &n);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, block, 1);
bh = gfs2_meta_new(ip->i_gl, block);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
gfs2_add_inode_blocks(&ip->i_inode, 1);
copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
data_len;
memcpy(bh->b_data + mh_size, data, copy);
if (copy < sdp->sd_jbsize)
memset(bh->b_data + mh_size + copy, 0,
sdp->sd_jbsize - copy);
*dataptr++ = cpu_to_be64(bh->b_blocknr);
data += copy;
data_len -= copy;
brelse(bh);
}
gfs2_assert_withdraw(sdp, !data_len);
}
return 0;
}
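/*
 * To summarise the two paths above: a stuffed xattr keeps its value
 * inline right after the name, while an unstuffed one stores an array of
 * 64-bit block pointers in the header area and spreads the value across
 * freshly allocated metadata blocks, sd_jbsize bytes at a time.
 */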
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
struct gfs2_ea_request *er, void *private);
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
struct gfs2_alloc *al;
struct buffer_head *dibh;
int error;
al = gfs2_alloc_get(ip);
if (!al)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto out;
al->al_requested = blks;
error = gfs2_inplace_reserve(ip);
if (error)
goto out_gunlock_q;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
blks + al->al_rgd->rd_length +
RES_DINODE + RES_STATFS + RES_QUOTA, 0);
if (error)
goto out_ipres;
error = skeleton_call(ip, er, private);
if (error)
goto out_end_trans;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
out_end_trans:
gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
gfs2_inplace_release(ip);
out_gunlock_q:
gfs2_quota_unlock(ip);
out:
gfs2_alloc_put(ip);
return error;
}
static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
void *private)
{
struct buffer_head *bh;
int error;
error = ea_alloc_blk(ip, &bh);
if (error)
return error;
ip->i_eattr = bh->b_blocknr;
error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
brelse(bh);
return error;
}
/**
* ea_init - initializes a new eattr block
* @ip:
* @er:
*
* Returns: errno
*/
static int ea_init(struct gfs2_inode *ip, int type, const char *name,
const void *data, size_t size)
{
struct gfs2_ea_request er;
unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
unsigned int blks = 1;
er.er_type = type;
er.er_name = name;
er.er_name_len = strlen(name);
er.er_data = (void *)data;
er.er_data_len = size;
if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
blks += DIV_ROUND_UP(er.er_data_len, jbsize);
return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
}
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
u32 ea_size = GFS2_EA_SIZE(ea);
struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
ea_size);
u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
int last = ea->ea_flags & GFS2_EAFLAG_LAST;
ea->ea_rec_len = cpu_to_be32(ea_size);
ea->ea_flags ^= last;
new->ea_rec_len = cpu_to_be32(new_size);
new->ea_flags = last;
return new;
}
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
struct gfs2_ea_location *el)
{
struct gfs2_ea_header *ea = el->el_ea;
struct gfs2_ea_header *prev = el->el_prev;
u32 len;
gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
ea->ea_type = GFS2_EATYPE_UNUSED;
return;
} else if (GFS2_EA2NEXT(prev) != ea) {
prev = GFS2_EA2NEXT(prev);
gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
}
len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
prev->ea_rec_len = cpu_to_be32(len);
if (GFS2_EA_IS_LAST(ea))
prev->ea_flags |= GFS2_EAFLAG_LAST;
}
struct ea_set {
int ea_split;
struct gfs2_ea_request *es_er;
struct gfs2_ea_location *es_el;
struct buffer_head *es_bh;
struct gfs2_ea_header *es_ea;
};
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct ea_set *es)
{
struct gfs2_ea_request *er = es->es_er;
struct buffer_head *dibh;
int error;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
if (error)
return error;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
if (es->ea_split)
ea = ea_split_ea(ea);
ea_write(ip, ea, er);
if (es->es_el)
ea_set_remove_stuffed(ip, es->es_el);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
out:
gfs2_trans_end(GFS2_SB(&ip->i_inode));
return error;
}
static int ea_set_simple_alloc(struct gfs2_inode *ip,
struct gfs2_ea_request *er, void *private)
{
struct ea_set *es = private;
struct gfs2_ea_header *ea = es->es_ea;
int error;
gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
if (es->ea_split)
ea = ea_split_ea(ea);
error = ea_write(ip, ea, er);
if (error)
return error;
if (es->es_el)
ea_set_remove_stuffed(ip, es->es_el);
return 0;
}
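/*
 * ea_set_simple - ea_foreach() callback used by ea_set_i().
 * Returns 0 when the request does not fit at @ea (keep scanning), 1 once
 * the attribute has been written (directly for a stuffed value, or via
 * ea_alloc_skeleton() for an unstuffed one), or a negative errno.
 */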
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
void *private)
{
struct ea_set *es = private;
unsigned int size;
int stuffed;
int error;
stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
es->es_er->er_data_len, &size);
if (ea->ea_type == GFS2_EATYPE_UNUSED) {
if (GFS2_EA_REC_LEN(ea) < size)
return 0;
if (!GFS2_EA_IS_STUFFED(ea)) {
error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
if (error)
return error;
}
es->ea_split = 0;
} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
es->ea_split = 1;
else
return 0;
if (stuffed) {
error = ea_set_simple_noalloc(ip, bh, ea, es);
if (error)
return error;
} else {
unsigned int blks;
es->es_bh = bh;
es->es_ea = ea;
blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
GFS2_SB(&ip->i_inode)->sd_jbsize);
error = ea_alloc_skeleton(ip, es->es_er, blks,
ea_set_simple_alloc, es);
if (error)
return error;
}
return 1;
}
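/*
 * ea_set_block - fall-back used when no existing EA block has room.
 * Allocates a fresh EA block, links it from the indirect pointer block
 * (creating that block and setting GFS2_DIF_EA_INDIRECT the first time)
 * and writes the request into it; runs under ea_alloc_skeleton().
 */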
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
void *private)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *indbh, *newbh;
__be64 *eablk;
int error;
int mh_size = sizeof(struct gfs2_meta_header);
if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
__be64 *end;
error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
&indbh);
if (error)
return error;
if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
error = -EIO;
goto out;
}
eablk = (__be64 *)(indbh->b_data + mh_size);
end = eablk + sdp->sd_inptrs;
for (; eablk < end; eablk++)
if (!*eablk)
break;
if (eablk == end) {
error = -ENOSPC;
goto out;
}
gfs2_trans_add_bh(ip->i_gl, indbh, 1);
} else {
u64 blk;
unsigned int n = 1;
error = gfs2_alloc_block(ip, &blk, &n);
if (error)
return error;
gfs2_trans_add_unrevoke(sdp, blk, 1);
indbh = gfs2_meta_new(ip->i_gl, blk);
gfs2_trans_add_bh(ip->i_gl, indbh, 1);
gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
gfs2_buffer_clear_tail(indbh, mh_size);
eablk = (__be64 *)(indbh->b_data + mh_size);
*eablk = cpu_to_be64(ip->i_eattr);
ip->i_eattr = blk;
ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
gfs2_add_inode_blocks(&ip->i_inode, 1);
eablk++;
}
error = ea_alloc_blk(ip, &newbh);
if (error)
goto out;
*eablk = cpu_to_be64((u64)newbh->b_blocknr);
error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
brelse(newbh);
if (error)
goto out;
if (private)
ea_set_remove_stuffed(ip, private);
out:
brelse(indbh);
return error;
}
static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
const void *value, size_t size, struct gfs2_ea_location *el)
{
struct gfs2_ea_request er;
struct ea_set es;
unsigned int blks = 2;
int error;
er.er_type = type;
er.er_name = name;
er.er_data = (void *)value;
er.er_name_len = strlen(name);
er.er_data_len = size;
memset(&es, 0, sizeof(struct ea_set));
es.es_er = &er;
es.es_el = el;
error = ea_foreach(ip, ea_set_simple, &es);
if (error > 0)
return 0;
if (error)
return error;
if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
blks++;
if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
}
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
struct gfs2_ea_location *el)
{
if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
el->el_prev = GFS2_EA2NEXT(el->el_prev);
gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
GFS2_EA2NEXT(el->el_prev) == el->el_ea);
}
return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
struct gfs2_ea_header *ea = el->el_ea;
struct gfs2_ea_header *prev = el->el_prev;
struct buffer_head *dibh;
int error;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
if (error)
return error;
gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
if (prev) {
u32 len;
len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
prev->ea_rec_len = cpu_to_be32(len);
if (GFS2_EA_IS_LAST(ea))
prev->ea_flags |= GFS2_EAFLAG_LAST;
} else {
ea->ea_type = GFS2_EATYPE_UNUSED;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
gfs2_trans_end(GFS2_SB(&ip->i_inode));
return error;
}
/**
* gfs2_xattr_remove - Remove a GFS2 extended attribute
* @ip: The inode
* @type: The type of the extended attribute
* @name: The name of the extended attribute
*
* This is not called directly by the VFS since we use the (common)
* scheme of making a "set with NULL data" mean a remove request. Note
* that this is different from a set with zero length data.
*
* Returns: 0, or errno on failure
*/
static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
{
struct gfs2_ea_location el;
int error;
if (!ip->i_eattr)
return -ENODATA;
error = gfs2_ea_find(ip, type, name, &el);
if (error)
return error;
if (!el.el_ea)
return -ENODATA;
if (GFS2_EA_IS_STUFFED(el.el_ea))
error = ea_remove_stuffed(ip, &el);
else
error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
brelse(el.el_bh);
return error;
}
/**
* __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
* @inode: The inode
* @name: The name of the extended attribute
* @value: The value of the extended attribute (NULL for remove)
* @size: The size of the @value argument
* @flags: Create or Replace
* @type: The type of the extended attribute
*
* See gfs2_xattr_remove() for details of the removal of xattrs.
*
* Returns: 0 or errno on failure
*/
int __gfs2_xattr_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags, int type)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_ea_location el;
unsigned int namel = strlen(name);
int error;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
if (namel > GFS2_EA_MAX_NAME_LEN)
return -ERANGE;
if (value == NULL)
return gfs2_xattr_remove(ip, type, name);
if (ea_check_size(sdp, namel, size))
return -ERANGE;
if (!ip->i_eattr) {
if (flags & XATTR_REPLACE)
return -ENODATA;
return ea_init(ip, type, name, value, size);
}
error = gfs2_ea_find(ip, type, name, &el);
if (error)
return error;
if (el.el_ea) {
if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
brelse(el.el_bh);
return -EPERM;
}
error = -EEXIST;
if (!(flags & XATTR_CREATE)) {
int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
error = ea_set_i(ip, type, name, value, size, &el);
if (!error && unstuffed)
ea_set_remove_unstuffed(ip, &el);
}
brelse(el.el_bh);
return error;
}
error = -ENODATA;
if (!(flags & XATTR_REPLACE))
error = ea_set_i(ip, type, name, value, size, NULL);
return error;
}
static int gfs2_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags, int type)
{
return __gfs2_xattr_set(dentry->d_inode, name, value,
size, flags, type);
}
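/*
 * ea_acl_chmod_unstuffed - rewrite the data blocks of an unstuffed ACL.
 * Reads every block referenced by the EA's data pointers and copies @data
 * over them, sd_jbsize bytes per block. The transaction opened here is
 * ended by gfs2_xattr_acl_chmod() after the dinode has been updated;
 * the fail: path ends it early on error.
 */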
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
struct gfs2_ea_header *ea, char *data)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head **bh;
unsigned int amount = GFS2_EA_DATA_LEN(ea);
unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
unsigned int x;
int error;
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
if (!bh)
return -ENOMEM;
error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
if (error)
goto out;
for (x = 0; x < nptrs; x++) {
error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
bh + x);
if (error) {
while (x--)
brelse(bh[x]);
goto fail;
}
dataptrs++;
}
for (x = 0; x < nptrs; x++) {
error = gfs2_meta_wait(sdp, bh[x]);
if (error) {
for (; x < nptrs; x++)
brelse(bh[x]);
goto fail;
}
if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
for (; x < nptrs; x++)
brelse(bh[x]);
error = -EIO;
goto fail;
}
gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
(sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
amount -= sdp->sd_jbsize;
data += sdp->sd_jbsize;
brelse(bh[x]);
}
out:
kfree(bh);
return error;
fail:
gfs2_trans_end(sdp);
kfree(bh);
return error;
}
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
struct inode *inode = &ip->i_inode;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_location el;
struct buffer_head *dibh;
int error;
error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
if (error)
return error;
if (GFS2_EA_IS_STUFFED(el.el_ea)) {
error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
if (error == 0) {
gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
memcpy(GFS2_EA2DATA(el.el_ea), data,
GFS2_EA_DATA_LEN(el.el_ea));
}
} else {
error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
}
brelse(el.el_bh);
if (error)
return error;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out_trans_end;
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
int error;
error = vmtruncate(inode, attr->ia_size);
gfs2_assert_warn(GFS2_SB(inode), !error);
}
setattr_copy(inode, attr);
mark_inode_dirty(inode);
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
out_trans_end:
gfs2_trans_end(sdp);
return error;
}
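/*
 * ea_dealloc_indirect - free all EA blocks referenced by the indirect block.
 * The pointer array is walked twice: first to build and lock the list of
 * resource groups involved, then, inside a single transaction, to free the
 * blocks, clear the pointers and drop GFS2_DIF_EA_INDIRECT.
 */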
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrp_list rlist;
struct buffer_head *indbh, *dibh;
__be64 *eablk, *end;
unsigned int rg_blocks = 0;
u64 bstart = 0;
unsigned int blen = 0;
unsigned int blks = 0;
unsigned int x;
int error;
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
if (error)
return error;
if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
error = -EIO;
goto out;
}
eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
end = eablk + sdp->sd_inptrs;
for (; eablk < end; eablk++) {
u64 bn;
if (!*eablk)
break;
bn = be64_to_cpu(*eablk);
if (bstart + blen == bn)
blen++;
else {
if (bstart)
gfs2_rlist_add(sdp, &rlist, bstart);
bstart = bn;
blen = 1;
}
blks++;
}
if (bstart)
gfs2_rlist_add(sdp, &rlist, bstart);
else
goto out;
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd;
rgd = rlist.rl_ghs[x].gh_gl->gl_object;
rg_blocks += rgd->rd_length;
}
error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
if (error)
goto out_rlist_free;
error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
RES_STATFS + RES_QUOTA, blks);
if (error)
goto out_gunlock;
gfs2_trans_add_bh(ip->i_gl, indbh, 1);
eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
bstart = 0;
blen = 0;
for (; eablk < end; eablk++) {
u64 bn;
if (!*eablk)
break;
bn = be64_to_cpu(*eablk);
if (bstart + blen == bn)
blen++;
else {
if (bstart)
gfs2_free_meta(ip, bstart, blen);
bstart = bn;
blen = 1;
}
*eablk = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
gfs2_free_meta(ip, bstart, blen);
ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
gfs2_trans_end(sdp);
out_gunlock:
gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
gfs2_rlist_free(&rlist);
out:
brelse(indbh);
return error;
}
static int ea_dealloc_block(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_alloc *al = ip->i_alloc;
struct gfs2_rgrpd *rgd;
struct buffer_head *dibh;
int error;
rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
if (!rgd) {
gfs2_consist_inode(ip);
return -EIO;
}
error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
&al->al_rgd_gh);
if (error)
return error;
error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
RES_QUOTA, 1);
if (error)
goto out_gunlock;
gfs2_free_meta(ip, ip->i_eattr, 1);
ip->i_eattr = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
}
gfs2_trans_end(sdp);
out_gunlock:
gfs2_glock_dq_uninit(&al->al_rgd_gh);
return error;
}
/**
* gfs2_ea_dealloc - deallocate the extended attribute fork
* @ip: the inode
*
* Returns: errno
*/
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
struct gfs2_alloc *al;
int error;
al = gfs2_alloc_get(ip);
if (!al)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
goto out_alloc;
error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
if (error)
goto out_quota;
error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
if (error)
goto out_rindex;
if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
error = ea_dealloc_indirect(ip);
if (error)
goto out_rindex;
}
error = ea_dealloc_block(ip);
out_rindex:
gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
gfs2_quota_unhold(ip);
out_alloc:
gfs2_alloc_put(ip);
return error;
}
static const struct xattr_handler gfs2_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.flags = GFS2_EATYPE_USR,
.get = gfs2_xattr_get,
.set = gfs2_xattr_set,
};
static const struct xattr_handler gfs2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.flags = GFS2_EATYPE_SECURITY,
.get = gfs2_xattr_get,
.set = gfs2_xattr_set,
};
const struct xattr_handler *gfs2_xattr_handlers[] = {
&gfs2_xattr_user_handler,
&gfs2_xattr_security_handler,
&gfs2_xattr_system_handler,
NULL,
};
| gpl-2.0 |
sigma-random/asuswrt-merlin | release/src-rt-7.x.main/src/linux/linux-2.6.36/drivers/net/e1000/e1000_param.c | 41 | 22094 | /*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
/* Transmit Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
*
* Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
* Valid Range: 80-4096 for 82544 and newer
*
* Default Value: 256
*/
E1000_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Speed Override
*
* Valid Range: 0, 10, 100, 1000
* - 0 - auto-negotiate at all supported speeds
* - 10 - only link at 10 Mbps
* - 100 - only link at 100 Mbps
* - 1000 - only link at 1000 Mbps
*
* Default Value: 0
*/
E1000_PARAM(Speed, "Speed setting");
/* User Specified Duplex Override
*
* Valid Range: 0-2
* - 0 - auto-negotiate for duplex
* - 1 - only link at half duplex
* - 2 - only link at full duplex
*
* Default Value: 0
*/
E1000_PARAM(Duplex, "Duplex setting");
/* Auto-negotiation Advertisement Override
*
* Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
*
* The AutoNeg value is a bit mask describing which speed and duplex
* combinations should be advertised during auto-negotiation.
* The supported speed and duplex modes are listed below
*
* Bit            7     6     5     4     3     2     1     0
* Speed (Mbps)   N/A   N/A   1000  N/A   100   100   10    10
* Duplex                     Full        Full  Half  Full  Half
*
* Default Value: 0x2F (copper); 0x20 (fiber)
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
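/*
 * For example, the default advertisement mask 0x2F corresponds to
 * 1000/FD, 100/FD, 100/HD, 10/FD and 10/HD in the an_list table used by
 * e1000_check_copper_options() below.
 */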
/* User Specified Flow Control Override
*
* Valid Range: 0-3
* - 0 - No Flow Control
* - 1 - Rx only, respond to PAUSE frames but do not generate them
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
* Valid Range: 0, 1
* - 0 - disables all checksum offload
* - 1 - enables receive IP/TCP/UDP checksum offload
* on 82543 and newer-based NICs
*
* Default Value: 1
*/
E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay typically needs to be set to something non-zero
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define DEFAULT_TIDV 8
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define DEFAULT_TADV 32
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define DEFAULT_RDTR 0
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define DEFAULT_RADV 8
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative, 4=simplified)
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
* Default Value: 0 (disabled)
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
const char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
const struct e1000_opt_list { int i; char *str; } *p;
} l;
} arg;
};
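/*
 * e1000_validate_option - check a single module parameter value.
 * Unset values are silently replaced by opt->def; enable/range/list values
 * are checked against the option definition, and an invalid value is
 * reported, forced back to opt->def and signalled with a -1 return.
 */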
static int __devinit e1000_validate_option(unsigned int *value,
const struct e1000_option *opt,
struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
e_dev_info("%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
e_dev_info("%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
e_dev_info("%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option: {
int i;
const struct e1000_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
e_dev_info("%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
e_dev_info("Invalid %s value specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
static void e1000_check_fiber_options(struct e1000_adapter *adapter);
static void e1000_check_copper_options(struct e1000_adapter *adapter);
/**
* e1000_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void __devinit e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
e_dev_warn("Warning: no configuration for board #%i "
"using defaults for all values\n", bd);
}
{ /* Transmit Descriptor Count */
struct e1000_tx_ring *tx_ring = adapter->tx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_TXD),
.def = E1000_DEFAULT_TXD,
.arg = { .r = {
.min = E1000_MIN_TXD,
.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD
}}
};
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
e1000_validate_option(&tx_ring->count, &opt, adapter);
tx_ring->count = ALIGN(tx_ring->count,
REQ_TX_DESCRIPTOR_MULTIPLE);
} else {
tx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
struct e1000_rx_ring *rx_ring = adapter->rx_ring;
int i;
e1000_mac_type mac_type = adapter->hw.mac_type;
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of "
__MODULE_STRING(E1000_DEFAULT_RXD),
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
}}
};
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
e1000_validate_option(&rx_ring->count, &opt, adapter);
rx_ring->count = ALIGN(rx_ring->count,
REQ_RX_DESCRIPTOR_MULTIPLE);
} else {
rx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
opt = (struct e1000_option) {
.type = enable_option,
.name = "Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
e1000_validate_option(&rx_csum, &opt, adapter);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
struct e1000_opt_list fc_list[] =
{{ E1000_FC_NONE, "Flow Control Disabled" },
{ E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
{ E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
{ E1000_FC_FULL, "Flow Control Enabled" },
{ E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = E1000_FC_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(fc_list),
.p = fc_list }}
};
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
}
{ /* Transmit Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TXDELAY,
.max = MAX_TXDELAY }}
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
e1000_validate_option(&adapter->tx_int_delay, &opt,
adapter);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TADV),
.def = DEFAULT_TADV,
.arg = { .r = { .min = MIN_TXABSDELAY,
.max = MAX_TXABSDELAY }}
};
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
}
{ /* Receive Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RXDELAY,
.max = MAX_RXDELAY }}
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
adapter);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Receive Absolute Interrupt Delay */
opt = (struct e1000_option) {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RADV),
.def = DEFAULT_RADV,
.arg = { .r = { .min = MIN_RXABSDELAY,
.max = MAX_RXABSDELAY }}
};
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
}
{ /* Interrupt Throttling Rate */
opt = (struct e1000_option) {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of " __MODULE_STRING(DEFAULT_ITR),
.def = DEFAULT_ITR,
.arg = { .r = { .min = MIN_ITR,
.max = MAX_ITR }}
};
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
switch (adapter->itr) {
case 0:
e_dev_info("%s turned off\n", opt.name);
break;
case 1:
e_dev_info("%s set to dynamic mode\n",
opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 3:
e_dev_info("%s set to dynamic conservative "
"mode\n", opt.name);
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
case 4:
e_dev_info("%s set to simplified "
"(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
* used as control */
adapter->itr_setting = adapter->itr & ~3;
break;
}
} else {
adapter->itr_setting = opt.def;
adapter->itr = 20000;
}
}
{ /* Smart Power Down */
opt = (struct e1000_option) {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
.def = OPTION_DISABLED
};
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
adapter->smart_power_down = spd;
} else {
adapter->smart_power_down = opt.def;
}
}
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
case e1000_media_type_internal_serdes:
e1000_check_fiber_options(adapter);
break;
case e1000_media_type_copper:
e1000_check_copper_options(adapter);
break;
default:
BUG();
}
}
/**
* e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
* @adapter: board private structure
*
* Handles speed and duplex options on fiber adapters
**/
static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
e_dev_info("Speed not valid for fiber adapters, parameter "
"ignored\n");
}
if (num_Duplex > bd) {
e_dev_info("Duplex not valid for fiber adapters, parameter "
"ignored\n");
}
if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
"adapters, parameter ignored\n");
}
}
/**
* e1000_check_copper_options - Range Checking for Link Options, Copper Version
* @adapter: board private structure
*
* Handles speed and duplex options on copper adapters
**/
static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
unsigned int speed, dplx, an;
int bd = adapter->bd_number;
{ /* Speed */
static const struct e1000_opt_list speed_list[] = {
{ 0, "" },
{ SPEED_10, "" },
{ SPEED_100, "" },
{ SPEED_1000, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Speed",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(speed_list),
.p = speed_list }}
};
if (num_Speed > bd) {
speed = Speed[bd];
e1000_validate_option(&speed, &opt, adapter);
} else {
speed = opt.def;
}
}
{ /* Duplex */
static const struct e1000_opt_list dplx_list[] = {
{ 0, "" },
{ HALF_DUPLEX, "" },
{ FULL_DUPLEX, "" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "Duplex",
.err = "parameter ignored",
.def = 0,
.arg = { .l = { .nr = ARRAY_SIZE(dplx_list),
.p = dplx_list }}
};
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
} else {
dplx = opt.def;
}
}
if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
e_dev_info("AutoNeg specified along with Speed or Duplex, "
"parameter ignored\n");
adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
} else { /* Autoneg */
static const struct e1000_opt_list an_list[] =
#define AA "AutoNeg advertising "
{{ 0x01, AA "10/HD" },
{ 0x02, AA "10/FD" },
{ 0x03, AA "10/FD, 10/HD" },
{ 0x04, AA "100/HD" },
{ 0x05, AA "100/HD, 10/HD" },
{ 0x06, AA "100/HD, 10/FD" },
{ 0x07, AA "100/HD, 10/FD, 10/HD" },
{ 0x08, AA "100/FD" },
{ 0x09, AA "100/FD, 10/HD" },
{ 0x0a, AA "100/FD, 10/FD" },
{ 0x0b, AA "100/FD, 10/FD, 10/HD" },
{ 0x0c, AA "100/FD, 100/HD" },
{ 0x0d, AA "100/FD, 100/HD, 10/HD" },
{ 0x0e, AA "100/FD, 100/HD, 10/FD" },
{ 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
{ 0x20, AA "1000/FD" },
{ 0x21, AA "1000/FD, 10/HD" },
{ 0x22, AA "1000/FD, 10/FD" },
{ 0x23, AA "1000/FD, 10/FD, 10/HD" },
{ 0x24, AA "1000/FD, 100/HD" },
{ 0x25, AA "1000/FD, 100/HD, 10/HD" },
{ 0x26, AA "1000/FD, 100/HD, 10/FD" },
{ 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
{ 0x28, AA "1000/FD, 100/FD" },
{ 0x29, AA "1000/FD, 100/FD, 10/HD" },
{ 0x2a, AA "1000/FD, 100/FD, 10/FD" },
{ 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
{ 0x2c, AA "1000/FD, 100/FD, 100/HD" },
{ 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
{ 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
{ 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
opt = (struct e1000_option) {
.type = list_option,
.name = "AutoNeg",
.err = "parameter ignored",
.def = AUTONEG_ADV_DEFAULT,
.arg = { .l = { .nr = ARRAY_SIZE(an_list),
.p = an_list }}
};
if (num_AutoNeg > bd) {
an = AutoNeg[bd];
e1000_validate_option(&an, &opt, adapter);
} else {
an = opt.def;
}
adapter->hw.autoneg_advertised = an;
}
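/*
 * The speed values (0, 10, 100, 1000) and the HALF_DUPLEX/FULL_DUPLEX
 * constants are chosen so that no two combinations add up to the same
 * value, which lets the switch below dispatch on speed + dplx directly
 * (e.g. SPEED_100 + FULL_DUPLEX forces 100 Mbps Full Duplex).
 */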
switch (speed + dplx) {
case 0:
adapter->hw.autoneg = adapter->fc_autoneg = 1;
if ((num_Speed > bd) && (speed != 0 || dplx != 0))
e_dev_info("Speed and duplex autonegotiation "
"enabled\n");
break;
case HALF_DUPLEX:
e_dev_info("Half Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
ADVERTISE_100_FULL |
ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_10 + FULL_DUPLEX:
e_dev_info("Forcing to 10 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_10_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100:
e_dev_info("100 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_half;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_100 + FULL_DUPLEX:
e_dev_info("Forcing to 100 Mbps Full Duplex\n");
adapter->hw.autoneg = adapter->fc_autoneg = 0;
adapter->hw.forced_speed_duplex = e1000_100_full;
adapter->hw.autoneg_advertised = 0;
break;
case SPEED_1000:
e_dev_info("1000 Mbps Speed specified without Duplex\n");
goto full_duplex_only;
case SPEED_1000 + HALF_DUPLEX:
e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
/* fall through */
case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
"only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
break;
default:
BUG();
}
/* Speed, AutoNeg and MDI/MDI-X must all play nice */
if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
"Setting MDI-X to a compatible value.\n");
}
}
| gpl-2.0 |
zhang-xin/kdi | arch/mips/pci/pci-ip27.c | 41 | 5838 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Christoph Hellwig (hch@lst.de)
* Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn0/hub.h>
/*
* Max #PCI busses we can handle; ie, max #PCI bridges.
*/
#define MAX_PCI_BUSSES 40
/*
* Max #PCI devices (like scsi controllers) we handle on a bus.
*/
#define MAX_DEVICES_PER_PCIBUS 8
/*
* XXX: No kmalloc available when we do our crosstalk scan,
* we should try to move it later in the boot process.
*/
static struct bridge_controller bridges[MAX_PCI_BUSSES];
/*
* Translate from irq to software PCI bus number and PCI slot.
*/
struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
extern struct pci_ops bridge_pci_ops;
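/*
 * bridge_probe - set up a PCI host controller for one Bridge ASIC.
 * Fills in the next free bridge_controller for the widget at
 * (@nasid, @widget_id), clears any pending interrupts, points all device
 * interrupts at slot 0 for now, enables byte swapping for the big PIO
 * windows, programs the interrupt/DMA target widget and finally registers
 * the controller with the PCI core.
 */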
int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{
unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc;
static int num_bridges = 0;
bridge_t *bridge;
int slot;
pci_probe_only = 1;
printk("a bridge\n");
/* XXX: kludge alert.. */
if (!num_bridges)
ioport_resource.end = ~0UL;
bc = &bridges[num_bridges];
bc->pc.pci_ops = &bridge_pci_ops;
bc->pc.mem_resource = &bc->mem;
bc->pc.io_resource = &bc->io;
bc->pc.index = num_bridges;
bc->mem.name = "Bridge PCI MEM";
bc->pc.mem_offset = offset;
bc->mem.start = 0;
bc->mem.end = ~0UL;
bc->mem.flags = IORESOURCE_MEM;
bc->io.name = "Bridge IO MEM";
bc->pc.io_offset = offset;
bc->io.start = 0UL;
bc->io.end = ~0UL;
bc->io.flags = IORESOURCE_IO;
bc->irq_cpu = smp_processor_id();
bc->widget_id = widget_id;
bc->nasid = nasid;
bc->baddr = (u64)masterwid << 60 | PCI64_ATTR_BAR;
/*
* point to this bridge
*/
bridge = (bridge_t *) RAW_NODE_SWIN_BASE(nasid, widget_id);
/*
* Clear all pending interrupts.
*/
bridge->b_int_rst_stat = BRIDGE_IRR_ALL_CLR;
/*
* Until otherwise set up, assume all interrupts are from slot 0
*/
bridge->b_int_device = 0x0;
/*
* swap pio's to pci mem and io space (big windows)
*/
bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
BRIDGE_CTRL_MEM_SWAP;
#ifdef CONFIG_PAGE_SIZE_4KB
bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
#else /* 16kB or larger */
bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
#endif
/*
* Hmm... IRIX sets additional bits in the address which
* are documented as reserved in the bridge docs.
*/
bridge->b_wid_int_upper = 0x8000 | (masterwid << 16);
bridge->b_wid_int_lower = 0x01800090; /* PI_INT_PEND_MOD off*/
bridge->b_dir_map = (masterwid << 20); /* DMA */
bridge->b_int_enable = 0;
for (slot = 0; slot < 8; slot ++) {
bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
bc->pci_int[slot] = -1;
}
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
bc->base = bridge;
register_pci_controller(&bc->pc);
num_bridges++;
return 0;
}
/*
* All observed requests have pin == 1. We could have a global here, that
* gets incremented and returned every time - unfortunately, pci_map_irq
* may be called on the same device over and over, and need to return the
* same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
*
* A given PCI device, in general, should be able to intr any of the cpus
* on any one of the hubs connected to its xbow.
*/
int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return 0;
}
/* Most MIPS systems have straight-forward swizzling needs. */
static inline u8 bridge_swizzle(u8 pin, u8 slot)
{
return (((pin - 1) + slot) % 4) + 1;
}
static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
{
while (dev->bus->parent) {
/* Move up the chain of bridges. */
dev = dev->bus->self;
}
return dev;
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
struct pci_dev *rdev = bridge_root_dev(dev);
int slot = PCI_SLOT(rdev->devfn);
int irq;
irq = bc->pci_int[slot];
if (irq == -1) {
irq = request_bridge_irq(bc);
if (irq < 0)
return irq;
bc->pci_int[slot] = irq;
}
irq_to_bridge[irq] = bc;
irq_to_slot[irq] = slot;
dev->irq = irq;
return 0;
}
/*
* Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
* to find the slot number in sense of the bridge device register.
* XXX This also means multiple devices might rely on conflicting bridge
* settings.
*/
static inline void pci_disable_swapping(struct pci_dev *dev)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
bridge_t *bridge = bc->base;
int slot = PCI_SLOT(dev->devfn);
/* Turn off byte swapping */
bridge->b_device[slot].reg &= ~BRIDGE_DEV_SWAP_DIR;
bridge->b_widget.w_tflush; /* Flush */
}
static inline void pci_enable_swapping(struct pci_dev *dev)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
bridge_t *bridge = bc->base;
int slot = PCI_SLOT(dev->devfn);
/* Turn on byte swapping */
bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
bridge->b_widget.w_tflush; /* Flush */
}
static void __init pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
}
int pcibus_to_node(struct pci_bus *bus)
{
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
return bc->nasid;
}
EXPORT_SYMBOL(pcibus_to_node);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
pci_fixup_ioc3);
| gpl-2.0 |
lazy404/kernel | arch/powerpc/kernel/hw_breakpoint.c | 297 | 9559 | /*
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
* using the CPU's debug registers. Derived from
* "arch/x86/kernel/hw_breakpoint.c"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright 2010 IBM Corporation
* Author: K.Prasad <prasad@linux.vnet.ibm.com>
*
*/
#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for every cpu
*/
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
/*
* Returns total number of data or instruction breakpoints available.
*/
int hw_breakpoint_slots(int type)
{
if (type == TYPE_DATA)
return HBP_NUM;
return 0; /* no instruction breakpoints available */
}
/*
* Install a perf counter breakpoint.
*
* We seek a free debug address register and use it for this
* breakpoint.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
struct perf_event **slot = &__get_cpu_var(bp_per_reg);
*slot = bp;
/*
* Do not install DABR values if the instruction must be single-stepped.
* If so, DABR will be populated in single_step_dabr_instruction().
*/
if (current->thread.last_hit_ubp != bp)
set_breakpoint(info);
return 0;
}
/*
* Uninstall the breakpoint contained in the given counter.
*
* First we search the debug address register it uses and then we disable
* it.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct perf_event **slot = &__get_cpu_var(bp_per_reg);
if (*slot != bp) {
WARN_ONCE(1, "Can't find the breakpoint");
return;
}
*slot = NULL;
hw_breakpoint_disable();
}
/*
* Perform cleanup of arch-specific counters during unregistration
* of the perf-event
*/
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
/*
* If the breakpoint is unregistered between a hw_breakpoint_handler()
* and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers.
*/
if (bp->ctx && bp->ctx->task)
bp->ctx->task->thread.last_hit_ubp = NULL;
}
/*
* Check for virtual address in kernel space.
*/
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
return is_kernel_addr(info->address);
}
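/*
 * Translate the powerpc HW_BRK_TYPE_READ/WRITE bits into the generic
 * HW_BREAKPOINT_R/W flags; at least one of the two must be present.
 */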
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
*gen_bp_type = 0;
if (type & HW_BRK_TYPE_READ)
*gen_bp_type |= HW_BREAKPOINT_R;
if (type & HW_BRK_TYPE_WRITE)
*gen_bp_type |= HW_BREAKPOINT_W;
if (*gen_bp_type == 0)
return -EINVAL;
return 0;
}
/*
* Validate the arch-specific HW Breakpoint register settings
*/
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
int ret = -EINVAL, length_max;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (!bp)
return ret;
info->type = HW_BRK_TYPE_TRANSLATE;
if (bp->attr.bp_type & HW_BREAKPOINT_R)
info->type |= HW_BRK_TYPE_READ;
if (bp->attr.bp_type & HW_BREAKPOINT_W)
info->type |= HW_BRK_TYPE_WRITE;
if (info->type == HW_BRK_TYPE_TRANSLATE)
/* must set at least read or write */
return ret;
if (!(bp->attr.exclude_user))
info->type |= HW_BRK_TYPE_USER;
if (!(bp->attr.exclude_kernel))
info->type |= HW_BRK_TYPE_KERNEL;
if (!(bp->attr.exclude_hv))
info->type |= HW_BRK_TYPE_HYP;
info->address = bp->attr.bp_addr;
info->len = bp->attr.bp_len;
/*
* Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
* and breakpoint addresses are aligned to nearest double-word
* HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
* 'symbolsize' should satisfy the check below.
*/
length_max = 8; /* DABR */
if (cpu_has_feature(CPU_FTR_DAWR)) {
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 byte boundary */
if ((bp->attr.bp_addr >> 10) !=
((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
return -EINVAL;
}
if (info->len >
(length_max - (info->address & HW_BREAKPOINT_ALIGN)))
return -EINVAL;
return 0;
}
/*
* Restores the breakpoint on the debug registers.
* Invoke this function if it is known that the execution context is
* about to change to cause loss of MSR_SE settings.
*/
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
struct arch_hw_breakpoint *info;
if (likely(!tsk->thread.last_hit_ubp))
return;
info = counter_arch_bp(tsk->thread.last_hit_ubp);
regs->msr &= ~MSR_SE;
set_breakpoint(info);
tsk->thread.last_hit_ubp = NULL;
}
/*
* Handle debug exception notifications.
*/
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
int rc = NOTIFY_STOP;
struct perf_event *bp;
struct pt_regs *regs = args->regs;
int stepped = 1;
struct arch_hw_breakpoint *info;
unsigned int instr;
unsigned long dar = regs->dar;
/* Disable breakpoints during exception handling */
hw_breakpoint_disable();
/*
* The counter may be concurrently released but that can only
* occur from a call_rcu() path. We can then safely fetch
* the breakpoint, use its callback, touch its counter
* while we are in an rcu_read_lock() path.
*/
rcu_read_lock();
bp = __get_cpu_var(bp_per_reg);
if (!bp)
goto out;
info = counter_arch_bp(bp);
/*
* Return early after invoking user-callback function without restoring
* DABR if the breakpoint is from ptrace which always operates in
* one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
* generated in do_dabr().
*/
if (bp->overflow_handler == ptrace_triggered) {
perf_bp_event(bp, regs);
rc = NOTIFY_DONE;
goto out;
}
/*
* Verify if dar lies within the address range occupied by the symbol
* being watched to filter extraneous exceptions. If it doesn't,
* we still need to single-step the instruction, but we don't
* generate an event.
*/
info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
if (!((bp->attr.bp_addr <= dar) &&
(dar - bp->attr.bp_addr < bp->attr.bp_len)))
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
current->thread.last_hit_ubp = bp;
regs->msr |= MSR_SE;
goto out;
}
stepped = 0;
instr = 0;
if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
stepped = emulate_step(regs, instr);
/*
* emulate_step() could not execute it. We've failed in reliably
* handling the hw-breakpoint. Unregister it and throw a warning
* message to let the user know about it.
*/
if (!stepped) {
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
"0x%lx will be disabled.", info->address);
perf_event_disable(bp);
goto out;
}
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
* fashion
*/
if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
perf_bp_event(bp, regs);
set_breakpoint(info);
out:
rcu_read_unlock();
return rc;
}
/*
* Handle single-step exceptions following a DABR hit.
*/
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
struct arch_hw_breakpoint *info;
bp = current->thread.last_hit_ubp;
/*
* Check if we are single-stepping as a result of a
* previous HW Breakpoint exception
*/
if (!bp)
return NOTIFY_DONE;
info = counter_arch_bp(bp);
/*
* We shall invoke the user-defined callback function in the single
* stepping handler to confirm to 'trigger-after-execute' semantics
*/
if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
perf_bp_event(bp, regs);
set_breakpoint(info);
current->thread.last_hit_ubp = NULL;
/*
* If the process was being single-stepped by ptrace, let the
* other single-step actions occur (e.g. generate SIGTRAP).
*/
if (test_thread_flag(TIF_SINGLESTEP))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
/*
* Handle debug exception notifications.
*/
int __kprobes hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
{
int ret = NOTIFY_DONE;
switch (val) {
case DIE_DABR_MATCH:
ret = hw_breakpoint_handler(data);
break;
case DIE_SSTEP:
ret = single_step_dabr_instruction(data);
break;
}
return ret;
}
/*
* Release the user breakpoints used by ptrace
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
unregister_hw_breakpoint(t->ptrace_bps[0]);
t->ptrace_bps[0] = NULL;
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
| gpl-2.0 |
tadeas482/kernel-old | drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 297 | 15042 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../core.h"
#include "../usb.h"
#include "../efuse.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "mac.h"
#include "dm.h"
#include "rf.h"
#include "sw.h"
#include "trx.h"
#include "led.h"
#include "hw.h"
#include <linux/module.h>
MODULE_AUTHOR("Georgia <georgia@realtek.com>");
MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
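/*
 * rtl92cu_init_sw_vars - driver-private setup done at probe time.
 * Allocates the 16 KB firmware buffer and kicks off an asynchronous
 * request_firmware_nowait() load of rtl8192cufw.bin; rtl_fw_cb() completes
 * the initialisation once the firmware image has been read.
 */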
static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
if (!rtlpriv->rtlhal.pfirmware) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Can't alloc buffer for fw\n");
return 1;
}
pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
rtlpriv->cfg->fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
return 0;
}
static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->rtlhal.pfirmware) {
vfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
}
static struct rtl_hal_ops rtl8192cu_hal_ops = {
.init_sw_vars = rtl92cu_init_sw_vars,
.deinit_sw_vars = rtl92cu_deinit_sw_vars,
.read_chip_version = rtl92c_read_chip_version,
.read_eeprom_info = rtl92cu_read_eeprom_info,
.enable_interrupt = rtl92c_enable_interrupt,
.disable_interrupt = rtl92c_disable_interrupt,
.hw_init = rtl92cu_hw_init,
.hw_disable = rtl92cu_card_disable,
.set_network_type = rtl92cu_set_network_type,
.set_chk_bssid = rtl92cu_set_check_bssid,
.set_qos = rtl92c_set_qos,
.set_bcn_reg = rtl92cu_set_beacon_related_registers,
.set_bcn_intv = rtl92cu_set_beacon_interval,
.update_interrupt_mask = rtl92cu_update_interrupt_mask,
.get_hw_reg = rtl92cu_get_hw_reg,
.set_hw_reg = rtl92cu_set_hw_reg,
.update_rate_tbl = rtl92cu_update_hal_rate_table,
.update_rate_mask = rtl92cu_update_hal_rate_mask,
.fill_tx_desc = rtl92cu_tx_fill_desc,
.fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
.fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
.cmd_send_packet = rtl92cu_cmd_send_packet,
.query_rx_desc = rtl92cu_rx_query_desc,
.set_channel_access = rtl92cu_update_channel_access_setting,
.radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
.set_bw_mode = rtl92c_phy_set_bw_mode,
.switch_channel = rtl92c_phy_sw_chnl,
.dm_watchdog = rtl92c_dm_watchdog,
.scan_operation_backup = rtl92c_phy_scan_operation_backup,
.set_rf_power_state = rtl92cu_phy_set_rf_power_state,
.led_control = rtl92cu_led_control,
.enable_hw_sec = rtl92cu_enable_hw_security_config,
.set_key = rtl92c_set_key,
.init_sw_leds = rtl92cu_init_sw_leds,
.deinit_sw_leds = rtl92cu_deinit_sw_leds,
.get_bbreg = rtl92c_phy_query_bb_reg,
.set_bbreg = rtl92c_phy_set_bb_reg,
.get_rfreg = rtl92cu_phy_query_rf_reg,
.set_rfreg = rtl92cu_phy_set_rf_reg,
.phy_rf6052_config = rtl92cu_phy_rf6052_config,
.phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
.phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
.config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
.config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
.phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
.phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
.dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
};
static struct rtl_mod_params rtl92cu_mod_params = {
.sw_crypto = 0,
.debug = DBG_EMERG,
};
module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444);
module_param_named(debug, rtl92cu_mod_params.debug, int, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
.in_ep_num = RTL92C_USB_BULK_IN_NUM,
.rx_urb_num = RTL92C_NUM_RX_URBS,
.rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
.usb_rx_hdl = rtl8192cu_rx_hdl,
.usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
/* tx */
.usb_tx_cleanup = rtl8192c_tx_cleanup,
.usb_tx_post_hdl = rtl8192c_tx_post_hdl,
.usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
/* endpoint mapping */
.usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
.usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
};
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
.fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
.maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
.maps[SYS_CLK] = REG_SYS_CLKR,
.maps[MAC_RCR_AM] = AM,
.maps[MAC_RCR_AB] = AB,
.maps[MAC_RCR_ACRC32] = ACRC32,
.maps[MAC_RCR_ACF] = ACF,
.maps[MAC_RCR_AAP] = AAP,
.maps[EFUSE_TEST] = REG_EFUSE_TEST,
.maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
.maps[EFUSE_CLK] = 0,
.maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
.maps[EFUSE_PWC_EV12V] = PWC_EV12V,
.maps[EFUSE_FEN_ELDR] = FEN_ELDR,
.maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
.maps[EFUSE_ANA8M] = EFUSE_ANA8M,
.maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
.maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
.maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
.maps[RWCAM] = REG_CAMCMD,
.maps[WCAMI] = REG_CAMWRITE,
.maps[RCAMO] = REG_CAMREAD,
.maps[CAMDBG] = REG_CAMDBG,
.maps[SECR] = REG_SECCFG,
.maps[SEC_CAM_NONE] = CAM_NONE,
.maps[SEC_CAM_WEP40] = CAM_WEP40,
.maps[SEC_CAM_TKIP] = CAM_TKIP,
.maps[SEC_CAM_AES] = CAM_AES,
.maps[SEC_CAM_WEP104] = CAM_WEP104,
.maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
.maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
.maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
.maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
.maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
.maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
.maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
.maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
.maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
.maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
.maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
.maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
.maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
.maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
.maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
.maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
.maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
.maps[RTL_IMR_BcnInt] = IMR_BCNINT,
.maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
.maps[RTL_IMR_RDU] = IMR_RDU,
.maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
.maps[RTL_IMR_BDOK] = IMR_BDOK,
.maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
.maps[RTL_IMR_TBDER] = IMR_TBDER,
.maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
.maps[RTL_IMR_TBDOK] = IMR_TBDOK,
.maps[RTL_IMR_BKDOK] = IMR_BKDOK,
.maps[RTL_IMR_BEDOK] = IMR_BEDOK,
.maps[RTL_IMR_VIDOK] = IMR_VIDOK,
.maps[RTL_IMR_VODOK] = IMR_VODOK,
.maps[RTL_IMR_ROK] = IMR_ROK,
.maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
.maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
.maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
.maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
.maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
.maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
.maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
.maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
.maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
.maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
.maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
.maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
.maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
.maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
.maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
};
#define USB_VENDER_ID_REALTEK 0x0bda
/* 2010-10-19 DID_USB_V3.4 */
static struct usb_device_id rtl8192c_usb_ids[] = {
/*=== Realtek demoboard ===*/
/* Default ID */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
/* RTL8188CTV */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle, (b/g mode only) */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8188cu Slim Solo */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
/* 8188cu Slim Combo */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
/* 8188RU High-power USB Dongle */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard (b/g mode only) */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* RTL8188CUS-VL */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
/* 8192cu 2*2 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
/*=== Customer ID ===*/
/****** 8188CU ********/
{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
{RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
{RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
/*SW-WF02-AD15 -Abocom*/
{RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
{RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
{RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */
{RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
{RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
/****** 8188 RU ********/
/* Netcore */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
/****** 8188CUS Slim Solo********/
{RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
/****** 8188CUS Slim Combo ********/
{RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
{RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
/****** 8192CU ********/
{RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
{RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
{RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
{}
};
MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
static struct usb_driver rtl8192cu_driver = {
.name = "rtl8192cu",
.probe = rtl_usb_probe,
.disconnect = rtl_usb_disconnect,
.id_table = rtl8192c_usb_ids,
#ifdef CONFIG_PM
/* .suspend = rtl_usb_suspend, */
/* .resume = rtl_usb_resume, */
/* .reset_resume = rtl8192c_resume, */
#endif /* CONFIG_PM */
#ifdef CONFIG_AUTOSUSPEND
.supports_autosuspend = 1,
#endif
};
module_usb_driver(rtl8192cu_driver);
| gpl-2.0 |
NeptunIDE/linux | drivers/i2c/busses/i2c-mpc.c | 553 | 16452 | /*
* (C) Copyright 2003-2004
* Humboldt Solutions Ltd, adrian@humboldt.co.uk.
* This is a combined i2c adapter and algorithm driver for the
* MPC107/Tsi107 PowerPC northbridge and processors that include
* the same I2C unit (8240, 8245, 85xx).
*
* Release 0.8
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/of_i2c.h>
#include <linux/io.h>
#include <linux/fsl_devices.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/mpc52xx.h>
#include <sysdev/fsl_soc.h>
#define DRV_NAME "mpc-i2c"
#define MPC_I2C_FDR 0x04
#define MPC_I2C_CR 0x08
#define MPC_I2C_SR 0x0c
#define MPC_I2C_DR 0x10
#define MPC_I2C_DFSRR 0x14
#define CCR_MEN 0x80
#define CCR_MIEN 0x40
#define CCR_MSTA 0x20
#define CCR_MTX 0x10
#define CCR_TXAK 0x08
#define CCR_RSTA 0x04
#define CSR_MCF 0x80
#define CSR_MAAS 0x40
#define CSR_MBB 0x20
#define CSR_MAL 0x10
#define CSR_SRW 0x04
#define CSR_MIF 0x02
#define CSR_RXAK 0x01
struct mpc_i2c {
struct device *dev;
void __iomem *base;
u32 interrupt;
wait_queue_head_t queue;
struct i2c_adapter adap;
int irq;
};
struct mpc_i2c_divider {
u16 divider;
u16 fdr; /* including dfsrr */
};
struct mpc_i2c_match_data {
void (*setclock)(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler);
u32 prescaler;
};
static inline void writeccr(struct mpc_i2c *i2c, u32 x)
{
writeb(x, i2c->base + MPC_I2C_CR);
}
static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
{
struct mpc_i2c *i2c = dev_id;
if (readb(i2c->base + MPC_I2C_SR) & CSR_MIF) {
/* Read again to allow register to stabilise */
i2c->interrupt = readb(i2c->base + MPC_I2C_SR);
writeb(0, i2c->base + MPC_I2C_SR);
wake_up(&i2c->queue);
}
return IRQ_HANDLED;
}
/* Sometimes the 9th clock pulse isn't generated and the slave doesn't
 * release the bus, because it still wants to send an ACK.
 * The following sequence of enabling/disabling the controller and sending
 * start/stop generates the missing pulse, so the bus recovers.
*/
static void mpc_i2c_fixup(struct mpc_i2c *i2c)
{
writeccr(i2c, 0);
udelay(30);
writeccr(i2c, CCR_MEN);
udelay(30);
writeccr(i2c, CCR_MSTA | CCR_MTX);
udelay(30);
writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
udelay(30);
writeccr(i2c, CCR_MEN);
udelay(30);
}
static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
{
unsigned long orig_jiffies = jiffies;
u32 x;
int result = 0;
if (i2c->irq == NO_IRQ) {
while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
schedule();
if (time_after(jiffies, orig_jiffies + timeout)) {
dev_dbg(i2c->dev, "timeout\n");
writeccr(i2c, 0);
result = -EIO;
break;
}
}
x = readb(i2c->base + MPC_I2C_SR);
writeb(0, i2c->base + MPC_I2C_SR);
} else {
/* Interrupt mode */
result = wait_event_timeout(i2c->queue,
(i2c->interrupt & CSR_MIF), timeout);
if (unlikely(!(i2c->interrupt & CSR_MIF))) {
dev_dbg(i2c->dev, "wait timeout\n");
writeccr(i2c, 0);
result = -ETIMEDOUT;
}
x = i2c->interrupt;
i2c->interrupt = 0;
}
if (result < 0)
return result;
if (!(x & CSR_MCF)) {
dev_dbg(i2c->dev, "unfinished\n");
return -EIO;
}
if (x & CSR_MAL) {
dev_dbg(i2c->dev, "MAL\n");
return -EIO;
}
if (writing && (x & CSR_RXAK)) {
dev_dbg(i2c->dev, "No RXAK\n");
/* generate stop */
writeccr(i2c, CCR_MEN);
return -EIO;
}
return 0;
}
#ifdef CONFIG_PPC_MPC52xx
static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
{28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02},
{36, 0x26}, {40, 0x27}, {44, 0x04}, {48, 0x28},
{52, 0x63}, {56, 0x29}, {60, 0x41}, {64, 0x2a},
{68, 0x07}, {72, 0x2b}, {80, 0x2c}, {88, 0x09},
{96, 0x2d}, {104, 0x0a}, {112, 0x2e}, {120, 0x81},
{128, 0x2f}, {136, 0x47}, {144, 0x0c}, {160, 0x30},
{176, 0x49}, {192, 0x31}, {208, 0x4a}, {224, 0x32},
{240, 0x0f}, {256, 0x33}, {272, 0x87}, {288, 0x10},
{320, 0x34}, {352, 0x89}, {384, 0x35}, {416, 0x8a},
{448, 0x36}, {480, 0x13}, {512, 0x37}, {576, 0x14},
{640, 0x38}, {768, 0x39}, {896, 0x3a}, {960, 0x17},
{1024, 0x3b}, {1152, 0x18}, {1280, 0x3c}, {1536, 0x3d},
{1792, 0x3e}, {1920, 0x1b}, {2048, 0x3f}, {2304, 0x1c},
{2560, 0x1d}, {3072, 0x1e}, {3584, 0x7e}, {3840, 0x1f},
{4096, 0x7f}, {4608, 0x5c}, {5120, 0x5d}, {6144, 0x5e},
{7168, 0xbe}, {7680, 0x5f}, {8192, 0xbf}, {9216, 0x9c},
{10240, 0x9d}, {12288, 0x9e}, {15360, 0x9f}
};
int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock, int prescaler)
{
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
int i;
if (!clock)
return -EINVAL;
/* Determine divider value */
divider = mpc5xxx_get_bus_frequency(node) / clock;
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
* is equal to or lower than the requested speed.
*/
for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_52xx); i++) {
div = &mpc_i2c_dividers_52xx[i];
/* Old MPC5200 rev A CPUs do not support the high bits */
if (div->fdr & 0xc0 && pvr == 0x80822011)
continue;
if (div->divider >= divider)
break;
}
return div ? (int)div->fdr : -EINVAL;
}
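/*
 * Worked example (illustrative only, bus frequency assumed): with a bus
 * frequency of 132 MHz and a requested clock of 100 kHz,
 * divider = 132000000 / 100000 = 1320.  The first table entry whose divider
 * is >= 1320 is {1536, 0x3d}, so FDR 0x3d is returned, giving an actual bus
 * speed slightly below the requested 100 kHz.
 */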
static void mpc_i2c_setclock_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
int ret, fdr;
ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler);
fdr = (ret >= 0) ? ret : 0x3f; /* backward compatibility */
writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR);
if (ret >= 0)
dev_info(i2c->dev, "clock %d Hz (fdr=%d)\n", clock, fdr);
}
#else /* !CONFIG_PPC_MPC52xx */
static void mpc_i2c_setclock_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
}
#endif /* CONFIG_PPC_MPC52xx*/
#ifdef CONFIG_FSL_SOC
static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
{160, 0x0120}, {192, 0x0121}, {224, 0x0122}, {256, 0x0123},
{288, 0x0100}, {320, 0x0101}, {352, 0x0601}, {384, 0x0102},
{416, 0x0602}, {448, 0x0126}, {480, 0x0103}, {512, 0x0127},
{544, 0x0b03}, {576, 0x0104}, {608, 0x1603}, {640, 0x0105},
{672, 0x2003}, {704, 0x0b05}, {736, 0x2b03}, {768, 0x0106},
{800, 0x3603}, {832, 0x0b06}, {896, 0x012a}, {960, 0x0107},
{1024, 0x012b}, {1088, 0x1607}, {1152, 0x0108}, {1216, 0x2b07},
{1280, 0x0109}, {1408, 0x1609}, {1536, 0x010a}, {1664, 0x160a},
{1792, 0x012e}, {1920, 0x010b}, {2048, 0x012f}, {2176, 0x2b0b},
{2304, 0x010c}, {2560, 0x010d}, {2816, 0x2b0d}, {3072, 0x010e},
{3328, 0x2b0e}, {3584, 0x0132}, {3840, 0x010f}, {4096, 0x0133},
{4608, 0x0110}, {5120, 0x0111}, {6144, 0x0112}, {7168, 0x0136},
{7680, 0x0113}, {8192, 0x0137}, {9216, 0x0114}, {10240, 0x0115},
{12288, 0x0116}, {14336, 0x013a}, {15360, 0x0117}, {16384, 0x013b},
{18432, 0x0118}, {20480, 0x0119}, {24576, 0x011a}, {28672, 0x013e},
{30720, 0x011b}, {32768, 0x013f}, {36864, 0x011c}, {40960, 0x011d},
{49152, 0x011e}, {61440, 0x011f}
};
u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
struct device_node *node = NULL;
u32 __iomem *reg;
u32 val = 0;
node = of_find_node_by_name(NULL, "global-utilities");
if (node) {
const u32 *prop = of_get_property(node, "reg", NULL);
if (prop) {
/*
* Map and check POR Device Status Register 2
* (PORDEVSR2) at 0xE0014
*/
reg = ioremap(get_immrbase() + *prop + 0x14, 0x4);
if (!reg)
printk(KERN_ERR
"Error: couldn't map PORDEVSR2\n");
else
val = in_be32(reg) & 0x00000080; /* sec-cfg */
iounmap(reg);
}
}
if (node)
of_node_put(node);
return val;
}
int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock, u32 prescaler)
{
const struct mpc_i2c_divider *div = NULL;
u32 divider;
int i;
if (!clock)
return -EINVAL;
/* Determine proper divider value */
if (of_device_is_compatible(node, "fsl,mpc8544-i2c"))
prescaler = mpc_i2c_get_sec_cfg_8xxx() ? 3 : 2;
if (!prescaler)
prescaler = 1;
divider = fsl_get_sys_freq() / clock / prescaler;
pr_debug("I2C: src_clock=%d clock=%d divider=%d\n",
fsl_get_sys_freq(), clock, divider);
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
* is equal to or lower than the requested speed.
*/
for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_8xxx); i++) {
div = &mpc_i2c_dividers_8xxx[i];
if (div->divider >= divider)
break;
}
return div ? (int)div->fdr : -EINVAL;
}
static void mpc_i2c_setclock_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
int ret, fdr;
ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler);
fdr = (ret >= 0) ? ret : 0x1031; /* backward compatibility */
writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR);
writeb((fdr >> 8) & 0xff, i2c->base + MPC_I2C_DFSRR);
if (ret >= 0)
dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n",
clock, fdr >> 8, fdr & 0xff);
}
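/*
 * Illustrative note (not part of the original source): on the 8xxx parts the
 * table value packs DFSR in the high byte and FDR in the low byte, which is
 * why the fallback value 0x1031 above is split by the two writeb() calls
 * into FDR = 0x31 and DFSRR = 0x10.
 */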
#else /* !CONFIG_FSL_SOC */
static void mpc_i2c_setclock_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
}
#endif /* CONFIG_FSL_SOC */
static void mpc_i2c_start(struct mpc_i2c *i2c)
{
/* Clear arbitration */
writeb(0, i2c->base + MPC_I2C_SR);
/* Start with MEN */
writeccr(i2c, CCR_MEN);
}
static void mpc_i2c_stop(struct mpc_i2c *i2c)
{
writeccr(i2c, CCR_MEN);
}
static int mpc_write(struct mpc_i2c *i2c, int target,
const u8 *data, int length, int restart)
{
int i, result;
unsigned timeout = i2c->adap.timeout;
u32 flags = restart ? CCR_RSTA : 0;
/* Start as master */
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
/* Write target byte */
writeb((target << 1), i2c->base + MPC_I2C_DR);
result = i2c_wait(i2c, timeout, 1);
if (result < 0)
return result;
for (i = 0; i < length; i++) {
/* Write data byte */
writeb(data[i], i2c->base + MPC_I2C_DR);
result = i2c_wait(i2c, timeout, 1);
if (result < 0)
return result;
}
return 0;
}
static int mpc_read(struct mpc_i2c *i2c, int target,
u8 *data, int length, int restart)
{
unsigned timeout = i2c->adap.timeout;
int i, result;
u32 flags = restart ? CCR_RSTA : 0;
/* Switch to read - restart */
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
/* Write target address byte - this time with the read flag set */
writeb((target << 1) | 1, i2c->base + MPC_I2C_DR);
result = i2c_wait(i2c, timeout, 1);
if (result < 0)
return result;
if (length) {
if (length == 1)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK);
else
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA);
/* Dummy read */
readb(i2c->base + MPC_I2C_DR);
}
for (i = 0; i < length; i++) {
result = i2c_wait(i2c, timeout, 0);
if (result < 0)
return result;
/* Generate txack on next to last byte */
if (i == length - 2)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK);
/* Do not generate stop on last byte */
if (i == length - 1)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX);
data[i] = readb(i2c->base + MPC_I2C_DR);
}
return length;
}
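/*
 * Illustrative walk-through (not part of the original source), assuming a
 * 3-byte read from 7-bit address 0x50: the address byte written to the data
 * register is (0x50 << 1) | 1 = 0xA1.  TXAK is raised at i == length - 2
 * ("next to last byte" above) so the final byte is NAKed, and MTX is set on
 * the last iteration so that the final read of the data register does not
 * kick off another receive before the stop condition is issued by
 * mpc_i2c_stop().
 */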
static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct i2c_msg *pmsg;
int i;
int ret = 0;
unsigned long orig_jiffies = jiffies;
struct mpc_i2c *i2c = i2c_get_adapdata(adap);
mpc_i2c_start(i2c);
/* Allow bus up to 1s to become not busy */
while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
if (signal_pending(current)) {
dev_dbg(i2c->dev, "Interrupted\n");
writeccr(i2c, 0);
return -EINTR;
}
if (time_after(jiffies, orig_jiffies + HZ)) {
dev_dbg(i2c->dev, "timeout\n");
if (readb(i2c->base + MPC_I2C_SR) ==
(CSR_MCF | CSR_MBB | CSR_RXAK))
mpc_i2c_fixup(i2c);
return -EIO;
}
schedule();
}
for (i = 0; ret >= 0 && i < num; i++) {
pmsg = &msgs[i];
dev_dbg(i2c->dev,
"Doing %s %d bytes to 0x%02x - %d of %d messages\n",
pmsg->flags & I2C_M_RD ? "read" : "write",
pmsg->len, pmsg->addr, i + 1, num);
if (pmsg->flags & I2C_M_RD)
ret =
mpc_read(i2c, pmsg->addr, pmsg->buf, pmsg->len, i);
else
ret =
mpc_write(i2c, pmsg->addr, pmsg->buf, pmsg->len, i);
}
mpc_i2c_stop(i2c);
return (ret < 0) ? ret : num;
}
static u32 mpc_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm mpc_algo = {
.master_xfer = mpc_xfer,
.functionality = mpc_functionality,
};
static struct i2c_adapter mpc_ops = {
.owner = THIS_MODULE,
.name = "MPC adapter",
.algo = &mpc_algo,
.timeout = HZ,
};
static int __devinit fsl_i2c_probe(struct of_device *op,
const struct of_device_id *match)
{
struct mpc_i2c *i2c;
const u32 *prop;
u32 clock = 0;
int result = 0;
int plen;
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
i2c->dev = &op->dev; /* for debug and error output */
init_waitqueue_head(&i2c->queue);
i2c->base = of_iomap(op->node, 0);
if (!i2c->base) {
dev_err(i2c->dev, "failed to map controller\n");
result = -ENOMEM;
goto fail_map;
}
i2c->irq = irq_of_parse_and_map(op->node, 0);
if (i2c->irq != NO_IRQ) { /* i2c->irq = NO_IRQ implies polling */
result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c);
if (result < 0) {
dev_err(i2c->dev, "failed to attach interrupt\n");
goto fail_request;
}
}
if (!of_get_property(op->node, "fsl,preserve-clocking", NULL)) {
prop = of_get_property(op->node, "clock-frequency", &plen);
if (prop && plen == sizeof(u32))
clock = *prop;
if (match->data) {
struct mpc_i2c_match_data *data =
(struct mpc_i2c_match_data *)match->data;
data->setclock(op->node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */
if (of_get_property(op->node, "dfsrr", NULL))
mpc_i2c_setclock_8xxx(op->node, i2c,
clock, 0);
}
}
dev_set_drvdata(&op->dev, i2c);
i2c->adap = mpc_ops;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &op->dev;
result = i2c_add_adapter(&i2c->adap);
if (result < 0) {
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
of_register_i2c_devices(&i2c->adap, op->node);
return result;
fail_add:
dev_set_drvdata(&op->dev, NULL);
free_irq(i2c->irq, i2c);
fail_request:
irq_dispose_mapping(i2c->irq);
iounmap(i2c->base);
fail_map:
kfree(i2c);
return result;
};
static int __devexit fsl_i2c_remove(struct of_device *op)
{
struct mpc_i2c *i2c = dev_get_drvdata(&op->dev);
i2c_del_adapter(&i2c->adap);
dev_set_drvdata(&op->dev, NULL);
if (i2c->irq != NO_IRQ)
free_irq(i2c->irq, i2c);
irq_dispose_mapping(i2c->irq);
iounmap(i2c->base);
kfree(i2c);
return 0;
};
static const struct of_device_id mpc_i2c_of_match[] = {
{.compatible = "mpc5200-i2c",
.data = &(struct mpc_i2c_match_data) {
.setclock = mpc_i2c_setclock_52xx,
},
},
{.compatible = "fsl,mpc5200b-i2c",
.data = &(struct mpc_i2c_match_data) {
.setclock = mpc_i2c_setclock_52xx,
},
},
{.compatible = "fsl,mpc5200-i2c",
.data = &(struct mpc_i2c_match_data) {
.setclock = mpc_i2c_setclock_52xx,
},
},
{.compatible = "fsl,mpc8313-i2c",
.data = &(struct mpc_i2c_match_data) {
.setclock = mpc_i2c_setclock_8xxx,
},
},
{.compatible = "fsl,mpc8543-i2c",
.data = &(struct mpc_i2c_match_data) {
.setclock = mpc_i2c_setclock_8xxx,
.prescaler = 2,
},
},
{.compatible = "fsl,mpc8544-i2c",
.data = &(struct mpc_i2c_match_data) {
.setclock = mpc_i2c_setclock_8xxx,
.prescaler = 3,
},
/* Backward compatibility */
},
{.compatible = "fsl-i2c", },
{},
};
MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
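/*
 * Illustrative device tree fragment (values assumed, not from the original
 * source) matching one of the compatible strings above; fsl_i2c_probe()
 * reads "clock-frequency" unless "fsl,preserve-clocking" is present, and
 * falls back on the "dfsrr" property for older trees:
 *
 *   i2c@3000 {
 *           compatible = "fsl,mpc8313-i2c", "fsl-i2c";
 *           reg = <0x3000 0x100>;
 *           interrupts = <14 8>;
 *           dfsrr;
 *           clock-frequency = <400000>;
 *   };
 */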
/* Structure for a device driver */
static struct of_platform_driver mpc_i2c_driver = {
.match_table = mpc_i2c_of_match,
.probe = fsl_i2c_probe,
.remove = __devexit_p(fsl_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
},
};
static int __init fsl_i2c_init(void)
{
int rv;
rv = of_register_platform_driver(&mpc_i2c_driver);
if (rv)
printk(KERN_ERR DRV_NAME
" of_register_platform_driver failed (%i)\n", rv);
return rv;
}
static void __exit fsl_i2c_exit(void)
{
of_unregister_platform_driver(&mpc_i2c_driver);
}
module_init(fsl_i2c_init);
module_exit(fsl_i2c_exit);
MODULE_AUTHOR("Adrian Cox <adrian@humboldt.co.uk>");
MODULE_DESCRIPTION("I2C-Bus adapter for MPC107 bridge and "
"MPC824x/85xx/52xx processors");
MODULE_LICENSE("GPL");
| gpl-2.0 |
randomblame/jellytimekernel | drivers/scsi/initio.c | 809 | 83295 | /**************************************************************************
* Initio 9100 device driver for Linux.
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
* Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
* Copyright (c) 2007 Red Hat
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
*************************************************************************
*
* DESCRIPTION:
*
* This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host
* adapters
*
* 08/06/97 hc - v1.01h
* - Support inic-940 and inic-935
* 09/26/97 hc - v1.01i
* - Make correction from J.W. Schultz suggestion
* 10/13/97 hc - Support reset function
* 10/21/97 hc - v1.01j
* - Support 32 LUN (SCSI 3)
* 01/14/98 hc - v1.01k
* - Fix memory allocation problem
* 03/04/98 hc - v1.01l
* - Fix tape rewind which will hang the system problem
* - Set can_queue to initio_num_scb
* 06/25/98 hc - v1.01m
* - Get it work for kernel version >= 2.1.75
* - Dynamic assign SCSI bus reset holding time in initio_init()
* 07/02/98 hc - v1.01n
* - Support 0002134A
* 08/07/98 hc - v1.01o
* - Change the initio_abort_srb routine to use scsi_done. <01>
* 09/07/98 hl - v1.02
* - Change the INI9100U define and proc_dir_entry to
* reflect the newer Kernel 2.1.118, but the v1.o1o
* should work with Kernel 2.1.118.
* 09/20/98 wh - v1.02a
* - Support Abort command.
* - Handle reset routine.
* 09/21/98 hl - v1.03
* - remove comments.
* 12/09/98 bv - v1.03a
* - Removed unused code
* 12/13/98 bv - v1.03b
* - Remove cli() locking for kernels >= 2.1.95. This uses
* spinlocks to serialize access to the pSRB_head and
* pSRB_tail members of the HCS structure.
* 09/01/99 bv - v1.03d
* - Fixed a deadlock problem in SMP.
* 21/01/99 bv - v1.03e
* - Add support for the Domex 3192U PCI SCSI
* This is a slightly modified patch by
* Brian Macy <bmacy@sunshinecomputing.com>
* 22/02/99 bv - v1.03f
* - Didn't detect the INIC-950 in 2.0.x correctly.
* Now fixed.
* 05/07/99 bv - v1.03g
* - Changed the assumption that HZ = 100
* 10/17/03 mc - v1.04
* - added new DMA API support
* 06/01/04 jmd - v1.04a
* - Re-add reset_bus support
**************************************************************************/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "initio.h"
#define SENSE_SIZE 14
#define i91u_MAXQUEUE 2
#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */
#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */
#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */
#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */
#ifdef DEBUG_i91u
static unsigned int i91u_debug = DEBUG_DEFAULT;
#endif
static int initio_tag_enable = 1;
#ifdef DEBUG_i91u
static int setup_debug = 0;
#endif
static void i91uSCBPost(u8 * pHcb, u8 * pScb);
/* PCI Devices supported by this driver */
static struct pci_device_id i91u_pci_devices[] = {
{ PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
#define DEBUG_INTERRUPT 0
#define DEBUG_QUEUE 0
#define DEBUG_STATE 0
#define INT_DISC 0
/*--- forward references ---*/
static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
static int tulip_main(struct initio_host * host);
static int initio_next_state(struct initio_host * host);
static int initio_state_1(struct initio_host * host);
static int initio_state_2(struct initio_host * host);
static int initio_state_3(struct initio_host * host);
static int initio_state_4(struct initio_host * host);
static int initio_state_5(struct initio_host * host);
static int initio_state_6(struct initio_host * host);
static int initio_state_7(struct initio_host * host);
static int initio_xfer_data_in(struct initio_host * host);
static int initio_xfer_data_out(struct initio_host * host);
static int initio_xpad_in(struct initio_host * host);
static int initio_xpad_out(struct initio_host * host);
static int initio_status_msg(struct initio_host * host);
static int initio_msgin(struct initio_host * host);
static int initio_msgin_sync(struct initio_host * host);
static int initio_msgin_accept(struct initio_host * host);
static int initio_msgout_reject(struct initio_host * host);
static int initio_msgin_extend(struct initio_host * host);
static int initio_msgout_ide(struct initio_host * host);
static int initio_msgout_abort_targ(struct initio_host * host);
static int initio_msgout_abort_tag(struct initio_host * host);
static int initio_bus_device_reset(struct initio_host * host);
static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
static int int_initio_busfree(struct initio_host * host);
static int int_initio_scsi_rst(struct initio_host * host);
static int int_initio_bad_seq(struct initio_host * host);
static int int_initio_resel(struct initio_host * host);
static int initio_sync_done(struct initio_host * host);
static int wdtr_done(struct initio_host * host);
static int wait_tulip(struct initio_host * host);
static int initio_wait_done_disc(struct initio_host * host);
static int initio_wait_disc(struct initio_host * host);
static void tulip_scsi(struct initio_host * host);
static int initio_post_scsi_rst(struct initio_host * host);
static void initio_se2_ew_en(unsigned long base);
static void initio_se2_ew_ds(unsigned long base);
static int initio_se2_rd_all(unsigned long base);
static void initio_se2_update_all(unsigned long base); /* setup default pattern */
static void initio_read_eeprom(unsigned long base);
/* ---- INTERNAL VARIABLES ---- */
static NVRAM i91unvram;
static NVRAM *i91unvramp;
static u8 i91udftNvRam[64] =
{
/*----------- header -----------*/
0x25, 0xc9, /* Signature */
0x40, /* Size */
0x01, /* Revision */
/* -- Host Adapter Structure -- */
0x95, /* ModelByte0 */
0x00, /* ModelByte1 */
0x00, /* ModelInfo */
0x01, /* NumOfCh */
NBC1_DEFAULT, /* BIOSConfig1 */
0, /* BIOSConfig2 */
0, /* HAConfig1 */
0, /* HAConfig2 */
/* SCSI channel 0 and target Structure */
7, /* SCSIid */
NCC1_DEFAULT, /* SCSIconfig1 */
0, /* SCSIconfig2 */
0x10, /* NumSCSItarget */
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
/* SCSI channel 1 and target Structure */
7, /* SCSIid */
NCC1_DEFAULT, /* SCSIconfig1 */
0, /* SCSIconfig2 */
0x10, /* NumSCSItarget */
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0}; /* - CheckSum - */
static u8 initio_rate_tbl[8] = /* fast 20 */
{
/* nanoseconds divided by 4 */
12, /* 50ns, 20M */
18, /* 75ns, 13.3M */
25, /* 100ns, 10M */
31, /* 125ns, 8M */
37, /* 150ns, 6.6M */
43, /* 175ns, 5.7M */
50, /* 200ns, 5M */
62 /* 250ns, 4M */
};
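/*
 * Illustrative note (not part of the original source): each entry is the
 * synchronous transfer period in nanoseconds divided by 4, so 12 -> ~50 ns
 * (20 MHz fast-20), 25 -> 100 ns (10 MHz) and 62 -> ~250 ns (4 MHz),
 * matching the per-entry comments above.
 */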
static void initio_do_pause(unsigned amount)
{
/* Pause for amount jiffies */
unsigned long the_time = jiffies + amount;
while (time_before_eq(jiffies, the_time))
cpu_relax();
}
/*-- forward reference --*/
/******************************************************************
Input: instruction for Serial E2PROM
EX: se2_rd() calls se2_instr() to send the address and read command
StartBit OP_Code Address Data
--------- -------- ------------------ -------
1 1 , 0 A5,A4,A3,A2,A1,A0 D15-D0
+-----------------------------------------------------
|
CS -----+
+--+ +--+ +--+ +--+ +--+
^ | ^ | ^ | ^ | ^ |
| | | | | | | | | |
CLK -------+ +--+ +--+ +--+ +--+ +--
(leading edge trigger)
+--1-----1--+
| SB OP | OP A5 A4
DI ----+ +--0------------------
(address and cmd sent to nvram)
-------------------------------------------+
|
DO +---
(data sent from nvram)
******************************************************************/
/**
* initio_se2_instr - bitbang an instruction
* @base: Base of InitIO controller
* @instr: Instruction for serial E2PROM
*
* Bitbang an instruction out to the serial E2Prom
*/
static void initio_se2_instr(unsigned long base, u8 instr)
{
int i;
u8 b;
outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
udelay(30);
outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
udelay(30);
for (i = 0; i < 8; i++) {
if (instr & 0x80)
b = SE2CS | SE2DO; /* -CLK+dataBit */
else
b = SE2CS; /* -CLK */
outb(b, base + TUL_NVRAM);
udelay(30);
outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
instr <<= 1;
}
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
}
/**
* initio_se2_ew_en - Enable erase/write
* @base: Base address of InitIO controller
*
* Enable erase/write state of serial EEPROM
*/
void initio_se2_ew_en(unsigned long base)
{
initio_se2_instr(base, 0x30); /* EWEN */
outb(0, base + TUL_NVRAM); /* -CS */
udelay(30);
}
/**
* initio_se2_ew_ds - Disable erase/write
* @base: Base address of InitIO controller
*
* Disable erase/write state of serial EEPROM
*/
void initio_se2_ew_ds(unsigned long base)
{
initio_se2_instr(base, 0); /* EWDS */
outb(0, base + TUL_NVRAM); /* -CS */
udelay(30);
}
/**
* initio_se2_rd - read E2PROM word
* @base: Base of InitIO controller
* @addr: Address of word in E2PROM
*
* Read a word from the NV E2PROM device
*/
static u16 initio_se2_rd(unsigned long base, u8 addr)
{
u8 instr, rb;
u16 val = 0;
int i;
instr = (u8) (addr | 0x80);
initio_se2_instr(base, instr); /* READ INSTR */
for (i = 15; i >= 0; i--) {
outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
/* sample data after the falling edge of the clock */
rb = inb(base + TUL_NVRAM);
rb &= SE2DI;
val += (rb << i);
udelay(30); /* 6/20/95 */
}
outb(0, base + TUL_NVRAM); /* no chip select */
udelay(30);
return val;
}
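/*
 * Illustrative note (not part of the original source): initio_se2_rd() forms
 * its instruction byte as (addr | 0x80); together with the start bit clocked
 * out first by initio_se2_instr(), this yields the "1  1,0  A5..A0" READ
 * pattern shown in the waveform diagram above.  For example, reading word 5
 * sends the instruction byte 0x85 and then clocks in 16 data bits MSB first.
 */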
/**
* initio_se2_wr - read E2PROM word
* @base: Base of InitIO controller
* @addr: Address of word in E2PROM
* @val: Value to write
*
* Write a word to the NV E2PROM device. Used when recovering from
* a problem with the NV.
*/
static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
u8 rb;
u8 instr;
int i;
instr = (u8) (addr | 0x40);
initio_se2_instr(base, instr); /* WRITE INSTR */
for (i = 15; i >= 0; i--) {
if (val & 0x8000)
outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
else
outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
udelay(30);
outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
val <<= 1;
}
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
outb(0, base + TUL_NVRAM); /* -CS */
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* +CS */
udelay(30);
for (;;) {
outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
break; /* write complete */
}
outb(0, base + TUL_NVRAM); /* -CS */
}
/**
* initio_se2_rd_all - read hostadapter NV configuration
* @base: Base address of InitIO controller
*
* Reads the E2PROM data into main memory. Ensures that the checksum
* and header marker are valid. Returns 1 on success -1 on error.
*/
static int initio_se2_rd_all(unsigned long base)
{
int i;
u16 chksum = 0;
u16 *np;
i91unvramp = &i91unvram;
np = (u16 *) i91unvramp;
for (i = 0; i < 32; i++)
*np++ = initio_se2_rd(base, i);
/* Is signature "ini" ok ? */
if (i91unvramp->NVM_Signature != INI_SIGNATURE)
return -1;
/* Is checksum ok ? */
np = (u16 *) i91unvramp;
for (i = 0; i < 31; i++)
chksum += *np++;
if (i91unvramp->NVM_CheckSum != chksum)
return -1;
return 1;
}
/**
* initio_se2_update_all - Update E2PROM
* @base: Base of InitIO controller
*
* Update the E2PROM by writing any changes into the E2PROM
* chip, rewriting the checksum.
*/
static void initio_se2_update_all(unsigned long base)
{ /* setup default pattern */
int i;
u16 chksum = 0;
u16 *np, *np1;
i91unvramp = &i91unvram;
/* Calculate checksum first */
np = (u16 *) i91udftNvRam;
for (i = 0; i < 31; i++)
chksum += *np++;
*np = chksum;
initio_se2_ew_en(base); /* Enable write */
np = (u16 *) i91udftNvRam;
np1 = (u16 *) i91unvramp;
for (i = 0; i < 32; i++, np++, np1++) {
if (*np != *np1)
initio_se2_wr(base, i, *np);
}
initio_se2_ew_ds(base); /* Disable write */
}
/**
* initio_read_eeprom - Retrieve configuration
* @base: Base of InitIO Host Adapter
*
* Retrieve the host adapter configuration data from E2Prom. If the
* data is invalid then the defaults are used and are also restored
* into the E2PROM. This forms the access point for the SCSI driver
* into the E2PROM layer, the other functions for the E2PROM are all
* internal use.
*
* Must be called single threaded, uses a shared global area.
*/
static void initio_read_eeprom(unsigned long base)
{
u8 gctrl;
i91unvramp = &i91unvram;
/* Enable EEProm programming */
gctrl = inb(base + TUL_GCTRL);
outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
if (initio_se2_rd_all(base) != 1) {
initio_se2_update_all(base); /* setup default pattern */
initio_se2_rd_all(base); /* load again */
}
/* Disable EEProm programming */
gctrl = inb(base + TUL_GCTRL);
outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
}
/**
* initio_stop_bm - stop bus master
* @host: InitIO we are stopping
*
* Stop any pending DMA operation, aborting the DMA if necessary
*/
static void initio_stop_bm(struct initio_host * host)
{
if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
while ((inb(host->addr + TUL_Int) & XABT) == 0)
cpu_relax();
}
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/**
* initio_reset_scsi - Reset SCSI host controller
* @host: InitIO host to reset
* @seconds: Recovery time
*
* Perform a full reset of the SCSI subsystem.
*/
static int initio_reset_scsi(struct initio_host * host, int seconds)
{
outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
cpu_relax();
/* reset tulip chip */
outb(0, host->addr + TUL_SSignal);
/* Stall for a while to wait for the target's firmware to be ready; make it 2 sec! */
/* A SONY 5200 tape drive won't work if we only stall for 1 sec */
/* FIXME: this is a very long busy wait right now */
initio_do_pause(seconds * HZ);
inb(host->addr + TUL_SInt);
return SCSI_RESET_SUCCESS;
}
/**
* initio_init - set up an InitIO host adapter
* @host: InitIO host adapter
* @num_scbs: Number of SCBS
* @bios_addr: BIOS address
*
* Set up the host adapter and devices according to the configuration
* retrieved from the E2PROM.
*
* Locking: Calls E2PROM layer code which is not re-enterable so must
* run single threaded for now.
*/
static void initio_init(struct initio_host * host, u8 *bios_addr)
{
int i;
u8 *flags;
u8 *heads;
/* Get E2Prom configuration */
initio_read_eeprom(host->addr);
if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
host->max_tar = 8;
else
host->max_tar = 16;
host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
host->idmask = ~(1 << host->scsi_id);
#ifdef CHK_PARITY
/* Enable parity error response */
outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
#endif
/* Mask all the interrupt */
outb(0x1F, host->addr + TUL_Mask);
initio_stop_bm(host);
/* --- Initialize the tulip --- */
outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
/* program HBA's SCSI ID */
outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
/* Enable Initiator Mode, phase latch, alternate sync period mode,
disable SCSI reset */
if (host->config & HCC_EN_PAR)
host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
else
host->sconf1 = (TSC_INITDEFAULT);
outb(host->sconf1, host->addr + TUL_SConfig);
/* Enable HW reselect */
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
outb(0, host->addr + TUL_SPeriod);
/* selection time out = 250 ms */
outb(153, host->addr + TUL_STimeOut);
/* Enable SCSI terminator */
outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
host->addr + TUL_XCtrl);
outb(((host->config & HCC_AUTO_TERM) >> 4) |
(inb(host->addr + TUL_GCTRL1) & 0xFE),
host->addr + TUL_GCTRL1);
for (i = 0,
flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
heads = bios_addr + 0x180;
i < host->max_tar;
i++, flags++) {
host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
if (host->targets[i].flags & TCF_EN_255)
host->targets[i].drv_flags = TCF_DRV_255_63;
else
host->targets[i].drv_flags = 0;
host->targets[i].js_period = 0;
host->targets[i].sconfig0 = host->sconf1;
host->targets[i].heads = *heads++;
if (host->targets[i].heads == 255)
host->targets[i].drv_flags = TCF_DRV_255_63;
else
host->targets[i].drv_flags = 0;
host->targets[i].sectors = *heads++;
host->targets[i].flags &= ~TCF_BUSY;
host->act_tags[i] = 0;
host->max_tags[i] = 0xFF;
} /* for */
printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
host->addr, host->pci_dev->irq,
host->bios_addr, host->scsi_id);
/* Reset SCSI Bus */
if (host->config & HCC_SCSI_RESET) {
printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
initio_reset_scsi(host, 10);
}
outb(0x17, host->addr + TUL_SCFG1);
outb(0xE9, host->addr + TUL_SIntEnable);
}
/**
* initio_alloc_scb - Allocate an SCB
* @host: InitIO host we are allocating for
*
* Walk the SCB list for the controller and allocate a free SCB if
* one exists.
*/
static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
{
struct scsi_ctrl_blk *scb;
unsigned long flags;
spin_lock_irqsave(&host->avail_lock, flags);
if ((scb = host->first_avail) != NULL) {
#if DEBUG_QUEUE
printk("find scb at %p\n", scb);
#endif
if ((host->first_avail = scb->next) == NULL)
host->last_avail = NULL;
scb->next = NULL;
scb->status = SCB_RENT;
}
spin_unlock_irqrestore(&host->avail_lock, flags);
return scb;
}
/**
* initio_release_scb - Release an SCB
* @host: InitIO host that owns the SCB
* @cmnd: SCB command block being returned
*
* Return an allocated SCB to the host free list
*/
static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
{
unsigned long flags;
#if DEBUG_QUEUE
printk("Release SCB %p; ", cmnd);
#endif
spin_lock_irqsave(&(host->avail_lock), flags);
cmnd->srb = NULL;
cmnd->status = 0;
cmnd->next = NULL;
if (host->last_avail != NULL) {
host->last_avail->next = cmnd;
host->last_avail = cmnd;
} else {
host->first_avail = cmnd;
host->last_avail = cmnd;
}
spin_unlock_irqrestore(&(host->avail_lock), flags);
}
/***************************************************************************/
static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("Append pend SCB %p; ", scbp);
#endif
scbp->status = SCB_PEND;
scbp->next = NULL;
if (host->last_pending != NULL) {
host->last_pending->next = scbp;
host->last_pending = scbp;
} else {
host->first_pending = scbp;
host->last_pending = scbp;
}
}
/***************************************************************************/
static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("Push pend SCB %p; ", scbp);
#endif
scbp->status = SCB_PEND;
if ((scbp->next = host->first_pending) != NULL) {
host->first_pending = scbp;
} else {
host->first_pending = scbp;
host->last_pending = scbp;
}
}
static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
{
struct scsi_ctrl_blk *first;
first = host->first_pending;
while (first != NULL) {
if (first->opcode != ExecSCSI)
return first;
if (first->tagmsg == 0) {
if ((host->act_tags[first->target] == 0) &&
!(host->targets[first->target].flags & TCF_BUSY))
return first;
} else {
if ((host->act_tags[first->target] >=
host->max_tags[first->target]) |
(host->targets[first->target].flags & TCF_BUSY)) {
first = first->next;
continue;
}
return first;
}
first = first->next;
}
return first;
}
static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
printk("unlink pend SCB %p; ", scb);
#endif
prev = tmp = host->first_pending;
while (tmp != NULL) {
if (scb == tmp) { /* Unlink this SCB */
if (tmp == host->first_pending) {
if ((host->first_pending = tmp->next) == NULL)
host->last_pending = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_pending)
host->last_pending = prev;
}
tmp->next = NULL;
break;
}
prev = tmp;
tmp = tmp->next;
}
}
static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("append busy SCB %p; ", scbp);
#endif
if (scbp->tagmsg)
host->act_tags[scbp->target]++;
else
host->targets[scbp->target].flags |= TCF_BUSY;
scbp->status = SCB_BUSY;
scbp->next = NULL;
if (host->last_busy != NULL) {
host->last_busy->next = scbp;
host->last_busy = scbp;
} else {
host->first_busy = scbp;
host->last_busy = scbp;
}
}
/***************************************************************************/
static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
{
struct scsi_ctrl_blk *tmp;
if ((tmp = host->first_busy) != NULL) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
tmp->next = NULL;
if (tmp->tagmsg)
host->act_tags[tmp->target]--;
else
host->targets[tmp->target].flags &= ~TCF_BUSY;
}
#if DEBUG_QUEUE
printk("Pop busy SCB %p; ", tmp);
#endif
return tmp;
}
/***************************************************************************/
static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
printk("unlink busy SCB %p; ", scb);
#endif
prev = tmp = host->first_busy;
while (tmp != NULL) {
if (scb == tmp) { /* Unlink this SCB */
if (tmp == host->first_busy) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_busy)
host->last_busy = prev;
}
tmp->next = NULL;
if (tmp->tagmsg)
host->act_tags[tmp->target]--;
else
host->targets[tmp->target].flags &= ~TCF_BUSY;
break;
}
prev = tmp;
tmp = tmp->next;
}
return;
}
struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
struct scsi_ctrl_blk *tmp, *prev;
u16 scbp_tarlun;
prev = tmp = host->first_busy;
while (tmp != NULL) {
scbp_tarlun = (tmp->lun << 8) | (tmp->target);
if (scbp_tarlun == tarlun) { /* Unlink this SCB */
break;
}
prev = tmp;
tmp = tmp->next;
}
#if DEBUG_QUEUE
printk("find busy SCB %p; ", tmp);
#endif
return tmp;
}
static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("append done SCB %p; ", scbp);
#endif
scbp->status = SCB_DONE;
scbp->next = NULL;
if (host->last_done != NULL) {
host->last_done->next = scbp;
host->last_done = scbp;
} else {
host->first_done = scbp;
host->last_done = scbp;
}
}
struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
{
struct scsi_ctrl_blk *tmp;
if ((tmp = host->first_done) != NULL) {
if ((host->first_done = tmp->next) == NULL)
host->last_done = NULL;
tmp->next = NULL;
}
#if DEBUG_QUEUE
printk("find done SCB %p; ",tmp);
#endif
return tmp;
}
static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
{
unsigned long flags;
struct scsi_ctrl_blk *tmp, *prev;
spin_lock_irqsave(&host->semaph_lock, flags);
if ((host->semaph == 0) && (host->active == NULL)) {
/* disable Jasmin SCSI Int */
outb(0x1F, host->addr + TUL_Mask);
spin_unlock_irqrestore(&host->semaph_lock, flags);
/* FIXME: synchronize_irq needed ? */
tulip_main(host);
spin_lock_irqsave(&host->semaph_lock, flags);
host->semaph = 1;
outb(0x0F, host->addr + TUL_Mask);
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SNOOZE;
}
prev = tmp = host->first_pending; /* Check Pend queue */
while (tmp != NULL) {
/* 07/27/98 */
if (tmp->srb == srbp) {
if (tmp == host->active) {
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else if (tmp == host->first_pending) {
if ((host->first_pending = tmp->next) == NULL)
host->last_pending = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_pending)
host->last_pending = prev;
}
tmp->hastat = HOST_ABORTED;
tmp->flags |= SCF_DONE;
if (tmp->flags & SCF_POST)
(*tmp->post) ((u8 *) host, (u8 *) tmp);
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
prev = tmp;
tmp = tmp->next;
}
prev = tmp = host->first_busy; /* Check Busy queue */
while (tmp != NULL) {
if (tmp->srb == srbp) {
if (tmp == host->active) {
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else if (tmp->tagmsg == 0) {
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else {
host->act_tags[tmp->target]--;
if (tmp == host->first_busy) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_busy)
host->last_busy = prev;
}
tmp->next = NULL;
tmp->hastat = HOST_ABORTED;
tmp->flags |= SCF_DONE;
if (tmp->flags & SCF_POST)
(*tmp->post) ((u8 *) host, (u8 *) tmp);
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
}
prev = tmp;
tmp = tmp->next;
}
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_NOT_RUNNING;
}
/***************************************************************************/
static int initio_bad_seq(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
printk("initio_bad_seg c=%d\n", host->index);
if ((scb = host->active) != NULL) {
initio_unlink_busy_scb(host, scb);
scb->hastat = HOST_BAD_PHAS;
scb->tastat = 0;
initio_append_done_scb(host, scb);
}
initio_stop_bm(host);
initio_reset_scsi(host, 8); /* 7/29/98 */
return initio_post_scsi_rst(host);
}
/************************************************************************/
static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
unsigned long flags;
scb->mode = 0;
scb->sgidx = 0;
scb->sgmax = scb->sglen;
spin_lock_irqsave(&host->semaph_lock, flags);
initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */
/* VVVVV 07/21/98 */
if (host->semaph == 1) {
/* Disable Jasmin SCSI Int */
outb(0x1F, host->addr + TUL_Mask);
host->semaph = 0;
spin_unlock_irqrestore(&host->semaph_lock, flags);
tulip_main(host);
spin_lock_irqsave(&host->semaph_lock, flags);
host->semaph = 1;
outb(0x0F, host->addr + TUL_Mask);
}
spin_unlock_irqrestore(&host->semaph_lock, flags);
return;
}
/***************************************************************************/
static int initio_isr(struct initio_host * host)
{
if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
if (host->semaph == 1) {
outb(0x1F, host->addr + TUL_Mask);
/* Disable Tulip SCSI Int */
host->semaph = 0;
tulip_main(host);
host->semaph = 1;
outb(0x0F, host->addr + TUL_Mask);
return 1;
}
}
return 0;
}
static int tulip_main(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
for (;;) {
tulip_scsi(host); /* Call tulip_scsi */
/* Walk the list of completed SCBs */
while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */
if (scb->tastat == INI_QUEUE_FULL) {
host->max_tags[scb->target] =
host->act_tags[scb->target] - 1;
scb->tastat = 0;
initio_append_pend_scb(host, scb);
continue;
}
if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */
if (scb->tastat == 2) {
/* clr sync. nego flag */
if (scb->flags & SCF_SENSE) {
u8 len;
len = scb->senselen;
if (len == 0)
len = 1;
scb->buflen = scb->senselen;
scb->bufptr = scb->senseptr;
scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
/* so, we won't report wrong direction in xfer_data_in,
and won't report HOST_DO_DU in state_6 */
scb->mode = SCM_RSENS;
scb->ident &= 0xBF; /* Disable Disconnect */
scb->tagmsg = 0;
scb->tastat = 0;
scb->cdblen = 6;
scb->cdb[0] = SCSICMD_RequestSense;
scb->cdb[1] = 0;
scb->cdb[2] = 0;
scb->cdb[3] = 0;
scb->cdb[4] = len;
scb->cdb[5] = 0;
initio_push_pend_scb(host, scb);
break;
}
}
} else { /* in request sense mode */
if (scb->tastat == 2) { /* check condition status again after sending
request sense cmd 0x3 */
scb->hastat = HOST_BAD_PHAS;
}
scb->tastat = 2;
}
scb->flags |= SCF_DONE;
if (scb->flags & SCF_POST) {
/* FIXME: only one post method and lose casts */
(*scb->post) ((u8 *) host, (u8 *) scb);
}
} /* while */
/* find_active: */
if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
continue;
if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */
return 1; /* return to OS, enable interrupt */
/* Check pending SCB */
if (initio_find_first_pend_scb(host) == NULL)
return 1; /* return to OS, enable interrupt */
} /* End of for loop */
/* statement won't reach here */
}
static void tulip_scsi(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
struct target_control *active_tc;
/* make sure to service interrupt asap */
if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
host->phase = host->jsstatus0 & TSS_PH_MASK;
host->jsstatus1 = inb(host->addr + TUL_SStatus1);
host->jsint = inb(host->addr + TUL_SInt);
if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
int_initio_scsi_rst(host);
return;
}
if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */
if (int_initio_resel(host) == 0)
initio_next_state(host);
return;
}
if (host->jsint & TSS_SEL_TIMEOUT) {
int_initio_busfree(host);
return;
}
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
int_initio_busfree(host); /* unexpected bus free or sel timeout */
return;
}
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
if ((scb = host->active) != NULL)
initio_next_state(host);
return;
}
}
if (host->active != NULL)
return;
if ((scb = initio_find_first_pend_scb(host)) == NULL)
return;
/* program HBA's SCSI ID & target SCSI ID */
outb((host->scsi_id << 4) | (scb->target & 0x0F),
host->addr + TUL_SScsiId);
if (scb->opcode == ExecSCSI) {
active_tc = &host->targets[scb->target];
if (scb->tagmsg)
active_tc->drv_flags |= TCF_DRV_EN_TAG;
else
active_tc->drv_flags &= ~TCF_DRV_EN_TAG;
outb(active_tc->js_period, host->addr + TUL_SPeriod);
if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
initio_select_atn_stop(host, scb);
} else {
if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
initio_select_atn_stop(host, scb);
} else {
if (scb->tagmsg)
initio_select_atn3(host, scb);
else
initio_select_atn(host, scb);
}
}
if (scb->flags & SCF_POLL) {
while (wait_tulip(host) != -1) {
if (initio_next_state(host) == -1)
break;
}
}
} else if (scb->opcode == BusDevRst) {
initio_select_atn_stop(host, scb);
scb->next_state = 8;
if (scb->flags & SCF_POLL) {
while (wait_tulip(host) != -1) {
if (initio_next_state(host) == -1)
break;
}
}
} else if (scb->opcode == AbortCmd) {
if (initio_abort_srb(host, scb->srb) != 0) {
initio_unlink_pend_scb(host, scb);
initio_release_scb(host, scb);
} else {
scb->opcode = BusDevRst;
initio_select_atn_stop(host, scb);
scb->next_state = 8;
}
} else {
initio_unlink_pend_scb(host, scb);
scb->hastat = 0x16; /* bad command */
initio_append_done_scb(host, scb);
}
return;
}
/**
* initio_next_state - Next SCSI state
* @host: InitIO host we are processing
*
* Progress the active command block along the state machine
* until we hit a state which we must wait for activity to occur.
*
* Returns zero or a negative code.
*/
static int initio_next_state(struct initio_host * host)
{
int next;
next = host->active->next_state;
for (;;) {
switch (next) {
case 1:
next = initio_state_1(host);
break;
case 2:
next = initio_state_2(host);
break;
case 3:
next = initio_state_3(host);
break;
case 4:
next = initio_state_4(host);
break;
case 5:
next = initio_state_5(host);
break;
case 6:
next = initio_state_6(host);
break;
case 7:
next = initio_state_7(host);
break;
case 8:
return initio_bus_device_reset(host);
default:
return initio_bad_seq(host);
}
if (next <= 0)
return next;
}
}
/**
* initio_state_1 - SCSI state machine
* @host: InitIO host we are controlling
*
* Perform SCSI state processing for Select/Attention/Stop
*/
static int initio_state_1(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s1-");
#endif
/* Move the SCB from pending to busy */
initio_unlink_pend_scb(host, scb);
initio_append_busy_scb(host, scb);
outb(active_tc->sconfig0, host->addr + TUL_SConfig );
/* ATN on */
if (host->phase == MSG_OUT) {
outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
outb(scb->ident, host->addr + TUL_SFifo);
if (scb->tagmsg) {
outb(scb->tagmsg, host->addr + TUL_SFifo);
outb(scb->tagid, host->addr + TUL_SFifo);
}
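/*
 * Queue a WDTR or SDTR extended message if the corresponding
 * negotiation has not been completed yet; wide negotiation is
 * attempted before synchronous negotiation.
 */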
if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
active_tc->flags |= TCF_WDTR_DONE;
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo); /* Extended msg length */
outb(3, host->addr + TUL_SFifo); /* WDTR: wide data transfer request */
outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
} else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
active_tc->flags |= TCF_SYNC_DONE;
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* extended msg length */
outb(1, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
}
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
}
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
/* Into before CDB xfer */
return 3;
}
/**
* initio_state_2 - SCSI state machine
* @host: InitIO host we are controlling
*
* state after selection with attention
* state after selection with attention3
*/
static int initio_state_2(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s2-");
#endif
initio_unlink_pend_scb(host, scb);
initio_append_busy_scb(host, scb);
outb(active_tc->sconfig0, host->addr + TUL_SConfig);
if (host->jsstatus1 & TSS_CMD_PH_CMP)
return 4;
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
/* Into before CDB xfer */
return 3;
}
/**
* initio_state_3 - SCSI state machine
* @host: InitIO host we are controlling
*
* state before CDB xfer is done
*/
static int initio_state_3(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
int i;
#if DEBUG_STATE
printk("-s3-");
#endif
for (;;) {
switch (host->phase) {
case CMD_OUT: /* Command out phase */
for (i = 0; i < (int) scb->cdblen; i++)
outb(scb->cdb[i], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
if (host->phase == CMD_OUT)
return initio_bad_seq(host);
return 4;
case MSG_IN: /* Message in phase */
scb->next_state = 3;
if (initio_msgin(host) == -1)
return -1;
break;
case STATUS_IN: /* Status phase */
if (initio_status_msg(host) == -1)
return -1;
break;
case MSG_OUT: /* Message out phase */
if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
} else {
active_tc->flags |= TCF_SYNC_DONE;
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* ext. msg len */
outb(1, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
}
break;
default:
return initio_bad_seq(host);
}
}
}
/**
* initio_state_4 - SCSI state machine
* @host: InitIO host we are controlling
*
* SCSI state machine. State 4
*/
static int initio_state_4(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s4-");
#endif
if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
return 6; /* Go to state 6 (After data) */
}
for (;;) {
if (scb->buflen == 0)
return 6;
switch (host->phase) {
case STATUS_IN: /* Status phase */
if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */
scb->hastat = HOST_DO_DU;
if ((initio_status_msg(host)) == -1)
return -1;
break;
case MSG_IN: /* Message in phase */
scb->next_state = 0x4;
if (initio_msgin(host) == -1)
return -1;
break;
case MSG_OUT: /* Message out phase */
if (host->jsstatus0 & TSS_PAR_ERROR) {
scb->buflen = 0;
scb->hastat = HOST_DO_DU;
if (initio_msgout_ide(host) == -1)
return -1;
return 6;
} else {
outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
}
break;
case DATA_IN: /* Data in phase */
return initio_xfer_data_in(host);
case DATA_OUT: /* Data out phase */
return initio_xfer_data_out(host);
default:
return initio_bad_seq(host);
}
}
}
/**
* initio_state_5 - SCSI state machine
* @host: InitIO host we are controlling
*
* State after dma xfer done or phase change before xfer done
*/
static int initio_state_5(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
long cnt, xcnt; /* must stay signed: the code below tests xcnt < 0 */
#if DEBUG_STATE
printk("-s5-");
#endif
/*------ get remaining count -------*/
cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;
if (inb(host->addr + TUL_XCmd) & 0x20) {
/* ----------------------- DATA_IN ----------------------------- */
/* check scsi parity error */
if (host->jsstatus0 & TSS_PAR_ERROR)
scb->hastat = HOST_DO_DU;
if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
/* tell Hardware scsi xfer has been terminated */
outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
/* wait until DMA xfer not pending */
while (inb(host->addr + TUL_XStatus) & XPEND)
cpu_relax();
}
} else {
/*-------- DATA OUT -----------*/
if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
if (host->active_tc->js_period & TSC_WIDE_SCSI)
cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
else
cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
}
if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
outb(TAX_X_ABT, host->addr + TUL_XCmd);
/* wait for the DMA abort to complete */
while ((inb(host->addr + TUL_Int) & XABT) == 0)
cpu_relax();
}
if ((cnt == 1) && (host->phase == DATA_OUT)) {
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
cnt = 0;
} else {
if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
}
if (cnt == 0) {
scb->buflen = 0;
return 6; /* After Data */
}
/* Update active data pointer */
xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */
scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */
if (scb->flags & SCF_SG) {
struct sg_entry *sgp;
unsigned long i;
sgp = &scb->sglist[scb->sgidx];
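/*
 * Walk the scatter/gather list to find the entry in which the
 * transfer stopped, then adjust that entry's pointer/length and the
 * SCB's SG bookkeeping so the transfer can resume there after the
 * next reconnect.
 */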
for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
xcnt -= (long) sgp->len;
if (xcnt < 0) { /* this sgp xfer half done */
xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */
sgp->data += (u32) xcnt; /* new ptr to be xfer */
sgp->len -= (u32) xcnt; /* new len to be xfer */
scb->bufptr += ((u32) (i - scb->sgidx) << 3);
/* new SG table ptr */
scb->sglen = (u8) (scb->sgmax - i);
/* new SG table len */
scb->sgidx = (u16) i;
/* for next disc and come in this loop */
return 4; /* Go to state 4 */
}
/* else (xcnt >= 0), i.e. this sgp was already fully transferred */
} /* for */
return 6; /* Go to state 6 */
} else {
scb->bufptr += (u32) xcnt;
}
return 4; /* Go to state 4 */
}
/**
* initio_state_6 - SCSI state machine
* @host: InitIO host we are controlling
*
* State after Data phase
*/
static int initio_state_6(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s6-");
#endif
for (;;) {
switch (host->phase) {
case STATUS_IN: /* Status phase */
if ((initio_status_msg(host)) == -1)
return -1;
break;
case MSG_IN: /* Message in phase */
scb->next_state = 6;
if ((initio_msgin(host)) == -1)
return -1;
break;
case MSG_OUT: /* Message out phase */
outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
break;
case DATA_IN: /* Data in phase */
return initio_xpad_in(host);
case DATA_OUT: /* Data out phase */
return initio_xpad_out(host);
default:
return initio_bad_seq(host);
}
}
}
/**
* initio_state_7 - SCSI state machine
* @host: InitIO host we are controlling
*
* Flush any residual bytes from the SCSI FIFO, then continue at
* state 6; a data phase at this point is a sequence error.
*/
int initio_state_7(struct initio_host * host)
{
int cnt, i;
#if DEBUG_STATE
printk("-s7-");
#endif
/* flush SCSI FIFO */
cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
if (cnt) {
for (i = 0; i < cnt; i++)
inb(host->addr + TUL_SFifo);
}
switch (host->phase) {
case DATA_IN: /* Data in phase */
case DATA_OUT: /* Data out phase */
return initio_bad_seq(host);
default:
return 6; /* Go to state 6 */
}
}
/**
* initio_xfer_data_in - Commence data input
* @host: InitIO host in use
*
* Commence a block of data transfer. The transfer itself will
* be managed by the controller and we will get a completion (or
* failure) interrupt.
*/
static int initio_xfer_data_in(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
if ((scb->flags & SCF_DIR) == SCF_DOUT)
return 6; /* wrong direction */
outl(scb->buflen, host->addr + TUL_SCnt0);
outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
if (scb->flags & SCF_SG) { /* S/G xfer */
outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_SG_IN, host->addr + TUL_XCmd);
} else {
outl(scb->buflen, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_X_IN, host->addr + TUL_XCmd);
}
scb->next_state = 0x5;
return 0; /* return to OS; wait for xfer done, the ISR resumes the state machine */
}
/**
* initio_xfer_data_out - Commence data output
* @host: InitIO host in use
*
* Commence a block of data transfer. The transfer itself will
* be managed by the controller and we will get a completion (or
* failure) interrupt.
*/
static int initio_xfer_data_out(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
if ((scb->flags & SCF_DIR) == SCF_DIN)
return 6; /* wrong direction */
outl(scb->buflen, host->addr + TUL_SCnt0);
outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
if (scb->flags & SCF_SG) { /* S/G xfer */
outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_SG_OUT, host->addr + TUL_XCmd);
} else {
outl(scb->buflen, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_X_OUT, host->addr + TUL_XCmd);
}
scb->next_state = 0x5;
return 0; /* return to OS; wait for xfer done, the ISR resumes the state machine */
}
int initio_xpad_in(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
if (active_tc->js_period & TSC_WIDE_SCSI)
outl(2, host->addr + TUL_SCnt0);
else
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
if (host->phase != DATA_IN) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
return 6;
}
inb(host->addr + TUL_SFifo);
}
}
int initio_xpad_out(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
if (active_tc->js_period & TSC_WIDE_SCSI)
outl(2, host->addr + TUL_SCnt0);
else
outl(1, host->addr + TUL_SCnt0);
outb(0, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if ((wait_tulip(host)) == -1)
return -1;
if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
return 6;
}
}
}
int initio_status_msg(struct initio_host * host)
{ /* status & MSG_IN */
struct scsi_ctrl_blk *scb = host->active;
u8 msg;
outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
/* get status */
scb->tastat = inb(host->addr + TUL_SFifo);
if (host->phase == MSG_OUT) {
if (host->jsstatus0 & TSS_PAR_ERROR)
outb(MSG_PARITY, host->addr + TUL_SFifo);
else
outb(MSG_NOP, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
if (host->phase == MSG_IN) {
msg = inb(host->addr + TUL_SFifo);
if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
if ((initio_msgin_accept(host)) == -1)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
outb(MSG_PARITY, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
if (msg == 0) { /* Command complete */
if ((scb->tastat & 0x18) == 0x10) /* No link support */
return initio_bad_seq(host);
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_done_disc(host);
}
if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
if ((scb->tastat & 0x18) == 0x10)
return initio_msgin_accept(host);
}
}
return initio_bad_seq(host);
}
/**
* int_initio_busfree - SCSI bus free
* @host: InitIO host being serviced
*
* Handle a bus free event: complete the active SCB with a selection
* timeout or unexpected bus free status, then re-arm hardware reselect.
*/
int int_initio_busfree(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
if (scb != NULL) {
if (scb->status & SCB_SELECT) { /* selection timeout */
initio_unlink_pend_scb(host, scb);
scb->hastat = HOST_SEL_TOUT;
initio_append_done_scb(host, scb);
} else { /* Unexpected bus free */
initio_unlink_busy_scb(host, scb);
scb->hastat = HOST_BUS_FREE;
initio_append_done_scb(host, scb);
}
host->active = NULL;
host->active_tc = NULL;
}
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
return -1;
}
/**
* int_initio_scsi_rst - SCSI reset occurred
* @host: Host seeing the reset
*
* A SCSI bus reset has occurred. Clean up any pending transfer
* the hardware is doing by DMA and then abort all active and
* disconnected commands. The mid layer should sort the rest out
* for us
*/
static int int_initio_scsi_rst(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
int i;
/* if DMA xfer is pending, abort DMA xfer */
if (inb(host->addr + TUL_XStatus) & 0x01) {
outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait for the DMA abort to complete */
while ((inb(host->addr + TUL_Int) & 0x04) == 0)
cpu_relax();
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/* Abort all active & disconnected scb */
while ((scb = initio_pop_busy_scb(host)) != NULL) {
scb->hastat = HOST_BAD_PHAS;
initio_append_done_scb(host, scb);
}
host->active = NULL;
host->active_tc = NULL;
/* clr sync nego. done flag */
for (i = 0; i < host->max_tar; i++)
host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
return -1;
}
/**
* int_initio_resel - Reselection occurred
* @host: InitIO host adapter
*
* A SCSI reselection event has been signalled and the interrupt
* is now being processed. Work out which command block needs attention
* and continue processing that command.
*/
int int_initio_resel(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
struct target_control *active_tc;
u8 tag, msg = 0;
u8 tar, lun;
if ((scb = host->active) != NULL) {
/* FIXME: Why check and not just clear ? */
if (scb->status & SCB_SELECT) /* if waiting for selection complete */
scb->status &= ~SCB_SELECT;
host->active = NULL;
}
/* --------- get target id---------------------- */
tar = inb(host->addr + TUL_SBusId);
/* ------ get LUN from Identify message----------- */
lun = inb(host->addr + TUL_SIdent) & 0x0F;
/* 07/22/98 from 0x1F -> 0x0F */
active_tc = &host->targets[tar];
host->active_tc = active_tc;
outb(active_tc->sconfig0, host->addr + TUL_SConfig);
outb(active_tc->js_period, host->addr + TUL_SPeriod);
/* ------------- tag queueing ? ------------------- */
if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
if ((initio_msgin_accept(host)) == -1)
return -1;
if (host->phase != MSG_IN)
goto no_tag;
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
goto no_tag;
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase != MSG_IN)
goto no_tag;
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
scb = host->scb + tag;
if (scb->target != tar || scb->lun != lun) {
return initio_msgout_abort_tag(host);
}
if (scb->status != SCB_BUSY) { /* 03/24/95 */
return initio_msgout_abort_tag(host);
}
host->active = scb;
if ((initio_msgin_accept(host)) == -1)
return -1;
} else { /* No tag */
no_tag:
if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
return initio_msgout_abort_targ(host);
}
host->active = scb;
if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
if ((initio_msgin_accept(host)) == -1)
return -1;
}
}
return 0;
}
/**
* int_initio_bad_seq - out of phase
* @host: InitIO host flagging event
*
* We have ended up out of phase somehow. Reset the host controller
* and throw all our toys out of the pram. Let the midlayer clean up
*/
static int int_initio_bad_seq(struct initio_host * host)
{ /* target wrong phase */
struct scsi_ctrl_blk *scb;
int i;
initio_reset_scsi(host, 10);
while ((scb = initio_pop_busy_scb(host)) != NULL) {
scb->hastat = HOST_BAD_PHAS;
initio_append_done_scb(host, scb);
}
for (i = 0; i < host->max_tar; i++)
host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
return -1;
}
/**
* initio_msgout_abort_targ - abort a target
* @host: InitIO host
*
* Abort when the target/lun does not match or when our SCB is not
* busy. Used by untagged commands.
*/
static int initio_msgout_abort_targ(struct initio_host * host)
{
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
outb(MSG_ABORT, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
}
/**
* initio_msgout_abort_tag - abort a tag
* @host: InitIO host
*
* Abort when the target/lun does not match or when our SCB is not
* busy. Used for tagged commands.
*/
static int initio_msgout_abort_tag(struct initio_host * host)
{
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
}
/**
* initio_msgin - Message in
* @host: InitIO Host
*
* Process incoming message
*/
static int initio_msgin(struct initio_host * host)
{
struct target_control *active_tc;
for (;;) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
switch (inb(host->addr + TUL_SFifo)) {
case MSG_DISC: /* Disconnect msg */
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
case MSG_SDP:
case MSG_RESTORE:
case MSG_NOP:
initio_msgin_accept(host);
break;
case MSG_REJ: /* Clear ATN first */
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
host->addr + TUL_SSignal);
active_tc = host->active_tc;
if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
host->addr + TUL_SSignal);
initio_msgin_accept(host);
break;
case MSG_EXTEND: /* extended msg */
initio_msgin_extend(host);
break;
case MSG_IGNOREWIDE:
initio_msgin_accept(host);
break;
case MSG_COMP:
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_done_disc(host);
default:
initio_msgout_reject(host);
break;
}
if (host->phase != MSG_IN)
return host->phase;
}
/* statement won't reach here */
}
static int initio_msgout_reject(struct initio_host * host)
{
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase == MSG_OUT) {
outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
return host->phase;
}
static int initio_msgout_ide(struct initio_host * host)
{
outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
static int initio_msgin_extend(struct initio_host * host)
{
u8 len, idx;
if (initio_msgin_accept(host) != MSG_IN)
return host->phase;
/* Get extended msg length */
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
len = inb(host->addr + TUL_SFifo);
host->msg[0] = len;
for (idx = 1; len != 0; len--) {
if ((initio_msgin_accept(host)) != MSG_IN)
return host->phase;
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
host->msg[idx++] = inb(host->addr + TUL_SFifo);
}
if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
u8 r;
if (host->msg[0] != 3) /* if length is not right */
return initio_msgout_reject(host);
if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
host->msg[3] = 0;
} else {
if (initio_msgin_sync(host) == 0 &&
(host->active_tc->flags & TCF_SYNC_DONE)) {
initio_sync_done(host);
return initio_msgin_accept(host);
}
}
r = inb(host->addr + TUL_SSignal);
outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
host->addr + TUL_SSignal);
if (initio_msgin_accept(host) != MSG_OUT)
return host->phase;
/* sync msg out */
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
initio_sync_done(host);
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo);
outb(1, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(host->msg[3], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
if (host->msg[0] != 2 || host->msg[1] != 3)
return initio_msgout_reject(host);
/* if it's WIDE DATA XFER REQ */
if (host->active_tc->flags & TCF_NO_WDTR) {
host->msg[2] = 0;
} else {
if (host->msg[2] > 2) /* > 32 bits */
return initio_msgout_reject(host);
if (host->msg[2] == 2) { /* == 32 */
host->msg[2] = 1;
} else {
if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
wdtr_done(host);
if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
return initio_msgin_accept(host);
}
}
}
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) != MSG_OUT)
return host->phase;
/* WDTR msg out */
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
static int initio_msgin_sync(struct initio_host * host)
{
char default_period;
default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
if (host->msg[3] > MAX_OFFSET) {
host->msg[3] = MAX_OFFSET;
if (host->msg[2] < default_period) {
host->msg[2] = default_period;
return 1;
}
if (host->msg[2] >= 59) /* Change to async */
host->msg[3] = 0;
return 1;
}
/* offset requests asynchronous transfers ? */
if (host->msg[3] == 0) {
return 0;
}
if (host->msg[2] < default_period) {
host->msg[2] = default_period;
return 1;
}
if (host->msg[2] >= 59) {
host->msg[3] = 0;
return 1;
}
return 0;
}
static int wdtr_done(struct initio_host * host)
{
host->active_tc->flags &= ~TCF_SYNC_DONE;
host->active_tc->flags |= TCF_WDTR_DONE;
host->active_tc->js_period = 0;
if (host->msg[2]) /* if 16 bit */
host->active_tc->js_period |= TSC_WIDE_SCSI;
host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return 1;
}
static int initio_sync_done(struct initio_host * host)
{
int i;
host->active_tc->flags |= TCF_SYNC_DONE;
if (host->msg[3]) {
host->active_tc->js_period |= host->msg[3];
for (i = 0; i < 8; i++) {
if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
break;
}
host->active_tc->js_period |= (i << 4);
host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
}
outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return -1;
}
static int initio_post_scsi_rst(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
struct target_control *active_tc;
int i;
host->active = NULL;
host->active_tc = NULL;
host->flags = 0;
while ((scb = initio_pop_busy_scb(host)) != NULL) {
scb->hastat = HOST_BAD_PHAS;
initio_append_done_scb(host, scb);
}
/* clear sync done flag */
active_tc = &host->targets[0];
for (i = 0; i < host->max_tar; active_tc++, i++) {
active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
/* Initialize the sync. xfer register values to an asyn xfer */
active_tc->js_period = 0;
active_tc->sconfig0 = host->sconf1;
host->act_tags[0] = 0; /* 07/22/98 */
host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
} /* for */
return -1;
}
static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
scb->status |= SCB_SELECT;
scb->next_state = 0x1;
host->active = scb;
host->active_tc = &host->targets[scb->target];
outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
}
static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
scb->status |= SCB_SELECT;
scb->next_state = 0x2;
outb(scb->ident, host->addr + TUL_SFifo);
for (i = 0; i < (int) scb->cdblen; i++)
outb(scb->cdb[i], host->addr + TUL_SFifo);
host->active_tc = &host->targets[scb->target];
host->active = scb;
outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
}
static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
scb->status |= SCB_SELECT;
scb->next_state = 0x2;
outb(scb->ident, host->addr + TUL_SFifo);
outb(scb->tagmsg, host->addr + TUL_SFifo);
outb(scb->tagid, host->addr + TUL_SFifo);
for (i = 0; i < scb->cdblen; i++)
outb(scb->cdb[i], host->addr + TUL_SFifo);
host->active_tc = &host->targets[scb->target];
host->active = scb;
outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
}
/**
* initio_bus_device_reset - SCSI Bus Device Reset
* @host: InitIO host to reset
*
* Perform a device reset and abort all pending SCBs for the
* victim device
*/
int initio_bus_device_reset(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
struct scsi_ctrl_blk *tmp, *prev;
u8 tar;
if (host->phase != MSG_OUT)
return int_initio_bad_seq(host); /* Unexpected phase */
initio_unlink_pend_scb(host, scb);
initio_release_scb(host, scb);
tar = scb->target; /* target */
active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
/* clr sync. nego & WDTR flags 07/22/98 */
/* abort all SCB with same target */
prev = tmp = host->first_busy; /* Check Busy queue */
while (tmp != NULL) {
if (tmp->target == tar) {
/* unlink it */
if (tmp == host->first_busy) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_busy)
host->last_busy = prev;
}
tmp->hastat = HOST_ABORTED;
initio_append_done_scb(host, tmp);
}
else { /* SCB kept on the queue: advance the previous pointer */
prev = tmp;
}
tmp = tmp->next;
}
outb(MSG_DEVRST, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
}
static int initio_msgin_accept(struct initio_host * host)
{
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
static int wait_tulip(struct initio_host * host)
{
while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
& TSS_INT_PENDING))
cpu_relax();
host->jsint = inb(host->addr + TUL_SInt);
host->phase = host->jsstatus0 & TSS_PH_MASK;
host->jsstatus1 = inb(host->addr + TUL_SStatus1);
if (host->jsint & TSS_RESEL_INT) /* if reselection interrupt */
return int_initio_resel(host);
if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
return int_initio_busfree(host);
if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
return int_initio_scsi_rst(host);
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
if (host->flags & HCF_EXPECT_DONE_DISC) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
initio_unlink_busy_scb(host, host->active);
host->active->hastat = 0;
initio_append_done_scb(host, host->active);
host->active = NULL;
host->active_tc = NULL;
host->flags &= ~HCF_EXPECT_DONE_DISC;
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
return -1;
}
if (host->flags & HCF_EXPECT_DISC) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
host->active = NULL;
host->active_tc = NULL;
host->flags &= ~HCF_EXPECT_DISC;
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
return -1;
}
return int_initio_busfree(host);
}
/* The old code really does the below. Can probably be removed */
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
return host->phase;
return host->phase;
}
static int initio_wait_disc(struct initio_host * host)
{
while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
cpu_relax();
host->jsint = inb(host->addr + TUL_SInt);
if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
return int_initio_scsi_rst(host);
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
host->active = NULL;
return -1;
}
return initio_bad_seq(host);
}
static int initio_wait_done_disc(struct initio_host * host)
{
while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
& TSS_INT_PENDING))
cpu_relax();
host->jsint = inb(host->addr + TUL_SInt);
if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
return int_initio_scsi_rst(host);
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
initio_unlink_busy_scb(host, host->active);
initio_append_done_scb(host, host->active);
host->active = NULL;
return -1;
}
return initio_bad_seq(host);
}
/**
* i91u_intr - IRQ handler
* @irqno: IRQ number
* @dev_id: Device identifier (the Scsi_Host being serviced)
*
* Take the relevant locks and then invoke the actual isr processing
* code under the lock.
*/
static irqreturn_t i91u_intr(int irqno, void *dev_id)
{
struct Scsi_Host *dev = dev_id;
unsigned long flags;
int r;
spin_lock_irqsave(dev->host_lock, flags);
r = initio_isr((struct initio_host *)dev->hostdata);
spin_unlock_irqrestore(dev->host_lock, flags);
if (r)
return IRQ_HANDLED;
else
return IRQ_NONE;
}
/**
* initio_build_scb - Build the mappings and SCB
* @host: InitIO host taking the command
* @cblk: Firmware command block
* @cmnd: SCSI midlayer command block
*
* Translate the abstract SCSI command into a firmware command block
* suitable for feeding to the InitIO host controller. This also requires
* that we build the scatter gather lists and ensure they are mapped properly.
*/
static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
{ /* Create corresponding SCB */
struct scatterlist *sglist;
struct sg_entry *sg; /* Pointer to SG list */
int i, nseg;
long total_len;
dma_addr_t dma_addr;
/* Fill in the command headers */
cblk->post = i91uSCBPost; /* i91u's callback routine */
cblk->srb = cmnd;
cblk->opcode = ExecSCSI;
cblk->flags = SCF_POST; /* After SCSI done, call post routine */
cblk->target = cmnd->device->id;
cblk->lun = cmnd->device->lun;
cblk->ident = cmnd->device->lun | DISC_ALLOW;
cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
/* Map the sense buffer into bus memory */
dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
SENSE_SIZE, DMA_FROM_DEVICE);
cblk->senseptr = (u32)dma_addr;
cblk->senselen = SENSE_SIZE;
cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
cblk->cdblen = cmnd->cmd_len;
/* Clear the returned status */
cblk->hastat = 0;
cblk->tastat = 0;
/* Copy the CDB */
memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);
/* Set up tags */
if (cmnd->device->tagged_supported) { /* Tag Support */
cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
cblk->tagmsg = 0; /* No tag support */
}
/* todo handle map_sg error */
nseg = scsi_dma_map(cmnd);
BUG_ON(nseg < 0);
if (nseg) {
dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
cblk->bufptr = (u32)dma_addr;
cmnd->SCp.dma_handle = dma_addr;
cblk->sglen = nseg;
cblk->flags |= SCF_SG; /* Turn on SG list flag */
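/*
 * The controller fetches the sg_entry array itself via bufptr; each
 * entry is 8 bytes (32-bit address + 32-bit length), which is why the
 * transfer setup code passes sglen << 3 as the byte count.
 */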
total_len = 0;
sg = &cblk->sglist[0];
scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
total_len += sg_dma_len(sglist);
++sg;
}
cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
total_len : scsi_bufflen(cmnd);
} else { /* No data transfer required */
cblk->buflen = 0;
cblk->sglen = 0;
}
}
/**
* i91u_queuecommand - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
* @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
static int i91u_queuecommand(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
struct scsi_ctrl_blk *cmnd;
cmd->scsi_done = done;
cmnd = initio_alloc_scb(host);
if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
initio_build_scb(host, cmnd, cmd);
initio_exec_scb(host, cmnd);
return 0;
}
/**
* i91u_bus_reset - reset the SCSI bus
* @cmnd: Command block we want to trigger the reset for
*
* Initiate a SCSI bus reset sequence
*/
static int i91u_bus_reset(struct scsi_cmnd * cmnd)
{
struct initio_host *host;
host = (struct initio_host *) cmnd->device->host->hostdata;
spin_lock_irq(cmnd->device->host->host_lock);
initio_reset_scsi(host, 0);
spin_unlock_irq(cmnd->device->host->host_lock);
return SUCCESS;
}
/**
* i91u_biosparam - return the "logical geometry"
* @sdev: SCSI device
* @dev: Matching block device
* @capacity: Size of the drive in sectors
* @info_array: Return space for BIOS geometry
*
* Map the device geometry in a manner compatible with the host
* controller BIOS behaviour.
*
* FIXME: limited to 2^32 sector devices.
*/
static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
sector_t capacity, int *info_array)
{
struct initio_host *host; /* Point to Host adapter control block */
struct target_control *tc;
host = (struct initio_host *) sdev->host->hostdata;
tc = &host->targets[sdev->id];
if (tc->heads) {
info_array[0] = tc->heads;
info_array[1] = tc->sectors;
info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
} else {
if (tc->drv_flags & TCF_DRV_255_63) {
info_array[0] = 255;
info_array[1] = 63;
info_array[2] = (unsigned long)capacity / 255 / 63;
} else {
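/* 64 heads * 32 sectors = 2048 sectors/cylinder, hence the >> 11 */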
info_array[0] = 64;
info_array[1] = 32;
info_array[2] = (unsigned long)capacity >> 11;
}
}
#if defined(DEBUG_BIOSPARAM)
if (i91u_debug & debug_biosparam) {
printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
info_array[0], info_array[1], info_array[2]);
printk("WARNING: check, if the bios geometry is correct.\n");
}
#endif
return 0;
}
/**
* i91u_unmap_scb - Unmap a command
* @pci_dev: PCI device the command is for
* @cmnd: The command itself
*
* Unmap any PCI mapping/IOMMU resources allocated when the command
* was mapped originally as part of initio_build_scb
*/
static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
if (cmnd->SCp.ptr) {
dma_unmap_single(&pci_dev->dev,
(dma_addr_t)((unsigned long)cmnd->SCp.ptr),
SENSE_SIZE, DMA_FROM_DEVICE);
cmnd->SCp.ptr = NULL;
}
/* request buffer */
if (scsi_sg_count(cmnd)) {
dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
scsi_dma_unmap(cmnd);
}
}
/**
* i91uSCBPost - SCSI callback
* @host_mem: Pointer to host adapter control block.
* @cblk_mem: Pointer to SCSI control block.
*
* This is the callback routine invoked when the tulip chip has
* finished processing one SCSI command.
*/
static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
{
struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */
struct initio_host *host;
struct scsi_ctrl_blk *cblk;
host = (struct initio_host *) host_mem;
cblk = (struct scsi_ctrl_blk *) cblk_mem;
if ((cmnd = cblk->srb) == NULL) {
printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
WARN_ON(1);
initio_release_scb(host, cblk); /* Release SCB for current channel */
return;
}
/*
* Remap the firmware error status into a mid layer one
*/
switch (cblk->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
cblk->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
cblk->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
cblk->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
cblk->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
than was allocated by the Data Length field or the sum of the
Scatter / Gather Data Length fields. */
case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
case 0x16: /* Invalid SCB Operation Code. */
default:
printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
cblk->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
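/* SCSI status byte in the low byte, host (DID_*) code in bits 16-23 */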
cmnd->result = cblk->tastat | (cblk->hastat << 16);
WARN_ON(cmnd == NULL);
i91u_unmap_scb(host->pci_dev, cmnd);
cmnd->scsi_done(cmnd); /* Notify system DONE */
initio_release_scb(host, cblk); /* Release SCB for current channel */
}
static struct scsi_host_template initio_template = {
.proc_name = "INI9100U",
.name = "Initio INI-9X00U/UW SCSI device driver",
.queuecommand = i91u_queuecommand,
.eh_bus_reset_handler = i91u_bus_reset,
.bios_param = i91u_biosparam,
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
static int initio_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
struct initio_host *host;
u32 reg;
u16 bios_seg;
struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
int num_scb, i, error;
error = pci_enable_device(pdev);
if (error)
return error;
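/*
 * Assemble the BIOS segment from PCI config dword 0x44: the low byte
 * becomes the high byte of bios_seg and the second byte the low byte;
 * a second byte of 0xFF is treated as zero.
 */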
pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
bios_seg = (u16) (reg & 0xFF);
if (((reg & 0xFF00) >> 8) == 0xFF)
reg = 0;
bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
error = -ENODEV;
goto out_disable_device;
}
shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
if (!shost) {
printk(KERN_WARNING "initio: Could not allocate host structure.\n");
error = -ENOMEM;
goto out_disable_device;
}
host = (struct initio_host *)shost->hostdata;
memset(host, 0, sizeof(struct initio_host));
host->addr = pci_resource_start(pdev, 0);
host->bios_addr = bios_seg;
if (!request_region(host->addr, 256, "i91u")) {
printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
error = -ENODEV;
goto out_host_put;
}
if (initio_tag_enable) /* 1.01i */
num_scb = MAX_TARGETS * i91u_MAXQUEUE;
else
num_scb = MAX_TARGETS + 3; /* 1 tape, 1 CD-ROM, 1 extra */
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
if ((scb = kzalloc(i, GFP_DMA)) != NULL)
break;
}
if (!scb) {
printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
error = -ENOMEM;
goto out_release_region;
}
host->pci_dev = pdev;
host->semaph = 1;
spin_lock_init(&host->semaph_lock);
host->num_scbs = num_scb;
host->scb = scb;
host->next_pending = scb;
host->next_avail = scb;
for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
tmp->tagid = i;
if (i != 0)
prev->next = tmp;
prev = tmp;
}
prev->next = NULL;
host->scb_end = tmp;
host->first_avail = scb;
host->last_avail = prev;
spin_lock_init(&host->avail_lock);
initio_init(host, phys_to_virt(((u32)bios_seg << 4)));
host->jsstatus0 = 0;
shost->io_port = host->addr;
shost->n_io_port = 0xff;
shost->can_queue = num_scb; /* 03/05/98 */
shost->unique_id = host->addr;
shost->max_id = host->max_tar;
shost->max_lun = 32; /* 10/21/97 */
shost->irq = pdev->irq;
shost->this_id = host->scsi_id; /* Assign HCS index */
shost->base = host->addr;
shost->sg_tablesize = TOTAL_SG_ENTRY;
error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
if (error < 0) {
printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
goto out_free_scbs;
}
pci_set_drvdata(pdev, shost);
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_free_irq;
scsi_scan_host(shost);
return 0;
out_free_irq:
free_irq(pdev->irq, shost);
out_free_scbs:
kfree(host->scb);
out_release_region:
release_region(host->addr, 256);
out_host_put:
scsi_host_put(shost);
out_disable_device:
pci_disable_device(pdev);
return error;
}
/**
* initio_remove_one - control shutdown
* @pdev: PCI device being released
*
* Release the resources assigned to this adapter after it has
* finished being used.
*/
static void initio_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct initio_host *s = (struct initio_host *)host->hostdata;
scsi_remove_host(host);
free_irq(pdev->irq, host);
release_region(s->addr, 256);
scsi_host_put(host);
pci_disable_device(pdev);
}
MODULE_LICENSE("GPL");
static struct pci_device_id initio_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
static struct pci_driver initio_pci_driver = {
.name = "initio",
.id_table = initio_pci_tbl,
.probe = initio_probe_one,
.remove = __devexit_p(initio_remove_one),
};
static int __init initio_init_driver(void)
{
return pci_register_driver(&initio_pci_driver);
}
static void __exit initio_exit_driver(void)
{
pci_unregister_driver(&initio_pci_driver);
}
MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("GPL");
module_init(initio_init_driver);
module_exit(initio_exit_driver);
| gpl-2.0 |
profblock/adaptive-litmus | drivers/staging/wlan-ng/prism2mgmt.c | 1321 | 38130 | /* src/prism2/driver/prism2mgmt.c
*
* Management request handler functions.
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License version 2 (the "GPL"), in which
* case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use
* your version of this file under the MPL, indicate your decision
* by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* info@linux-wlan.com
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* The functions in this file handle management requests sent from
* user mode.
*
* Most of these functions have two separate blocks of code that are
* conditional on whether this is a station or an AP. This is used
* to separate out the STA and AP responses to these management primitives.
* It's a choice (good, bad, indifferent?) to have the code in the same
* place so it's clear that the same primitive is implemented in both
* cases but has different behavior.
*
* --------------------------------------------------------------------
*/
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/random.h>
#include <linux/usb.h>
#include <linux/bitops.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211mgmt.h"
#include "p80211conv.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211metadef.h"
#include "p80211metastruct.h"
#include "hfa384x.h"
#include "prism2mgmt.h"
/* Converts 802.11 format rate specifications to prism2 */
#define p80211rate_to_p2bit(n) ((((n)&~BIT(7)) == 2) ? BIT(0) : \
(((n)&~BIT(7)) == 4) ? BIT(1) : \
(((n)&~BIT(7)) == 11) ? BIT(2) : \
(((n)&~BIT(7)) == 22) ? BIT(3) : 0)
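/*
 * Rates arrive in 802.11 units of 500 kb/s with BIT(7) as the
 * "basic rate" flag, e.g. 0x82 (1 Mb/s, basic) -> BIT(0) and
 * 0x0b (5.5 Mb/s) -> BIT(2).
 */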
/*----------------------------------------------------------------
* prism2mgmt_scan
*
* Initiate a scan for BSSs.
*
* This function corresponds to MLME-scan.request and part of
* MLME-scan.confirm. As far as I can tell in the standard, there
* are no restrictions on when a scan.request may be issued. We have
* to handle in whatever state the driver/MAC happen to be.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
hfa384x_t *hw = wlandev->priv;
struct p80211msg_dot11req_scan *msg = msgp;
u16 roamingmode, word;
int i, timeout;
int istmpenable = 0;
hfa384x_HostScanRequest_data_t scanreq;
/* gatekeeper check */
if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
hw->ident_sta_fw.minor,
hw->ident_sta_fw.variant) <
HFA384x_FIRMWARE_VERSION(1, 3, 2)) {
netdev_err(wlandev->netdev,
"HostScan not supported with current firmware (<1.3.2).\n");
result = 1;
msg->resultcode.data = P80211ENUM_resultcode_not_supported;
goto exit;
}
memset(&scanreq, 0, sizeof(scanreq));
/* save current roaming mode */
result = hfa384x_drvr_getconfig16(hw,
HFA384x_RID_CNFROAMINGMODE,
&roamingmode);
if (result) {
netdev_err(wlandev->netdev,
"getconfig(ROAMMODE) failed. result=%d\n", result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
/* drop into mode 3 for the scan */
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFROAMINGMODE,
HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
if (result) {
netdev_err(wlandev->netdev,
"setconfig(ROAMINGMODE) failed. result=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
/* active or passive? */
if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
hw->ident_sta_fw.minor,
hw->ident_sta_fw.variant) >
HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
if (msg->scantype.data != P80211ENUM_scantype_active)
word = cpu_to_le16(msg->maxchanneltime.data);
else
word = 0;
result =
hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFPASSIVESCANCTRL,
word);
if (result) {
netdev_warn(wlandev->netdev,
"Passive scan not supported with current firmware. (<1.5.1)\n");
}
}
/* set up the txrate to be 2MBPS. Should be fastest basicrate... */
word = HFA384x_RATEBIT_2;
scanreq.txRate = cpu_to_le16(word);
/* set up the channel list */
word = 0;
for (i = 0; i < msg->channellist.data.len; i++) {
u8 channel = msg->channellist.data.data[i];
if (channel > 14)
continue;
/* channel 1 is BIT 0 ... channel 14 is BIT 13 */
word |= (1 << (channel - 1));
}
scanreq.channelList = cpu_to_le16(word);
/* set up the ssid, if present. */
scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len);
memcpy(scanreq.ssid.data, msg->ssid.data.data, msg->ssid.data.len);
/* Enable the MAC port if it's not already enabled */
result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_PORTSTATUS, &word);
if (result) {
netdev_err(wlandev->netdev,
"getconfig(PORTSTATUS) failed. result=%d\n", result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
if (word == HFA384x_PORTSTATUS_DISABLED) {
u16 wordbuf[17];
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFROAMINGMODE,
HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
if (result) {
netdev_err(wlandev->netdev,
"setconfig(ROAMINGMODE) failed. result=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
/* Construct a bogus SSID and assign it to OwnSSID and
* DesiredSSID
*/
wordbuf[0] = cpu_to_le16(WLAN_SSID_MAXLEN);
get_random_bytes(&wordbuf[1], WLAN_SSID_MAXLEN);
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFOWNSSID,
wordbuf,
HFA384x_RID_CNFOWNSSID_LEN);
if (result) {
netdev_err(wlandev->netdev, "Failed to set OwnSSID.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFDESIREDSSID,
wordbuf,
HFA384x_RID_CNFDESIREDSSID_LEN);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set DesiredSSID.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
/* bsstype */
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFPORTTYPE,
HFA384x_PORTTYPE_IBSS);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set CNFPORTTYPE.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
/* ibss options */
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CREATEIBSS,
HFA384x_CREATEIBSS_JOINCREATEIBSS);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set CREATEIBSS.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
result = hfa384x_drvr_enable(hw, 0);
if (result) {
netdev_err(wlandev->netdev,
"drvr_enable(0) failed. result=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
istmpenable = 1;
}
/* Figure out our timeout: channels * maxchanneltime gives Kus (~ms), then convert to jiffies */
timeout = msg->channellist.data.len * msg->maxchanneltime.data;
timeout = (timeout * HZ) / 1000;
/* Issue the scan request */
hw->scanflag = 0;
result = hfa384x_drvr_setconfig(hw,
HFA384x_RID_HOSTSCAN, &scanreq,
sizeof(hfa384x_HostScanRequest_data_t));
if (result) {
netdev_err(wlandev->netdev,
"setconfig(SCANREQUEST) failed. result=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
/* sleep until info frame arrives */
wait_event_interruptible_timeout(hw->cmdq, hw->scanflag, timeout);
msg->numbss.status = P80211ENUM_msgitem_status_data_ok;
if (hw->scanflag == -1)
hw->scanflag = 0;
msg->numbss.data = hw->scanflag;
hw->scanflag = 0;
/* Disable port if we temporarily enabled it. */
if (istmpenable) {
result = hfa384x_drvr_disable(hw, 0);
if (result) {
netdev_err(wlandev->netdev,
"drvr_disable(0) failed. result=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
}
/* restore original roaming mode */
result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFROAMINGMODE,
roamingmode);
if (result) {
netdev_err(wlandev->netdev,
"setconfig(ROAMMODE) failed. result=%d\n", result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
goto exit;
}
result = 0;
msg->resultcode.data = P80211ENUM_resultcode_success;
exit:
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
return result;
}
/*----------------------------------------------------------------
* prism2mgmt_scan_results
*
* Retrieve the BSS description for one of the BSSs identified in
* a scan.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
struct p80211msg_dot11req_scan_results *req;
hfa384x_t *hw = wlandev->priv;
hfa384x_HScanResultSub_t *item = NULL;
int count;
req = (struct p80211msg_dot11req_scan_results *) msgp;
req->resultcode.status = P80211ENUM_msgitem_status_data_ok;
if (!hw->scanresults) {
netdev_err(wlandev->netdev,
"dot11req_scan_results can only be used after a successful dot11req_scan.\n");
result = 2;
req->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
goto exit;
}
count = (hw->scanresults->framelen - 3) / 32;
if (count > HFA384x_SCANRESULT_MAX)
count = HFA384x_SCANRESULT_MAX;
if (req->bssindex.data >= count) {
pr_debug("requested index (%d) out of range (%d)\n",
req->bssindex.data, count);
result = 2;
req->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
goto exit;
}
item = &(hw->scanresults->info.hscanresult.result[req->bssindex.data]);
/* signal and noise */
req->signal.status = P80211ENUM_msgitem_status_data_ok;
req->noise.status = P80211ENUM_msgitem_status_data_ok;
req->signal.data = le16_to_cpu(item->sl);
req->noise.data = le16_to_cpu(item->anl);
/* BSSID */
req->bssid.status = P80211ENUM_msgitem_status_data_ok;
req->bssid.data.len = WLAN_BSSID_LEN;
memcpy(req->bssid.data.data, item->bssid, WLAN_BSSID_LEN);
/* SSID */
req->ssid.status = P80211ENUM_msgitem_status_data_ok;
req->ssid.data.len = le16_to_cpu(item->ssid.len);
req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
/* supported rates */
for (count = 0; count < 10; count++)
if (item->supprates[count] == 0)
break;
#define REQBASICRATE(N) \
do { \
if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
item->supprates[(N)-1])) { \
req->basicrate ## N .data = item->supprates[(N)-1]; \
req->basicrate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
} \
} while (0)
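/*
 * For example, REQBASICRATE(1) copies item->supprates[0] into
 * req->basicrate1 when at least one rate was reported and that rate
 * is flagged as basic.
 */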
REQBASICRATE(1);
REQBASICRATE(2);
REQBASICRATE(3);
REQBASICRATE(4);
REQBASICRATE(5);
REQBASICRATE(6);
REQBASICRATE(7);
REQBASICRATE(8);
#define REQSUPPRATE(N) \
do { \
if (count >= N) { \
req->supprate ## N .data = item->supprates[(N)-1]; \
req->supprate ## N .status = \
P80211ENUM_msgitem_status_data_ok; \
} \
} while (0)
REQSUPPRATE(1);
REQSUPPRATE(2);
REQSUPPRATE(3);
REQSUPPRATE(4);
REQSUPPRATE(5);
REQSUPPRATE(6);
REQSUPPRATE(7);
REQSUPPRATE(8);
/* beacon period */
req->beaconperiod.status = P80211ENUM_msgitem_status_data_ok;
req->beaconperiod.data = le16_to_cpu(item->bcnint);
/* timestamps */
req->timestamp.status = P80211ENUM_msgitem_status_data_ok;
req->timestamp.data = jiffies;
req->localtime.status = P80211ENUM_msgitem_status_data_ok;
req->localtime.data = jiffies;
/* atim window */
req->ibssatimwindow.status = P80211ENUM_msgitem_status_data_ok;
req->ibssatimwindow.data = le16_to_cpu(item->atim);
/* Channel */
req->dschannel.status = P80211ENUM_msgitem_status_data_ok;
req->dschannel.data = le16_to_cpu(item->chid);
/* capinfo bits */
count = le16_to_cpu(item->capinfo);
req->capinfo.status = P80211ENUM_msgitem_status_data_ok;
req->capinfo.data = count;
/* privacy flag */
req->privacy.status = P80211ENUM_msgitem_status_data_ok;
req->privacy.data = WLAN_GET_MGMT_CAP_INFO_PRIVACY(count);
/* cfpollable */
req->cfpollable.status = P80211ENUM_msgitem_status_data_ok;
req->cfpollable.data = WLAN_GET_MGMT_CAP_INFO_CFPOLLABLE(count);
/* cfpollreq */
req->cfpollreq.status = P80211ENUM_msgitem_status_data_ok;
req->cfpollreq.data = WLAN_GET_MGMT_CAP_INFO_CFPOLLREQ(count);
/* bsstype */
req->bsstype.status = P80211ENUM_msgitem_status_data_ok;
req->bsstype.data = (WLAN_GET_MGMT_CAP_INFO_ESS(count)) ?
P80211ENUM_bsstype_infrastructure : P80211ENUM_bsstype_independent;
result = 0;
req->resultcode.data = P80211ENUM_resultcode_success;
exit:
return result;
}
/*----------------------------------------------------------------
* prism2mgmt_start
*
* Start a BSS. Any station can do this for IBSS, only AP for ESS.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
int prism2mgmt_start(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
hfa384x_t *hw = wlandev->priv;
struct p80211msg_dot11req_start *msg = msgp;
p80211pstrd_t *pstr;
u8 bytebuf[80];
struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
u16 word;
wlandev->macmode = WLAN_MACMODE_NONE;
/* Set the SSID */
memcpy(&wlandev->ssid, &msg->ssid.data, sizeof(msg->ssid.data));
/*** ADHOC IBSS ***/
/* see if current f/w is less than 8c3 */
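/*
* HFA384x_FIRMWARE_VERSION() presumably packs major/minor/variant into a
* single comparable integer (something along the lines of
* (major << 16) | (minor << 8) | variant), so the test below is simply
* "is the station firmware older than 0.8.3".
*/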
if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
hw->ident_sta_fw.minor,
hw->ident_sta_fw.variant) <
HFA384x_FIRMWARE_VERSION(0, 8, 3)) {
/* Ad-Hoc not quite supported on Prism2 */
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
msg->resultcode.data = P80211ENUM_resultcode_not_supported;
goto done;
}
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
/*** STATION ***/
/* Set the REQUIRED config items */
/* SSID */
pstr = (p80211pstrd_t *) &(msg->ssid.data);
prism2mgmt_pstr2bytestr(p2bytestr, pstr);
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFOWNSSID,
bytebuf, HFA384x_RID_CNFOWNSSID_LEN);
if (result) {
netdev_err(wlandev->netdev, "Failed to set CnfOwnSSID\n");
goto failed;
}
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFDESIREDSSID,
bytebuf,
HFA384x_RID_CNFDESIREDSSID_LEN);
if (result) {
netdev_err(wlandev->netdev, "Failed to set CnfDesiredSSID\n");
goto failed;
}
/* bsstype - we use the default in the ap firmware */
/* IBSS port */
hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFPORTTYPE, 0);
/* beacon period */
word = msg->beaconperiod.data;
result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNint, word);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set beacon period=%d.\n", word);
goto failed;
}
/* dschannel */
word = msg->dschannel.data;
result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFOWNCHANNEL, word);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set channel=%d.\n", word);
goto failed;
}
/* Basic rates */
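/*
* p80211rate_to_p2bit() maps an 802.11 rate element onto the Prism2 rate
* bitmask written to CNFBASICRATES/CNFSUPPRATES below; on this hardware
* that is assumed to be bit0=1Mbps, bit1=2Mbps, bit2=5.5Mbps, bit3=11Mbps
* (compare the 0x000f "all rates" value used in prism2mgmt_autojoin()).
* Only rates whose msgitems were actually supplied are OR-ed in.
*/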
word = p80211rate_to_p2bit(msg->basicrate1.data);
if (msg->basicrate2.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate2.data);
if (msg->basicrate3.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate3.data);
if (msg->basicrate4.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate4.data);
if (msg->basicrate5.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate5.data);
if (msg->basicrate6.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate6.data);
if (msg->basicrate7.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate7.data);
if (msg->basicrate8.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->basicrate8.data);
result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFBASICRATES, word);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set basicrates=%d.\n", word);
goto failed;
}
/* Operational rates (supprates and txratecontrol) */
word = p80211rate_to_p2bit(msg->operationalrate1.data);
if (msg->operationalrate2.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate2.data);
if (msg->operationalrate3.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate3.data);
if (msg->operationalrate4.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate4.data);
if (msg->operationalrate5.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate5.data);
if (msg->operationalrate6.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate6.data);
if (msg->operationalrate7.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate7.data);
if (msg->operationalrate8.status == P80211ENUM_msgitem_status_data_ok)
word |= p80211rate_to_p2bit(msg->operationalrate8.data);
result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFSUPPRATES, word);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set supprates=%d.\n", word);
goto failed;
}
result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_TXRATECNTL, word);
if (result) {
netdev_err(wlandev->netdev, "Failed to set txrates=%d.\n",
word);
goto failed;
}
/* Set the macmode so the frame setup code knows what to do */
if (msg->bsstype.data == P80211ENUM_bsstype_independent) {
wlandev->macmode = WLAN_MACMODE_IBSS_STA;
/* let's extend the data length a bit */
hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFMAXDATALEN, 2304);
}
/* Enable the Port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
netdev_err(wlandev->netdev,
"Enable macport failed, result=%d.\n", result);
goto failed;
}
msg->resultcode.data = P80211ENUM_resultcode_success;
goto done;
failed:
pr_debug("Failed to set a config option, result=%d\n", result);
msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
done:
result = 0;
return result;
}
/*----------------------------------------------------------------
* prism2mgmt_readpda
*
* Collect the PDA data and put it in the message.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
int prism2mgmt_readpda(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
struct p80211msg_p2req_readpda *msg = msgp;
int result;
/* We only support collecting the PDA when in the FWLOAD
* state.
*/
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
netdev_err(wlandev->netdev,
"PDA may only be read in the fwload state.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
} else {
/* Call drvr_readpda(), it handles the auxport enable
* and validating the returned PDA.
*/
result = hfa384x_drvr_readpda(hw,
msg->pda.data,
HFA384x_PDA_LEN_MAX);
if (result) {
netdev_err(wlandev->netdev,
"hfa384x_drvr_readpda() failed, result=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
msg->resultcode.status =
P80211ENUM_msgitem_status_data_ok;
return 0;
}
msg->pda.status = P80211ENUM_msgitem_status_data_ok;
msg->resultcode.data = P80211ENUM_resultcode_success;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
}
return 0;
}
/*----------------------------------------------------------------
* prism2mgmt_ramdl_state
*
* Establishes the beginning/end of a card RAM download session.
*
* It is expected that the ramdl_write() function will be called
* one or more times between the 'enable' and 'disable' calls to
* this function.
*
* Note: This function should not be called when a mac comm port
* is active.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
int prism2mgmt_ramdl_state(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
struct p80211msg_p2req_ramdl_state *msg = msgp;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
netdev_err(wlandev->netdev,
"ramdl_state(): may only be called in the fwload state.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
return 0;
}
/*
** Note: Interrupts are locked out if this is an AP and are NOT
** locked out if this is a station.
*/
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
if (msg->enable.data == P80211ENUM_truth_true) {
if (hfa384x_drvr_ramdl_enable(hw, msg->exeaddr.data)) {
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
} else {
msg->resultcode.data = P80211ENUM_resultcode_success;
}
} else {
hfa384x_drvr_ramdl_disable(hw);
msg->resultcode.data = P80211ENUM_resultcode_success;
}
return 0;
}
/*----------------------------------------------------------------
* prism2mgmt_ramdl_write
*
* Writes a buffer to the card RAM using the download state. This
* is for writing code to card RAM. To just read or write raw data
* use the aux functions.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
int prism2mgmt_ramdl_write(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
struct p80211msg_p2req_ramdl_write *msg = msgp;
u32 addr;
u32 len;
u8 *buf;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
netdev_err(wlandev->netdev,
"ramdl_write(): may only be called in the fwload state.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
return 0;
}
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
/* first validate the length */
if (msg->len.data > sizeof(msg->data.data)) {
msg->resultcode.data =
P80211ENUM_resultcode_invalid_parameters;
return 0;
}
/* call the hfa384x function to do the write */
addr = msg->addr.data;
len = msg->len.data;
buf = msg->data.data;
if (hfa384x_drvr_ramdl_write(hw, addr, buf, len))
msg->resultcode.data = P80211ENUM_resultcode_refused;
else
msg->resultcode.data = P80211ENUM_resultcode_success;
return 0;
}
/*----------------------------------------------------------------
* prism2mgmt_flashdl_state
*
* Establishes the beginning/end of a card Flash download session.
*
* It is expected that the flashdl_write() function will be called
* one or more times between the 'enable' and 'disable' calls to
* this function.
*
* Note: This function should not be called when a mac comm port
* is active.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
int prism2mgmt_flashdl_state(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
hfa384x_t *hw = wlandev->priv;
struct p80211msg_p2req_flashdl_state *msg = msgp;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
netdev_err(wlandev->netdev,
"flashdl_state(): may only be called in the fwload state.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
return 0;
}
/*
** Note: Interrupts are locked out if this is an AP and are NOT
** locked out if this is a station.
*/
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
if (msg->enable.data == P80211ENUM_truth_true) {
if (hfa384x_drvr_flashdl_enable(hw)) {
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
} else {
msg->resultcode.data = P80211ENUM_resultcode_success;
}
} else {
hfa384x_drvr_flashdl_disable(hw);
msg->resultcode.data = P80211ENUM_resultcode_success;
/* NOTE: At this point, the MAC is in the post-reset
* state and the driver is in the fwload state.
* We need to get the MAC back into the fwload
* state. To do this, we set the msdstate to HWPRESENT
* and then call the ifstate function to redo everything
* that got us into the fwload state.
*/
wlandev->msdstate = WLAN_MSD_HWPRESENT;
result = prism2sta_ifstate(wlandev, P80211ENUM_ifstate_fwload);
if (result != P80211ENUM_resultcode_success) {
netdev_err(wlandev->netdev,
"prism2sta_ifstate(fwload) failed, P80211ENUM_resultcode=%d\n",
result);
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
result = -1;
}
}
return 0;
}
/*----------------------------------------------------------------
* prism2mgmt_flashdl_write
*
* Writes a buffer to the card flash using the download state.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
----------------------------------------------------------------*/
int prism2mgmt_flashdl_write(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
struct p80211msg_p2req_flashdl_write *msg = msgp;
u32 addr;
u32 len;
u8 *buf;
if (wlandev->msdstate != WLAN_MSD_FWLOAD) {
netdev_err(wlandev->netdev,
"flashdl_write(): may only be called in the fwload state.\n");
msg->resultcode.data =
P80211ENUM_resultcode_implementation_failure;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
return 0;
}
/*
** Note: Interrupts are locked out if this is an AP and are NOT
** locked out if this is a station.
*/
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
/* first validate the length */
if (msg->len.data > sizeof(msg->data.data)) {
msg->resultcode.data =
P80211ENUM_resultcode_invalid_parameters;
return 0;
}
/* call the hfa384x function to do the write */
addr = msg->addr.data;
len = msg->len.data;
buf = msg->data.data;
if (hfa384x_drvr_flashdl_write(hw, addr, buf, len))
msg->resultcode.data = P80211ENUM_resultcode_refused;
else
msg->resultcode.data = P80211ENUM_resultcode_success;
return 0;
}
/*----------------------------------------------------------------
* prism2mgmt_autojoin
*
* Associate with an ESS.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp)
{
hfa384x_t *hw = wlandev->priv;
int result = 0;
u16 reg;
u16 port_type;
struct p80211msg_lnxreq_autojoin *msg = msgp;
p80211pstrd_t *pstr;
u8 bytebuf[256];
struct hfa384x_bytestr *p2bytestr = (struct hfa384x_bytestr *) bytebuf;
wlandev->macmode = WLAN_MACMODE_NONE;
/* Set the SSID */
memcpy(&wlandev->ssid, &msg->ssid.data, sizeof(msg->ssid.data));
/* Disable the Port */
hfa384x_drvr_disable(hw, 0);
/*** STATION ***/
/* Set the TxRates */
hfa384x_drvr_setconfig16(hw, HFA384x_RID_TXRATECNTL, 0x000f);
/* Set the auth type */
if (msg->authtype.data == P80211ENUM_authalg_sharedkey)
reg = HFA384x_CNFAUTHENTICATION_SHAREDKEY;
else
reg = HFA384x_CNFAUTHENTICATION_OPENSYSTEM;
hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAUTHENTICATION, reg);
/* Set the ssid */
memset(bytebuf, 0, 256);
pstr = (p80211pstrd_t *) &(msg->ssid.data);
prism2mgmt_pstr2bytestr(p2bytestr, pstr);
result = hfa384x_drvr_setconfig(hw, HFA384x_RID_CNFDESIREDSSID,
bytebuf,
HFA384x_RID_CNFDESIREDSSID_LEN);
port_type = HFA384x_PORTTYPE_BSS;
/* Set the PortType */
hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFPORTTYPE, port_type);
/* Enable the Port */
hfa384x_drvr_enable(hw, 0);
/* Set the resultcode */
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
msg->resultcode.data = P80211ENUM_resultcode_success;
return result;
}
/*----------------------------------------------------------------
* prism2mgmt_wlansniff
*
* Start or stop sniffing.
*
* Arguments:
* wlandev wlan device structure
* msgp ptr to msg buffer
*
* Returns:
* 0 success and done
* <0 success, but we're waiting for something to finish.
* >0 an error occurred while handling the message.
* Side effects:
*
* Call context:
* process thread (usually)
* interrupt
----------------------------------------------------------------*/
int prism2mgmt_wlansniff(wlandevice_t *wlandev, void *msgp)
{
int result = 0;
struct p80211msg_lnxreq_wlansniff *msg = msgp;
hfa384x_t *hw = wlandev->priv;
u16 word;
msg->resultcode.status = P80211ENUM_msgitem_status_data_ok;
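/*
* msg->enable selects the direction: truth_false tears monitor mode down
* and restores the pre-sniff port and WEP configuration, while truth_true
* saves the current state (when the port was up), tunes to the requested
* channel and switches the interface into monitor mode.
*/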
switch (msg->enable.data) {
case P80211ENUM_truth_false:
/* Confirm that we're in monitor mode */
if (wlandev->netdev->type == ARPHRD_ETHER) {
msg->resultcode.data =
P80211ENUM_resultcode_invalid_parameters;
return 0;
}
/* Disable monitor mode */
result = hfa384x_cmd_monitor(hw, HFA384x_MONITOR_DISABLE);
if (result) {
pr_debug("failed to disable monitor mode, result=%d\n",
result);
goto failed;
}
/* Disable port 0 */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
pr_debug
("failed to disable port 0 after sniffing, result=%d\n",
result);
goto failed;
}
/* Clear the driver state */
wlandev->netdev->type = ARPHRD_ETHER;
/* Restore the wepflags */
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFWEPFLAGS,
hw->presniff_wepflags);
if (result) {
pr_debug
("failed to restore wepflags=0x%04x, result=%d\n",
hw->presniff_wepflags, result);
goto failed;
}
/* Set the port to its prior type and enable (if necessary) */
if (hw->presniff_port_type != 0) {
word = hw->presniff_port_type;
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFPORTTYPE,
word);
if (result) {
pr_debug
("failed to restore porttype, result=%d\n",
result);
goto failed;
}
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
pr_debug("failed to enable port to presniff setting, result=%d\n",
result);
goto failed;
}
} else {
result = hfa384x_drvr_disable(hw, 0);
}
netdev_info(wlandev->netdev, "monitor mode disabled\n");
msg->resultcode.data = P80211ENUM_resultcode_success;
return 0;
case P80211ENUM_truth_true:
/* Disable the port (if enabled), only check Port 0 */
if (hw->port_enabled[0]) {
if (wlandev->netdev->type == ARPHRD_ETHER) {
/* Save macport 0 state */
result = hfa384x_drvr_getconfig16(hw,
HFA384x_RID_CNFPORTTYPE,
&(hw->presniff_port_type));
if (result) {
pr_debug
("failed to read porttype, result=%d\n",
result);
goto failed;
}
/* Save the wepflags state */
result = hfa384x_drvr_getconfig16(hw,
HFA384x_RID_CNFWEPFLAGS,
&(hw->presniff_wepflags));
if (result) {
pr_debug
("failed to read wepflags, result=%d\n",
result);
goto failed;
}
hfa384x_drvr_stop(hw);
result = hfa384x_drvr_start(hw);
if (result) {
pr_debug("failed to restart the card for sniffing, result=%d\n",
result);
goto failed;
}
} else {
/* Disable the port */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
pr_debug("failed to enable port for sniffing, result=%d\n",
result);
goto failed;
}
}
} else {
hw->presniff_port_type = 0;
}
/* Set the channel we wish to sniff */
word = msg->channel.data;
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFOWNCHANNEL,
word);
hw->sniff_channel = word;
if (result) {
pr_debug("failed to set channel %d, result=%d\n",
word, result);
goto failed;
}
/* Now if we're already sniffing, we can skip the rest */
if (wlandev->netdev->type != ARPHRD_ETHER) {
/* Set the port type to pIbss */
word = HFA384x_PORTTYPE_PSUEDOIBSS;
result = hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFPORTTYPE,
word);
if (result) {
pr_debug
("failed to set porttype %d, result=%d\n",
word, result);
goto failed;
}
if ((msg->keepwepflags.status ==
P80211ENUM_msgitem_status_data_ok)
&& (msg->keepwepflags.data !=
P80211ENUM_truth_true)) {
/* Set the wepflags for no decryption */
word = HFA384x_WEPFLAGS_DISABLE_TXCRYPT |
HFA384x_WEPFLAGS_DISABLE_RXCRYPT;
result =
hfa384x_drvr_setconfig16(hw,
HFA384x_RID_CNFWEPFLAGS,
word);
}
if (result) {
pr_debug
("failed to set wepflags=0x%04x, result=%d\n",
word, result);
goto failed;
}
}
/* Do we want to strip the FCS in monitor mode? */
if ((msg->stripfcs.status == P80211ENUM_msgitem_status_data_ok)
&& (msg->stripfcs.data == P80211ENUM_truth_true)) {
hw->sniff_fcs = 0;
} else {
hw->sniff_fcs = 1;
}
/* Do we want to truncate the packets? */
if (msg->packet_trunc.status ==
P80211ENUM_msgitem_status_data_ok) {
hw->sniff_truncate = msg->packet_trunc.data;
} else {
hw->sniff_truncate = 0;
}
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
pr_debug
("failed to enable port for sniffing, result=%d\n",
result);
goto failed;
}
/* Enable monitor mode */
result = hfa384x_cmd_monitor(hw, HFA384x_MONITOR_ENABLE);
if (result) {
pr_debug("failed to enable monitor mode, result=%d\n",
result);
goto failed;
}
if (wlandev->netdev->type == ARPHRD_ETHER)
netdev_info(wlandev->netdev, "monitor mode enabled\n");
/* Set the driver state */
/* Do we want the prism2 header? */
if ((msg->prismheader.status ==
P80211ENUM_msgitem_status_data_ok)
&& (msg->prismheader.data == P80211ENUM_truth_true)) {
hw->sniffhdr = 0;
wlandev->netdev->type = ARPHRD_IEEE80211_PRISM;
} else
if ((msg->wlanheader.status ==
P80211ENUM_msgitem_status_data_ok)
&& (msg->wlanheader.data == P80211ENUM_truth_true)) {
hw->sniffhdr = 1;
wlandev->netdev->type = ARPHRD_IEEE80211_PRISM;
} else {
wlandev->netdev->type = ARPHRD_IEEE80211;
}
msg->resultcode.data = P80211ENUM_resultcode_success;
return 0;
default:
msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
return 0;
}
failed:
msg->resultcode.data = P80211ENUM_resultcode_refused;
return 0;
}
| gpl-2.0 |
primiano/edison-kernel | drivers/staging/comedi/drivers/ni_atmio.c | 2089 | 12458 | /*
comedi/drivers/ni_atmio.c
Hardware driver for NI AT-MIO E series cards
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: ni_atmio
Description: National Instruments AT-MIO-E series
Author: ds
Devices: [National Instruments] AT-MIO-16E-1 (ni_atmio),
AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
Status: works
Updated: Thu May 1 20:03:02 CDT 2003
The driver has 2.6 kernel isapnp support, and
will automatically probe for a supported board if the
I/O base is left unspecified with comedi_config.
However, many of
the isapnp id numbers are unknown. If your board is not
recognized, please send the output of 'cat /proc/isapnp'
(you may need to modprobe the isa-pnp module for
/proc/isapnp to exist) so the
id numbers for your board can be added to the driver.
Otherwise, you can use the isapnptools package to configure
your board. Use isapnp to
configure the I/O base and IRQ for the board, and then pass
the same values as
parameters in comedi_config. A sample isapnp.conf file is included
in the etc/ directory of Comedilib.
Comedilib includes a utility to autocalibrate these boards. The
boards seem to boot into a state where all the calibration DACs
are at one extreme of their range, thus the default calibration
is terrible. Calibration at boot is strongly encouraged.
To use the extended digital I/O on some of the boards, enable the
8255 driver when configuring the Comedi source tree.
External triggering is supported for some events. The channel index
(scan_begin_arg, etc.) maps to PFI0 - PFI9.
Some of the more esoteric triggering possibilities of these boards
are not supported.
*/
/*
The real guts of the driver is in ni_mio_common.c, which is included
both here and in ni_pcimio.c
Interrupt support added by Truxton Fulton <trux@truxton.com>
References for specifications:
340747b.pdf Register Level Programmer Manual (obsolete)
340747c.pdf Register Level Programmer Manual (new)
DAQ-STC reference manual
Other possibly relevant info:
320517c.pdf User manual (obsolete)
320517f.pdf User manual (new)
320889a.pdf delete
320906c.pdf maximum signal ratings
321066a.pdf about 16x
321791a.pdf discontinuation of at-mio-16e-10 rev. c
321808a.pdf about at-mio-16e-10 rev P
321837a.pdf discontinuation of at-mio-16de-10 rev d
321838a.pdf about at-mio-16de-10 rev N
ISSUES:
need to deal with external reference for DAC, and other DAC
properties in board properties
deal with at-mio-16de-10 revision D to N changes, etc.
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/isapnp.h>
#include "ni_stc.h"
#include "8255.h"
#undef DEBUG
#define ATMIO 1
#undef PCIMIO
/*
* AT specific setup
*/
#define NI_SIZE 0x20
#define MAX_N_CALDACS 32
static const struct ni_board_struct ni_boards[] = {
{.device_id = 44,
.isapnp_id = 0x0000, /* XXX unknown */
.name = "at-mio-16e-1",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 8192,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.has_8255 = 0,
.num_p0_dio_channels = 8,
.caldac = {mb88341},
},
{.device_id = 25,
.isapnp_id = 0x1900,
.name = "at-mio-16e-2",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 2048,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 2000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.has_8255 = 0,
.num_p0_dio_channels = 8,
.caldac = {mb88341},
},
{.device_id = 36,
.isapnp_id = 0x2400,
.name = "at-mio-16e-10",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 0,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = {ad8804_debug},
.has_8255 = 0,
},
{.device_id = 37,
.isapnp_id = 0x2500,
.name = "at-mio-16de-10",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 0,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = {ad8804_debug},
.has_8255 = 1,
},
{.device_id = 38,
.isapnp_id = 0x2600,
.name = "at-mio-64e-3",
.n_adchan = 64,
.adbits = 12,
.ai_fifo_depth = 2048,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 2000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.has_8255 = 0,
.num_p0_dio_channels = 8,
.caldac = {ad8804_debug},
},
{.device_id = 39,
.isapnp_id = 0x2700,
.name = "at-mio-16xe-50",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_8,
.ai_speed = 50000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 0,
.ao_range_table = &range_bipolar10,
.ao_unipolar = 0,
.ao_speed = 50000,
.num_p0_dio_channels = 8,
.caldac = {dac8800, dac8043},
.has_8255 = 0,
},
{.device_id = 50,
.isapnp_id = 0x0000, /* XXX unknown */
.name = "at-mio-16xe-10",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = {dac8800, dac8043, ad8522},
.has_8255 = 0,
},
{.device_id = 51,
.isapnp_id = 0x0000, /* XXX unknown */
.name = "at-ai-16xe-10",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1, /* unknown */
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 0,
.aobits = 0,
.ao_fifo_depth = 0,
.ao_unipolar = 0,
.num_p0_dio_channels = 8,
.caldac = {dac8800, dac8043, ad8522},
.has_8255 = 0,
}
};
static const int ni_irqpin[] = {
-1, -1, -1, 0, 1, 2, -1, 3, -1, -1, 4, 5, 6, -1, -1, 7
};
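/*
* ni_irqpin[] maps an ISA IRQ number to the board's interrupt pin select
* value: IRQs 3, 4, 5, 7, 10, 11, 12 and 15 map to pins 0-7 respectively,
* and -1 marks IRQs the hardware cannot use (rejected in
* ni_atmio_attach()).
*/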
#define interrupt_pin(a) (ni_irqpin[(a)])
#define IRQ_POLARITY 0
#define NI_E_IRQ_FLAGS 0
struct ni_private {
struct pnp_dev *isapnp_dev;
NI_PRIVATE_COMMON
};
/* How we access registers */
#define ni_writel(a, b) (outl((a), (b)+dev->iobase))
#define ni_readl(a) (inl((a)+dev->iobase))
#define ni_writew(a, b) (outw((a), (b)+dev->iobase))
#define ni_readw(a) (inw((a)+dev->iobase))
#define ni_writeb(a, b) (outb((a), (b)+dev->iobase))
#define ni_readb(a) (inb((a)+dev->iobase))
/* How we access windowed registers */
/* We automatically take advantage of STC registers that can be
* read/written directly in the I/O space of the board. The
* AT-MIO devices map the low 8 STC registers to iobase+addr*2. */
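/*
* Sketch of the access pattern implemented below: STC register 5, say, is
* read directly with ni_readw(5 * 2), while any register above 7 is
* reached indirectly by writing its address to Window_Address and moving
* the data through Window_Data, all under window_lock.
*/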
static void ni_atmio_win_out(struct comedi_device *dev, uint16_t data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->window_lock, flags);
if ((addr) < 8) {
ni_writew(data, addr * 2);
} else {
ni_writew(addr, Window_Address);
ni_writew(data, Window_Data);
}
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
static uint16_t ni_atmio_win_in(struct comedi_device *dev, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
uint16_t ret;
spin_lock_irqsave(&devpriv->window_lock, flags);
if (addr < 8) {
ret = ni_readw(addr * 2);
} else {
ni_writew(addr, Window_Address);
ret = ni_readw(Window_Data);
}
spin_unlock_irqrestore(&devpriv->window_lock, flags);
return ret;
}
static struct pnp_device_id device_ids[] = {
{.id = "NIC1900", .driver_data = 0},
{.id = "NIC2400", .driver_data = 0},
{.id = "NIC2500", .driver_data = 0},
{.id = "NIC2600", .driver_data = 0},
{.id = "NIC2700", .driver_data = 0},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, device_ids);
#include "ni_mio_common.c"
static int ni_isapnp_find_board(struct pnp_dev **dev)
{
struct pnp_dev *isapnp_dev = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
isapnp_dev = pnp_find_dev(NULL,
ISAPNP_VENDOR('N', 'I', 'C'),
ISAPNP_FUNCTION(ni_boards[i].
isapnp_id), NULL);
if (isapnp_dev == NULL || isapnp_dev->card == NULL)
continue;
if (pnp_device_attach(isapnp_dev) < 0) {
printk
("ni_atmio: %s found but already active, skipping.\n",
ni_boards[i].name);
continue;
}
if (pnp_activate_dev(isapnp_dev) < 0) {
pnp_device_detach(isapnp_dev);
return -EAGAIN;
}
if (!pnp_port_valid(isapnp_dev, 0)
|| !pnp_irq_valid(isapnp_dev, 0)) {
pnp_device_detach(isapnp_dev);
printk("ni_atmio: pnp invalid port or irq, aborting\n");
return -ENOMEM;
}
break;
}
if (i == ARRAY_SIZE(ni_boards))
return -ENODEV;
*dev = isapnp_dev;
return 0;
}
static int ni_getboardtype(struct comedi_device *dev)
{
int device_id = ni_read_eeprom(dev, 511);
int i;
for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
if (ni_boards[i].device_id == device_id)
return i;
}
if (device_id == 255)
printk(" can't find board\n");
else if (device_id == 0)
printk(" EEPROM read error (?) or device not found\n");
else
printk(" unknown device ID %d -- contact author\n", device_id);
return -1;
}
static int ni_atmio_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
const struct ni_board_struct *boardtype;
struct ni_private *devpriv;
struct pnp_dev *isapnp_dev;
int ret;
unsigned long iobase;
int board;
unsigned int irq;
ret = ni_alloc_private(dev);
if (ret)
return ret;
devpriv = dev->private;
devpriv->stc_writew = &ni_atmio_win_out;
devpriv->stc_readw = &ni_atmio_win_in;
devpriv->stc_writel = &win_out2;
devpriv->stc_readl = &win_in2;
iobase = it->options[0];
irq = it->options[1];
isapnp_dev = NULL;
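/*
* If no I/O base was passed via comedi_config, fall back to ISAPnP
* probing (see the Driver: comment above) and take the iobase and irq
* from the PnP resources instead.
*/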
if (iobase == 0) {
ret = ni_isapnp_find_board(&isapnp_dev);
if (ret < 0)
return ret;
iobase = pnp_port_start(isapnp_dev, 0);
irq = pnp_irq(isapnp_dev, 0);
devpriv->isapnp_dev = isapnp_dev;
}
ret = comedi_request_region(dev, iobase, NI_SIZE);
if (ret)
return ret;
#ifdef DEBUG
/* board existence sanity check */
{
int i;
printk(" board fingerprint:");
for (i = 0; i < 16; i += 2) {
printk(" %04x %02x", inw(dev->iobase + i),
inb(dev->iobase + i + 1));
}
}
#endif
/* get board type */
board = ni_getboardtype(dev);
if (board < 0)
return -EIO;
dev->board_ptr = ni_boards + board;
boardtype = comedi_board(dev);
printk(" %s", boardtype->name);
dev->board_name = boardtype->name;
/* irq stuff */
if (irq != 0) {
if (irq > 15 || ni_irqpin[irq] == -1) {
printk(" invalid irq %u\n", irq);
return -EINVAL;
}
printk(" ( irq = %u )", irq);
ret = request_irq(irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
"ni_atmio", dev);
if (ret < 0) {
printk(" irq not available\n");
return -EINVAL;
}
dev->irq = irq;
}
/* generic E series stuff in ni_mio_common.c */
ret = ni_E_init(dev);
if (ret < 0)
return ret;
return 0;
}
static void ni_atmio_detach(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
mio_common_detach(dev);
comedi_legacy_detach(dev);
if (devpriv->isapnp_dev)
pnp_device_detach(devpriv->isapnp_dev);
}
static struct comedi_driver ni_atmio_driver = {
.driver_name = "ni_atmio",
.module = THIS_MODULE,
.attach = ni_atmio_attach,
.detach = ni_atmio_detach,
};
module_comedi_driver(ni_atmio_driver);
| gpl-2.0 |
Honor8Dev/android_kernel_huawei_FRD-L04 | drivers/staging/comedi/drivers/ni_pcimio.c | 2089 | 44045 | /*
comedi/drivers/ni_pcimio.c
Hardware driver for NI PCI-MIO E series cards
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: ni_pcimio
Description: National Instruments PCI-MIO-E series and M series (all boards)
Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
Herman Bruyninckx, Terry Barnaby
Status: works
Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, PCI-6040E,
PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
PCI-6254, PCI-6259, PCIe-6259,
PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
PCI-6711, PXI-6711, PCI-6713, PXI-6713,
PXI-6071E, PCI-6070E, PXI-6070E,
PXI-6052E, PCI-6036E, PCI-6731, PCI-6733, PXI-6733,
PCI-6143, PXI-6143
Updated: Mon, 09 Jan 2012 14:52:48 +0000
These boards are almost identical to the AT-MIO E series, except that
they use the PCI bus instead of ISA (i.e., AT). See the notes for
the ni_atmio.o driver for additional information about these boards.
Autocalibration is supported on many of the devices, using the
comedi_calibrate (or comedi_soft_calibrate for m-series) utility.
M-Series boards do analog input and analog output calibration entirely
in software. The software calibration corrects
the analog input for offset, gain and
nonlinearity. The analog outputs are corrected for offset and gain.
See the comedilib documentation on comedi_get_softcal_converter() for
more information.
By default, the driver uses DMA to transfer analog input data to
memory. When DMA is enabled, not all triggering features are
supported.
Digital I/O may not work on 673x.
Note that the PCI-6143 is a simultaneous sampling device with 8 converters.
With this board all of the converters perform one simultaneous sample during
a scan interval. The period for a scan is used for the convert time in a
Comedi cmd. The convert trigger source is normally set to TRIG_NOW by default.
The RTSI trigger bus is supported on these cards on
subdevice 10. See the comedilib documentation for details.
Information (number of channels, bits, etc.) for some devices may be
incorrect. Please check this and submit a bug if there are problems
for your device.
SCXI is probably broken for m-series boards.
Bugs:
- When DMA is enabled, COMEDI_EV_CONVERT does
not work correctly.
*/
/*
The PCI-MIO E series driver was originally written by
Tomasz Motylewski <...>, and ported to comedi by ds.
References:
341079b.pdf PCI E Series Register-Level Programmer Manual
340934b.pdf DAQ-STC reference manual
322080b.pdf 6711/6713/6715 User Manual
320945c.pdf PCI E Series User Manual
322138a.pdf PCI-6052E and DAQPad-6052E User Manual
ISSUES:
need to deal with external reference for DAC, and other DAC
properties in board properties
deal with at-mio-16de-10 revision D to N changes, etc.
need to add other CALDAC type
need to slow down DAC loading. I don't trust NI's claim that
two writes to the PCI bus slow IO enough. I would prefer to
use udelay(). Timing specs: (clock)
AD8522 30ns
DAC8043 120ns
DAC8800 60ns
MB88341 ?
*/
#include <linux/delay.h>
#include "../comedidev.h"
#include <asm/byteorder.h>
#include "ni_stc.h"
#include "mite.h"
/* #define PCI_DEBUG */
#define PCIDMA
#define PCIMIO 1
#undef ATMIO
#define MAX_N_CALDACS (16+16+2)
#define DRV_NAME "ni_pcimio"
/* These are not all the possible ao ranges for 628x boards.
They can do OFFSET +- REFERENCE where OFFSET can be
0V, 5V, APFI<0,1>, or AO<0...3> and RANGE can
be 10V, 5V, 2V, 1V, APFI<0,1>, AO<0...3>. That's
63 different possibilities. An AO channel
cannot act as its own OFFSET or REFERENCE.
*/
static const struct comedi_lrange range_ni_M_628x_ao = { 8, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE(-2, 2),
RANGE(-1, 1),
RANGE(-5, 15),
RANGE(0, 10),
RANGE(3, 7),
RANGE(4, 6),
RANGE_ext(-1, 1)
}
};
static const struct comedi_lrange range_ni_M_625x_ao = { 3, {
RANGE(-10, 10),
RANGE(-5, 5),
RANGE_ext(-1, 1)
}
};
enum ni_pcimio_boardid {
BOARD_PCIMIO_16XE_50,
BOARD_PCIMIO_16XE_10,
BOARD_PCI6014,
BOARD_PXI6030E,
BOARD_PCIMIO_16E_1,
BOARD_PCIMIO_16E_4,
BOARD_PXI6040E,
BOARD_PCI6031E,
BOARD_PCI6032E,
BOARD_PCI6033E,
BOARD_PCI6071E,
BOARD_PCI6023E,
BOARD_PCI6024E,
BOARD_PCI6025E,
BOARD_PXI6025E,
BOARD_PCI6034E,
BOARD_PCI6035E,
BOARD_PCI6052E,
BOARD_PCI6110,
BOARD_PCI6111,
/* BOARD_PCI6115, */
/* BOARD_PXI6115, */
BOARD_PCI6711,
BOARD_PXI6711,
BOARD_PCI6713,
BOARD_PXI6713,
BOARD_PCI6731,
/* BOARD_PXI6731, */
BOARD_PCI6733,
BOARD_PXI6733,
BOARD_PXI6071E,
BOARD_PXI6070E,
BOARD_PXI6052E,
BOARD_PXI6031E,
BOARD_PCI6036E,
BOARD_PCI6220,
BOARD_PCI6221,
BOARD_PCI6221_37PIN,
BOARD_PCI6224,
BOARD_PXI6224,
BOARD_PCI6225,
BOARD_PXI6225,
BOARD_PCI6229,
BOARD_PCI6250,
BOARD_PCI6251,
BOARD_PCIE6251,
BOARD_PXIE6251,
BOARD_PCI6254,
BOARD_PCI6259,
BOARD_PCIE6259,
BOARD_PCI6280,
BOARD_PCI6281,
BOARD_PXI6281,
BOARD_PCI6284,
BOARD_PCI6289,
BOARD_PCI6143,
BOARD_PXI6143,
};
static const struct ni_board_struct ni_boards[] = {
[BOARD_PCIMIO_16XE_50] = {
.name = "pci-mio-16xe-50",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 2048,
.alwaysdither = 1,
.gainlkup = ai_gain_8,
.ai_speed = 50000,
.n_aochan = 2,
.aobits = 12,
.ao_range_table = &range_bipolar10,
.ao_speed = 50000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043 },
},
[BOARD_PCIMIO_16XE_10] = {
.name = "pci-mio-16xe-10", /* aka pci-6030E */
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043, ad8522 },
},
[BOARD_PCI6014] = {
.name = "pci-6014",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 16,
.ao_range_table = &range_bipolar10,
.ao_speed = 100000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PXI6030E] = {
.name = "pxi-6030e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043, ad8522 },
},
[BOARD_PCIMIO_16E_1] = {
.name = "pci-mio-16e-1", /* aka pci-6070e */
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_16,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = { mb88341 },
},
[BOARD_PCIMIO_16E_4] = {
.name = "pci-mio-16e-4", /* aka pci-6040e */
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_16,
/*
* there have been reported problems with
* full speed on this board
*/
.ai_speed = 2000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 512,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug }, /* doc says mb88341 */
},
[BOARD_PXI6040E] = {
.name = "pxi-6040e",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_16,
.ai_speed = 2000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 512,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = { mb88341 },
},
[BOARD_PCI6031E] = {
.name = "pci-6031e",
.n_adchan = 64,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043, ad8522 },
},
[BOARD_PCI6032E] = {
.name = "pci-6032e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043, ad8522 },
},
[BOARD_PCI6033E] = {
.name = "pci-6033e",
.n_adchan = 64,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043, ad8522 },
},
[BOARD_PCI6071E] = {
.name = "pci-6071e",
.n_adchan = 64,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_16,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PCI6023E] = {
.name = "pci-6023e",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug }, /* manual is wrong */
},
[BOARD_PCI6024E] = {
.name = "pci-6024e",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 12,
.ao_range_table = &range_bipolar10,
.ao_speed = 100000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug }, /* manual is wrong */
},
[BOARD_PCI6025E] = {
.name = "pci-6025e",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 12,
.ao_range_table = &range_bipolar10,
.ao_speed = 100000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug }, /* manual is wrong */
.has_8255 = 1,
},
[BOARD_PXI6025E] = {
.name = "pxi-6025e",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 12,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 100000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug }, /* manual is wrong */
.has_8255 = 1,
},
[BOARD_PCI6034E] = {
.name = "pci-6034e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PCI6035E] = {
.name = "pci-6035e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 12,
.ao_range_table = &range_bipolar10,
.ao_speed = 100000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PCI6052E] = {
.name = "pci-6052e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_16,
.ai_speed = 3000,
.n_aochan = 2,
.aobits = 16,
.ao_unipolar = 1,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_speed = 3000,
.num_p0_dio_channels = 8,
/* manual is wrong */
.caldac = { ad8804_debug, ad8804_debug, ad8522 },
},
[BOARD_PCI6110] = {
.name = "pci-6110",
.n_adchan = 4,
.adbits = 12,
.ai_fifo_depth = 8192,
.alwaysdither = 0,
.gainlkup = ai_gain_611x,
.ai_speed = 200,
.n_aochan = 2,
.aobits = 16,
.reg_type = ni_reg_611x,
.ao_range_table = &range_bipolar10,
.ao_fifo_depth = 2048,
.ao_speed = 250,
.num_p0_dio_channels = 8,
.caldac = { ad8804, ad8804 },
},
[BOARD_PCI6111] = {
.name = "pci-6111",
.n_adchan = 2,
.adbits = 12,
.ai_fifo_depth = 8192,
.gainlkup = ai_gain_611x,
.ai_speed = 200,
.n_aochan = 2,
.aobits = 16,
.reg_type = ni_reg_611x,
.ao_range_table = &range_bipolar10,
.ao_fifo_depth = 2048,
.ao_speed = 250,
.num_p0_dio_channels = 8,
.caldac = { ad8804, ad8804 },
},
#if 0
/* The 6115 boards probably need their own driver */
[BOARD_PCI6115] = { /* .device_id = 0x2ed0, */
.name = "pci-6115",
.n_adchan = 4,
.adbits = 12,
.ai_fifo_depth = 8192,
.gainlkup = ai_gain_611x,
.ai_speed = 100,
.n_aochan = 2,
.aobits = 16,
.ao_671x = 1,
.ao_fifo_depth = 2048,
.ao_speed = 250,
.num_p0_dio_channels = 8,
.reg_611x = 1,
/* XXX */
.caldac = { ad8804_debug, ad8804_debug, ad8804_debug },
},
#endif
#if 0
[BOARD_PXI6115] = { /* .device_id = ????, */
.name = "pxi-6115",
.n_adchan = 4,
.adbits = 12,
.ai_fifo_depth = 8192,
.gainlkup = ai_gain_611x,
.ai_speed = 100,
.n_aochan = 2,
.aobits = 16,
.ao_671x = 1,
.ao_fifo_depth = 2048,
.ao_speed = 250,
.reg_611x = 1,
.num_p0_dio_channels = 8,
/* XXX */
.caldac = { ad8804_debug, ad8804_debug, ad8804_debug },
},
#endif
[BOARD_PCI6711] = {
.name = "pci-6711",
.n_aochan = 4,
.aobits = 12,
/* data sheet says 8192, but fifo really holds 16384 samples */
.ao_fifo_depth = 16384,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6711,
.caldac = { ad8804_debug },
},
[BOARD_PXI6711] = {
.name = "pxi-6711",
.n_aochan = 4,
.aobits = 12,
.ao_fifo_depth = 16384,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6711,
.caldac = { ad8804_debug },
},
[BOARD_PCI6713] = {
.name = "pci-6713",
.n_aochan = 8,
.aobits = 12,
.ao_fifo_depth = 16384,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6713,
.caldac = { ad8804_debug, ad8804_debug },
},
[BOARD_PXI6713] = {
.name = "pxi-6713",
.n_aochan = 8,
.aobits = 12,
.ao_fifo_depth = 16384,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6713,
.caldac = { ad8804_debug, ad8804_debug },
},
[BOARD_PCI6731] = {
.name = "pci-6731",
.n_aochan = 4,
.aobits = 16,
.ao_fifo_depth = 8192,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6711,
.caldac = { ad8804_debug },
},
#if 0
[BOARD_PXI6731] = { /* .device_id = ????, */
.name = "pxi-6731",
.n_aochan = 4,
.aobits = 16,
.ao_fifo_depth = 8192,
.ao_range_table = &range_bipolar10,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6711,
.caldac = { ad8804_debug },
},
#endif
[BOARD_PCI6733] = {
.name = "pci-6733",
.n_aochan = 8,
.aobits = 16,
.ao_fifo_depth = 16384,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6713,
.caldac = { ad8804_debug, ad8804_debug },
},
[BOARD_PXI6733] = {
.name = "pxi-6733",
.n_aochan = 8,
.aobits = 16,
.ao_fifo_depth = 16384,
.ao_range_table = &range_bipolar10,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_6713,
.caldac = { ad8804_debug, ad8804_debug },
},
[BOARD_PXI6071E] = {
.name = "pxi-6071e",
.n_adchan = 64,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_16,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PXI6070E] = {
.name = "pxi-6070e",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_16,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PXI6052E] = {
.name = "pxi-6052e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_16,
.ai_speed = 3000,
.n_aochan = 2,
.aobits = 16,
.ao_unipolar = 1,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_speed = 3000,
.num_p0_dio_channels = 8,
.caldac = { mb88341, mb88341, ad8522 },
},
[BOARD_PXI6031E] = {
.name = "pxi-6031e",
.n_adchan = 64,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = { dac8800, dac8043, ad8522 },
},
[BOARD_PCI6036E] = {
.name = "pci-6036e",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_4,
.ai_speed = 5000,
.n_aochan = 2,
.aobits = 16,
.ao_range_table = &range_bipolar10,
.ao_speed = 100000,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug },
},
[BOARD_PCI6220] = {
.name = "pci-6220",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512, /* FIXME: guess */
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.num_p0_dio_channels = 8,
.reg_type = ni_reg_622x,
.caldac = { caldac_none },
},
[BOARD_PCI6221] = {
.name = "pci-6221",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_bipolar10,
.reg_type = ni_reg_622x,
.ao_speed = 1200,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCI6221_37PIN] = {
.name = "pci-6221_37pin",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_bipolar10,
.reg_type = ni_reg_622x,
.ao_speed = 1200,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCI6224] = {
.name = "pci-6224",
.n_adchan = 32,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.reg_type = ni_reg_622x,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PXI6224] = {
.name = "pxi-6224",
.n_adchan = 32,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.reg_type = ni_reg_622x,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6225] = {
.name = "pci-6225",
.n_adchan = 80,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_bipolar10,
.reg_type = ni_reg_622x,
.ao_speed = 1200,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PXI6225] = {
.name = "pxi-6225",
.n_adchan = 80,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_bipolar10,
.reg_type = ni_reg_622x,
.ao_speed = 1200,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6229] = {
.name = "pci-6229",
.n_adchan = 32,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_622x,
.ai_speed = 4000,
.n_aochan = 4,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_bipolar10,
.reg_type = ni_reg_622x,
.ao_speed = 1200,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6250] = {
.name = "pci-6250",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.reg_type = ni_reg_625x,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCI6251] = {
.name = "pci-6251",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCIE6251] = {
.name = "pcie-6251",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PXIE6251] = {
.name = "pxie-6251",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCI6254] = {
.name = "pci-6254",
.n_adchan = 32,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.reg_type = ni_reg_625x,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6259] = {
.name = "pci-6259",
.n_adchan = 32,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.n_aochan = 4,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCIE6259] = {
.name = "pcie-6259",
.n_adchan = 32,
.adbits = 16,
.ai_fifo_depth = 4095,
.gainlkup = ai_gain_628x,
.ai_speed = 800,
.n_aochan = 4,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_625x_ao,
.reg_type = ni_reg_625x,
.ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6280] = {
.name = "pci-6280",
.n_adchan = 16,
.adbits = 18,
.ai_fifo_depth = 2047,
.gainlkup = ai_gain_628x,
.ai_speed = 1600,
.ao_fifo_depth = 8191,
.reg_type = ni_reg_628x,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCI6281] = {
.name = "pci-6281",
.n_adchan = 16,
.adbits = 18,
.ai_fifo_depth = 2047,
.gainlkup = ai_gain_628x,
.ai_speed = 1600,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
.ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PXI6281] = {
.name = "pxi-6281",
.n_adchan = 16,
.adbits = 18,
.ai_fifo_depth = 2047,
.gainlkup = ai_gain_628x,
.ai_speed = 1600,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
.ao_speed = 350,
.num_p0_dio_channels = 8,
.caldac = { caldac_none },
},
[BOARD_PCI6284] = {
.name = "pci-6284",
.n_adchan = 32,
.adbits = 18,
.ai_fifo_depth = 2047,
.gainlkup = ai_gain_628x,
.ai_speed = 1600,
.reg_type = ni_reg_628x,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6289] = {
.name = "pci-6289",
.n_adchan = 32,
.adbits = 18,
.ai_fifo_depth = 2047,
.gainlkup = ai_gain_628x,
.ai_speed = 1600,
.n_aochan = 4,
.aobits = 16,
.ao_fifo_depth = 8191,
.ao_range_table = &range_ni_M_628x_ao,
.reg_type = ni_reg_628x,
.ao_unipolar = 1,
.ao_speed = 350,
.num_p0_dio_channels = 32,
.caldac = { caldac_none },
},
[BOARD_PCI6143] = {
.name = "pci-6143",
.n_adchan = 8,
.adbits = 16,
.ai_fifo_depth = 1024,
.gainlkup = ai_gain_6143,
.ai_speed = 4000,
.reg_type = ni_reg_6143,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug, ad8804_debug },
},
[BOARD_PXI6143] = {
.name = "pxi-6143",
.n_adchan = 8,
.adbits = 16,
.ai_fifo_depth = 1024,
.gainlkup = ai_gain_6143,
.ai_speed = 4000,
.reg_type = ni_reg_6143,
.num_p0_dio_channels = 8,
.caldac = { ad8804_debug, ad8804_debug },
},
};
struct ni_private {
	NI_PRIVATE_COMMON
};
/* How we access registers */
#define ni_writel(a, b) (writel((a), devpriv->mite->daq_io_addr + (b)))
#define ni_readl(a) (readl(devpriv->mite->daq_io_addr + (a)))
#define ni_writew(a, b) (writew((a), devpriv->mite->daq_io_addr + (b)))
#define ni_readw(a) (readw(devpriv->mite->daq_io_addr + (a)))
#define ni_writeb(a, b) (writeb((a), devpriv->mite->daq_io_addr + (b)))
#define ni_readb(a) (readb(devpriv->mite->daq_io_addr + (a)))
/* How we access STC registers */
/* We automatically take advantage of STC registers that can be
* read/written directly in the I/O space of the board. Most
* PCIMIO devices map the low 8 STC registers to iobase+addr*2.
* The 611x devices map the write registers to iobase+addr*2, and
* the read registers to iobase+(addr-1)*2. */
/* However, the 611x boards still aren't working, so I'm disabling
* non-windowed STC access temporarily */
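/*
 * Illustrative sketch only (kept under #if 0, not used by this driver):
 * what a direct, non-windowed STC write would look like for the low
 * registers, assuming the iobase+addr*2 mirror described above.  The
 * windowed e_series_win_out() below remains the path actually used.
 */
#if 0
static void e_series_stc_direct_write(struct comedi_device *dev,
				      uint16_t data, int reg)
{
	struct ni_private *devpriv = dev->private;
	if (reg < 8)
		ni_writew(data, reg * 2);	/* direct register mirror */
	else
		e_series_win_out(dev, data, reg);	/* fall back to the window */
}
#endif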
static void e_series_win_out(struct comedi_device *dev, uint16_t data, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->window_lock, flags);
ni_writew(reg, Window_Address);
ni_writew(data, Window_Data);
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
static uint16_t e_series_win_in(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
uint16_t ret;
spin_lock_irqsave(&devpriv->window_lock, flags);
ni_writew(reg, Window_Address);
ret = ni_readw(Window_Data);
spin_unlock_irqrestore(&devpriv->window_lock, flags);
return ret;
}
static void m_series_stc_writew(struct comedi_device *dev, uint16_t data,
int reg)
{
struct ni_private *devpriv = dev->private;
unsigned offset;
switch (reg) {
case ADC_FIFO_Clear:
offset = M_Offset_AI_FIFO_Clear;
break;
case AI_Command_1_Register:
offset = M_Offset_AI_Command_1;
break;
case AI_Command_2_Register:
offset = M_Offset_AI_Command_2;
break;
case AI_Mode_1_Register:
offset = M_Offset_AI_Mode_1;
break;
case AI_Mode_2_Register:
offset = M_Offset_AI_Mode_2;
break;
case AI_Mode_3_Register:
offset = M_Offset_AI_Mode_3;
break;
case AI_Output_Control_Register:
offset = M_Offset_AI_Output_Control;
break;
case AI_Personal_Register:
offset = M_Offset_AI_Personal;
break;
case AI_SI2_Load_A_Register:
/* this is actually a 32 bit register on m series boards */
ni_writel(data, M_Offset_AI_SI2_Load_A);
		return;
case AI_SI2_Load_B_Register:
/* this is actually a 32 bit register on m series boards */
ni_writel(data, M_Offset_AI_SI2_Load_B);
		return;
case AI_START_STOP_Select_Register:
offset = M_Offset_AI_START_STOP_Select;
break;
case AI_Trigger_Select_Register:
offset = M_Offset_AI_Trigger_Select;
break;
case Analog_Trigger_Etc_Register:
offset = M_Offset_Analog_Trigger_Etc;
break;
case AO_Command_1_Register:
offset = M_Offset_AO_Command_1;
break;
case AO_Command_2_Register:
offset = M_Offset_AO_Command_2;
break;
case AO_Mode_1_Register:
offset = M_Offset_AO_Mode_1;
break;
case AO_Mode_2_Register:
offset = M_Offset_AO_Mode_2;
break;
case AO_Mode_3_Register:
offset = M_Offset_AO_Mode_3;
break;
case AO_Output_Control_Register:
offset = M_Offset_AO_Output_Control;
break;
case AO_Personal_Register:
offset = M_Offset_AO_Personal;
break;
case AO_Start_Select_Register:
offset = M_Offset_AO_Start_Select;
break;
case AO_Trigger_Select_Register:
offset = M_Offset_AO_Trigger_Select;
break;
case Clock_and_FOUT_Register:
offset = M_Offset_Clock_and_FOUT;
break;
case Configuration_Memory_Clear:
offset = M_Offset_Configuration_Memory_Clear;
break;
case DAC_FIFO_Clear:
offset = M_Offset_AO_FIFO_Clear;
break;
case DIO_Control_Register:
		dev_warn(dev->class_dev,
			 "%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n",
			 __func__, reg);
		return;
case G_Autoincrement_Register(0):
offset = M_Offset_G0_Autoincrement;
break;
case G_Autoincrement_Register(1):
offset = M_Offset_G1_Autoincrement;
break;
case G_Command_Register(0):
offset = M_Offset_G0_Command;
break;
case G_Command_Register(1):
offset = M_Offset_G1_Command;
break;
case G_Input_Select_Register(0):
offset = M_Offset_G0_Input_Select;
break;
case G_Input_Select_Register(1):
offset = M_Offset_G1_Input_Select;
break;
case G_Mode_Register(0):
offset = M_Offset_G0_Mode;
break;
case G_Mode_Register(1):
offset = M_Offset_G1_Mode;
break;
case Interrupt_A_Ack_Register:
offset = M_Offset_Interrupt_A_Ack;
break;
case Interrupt_A_Enable_Register:
offset = M_Offset_Interrupt_A_Enable;
break;
case Interrupt_B_Ack_Register:
offset = M_Offset_Interrupt_B_Ack;
break;
case Interrupt_B_Enable_Register:
offset = M_Offset_Interrupt_B_Enable;
break;
case Interrupt_Control_Register:
offset = M_Offset_Interrupt_Control;
break;
case IO_Bidirection_Pin_Register:
offset = M_Offset_IO_Bidirection_Pin;
break;
case Joint_Reset_Register:
offset = M_Offset_Joint_Reset;
break;
case RTSI_Trig_A_Output_Register:
offset = M_Offset_RTSI_Trig_A_Output;
break;
case RTSI_Trig_B_Output_Register:
offset = M_Offset_RTSI_Trig_B_Output;
break;
case RTSI_Trig_Direction_Register:
offset = M_Offset_RTSI_Trig_Direction;
break;
	/*
	 * FIXME: DIO_Output_Register (16 bit reg) is replaced by
	 * M_Offset_Static_Digital_Output (32 bit) and
	 * M_Offset_SCXI_Serial_Data_Out (8 bit)
	 */
default:
dev_warn(dev->class_dev,
"%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
		return;
}
ni_writew(data, offset);
}
static uint16_t m_series_stc_readw(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned offset;
switch (reg) {
case AI_Status_1_Register:
offset = M_Offset_AI_Status_1;
break;
case AO_Status_1_Register:
offset = M_Offset_AO_Status_1;
break;
case AO_Status_2_Register:
offset = M_Offset_AO_Status_2;
break;
case DIO_Serial_Input_Register:
		return ni_readb(M_Offset_SCXI_Serial_Data_In);
case Joint_Status_1_Register:
offset = M_Offset_Joint_Status_1;
break;
case Joint_Status_2_Register:
offset = M_Offset_Joint_Status_2;
break;
case G_Status_Register:
offset = M_Offset_G01_Status;
break;
default:
dev_warn(dev->class_dev,
"%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
		return 0;
}
return ni_readw(offset);
}
static void m_series_stc_writel(struct comedi_device *dev, uint32_t data,
int reg)
{
struct ni_private *devpriv = dev->private;
unsigned offset;
switch (reg) {
case AI_SC_Load_A_Registers:
offset = M_Offset_AI_SC_Load_A;
break;
case AI_SI_Load_A_Registers:
offset = M_Offset_AI_SI_Load_A;
break;
case AO_BC_Load_A_Register:
offset = M_Offset_AO_BC_Load_A;
break;
case AO_UC_Load_A_Register:
offset = M_Offset_AO_UC_Load_A;
break;
case AO_UI_Load_A_Register:
offset = M_Offset_AO_UI_Load_A;
break;
case G_Load_A_Register(0):
offset = M_Offset_G0_Load_A;
break;
case G_Load_A_Register(1):
offset = M_Offset_G1_Load_A;
break;
case G_Load_B_Register(0):
offset = M_Offset_G0_Load_B;
break;
case G_Load_B_Register(1):
offset = M_Offset_G1_Load_B;
break;
default:
dev_warn(dev->class_dev,
"%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
		return;
}
ni_writel(data, offset);
}
static uint32_t m_series_stc_readl(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned offset;
switch (reg) {
case G_HW_Save_Register(0):
offset = M_Offset_G0_HW_Save;
break;
case G_HW_Save_Register(1):
offset = M_Offset_G1_HW_Save;
break;
case G_Save_Register(0):
offset = M_Offset_G0_Save;
break;
case G_Save_Register(1):
offset = M_Offset_G1_Save;
break;
default:
dev_warn(dev->class_dev,
"%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
		return 0;
}
return ni_readl(offset);
}
#define interrupt_pin(a) 0
#define IRQ_POLARITY 1
#define NI_E_IRQ_FLAGS IRQF_SHARED
#include "ni_mio_common.c"
static int pcimio_ai_change(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned long new_size);
static int pcimio_ao_change(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned long new_size);
static int pcimio_gpct0_change(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned long new_size);
static int pcimio_gpct1_change(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned long new_size);
static int pcimio_dio_change(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned long new_size);
static void m_series_init_eeprom_buffer(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
static const int Start_Cal_EEPROM = 0x400;
static const unsigned window_size = 10;
static const int serial_number_eeprom_offset = 0x4;
static const int serial_number_eeprom_length = 0x4;
unsigned old_iodwbsr_bits;
unsigned old_iodwbsr1_bits;
unsigned old_iodwcr1_bits;
int i;
old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
writel(0x1 | old_iodwcr1_bits,
devpriv->mite->mite_io_addr + MITE_IODWCR_1);
writel(0xf, devpriv->mite->mite_io_addr + 0x30);
BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number));
for (i = 0; i < serial_number_eeprom_length; ++i) {
char *byte_ptr = (char *)&devpriv->serial_number + i;
*byte_ptr = ni_readb(serial_number_eeprom_offset + i);
}
devpriv->serial_number = be32_to_cpu(devpriv->serial_number);
for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
writel(0x0, devpriv->mite->mite_io_addr + 0x30);
}
static void init_6143(struct comedi_device *dev)
{
const struct ni_board_struct *board = comedi_board(dev);
struct ni_private *devpriv = dev->private;
/* Disable interrupts */
devpriv->stc_writew(dev, 0, Interrupt_Control_Register);
/* Initialise 6143 AI specific bits */
ni_writeb(0x00, Magic_6143); /* Set G0,G1 DMA mode to E series version */
ni_writeb(0x80, PipelineDelay_6143); /* Set EOCMode, ADCMode and pipelinedelay */
ni_writeb(0x00, EOC_Set_6143); /* Set EOC Delay */
/* Set the FIFO half full level */
ni_writel(board->ai_fifo_depth / 2, AIFIFO_Flag_6143);
/* Strobe Relay disable bit */
devpriv->ai_calib_source_enabled = 0;
ni_writew(devpriv->ai_calib_source | Calibration_Channel_6143_RelayOff,
Calibration_Channel_6143);
ni_writew(devpriv->ai_calib_source, Calibration_Channel_6143);
}
static void pcimio_detach(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
mio_common_detach(dev);
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
mite_free_ring(devpriv->ai_mite_ring);
mite_free_ring(devpriv->ao_mite_ring);
mite_free_ring(devpriv->cdo_mite_ring);
mite_free_ring(devpriv->gpct_mite_ring[0]);
mite_free_ring(devpriv->gpct_mite_ring[1]);
if (devpriv->mite) {
mite_unsetup(devpriv->mite);
mite_free(devpriv->mite);
}
}
comedi_pci_disable(dev);
}
static int pcimio_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_board_struct *board = NULL;
struct ni_private *devpriv;
int ret;
if (context < ARRAY_SIZE(ni_boards))
board = &ni_boards[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
dev->board_name = board->name;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
ret = ni_alloc_private(dev);
if (ret)
return ret;
devpriv = dev->private;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
return -ENOMEM;
if (board->reg_type & ni_reg_m_series_mask) {
devpriv->stc_writew = &m_series_stc_writew;
devpriv->stc_readw = &m_series_stc_readw;
devpriv->stc_writel = &m_series_stc_writel;
devpriv->stc_readl = &m_series_stc_readl;
} else {
devpriv->stc_writew = &e_series_win_out;
devpriv->stc_readw = &e_series_win_in;
devpriv->stc_writel = &win_out2;
devpriv->stc_readl = &win_in2;
}
ret = mite_setup(devpriv->mite);
if (ret < 0) {
pr_warn("error setting up mite\n");
return ret;
}
devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
if (devpriv->ai_mite_ring == NULL)
return -ENOMEM;
devpriv->ao_mite_ring = mite_alloc_ring(devpriv->mite);
if (devpriv->ao_mite_ring == NULL)
return -ENOMEM;
devpriv->cdo_mite_ring = mite_alloc_ring(devpriv->mite);
if (devpriv->cdo_mite_ring == NULL)
return -ENOMEM;
devpriv->gpct_mite_ring[0] = mite_alloc_ring(devpriv->mite);
if (devpriv->gpct_mite_ring[0] == NULL)
return -ENOMEM;
devpriv->gpct_mite_ring[1] = mite_alloc_ring(devpriv->mite);
if (devpriv->gpct_mite_ring[1] == NULL)
return -ENOMEM;
if (board->reg_type & ni_reg_m_series_mask)
m_series_init_eeprom_buffer(dev);
if (board->reg_type == ni_reg_6143)
init_6143(dev);
dev->irq = mite_irq(devpriv->mite);
if (dev->irq == 0) {
pr_warn("unknown irq (bad)\n");
} else {
pr_debug("( irq = %u )\n", dev->irq);
ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
DRV_NAME, dev);
if (ret < 0) {
pr_warn("irq not available\n");
dev->irq = 0;
}
}
ret = ni_E_init(dev);
if (ret < 0)
return ret;
dev->subdevices[NI_AI_SUBDEV].buf_change = &pcimio_ai_change;
dev->subdevices[NI_AO_SUBDEV].buf_change = &pcimio_ao_change;
dev->subdevices[NI_GPCT_SUBDEV(0)].buf_change = &pcimio_gpct0_change;
dev->subdevices[NI_GPCT_SUBDEV(1)].buf_change = &pcimio_gpct1_change;
dev->subdevices[NI_DIO_SUBDEV].buf_change = &pcimio_dio_change;
return ret;
}
static int pcimio_ai_change(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned long new_size)
{
struct ni_private *devpriv = dev->private;
int ret;
ret = mite_buf_change(devpriv->ai_mite_ring, s->async);
if (ret < 0)
return ret;
return 0;
}
static int pcimio_ao_change(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned long new_size)
{
struct ni_private *devpriv = dev->private;
int ret;
ret = mite_buf_change(devpriv->ao_mite_ring, s->async);
if (ret < 0)
return ret;
return 0;
}
static int pcimio_gpct0_change(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned long new_size)
{
struct ni_private *devpriv = dev->private;
int ret;
ret = mite_buf_change(devpriv->gpct_mite_ring[0], s->async);
if (ret < 0)
return ret;
return 0;
}
static int pcimio_gpct1_change(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned long new_size)
{
struct ni_private *devpriv = dev->private;
int ret;
ret = mite_buf_change(devpriv->gpct_mite_ring[1], s->async);
if (ret < 0)
return ret;
return 0;
}
static int pcimio_dio_change(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned long new_size)
{
struct ni_private *devpriv = dev->private;
int ret;
ret = mite_buf_change(devpriv->cdo_mite_ring, s->async);
if (ret < 0)
return ret;
return 0;
}
static struct comedi_driver ni_pcimio_driver = {
.driver_name = "ni_pcimio",
.module = THIS_MODULE,
.auto_attach = pcimio_auto_attach,
.detach = pcimio_detach,
};
static int ni_pcimio_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &ni_pcimio_driver, id->driver_data);
}
static DEFINE_PCI_DEVICE_TABLE(ni_pcimio_pci_table) = {
{ PCI_VDEVICE(NI, 0x0162), BOARD_PCIMIO_16XE_50 }, /* 0x1620? */
{ PCI_VDEVICE(NI, 0x1170), BOARD_PCIMIO_16XE_10 },
{ PCI_VDEVICE(NI, 0x1180), BOARD_PCIMIO_16E_1 },
{ PCI_VDEVICE(NI, 0x1190), BOARD_PCIMIO_16E_4 },
{ PCI_VDEVICE(NI, 0x11b0), BOARD_PXI6070E },
{ PCI_VDEVICE(NI, 0x11c0), BOARD_PXI6040E },
{ PCI_VDEVICE(NI, 0x11d0), BOARD_PXI6030E },
{ PCI_VDEVICE(NI, 0x1270), BOARD_PCI6032E },
{ PCI_VDEVICE(NI, 0x1330), BOARD_PCI6031E },
{ PCI_VDEVICE(NI, 0x1340), BOARD_PCI6033E },
{ PCI_VDEVICE(NI, 0x1350), BOARD_PCI6071E },
{ PCI_VDEVICE(NI, 0x14e0), BOARD_PCI6110 },
{ PCI_VDEVICE(NI, 0x14f0), BOARD_PCI6111 },
{ PCI_VDEVICE(NI, 0x1580), BOARD_PXI6031E },
{ PCI_VDEVICE(NI, 0x15b0), BOARD_PXI6071E },
{ PCI_VDEVICE(NI, 0x1880), BOARD_PCI6711 },
{ PCI_VDEVICE(NI, 0x1870), BOARD_PCI6713 },
{ PCI_VDEVICE(NI, 0x18b0), BOARD_PCI6052E },
{ PCI_VDEVICE(NI, 0x18c0), BOARD_PXI6052E },
{ PCI_VDEVICE(NI, 0x2410), BOARD_PCI6733 },
{ PCI_VDEVICE(NI, 0x2420), BOARD_PXI6733 },
{ PCI_VDEVICE(NI, 0x2430), BOARD_PCI6731 },
{ PCI_VDEVICE(NI, 0x2890), BOARD_PCI6036E },
{ PCI_VDEVICE(NI, 0x28c0), BOARD_PCI6014 },
{ PCI_VDEVICE(NI, 0x2a60), BOARD_PCI6023E },
{ PCI_VDEVICE(NI, 0x2a70), BOARD_PCI6024E },
{ PCI_VDEVICE(NI, 0x2a80), BOARD_PCI6025E },
{ PCI_VDEVICE(NI, 0x2ab0), BOARD_PXI6025E },
{ PCI_VDEVICE(NI, 0x2b80), BOARD_PXI6713 },
{ PCI_VDEVICE(NI, 0x2b90), BOARD_PXI6711 },
{ PCI_VDEVICE(NI, 0x2c80), BOARD_PCI6035E },
{ PCI_VDEVICE(NI, 0x2ca0), BOARD_PCI6034E },
{ PCI_VDEVICE(NI, 0x70aa), BOARD_PCI6229 },
{ PCI_VDEVICE(NI, 0x70ab), BOARD_PCI6259 },
{ PCI_VDEVICE(NI, 0x70ac), BOARD_PCI6289 },
{ PCI_VDEVICE(NI, 0x70af), BOARD_PCI6221 },
{ PCI_VDEVICE(NI, 0x70b0), BOARD_PCI6220 },
{ PCI_VDEVICE(NI, 0x70b4), BOARD_PCI6250 },
{ PCI_VDEVICE(NI, 0x70b6), BOARD_PCI6280 },
{ PCI_VDEVICE(NI, 0x70b7), BOARD_PCI6254 },
{ PCI_VDEVICE(NI, 0x70b8), BOARD_PCI6251 },
{ PCI_VDEVICE(NI, 0x70bc), BOARD_PCI6284 },
{ PCI_VDEVICE(NI, 0x70bd), BOARD_PCI6281 },
{ PCI_VDEVICE(NI, 0x70bf), BOARD_PXI6281 },
{ PCI_VDEVICE(NI, 0x70c0), BOARD_PCI6143 },
{ PCI_VDEVICE(NI, 0x70f2), BOARD_PCI6224 },
{ PCI_VDEVICE(NI, 0x70f3), BOARD_PXI6224 },
{ PCI_VDEVICE(NI, 0x710d), BOARD_PXI6143 },
{ PCI_VDEVICE(NI, 0x716c), BOARD_PCI6225 },
{ PCI_VDEVICE(NI, 0x716d), BOARD_PXI6225 },
{ PCI_VDEVICE(NI, 0x717f), BOARD_PCIE6259 },
{ PCI_VDEVICE(NI, 0x71bc), BOARD_PCI6221_37PIN },
{ PCI_VDEVICE(NI, 0x717d), BOARD_PCIE6251 },
{ PCI_VDEVICE(NI, 0x72e8), BOARD_PXIE6251 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ni_pcimio_pci_table);
static struct pci_driver ni_pcimio_pci_driver = {
.name = "ni_pcimio",
.id_table = ni_pcimio_pci_table,
.probe = ni_pcimio_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(ni_pcimio_driver, ni_pcimio_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
krexus-partners/kernel_moto_shamu | drivers/staging/comedi/drivers/ni_atmio.c | 2089 | 12458 | /*
comedi/drivers/ni_atmio.c
Hardware driver for NI AT-MIO E series cards
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: ni_atmio
Description: National Instruments AT-MIO-E series
Author: ds
Devices: [National Instruments] AT-MIO-16E-1 (ni_atmio),
AT-MIO-16E-2, AT-MIO-16E-10, AT-MIO-16DE-10, AT-MIO-64E-3,
AT-MIO-16XE-50, AT-MIO-16XE-10, AT-AI-16XE-10
Status: works
Updated: Thu May 1 20:03:02 CDT 2003
The driver has 2.6 kernel isapnp support, and will automatically probe for
a supported board if the I/O base is left unspecified with comedi_config.
However, many of the isapnp id numbers are unknown.  If your board is not
recognized, please send the output of 'cat /proc/isapnp' (you may need to
modprobe the isa-pnp module for /proc/isapnp to exist) so the id numbers
for your board can be added to the driver.
Otherwise, you can use the isapnptools package to configure your board.
Use isapnp to configure the I/O base and IRQ for the board, and then pass
the same values as parameters in comedi_config.  A sample isapnp.conf file
is included in the etc/ directory of Comedilib.
Comedilib includes a utility to autocalibrate these boards. The
boards seem to boot into a state where all the calibration DACs
are at one extreme of their range, thus the default calibration
is terrible. Calibration at boot is strongly encouraged.
To use the extended digital I/O on some of the boards, enable the
8255 driver when configuring the Comedi source tree.
External triggering is supported for some events. The channel index
(scan_begin_arg, etc.) maps to PFI0 - PFI9.
Some of the more esoteric triggering possibilities of these boards
are not supported.
*/
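/*
 * Example invocation (illustrative only; substitute the I/O base and IRQ
 * that your ISA PnP setup actually assigned):
 *   comedi_config /dev/comedi0 ni_atmio 0x260,5
 * Leaving the option list empty instead lets the driver probe for the
 * board via isapnp, as described above.
 */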
/*
The real guts of the driver is in ni_mio_common.c, which is included
both here and in ni_pcimio.c
Interrupt support added by Truxton Fulton <trux@truxton.com>
References for specifications:
340747b.pdf Register Level Programmer Manual (obsolete)
340747c.pdf Register Level Programmer Manual (new)
DAQ-STC reference manual
Other possibly relevant info:
320517c.pdf User manual (obsolete)
320517f.pdf User manual (new)
320889a.pdf delete
320906c.pdf maximum signal ratings
321066a.pdf about 16x
321791a.pdf discontinuation of at-mio-16e-10 rev. c
321808a.pdf about at-mio-16e-10 rev P
321837a.pdf discontinuation of at-mio-16de-10 rev d
321838a.pdf about at-mio-16de-10 rev N
ISSUES:
need to deal with external reference for DAC, and other DAC
properties in board properties
deal with at-mio-16de-10 revision D to N changes, etc.
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/isapnp.h>
#include "ni_stc.h"
#include "8255.h"
#undef DEBUG
#define ATMIO 1
#undef PCIMIO
/*
* AT specific setup
*/
#define NI_SIZE 0x20
#define MAX_N_CALDACS 32
static const struct ni_board_struct ni_boards[] = {
{.device_id = 44,
.isapnp_id = 0x0000, /* XXX unknown */
.name = "at-mio-16e-1",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 8192,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 800,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.has_8255 = 0,
.num_p0_dio_channels = 8,
.caldac = {mb88341},
},
{.device_id = 25,
.isapnp_id = 0x1900,
.name = "at-mio-16e-2",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 2048,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 2000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.has_8255 = 0,
.num_p0_dio_channels = 8,
.caldac = {mb88341},
},
{.device_id = 36,
.isapnp_id = 0x2400,
.name = "at-mio-16e-10",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 0,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = {ad8804_debug},
.has_8255 = 0,
},
{.device_id = 37,
.isapnp_id = 0x2500,
.name = "at-mio-16de-10",
.n_adchan = 16,
.adbits = 12,
.ai_fifo_depth = 512,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 0,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 10000,
.num_p0_dio_channels = 8,
.caldac = {ad8804_debug},
.has_8255 = 1,
},
{.device_id = 38,
.isapnp_id = 0x2600,
.name = "at-mio-64e-3",
.n_adchan = 64,
.adbits = 12,
.ai_fifo_depth = 2048,
.alwaysdither = 0,
.gainlkup = ai_gain_16,
.ai_speed = 2000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.has_8255 = 0,
.num_p0_dio_channels = 8,
.caldac = {ad8804_debug},
},
{.device_id = 39,
.isapnp_id = 0x2700,
.name = "at-mio-16xe-50",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_8,
.ai_speed = 50000,
.n_aochan = 2,
.aobits = 12,
.ao_fifo_depth = 0,
.ao_range_table = &range_bipolar10,
.ao_unipolar = 0,
.ao_speed = 50000,
.num_p0_dio_channels = 8,
.caldac = {dac8800, dac8043},
.has_8255 = 0,
},
{.device_id = 50,
.isapnp_id = 0x0000, /* XXX unknown */
.name = "at-mio-16xe-10",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1,
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 2,
.aobits = 16,
.ao_fifo_depth = 2048,
.ao_range_table = &range_ni_E_ao_ext,
.ao_unipolar = 1,
.ao_speed = 1000,
.num_p0_dio_channels = 8,
.caldac = {dac8800, dac8043, ad8522},
.has_8255 = 0,
},
{.device_id = 51,
.isapnp_id = 0x0000, /* XXX unknown */
.name = "at-ai-16xe-10",
.n_adchan = 16,
.adbits = 16,
.ai_fifo_depth = 512,
.alwaysdither = 1, /* unknown */
.gainlkup = ai_gain_14,
.ai_speed = 10000,
.n_aochan = 0,
.aobits = 0,
.ao_fifo_depth = 0,
.ao_unipolar = 0,
.num_p0_dio_channels = 8,
.caldac = {dac8800, dac8043, ad8522},
.has_8255 = 0,
}
};
static const int ni_irqpin[] = {
-1, -1, -1, 0, 1, 2, -1, 3, -1, -1, 4, 5, 6, -1, -1, 7
};
#define interrupt_pin(a) (ni_irqpin[(a)])
#define IRQ_POLARITY 0
#define NI_E_IRQ_FLAGS 0
struct ni_private {
struct pnp_dev *isapnp_dev;
NI_PRIVATE_COMMON
};
/* How we access registers */
#define ni_writel(a, b) (outl((a), (b)+dev->iobase))
#define ni_readl(a) (inl((a)+dev->iobase))
#define ni_writew(a, b) (outw((a), (b)+dev->iobase))
#define ni_readw(a) (inw((a)+dev->iobase))
#define ni_writeb(a, b) (outb((a), (b)+dev->iobase))
#define ni_readb(a) (inb((a)+dev->iobase))
/* How we access windowed registers */
/* We automatically take advantage of STC registers that can be
* read/written directly in the I/O space of the board. The
* AT-MIO devices map the low 8 STC registers to iobase+addr*2. */
static void ni_atmio_win_out(struct comedi_device *dev, uint16_t data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->window_lock, flags);
	if (addr < 8) {
ni_writew(data, addr * 2);
} else {
ni_writew(addr, Window_Address);
ni_writew(data, Window_Data);
}
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
static uint16_t ni_atmio_win_in(struct comedi_device *dev, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
uint16_t ret;
spin_lock_irqsave(&devpriv->window_lock, flags);
if (addr < 8) {
ret = ni_readw(addr * 2);
} else {
ni_writew(addr, Window_Address);
ret = ni_readw(Window_Data);
}
spin_unlock_irqrestore(&devpriv->window_lock, flags);
return ret;
}
static struct pnp_device_id device_ids[] = {
{.id = "NIC1900", .driver_data = 0},
{.id = "NIC2400", .driver_data = 0},
{.id = "NIC2500", .driver_data = 0},
{.id = "NIC2600", .driver_data = 0},
{.id = "NIC2700", .driver_data = 0},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, device_ids);
#include "ni_mio_common.c"
static int ni_isapnp_find_board(struct pnp_dev **dev)
{
struct pnp_dev *isapnp_dev = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
		isapnp_dev = pnp_find_dev(NULL,
					  ISAPNP_VENDOR('N', 'I', 'C'),
					  ISAPNP_FUNCTION(ni_boards[i].isapnp_id),
					  NULL);
if (isapnp_dev == NULL || isapnp_dev->card == NULL)
continue;
if (pnp_device_attach(isapnp_dev) < 0) {
			printk("ni_atmio: %s found but already active, skipping.\n",
			       ni_boards[i].name);
continue;
}
if (pnp_activate_dev(isapnp_dev) < 0) {
pnp_device_detach(isapnp_dev);
return -EAGAIN;
}
if (!pnp_port_valid(isapnp_dev, 0)
|| !pnp_irq_valid(isapnp_dev, 0)) {
pnp_device_detach(isapnp_dev);
printk("ni_atmio: pnp invalid port or irq, aborting\n");
return -ENOMEM;
}
break;
}
if (i == ARRAY_SIZE(ni_boards))
return -ENODEV;
*dev = isapnp_dev;
return 0;
}
static int ni_getboardtype(struct comedi_device *dev)
{
int device_id = ni_read_eeprom(dev, 511);
int i;
for (i = 0; i < ARRAY_SIZE(ni_boards); i++) {
if (ni_boards[i].device_id == device_id)
return i;
}
if (device_id == 255)
printk(" can't find board\n");
else if (device_id == 0)
printk(" EEPROM read error (?) or device not found\n");
else
printk(" unknown device ID %d -- contact author\n", device_id);
return -1;
}
static int ni_atmio_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
const struct ni_board_struct *boardtype;
struct ni_private *devpriv;
struct pnp_dev *isapnp_dev;
int ret;
unsigned long iobase;
int board;
unsigned int irq;
ret = ni_alloc_private(dev);
if (ret)
return ret;
devpriv = dev->private;
devpriv->stc_writew = &ni_atmio_win_out;
devpriv->stc_readw = &ni_atmio_win_in;
devpriv->stc_writel = &win_out2;
devpriv->stc_readl = &win_in2;
iobase = it->options[0];
irq = it->options[1];
isapnp_dev = NULL;
if (iobase == 0) {
ret = ni_isapnp_find_board(&isapnp_dev);
if (ret < 0)
return ret;
iobase = pnp_port_start(isapnp_dev, 0);
irq = pnp_irq(isapnp_dev, 0);
devpriv->isapnp_dev = isapnp_dev;
}
ret = comedi_request_region(dev, iobase, NI_SIZE);
if (ret)
return ret;
#ifdef DEBUG
/* board existence sanity check */
{
int i;
printk(" board fingerprint:");
for (i = 0; i < 16; i += 2) {
printk(" %04x %02x", inw(dev->iobase + i),
inb(dev->iobase + i + 1));
}
}
#endif
/* get board type */
board = ni_getboardtype(dev);
if (board < 0)
return -EIO;
dev->board_ptr = ni_boards + board;
boardtype = comedi_board(dev);
printk(" %s", boardtype->name);
dev->board_name = boardtype->name;
/* irq stuff */
if (irq != 0) {
if (irq > 15 || ni_irqpin[irq] == -1) {
printk(" invalid irq %u\n", irq);
return -EINVAL;
}
printk(" ( irq = %u )", irq);
ret = request_irq(irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
"ni_atmio", dev);
if (ret < 0) {
printk(" irq not available\n");
return -EINVAL;
}
dev->irq = irq;
}
/* generic E series stuff in ni_mio_common.c */
ret = ni_E_init(dev);
if (ret < 0)
return ret;
return 0;
}
static void ni_atmio_detach(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
mio_common_detach(dev);
comedi_legacy_detach(dev);
if (devpriv->isapnp_dev)
pnp_device_detach(devpriv->isapnp_dev);
}
static struct comedi_driver ni_atmio_driver = {
.driver_name = "ni_atmio",
.module = THIS_MODULE,
.attach = ni_atmio_attach,
.detach = ni_atmio_detach,
};
module_comedi_driver(ni_atmio_driver);
| gpl-2.0 |
high1/android_kernel_htc_pico | ipc/sem.c | 2345 | 41544 | /*
* linux/ipc/sem.c
* Copyright (C) 1992 Krishna Balasubramanian
* Copyright (C) 1995 Eric Schenk, Bruno Haible
*
* /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
*
* SMP-threaded, sysctl's added
* (c) 1999 Manfred Spraul <manfred@colorfullife.com>
* Enforced range limit on SEM_UNDO
* (c) 2001 Red Hat Inc
* Lockless wakeup
* (c) 2003 Manfred Spraul <manfred@colorfullife.com>
* Further wakeup optimizations, documentation
* (c) 2010 Manfred Spraul <manfred@colorfullife.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*
* Implementation notes: (May 2010)
* This file implements System V semaphores.
*
* User space visible behavior:
* - FIFO ordering for semop() operations (just FIFO, not starvation
* protection)
* - multiple semaphore operations that alter the same semaphore in
* one semop() are handled.
* - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
* SETALL calls.
* - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
* - undo adjustments at process exit are limited to 0..SEMVMX.
 * - namespaces are supported.
 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
* to /proc/sys/kernel/sem.
* - statistics about the usage are reported in /proc/sysvipc/sem.
*
* Internals:
* - scalability:
* - all global variables are read-mostly.
* - semop() calls and semctl(RMID) are synchronized by RCU.
* - most operations do write operations (actually: spin_lock calls) to
* the per-semaphore array structure.
* Thus: Perfect SMP scaling between independent semaphore arrays.
* If multiple semaphores in one array are used, then cache line
 * thrashing on the semaphore array spinlock will limit the scaling.
* - semncnt and semzcnt are calculated on demand in count_semncnt() and
* count_semzcnt()
* - the task that performs a successful semop() scans the list of all
* sleeping tasks and completes any pending operations that can be fulfilled.
* Semaphores are actively given to waiting tasks (necessary for FIFO).
* (see update_queue())
* - To improve the scalability, the actual wake-up calls are performed after
* dropping all locks. (see wake_up_sem_queue_prepare(),
* wake_up_sem_queue_do())
* - All work is done by the waker, the woken up task does not have to do
* anything - not even acquiring a lock or dropping a refcount.
* - A woken up task may not even touch the semaphore array anymore, it may
* have been destroyed already by a semctl(RMID).
* - The synchronizations between wake-ups due to a timeout/signal and a
* wake-up due to a completed semaphore operation is achieved by using an
* intermediate state (IN_WAKEUP).
* - UNDO values are stored in an array (one per process and per
* semaphore array, lazily allocated). For backwards compatibility, multiple
* modes for the UNDO variables are supported (per process, per thread)
* (see copy_semundo, CLONE_SYSVSEM)
* - There are two lists of the pending operations: a per-array list
* and per-semaphore list (stored in the array). This allows to achieve FIFO
* ordering without always scanning all pending operations.
* The worst-case behavior is nevertheless O(N^2) for N wakeups.
*/
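/*
 * Userspace view (illustrative sketch, not part of this file): the calls
 * below exercise the paths implemented here.  Note that "union semun"
 * must be declared by the application itself on Linux.
 *
 *	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *	union semun arg = { .val = 1 };
 *	semctl(id, 0, SETVAL, arg);		// served by semctl_main()
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
 *	semop(id, &op, 1);			// may sleep, FIFO ordered
 *	semctl(id, 0, IPC_RMID, arg);		// served by semctl_down()
 */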
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <asm/uaccess.h>
#include "util.h"
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif
#define SEMMSL_FAST 256 /* 512 bytes on stack */
#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
/*
* linked list protection:
* sem_undo.id_next,
* sem_array.sem_pending{,last},
* sem_array.sem_undo: sem_lock() for read/write
* sem_undo.proc_next: only "current" is allowed to read/write that field.
*
*/
#define sc_semmsl sem_ctls[0]
#define sc_semmns sem_ctls[1]
#define sc_semopm sem_ctls[2]
#define sc_semmni sem_ctls[3]
void sem_init_ns(struct ipc_namespace *ns)
{
ns->sc_semmsl = SEMMSL;
ns->sc_semmns = SEMMNS;
ns->sc_semopm = SEMOPM;
ns->sc_semmni = SEMMNI;
ns->used_sems = 0;
ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
void __init sem_init (void)
{
sem_init_ns(&init_ipc_ns);
ipc_init_proc_interface("sysvipc/sem",
" key semid perms nsems uid gid cuid cgid otime ctime\n",
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return (struct sem_array *)ipcp;
return container_of(ipcp, struct sem_array, sem_perm);
}
static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return (struct sem_array *)ipcp;
return container_of(ipcp, struct sem_array, sem_perm);
}
static inline void sem_lock_and_putref(struct sem_array *sma)
{
ipc_lock_by_ptr(&sma->sem_perm);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
ipc_rcu_getref(sma);
ipc_unlock(&(sma)->sem_perm);
}
static inline void sem_putref(struct sem_array *sma)
{
ipc_lock_by_ptr(&sma->sem_perm);
ipc_rcu_putref(sma);
ipc_unlock(&(sma)->sem_perm);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
/*
* Lockless wakeup algorithm:
* Without the check/retry algorithm a lockless wakeup is possible:
* - queue.status is initialized to -EINTR before blocking.
* - wakeup is performed by
* * unlinking the queue entry from sma->sem_pending
* * setting queue.status to IN_WAKEUP
* This is the notification for the blocked thread that a
* result value is imminent.
* * call wake_up_process
* * set queue.status to the final value.
* - the previously blocked thread checks queue.status:
* * if it's IN_WAKEUP, then it must wait until the value changes
* * if it's not -EINTR, then the operation was completed by
* update_queue. semtimedop can return queue.status without
* performing any operation on the sem array.
* * otherwise it must acquire the spinlock and check what's up.
*
* The two-stage algorithm is necessary to protect against the following
* races:
* - if queue.status is set after wake_up_process, then the woken up idle
* thread could race forward and try (and fail) to acquire sma->lock
* before update_queue had a chance to set queue.status
* - if queue.status is written before wake_up_process and if the
* blocked process is woken up by a signal between writing
* queue.status and the wake_up_process, then the woken up
* process could return from semtimedop and die by calling
* sys_exit before wake_up_process is called. Then wake_up_process
* will oops, because the task structure is already invalid.
* (yes, this happened on s390 with sysv msg).
*
*/
#define IN_WAKEUP 1
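/*
 * Waiter-side sketch (illustrative; the blocked task in semtimedop()
 * behaves roughly like this when it is woken up):
 *
 *	error = q->status;
 *	while (error == IN_WAKEUP) {
 *		cpu_relax();		// waker is between IN_WAKEUP and
 *		error = q->status;	// the final return value
 *	}
 */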
/**
* newary - Create a new semaphore set
* @ns: namespace
* @params: ptr to the structure that contains key, semflg and nsems
*
* Called with sem_ids.rw_mutex held (as a writer)
*/
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
int id;
int retval;
struct sem_array *sma;
int size;
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
int i;
if (!nsems)
return -EINVAL;
if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC;
size = sizeof (*sma) + nsems * sizeof (struct sem);
sma = ipc_rcu_alloc(size);
if (!sma) {
return -ENOMEM;
}
memset (sma, 0, size);
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
if (retval) {
ipc_rcu_putref(sma);
return retval;
}
id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (id < 0) {
security_sem_free(sma);
ipc_rcu_putref(sma);
return id;
}
ns->used_sems += nsems;
sma->sem_base = (struct sem *) &sma[1];
for (i = 0; i < nsems; i++)
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
sem_unlock(sma);
return sma->sem_perm.id;
}
/*
* Called with sem_ids.rw_mutex and ipcp locked.
*/
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
return security_sem_associate(sma, semflg);
}
/*
* Called with sem_ids.rw_mutex and ipcp locked.
*/
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
if (params->u.nsems > sma->sem_nsems)
return -EINVAL;
return 0;
}
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
struct ipc_namespace *ns;
struct ipc_ops sem_ops;
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
sem_ops.getnew = newary;
sem_ops.associate = sem_security;
sem_ops.more_checks = sem_more_checks;
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
/*
* Determine whether a sequence of semaphore operations would succeed
* all at once. Return 0 if yes, 1 if need to sleep, else return error code.
*/
static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
int nsops, struct sem_undo *un, int pid)
{
int result, sem_op;
struct sembuf *sop;
struct sem * curr;
for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num;
sem_op = sop->sem_op;
result = curr->semval;
if (!sem_op && result)
goto would_block;
result += sem_op;
if (result < 0)
goto would_block;
if (result > SEMVMX)
goto out_of_range;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
/*
* Exceeding the undo range is an error.
*/
if (undo < (-SEMAEM - 1) || undo > SEMAEM)
goto out_of_range;
}
curr->semval = result;
}
sop--;
while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] -= sop->sem_op;
sop--;
}
return 0;
out_of_range:
result = -ERANGE;
goto undo;
would_block:
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
result = 1;
undo:
sop--;
while (sop >= sops) {
sma->sem_base[sop->sem_num].semval -= sop->sem_op;
sop--;
}
return result;
}
/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
 * @pt: list head to which the entry is added for the later wake-up
 * @q: queue entry that must be signaled
 * @error: Error value for the signal
*
* Prepare the wake-up of the queue entry q.
*/
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
* wakee busy-wait until we're scheduled back on.
*/
preempt_disable();
}
q->status = IN_WAKEUP;
q->pid = error;
list_add_tail(&q->simple_list, pt);
}
/**
* wake_up_sem_queue_do(pt) - do the actual wake-up
* @pt: list of tasks to be woken up
*
* Do the actual wake-up.
* The function is called without any locks held, thus the semaphore array
* could be destroyed already and the tasks can disappear as soon as the
* status is set to the actual return code.
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
struct sem_queue *q, *t;
int did_something;
did_something = !list_empty(pt);
list_for_each_entry_safe(q, t, pt, simple_list) {
wake_up_process(q->sleeper);
/* q can disappear immediately after writing q->status. */
smp_wmb();
q->status = q->pid;
}
if (did_something)
preempt_enable();
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
list_del(&q->list);
if (q->nsops == 1)
list_del(&q->simple_list);
else
sma->complex_count--;
}
/** check_restart(sma, q)
* @sma: semaphore array
* @q: the operation that just completed
*
* update_queue is O(N^2) when it restarts scanning the whole queue of
* waiting operations. Therefore this function checks if the restart is
* really necessary. It is called after a previously waiting operation
* was completed.
*/
static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
struct sem *curr;
struct sem_queue *h;
/* if the operation didn't modify the array, then no restart */
if (q->alter == 0)
return 0;
/* pending complex operations are too difficult to analyse */
if (sma->complex_count)
return 1;
/* we were a sleeping complex operation. Too difficult */
if (q->nsops > 1)
return 1;
curr = sma->sem_base + q->sops[0].sem_num;
/* No-one waits on this queue */
if (list_empty(&curr->sem_pending))
return 0;
/* the new semaphore value */
if (curr->semval) {
/* It is impossible that someone waits for the new value:
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
* - The value is not 0, thus wait-for-zero won't proceed.
* - If there are older (higher priority) decrements
* in the queue, then they have observed the original
* semval value and couldn't proceed. The operation
* decremented to value - thus they won't proceed either.
*/
BUG_ON(q->sops[0].sem_op >= 0);
return 0;
}
/*
* semval is 0. Check if there are wait-for-zero semops.
* They must be the first entries in the per-semaphore simple queue
*/
h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
BUG_ON(h->nsops != 1);
BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
/* Yes, there is a wait-for-zero semop. Restart */
if (h->sops[0].sem_op == 0)
return 1;
/* Again - no-one is waiting for the new value. */
return 0;
}
/**
* update_queue(sma, semnum): Look for tasks that can be completed.
* @sma: semaphore array.
* @semnum: semaphore that was modified.
* @pt: list head for the tasks that must be woken up.
*
* update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
* must be set to -1.
* The tasks that must be woken up are added to @pt. The return code
* is stored in q->pid.
 * The function returns 1 if at least one semop was completed successfully.
*/
static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
struct sem_queue *q;
struct list_head *walk;
struct list_head *pending_list;
int offset;
int semop_completed = 0;
/* if there are complex operations around, then knowing the semaphore
* that was modified doesn't help us. Assume that multiple semaphores
* were modified.
*/
if (sma->complex_count)
semnum = -1;
if (semnum == -1) {
pending_list = &sma->sem_pending;
offset = offsetof(struct sem_queue, list);
} else {
pending_list = &sma->sem_base[semnum].sem_pending;
offset = offsetof(struct sem_queue, simple_list);
}
again:
walk = pending_list->next;
while (walk != pending_list) {
int error, restart;
q = (struct sem_queue *)((char *)walk - offset);
walk = walk->next;
/* If we are scanning the single sop, per-semaphore list of
* one semaphore and that semaphore is 0, then it is not
* necessary to scan the "alter" entries: simple increments
* that affect only one entry succeed immediately and cannot
* be in the per semaphore pending queue, and decrements
* cannot be successful if the value is already 0.
*/
if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
q->alter)
break;
error = try_atomic_semop(sma, q->sops, q->nsops,
q->undo, q->pid);
/* Does q->sleeper still need to sleep? */
if (error > 0)
continue;
unlink_queue(sma, q);
if (error) {
restart = 0;
} else {
semop_completed = 1;
restart = check_restart(sma, q);
}
wake_up_sem_queue_prepare(pt, q, error);
if (restart)
goto again;
}
return semop_completed;
}
/**
* do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
* @sma: semaphore array
* @sops: operations that were performed
* @nsops: number of operations
* @otime: force setting otime
* @pt: list head of the tasks that must be woken up.
*
 * do_smart_update() does the required calls to update_queue(), based on the
* actual changes that were performed on the semaphore array.
* Note that the function does not do the actual wake-up: the caller is
* responsible for calling wake_up_sem_queue_do(@pt).
* It is safe to perform this call after dropping all locks.
*/
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
int otime, struct list_head *pt)
{
int i;
if (sma->complex_count || sops == NULL) {
if (update_queue(sma, -1, pt))
otime = 1;
goto done;
}
for (i = 0; i < nsops; i++) {
if (sops[i].sem_op > 0 ||
(sops[i].sem_op < 0 &&
sma->sem_base[sops[i].sem_num].semval == 0))
if (update_queue(sma, sops[i].sem_num, pt))
otime = 1;
}
done:
if (otime)
sma->sem_otime = get_seconds();
}
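/*
 * Typical caller pattern (sketch; semtimedop() and the semctl() paths
 * follow this shape): collect wake-ups while the array is locked, drop
 * the lock, then do the actual wake-up.
 *
 *	struct list_head tasks;
 *	INIT_LIST_HEAD(&tasks);
 *	...
 *	do_smart_update(sma, sops, nsops, 1, &tasks);
 *	sem_unlock(sma);
 *	wake_up_sem_queue_do(&tasks);
 */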
/* The following counts are associated with each semaphore:
* semncnt number of tasks waiting on semval being nonzero
* semzcnt number of tasks waiting on semval being zero
* This model assumes that a task waits on exactly one semaphore.
* Since semaphore operations are to be performed atomically, tasks actually
* wait on a whole sequence of semaphores simultaneously.
* The counts we return here are a rough approximation, but still
* warrant that semncnt+semzcnt>0 if the task is on the pending queue.
*/
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
int semncnt;
struct sem_queue * q;
semncnt = 0;
list_for_each_entry(q, &sma->sem_pending, list) {
struct sembuf * sops = q->sops;
int nsops = q->nsops;
int i;
for (i = 0; i < nsops; i++)
if (sops[i].sem_num == semnum
&& (sops[i].sem_op < 0)
&& !(sops[i].sem_flg & IPC_NOWAIT))
semncnt++;
}
return semncnt;
}
static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
int semzcnt;
struct sem_queue * q;
semzcnt = 0;
list_for_each_entry(q, &sma->sem_pending, list) {
struct sembuf * sops = q->sops;
int nsops = q->nsops;
int i;
for (i = 0; i < nsops; i++)
if (sops[i].sem_num == semnum
&& (sops[i].sem_op == 0)
&& !(sops[i].sem_flg & IPC_NOWAIT))
semzcnt++;
}
return semzcnt;
}
static void free_un(struct rcu_head *head)
{
struct sem_undo *un = container_of(head, struct sem_undo, rcu);
kfree(un);
}
/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
* remains locked on exit.
*/
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct sem_undo *un, *tu;
struct sem_queue *q, *tq;
struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
struct list_head tasks;
/* Free the existing undo structures for this semaphore set. */
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
list_del(&un->list_id);
spin_lock(&un->ulp->lock);
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
call_rcu(&un->rcu, free_un);
}
/* Wake up all pending processes and let them fail with EIDRM. */
INIT_LIST_HEAD(&tasks);
list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
unlink_queue(sma, q);
wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
}
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
sem_unlock(sma);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
security_sem_free(sma);
ipc_rcu_putref(sma);
}
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct semid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
out.sem_otime = in->sem_otime;
out.sem_ctime = in->sem_ctime;
out.sem_nsems = in->sem_nsems;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
static int semctl_nolock(struct ipc_namespace *ns, int semid,
int cmd, int version, union semun arg)
{
int err;
struct sem_array *sma;
switch(cmd) {
case IPC_INFO:
case SEM_INFO:
{
struct seminfo seminfo;
int max_id;
err = security_sem_semctl(NULL, cmd);
if (err)
return err;
memset(&seminfo,0,sizeof(seminfo));
seminfo.semmni = ns->sc_semmni;
seminfo.semmns = ns->sc_semmns;
seminfo.semmsl = ns->sc_semmsl;
seminfo.semopm = ns->sc_semopm;
seminfo.semvmx = SEMVMX;
seminfo.semmnu = SEMMNU;
seminfo.semmap = SEMMAP;
seminfo.semume = SEMUME;
down_read(&sem_ids(ns).rw_mutex);
if (cmd == SEM_INFO) {
seminfo.semusz = sem_ids(ns).in_use;
seminfo.semaem = ns->used_sems;
} else {
seminfo.semusz = SEMUSZ;
seminfo.semaem = SEMAEM;
}
max_id = ipc_get_maxid(&sem_ids(ns));
up_read(&sem_ids(ns).rw_mutex);
if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
return -EFAULT;
return (max_id < 0) ? 0: max_id;
}
case IPC_STAT:
case SEM_STAT:
{
struct semid64_ds tbuf;
int id;
if (cmd == SEM_STAT) {
sma = sem_lock(ns, semid);
if (IS_ERR(sma))
return PTR_ERR(sma);
id = sma->sem_perm.id;
} else {
sma = sem_lock_check(ns, semid);
if (IS_ERR(sma))
return PTR_ERR(sma);
id = 0;
}
err = -EACCES;
if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
goto out_unlock;
err = security_sem_semctl(sma, cmd);
if (err)
goto out_unlock;
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
tbuf.sem_otime = sma->sem_otime;
tbuf.sem_ctime = sma->sem_ctime;
tbuf.sem_nsems = sma->sem_nsems;
sem_unlock(sma);
if (copy_semid_to_user (arg.buf, &tbuf, version))
return -EFAULT;
return id;
}
default:
return -EINVAL;
}
out_unlock:
sem_unlock(sma);
return err;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int cmd, int version, union semun arg)
{
struct sem_array *sma;
struct sem* curr;
int err;
ushort fast_sem_io[SEMMSL_FAST];
ushort* sem_io = fast_sem_io;
int nsems;
struct list_head tasks;
sma = sem_lock_check(ns, semid);
if (IS_ERR(sma))
return PTR_ERR(sma);
INIT_LIST_HEAD(&tasks);
nsems = sma->sem_nsems;
err = -EACCES;
if (ipcperms(ns, &sma->sem_perm,
(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
goto out_unlock;
err = security_sem_semctl(sma, cmd);
if (err)
goto out_unlock;
err = -EACCES;
switch (cmd) {
case GETALL:
{
ushort __user *array = arg.array;
int i;
if(nsems > SEMMSL_FAST) {
sem_getref_and_unlock(sma);
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if(sem_io == NULL) {
sem_putref(sma);
return -ENOMEM;
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
sem_unlock(sma);
err = -EIDRM;
goto out_free;
}
}
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
sem_unlock(sma);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
goto out_free;
}
case SETALL:
{
int i;
struct sem_undo *un;
sem_getref_and_unlock(sma);
if(nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if(sem_io == NULL) {
sem_putref(sma);
return -ENOMEM;
}
}
if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
sem_putref(sma);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
sem_putref(sma);
err = -ERANGE;
goto out_free;
}
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
sem_unlock(sma);
err = -EIDRM;
goto out_free;
}
for (i = 0; i < nsems; i++)
sma->sem_base[i].semval = sem_io[i];
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id) {
for (i = 0; i < nsems; i++)
un->semadj[i] = 0;
}
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
err = 0;
goto out_unlock;
}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
}
err = -EINVAL;
if(semnum < 0 || semnum >= nsems)
goto out_unlock;
curr = &sma->sem_base[semnum];
switch (cmd) {
case GETVAL:
err = curr->semval;
goto out_unlock;
case GETPID:
err = curr->sempid;
goto out_unlock;
case GETNCNT:
err = count_semncnt(sma,semnum);
goto out_unlock;
case GETZCNT:
err = count_semzcnt(sma,semnum);
goto out_unlock;
case SETVAL:
{
int val = arg.val;
struct sem_undo *un;
err = -ERANGE;
if (val > SEMVMX || val < 0)
goto out_unlock;
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
curr->semval = val;
curr->sempid = task_tgid_vnr(current);
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
err = 0;
goto out_unlock;
}
}
out_unlock:
sem_unlock(sma);
wake_up_sem_queue_do(&tasks);
out_free:
if(sem_io != fast_sem_io)
ipc_free(sem_io, sizeof(ushort)*nsems);
return err;
}
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
switch(version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct semid_ds tbuf_old;
if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->sem_perm.uid = tbuf_old.sem_perm.uid;
out->sem_perm.gid = tbuf_old.sem_perm.gid;
out->sem_perm.mode = tbuf_old.sem_perm.mode;
return 0;
}
default:
return -EINVAL;
}
}
/*
* This function handles some semctl commands which require the rw_mutex
* to be held in write mode.
* NOTE: no locks must be held, the rw_mutex is taken inside this function.
*/
static int semctl_down(struct ipc_namespace *ns, int semid,
int cmd, int version, union semun arg)
{
struct sem_array *sma;
int err;
struct semid64_ds semid64;
struct kern_ipc_perm *ipcp;
if(cmd == IPC_SET) {
if (copy_semid_from_user(&semid64, arg.buf, version))
return -EFAULT;
}
ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
&semid64.sem_perm, 0);
if (IS_ERR(ipcp))
return PTR_ERR(ipcp);
sma = container_of(ipcp, struct sem_array, sem_perm);
err = security_sem_semctl(sma, cmd);
if (err)
goto out_unlock;
switch(cmd){
case IPC_RMID:
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
ipc_update_perm(&semid64.sem_perm, ipcp);
sma->sem_ctime = get_seconds();
break;
default:
err = -EINVAL;
}
out_unlock:
sem_unlock(sma);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
}
SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
int err = -EINVAL;
int version;
struct ipc_namespace *ns;
if (semid < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch(cmd) {
case IPC_INFO:
case SEM_INFO:
case IPC_STAT:
case SEM_STAT:
err = semctl_nolock(ns, semid, cmd, version, arg);
return err;
case GETALL:
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case SETVAL:
case SETALL:
err = semctl_main(ns, semid, semnum, cmd, version, arg);
return err;
case IPC_RMID:
case IPC_SET:
err = semctl_down(ns, semid, cmd, version, arg);
return err;
default:
return -EINVAL;
}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif
/* If the task doesn't already have an undo_list, then allocate one
* here. We guarantee there is only one thread using this undo list,
* and current is THE ONE
*
* If this allocation and assignment succeeds, but later
* portions of this code fail, there is no need to free the sem_undo_list.
* Just let it stay associated with the task, and it'll be freed later
* at exit time.
*
* This can block, so callers must hold no locks.
*/
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
if (undo_list == NULL)
return -ENOMEM;
spin_lock_init(&undo_list->lock);
atomic_set(&undo_list->refcnt, 1);
INIT_LIST_HEAD(&undo_list->list_proc);
current->sysvsem.undo_list = undo_list;
}
*undo_listp = undo_list;
return 0;
}
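/*
 * __lookup_undo finds the undo entry for @semid on the per-task list;
 * lookup_undo additionally moves a hit to the front of the list so
 * recently used entries are found quickly.
 */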
static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
if (un->semid == semid)
return un;
}
return NULL;
}
static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
assert_spin_locked(&ulp->lock);
un = __lookup_undo(ulp, semid);
if (un) {
list_del_rcu(&un->list_proc);
list_add_rcu(&un->list_proc, &ulp->list_proc);
}
return un;
}
/**
* find_alloc_undo - Lookup (and if not present create) undo array
* @ns: namespace
* @semid: semaphore array id
*
* The function looks up (and if not present creates) the undo structure.
* The size of the undo structure depends on the size of the semaphore
* array, thus the alloc path is not that straightforward.
* Lifetime-rules: sem_undo is rcu-protected, on success, the function
* performs a rcu_read_lock().
*/
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
int nsems;
int error;
error = get_undo_list(&ulp);
if (error)
return ERR_PTR(error);
rcu_read_lock();
spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid);
spin_unlock(&ulp->lock);
if (likely(un != NULL))
goto out;
rcu_read_unlock();
/* no undo structure around - allocate one. */
/* step 1: figure out the size of the semaphore array */
sma = sem_lock_check(ns, semid);
if (IS_ERR(sma))
return ERR_CAST(sma);
nsems = sma->sem_nsems;
sem_getref_and_unlock(sma);
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
sem_putref(sma);
return ERR_PTR(-ENOMEM);
}
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
sem_unlock(sma);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
}
spin_lock(&ulp->lock);
/*
* step 4: check for races: did someone else allocate the undo struct?
*/
un = lookup_undo(ulp, semid);
if (un) {
kfree(new);
goto success;
}
/* step 5: initialize & link new undo structure */
new->semadj = (short *) &new[1];
new->ulp = ulp;
new->semid = semid;
assert_spin_locked(&ulp->lock);
list_add_rcu(&new->list_proc, &ulp->list_proc);
assert_spin_locked(&sma->sem_perm.lock);
list_add(&new->list_id, &sma->list_id);
un = new;
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
sem_unlock(sma);
out:
return un;
}
/**
* get_queue_result - Retrieve the result code from sem_queue
* @q: Pointer to queue structure
*
* Retrieve the return code from the pending queue. If IN_WAKEUP is found in
* q->status, then we must loop until the value is replaced with the final
* value: This may happen if a task is woken up by an unrelated event (e.g.
* signal) and in parallel the task is woken up by another task because it got
* the requested semaphores.
*
* The function can be called with or without holding the semaphore spinlock.
*/
static int get_queue_result(struct sem_queue *q)
{
int error;
error = q->status;
while (unlikely(error == IN_WAKEUP)) {
cpu_relax();
error = q->status;
}
return error;
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops, const struct timespec __user *, timeout)
{
int error = -EINVAL;
struct sem_array *sma;
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf *sops = fast_sops, *sop;
struct sem_undo *un;
int undos = 0, alter = 0, max;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
struct list_head tasks;
ns = current->nsproxy->ipc_ns;
if (nsops < 1 || semid < 0)
return -EINVAL;
if (nsops > ns->sc_semopm)
return -E2BIG;
if (nsops > SEMOPM_FAST) {
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (sops == NULL)
return -ENOMEM;
}
if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
error = -EFAULT;
goto out_free;
}
if (timeout) {
struct timespec _timeout;
if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
error = -EFAULT;
goto out_free;
}
if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
_timeout.tv_nsec >= 1000000000L) {
error = -EINVAL;
goto out_free;
}
jiffies_left = timespec_to_jiffies(&_timeout);
}
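/*
 * Scan the operations: record the highest semaphore index referenced
 * and note whether any operation uses SEM_UNDO or alters a value.
 */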
max = 0;
for (sop = sops; sop < sops + nsops; sop++) {
if (sop->sem_num >= max)
max = sop->sem_num;
if (sop->sem_flg & SEM_UNDO)
undos = 1;
if (sop->sem_op != 0)
alter = 1;
}
if (undos) {
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
} else
un = NULL;
INIT_LIST_HEAD(&tasks);
sma = sem_lock_check(ns, semid);
if (IS_ERR(sma)) {
if (un)
rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
/*
* semid identifiers are not unique - find_alloc_undo may have
* allocated an undo structure that was then invalidated by an RMID,
* and now a new array has received the same id. Check and fail.
* This case can be detected by checking un->semid. The existence of
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
if (un) {
if (un->semid == -1) {
rcu_read_unlock();
goto out_unlock_free;
} else {
/*
* rcu lock can be released, "un" cannot disappear:
* - sem_lock is acquired, thus IPC_RMID is
* impossible.
* - exit_sem is impossible, it always operates on
* current (or a dead task).
*/
rcu_read_unlock();
}
}
error = -EFBIG;
if (max >= sma->sem_nsems)
goto out_unlock_free;
error = -EACCES;
if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
goto out_unlock_free;
error = security_sem_semop(sma, sops, nsops, alter);
if (error)
goto out_unlock_free;
error = try_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
if (alter && error == 0)
do_smart_update(sma, sops, nsops, 1, &tasks);
goto out_unlock_free;
}
/* We need to sleep on this operation, so we put the current
* task into the pending queue and go to sleep.
*/
queue.sops = sops;
queue.nsops = nsops;
queue.undo = un;
queue.pid = task_tgid_vnr(current);
queue.alter = alter;
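/*
 * Altering operations are queued at the tail of the pending list,
 * wait-for-zero operations at the head.
 */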
if (alter)
list_add_tail(&queue.list, &sma->sem_pending);
else
list_add(&queue.list, &sma->sem_pending);
if (nsops == 1) {
struct sem *curr;
curr = &sma->sem_base[sops->sem_num];
if (alter)
list_add_tail(&queue.simple_list, &curr->sem_pending);
else
list_add(&queue.simple_list, &curr->sem_pending);
} else {
INIT_LIST_HEAD(&queue.simple_list);
sma->complex_count++;
}
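/* -EINTR marks the entry as still pending; a successful waker replaces
 * it with the final result (see get_queue_result()). */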
queue.status = -EINTR;
queue.sleeper = current;
current->state = TASK_INTERRUPTIBLE;
sem_unlock(sma);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
else
schedule();
error = get_queue_result(&queue);
if (error != -EINTR) {
/* fast path: update_queue already obtained all requested
* resources.
* Perform a smp_mb(): User space could assume that semop()
* is a memory barrier: Without the mb(), the cpu could
* speculatively read in user space stale data that was
* overwritten by the previous owner of the semaphore.
*/
smp_mb();
goto out_free;
}
sma = sem_lock(ns, semid);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
*/
error = get_queue_result(&queue);
/*
* Array removed? If yes, leave without sem_unlock().
*/
if (IS_ERR(sma)) {
error = -EIDRM;
goto out_free;
}
/*
* If queue.status != -EINTR we are woken up by another process.
* Leave without unlink_queue(), but with sem_unlock().
*/
if (error != -EINTR) {
goto out_unlock_free;
}
/*
* If an interrupt occurred we have to clean up the queue
*/
if (timeout && jiffies_left == 0)
error = -EAGAIN;
unlink_queue(sma, &queue);
out_unlock_free:
sem_unlock(sma);
wake_up_sem_queue_do(&tasks);
out_free:
if (sops != fast_sops)
kfree(sops);
return error;
}
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops)
{
return sys_semtimedop(semid, tsops, nsops, NULL);
}
/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
* parent and child tasks.
*/
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
struct sem_undo_list *undo_list;
int error;
if (clone_flags & CLONE_SYSVSEM) {
error = get_undo_list(&undo_list);
if (error)
return error;
atomic_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
} else
tsk->sysvsem.undo_list = NULL;
return 0;
}
/*
* add semadj values to semaphores, free undo structures.
* undo structures are not freed when semaphore arrays are destroyed
* so some of them may be out of date.
* IMPLEMENTATION NOTE: There is some confusion over whether the
* set of adjustments that needs to be done should be done in an atomic
* manner or not. That is, if we are attempting to decrement the semval
* should we queue up and wait until we can do so legally?
* The original implementation attempted to do this (queue and wait).
* The current implementation does not do so. The POSIX standard
* and SVID should be consulted to determine what behavior is mandated.
*/
void exit_sem(struct task_struct *tsk)
{
struct sem_undo_list *ulp;
ulp = tsk->sysvsem.undo_list;
if (!ulp)
return;
tsk->sysvsem.undo_list = NULL;
if (!atomic_dec_and_test(&ulp->refcnt))
return;
for (;;) {
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
int semid;
int i;
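/*
 * Peek at the first undo entry under RCU; an empty list means all
 * adjustments have been processed and we can stop.
 */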
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
if (&un->list_proc == &ulp->list_proc)
semid = -1;
else
semid = un->semid;
rcu_read_unlock();
if (semid == -1)
break;
sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma))
continue;
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
sem_unlock(sma);
continue;
}
/* remove un from the linked lists */
assert_spin_locked(&sma->sem_perm.lock);
list_del(&un->list_id);
spin_lock(&ulp->lock);
list_del_rcu(&un->list_proc);
spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
struct sem *semaphore = &sma->sem_base[i];
if (un->semadj[i]) {
semaphore->semval += un->semadj[i];
/*
* Range checks of the new semaphore value,
* not defined by sus:
* - Some unices ignore the undo entirely
* (e.g. HP UX 11i 11.22, Tru64 V5.1)
* - some cap the value (e.g. FreeBSD caps
* at 0, but doesn't enforce SEMVMX)
*
* Linux caps the semaphore value, both at 0
* and at SEMVMX.
*
* Manfred <manfred@colorfullife.com>
*/
if (semaphore->semval < 0)
semaphore->semval = 0;
if (semaphore->semval > SEMVMX)
semaphore->semval = SEMVMX;
semaphore->sempid = task_tgid_vnr(current);
}
}
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
sem_unlock(sma);
wake_up_sem_queue_do(&tasks);
call_rcu(&un->rcu, free_un);
}
kfree(ulp);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
struct sem_array *sma = it;
return seq_printf(s,
"%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,
sma->sem_nsems,
sma->sem_perm.uid,
sma->sem_perm.gid,
sma->sem_perm.cuid,
sma->sem_perm.cgid,
sma->sem_otime,
sma->sem_ctime);
}
#endif
| gpl-2.0 |
jiangyanfeng/android_kernel_huawei_G300 | drivers/usb/gadget/webcam.c | 2601 | 12045 | /*
* webcam.c -- USB webcam gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/video.h>
#include "f_uvc.h"
/*
* Kbuild is not very cooperative with respect to linking separately
* compiled library objects into one module. So for now we won't use
* separate compilation ... ensuring init/exit sections work to shrink
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
#include "composite.c"
#include "usbstring.c"
#include "config.c"
#include "epautoconf.c"
#include "uvc_queue.c"
#include "uvc_video.c"
#include "uvc_v4l2.c"
#include "f_uvc.c"
/* --------------------------------------------------------------------------
* Device descriptor
*/
#define WEBCAM_VENDOR_ID 0x1d6b /* Linux Foundation */
#define WEBCAM_PRODUCT_ID 0x0102 /* Webcam A/V gadget */
#define WEBCAM_DEVICE_BCD 0x0010 /* 0.10 */
static char webcam_vendor_label[] = "Linux Foundation";
static char webcam_product_label[] = "Webcam gadget";
static char webcam_config_label[] = "Video";
/* string IDs are assigned dynamically */
#define STRING_MANUFACTURER_IDX 0
#define STRING_PRODUCT_IDX 1
#define STRING_DESCRIPTION_IDX 2
static struct usb_string webcam_strings[] = {
[STRING_MANUFACTURER_IDX].s = webcam_vendor_label,
[STRING_PRODUCT_IDX].s = webcam_product_label,
[STRING_DESCRIPTION_IDX].s = webcam_config_label,
{ }
};
static struct usb_gadget_strings webcam_stringtab = {
.language = 0x0409, /* en-us */
.strings = webcam_strings,
};
static struct usb_gadget_strings *webcam_device_strings[] = {
&webcam_stringtab,
NULL,
};
static struct usb_device_descriptor webcam_device_descriptor = {
.bLength = USB_DT_DEVICE_SIZE,
.bDescriptorType = USB_DT_DEVICE,
.bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_MISC,
.bDeviceSubClass = 0x02,
.bDeviceProtocol = 0x01,
.bMaxPacketSize0 = 0, /* dynamic */
.idVendor = cpu_to_le16(WEBCAM_VENDOR_ID),
.idProduct = cpu_to_le16(WEBCAM_PRODUCT_ID),
.bcdDevice = cpu_to_le16(WEBCAM_DEVICE_BCD),
.iManufacturer = 0, /* dynamic */
.iProduct = 0, /* dynamic */
.iSerialNumber = 0, /* dynamic */
.bNumConfigurations = 0, /* dynamic */
};
DECLARE_UVC_HEADER_DESCRIPTOR(1);
static const struct UVC_HEADER_DESCRIPTOR(1) uvc_control_header = {
.bLength = UVC_DT_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_HEADER,
.bcdUVC = cpu_to_le16(0x0100),
.wTotalLength = 0, /* dynamic */
.dwClockFrequency = cpu_to_le32(48000000),
.bInCollection = 0, /* dynamic */
.baInterfaceNr[0] = 0, /* dynamic */
};
static const struct uvc_camera_terminal_descriptor uvc_camera_terminal = {
.bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_INPUT_TERMINAL,
.bTerminalID = 1,
.wTerminalType = cpu_to_le16(0x0201),
.bAssocTerminal = 0,
.iTerminal = 0,
.wObjectiveFocalLengthMin = cpu_to_le16(0),
.wObjectiveFocalLengthMax = cpu_to_le16(0),
.wOcularFocalLength = cpu_to_le16(0),
.bControlSize = 3,
.bmControls[0] = 2,
.bmControls[1] = 0,
.bmControls[2] = 0,
};
static const struct uvc_processing_unit_descriptor uvc_processing = {
.bLength = UVC_DT_PROCESSING_UNIT_SIZE(2),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_PROCESSING_UNIT,
.bUnitID = 2,
.bSourceID = 1,
.wMaxMultiplier = cpu_to_le16(16*1024),
.bControlSize = 2,
.bmControls[0] = 1,
.bmControls[1] = 0,
.iProcessing = 0,
};
static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
.bLength = UVC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL,
.bTerminalID = 3,
.wTerminalType = cpu_to_le16(0x0101),
.bAssocTerminal = 0,
.bSourceID = 2,
.iTerminal = 0,
};
DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(1, 2);
static const struct UVC_INPUT_HEADER_DESCRIPTOR(1, 2) uvc_input_header = {
.bLength = UVC_DT_INPUT_HEADER_SIZE(1, 2),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_INPUT_HEADER,
.bNumFormats = 2,
.wTotalLength = 0, /* dynamic */
.bEndpointAddress = 0, /* dynamic */
.bmInfo = 0,
.bTerminalLink = 3,
.bStillCaptureMethod = 0,
.bTriggerSupport = 0,
.bTriggerUsage = 0,
.bControlSize = 1,
.bmaControls[0][0] = 0,
.bmaControls[1][0] = 4,
};
static const struct uvc_format_uncompressed uvc_format_yuv = {
.bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED,
.bFormatIndex = 1,
.bNumFrameDescriptors = 2,
.guidFormat =
{ 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71},
.bBitsPerPixel = 16,
.bDefaultFrameIndex = 1,
.bAspectRatioX = 0,
.bAspectRatioY = 0,
.bmInterfaceFlags = 0,
.bCopyProtect = 0,
};
DECLARE_UVC_FRAME_UNCOMPRESSED(1);
DECLARE_UVC_FRAME_UNCOMPRESSED(3);
static const struct UVC_FRAME_UNCOMPRESSED(3) uvc_frame_yuv_360p = {
.bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(3),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
.bFrameIndex = 1,
.bmCapabilities = 0,
.wWidth = cpu_to_le16(640),
.wHeight = cpu_to_le16(360),
.dwMinBitRate = cpu_to_le32(18432000),
.dwMaxBitRate = cpu_to_le32(55296000),
.dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
.dwDefaultFrameInterval = cpu_to_le32(666666),
.bFrameIntervalType = 3,
.dwFrameInterval[0] = cpu_to_le32(666666),
.dwFrameInterval[1] = cpu_to_le32(1000000),
.dwFrameInterval[2] = cpu_to_le32(5000000),
};
static const struct UVC_FRAME_UNCOMPRESSED(1) uvc_frame_yuv_720p = {
.bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_UNCOMPRESSED,
.bFrameIndex = 2,
.bmCapabilities = 0,
.wWidth = cpu_to_le16(1280),
.wHeight = cpu_to_le16(720),
.dwMinBitRate = cpu_to_le32(29491200),
.dwMaxBitRate = cpu_to_le32(29491200),
.dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
.dwDefaultFrameInterval = cpu_to_le32(5000000),
.bFrameIntervalType = 1,
.dwFrameInterval[0] = cpu_to_le32(5000000),
};
static const struct uvc_format_mjpeg uvc_format_mjpg = {
.bLength = UVC_DT_FORMAT_MJPEG_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FORMAT_MJPEG,
.bFormatIndex = 2,
.bNumFrameDescriptors = 2,
.bmFlags = 0,
.bDefaultFrameIndex = 1,
.bAspectRatioX = 0,
.bAspectRatioY = 0,
.bmInterfaceFlags = 0,
.bCopyProtect = 0,
};
DECLARE_UVC_FRAME_MJPEG(1);
DECLARE_UVC_FRAME_MJPEG(3);
static const struct UVC_FRAME_MJPEG(3) uvc_frame_mjpg_360p = {
.bLength = UVC_DT_FRAME_MJPEG_SIZE(3),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_MJPEG,
.bFrameIndex = 1,
.bmCapabilities = 0,
.wWidth = cpu_to_le16(640),
.wHeight = cpu_to_le16(360),
.dwMinBitRate = cpu_to_le32(18432000),
.dwMaxBitRate = cpu_to_le32(55296000),
.dwMaxVideoFrameBufferSize = cpu_to_le32(460800),
.dwDefaultFrameInterval = cpu_to_le32(666666),
.bFrameIntervalType = 3,
.dwFrameInterval[0] = cpu_to_le32(666666),
.dwFrameInterval[1] = cpu_to_le32(1000000),
.dwFrameInterval[2] = cpu_to_le32(5000000),
};
static const struct UVC_FRAME_MJPEG(1) uvc_frame_mjpg_720p = {
.bLength = UVC_DT_FRAME_MJPEG_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_FRAME_MJPEG,
.bFrameIndex = 2,
.bmCapabilities = 0,
.wWidth = cpu_to_le16(1280),
.wHeight = cpu_to_le16(720),
.dwMinBitRate = cpu_to_le32(29491200),
.dwMaxBitRate = cpu_to_le32(29491200),
.dwMaxVideoFrameBufferSize = cpu_to_le32(1843200),
.dwDefaultFrameInterval = cpu_to_le32(5000000),
.bFrameIntervalType = 1,
.dwFrameInterval[0] = cpu_to_le32(5000000),
};
static const struct uvc_color_matching_descriptor uvc_color_matching = {
.bLength = UVC_DT_COLOR_MATCHING_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = UVC_VS_COLORFORMAT,
.bColorPrimaries = 1,
.bTransferCharacteristics = 1,
.bMatrixCoefficients = 4,
};
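/*
 * Class-specific descriptor lists passed to uvc_bind_config(): one for
 * the VideoControl interface and one per speed for VideoStreaming.
 */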
static const struct uvc_descriptor_header * const uvc_control_cls[] = {
(const struct uvc_descriptor_header *) &uvc_control_header,
(const struct uvc_descriptor_header *) &uvc_camera_terminal,
(const struct uvc_descriptor_header *) &uvc_processing,
(const struct uvc_descriptor_header *) &uvc_output_terminal,
NULL,
};
static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
(const struct uvc_descriptor_header *) &uvc_color_matching,
NULL,
};
static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_input_header,
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
(const struct uvc_descriptor_header *) &uvc_color_matching,
NULL,
};
/* --------------------------------------------------------------------------
* USB configuration
*/
static int __init
webcam_config_bind(struct usb_configuration *c)
{
return uvc_bind_config(c, uvc_control_cls, uvc_fs_streaming_cls,
uvc_hs_streaming_cls);
}
static struct usb_configuration webcam_config_driver = {
.label = webcam_config_label,
.bConfigurationValue = 1,
.iConfiguration = 0, /* dynamic */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
.bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
};
static int /* __init_or_exit */
webcam_unbind(struct usb_composite_dev *cdev)
{
return 0;
}
static int __init
webcam_bind(struct usb_composite_dev *cdev)
{
int ret;
/* Allocate string descriptor numbers ... note that string contents
* can be overridden by the composite_dev glue.
*/
if ((ret = usb_string_id(cdev)) < 0)
goto error;
webcam_strings[STRING_MANUFACTURER_IDX].id = ret;
webcam_device_descriptor.iManufacturer = ret;
if ((ret = usb_string_id(cdev)) < 0)
goto error;
webcam_strings[STRING_PRODUCT_IDX].id = ret;
webcam_device_descriptor.iProduct = ret;
if ((ret = usb_string_id(cdev)) < 0)
goto error;
webcam_strings[STRING_DESCRIPTION_IDX].id = ret;
webcam_config_driver.iConfiguration = ret;
/* Register our configuration. */
if ((ret = usb_add_config(cdev, &webcam_config_driver,
webcam_config_bind)) < 0)
goto error;
INFO(cdev, "Webcam Video Gadget\n");
return 0;
error:
webcam_unbind(cdev);
return ret;
}
/* --------------------------------------------------------------------------
* Driver
*/
static struct usb_composite_driver webcam_driver = {
.name = "g_webcam",
.dev = &webcam_device_descriptor,
.strings = webcam_device_strings,
.unbind = webcam_unbind,
};
static int __init
webcam_init(void)
{
return usb_composite_probe(&webcam_driver, webcam_bind);
}
static void __exit
webcam_cleanup(void)
{
usb_composite_unregister(&webcam_driver);
}
module_init(webcam_init);
module_exit(webcam_cleanup);
MODULE_AUTHOR("Laurent Pinchart");
MODULE_DESCRIPTION("Webcam Video Gadget");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.0");
| gpl-2.0 |
pRiVi/linux | fs/devpts/inode.c | 2857 | 13935 | /* -*- linux-c -*- --------------------------------------------------------- *
*
* linux/fs/devpts/inode.c
*
* Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* ------------------------------------------------------------------------- */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/tty.h>
#include <linux/mutex.h>
#include <linux/magic.h>
#include <linux/idr.h>
#include <linux/devpts_fs.h>
#include <linux/parser.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
#define DEVPTS_DEFAULT_MODE 0600
/*
* ptmx is a new node in /dev/pts and will be unused in legacy (single-
* instance) mode. To prevent surprises in user space, set permissions of
* ptmx to 0. Use 'chmod' or remount with '-o ptmxmode' to set meaningful
* permissions.
*/
#define DEVPTS_DEFAULT_PTMX_MODE 0000
#define PTMX_MINOR 2
extern int pty_limit; /* Config limit on Unix98 ptys */
static DEFINE_MUTEX(allocated_ptys_lock);
static struct vfsmount *devpts_mnt;
struct pts_mount_opts {
int setuid;
int setgid;
uid_t uid;
gid_t gid;
umode_t mode;
umode_t ptmxmode;
int newinstance;
};
enum {
Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance,
Opt_err
};
static const match_table_t tokens = {
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_mode, "mode=%o"},
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
{Opt_ptmxmode, "ptmxmode=%o"},
{Opt_newinstance, "newinstance"},
#endif
{Opt_err, NULL}
};
struct pts_fs_info {
struct ida allocated_ptys;
struct pts_mount_opts mount_opts;
struct dentry *ptmx_dentry;
};
static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
static inline struct super_block *pts_sb_from_inode(struct inode *inode)
{
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
return inode->i_sb;
#endif
return devpts_mnt->mnt_sb;
}
#define PARSE_MOUNT 0
#define PARSE_REMOUNT 1
/*
* parse_mount_options():
* Set @opts to mount options specified in @data. If an option is not
* specified in @data, set it to its default value. The exception is
* 'newinstance' option which can only be set/cleared on a mount (i.e.
* cannot be changed during remount).
*
* Note: @data may be NULL (in which case all options are set to default).
*/
static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
{
char *p;
opts->setuid = 0;
opts->setgid = 0;
opts->uid = 0;
opts->gid = 0;
opts->mode = DEVPTS_DEFAULT_MODE;
opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
/* newinstance makes sense only on initial mount */
if (op == PARSE_MOUNT)
opts->newinstance = 0;
while ((p = strsep(&data, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
int option;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
return -EINVAL;
opts->uid = option;
opts->setuid = 1;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return -EINVAL;
opts->gid = option;
opts->setgid = 1;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->mode = option & S_IALLUGO;
break;
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
case Opt_ptmxmode:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->ptmxmode = option & S_IALLUGO;
break;
case Opt_newinstance:
/* newinstance makes sense only on initial mount */
if (op == PARSE_MOUNT)
opts->newinstance = 1;
break;
#endif
default:
printk(KERN_ERR "devpts: called with bogus options\n");
return -EINVAL;
}
}
return 0;
}
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
static int mknod_ptmx(struct super_block *sb)
{
int mode;
int rc = -ENOMEM;
struct dentry *dentry;
struct inode *inode;
struct dentry *root = sb->s_root;
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
mutex_lock(&root->d_inode->i_mutex);
/* If we have already created the ptmx node, return */
if (fsi->ptmx_dentry) {
rc = 0;
goto out;
}
dentry = d_alloc_name(root, "ptmx");
if (!dentry) {
printk(KERN_NOTICE "Unable to alloc dentry for ptmx node\n");
goto out;
}
/*
* Create a new 'ptmx' node in this mount of devpts.
*/
inode = new_inode(sb);
if (!inode) {
printk(KERN_ERR "Unable to alloc inode for ptmx node\n");
dput(dentry);
goto out;
}
inode->i_ino = 2;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
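/* Character device 5:2 (TTYAUX_MAJOR, PTMX_MINOR) is the pty multiplexer. */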
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
d_add(dentry, inode);
fsi->ptmx_dentry = dentry;
rc = 0;
out:
mutex_unlock(&root->d_inode->i_mutex);
return rc;
}
static void update_ptmx_mode(struct pts_fs_info *fsi)
{
struct inode *inode;
if (fsi->ptmx_dentry) {
inode = fsi->ptmx_dentry->d_inode;
inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
}
}
#else
static inline void update_ptmx_mode(struct pts_fs_info *fsi)
{
return;
}
#endif
static int devpts_remount(struct super_block *sb, int *flags, char *data)
{
int err;
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
err = parse_mount_options(data, PARSE_REMOUNT, opts);
/*
* parse_mount_options() restores options to default values
* before parsing and may have changed ptmxmode. So, update the
* mode in the inode too. Bogus options don't fail the remount,
* so do this even on error return.
*/
update_ptmx_mode(fsi);
return err;
}
static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
if (opts->setuid)
seq_printf(seq, ",uid=%u", opts->uid);
if (opts->setgid)
seq_printf(seq, ",gid=%u", opts->gid);
seq_printf(seq, ",mode=%03o", opts->mode);
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
#endif
return 0;
}
static const struct super_operations devpts_sops = {
.statfs = simple_statfs,
.remount_fs = devpts_remount,
.show_options = devpts_show_options,
};
static void *new_pts_fs_info(void)
{
struct pts_fs_info *fsi;
fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
if (!fsi)
return NULL;
ida_init(&fsi->allocated_ptys);
fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
return fsi;
}
static int
devpts_fill_super(struct super_block *s, void *data, int silent)
{
struct inode *inode;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = DEVPTS_SUPER_MAGIC;
s->s_op = &devpts_sops;
s->s_time_gran = 1;
s->s_fs_info = new_pts_fs_info();
if (!s->s_fs_info)
goto fail;
inode = new_inode(s);
if (!inode)
goto free_fsi;
inode->i_ino = 1;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_nlink = 2;
s->s_root = d_alloc_root(inode);
if (s->s_root)
return 0;
printk(KERN_ERR "devpts: get root dentry failed\n");
iput(inode);
free_fsi:
kfree(s->s_fs_info);
fail:
return -ENOMEM;
}
#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
static int compare_init_pts_sb(struct super_block *s, void *p)
{
if (devpts_mnt)
return devpts_mnt->mnt_sb == s;
return 0;
}
/*
* devpts_mount()
*
* If the '-o newinstance' mount option was specified, mount a new
* (private) instance of devpts. PTYs created in this instance are
* independent of the PTYs in other devpts instances.
*
* If the '-o newinstance' option was not specified, mount/remount the
* initial kernel mount of devpts. This type of mount gives the
* legacy, single-instance semantics.
*
* The 'newinstance' option is needed to support multiple namespace
* semantics in devpts while preserving backward compatibility of the
* current 'single-namespace' semantics. i.e all mounts of devpts
* without the 'newinstance' mount option should bind to the initial
* kernel mount, like mount_single().
*
* Mounts with 'newinstance' option create a new, private namespace.
*
* NOTE:
*
* For single-mount semantics, devpts cannot use mount_single(),
* because mount_single()/sget() find and use the super-block from
* the most recent mount of devpts. But that recent mount may be a
* 'newinstance' mount and mount_single() would pick the newinstance
* super-block instead of the initial super-block.
*/
static struct dentry *devpts_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
int error;
struct pts_mount_opts opts;
struct super_block *s;
error = parse_mount_options(data, PARSE_MOUNT, &opts);
if (error)
return ERR_PTR(error);
if (opts.newinstance)
s = sget(fs_type, NULL, set_anon_super, NULL);
else
s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
if (!s->s_root) {
s->s_flags = flags;
error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (error)
goto out_undo_sget;
s->s_flags |= MS_ACTIVE;
}
memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
error = mknod_ptmx(s);
if (error)
goto out_undo_sget;
return dget(s->s_root);
out_undo_sget:
deactivate_locked_super(s);
return ERR_PTR(error);
}
#else
/*
* This supports only the legacy single-instance semantics (no
* multiple-instance semantics)
*/
static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
return mount_single(fs_type, flags, data, devpts_fill_super);
}
#endif
static void devpts_kill_sb(struct super_block *sb)
{
struct pts_fs_info *fsi = DEVPTS_SB(sb);
kfree(fsi);
kill_litter_super(sb);
}
static struct file_system_type devpts_fs_type = {
.name = "devpts",
.mount = devpts_mount,
.kill_sb = devpts_kill_sb,
};
/*
* The normal naming convention is simply /dev/pts/<number>; this conforms
* to the System V naming convention
*/
int devpts_new_index(struct inode *ptmx_inode)
{
struct super_block *sb = pts_sb_from_inode(ptmx_inode);
struct pts_fs_info *fsi = DEVPTS_SB(sb);
int index;
int ida_ret;
retry:
if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&allocated_ptys_lock);
ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
if (ida_ret < 0) {
mutex_unlock(&allocated_ptys_lock);
if (ida_ret == -EAGAIN)
goto retry;
return -EIO;
}
if (index >= pty_limit) {
ida_remove(&fsi->allocated_ptys, index);
mutex_unlock(&allocated_ptys_lock);
return -EIO;
}
mutex_unlock(&allocated_ptys_lock);
return index;
}
void devpts_kill_index(struct inode *ptmx_inode, int idx)
{
struct super_block *sb = pts_sb_from_inode(ptmx_inode);
struct pts_fs_info *fsi = DEVPTS_SB(sb);
mutex_lock(&allocated_ptys_lock);
ida_remove(&fsi->allocated_ptys, idx);
mutex_unlock(&allocated_ptys_lock);
}
int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
{
/* tty layer puts index from devpts_new_index() in here */
int number = tty->index;
struct tty_driver *driver = tty->driver;
dev_t device = MKDEV(driver->major, driver->minor_start+number);
struct dentry *dentry;
struct super_block *sb = pts_sb_from_inode(ptmx_inode);
struct inode *inode = new_inode(sb);
struct dentry *root = sb->s_root;
struct pts_fs_info *fsi = DEVPTS_SB(sb);
struct pts_mount_opts *opts = &fsi->mount_opts;
int ret = 0;
char s[12];
/* We're supposed to be given the slave end of a pty */
BUG_ON(driver->type != TTY_DRIVER_TYPE_PTY);
BUG_ON(driver->subtype != PTY_TYPE_SLAVE);
if (!inode)
return -ENOMEM;
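/* Inode 1 is the devpts root and 2 is ptmx, so slave ptys start at 3. */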
inode->i_ino = number + 3;
inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
init_special_inode(inode, S_IFCHR|opts->mode, device);
inode->i_private = tty;
tty->driver_data = inode;
sprintf(s, "%d", number);
mutex_lock(&root->d_inode->i_mutex);
dentry = d_alloc_name(root, s);
if (dentry) {
d_add(dentry, inode);
fsnotify_create(root->d_inode, dentry);
} else {
iput(inode);
ret = -ENOMEM;
}
mutex_unlock(&root->d_inode->i_mutex);
return ret;
}
struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number)
{
struct dentry *dentry;
struct tty_struct *tty;
BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
/* Ensure dentry has not been deleted by devpts_pty_kill() */
dentry = d_find_alias(pts_inode);
if (!dentry)
return NULL;
tty = NULL;
if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
tty = (struct tty_struct *)pts_inode->i_private;
dput(dentry);
return tty;
}
void devpts_pty_kill(struct tty_struct *tty)
{
struct inode *inode = tty->driver_data;
struct super_block *sb = pts_sb_from_inode(inode);
struct dentry *root = sb->s_root;
struct dentry *dentry;
BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
mutex_lock(&root->d_inode->i_mutex);
dentry = d_find_alias(inode);
inode->i_nlink--;
d_delete(dentry);
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
dput(dentry); /* d_find_alias above */
mutex_unlock(&root->d_inode->i_mutex);
}
static int __init init_devpts_fs(void)
{
int err = register_filesystem(&devpts_fs_type);
if (!err) {
devpts_mnt = kern_mount(&devpts_fs_type);
if (IS_ERR(devpts_mnt)) {
err = PTR_ERR(devpts_mnt);
unregister_filesystem(&devpts_fs_type);
}
}
return err;
}
module_init(init_devpts_fs)
| gpl-2.0 |
faux123/asus-tf101-hc-kernel | drivers/net/wan/sealevel.c | 3625 | 8142 | /*
* Sealevel Systems 4021 driver.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* (c) Copyright 1999, 2001 Alan Cox
* (c) Copyright 2001 Red Hat Inc.
* Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include "z85230.h"
struct slvl_device
{
struct z8530_channel *chan;
int channel;
};
struct slvl_board
{
struct slvl_device dev[2];
struct z8530_dev board;
int iobase;
};
/*
* Network driver support routines
*/
static inline struct slvl_device* dev_to_chan(struct net_device *dev)
{
return (struct slvl_device *)dev_to_hdlc(dev)->priv;
}
/*
* Frame receive. Simple for our card as we do HDLC and there
* is no funny garbage involved
*/
static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
{
/* Drop the CRC - it's not a good idea to try and negotiate it ;) */
skb_trim(skb, skb->len - 2);
skb->protocol = hdlc_type_trans(skb, c->netdevice);
skb_reset_mac_header(skb);
skb->dev = c->netdevice;
netif_rx(skb);
}
/*
* We've been placed in the UP state
*/
static int sealevel_open(struct net_device *d)
{
struct slvl_device *slvl = dev_to_chan(d);
int err = -1;
int unit = slvl->channel;
/*
* Link layer up.
*/
switch (unit) {
case 0:
err = z8530_sync_dma_open(d, slvl->chan);
break;
case 1:
err = z8530_sync_open(d, slvl->chan);
break;
}
if (err)
return err;
err = hdlc_open(d);
if (err) {
switch (unit) {
case 0:
z8530_sync_dma_close(d, slvl->chan);
break;
case 1:
z8530_sync_close(d, slvl->chan);
break;
}
return err;
}
slvl->chan->rx_function = sealevel_input;
/*
* Go go go
*/
netif_start_queue(d);
return 0;
}
static int sealevel_close(struct net_device *d)
{
struct slvl_device *slvl = dev_to_chan(d);
int unit = slvl->channel;
/*
* Discard new frames
*/
slvl->chan->rx_function = z8530_null_rx;
hdlc_close(d);
netif_stop_queue(d);
switch (unit) {
case 0:
z8530_sync_dma_close(d, slvl->chan);
break;
case 1:
z8530_sync_close(d, slvl->chan);
break;
}
return 0;
}
static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
/* struct slvl_device *slvl=dev_to_chan(d);
z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
return hdlc_ioctl(d, ifr, cmd);
}
/*
* Passed network frames, fire them downwind.
*/
static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
struct net_device *d)
{
return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}
static int sealevel_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
return 0;
return -EINVAL;
}
static const struct net_device_ops sealevel_ops = {
.ndo_open = sealevel_open,
.ndo_stop = sealevel_close,
.ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = sealevel_ioctl,
};
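/*
 * Allocate and register one generic HDLC network device for a single
 * Z8530 channel of the board.
 */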
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
struct net_device *dev = alloc_hdlcdev(sv);
if (!dev)
return -1;
dev_to_hdlc(dev)->attach = sealevel_attach;
dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
dev->netdev_ops = &sealevel_ops;
dev->base_addr = iobase;
dev->irq = irq;
if (register_hdlc_device(dev)) {
printk(KERN_ERR "sealevel: unable to register HDLC device\n");
free_netdev(dev);
return -1;
}
sv->chan->netdevice = dev;
return 0;
}
/*
* Allocate and setup Sealevel board.
*/
static __init struct slvl_board *slvl_init(int iobase, int irq,
int txdma, int rxdma, int slow)
{
struct z8530_dev *dev;
struct slvl_board *b;
/*
* Get the needed I/O space
*/
if (!request_region(iobase, 8, "Sealevel 4021")) {
printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
iobase);
return NULL;
}
b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
if (!b)
goto err_kzalloc;
b->dev[0].chan = &b->board.chanA;
b->dev[0].channel = 0;
b->dev[1].chan = &b->board.chanB;
b->dev[1].channel = 1;
dev = &b->board;
/*
* Stuff in the I/O addressing
*/
dev->active = 0;
b->iobase = iobase;
/*
* Select 8530 delays for the old board
*/
if (slow)
iobase |= Z8530_PORT_SLEEP;
dev->chanA.ctrlio = iobase + 1;
dev->chanA.dataio = iobase;
dev->chanB.ctrlio = iobase + 3;
dev->chanB.dataio = iobase + 2;
dev->chanA.irqs = &z8530_nop;
dev->chanB.irqs = &z8530_nop;
/*
* Assert DTR enable DMA
*/
outb(3 | (1 << 7), b->iobase + 4);
/* We want a fast IRQ for this device. Actually we'd like an even faster
IRQ ;) - This is one driver RtLinux is made for */
if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
"SeaLevel", dev) < 0) {
printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
goto err_request_irq;
}
dev->irq = irq;
dev->chanA.private = &b->dev[0];
dev->chanB.private = &b->dev[1];
dev->chanA.dev = dev;
dev->chanB.dev = dev;
dev->chanA.txdma = 3;
dev->chanA.rxdma = 1;
if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
goto err_dma_tx;
if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
goto err_dma_rx;
disable_irq(irq);
/*
* Begin normal initialise
*/
if (z8530_init(dev) != 0) {
printk(KERN_ERR "Z8530 series device not found.\n");
enable_irq(irq);
goto free_hw;
}
if (dev->type == Z85C30) {
z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
} else {
z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
}
/*
* Now we can take the IRQ
*/
enable_irq(irq);
if (slvl_setup(&b->dev[0], iobase, irq))
goto free_hw;
if (slvl_setup(&b->dev[1], iobase, irq))
goto free_netdev0;
z8530_describe(dev, "I/O", iobase);
dev->active = 1;
return b;
free_netdev0:
unregister_hdlc_device(b->dev[0].chan->netdevice);
free_netdev(b->dev[0].chan->netdevice);
free_hw:
free_dma(dev->chanA.rxdma);
err_dma_rx:
free_dma(dev->chanA.txdma);
err_dma_tx:
free_irq(irq, dev);
err_request_irq:
kfree(b);
err_kzalloc:
release_region(iobase, 8);
return NULL;
}
static void __exit slvl_shutdown(struct slvl_board *b)
{
int u;
z8530_shutdown(&b->board);
for (u = 0; u < 2; u++) {
struct net_device *d = b->dev[u].chan->netdevice;
unregister_hdlc_device(d);
free_netdev(d);
}
free_irq(b->board.irq, &b->board);
free_dma(b->board.chanA.rxdma);
free_dma(b->board.chanA.txdma);
/* DMA off on the card, drop DTR */
outb(0, b->iobase);
release_region(b->iobase, 8);
kfree(b);
}
static int io = 0x238;
static int txdma = 1;
static int rxdma = 3;
static int irq = 5;
static int slow = 0;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
module_param(txdma, int, 0);
MODULE_PARM_DESC(txdma, "Transmit DMA channel");
module_param(rxdma, int, 0);
MODULE_PARM_DESC(rxdma, "Receive DMA channel");
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
module_param(slow, bool, 0);
MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
MODULE_AUTHOR("Alan Cox");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
static struct slvl_board *slvl_unit;
static int __init slvl_init_module(void)
{
slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
return slvl_unit ? 0 : -ENODEV;
}
static void __exit slvl_cleanup_module(void)
{
if (slvl_unit)
slvl_shutdown(slvl_unit);
}
module_init(slvl_init_module);
module_exit(slvl_cleanup_module);
| gpl-2.0 |
Snuzzo/vigor_mofokernel | arch/powerpc/platforms/pseries/firmware.c | 7721 | 2613 | /*
* pSeries firmware setup code.
*
* Portions from arch/powerpc/platforms/pseries/setup.c:
* Copyright (C) 1995 Linus Torvalds
* Adapted from 'alpha' version by Gary Thomas
* Modified by Cort Dougan (cort@cs.nmt.edu)
* Modified by PPC64 Team, IBM Corp
*
* Portions from arch/powerpc/kernel/firmware.c
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
* Copyright (C) 2005 Stephen Rothwell, IBM Corporation
*
* Copyright 2006 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include "pseries.h"
typedef struct {
unsigned long val;
char *name;
} firmware_feature_t;
static __initdata firmware_feature_t
firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
{FW_FEATURE_DABR, "hcall-dabr"},
{FW_FEATURE_COPY, "hcall-copy"},
{FW_FEATURE_ASR, "hcall-asr"},
{FW_FEATURE_DEBUG, "hcall-debug"},
{FW_FEATURE_PERF, "hcall-perf"},
{FW_FEATURE_DUMP, "hcall-dump"},
{FW_FEATURE_INTERRUPT, "hcall-interrupt"},
{FW_FEATURE_MIGRATE, "hcall-migrate"},
{FW_FEATURE_PERFMON, "hcall-perfmon"},
{FW_FEATURE_CRQ, "hcall-crq"},
{FW_FEATURE_VIO, "hcall-vio"},
{FW_FEATURE_RDMA, "hcall-rdma"},
{FW_FEATURE_LLAN, "hcall-lLAN"},
{FW_FEATURE_BULK_REMOVE, "hcall-bulk"},
{FW_FEATURE_XDABR, "hcall-xdabr"},
{FW_FEATURE_MULTITCE, "hcall-multi-tce"},
{FW_FEATURE_SPLPAR, "hcall-splpar"},
{FW_FEATURE_VPHN, "hcall-vphn"},
};
/* Build up the firmware features bitmask using the contents of
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
void __init fw_feature_init(const char *hypertas, unsigned long len)
{
const char *s;
int i;
pr_debug(" -> fw_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
/* check value against table of strings */
if (!firmware_features_table[i].name ||
strcmp(firmware_features_table[i].name, s))
continue;
/* we have a match */
powerpc_firmware_features |=
firmware_features_table[i].val;
break;
}
}
pr_debug(" <- fw_feature_init()\n");
}
| gpl-2.0 |
TeamExodus/kernel_lge_hammerhead | fs/jffs2/gc.c | 8745 | 43976 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"
#include "compr.h"
static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_raw_node_ref *raw);
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);
/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *ret;
struct list_head *nextlist = NULL;
int n = jiffies % 128;
/* Pick an eraseblock to garbage collect next. This is where we'll
put the clever wear-levelling algorithms. Eventually. */
/* We possibly want to favour the dirtier blocks more when the
number of free blocks is low. */
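/* 'n' above is a cheap pseudo-random value in [0,127]; the thresholds
 below weight the choice between the candidate block lists. */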
again:
if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
nextlist = &c->bad_used_list;
} else if (n < 50 && !list_empty(&c->erasable_list)) {
/* Note that most of them will have gone directly to be erased.
So don't favour the erasable_list _too_ much. */
jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
nextlist = &c->erasable_list;
} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
/* Most of the time, pick one off the very_dirty list */
jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
nextlist = &c->very_dirty_list;
} else if (n < 126 && !list_empty(&c->dirty_list)) {
jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
nextlist = &c->dirty_list;
} else if (!list_empty(&c->clean_list)) {
jffs2_dbg(1, "Picking block from clean_list to GC next\n");
nextlist = &c->clean_list;
} else if (!list_empty(&c->dirty_list)) {
jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");
nextlist = &c->dirty_list;
} else if (!list_empty(&c->very_dirty_list)) {
jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
nextlist = &c->very_dirty_list;
} else if (!list_empty(&c->erasable_list)) {
jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");
nextlist = &c->erasable_list;
} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
/* There are blocks waiting for the wbuf sync */
jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
spin_unlock(&c->erase_completion_lock);
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
goto again;
} else {
/* Eep. All were empty */
jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
return NULL;
}
ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
list_del(&ret->list);
c->gcblock = ret;
ret->gc_node = ret->first_node;
if (!ret->gc_node) {
pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n",
ret->offset);
BUG();
}
/* Have we accidentally picked a clean block with wasted space ? */
if (ret->wasted_size) {
jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
ret->wasted_size);
ret->dirty_size += ret->wasted_size;
c->wasted_size -= ret->wasted_size;
c->dirty_size += ret->wasted_size;
ret->wasted_size = 0;
}
return ret;
}
/* jffs2_garbage_collect_pass
* Make a single attempt to progress GC. Move one node, and possibly
* start erasing one eraseblock.
*/
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
struct jffs2_inode_info *f;
struct jffs2_inode_cache *ic;
struct jffs2_eraseblock *jeb;
struct jffs2_raw_node_ref *raw;
uint32_t gcblock_dirty;
int ret = 0, inum, nlink;
int xattr = 0;
if (mutex_lock_interruptible(&c->alloc_sem))
return -EINTR;
for (;;) {
spin_lock(&c->erase_completion_lock);
if (!c->unchecked_size)
break;
/* We can't start doing GC yet. We haven't finished checking
the node CRCs etc. Do it now. */
/* checked_ino is protected by the alloc_sem */
if (c->checked_ino > c->highest_ino && xattr) {
pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
c->unchecked_size);
jffs2_dbg_dump_block_lists_nolock(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
spin_unlock(&c->erase_completion_lock);
if (!xattr)
xattr = jffs2_verify_xattr(c);
spin_lock(&c->inocache_lock);
ic = jffs2_get_ino_cache(c, c->checked_ino++);
if (!ic) {
spin_unlock(&c->inocache_lock);
continue;
}
if (!ic->pino_nlink) {
jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
ic->ino);
spin_unlock(&c->inocache_lock);
jffs2_xattr_delete_inode(c, ic);
continue;
}
switch (ic->state) {
case INO_STATE_CHECKEDABSENT:
case INO_STATE_PRESENT:
jffs2_dbg(1, "Skipping ino #%u already checked\n",
ic->ino);
spin_unlock(&c->inocache_lock);
continue;
case INO_STATE_GC:
case INO_STATE_CHECKING:
pr_warn("Inode #%u is in state %d during CRC check phase!\n",
ic->ino, ic->state);
spin_unlock(&c->inocache_lock);
BUG();
case INO_STATE_READING:
/* We need to wait for it to finish, lest we move on
and trigger the BUG() above while we haven't yet
finished checking all its nodes */
jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
ic->ino);
/* We need to come back again for the _same_ inode. We've
made no progress in this case, but that should be OK */
c->checked_ino--;
mutex_unlock(&c->alloc_sem);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
return 0;
default:
BUG();
case INO_STATE_UNCHECKED:
;
}
ic->state = INO_STATE_CHECKING;
spin_unlock(&c->inocache_lock);
jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
__func__, ic->ino);
ret = jffs2_do_crccheck_inode(c, ic);
if (ret)
pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n",
ic->ino);
jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
mutex_unlock(&c->alloc_sem);
return ret;
}
/* If there are any blocks which need erasing, erase them now */
if (!list_empty(&c->erase_complete_list) ||
!list_empty(&c->erase_pending_list)) {
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
if (jffs2_erase_pending_blocks(c, 1))
return 0;
jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
mutex_lock(&c->alloc_sem);
spin_lock(&c->erase_completion_lock);
}
/* First, work out which block we're garbage-collecting */
jeb = c->gcblock;
if (!jeb)
jeb = jffs2_find_gc_block(c);
if (!jeb) {
/* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
if (c->nr_erasing_blocks) {
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -EAGAIN;
}
jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n");
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
return -EIO;
}
jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
D1(if (c->nextblock)
printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
if (!jeb->used_size) {
mutex_unlock(&c->alloc_sem);
goto eraseit;
}
raw = jeb->gc_node;
gcblock_dirty = jeb->dirty_size;
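/* Skip nodes that are already obsolete; they need no copying. */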
while (ref_obsolete(raw)) {
jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
ref_offset(raw));
raw = ref_next(raw);
if (unlikely(!raw)) {
pr_warn("eep. End of raw list while still supposedly nodes to GC\n");
pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
jeb->offset, jeb->free_size,
jeb->dirty_size, jeb->used_size);
jeb->gc_node = raw;
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->alloc_sem);
BUG();
}
}
jeb->gc_node = raw;
jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
ref_offset(raw));
if (!raw->next_in_ino) {
/* Inode-less node. Clean marker, snapshot or something like that */
spin_unlock(&c->erase_completion_lock);
if (ref_flags(raw) == REF_PRISTINE) {
/* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
jffs2_garbage_collect_pristine(c, NULL, raw);
} else {
/* Just mark it obsolete */
jffs2_mark_node_obsolete(c, raw);
}
mutex_unlock(&c->alloc_sem);
goto eraseit_lock;
}
ic = jffs2_raw_ref_to_ic(raw);
#ifdef CONFIG_JFFS2_FS_XATTR
/* When 'ic' refers to an xattr_datum/xattr_ref, this node is GCed as an xattr.
* We can decide whether this node is an inode or an xattr by ic->class. */
if (ic->class == RAWNODE_CLASS_XATTR_DATUM
|| ic->class == RAWNODE_CLASS_XATTR_REF) {
spin_unlock(&c->erase_completion_lock);
if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
} else {
ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
}
goto test_gcnode;
}
#endif
/* We need to hold the inocache. Either the erase_completion_lock or
the inocache_lock is sufficient; we trade down since the inocache_lock
causes less contention. */
spin_lock(&c->inocache_lock);
spin_unlock(&c->erase_completion_lock);
jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
__func__, jeb->offset, ref_offset(raw), ref_flags(raw),
ic->ino);
/* Three possibilities:
1. Inode is already in-core. We must iget it and do proper
updating to its fragtree, etc.
2. Inode is not in-core, node is REF_PRISTINE. We lock the
inocache to prevent a read_inode(), copy the node intact.
3. Inode is not in-core, node is not pristine. We must iget()
and take the slow path.
*/
switch(ic->state) {
case INO_STATE_CHECKEDABSENT:
/* It's been checked, but it's not currently in-core.
We can just copy any pristine nodes, but have
to prevent anyone else from doing read_inode() while
we're at it, so we set the state accordingly */
if (ref_flags(raw) == REF_PRISTINE)
ic->state = INO_STATE_GC;
else {
jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
ic->ino);
}
break;
case INO_STATE_PRESENT:
/* It's in-core. GC must iget() it. */
break;
case INO_STATE_UNCHECKED:
case INO_STATE_CHECKING:
case INO_STATE_GC:
/* Should never happen. We should have finished checking
by the time we actually start doing any GC, and since
we're holding the alloc_sem, no other garbage collection
can happen.
*/
pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
ic->ino, ic->state);
mutex_unlock(&c->alloc_sem);
spin_unlock(&c->inocache_lock);
BUG();
case INO_STATE_READING:
/* Someone's currently trying to read it. We must wait for
them to finish and then go through the full iget() route
to do the GC. However, sometimes read_inode() needs to get
the alloc_sem() (for marking nodes invalid) so we must
drop the alloc_sem before sleeping. */
mutex_unlock(&c->alloc_sem);
jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
__func__, ic->ino, ic->state);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
/* And because we dropped the alloc_sem we must start again from the
beginning. Ponder chance of livelock here -- we're returning success
without actually making any progress.
Q: What are the chances that the inode is back in INO_STATE_READING
again by the time we next enter this function? And that this happens
enough times to cause a real delay?
A: Small enough that I don't care :)
*/
return 0;
}
/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
node intact, and we don't have to muck about with the fragtree etc.
because we know it's not in-core. If it _was_ in-core, we go through
all the iget() crap anyway */
if (ic->state == INO_STATE_GC) {
spin_unlock(&c->inocache_lock);
ret = jffs2_garbage_collect_pristine(c, ic, raw);
spin_lock(&c->inocache_lock);
ic->state = INO_STATE_CHECKEDABSENT;
wake_up(&c->inocache_wq);
if (ret != -EBADFD) {
spin_unlock(&c->inocache_lock);
goto test_gcnode;
}
/* Fall through if it wanted us to, with inocache_lock held */
}
/* Prevent the fairly unlikely race where the gcblock is
entirely obsoleted by the final close of a file which had
the only valid nodes in the block, followed by erasure,
followed by freeing of the ic because the erased block(s)
held _all_ the nodes of that inode.... never been seen but
it's vaguely possible. */
inum = ic->ino;
nlink = ic->pino_nlink;
spin_unlock(&c->inocache_lock);
f = jffs2_gc_fetch_inode(c, inum, !nlink);
if (IS_ERR(f)) {
ret = PTR_ERR(f);
goto release_sem;
}
if (!f) {
ret = 0;
goto release_sem;
}
ret = jffs2_garbage_collect_live(c, jeb, raw, f);
jffs2_gc_release_inode(c, f);
test_gcnode:
if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
/* Eep. This really should never happen. GC is broken */
pr_err("Error garbage collecting node at %08x!\n",
ref_offset(jeb->gc_node));
ret = -ENOSPC;
}
release_sem:
mutex_unlock(&c->alloc_sem);
eraseit_lock:
/* If we've finished this block, start it erasing */
spin_lock(&c->erase_completion_lock);
eraseit:
if (c->gcblock && !c->gcblock->used_size) {
jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
c->gcblock->offset);
/* We're GC'ing an empty block? */
list_add_tail(&c->gcblock->list, &c->erase_pending_list);
c->gcblock = NULL;
c->nr_erasing_blocks++;
jffs2_garbage_collect_trigger(c);
}
spin_unlock(&c->erase_completion_lock);
return ret;
}
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
struct jffs2_node_frag *frag;
struct jffs2_full_dnode *fn = NULL;
struct jffs2_full_dirent *fd;
uint32_t start = 0, end = 0, nrfrags = 0;
int ret = 0;
mutex_lock(&f->sem);
/* Now we have the lock for this inode. Check that it's still the one at the head
of the list. */
spin_lock(&c->erase_completion_lock);
if (c->gcblock != jeb) {
spin_unlock(&c->erase_completion_lock);
jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
goto upnout;
}
if (ref_obsolete(raw)) {
spin_unlock(&c->erase_completion_lock);
jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
/* They'll call again */
goto upnout;
}
spin_unlock(&c->erase_completion_lock);
/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
if (f->metadata && f->metadata->raw == raw) {
fn = f->metadata;
ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
goto upnout;
}
/* FIXME. Read node and do lookup? */
for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
if (frag->node && frag->node->raw == raw) {
fn = frag->node;
end = frag->ofs + frag->size;
if (!nrfrags++)
start = frag->ofs;
if (nrfrags == frag->node->frags)
break; /* We've found them all */
}
}
if (fn) {
if (ref_flags(raw) == REF_PRISTINE) {
ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
if (!ret) {
/* Urgh. Return it sensibly. */
frag->node->raw = f->inocache->nodes;
}
if (ret != -EBADFD)
goto upnout;
}
/* We found a datanode. Do the GC */
if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
/* It crosses a page boundary. Therefore, it must be a hole. */
ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
} else {
/* It could still be a hole. But we GC the page this way anyway */
ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
}
goto upnout;
}
/* Wasn't a dnode. Try dirent */
for (fd = f->dents; fd; fd=fd->next) {
if (fd->raw == raw)
break;
}
if (fd && fd->ino) {
ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
} else if (fd) {
ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
} else {
pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n",
ref_offset(raw), f->inocache->ino);
if (ref_obsolete(raw)) {
pr_warn("But it's obsolete so we don't mind too much\n");
} else {
jffs2_dbg_dump_node(c, ref_offset(raw));
BUG();
}
}
upnout:
mutex_unlock(&f->sem);
return ret;
}
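/*
 * Illustrative sketch, not part of this file: the page-boundary test in
 * jffs2_garbage_collect_live() above concludes that a node whose range spans
 * two pages can only be a hole node, because ordinary data nodes never cross
 * a page. Standalone user-space demo; the 4 KiB page size (shift of 12) is an
 * assumption made for the example only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

static int crosses_page_boundary(uint32_t start, uint32_t end)
{
	/* same comparison as above: page index of the first byte vs the last byte */
	return (start >> DEMO_PAGE_SHIFT) < ((end - 1) >> DEMO_PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", crosses_page_boundary(0x0ff0, 0x1010)); /* 1: hole path  */
	printf("%d\n", crosses_page_boundary(0x0100, 0x0200)); /* 0: dnode path */
	return 0;
}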
static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_raw_node_ref *raw)
{
union jffs2_node_union *node;
size_t retlen;
int ret;
uint32_t phys_ofs, alloclen;
uint32_t crc, rawlen;
int retried = 0;
jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
ref_offset(raw));
alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
/* Ask for a small amount of space (or the totlen if smaller) because we
don't want to force wastage of the end of a block if splitting would
work. */
if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;
ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
/* 'rawlen' is not the exact summary size; it is only an upper estimate */
if (ret)
return ret;
if (alloclen < rawlen) {
/* Doesn't fit untouched. We'll go the old route and split it */
return -EBADFD;
}
node = kmalloc(rawlen, GFP_KERNEL);
if (!node)
return -ENOMEM;
ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
if (!ret && retlen != rawlen)
ret = -EIO;
if (ret)
goto out_node;
crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
if (je32_to_cpu(node->u.hdr_crc) != crc) {
pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
goto bail;
}
switch(je16_to_cpu(node->u.nodetype)) {
case JFFS2_NODETYPE_INODE:
crc = crc32(0, node, sizeof(node->i)-8);
if (je32_to_cpu(node->i.node_crc) != crc) {
pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ref_offset(raw), je32_to_cpu(node->i.node_crc),
crc);
goto bail;
}
if (je32_to_cpu(node->i.dsize)) {
crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
if (je32_to_cpu(node->i.data_crc) != crc) {
pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ref_offset(raw),
je32_to_cpu(node->i.data_crc), crc);
goto bail;
}
}
break;
case JFFS2_NODETYPE_DIRENT:
crc = crc32(0, node, sizeof(node->d)-8);
if (je32_to_cpu(node->d.node_crc) != crc) {
pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ref_offset(raw),
je32_to_cpu(node->d.node_crc), crc);
goto bail;
}
if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
ref_offset(raw));
goto bail;
}
if (node->d.nsize) {
crc = crc32(0, node->d.name, node->d.nsize);
if (je32_to_cpu(node->d.name_crc) != crc) {
pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
ref_offset(raw),
je32_to_cpu(node->d.name_crc), crc);
goto bail;
}
}
break;
default:
/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
if (ic) {
pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
ref_offset(raw), je16_to_cpu(node->u.nodetype));
goto bail;
}
}
/* OK, all the CRCs are good; this node can just be copied as-is. */
retry:
phys_ofs = write_ofs(c);
ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
if (ret || (retlen != rawlen)) {
pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
rawlen, phys_ofs, ret, retlen);
if (retlen) {
jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
} else {
pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
phys_ofs);
}
if (!retried) {
/* Try to reallocate space and retry */
uint32_t dummy;
struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];
retried = 1;
jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
/* this is not the exact summary size;
it is only an upper estimate */
if (!ret) {
jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
phys_ofs);
jffs2_dbg_acct_sanity_check(c,jeb);
jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
ret);
}
if (!ret)
ret = -EIO;
goto out_node;
}
jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
jffs2_mark_node_obsolete(c, raw);
jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
ref_offset(raw));
out_node:
kfree(node);
return ret;
bail:
ret = -EBADFD;
goto out_node;
}
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
struct jffs2_node_frag *last_frag;
union jffs2_device_node dev;
char *mdata = NULL;
int mdatalen = 0;
uint32_t alloclen, ilen;
int ret;
if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
S_ISCHR(JFFS2_F_I_MODE(f)) ) {
/* For these, we don't actually need to read the old node */
mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
mdata = (char *)&dev;
jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
__func__, mdatalen);
} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
mdatalen = fn->size;
mdata = kmalloc(fn->size, GFP_KERNEL);
if (!mdata) {
pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
return -ENOMEM;
}
ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
if (ret) {
pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
ret);
kfree(mdata);
return ret;
}
jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n",
__func__, mdatalen);
}
ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
sizeof(ri) + mdatalen, ret);
goto out;
}
last_frag = frag_last(&f->fragtree);
if (last_frag)
/* Fetch the inode length from the fragtree rather than
* from i_size since i_size may not have been updated yet */
ilen = last_frag->ofs + last_frag->size;
else
ilen = JFFS2_F_I_SIZE(f);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
ri.ino = cpu_to_je32(f->inocache->ino);
ri.version = cpu_to_je32(++f->highest_version);
ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
ri.isize = cpu_to_je32(ilen);
ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
ri.offset = cpu_to_je32(0);
ri.csize = cpu_to_je32(mdatalen);
ri.dsize = cpu_to_je32(mdatalen);
ri.compr = JFFS2_COMPR_NONE;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
if (IS_ERR(new_fn)) {
pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
ret = PTR_ERR(new_fn);
goto out;
}
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
f->metadata = new_fn;
out:
if (S_ISLNK(JFFS2_F_I_MODE(f)))
kfree(mdata);
return ret;
}
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
struct jffs2_full_dirent *new_fd;
struct jffs2_raw_dirent rd;
uint32_t alloclen;
int ret;
rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
rd.nsize = strlen(fd->name);
rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));
rd.pino = cpu_to_je32(f->inocache->ino);
rd.version = cpu_to_je32(++f->highest_version);
rd.ino = cpu_to_je32(fd->ino);
/* If the times on this inode were set by explicit utime() they can be different,
so refrain from splatting them. */
if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
else
rd.mctime = cpu_to_je32(0);
rd.type = fd->type;
rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
if (ret) {
pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
sizeof(rd)+rd.nsize, ret);
return ret;
}
new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
if (IS_ERR(new_fd)) {
pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n",
PTR_ERR(new_fd));
return PTR_ERR(new_fd);
}
jffs2_add_fd_to_list(c, new_fd, &f->dents);
return 0;
}
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
struct jffs2_full_dirent **fdp = &f->dents;
int found = 0;
/* On a medium where we can't actually mark nodes obsolete
permanently, such as NAND flash, we need to work out
whether this deletion dirent is still needed to actively
delete a 'real' dirent with the same name that's still
somewhere else on the flash. */
if (!jffs2_can_mark_obsolete(c)) {
struct jffs2_raw_dirent *rd;
struct jffs2_raw_node_ref *raw;
int ret;
size_t retlen;
int name_len = strlen(fd->name);
uint32_t name_crc = crc32(0, fd->name, name_len);
uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
rd = kmalloc(rawlen, GFP_KERNEL);
if (!rd)
return -ENOMEM;
/* Prevent the erase code from nicking the obsolete node refs while
we're looking at them. I really don't like this extra lock but
can't see any alternative. Suggestions on a postcard to... */
mutex_lock(&c->erase_free_sem);
for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
cond_resched();
/* We only care about obsolete ones */
if (!(ref_obsolete(raw)))
continue;
/* Any dirent with the same name is going to have the same length... */
if (ref_totlen(c, NULL, raw) != rawlen)
continue;
/* Doesn't matter if there's one in the same erase block. We're going to
delete it too at the same time. */
if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
continue;
jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
ref_offset(raw));
/* This is an obsolete node belonging to the same directory, and it's of the right
length. We need to take a closer look...*/
ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
if (ret) {
pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
__func__, ret, ref_offset(raw));
/* If we can't read it, we don't need to continue to obsolete it. Continue */
continue;
}
if (retlen != rawlen) {
pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
__func__, retlen, rawlen,
ref_offset(raw));
continue;
}
if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
continue;
/* If the name CRC doesn't match, skip */
if (je32_to_cpu(rd->name_crc) != name_crc)
continue;
/* If the name length doesn't match, or it's another deletion dirent, skip */
if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
continue;
/* OK, check the actual name now */
if (memcmp(rd->name, fd->name, name_len))
continue;
/* OK. The name really does match. There really is still an older node on
the flash which our deletion dirent obsoletes. So we have to write out
a new deletion dirent to replace it */
mutex_unlock(&c->erase_free_sem);
jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
ref_offset(fd->raw), fd->name,
ref_offset(raw), je32_to_cpu(rd->ino));
kfree(rd);
return jffs2_garbage_collect_dirent(c, jeb, f, fd);
}
mutex_unlock(&c->erase_free_sem);
kfree(rd);
}
/* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
we should update the metadata node with those times accordingly */
/* No need for it any more. Just mark it obsolete and remove it from the list */
while (*fdp) {
if ((*fdp) == fd) {
found = 1;
*fdp = fd->next;
break;
}
fdp = &(*fdp)->next;
}
if (!found) {
pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n",
fd->name, f->inocache->ino);
}
jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
return 0;
}
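/*
 * Illustrative sketch, not part of this file: the pointer-to-pointer walk
 * used above to unlink the deletion dirent from f->dents without
 * special-casing the list head. Standalone user-space demo over a minimal
 * singly-linked list.
 */
#include <stdio.h>

struct demo_node {
	int val;
	struct demo_node *next;
};

static int unlink_node(struct demo_node **head, struct demo_node *victim)
{
	struct demo_node **pp = head;

	while (*pp) {
		if (*pp == victim) {
			*pp = victim->next;	/* works for head and interior nodes alike */
			return 1;
		}
		pp = &(*pp)->next;
	}
	return 0;
}

int main(void)
{
	struct demo_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct demo_node *head = &a;

	printf("removed=%d head=%d\n", unlink_node(&head, &b), head->val);
	return 0;
}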
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end)
{
struct jffs2_raw_inode ri;
struct jffs2_node_frag *frag;
struct jffs2_full_dnode *new_fn;
uint32_t alloclen, ilen;
int ret;
jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
f->inocache->ino, start, end);
memset(&ri, 0, sizeof(ri));
if(fn->frags > 1) {
size_t readlen;
uint32_t crc;
/* It's partially obsoleted by a later write. So we have to
write it out again with the _same_ version as before */
ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
if (readlen != sizeof(ri) || ret) {
pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n",
ret, readlen);
goto fill;
}
if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
__func__, ref_offset(fn->raw),
je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
return -EIO;
}
if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
__func__, ref_offset(fn->raw),
je32_to_cpu(ri.totlen), sizeof(ri));
return -EIO;
}
crc = crc32(0, &ri, sizeof(ri)-8);
if (crc != je32_to_cpu(ri.node_crc)) {
pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
__func__, ref_offset(fn->raw),
je32_to_cpu(ri.node_crc), crc);
/* FIXME: We could possibly deal with this by writing new holes for each frag */
pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
start, end, f->inocache->ino);
goto fill;
}
if (ri.compr != JFFS2_COMPR_ZERO) {
pr_warn("%s(): Node 0x%08x wasn't a hole node!\n",
__func__, ref_offset(fn->raw));
pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
start, end, f->inocache->ino);
goto fill;
}
} else {
fill:
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri.totlen = cpu_to_je32(sizeof(ri));
ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
ri.ino = cpu_to_je32(f->inocache->ino);
ri.version = cpu_to_je32(++f->highest_version);
ri.offset = cpu_to_je32(start);
ri.dsize = cpu_to_je32(end - start);
ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
}
frag = frag_last(&f->fragtree);
if (frag)
/* Fetch the inode length from the fragtree rather than
* from i_size since i_size may not have been updated yet */
ilen = frag->ofs + frag->size;
else
ilen = JFFS2_F_I_SIZE(f);
ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
ri.isize = cpu_to_je32(ilen);
ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
ri.data_crc = cpu_to_je32(0);
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
sizeof(ri), ret);
return ret;
}
new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
if (IS_ERR(new_fn)) {
pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn));
return PTR_ERR(new_fn);
}
if (je32_to_cpu(ri.version) == f->highest_version) {
jffs2_add_full_dnode_to_inode(c, f, new_fn);
if (f->metadata) {
jffs2_mark_node_obsolete(c, f->metadata->raw);
jffs2_free_full_dnode(f->metadata);
f->metadata = NULL;
}
return 0;
}
/*
* We should only get here in the case where the node we are
* replacing had more than one frag, so we kept the same version
* number as before. (Except in case of error -- see 'goto fill;'
* above.)
*/
D1(if(unlikely(fn->frags <= 1)) {
pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
__func__, fn->frags, je32_to_cpu(ri.version),
f->highest_version, je32_to_cpu(ri.ino));
});
/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
mark_ref_normal(new_fn->raw);
for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
frag; frag = frag_next(frag)) {
if (frag->ofs > fn->size + fn->ofs)
break;
if (frag->node == fn) {
frag->node = new_fn;
new_fn->frags++;
fn->frags--;
}
}
if (fn->frags) {
pr_warn("%s(): Old node still has frags!\n", __func__);
BUG();
}
if (!new_fn->frags) {
pr_warn("%s(): New node has no frags!\n", __func__);
BUG();
}
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
return 0;
}
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
uint32_t start, uint32_t end)
{
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
uint32_t alloclen, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
unsigned long pg;
unsigned char *pg_ptr;
memset(&ri, 0, sizeof(ri));
jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
f->inocache->ino, start, end);
orig_end = end;
orig_start = start;
if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
/* Attempt to do some merging. But only expand to cover logically
adjacent frags if the block containing them is already considered
to be dirty. Otherwise we end up with GC just going round in
circles dirtying the nodes it already wrote out, especially
on NAND where we have small eraseblocks and hence a much higher
chance of nodes having to be split to cross boundaries. */
struct jffs2_node_frag *frag;
uint32_t min, max;
min = start & ~(PAGE_CACHE_SIZE-1);
max = min + PAGE_CACHE_SIZE;
frag = jffs2_lookup_node_frag(&f->fragtree, start);
/* BUG_ON(!frag) but that'll happen anyway... */
BUG_ON(frag->ofs != start);
/* First grow down... */
while((frag = frag_prev(frag)) && frag->ofs >= min) {
/* If the previous frag doesn't even reach the beginning, there's
excessive fragmentation. Just merge. */
if (frag->ofs > min) {
jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
frag->ofs, frag->ofs+frag->size);
start = frag->ofs;
continue;
}
/* OK. This frag holds the first byte of the page. */
if (!frag->node || !frag->node->raw) {
jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
frag->ofs, frag->ofs+frag->size);
break;
} else {
/* OK, it's a frag which extends to the beginning of the page. Does it live
in a block which is still considered clean? If so, don't obsolete it.
If not, cover it anyway. */
struct jffs2_raw_node_ref *raw = frag->node->raw;
struct jffs2_eraseblock *jeb;
jeb = &c->blocks[raw->flash_offset / c->sector_size];
if (jeb == c->gcblock) {
jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
frag->ofs,
frag->ofs + frag->size,
ref_offset(raw));
start = frag->ofs;
break;
}
if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
frag->ofs,
frag->ofs + frag->size,
jeb->offset);
break;
}
jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
frag->ofs,
frag->ofs + frag->size,
jeb->offset);
start = frag->ofs;
break;
}
}
/* ... then up */
/* Find last frag which is actually part of the node we're to GC. */
frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
/* If the next frag doesn't even reach the end of the page, there's lots
of fragmentation. Just merge. */
if (frag->ofs+frag->size < max) {
jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
frag->ofs, frag->ofs+frag->size);
end = frag->ofs + frag->size;
continue;
}
if (!frag->node || !frag->node->raw) {
jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
frag->ofs, frag->ofs+frag->size);
break;
} else {
/* OK, it's a frag which extends to the end of the page. Does it live
in a block which is still considered clean? If so, don't obsolete it.
If not, cover it anyway. */
struct jffs2_raw_node_ref *raw = frag->node->raw;
struct jffs2_eraseblock *jeb;
jeb = &c->blocks[raw->flash_offset / c->sector_size];
if (jeb == c->gcblock) {
jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
frag->ofs,
frag->ofs + frag->size,
ref_offset(raw));
end = frag->ofs + frag->size;
break;
}
if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
frag->ofs,
frag->ofs + frag->size,
jeb->offset);
break;
}
jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
frag->ofs,
frag->ofs + frag->size,
jeb->offset);
end = frag->ofs + frag->size;
break;
}
}
jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
orig_start, orig_end, start, end);
D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
BUG_ON(end < orig_end);
BUG_ON(start > orig_start);
}
/* First, use readpage() to read the appropriate page into the page cache */
/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
* triggered garbage collection in the first place?
* A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
* page OK. We'll actually write it out again in commit_write, which is a little
* suboptimal, but at least we're correct.
*/
pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
if (IS_ERR(pg_ptr)) {
pr_warn("read_cache_page() returned error: %ld\n",
PTR_ERR(pg_ptr));
return PTR_ERR(pg_ptr);
}
offset = start;
while(offset < orig_end) {
uint32_t datalen;
uint32_t cdatalen;
uint16_t comprtype = JFFS2_COMPR_NONE;
ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
&alloclen, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
break;
}
cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
datalen = end - offset;
writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
ri.ino = cpu_to_je32(f->inocache->ino);
ri.version = cpu_to_je32(++f->highest_version);
ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
ri.offset = cpu_to_je32(offset);
ri.csize = cpu_to_je32(cdatalen);
ri.dsize = cpu_to_je32(datalen);
ri.compr = comprtype & 0xff;
ri.usercompr = (comprtype >> 8) & 0xff;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);
jffs2_free_comprbuf(comprbuf, writebuf);
if (IS_ERR(new_fn)) {
pr_warn("Error writing new dnode: %ld\n",
PTR_ERR(new_fn));
ret = PTR_ERR(new_fn);
break;
}
ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
offset += datalen;
if (f->metadata) {
jffs2_mark_node_obsolete(c, f->metadata->raw);
jffs2_free_full_dnode(f->metadata);
f->metadata = NULL;
}
}
jffs2_gc_release_page(c, pg_ptr, &pg);
return ret;
}
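/*
 * Illustrative sketch, not part of this file: the page-aligned merge window
 * computed in jffs2_garbage_collect_dnode() above with
 * min = start & ~(PAGE_SIZE-1) and max = min + PAGE_SIZE. Standalone demo;
 * the 4 KiB page size is an assumption made for the example only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

int main(void)
{
	uint32_t start = 0x12345;
	uint32_t min = start & ~(DEMO_PAGE_SIZE - 1);	/* 0x12000: page containing start */
	uint32_t max = min + DEMO_PAGE_SIZE;		/* 0x13000: end of that page */

	printf("merge window for 0x%x: 0x%x..0x%x\n", start, min, max);
	return 0;
}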
| gpl-2.0 |
arcardinal/kernel_lge_g3 | drivers/media/common/saa7146_i2c.c | 9513 | 12683 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/saa7146_vv.h>
static u32 saa7146_i2c_func(struct i2c_adapter *adapter)
{
/* DEB_I2C("'%s'\n", adapter->name); */
return I2C_FUNC_I2C
| I2C_FUNC_SMBUS_QUICK
| I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE
| I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
}
/* this function returns the status-register of our i2c-device */
static inline u32 saa7146_i2c_status(struct saa7146_dev *dev)
{
u32 iicsta = saa7146_read(dev, I2C_STATUS);
/* DEB_I2C("status: 0x%08x\n", iicsta); */
return iicsta;
}
/* this function runs through the i2c-messages and prepares the data to be
sent through the saa7146. have a look at the specifications p. 122 ff
to understand this. it returns the number of u32s to send, or a
negative errno in case of an error. */
static int saa7146_i2c_msg_prepare(const struct i2c_msg *m, int num, __le32 *op)
{
int h1, h2;
int i, j, addr;
int mem = 0, op_count = 0;
/* first determine size of needed memory */
for(i = 0; i < num; i++) {
mem += m[i].len + 1;
}
/* worst case: we need one u32 for three bytes to be sent
plus one extra byte to address the device */
mem = 1 + ((mem-1) / 3);
/* we assume that op points to a buffer of at least
* SAA7146_I2C_MEM bytes. if we exceed this limit...
*/
if ((4 * mem) > SAA7146_I2C_MEM) {
/* DEB_I2C("cannot prepare i2c-message\n"); */
return -ENOMEM;
}
/* be careful: clear out the i2c-mem first */
memset(op,0,sizeof(__le32)*mem);
/* loop through all messages */
for(i = 0; i < num; i++) {
/* insert the address of the i2c-slave.
note: we get 7 bit i2c-addresses,
so we have to perform a translation */
addr = (m[i].addr*2) + ( (0 != (m[i].flags & I2C_M_RD)) ? 1 : 0);
h1 = op_count/3; h2 = op_count%3;
op[h1] |= cpu_to_le32( (u8)addr << ((3-h2)*8));
op[h1] |= cpu_to_le32(SAA7146_I2C_START << ((3-h2)*2));
op_count++;
/* loop through all bytes of message i */
for(j = 0; j < m[i].len; j++) {
/* insert the data bytes */
h1 = op_count/3; h2 = op_count%3;
op[h1] |= cpu_to_le32( (u32)((u8)m[i].buf[j]) << ((3-h2)*8));
op[h1] |= cpu_to_le32( SAA7146_I2C_CONT << ((3-h2)*2));
op_count++;
}
}
/* have a look at the last byte inserted:
if it was: ...CONT change it to ...STOP */
h1 = (op_count-1)/3; h2 = (op_count-1)%3;
if ( SAA7146_I2C_CONT == (0x3 & (le32_to_cpu(op[h1]) >> ((3-h2)*2))) ) {
op[h1] &= ~cpu_to_le32(0x2 << ((3-h2)*2));
op[h1] |= cpu_to_le32(SAA7146_I2C_STOP << ((3-h2)*2));
}
/* return the number of u32s to send */
return mem;
}
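/*
 * Illustrative sketch, not part of the driver: the byte/attribute packing
 * performed by saa7146_i2c_msg_prepare() above. Each 32-bit word carries up
 * to three transfer bytes in its upper three byte lanes plus a 2-bit
 * attribute per byte in the low lanes. Standalone user-space demo; the
 * START/CONT attribute values below are assumptions standing in for the
 * real SAA7146_I2C_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ATTR_START 3	/* assumed 2-bit START code */
#define DEMO_ATTR_CONT  2	/* assumed 2-bit CONT code  */

static void pack_byte(uint32_t *op, int op_count, uint8_t byte, uint32_t attr)
{
	int h1 = op_count / 3;	/* which u32 */
	int h2 = op_count % 3;	/* which lane inside it */

	op[h1] |= (uint32_t)byte << ((3 - h2) * 8);
	op[h1] |= attr << ((3 - h2) * 2);
}

int main(void)
{
	uint32_t op[4] = { 0 };
	uint8_t addr = (0x50 << 1) | 0;		/* 7-bit address 0x50, write */
	uint8_t data[2] = { 0x12, 0x34 };
	int count = 1 + 2;			/* address byte + payload */
	int words = 1 + (count - 1) / 3;	/* same worst-case formula as above */
	int i, op_count = 0;

	pack_byte(op, op_count++, addr, DEMO_ATTR_START);
	for (i = 0; i < 2; i++)
		pack_byte(op, op_count++, data[i], DEMO_ATTR_CONT);

	printf("%d word(s), op[0] = 0x%08x\n", words, op[0]);	/* 0xa01234e8 */
	return 0;
}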
/* this function loops through all i2c-messages. normally, it should determine
which bytes were read through the adapter and write them back to the corresponding
i2c-message. but instead, we simply write back all bytes.
fixme: this could be improved. */
static int saa7146_i2c_msg_cleanup(const struct i2c_msg *m, int num, __le32 *op)
{
int i, j;
int op_count = 0;
/* loop through all messages */
for(i = 0; i < num; i++) {
op_count++;
/* loop through all bytes of message i */
for(j = 0; j < m[i].len; j++) {
/* write back all bytes that could have been read */
m[i].buf[j] = (le32_to_cpu(op[op_count/3]) >> ((3-(op_count%3))*8));
op_count++;
}
}
return 0;
}
/* this function resets the i2c-device and returns 0 if everything was fine, otherwise -1 */
static int saa7146_i2c_reset(struct saa7146_dev *dev)
{
/* get current status */
u32 status = saa7146_i2c_status(dev);
/* clear registers for sure */
saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate);
saa7146_write(dev, I2C_TRANSFER, 0);
/* check if any operation is still in progress */
if ( 0 != ( status & SAA7146_I2C_BUSY) ) {
/* yes, kill ongoing operation */
DEB_I2C("busy_state detected\n");
/* set "ABORT-OPERATION"-bit (bit 7)*/
saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07));
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
msleep(SAA7146_I2C_DELAY);
/* clear all error-bits pending; this is needed because p.123, note 1 */
saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate);
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
msleep(SAA7146_I2C_DELAY);
}
/* check if any error is (still) present. (this can be necessary because p.123, note 1) */
status = saa7146_i2c_status(dev);
if ( dev->i2c_bitrate != status ) {
DEB_I2C("error_state detected. status:0x%08x\n", status);
/* Repeat the abort operation. This seems to be necessary
after serious protocol errors caused by e.g. the SAA7740 */
saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07));
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
msleep(SAA7146_I2C_DELAY);
/* clear all error-bits pending */
saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate);
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
msleep(SAA7146_I2C_DELAY);
/* the data sheet says it might be necessary to clear the status
twice after an abort */
saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate);
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
msleep(SAA7146_I2C_DELAY);
}
/* if any error is still present, a fatal error has occurred ... */
status = saa7146_i2c_status(dev);
if ( dev->i2c_bitrate != status ) {
DEB_I2C("fatal error. status:0x%08x\n", status);
return -1;
}
return 0;
}
/* this function writes out the data word 'dword' to the i2c-device.
it returns 0 on success, or a negative errno if the transfer failed
(-EREMOTEIO indicates an address error) */
static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int short_delay)
{
u32 status = 0, mc2 = 0;
int trial = 0;
unsigned long timeout;
/* write out i2c-command */
DEB_I2C("before: 0x%08x (status: 0x%08x), %d\n",
*dword, saa7146_read(dev, I2C_STATUS), dev->i2c_op);
if( 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags)) {
saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate);
saa7146_write(dev, I2C_TRANSFER, le32_to_cpu(*dword));
dev->i2c_op = 1;
SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17);
SAA7146_IER_ENABLE(dev, MASK_16|MASK_17);
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
timeout = HZ/100 + 1; /* 10ms */
timeout = wait_event_interruptible_timeout(dev->i2c_wq, dev->i2c_op == 0, timeout);
if (timeout == -ERESTARTSYS || dev->i2c_op) {
SAA7146_IER_DISABLE(dev, MASK_16|MASK_17);
SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17);
if (timeout == -ERESTARTSYS)
/* a signal arrived */
return -ERESTARTSYS;
pr_warn("%s %s [irq]: timed out waiting for end of xfer\n",
dev->name, __func__);
return -EIO;
}
status = saa7146_read(dev, I2C_STATUS);
} else {
saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate);
saa7146_write(dev, I2C_TRANSFER, le32_to_cpu(*dword));
saa7146_write(dev, MC2, (MASK_00 | MASK_16));
/* do not poll for i2c-status before upload is complete */
timeout = jiffies + HZ/100 + 1; /* 10ms */
while(1) {
mc2 = (saa7146_read(dev, MC2) & 0x1);
if( 0 != mc2 ) {
break;
}
if (time_after(jiffies,timeout)) {
pr_warn("%s %s: timed out waiting for MC2\n",
dev->name, __func__);
return -EIO;
}
}
/* wait until we get a transfer done or error */
timeout = jiffies + HZ/100 + 1; /* 10ms */
/* first read usually delivers bogus results... */
saa7146_i2c_status(dev);
while(1) {
status = saa7146_i2c_status(dev);
if ((status & 0x3) != 1)
break;
if (time_after(jiffies,timeout)) {
/* this is normal when probing the bus
* (no answer from a nonexistent device...)
*/
pr_warn("%s %s [poll]: timed out waiting for end of xfer\n",
dev->name, __func__);
return -EIO;
}
if (++trial < 50 && short_delay)
udelay(10);
else
msleep(1);
}
}
/* give a detailed status report */
if ( 0 != (status & (SAA7146_I2C_SPERR | SAA7146_I2C_APERR |
SAA7146_I2C_DTERR | SAA7146_I2C_DRERR |
SAA7146_I2C_AL | SAA7146_I2C_ERR |
SAA7146_I2C_BUSY)) ) {
if ( 0 == (status & SAA7146_I2C_ERR) ||
0 == (status & SAA7146_I2C_BUSY) ) {
/* it may take some time until ERR goes high - ignore */
DEB_I2C("unexpected i2c status %04x\n", status);
}
if( 0 != (status & SAA7146_I2C_SPERR) ) {
DEB_I2C("error due to invalid start/stop condition\n");
}
if( 0 != (status & SAA7146_I2C_DTERR) ) {
DEB_I2C("error in data transmission\n");
}
if( 0 != (status & SAA7146_I2C_DRERR) ) {
DEB_I2C("error when receiving data\n");
}
if( 0 != (status & SAA7146_I2C_AL) ) {
DEB_I2C("error because arbitration lost\n");
}
/* we handle address-errors here */
if( 0 != (status & SAA7146_I2C_APERR) ) {
DEB_I2C("error in address phase\n");
return -EREMOTEIO;
}
return -EIO;
}
/* read back data, just in case we were reading ... */
*dword = cpu_to_le32(saa7146_read(dev, I2C_TRANSFER));
DEB_I2C("after: 0x%08x\n", *dword);
return 0;
}
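/*
 * Illustrative sketch, not part of the driver: the polling loops above use
 * the kernel's time_after() to compare jiffies safely across counter
 * wrap-around. This standalone demo shows the same signed-difference idea
 * with made-up 32-bit tick values.
 */
#include <stdint.h>
#include <stdio.h>

/* true if 'a' is after 'b', even when the tick counter has wrapped */
static int demo_time_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* just before wrap */
	uint32_t timeout = now + 0x20;	/* wraps around to 0x10 */

	printf("%d\n", demo_time_after(now, timeout));		/* 0: not yet expired */
	printf("%d\n", demo_time_after(now + 0x40, timeout));	/* 1: expired         */
	return 0;
}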
static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *msgs, int num, int retries)
{
int i = 0, count = 0;
__le32 *buffer = dev->d_i2c.cpu_addr;
int err = 0;
int short_delay = 0;
if (mutex_lock_interruptible(&dev->i2c_lock))
return -ERESTARTSYS;
for(i=0;i<num;i++) {
DEB_I2C("msg:%d/%d\n", i+1, num);
}
/* prepare the message(s), get number of u32s to transfer */
count = saa7146_i2c_msg_prepare(msgs, num, buffer);
if ( 0 > count ) {
err = -1;
goto out;
}
if ( count > 3 || 0 != (SAA7146_I2C_SHORT_DELAY & dev->ext->flags) )
short_delay = 1;
do {
/* reset the i2c-device if necessary */
err = saa7146_i2c_reset(dev);
if ( 0 > err ) {
DEB_I2C("could not reset i2c-device\n");
goto out;
}
/* write out the u32s one after another */
for(i = 0; i < count; i++) {
err = saa7146_i2c_writeout(dev, &buffer[i], short_delay);
if ( 0 != err) {
/* this one is unsatisfying: some i2c slaves on some
dvb cards don't acknowledge correctly, so the saa7146
thinks that an address error occurred. in that case, the
transaction should be retried, even though an address error
occurred. analog saa7146-based cards rely extensively on
i2c address probing, however, and address errors indicate that a
device is really *not* there. retrying in that case
greatly increases the time needed for probing, so
it should be avoided. So we bail out in irq mode after an
address error and trust the saa7146 address error detection. */
if (-EREMOTEIO == err && 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags))
goto out;
DEB_I2C("error while sending message(s). starting again\n");
break;
}
}
if( 0 == err ) {
err = num;
break;
}
/* delay a bit before retrying */
msleep(10);
} while (err != num && retries--);
/* quit if any error occurred */
if (err != num)
goto out;
/* if any things had to be read, get the results */
if ( 0 != saa7146_i2c_msg_cleanup(msgs, num, buffer)) {
DEB_I2C("could not cleanup i2c-message\n");
err = -1;
goto out;
}
/* return the number of delivered messages */
DEB_I2C("transmission successful. (msg:%d)\n", err);
out:
/* another bug in revision 0: the i2c-registers get uploaded randomly by other
uploads, so we better clear them out before continuing */
if( 0 == dev->revision ) {
__le32 zero = 0;
saa7146_i2c_reset(dev);
if( 0 != saa7146_i2c_writeout(dev, &zero, short_delay)) {
pr_info("revision 0 error. this should never happen\n");
}
}
mutex_unlock(&dev->i2c_lock);
return err;
}
/* utility functions */
static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, int num)
{
struct v4l2_device *v4l2_dev = i2c_get_adapdata(adapter);
struct saa7146_dev *dev = to_saa7146_dev(v4l2_dev);
/* use helper function to transfer data */
return saa7146_i2c_transfer(dev, msg, num, adapter->retries);
}
/*****************************************************************************/
/* i2c-adapter helper functions */
/* exported algorithm data */
static struct i2c_algorithm saa7146_algo = {
.master_xfer = saa7146_i2c_xfer,
.functionality = saa7146_i2c_func,
};
int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate)
{
DEB_EE("bitrate: 0x%08x\n", bitrate);
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24));
dev->i2c_bitrate = bitrate;
saa7146_i2c_reset(dev);
if (i2c_adapter) {
i2c_set_adapdata(i2c_adapter, &dev->v4l2_dev);
i2c_adapter->dev.parent = &dev->pci->dev;
i2c_adapter->algo = &saa7146_algo;
i2c_adapter->algo_data = NULL;
i2c_adapter->timeout = SAA7146_I2C_TIMEOUT;
i2c_adapter->retries = SAA7146_I2C_RETRIES;
}
return 0;
}
| gpl-2.0 |
aduggan/linux | tools/perf/util/annotate.c | 42 | 41515 | /*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from builtin-annotate.c, see those files for further
* copyright notes.
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "cache.h"
#include "symbol.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include <regex.h>
#include <pthread.h>
#include <linux/bitops.h>
const char *disassembler_style;
const char *objdump_path;
static regex_t file_lineno;
static struct ins *ins__find(const char *name);
static int disasm_line__parse(char *line, char **namep, char **rawp);
static void ins__delete(struct ins_operands *ops)
{
if (ops == NULL)
return;
zfree(&ops->source.raw);
zfree(&ops->source.name);
zfree(&ops->target.raw);
zfree(&ops->target.name);
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
if (ins->ops->scnprintf)
return ins->ops->scnprintf(ins, bf, size, ops);
return ins__raw_scnprintf(ins, bf, size, ops);
}
static int call__parse(struct ins_operands *ops)
{
char *endptr, *tok, *name;
ops->target.addr = strtoull(ops->raw, &endptr, 16);
name = strchr(endptr, '<');
if (name == NULL)
goto indirect_call;
name++;
#ifdef __arm__
if (strchr(name, '+'))
return -1;
#endif
tok = strchr(name, '>');
if (tok == NULL)
return -1;
*tok = '\0';
ops->target.name = strdup(name);
*tok = '>';
return ops->target.name == NULL ? -1 : 0;
indirect_call:
tok = strchr(endptr, '(');
if (tok != NULL) {
ops->target.addr = 0;
return 0;
}
tok = strchr(endptr, '*');
if (tok == NULL)
return -1;
ops->target.addr = strtoull(tok + 1, NULL, 16);
return 0;
}
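/*
 * Illustrative sketch, not part of perf: the shape of the raw operand that
 * call__parse() above consumes. A direct call operand from objdump commonly
 * looks like "4004d6 <foo>", while an indirect call looks like "*%rax".
 * Standalone demo with a hypothetical input string.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *raw = "4004d6 <foo+0x10>";	/* assumed example operand */
	char *endptr, *lt, *gt;
	unsigned long long addr;

	addr = strtoull(raw, &endptr, 16);
	lt = strchr(endptr, '<');
	gt = lt ? strchr(lt, '>') : NULL;
	if (lt && gt)
		printf("addr=0x%llx target=%.*s\n", addr, (int)(gt - lt - 1), lt + 1);
	else
		printf("indirect or unnamed call at 0x%llx\n", addr);
	return 0;
}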
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
if (ops->target.name)
return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);
if (ops->target.addr == 0)
return ins__raw_scnprintf(ins, bf, size, ops);
return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
.parse = call__parse,
.scnprintf = call__scnprintf,
};
bool ins__is_call(const struct ins *ins)
{
return ins->ops == &call_ops;
}
static int jump__parse(struct ins_operands *ops)
{
const char *s = strchr(ops->raw, '+');
ops->target.addr = strtoull(ops->raw, NULL, 16);
if (s++ != NULL)
ops->target.offset = strtoull(s, NULL, 16);
else
ops->target.offset = UINT64_MAX;
return 0;
}
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
}
static struct ins_ops jump_ops = {
.parse = jump__parse,
.scnprintf = jump__scnprintf,
};
bool ins__is_jump(const struct ins *ins)
{
return ins->ops == &jump_ops;
}
static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
char *endptr, *name, *t;
if (strstr(raw, "(%rip)") == NULL)
return 0;
*addrp = strtoull(comment, &endptr, 16);
name = strchr(endptr, '<');
if (name == NULL)
return -1;
name++;
t = strchr(name, '>');
if (t == NULL)
return 0;
*t = '\0';
*namep = strdup(name);
*t = '>';
return 0;
}
static int lock__parse(struct ins_operands *ops)
{
char *name;
ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
if (ops->locked.ops == NULL)
return 0;
if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0)
goto out_free_ops;
ops->locked.ins = ins__find(name);
free(name);
if (ops->locked.ins == NULL)
goto out_free_ops;
if (!ops->locked.ins->ops)
return 0;
if (ops->locked.ins->ops->parse &&
ops->locked.ins->ops->parse(ops->locked.ops) < 0)
goto out_free_ops;
return 0;
out_free_ops:
zfree(&ops->locked.ops);
return 0;
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
int printed;
if (ops->locked.ins == NULL)
return ins__raw_scnprintf(ins, bf, size, ops);
printed = scnprintf(bf, size, "%-6.6s ", ins->name);
return printed + ins__scnprintf(ops->locked.ins, bf + printed,
size - printed, ops->locked.ops);
}
static void lock__delete(struct ins_operands *ops)
{
struct ins *ins = ops->locked.ins;
if (ins && ins->ops->free)
ins->ops->free(ops->locked.ops);
else
ins__delete(ops->locked.ops);
zfree(&ops->locked.ops);
zfree(&ops->target.raw);
zfree(&ops->target.name);
}
static struct ins_ops lock_ops = {
.free = lock__delete,
.parse = lock__parse,
.scnprintf = lock__scnprintf,
};
static int mov__parse(struct ins_operands *ops)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
if (s == NULL)
return -1;
*s = '\0';
ops->source.raw = strdup(ops->raw);
*s = ',';
if (ops->source.raw == NULL)
return -1;
target = ++s;
#ifdef __arm__
comment = strchr(s, ';');
#else
comment = strchr(s, '#');
#endif
if (comment != NULL)
s = comment - 1;
else
s = strchr(s, '\0') - 1;
while (s > target && isspace(s[0]))
--s;
s++;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
goto out_free_source;
if (comment == NULL)
return 0;
while (comment[0] != '\0' && isspace(comment[0]))
++comment;
comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
return 0;
out_free_source:
zfree(&ops->source.raw);
return -1;
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops mov_ops = {
.parse = mov__parse,
.scnprintf = mov__scnprintf,
};
static int dec__parse(struct ins_operands *ops)
{
char *target, *comment, *s, prev;
target = s = ops->raw;
while (s[0] != '\0' && !isspace(s[0]))
++s;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
return -1;
comment = strchr(s, '#');
if (comment == NULL)
return 0;
while (comment[0] != '\0' && isspace(comment[0]))
++comment;
comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
return 0;
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6.6s %s", ins->name,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops dec_ops = {
.parse = dec__parse,
.scnprintf = dec__scnprintf,
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
struct ins_operands *ops __maybe_unused)
{
return scnprintf(bf, size, "%-6.6s", "nop");
}
static struct ins_ops nop_ops = {
.scnprintf = nop__scnprintf,
};
static struct ins_ops ret_ops = {
.scnprintf = ins__raw_scnprintf,
};
bool ins__is_ret(const struct ins *ins)
{
return ins->ops == &ret_ops;
}
static struct ins instructions[] = {
{ .name = "add", .ops = &mov_ops, },
{ .name = "addl", .ops = &mov_ops, },
{ .name = "addq", .ops = &mov_ops, },
{ .name = "addw", .ops = &mov_ops, },
{ .name = "and", .ops = &mov_ops, },
#ifdef __arm__
{ .name = "b", .ops = &jump_ops, }, // might also be a call
{ .name = "bcc", .ops = &jump_ops, },
{ .name = "bcs", .ops = &jump_ops, },
{ .name = "beq", .ops = &jump_ops, },
{ .name = "bge", .ops = &jump_ops, },
{ .name = "bgt", .ops = &jump_ops, },
{ .name = "bhi", .ops = &jump_ops, },
{ .name = "bl", .ops = &call_ops, },
{ .name = "bls", .ops = &jump_ops, },
{ .name = "blt", .ops = &jump_ops, },
{ .name = "blx", .ops = &call_ops, },
{ .name = "bne", .ops = &jump_ops, },
#endif
{ .name = "bts", .ops = &mov_ops, },
{ .name = "call", .ops = &call_ops, },
{ .name = "callq", .ops = &call_ops, },
{ .name = "cmp", .ops = &mov_ops, },
{ .name = "cmpb", .ops = &mov_ops, },
{ .name = "cmpl", .ops = &mov_ops, },
{ .name = "cmpq", .ops = &mov_ops, },
{ .name = "cmpw", .ops = &mov_ops, },
{ .name = "cmpxch", .ops = &mov_ops, },
{ .name = "dec", .ops = &dec_ops, },
{ .name = "decl", .ops = &dec_ops, },
{ .name = "imul", .ops = &mov_ops, },
{ .name = "inc", .ops = &dec_ops, },
{ .name = "incl", .ops = &dec_ops, },
{ .name = "ja", .ops = &jump_ops, },
{ .name = "jae", .ops = &jump_ops, },
{ .name = "jb", .ops = &jump_ops, },
{ .name = "jbe", .ops = &jump_ops, },
{ .name = "jc", .ops = &jump_ops, },
{ .name = "jcxz", .ops = &jump_ops, },
{ .name = "je", .ops = &jump_ops, },
{ .name = "jecxz", .ops = &jump_ops, },
{ .name = "jg", .ops = &jump_ops, },
{ .name = "jge", .ops = &jump_ops, },
{ .name = "jl", .ops = &jump_ops, },
{ .name = "jle", .ops = &jump_ops, },
{ .name = "jmp", .ops = &jump_ops, },
{ .name = "jmpq", .ops = &jump_ops, },
{ .name = "jna", .ops = &jump_ops, },
{ .name = "jnae", .ops = &jump_ops, },
{ .name = "jnb", .ops = &jump_ops, },
{ .name = "jnbe", .ops = &jump_ops, },
{ .name = "jnc", .ops = &jump_ops, },
{ .name = "jne", .ops = &jump_ops, },
{ .name = "jng", .ops = &jump_ops, },
{ .name = "jnge", .ops = &jump_ops, },
{ .name = "jnl", .ops = &jump_ops, },
{ .name = "jnle", .ops = &jump_ops, },
{ .name = "jno", .ops = &jump_ops, },
{ .name = "jnp", .ops = &jump_ops, },
{ .name = "jns", .ops = &jump_ops, },
{ .name = "jnz", .ops = &jump_ops, },
{ .name = "jo", .ops = &jump_ops, },
{ .name = "jp", .ops = &jump_ops, },
{ .name = "jpe", .ops = &jump_ops, },
{ .name = "jpo", .ops = &jump_ops, },
{ .name = "jrcxz", .ops = &jump_ops, },
{ .name = "js", .ops = &jump_ops, },
{ .name = "jz", .ops = &jump_ops, },
{ .name = "lea", .ops = &mov_ops, },
{ .name = "lock", .ops = &lock_ops, },
{ .name = "mov", .ops = &mov_ops, },
{ .name = "movb", .ops = &mov_ops, },
{ .name = "movdqa",.ops = &mov_ops, },
{ .name = "movl", .ops = &mov_ops, },
{ .name = "movq", .ops = &mov_ops, },
{ .name = "movslq", .ops = &mov_ops, },
{ .name = "movzbl", .ops = &mov_ops, },
{ .name = "movzwl", .ops = &mov_ops, },
{ .name = "nop", .ops = &nop_ops, },
{ .name = "nopl", .ops = &nop_ops, },
{ .name = "nopw", .ops = &nop_ops, },
{ .name = "or", .ops = &mov_ops, },
{ .name = "orl", .ops = &mov_ops, },
{ .name = "test", .ops = &mov_ops, },
{ .name = "testb", .ops = &mov_ops, },
{ .name = "testl", .ops = &mov_ops, },
{ .name = "xadd", .ops = &mov_ops, },
{ .name = "xbeginl", .ops = &jump_ops, },
{ .name = "xbeginq", .ops = &jump_ops, },
{ .name = "retq", .ops = &ret_ops, },
};
static int ins__key_cmp(const void *name, const void *insp)
{
const struct ins *ins = insp;
return strcmp(name, ins->name);
}
static int ins__cmp(const void *a, const void *b)
{
const struct ins *ia = a;
const struct ins *ib = b;
return strcmp(ia->name, ib->name);
}
static void ins__sort(void)
{
const int nmemb = ARRAY_SIZE(instructions);
qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
}
static struct ins *ins__find(const char *name)
{
const int nmemb = ARRAY_SIZE(instructions);
static bool sorted;
if (!sorted) {
ins__sort();
sorted = true;
}
return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
}
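/*
 * Illustrative sketch, not part of perf: the lazy sort-then-bsearch pattern
 * used by ins__find() above, reduced to a standalone demo over a small
 * string table.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *demo_table[] = { "mov", "add", "jmp", "call" };
#define DEMO_NMEMB (sizeof(demo_table) / sizeof(demo_table[0]))

static int demo_key_cmp(const void *key, const void *elem)
{
	return strcmp(key, *(const char * const *)elem);
}

static int demo_elem_cmp(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

static const char *demo_find(const char *name)
{
	static bool sorted;
	const char **res;

	if (!sorted) {		/* sort once, on first lookup */
		qsort(demo_table, DEMO_NMEMB, sizeof(demo_table[0]), demo_elem_cmp);
		sorted = true;
	}
	res = bsearch(name, demo_table, DEMO_NMEMB, sizeof(demo_table[0]), demo_key_cmp);
	return res ? *res : NULL;
}

int main(void)
{
	printf("%s\n", demo_find("jmp") ? "found" : "missing");	/* found   */
	printf("%s\n", demo_find("xor") ? "found" : "missing");	/* missing */
	return 0;
}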
int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
pthread_mutex_init(¬es->lock, NULL);
return 0;
}
int symbol__alloc_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
const size_t size = symbol__size(sym);
size_t sizeof_sym_hist;
/* Check for overflow when calculating sizeof_sym_hist */
if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
return -1;
sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
/* Check for overflow in zalloc argument */
if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
/ symbol_conf.nr_events)
return -1;
notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
if (notes->src == NULL)
return -1;
notes->src->sizeof_sym_hist = sizeof_sym_hist;
notes->src->nr_histograms = symbol_conf.nr_events;
INIT_LIST_HEAD(¬es->src->source);
return 0;
}
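/*
 * Illustrative sketch, not part of perf: the overflow guard used by
 * symbol__alloc_hist() above, reduced to a standalone helper that rejects an
 * element count which would overflow "header + n * elem" before the
 * multiplication is performed. Demo values only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_hdr_plus_array(size_t hdr, size_t n, size_t elem)
{
	if (elem && n > (SIZE_MAX - hdr) / elem)
		return NULL;		/* would overflow size_t */
	return calloc(1, hdr + n * elem);
}

int main(void)
{
	void *ok = alloc_hdr_plus_array(64, 1024, sizeof(uint64_t));
	void *bad = alloc_hdr_plus_array(64, SIZE_MAX / 4, sizeof(uint64_t));

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL */
	free(ok);
	return 0;
}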
/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
const size_t size = symbol__size(sym);
notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
if (notes->src->cycles_hist == NULL)
return -1;
return 0;
}
void symbol__annotate_zero_histograms(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
pthread_mutex_lock(¬es->lock);
if (notes->src != NULL) {
memset(notes->src->histograms, 0,
notes->src->nr_histograms * notes->src->sizeof_sym_hist);
if (notes->src->cycles_hist)
memset(notes->src->cycles_hist, 0,
symbol__size(sym) * sizeof(struct cyc_hist));
}
pthread_mutex_unlock(¬es->lock);
}
static int __symbol__account_cycles(struct annotation *notes,
u64 start,
unsigned offset, unsigned cycles,
unsigned have_start)
{
struct cyc_hist *ch;
ch = notes->src->cycles_hist;
/*
* For now we can only account one basic block per
* final jump. But multiple could be overlapping.
* Always account the longest one. So when
* a shorter one has been already seen throw it away.
*
* We separately always account the full cycles.
*/
ch[offset].num_aggr++;
ch[offset].cycles_aggr += cycles;
if (!have_start && ch[offset].have_start)
return 0;
if (ch[offset].num) {
if (have_start && (!ch[offset].have_start ||
ch[offset].start > start)) {
ch[offset].have_start = 0;
ch[offset].cycles = 0;
ch[offset].num = 0;
if (ch[offset].reset < 0xffff)
ch[offset].reset++;
} else if (have_start &&
ch[offset].start < start)
return 0;
}
ch[offset].have_start = have_start;
ch[offset].start = start;
ch[offset].cycles += cycles;
ch[offset].num++;
return 0;
}
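/*
 * Worked example (illustrative, not in the original source): two basic
 * blocks ending at the same branch, one starting at 0x10 and one at 0x20,
 * hit the same ch[offset]. Both bump num_aggr/cycles_aggr, but only the
 * longer block (start 0x10, the smaller address) is kept in
 * ch[offset].start/cycles/num: a later, shorter sample returns early above,
 * while a later, longer one resets the slot and increments ch[offset].reset.
 */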
static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
struct annotation *notes, int evidx, u64 addr)
{
unsigned offset;
struct sym_hist *h;
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
if (addr < sym->start || addr >= sym->end) {
pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
__func__, __LINE__, sym->name, sym->start, addr, sym->end);
return -ERANGE;
}
offset = addr - sym->start;
h = annotation__histogram(notes, evidx);
h->sum++;
h->addr[offset]++;
pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
addr, addr - sym->start, evidx, h->addr[offset]);
return 0;
}
static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
if (symbol__alloc_hist(sym) < 0)
return NULL;
}
if (!notes->src->cycles_hist && cycles) {
if (symbol__alloc_hist_cycles(sym) < 0)
return NULL;
}
return notes;
}
static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
int evidx, u64 addr)
{
struct annotation *notes;
if (sym == NULL)
return 0;
notes = symbol__get_annotation(sym, false);
if (notes == NULL)
return -ENOMEM;
return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
}
static int symbol__account_cycles(u64 addr, u64 start,
struct symbol *sym, unsigned cycles)
{
struct annotation *notes;
unsigned offset;
if (sym == NULL)
return 0;
notes = symbol__get_annotation(sym, true);
if (notes == NULL)
return -ENOMEM;
if (addr < sym->start || addr >= sym->end)
return -ERANGE;
if (start) {
if (start < sym->start || start >= sym->end)
return -ERANGE;
if (start >= addr)
start = 0;
}
offset = addr - sym->start;
return __symbol__account_cycles(notes,
start ? start - sym->start : 0,
offset, cycles,
!!start);
}
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
unsigned cycles)
{
u64 saddr = 0;
int err;
if (!cycles)
return 0;
/*
* Only set start when IPC can be computed. We can only
* compute it when the basic block is completely in a single
* function.
* Special case the case when the jump is elsewhere, but
* it starts on the function start.
*/
if (start &&
(start->sym == ams->sym ||
(ams->sym &&
start->addr == ams->sym->start + ams->map->start)))
saddr = start->al_addr;
if (saddr == 0)
pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
ams->addr,
start ? start->addr : 0,
ams->sym ? ams->sym->start + ams->map->start : 0,
saddr);
err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
if (err)
pr_debug2("account_cycles failed %d\n", err);
return err;
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
{
return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
}
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
static void disasm_line__init_ins(struct disasm_line *dl)
{
dl->ins = ins__find(dl->name);
if (dl->ins == NULL)
return;
if (!dl->ins->ops)
return;
if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
dl->ins = NULL;
}
static int disasm_line__parse(char *line, char **namep, char **rawp)
{
char *name = line, tmp;
while (isspace(name[0]))
++name;
if (name[0] == '\0')
return -1;
*rawp = name + 1;
while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
++*rawp;
tmp = (*rawp)[0];
(*rawp)[0] = '\0';
*namep = strdup(name);
if (*namep == NULL)
goto out_free_name;
(*rawp)[0] = tmp;
if ((*rawp)[0] != '\0') {
(*rawp)++;
while (isspace((*rawp)[0]))
++(*rawp);
}
return 0;
out_free_name:
zfree(namep);
return -1;
}
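/*
 * Example (illustrative, not in the original source): for the buffer
 * "  mov    0x2f5ad5(%rip),%rax", disasm_line__parse() returns 0 with
 * *namep = strdup("mov") and *rawp pointing at "0x2f5ad5(%rip),%rax"
 * inside the same buffer; a line containing only whitespace returns -1.
 */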
static struct disasm_line *disasm_line__new(s64 offset, char *line,
size_t privsize, int line_nr)
{
struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
if (dl != NULL) {
dl->offset = offset;
dl->line = strdup(line);
dl->line_nr = line_nr;
if (dl->line == NULL)
goto out_delete;
if (offset != -1) {
if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0)
goto out_free_line;
disasm_line__init_ins(dl);
}
}
return dl;
out_free_line:
zfree(&dl->line);
out_delete:
free(dl);
return NULL;
}
void disasm_line__free(struct disasm_line *dl)
{
zfree(&dl->line);
zfree(&dl->name);
if (dl->ins && dl->ins->ops->free)
dl->ins->ops->free(&dl->ops);
else
ins__delete(&dl->ops);
free(dl);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
if (raw || !dl->ins)
return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw);
return ins__scnprintf(dl->ins, bf, size, &dl->ops);
}
static void disasm__add(struct list_head *head, struct disasm_line *line)
{
list_add_tail(&line->node, head);
}
struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
{
list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0)
return pos;
return NULL;
}
double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
s64 end, const char **path, u64 *nr_samples)
{
struct source_line *src_line = notes->src->lines;
double percent = 0.0;
*nr_samples = 0;
if (src_line) {
size_t sizeof_src_line = sizeof(*src_line) +
sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
while (offset < end) {
src_line = (void *)notes->src->lines +
(sizeof_src_line * offset);
if (*path == NULL)
*path = src_line->path;
percent += src_line->samples[evidx].percent;
*nr_samples += src_line->samples[evidx].nr;
offset++;
}
} else {
struct sym_hist *h = annotation__histogram(notes, evidx);
unsigned int hits = 0;
while (offset < end)
hits += h->addr[offset++];
if (h->sum) {
*nr_samples = hits;
percent = 100.0 * hits / h->sum;
}
}
return percent;
}
static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
int max_lines, struct disasm_line *queue)
{
static const char *prev_line;
static const char *prev_color;
if (dl->offset != -1) {
const char *path = NULL;
u64 nr_samples;
double percent, max_percent = 0.0;
double *ppercents = &percent;
u64 *psamples = &nr_samples;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
s64 offset = dl->offset;
const u64 addr = start + offset;
struct disasm_line *next;
next = disasm__get_next_ip_line(&notes->src->source, dl);
if (perf_evsel__is_group_event(evsel)) {
nr_percent = evsel->nr_members;
ppercents = calloc(nr_percent, sizeof(double));
psamples = calloc(nr_percent, sizeof(u64));
if (ppercents == NULL || psamples == NULL) {
return -1;
}
}
for (i = 0; i < nr_percent; i++) {
percent = disasm__calc_percent(notes,
notes->src->lines ? i : evsel->idx + i,
offset,
next ? next->offset : (s64) len,
&path, &nr_samples);
ppercents[i] = percent;
psamples[i] = nr_samples;
if (percent > max_percent)
max_percent = percent;
}
if (max_percent < min_pcnt)
return -1;
if (max_lines && printed >= max_lines)
return 1;
if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) {
if (queue == dl)
break;
disasm_line__print(queue, sym, start, evsel, len,
0, 0, 1, NULL);
}
}
color = get_percent_color(max_percent);
/*
* Also color the filename and line if needed, with
* the same color as the percentage. Don't print it
* twice for close colored addr with the same filename:line
*/
if (path) {
if (!prev_line || strcmp(prev_line, path)
|| color != prev_color) {
color_fprintf(stdout, color, " %s", path);
prev_line = path;
prev_color = color;
}
}
for (i = 0; i < nr_percent; i++) {
percent = ppercents[i];
nr_samples = psamples[i];
color = get_percent_color(percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %7" PRIu64,
nr_samples);
else
color_fprintf(stdout, color, " %7.2f", percent);
}
printf(" : ");
color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
if (ppercents != &percent)
free(ppercents);
if (psamples != &nr_samples)
free(psamples);
} else if (max_lines && printed >= max_lines)
return 1;
else {
int width = 8;
if (queue)
return -1;
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
if (!*dl->line)
printf(" %*s:\n", width, " ");
else
printf(" %*s: %s\n", width, " ", dl->line);
}
return 0;
}
/*
* symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
* which looks like the following
*
* 0000000000415500 <_init>:
* 415500: sub $0x8,%rsp
* 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
* 41550b: test %rax,%rax
* 41550e: je 415515 <_init+0x15>
* 415510: callq 416e70 <__gmon_start__@plt>
* 415515: add $0x8,%rsp
* 415519: retq
*
* it will be parsed and saved into struct disasm_line as
* <offset> <name> <ops.raw>
*
* The offset will be a relative offset from the start of the symbol and -1
* means that it's not a disassembly line so should be treated differently.
* The ops.raw part will be parsed further according to type of the instruction.
*/
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
FILE *file, size_t privsize,
int *line_nr)
{
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
size_t line_len;
s64 line_ip, offset = -1;
regmatch_t match[2];
if (getline(&line, &line_len, file) < 0)
return -1;
if (!line)
return -1;
while (line_len != 0 && isspace(line[line_len - 1]))
line[--line_len] = '\0';
c = strchr(line, '\n');
if (c)
*c = 0;
line_ip = -1;
parsed_line = line;
/* /filename:linenr ? Save line number and ignore. */
if (regexec(&file_lineno, line, 2, match, 0) == 0) {
*line_nr = atoi(line + match[1].rm_so);
return 0;
}
/*
* Strip leading spaces:
*/
tmp = line;
while (*tmp) {
if (*tmp != ' ')
break;
tmp++;
}
if (*tmp) {
/*
* Parse hexa addresses followed by ':'
*/
line_ip = strtoull(tmp, &tmp2, 16);
if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
line_ip = -1;
}
if (line_ip != -1) {
u64 start = map__rip_2objdump(map, sym->start),
end = map__rip_2objdump(map, sym->end);
offset = line_ip - start;
if ((u64)line_ip < start || (u64)line_ip >= end)
offset = -1;
else
parsed_line = tmp2 + 1;
}
dl = disasm_line__new(offset, parsed_line, privsize, *line_nr);
free(line);
(*line_nr)++;
if (dl == NULL)
return -1;
if (dl->ops.target.offset == UINT64_MAX)
dl->ops.target.offset = dl->ops.target.addr -
map__rip_2objdump(map, sym->start);
/* kcore has no symbols, so add the call target name */
if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
struct addr_map_symbol target = {
.map = map,
.addr = dl->ops.target.addr,
};
if (!map_groups__find_ams(&target, NULL) &&
target.sym->start == target.al_addr)
dl->ops.target.name = strdup(target.sym->name);
}
disasm__add(&notes->src->source, dl);
return 0;
}
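/*
 * Worked example (illustrative): with the <_init> listing quoted above and
 * start = map__rip_2objdump(map, sym->start) = 0x415500, the line
 * "  415504: mov 0x2f5ad5(%rip),%rax ..." yields line_ip = 0x415504,
 * offset = 0x4 and parsed_line pointing just past the ':', which
 * disasm_line__new()/disasm_line__parse() split into name "mov" and the raw
 * operands. Header and source lines keep offset == -1.
 */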
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}
static void delete_last_nop(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
struct list_head *list = &notes->src->source;
struct disasm_line *dl;
while (!list_empty(list)) {
dl = list_entry(list->prev, struct disasm_line, node);
if (dl->ins && dl->ins->ops) {
if (dl->ins->ops != &nop_ops)
return;
} else {
if (!strstr(dl->line, " nop ") &&
!strstr(dl->line, " nopl ") &&
!strstr(dl->line, " nopw "))
return;
}
list_del(&dl->node);
disasm_line__free(dl);
}
}
int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
int errnum, char *buf, size_t buflen)
{
struct dso *dso = map->dso;
BUG_ON(buflen == 0);
if (errnum >= 0) {
str_error_r(errnum, buf, buflen);
return 0;
}
switch (errnum) {
case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
char bf[SBUILD_ID_SIZE + 15] = " with build id ";
char *build_id_msg = NULL;
if (dso->has_build_id) {
build_id__sprintf(dso->build_id,
sizeof(dso->build_id), bf + 15);
build_id_msg = bf;
}
scnprintf(buf, buflen,
"No vmlinux file%s\nwas found in the path.\n\n"
"Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
"Please use:\n\n"
" perf buildid-cache -vu vmlinux\n\n"
"or:\n\n"
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
}
return 0;
}
int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize)
{
struct dso *dso = map->dso;
char *filename = dso__build_id_filename(dso, NULL, 0);
bool free_filename = true;
char command[PATH_MAX * 2];
FILE *file;
int err = 0;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
bool delete_extract = false;
int stdout_fd[2];
int lineno = 0;
int nline;
pid_t pid;
if (filename)
symbol__join_symfs(symfs_filename, filename);
if (filename == NULL) {
if (dso->has_build_id)
return ENOMEM;
goto fallback;
} else if (dso__is_kcore(dso) ||
readlink(symfs_filename, command, sizeof(command)) < 0 ||
strstr(command, DSO__NAME_KALLSYMS) ||
access(symfs_filename, R_OK)) {
free(filename);
fallback:
/*
* If we don't have build-ids or the build-id file isn't in the
* cache, or is just a kallsyms file, well, let's hope that this
* DSO is the same as when 'perf record' ran.
*/
filename = (char *)dso->long_name;
symbol__join_symfs(symfs_filename, filename);
free_filename = false;
}
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso)) {
err = SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
goto out_free_filename;
}
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
kce.len = sym->end - sym->start;
if (!kcore_extract__create(&kce)) {
delete_extract = true;
strlcpy(symfs_filename, kce.extract_filename,
sizeof(symfs_filename));
if (free_filename) {
free(filename);
free_filename = false;
}
filename = symfs_filename;
}
} else if (dso__needs_decompress(dso)) {
char tmp[PATH_MAX];
struct kmod_path m;
int fd;
bool ret;
if (kmod_path__parse_ext(&m, symfs_filename))
goto out_free_filename;
snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
fd = mkstemp(tmp);
if (fd < 0) {
free(m.ext);
goto out_free_filename;
}
ret = decompress_to_file(m.ext, symfs_filename, fd);
if (ret)
pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
free(m.ext);
close(fd);
if (!ret)
goto out_free_filename;
strcpy(symfs_filename, tmp);
}
snprintf(command, sizeof(command),
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
" -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
objdump_path ? objdump_path : "objdump",
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
symbol_conf.annotate_src ? "-S" : "",
symfs_filename, filename);
pr_debug("Executing: %s\n", command);
err = -1;
if (pipe(stdout_fd) < 0) {
pr_err("Failure creating the pipe to run %s\n", command);
goto out_remove_tmp;
}
pid = fork();
if (pid < 0) {
pr_err("Failure forking to run %s\n", command);
goto out_close_stdout;
}
if (pid == 0) {
close(stdout_fd[0]);
dup2(stdout_fd[1], 1);
close(stdout_fd[1]);
execl("/bin/sh", "sh", "-c", command, NULL);
perror(command);
exit(-1);
}
close(stdout_fd[1]);
file = fdopen(stdout_fd[0], "r");
if (!file) {
pr_err("Failure creating FILE stream for %s\n", command);
/*
* If we were using debug info, we should retry with the
* original binary.
*/
goto out_remove_tmp;
}
nline = 0;
while (!feof(file)) {
if (symbol__parse_objdump_line(sym, map, file, privsize,
&lineno) < 0)
break;
nline++;
}
if (nline == 0)
pr_err("No output from %s\n", command);
/*
* kallsyms does not have symbol sizes, so there may be a nop at the end.
* Remove it.
*/
if (dso__is_kcore(dso))
delete_last_nop(sym);
fclose(file);
err = 0;
out_remove_tmp:
close(stdout_fd[0]);
if (dso__needs_decompress(dso))
unlink(symfs_filename);
out_free_filename:
if (delete_extract)
kcore_extract__delete(&kce);
if (free_filename)
free(filename);
return err;
out_close_stdout:
close(stdout_fd[1]);
goto out_remove_tmp;
}
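/*
 * Example of the command assembled above (illustrative, paths and addresses
 * are made up):
 *
 *   objdump --start-address=0x0000000000415500 \
 *           --stop-address=0x000000000041551a \
 *           -l -d --no-show-raw -S -C /usr/lib/debug/usr/bin/foo.debug \
 *           2>/dev/null | grep -v /usr/bin/foo | expand
 *
 * i.e. disassemble only the symbol's address range, drop lines echoing the
 * DSO name, and expand tabs before symbol__parse_objdump_line() reads them.
 */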
static void insert_source_line(struct rb_root *root, struct source_line *src_line)
{
struct source_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int i, ret;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct source_line, node);
ret = strcmp(iter->path, src_line->path);
if (ret == 0) {
for (i = 0; i < src_line->nr_pcnt; i++)
iter->samples[i].percent_sum += src_line->samples[i].percent;
return;
}
if (ret < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
for (i = 0; i < src_line->nr_pcnt; i++)
src_line->samples[i].percent_sum = src_line->samples[i].percent;
rb_link_node(&src_line->node, parent, p);
rb_insert_color(&src_line->node, root);
}
static int cmp_source_line(struct source_line *a, struct source_line *b)
{
int i;
for (i = 0; i < a->nr_pcnt; i++) {
if (a->samples[i].percent_sum == b->samples[i].percent_sum)
continue;
return a->samples[i].percent_sum > b->samples[i].percent_sum;
}
return 0;
}
static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
{
struct source_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct source_line, node);
if (cmp_source_line(src_line, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&src_line->node, parent, p);
rb_insert_color(&src_line->node, root);
}
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
struct source_line *src_line;
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
src_line = rb_entry(node, struct source_line, node);
next = rb_next(node);
rb_erase(node, src_root);
__resort_source_line(dest_root, src_line);
node = next;
}
}
static void symbol__free_source_line(struct symbol *sym, int len)
{
struct annotation *notes = symbol__annotation(sym);
struct source_line *src_line = notes->src->lines;
size_t sizeof_src_line;
int i;
sizeof_src_line = sizeof(*src_line) +
(sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
for (i = 0; i < len; i++) {
free_srcline(src_line->path);
src_line = (void *)src_line + sizeof_src_line;
}
zfree(&notes->src->lines);
}
/* Get the filename:line for the colored entries */
static int symbol__get_source_line(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct rb_root *root, int len)
{
u64 start;
int i, k;
int evidx = evsel->idx;
struct source_line *src_line;
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
struct rb_root tmp_root = RB_ROOT;
int nr_pcnt = 1;
u64 h_sum = h->sum;
size_t sizeof_src_line = sizeof(struct source_line);
if (perf_evsel__is_group_event(evsel)) {
for (i = 1; i < evsel->nr_members; i++) {
h = annotation__histogram(notes, evidx + i);
h_sum += h->sum;
}
nr_pcnt = evsel->nr_members;
sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
}
if (!h_sum)
return 0;
src_line = notes->src->lines = calloc(len, sizeof_src_line);
if (!notes->src->lines)
return -1;
start = map__rip_2objdump(map, sym->start);
for (i = 0; i < len; i++) {
u64 offset;
double percent_max = 0.0;
src_line->nr_pcnt = nr_pcnt;
for (k = 0; k < nr_pcnt; k++) {
h = annotation__histogram(notes, evidx + k);
src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum;
if (src_line->samples[k].percent > percent_max)
percent_max = src_line->samples[k].percent;
}
if (percent_max <= 0.5)
goto next;
offset = start + i;
src_line->path = get_srcline(map->dso, offset, NULL, false);
insert_source_line(&tmp_root, src_line);
next:
src_line = (void *)src_line + sizeof_src_line;
}
resort_source_line(root, &tmp_root);
return 0;
}
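/*
 * Illustrative note: only lines whose hottest event exceeds 0.5% of that
 * event's samples get a path resolved via get_srcline() and are inserted
 * into the temporary tree; resort_source_line() then orders them by
 * percent_sum for the summary output.
 */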
static void print_summary(struct rb_root *root, const char *filename)
{
struct source_line *src_line;
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
printf("----------------------------------------------\n\n");
if (RB_EMPTY_ROOT(root)) {
printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
return;
}
node = rb_first(root);
while (node) {
double percent, percent_max = 0.0;
const char *color;
char *path;
int i;
src_line = rb_entry(node, struct source_line, node);
for (i = 0; i < src_line->nr_pcnt; i++) {
percent = src_line->samples[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
if (percent > percent_max)
percent_max = percent;
}
path = src_line->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s\n", path);
node = rb_next(node);
}
}
static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset)
if (h->addr[offset] != 0)
printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
sym->start + offset, h->addr[offset]);
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
}
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
int min_pcnt, int max_lines, int context)
{
struct dso *dso = map->dso;
char *filename;
const char *d_filename;
const char *evsel_name = perf_evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
struct disasm_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0;
int more = 0;
u64 len;
int width = 8;
int graph_dotted_len;
filename = strdup(dso->long_name);
if (!filename)
return -ENOMEM;
if (full_paths)
d_filename = filename;
else
d_filename = basename(filename);
len = symbol__size(sym);
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
width, width, "Percent", d_filename, evsel_name, h->sum);
printf("%-*.*s----\n",
graph_dotted_len, graph_dotted_len, graph_dotted_line);
if (verbose)
symbol__annotate_hits(sym, evsel);
list_for_each_entry(pos, &notes->src->source, node) {
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
switch (disasm_line__print(pos, sym, start, evsel, len,
min_pcnt, printed, max_lines,
queue)) {
case 0:
++printed;
if (context) {
printed += queue_len;
queue = NULL;
queue_len = 0;
}
break;
case 1:
/* filtered by max_lines */
++more;
break;
case -1:
default:
/*
* Filtered by min_pcnt or non IP lines when
* context != 0
*/
if (!context)
break;
if (queue_len == context)
queue = list_entry(queue->node.next, typeof(*queue), node);
else
++queue_len;
break;
}
}
free(filename);
return more;
}
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
memset(h, 0, notes->src->sizeof_sym_hist);
}
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
int len = symbol__size(sym), offset;
h->sum = 0;
for (offset = 0; offset < len; ++offset) {
h->addr[offset] = h->addr[offset] * 7 / 8;
h->sum += h->addr[offset];
}
}
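/*
 * Illustrative note: each call keeps 7/8 of every address counter and
 * rebuilds h->sum from the decayed values, so calling it periodically gives
 * an exponential decay of old samples in continuously updated views.
 */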
void disasm__purge(struct list_head *head)
{
struct disasm_line *pos, *n;
list_for_each_entry_safe(pos, n, head, node) {
list_del(&pos->node);
disasm_line__free(pos);
}
}
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
if (dl->offset == -1)
return fprintf(fp, "%s\n", dl->line);
printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
dl->ops.raw);
}
return printed + fprintf(fp, "\n");
}
size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
struct disasm_line *pos;
size_t printed = 0;
list_for_each_entry(pos, head, node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
u64 len;
if (symbol__disassemble(sym, map, 0) < 0)
return -1;
len = symbol__size(sym);
if (print_lines) {
srcline_full_filename = full_paths;
symbol__get_source_line(sym, map, evsel, &source_line, len);
print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(sym, map, evsel, full_paths,
min_pcnt, max_lines, 0);
if (print_lines)
symbol__free_source_line(sym, len);
disasm__purge(&symbol__annotation(sym)->src->source);
return 0;
}
bool ui__has_annotation(void)
{
return use_browser == 1 && perf_hpp_list.sym;
}
| gpl-2.0 |
Pivosgroup/buildroot-linux-kernel | drivers/usb/host/fhci-q.c | 42 | 7105 | /*
* Freescale QUICC Engine USB Host Controller Driver
*
* Copyright (c) Freescale Semicondutor, Inc. 2006.
* Shlomi Gridish <gridish@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
* Copyright (c) Logic Product Development, Inc. 2007
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
#include "../core/hcd.h"
#include "fhci.h"
/* maps the hardware error code to the USB error code */
static int status_to_error(u32 status)
{
if (status == USB_TD_OK)
return 0;
else if (status & USB_TD_RX_ER_CRC)
return -EILSEQ;
else if (status & USB_TD_RX_ER_NONOCT)
return -EPROTO;
else if (status & USB_TD_RX_ER_OVERUN)
return -ECOMM;
else if (status & USB_TD_RX_ER_BITSTUFF)
return -EPROTO;
else if (status & USB_TD_RX_ER_PID)
return -EILSEQ;
else if (status & (USB_TD_TX_ER_NAK | USB_TD_TX_ER_TIMEOUT))
return -ETIMEDOUT;
else if (status & USB_TD_TX_ER_STALL)
return -EPIPE;
else if (status & USB_TD_TX_ER_UNDERUN)
return -ENOSR;
else if (status & USB_TD_RX_DATA_UNDERUN)
return -EREMOTEIO;
else if (status & USB_TD_RX_DATA_OVERUN)
return -EOVERFLOW;
else
return -EINVAL;
}
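/*
 * Example (illustrative): a TD finishing with USB_TD_TX_ER_STALL is reported
 * to the USB core as -EPIPE, a NAK or timeout becomes -ETIMEDOUT, and
 * USB_TD_OK maps to 0.
 */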
void fhci_add_td_to_frame(struct fhci_time_frame *frame, struct td *td)
{
list_add_tail(&td->frame_lh, &frame->tds_list);
}
void fhci_add_tds_to_ed(struct ed *ed, struct td **td_list, int number)
{
int i;
for (i = 0; i < number; i++) {
struct td *td = td_list[i];
list_add_tail(&td->node, &ed->td_list);
}
if (ed->td_head == NULL)
ed->td_head = td_list[0];
}
static struct td *peek_td_from_ed(struct ed *ed)
{
struct td *td;
if (!list_empty(&ed->td_list))
td = list_entry(ed->td_list.next, struct td, node);
else
td = NULL;
return td;
}
struct td *fhci_remove_td_from_frame(struct fhci_time_frame *frame)
{
struct td *td;
if (!list_empty(&frame->tds_list)) {
td = list_entry(frame->tds_list.next, struct td, frame_lh);
list_del_init(frame->tds_list.next);
} else
td = NULL;
return td;
}
struct td *fhci_peek_td_from_frame(struct fhci_time_frame *frame)
{
struct td *td;
if (!list_empty(&frame->tds_list))
td = list_entry(frame->tds_list.next, struct td, frame_lh);
else
td = NULL;
return td;
}
struct td *fhci_remove_td_from_ed(struct ed *ed)
{
struct td *td;
if (!list_empty(&ed->td_list)) {
td = list_entry(ed->td_list.next, struct td, node);
list_del_init(ed->td_list.next);
/* if this TD was the ED's head, find next TD */
if (!list_empty(&ed->td_list))
ed->td_head = list_entry(ed->td_list.next, struct td,
node);
else
ed->td_head = NULL;
} else
td = NULL;
return td;
}
struct td *fhci_remove_td_from_done_list(struct fhci_controller_list *p_list)
{
struct td *td;
if (!list_empty(&p_list->done_list)) {
td = list_entry(p_list->done_list.next, struct td, node);
list_del_init(p_list->done_list.next);
} else
td = NULL;
return td;
}
void fhci_move_td_from_ed_to_done_list(struct fhci_usb *usb, struct ed *ed)
{
struct td *td;
td = ed->td_head;
list_del_init(&td->node);
/* If this TD was the ED's head, find the next TD */
if (!list_empty(&ed->td_list))
ed->td_head = list_entry(ed->td_list.next, struct td, node);
else {
ed->td_head = NULL;
ed->state = FHCI_ED_SKIP;
}
ed->toggle_carry = td->toggle;
list_add_tail(&td->node, &usb->hc_list->done_list);
if (td->ioc)
usb->transfer_confirm(usb->fhci);
}
/* free done FHCI URB resource such as ED and TD */
static void free_urb_priv(struct fhci_hcd *fhci, struct urb *urb)
{
int i;
struct urb_priv *urb_priv = urb->hcpriv;
struct ed *ed = urb_priv->ed;
for (i = 0; i < urb_priv->num_of_tds; i++) {
list_del_init(&urb_priv->tds[i]->node);
fhci_recycle_empty_td(fhci, urb_priv->tds[i]);
}
/* if this TD was the ED's head, find the next TD */
if (!list_empty(&ed->td_list))
ed->td_head = list_entry(ed->td_list.next, struct td, node);
else
ed->td_head = NULL;
kfree(urb_priv->tds);
kfree(urb_priv);
urb->hcpriv = NULL;
/* if the ED has no more TDs, unlink it */
if (ed->td_head == NULL)
list_del_init(&ed->node);
fhci->active_urbs--;
}
/* this routine called to complete and free done URB */
void fhci_urb_complete_free(struct fhci_hcd *fhci, struct urb *urb)
{
free_urb_priv(fhci, urb);
if (urb->status == -EINPROGRESS) {
if (urb->actual_length != urb->transfer_buffer_length &&
urb->transfer_flags & URB_SHORT_NOT_OK)
urb->status = -EREMOTEIO;
else
urb->status = 0;
}
usb_hcd_unlink_urb_from_ep(fhci_to_hcd(fhci), urb);
spin_unlock(&fhci->lock);
usb_hcd_giveback_urb(fhci_to_hcd(fhci), urb, urb->status);
spin_lock(&fhci->lock);
}
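/*
 * Illustrative note (assumption, not stated in the original source): the
 * fhci->lock is dropped around usb_hcd_giveback_urb() because the URB's
 * completion handler may resubmit URBs and re-enter this driver, which
 * would otherwise deadlock on the same spinlock.
 */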
/*
* calculate transfer length/stats and update the urb
* Precondition: irqsafe (only for urb->status locking)
*/
void fhci_done_td(struct urb *urb, struct td *td)
{
struct ed *ed = td->ed;
u32 cc = td->status;
/* ISO...drivers see per-TD length/status */
if (ed->mode == FHCI_TF_ISO) {
u32 len;
if (!(urb->transfer_flags & URB_SHORT_NOT_OK &&
cc == USB_TD_RX_DATA_UNDERUN))
cc = USB_TD_OK;
if (usb_pipeout(urb->pipe))
len = urb->iso_frame_desc[td->iso_index].length;
else
len = td->actual_len;
urb->actual_length += len;
urb->iso_frame_desc[td->iso_index].actual_length = len;
urb->iso_frame_desc[td->iso_index].status =
status_to_error(cc);
}
/* BULK,INT,CONTROL... drivers see aggregate length/status,
* except that "setup" bytes aren't counted and "short" transfers
* might not be reported as errors.
*/
else {
if (td->error_cnt >= 3)
urb->error_count = 3;
/* control endpoints only have soft stalls */
/* update packet status if needed (short may be ok) */
if (!(urb->transfer_flags & URB_SHORT_NOT_OK) &&
cc == USB_TD_RX_DATA_UNDERUN) {
ed->state = FHCI_ED_OPER;
cc = USB_TD_OK;
}
if (cc != USB_TD_OK) {
if (urb->status == -EINPROGRESS)
urb->status = status_to_error(cc);
}
/* count all non-empty packets except control SETUP packet */
if (td->type != FHCI_TA_SETUP || td->iso_index != 0)
urb->actual_length += td->actual_len;
}
}
/* there are some pending requests to unlink */
void fhci_del_ed_list(struct fhci_hcd *fhci, struct ed *ed)
{
struct td *td = peek_td_from_ed(ed);
struct urb *urb = td->urb;
struct urb_priv *urb_priv = urb->hcpriv;
if (urb_priv->state == URB_DEL) {
td = fhci_remove_td_from_ed(ed);
/* HC may have partly processed this TD */
if (td->status != USB_TD_INPROGRESS)
fhci_done_td(urb, td);
/* URB is done; clean up */
if (++(urb_priv->tds_cnt) == urb_priv->num_of_tds)
fhci_urb_complete_free(fhci, urb);
}
}
| gpl-2.0 |
abiyug/r-source | src/extra/intl/localename.c | 42 | 43417 | /* Determine name of the currently selected locale.
Copyright (C) 1995-1999, 2000-2007 Free Software Foundation, Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU Library General Public License as published
by the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
USA. */
/* Written by Ulrich Drepper <drepper@gnu.org>, 1995. */
/* Win32 code written by Tor Lillqvist <tml@iki.fi>. */
/* MacOS X code written by Bruno Haible <bruno@clisp.org>. */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
/* Specification. */
#ifdef IN_LIBINTL
# include "gettextP.h"
#else
# include "localename.h"
#endif
#include <stdlib.h>
#include <locale.h>
#if HAVE_CFLOCALECOPYCURRENT || HAVE_CFPREFERENCESCOPYAPPVALUE
# include <string.h>
# include <CoreFoundation/CFString.h>
# if HAVE_CFLOCALECOPYCURRENT
# include <CoreFoundation/CFLocale.h>
# elif HAVE_CFPREFERENCESCOPYAPPVALUE
# include <CoreFoundation/CFPreferences.h>
# endif
#endif
#if defined _WIN32 || defined __WIN32__
# define WIN32_NATIVE
#endif
#ifdef WIN32_NATIVE
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
/* List of language codes, sorted by value:
0x01 LANG_ARABIC
0x02 LANG_BULGARIAN
0x03 LANG_CATALAN
0x04 LANG_CHINESE
0x05 LANG_CZECH
0x06 LANG_DANISH
0x07 LANG_GERMAN
0x08 LANG_GREEK
0x09 LANG_ENGLISH
0x0a LANG_SPANISH
0x0b LANG_FINNISH
0x0c LANG_FRENCH
0x0d LANG_HEBREW
0x0e LANG_HUNGARIAN
0x0f LANG_ICELANDIC
0x10 LANG_ITALIAN
0x11 LANG_JAPANESE
0x12 LANG_KOREAN
0x13 LANG_DUTCH
0x14 LANG_NORWEGIAN
0x15 LANG_POLISH
0x16 LANG_PORTUGUESE
0x17 LANG_RHAETO_ROMANCE
0x18 LANG_ROMANIAN
0x19 LANG_RUSSIAN
0x1a LANG_CROATIAN == LANG_SERBIAN
0x1b LANG_SLOVAK
0x1c LANG_ALBANIAN
0x1d LANG_SWEDISH
0x1e LANG_THAI
0x1f LANG_TURKISH
0x20 LANG_URDU
0x21 LANG_INDONESIAN
0x22 LANG_UKRAINIAN
0x23 LANG_BELARUSIAN
0x24 LANG_SLOVENIAN
0x25 LANG_ESTONIAN
0x26 LANG_LATVIAN
0x27 LANG_LITHUANIAN
0x28 LANG_TAJIK
0x29 LANG_FARSI
0x2a LANG_VIETNAMESE
0x2b LANG_ARMENIAN
0x2c LANG_AZERI
0x2d LANG_BASQUE
0x2e LANG_SORBIAN
0x2f LANG_MACEDONIAN
0x30 LANG_SUTU
0x31 LANG_TSONGA
0x32 LANG_TSWANA
0x33 LANG_VENDA
0x34 LANG_XHOSA
0x35 LANG_ZULU
0x36 LANG_AFRIKAANS
0x37 LANG_GEORGIAN
0x38 LANG_FAEROESE
0x39 LANG_HINDI
0x3a LANG_MALTESE
0x3b LANG_SAAMI
0x3c LANG_GAELIC
0x3d LANG_YIDDISH
0x3e LANG_MALAY
0x3f LANG_KAZAK
0x40 LANG_KYRGYZ
0x41 LANG_SWAHILI
0x42 LANG_TURKMEN
0x43 LANG_UZBEK
0x44 LANG_TATAR
0x45 LANG_BENGALI
0x46 LANG_PUNJABI
0x47 LANG_GUJARATI
0x48 LANG_ORIYA
0x49 LANG_TAMIL
0x4a LANG_TELUGU
0x4b LANG_KANNADA
0x4c LANG_MALAYALAM
0x4d LANG_ASSAMESE
0x4e LANG_MARATHI
0x4f LANG_SANSKRIT
0x50 LANG_MONGOLIAN
0x51 LANG_TIBETAN
0x52 LANG_WELSH
0x53 LANG_CAMBODIAN
0x54 LANG_LAO
0x55 LANG_BURMESE
0x56 LANG_GALICIAN
0x57 LANG_KONKANI
0x58 LANG_MANIPURI
0x59 LANG_SINDHI
0x5a LANG_SYRIAC
0x5b LANG_SINHALESE
0x5c LANG_CHEROKEE
0x5d LANG_INUKTITUT
0x5e LANG_AMHARIC
0x5f LANG_TAMAZIGHT
0x60 LANG_KASHMIRI
0x61 LANG_NEPALI
0x62 LANG_FRISIAN
0x63 LANG_PASHTO
0x64 LANG_TAGALOG
0x65 LANG_DIVEHI
0x66 LANG_EDO
0x67 LANG_FULFULDE
0x68 LANG_HAUSA
0x69 LANG_IBIBIO
0x6a LANG_YORUBA
0x70 LANG_IGBO
0x71 LANG_KANURI
0x72 LANG_OROMO
0x73 LANG_TIGRINYA
0x74 LANG_GUARANI
0x75 LANG_HAWAIIAN
0x76 LANG_LATIN
0x77 LANG_SOMALI
0x78 LANG_YI
0x79 LANG_PAPIAMENTU
*/
/* Mingw headers don't have latest language and sublanguage codes. */
# ifndef LANG_AFRIKAANS
# define LANG_AFRIKAANS 0x36
# endif
# ifndef LANG_ALBANIAN
# define LANG_ALBANIAN 0x1c
# endif
# ifndef LANG_AMHARIC
# define LANG_AMHARIC 0x5e
# endif
# ifndef LANG_ARABIC
# define LANG_ARABIC 0x01
# endif
# ifndef LANG_ARMENIAN
# define LANG_ARMENIAN 0x2b
# endif
# ifndef LANG_ASSAMESE
# define LANG_ASSAMESE 0x4d
# endif
# ifndef LANG_AZERI
# define LANG_AZERI 0x2c
# endif
# ifndef LANG_BASQUE
# define LANG_BASQUE 0x2d
# endif
# ifndef LANG_BELARUSIAN
# define LANG_BELARUSIAN 0x23
# endif
# ifndef LANG_BENGALI
# define LANG_BENGALI 0x45
# endif
# ifndef LANG_BURMESE
# define LANG_BURMESE 0x55
# endif
# ifndef LANG_CAMBODIAN
# define LANG_CAMBODIAN 0x53
# endif
# ifndef LANG_CATALAN
# define LANG_CATALAN 0x03
# endif
# ifndef LANG_CHEROKEE
# define LANG_CHEROKEE 0x5c
# endif
# ifndef LANG_DIVEHI
# define LANG_DIVEHI 0x65
# endif
# ifndef LANG_EDO
# define LANG_EDO 0x66
# endif
# ifndef LANG_ESTONIAN
# define LANG_ESTONIAN 0x25
# endif
# ifndef LANG_FAEROESE
# define LANG_FAEROESE 0x38
# endif
# ifndef LANG_FARSI
# define LANG_FARSI 0x29
# endif
# ifndef LANG_FRISIAN
# define LANG_FRISIAN 0x62
# endif
# ifndef LANG_FULFULDE
# define LANG_FULFULDE 0x67
# endif
# ifndef LANG_GAELIC
# define LANG_GAELIC 0x3c
# endif
# ifndef LANG_GALICIAN
# define LANG_GALICIAN 0x56
# endif
# ifndef LANG_GEORGIAN
# define LANG_GEORGIAN 0x37
# endif
# ifndef LANG_GUARANI
# define LANG_GUARANI 0x74
# endif
# ifndef LANG_GUJARATI
# define LANG_GUJARATI 0x47
# endif
# ifndef LANG_HAUSA
# define LANG_HAUSA 0x68
# endif
# ifndef LANG_HAWAIIAN
# define LANG_HAWAIIAN 0x75
# endif
# ifndef LANG_HEBREW
# define LANG_HEBREW 0x0d
# endif
# ifndef LANG_HINDI
# define LANG_HINDI 0x39
# endif
# ifndef LANG_IBIBIO
# define LANG_IBIBIO 0x69
# endif
# ifndef LANG_IGBO
# define LANG_IGBO 0x70
# endif
# ifndef LANG_INDONESIAN
# define LANG_INDONESIAN 0x21
# endif
# ifndef LANG_INUKTITUT
# define LANG_INUKTITUT 0x5d
# endif
# ifndef LANG_KANNADA
# define LANG_KANNADA 0x4b
# endif
# ifndef LANG_KANURI
# define LANG_KANURI 0x71
# endif
# ifndef LANG_KASHMIRI
# define LANG_KASHMIRI 0x60
# endif
# ifndef LANG_KAZAK
# define LANG_KAZAK 0x3f
# endif
# ifndef LANG_KONKANI
# define LANG_KONKANI 0x57
# endif
# ifndef LANG_KYRGYZ
# define LANG_KYRGYZ 0x40
# endif
# ifndef LANG_LAO
# define LANG_LAO 0x54
# endif
# ifndef LANG_LATIN
# define LANG_LATIN 0x76
# endif
# ifndef LANG_LATVIAN
# define LANG_LATVIAN 0x26
# endif
# ifndef LANG_LITHUANIAN
# define LANG_LITHUANIAN 0x27
# endif
# ifndef LANG_MACEDONIAN
# define LANG_MACEDONIAN 0x2f
# endif
# ifndef LANG_MALAY
# define LANG_MALAY 0x3e
# endif
# ifndef LANG_MALAYALAM
# define LANG_MALAYALAM 0x4c
# endif
# ifndef LANG_MALTESE
# define LANG_MALTESE 0x3a
# endif
# ifndef LANG_MANIPURI
# define LANG_MANIPURI 0x58
# endif
# ifndef LANG_MARATHI
# define LANG_MARATHI 0x4e
# endif
# ifndef LANG_MONGOLIAN
# define LANG_MONGOLIAN 0x50
# endif
# ifndef LANG_NEPALI
# define LANG_NEPALI 0x61
# endif
# ifndef LANG_ORIYA
# define LANG_ORIYA 0x48
# endif
# ifndef LANG_OROMO
# define LANG_OROMO 0x72
# endif
# ifndef LANG_PAPIAMENTU
# define LANG_PAPIAMENTU 0x79
# endif
# ifndef LANG_PASHTO
# define LANG_PASHTO 0x63
# endif
# ifndef LANG_PUNJABI
# define LANG_PUNJABI 0x46
# endif
# ifndef LANG_RHAETO_ROMANCE
# define LANG_RHAETO_ROMANCE 0x17
# endif
# ifndef LANG_SAAMI
# define LANG_SAAMI 0x3b
# endif
# ifndef LANG_SANSKRIT
# define LANG_SANSKRIT 0x4f
# endif
# ifndef LANG_SERBIAN
# define LANG_SERBIAN 0x1a
# endif
# ifndef LANG_SINDHI
# define LANG_SINDHI 0x59
# endif
# ifndef LANG_SINHALESE
# define LANG_SINHALESE 0x5b
# endif
# ifndef LANG_SLOVAK
# define LANG_SLOVAK 0x1b
# endif
# ifndef LANG_SOMALI
# define LANG_SOMALI 0x77
# endif
# ifndef LANG_SORBIAN
# define LANG_SORBIAN 0x2e
# endif
# ifndef LANG_SUTU
# define LANG_SUTU 0x30
# endif
# ifndef LANG_SWAHILI
# define LANG_SWAHILI 0x41
# endif
# ifndef LANG_SYRIAC
# define LANG_SYRIAC 0x5a
# endif
# ifndef LANG_TAGALOG
# define LANG_TAGALOG 0x64
# endif
# ifndef LANG_TAJIK
# define LANG_TAJIK 0x28
# endif
# ifndef LANG_TAMAZIGHT
# define LANG_TAMAZIGHT 0x5f
# endif
# ifndef LANG_TAMIL
# define LANG_TAMIL 0x49
# endif
# ifndef LANG_TATAR
# define LANG_TATAR 0x44
# endif
# ifndef LANG_TELUGU
# define LANG_TELUGU 0x4a
# endif
# ifndef LANG_THAI
# define LANG_THAI 0x1e
# endif
# ifndef LANG_TIBETAN
# define LANG_TIBETAN 0x51
# endif
# ifndef LANG_TIGRINYA
# define LANG_TIGRINYA 0x73
# endif
# ifndef LANG_TSONGA
# define LANG_TSONGA 0x31
# endif
# ifndef LANG_TSWANA
# define LANG_TSWANA 0x32
# endif
# ifndef LANG_TURKMEN
# define LANG_TURKMEN 0x42
# endif
# ifndef LANG_UKRAINIAN
# define LANG_UKRAINIAN 0x22
# endif
# ifndef LANG_URDU
# define LANG_URDU 0x20
# endif
# ifndef LANG_UZBEK
# define LANG_UZBEK 0x43
# endif
# ifndef LANG_VENDA
# define LANG_VENDA 0x33
# endif
# ifndef LANG_VIETNAMESE
# define LANG_VIETNAMESE 0x2a
# endif
# ifndef LANG_WELSH
# define LANG_WELSH 0x52
# endif
# ifndef LANG_XHOSA
# define LANG_XHOSA 0x34
# endif
# ifndef LANG_YI
# define LANG_YI 0x78
# endif
# ifndef LANG_YIDDISH
# define LANG_YIDDISH 0x3d
# endif
# ifndef LANG_YORUBA
# define LANG_YORUBA 0x6a
# endif
# ifndef LANG_ZULU
# define LANG_ZULU 0x35
# endif
# ifndef SUBLANG_ARABIC_SAUDI_ARABIA
# define SUBLANG_ARABIC_SAUDI_ARABIA 0x01
# endif
# ifndef SUBLANG_ARABIC_IRAQ
# define SUBLANG_ARABIC_IRAQ 0x02
# endif
# ifndef SUBLANG_ARABIC_EGYPT
# define SUBLANG_ARABIC_EGYPT 0x03
# endif
# ifndef SUBLANG_ARABIC_LIBYA
# define SUBLANG_ARABIC_LIBYA 0x04
# endif
# ifndef SUBLANG_ARABIC_ALGERIA
# define SUBLANG_ARABIC_ALGERIA 0x05
# endif
# ifndef SUBLANG_ARABIC_MOROCCO
# define SUBLANG_ARABIC_MOROCCO 0x06
# endif
# ifndef SUBLANG_ARABIC_TUNISIA
# define SUBLANG_ARABIC_TUNISIA 0x07
# endif
# ifndef SUBLANG_ARABIC_OMAN
# define SUBLANG_ARABIC_OMAN 0x08
# endif
# ifndef SUBLANG_ARABIC_YEMEN
# define SUBLANG_ARABIC_YEMEN 0x09
# endif
# ifndef SUBLANG_ARABIC_SYRIA
# define SUBLANG_ARABIC_SYRIA 0x0a
# endif
# ifndef SUBLANG_ARABIC_JORDAN
# define SUBLANG_ARABIC_JORDAN 0x0b
# endif
# ifndef SUBLANG_ARABIC_LEBANON
# define SUBLANG_ARABIC_LEBANON 0x0c
# endif
# ifndef SUBLANG_ARABIC_KUWAIT
# define SUBLANG_ARABIC_KUWAIT 0x0d
# endif
# ifndef SUBLANG_ARABIC_UAE
# define SUBLANG_ARABIC_UAE 0x0e
# endif
# ifndef SUBLANG_ARABIC_BAHRAIN
# define SUBLANG_ARABIC_BAHRAIN 0x0f
# endif
# ifndef SUBLANG_ARABIC_QATAR
# define SUBLANG_ARABIC_QATAR 0x10
# endif
# ifndef SUBLANG_AZERI_LATIN
# define SUBLANG_AZERI_LATIN 0x01
# endif
# ifndef SUBLANG_AZERI_CYRILLIC
# define SUBLANG_AZERI_CYRILLIC 0x02
# endif
# ifndef SUBLANG_BENGALI_INDIA
# define SUBLANG_BENGALI_INDIA 0x01
# endif
# ifndef SUBLANG_BENGALI_BANGLADESH
# define SUBLANG_BENGALI_BANGLADESH 0x02
# endif
# ifndef SUBLANG_CHINESE_MACAU
# define SUBLANG_CHINESE_MACAU 0x05
# endif
# ifndef SUBLANG_ENGLISH_SOUTH_AFRICA
# define SUBLANG_ENGLISH_SOUTH_AFRICA 0x07
# endif
# ifndef SUBLANG_ENGLISH_JAMAICA
# define SUBLANG_ENGLISH_JAMAICA 0x08
# endif
# ifndef SUBLANG_ENGLISH_CARIBBEAN
# define SUBLANG_ENGLISH_CARIBBEAN 0x09
# endif
# ifndef SUBLANG_ENGLISH_BELIZE
# define SUBLANG_ENGLISH_BELIZE 0x0a
# endif
# ifndef SUBLANG_ENGLISH_TRINIDAD
# define SUBLANG_ENGLISH_TRINIDAD 0x0b
# endif
# ifndef SUBLANG_ENGLISH_ZIMBABWE
# define SUBLANG_ENGLISH_ZIMBABWE 0x0c
# endif
# ifndef SUBLANG_ENGLISH_PHILIPPINES
# define SUBLANG_ENGLISH_PHILIPPINES 0x0d
# endif
# ifndef SUBLANG_ENGLISH_INDONESIA
# define SUBLANG_ENGLISH_INDONESIA 0x0e
# endif
# ifndef SUBLANG_ENGLISH_HONGKONG
# define SUBLANG_ENGLISH_HONGKONG 0x0f
# endif
# ifndef SUBLANG_ENGLISH_INDIA
# define SUBLANG_ENGLISH_INDIA 0x10
# endif
# ifndef SUBLANG_ENGLISH_MALAYSIA
# define SUBLANG_ENGLISH_MALAYSIA 0x11
# endif
# ifndef SUBLANG_ENGLISH_SINGAPORE
# define SUBLANG_ENGLISH_SINGAPORE 0x12
# endif
# ifndef SUBLANG_FRENCH_LUXEMBOURG
# define SUBLANG_FRENCH_LUXEMBOURG 0x05
# endif
# ifndef SUBLANG_FRENCH_MONACO
# define SUBLANG_FRENCH_MONACO 0x06
# endif
# ifndef SUBLANG_FRENCH_WESTINDIES
# define SUBLANG_FRENCH_WESTINDIES 0x07
# endif
# ifndef SUBLANG_FRENCH_REUNION
# define SUBLANG_FRENCH_REUNION 0x08
# endif
# ifndef SUBLANG_FRENCH_CONGO
# define SUBLANG_FRENCH_CONGO 0x09
# endif
# ifndef SUBLANG_FRENCH_SENEGAL
# define SUBLANG_FRENCH_SENEGAL 0x0a
# endif
# ifndef SUBLANG_FRENCH_CAMEROON
# define SUBLANG_FRENCH_CAMEROON 0x0b
# endif
# ifndef SUBLANG_FRENCH_COTEDIVOIRE
# define SUBLANG_FRENCH_COTEDIVOIRE 0x0c
# endif
# ifndef SUBLANG_FRENCH_MALI
# define SUBLANG_FRENCH_MALI 0x0d
# endif
# ifndef SUBLANG_FRENCH_MOROCCO
# define SUBLANG_FRENCH_MOROCCO 0x0e
# endif
# ifndef SUBLANG_FRENCH_HAITI
# define SUBLANG_FRENCH_HAITI 0x0f
# endif
# ifndef SUBLANG_GERMAN_LUXEMBOURG
# define SUBLANG_GERMAN_LUXEMBOURG 0x04
# endif
# ifndef SUBLANG_GERMAN_LIECHTENSTEIN
# define SUBLANG_GERMAN_LIECHTENSTEIN 0x05
# endif
# ifndef SUBLANG_KASHMIRI_INDIA
# define SUBLANG_KASHMIRI_INDIA 0x02
# endif
# ifndef SUBLANG_MALAY_MALAYSIA
# define SUBLANG_MALAY_MALAYSIA 0x01
# endif
# ifndef SUBLANG_MALAY_BRUNEI_DARUSSALAM
# define SUBLANG_MALAY_BRUNEI_DARUSSALAM 0x02
# endif
# ifndef SUBLANG_NEPALI_INDIA
# define SUBLANG_NEPALI_INDIA 0x02
# endif
# ifndef SUBLANG_PUNJABI_INDIA
# define SUBLANG_PUNJABI_INDIA 0x01
# endif
# ifndef SUBLANG_PUNJABI_PAKISTAN
# define SUBLANG_PUNJABI_PAKISTAN 0x02
# endif
# ifndef SUBLANG_ROMANIAN_ROMANIA
# define SUBLANG_ROMANIAN_ROMANIA 0x01
# endif
# ifndef SUBLANG_ROMANIAN_MOLDOVA
# define SUBLANG_ROMANIAN_MOLDOVA 0x02
# endif
# ifndef SUBLANG_SERBIAN_LATIN
# define SUBLANG_SERBIAN_LATIN 0x02
# endif
# ifndef SUBLANG_SERBIAN_CYRILLIC
# define SUBLANG_SERBIAN_CYRILLIC 0x03
# endif
# ifndef SUBLANG_SINDHI_PAKISTAN
# define SUBLANG_SINDHI_PAKISTAN 0x01
# endif
# ifndef SUBLANG_SINDHI_AFGHANISTAN
# define SUBLANG_SINDHI_AFGHANISTAN 0x02
# endif
# ifndef SUBLANG_SPANISH_GUATEMALA
# define SUBLANG_SPANISH_GUATEMALA 0x04
# endif
# ifndef SUBLANG_SPANISH_COSTA_RICA
# define SUBLANG_SPANISH_COSTA_RICA 0x05
# endif
# ifndef SUBLANG_SPANISH_PANAMA
# define SUBLANG_SPANISH_PANAMA 0x06
# endif
# ifndef SUBLANG_SPANISH_DOMINICAN_REPUBLIC
# define SUBLANG_SPANISH_DOMINICAN_REPUBLIC 0x07
# endif
# ifndef SUBLANG_SPANISH_VENEZUELA
# define SUBLANG_SPANISH_VENEZUELA 0x08
# endif
# ifndef SUBLANG_SPANISH_COLOMBIA
# define SUBLANG_SPANISH_COLOMBIA 0x09
# endif
# ifndef SUBLANG_SPANISH_PERU
# define SUBLANG_SPANISH_PERU 0x0a
# endif
# ifndef SUBLANG_SPANISH_ARGENTINA
# define SUBLANG_SPANISH_ARGENTINA 0x0b
# endif
# ifndef SUBLANG_SPANISH_ECUADOR
# define SUBLANG_SPANISH_ECUADOR 0x0c
# endif
# ifndef SUBLANG_SPANISH_CHILE
# define SUBLANG_SPANISH_CHILE 0x0d
# endif
# ifndef SUBLANG_SPANISH_URUGUAY
# define SUBLANG_SPANISH_URUGUAY 0x0e
# endif
# ifndef SUBLANG_SPANISH_PARAGUAY
# define SUBLANG_SPANISH_PARAGUAY 0x0f
# endif
# ifndef SUBLANG_SPANISH_BOLIVIA
# define SUBLANG_SPANISH_BOLIVIA 0x10
# endif
# ifndef SUBLANG_SPANISH_EL_SALVADOR
# define SUBLANG_SPANISH_EL_SALVADOR 0x11
# endif
# ifndef SUBLANG_SPANISH_HONDURAS
# define SUBLANG_SPANISH_HONDURAS 0x12
# endif
# ifndef SUBLANG_SPANISH_NICARAGUA
# define SUBLANG_SPANISH_NICARAGUA 0x13
# endif
# ifndef SUBLANG_SPANISH_PUERTO_RICO
# define SUBLANG_SPANISH_PUERTO_RICO 0x14
# endif
# ifndef SUBLANG_SWEDISH_FINLAND
# define SUBLANG_SWEDISH_FINLAND 0x02
# endif
# ifndef SUBLANG_TAMAZIGHT_ARABIC
# define SUBLANG_TAMAZIGHT_ARABIC 0x01
# endif
# ifndef SUBLANG_TAMAZIGHT_ALGERIA_LATIN
# define SUBLANG_TAMAZIGHT_ALGERIA_LATIN 0x02
# endif
# ifndef SUBLANG_TIGRINYA_ETHIOPIA
# define SUBLANG_TIGRINYA_ETHIOPIA 0x01
# endif
# ifndef SUBLANG_TIGRINYA_ERITREA
# define SUBLANG_TIGRINYA_ERITREA 0x02
# endif
# ifndef SUBLANG_URDU_PAKISTAN
# define SUBLANG_URDU_PAKISTAN 0x01
# endif
# ifndef SUBLANG_URDU_INDIA
# define SUBLANG_URDU_INDIA 0x02
# endif
# ifndef SUBLANG_UZBEK_LATIN
# define SUBLANG_UZBEK_LATIN 0x01
# endif
# ifndef SUBLANG_UZBEK_CYRILLIC
# define SUBLANG_UZBEK_CYRILLIC 0x02
# endif
#endif
# if HAVE_CFLOCALECOPYCURRENT || HAVE_CFPREFERENCESCOPYAPPVALUE
/* MacOS X 10.2 or newer */
/* Canonicalize a MacOS X locale name to a Unix locale name.
NAME is a sufficiently large buffer.
On input, it contains the MacOS X locale name.
On output, it contains the Unix locale name. */
# if !defined IN_LIBINTL
static
# endif
void
gl_locale_name_canonicalize (char *name)
{
/* This conversion is based on a posting by
Deborah GoldSmith <goldsmit@apple.com> on 2005-03-08,
http://lists.apple.com/archives/carbon-dev/2005/Mar/msg00293.html */
/* Convert legacy (NeXTstep inherited) English names to Unix (ISO 639 and
ISO 3166) names. Prior to MacOS X 10.3, there is no API for doing this.
Therefore we do it ourselves, using a table based on the results of the
MacOS X 10.3.8 function
CFLocaleCreateCanonicalLocaleIdentifierFromString(). */
typedef struct { const char legacy[21+1]; const char unixy[5+1]; }
legacy_entry;
static const legacy_entry legacy_table[] = {
{ "Afrikaans", "af" },
{ "Albanian", "sq" },
{ "Amharic", "am" },
{ "Arabic", "ar" },
{ "Armenian", "hy" },
{ "Assamese", "as" },
{ "Aymara", "ay" },
{ "Azerbaijani", "az" },
{ "Basque", "eu" },
{ "Belarusian", "be" },
{ "Belorussian", "be" },
{ "Bengali", "bn" },
{ "Brazilian Portugese", "pt_BR" },
{ "Brazilian Portuguese", "pt_BR" },
{ "Breton", "br" },
{ "Bulgarian", "bg" },
{ "Burmese", "my" },
{ "Byelorussian", "be" },
{ "Catalan", "ca" },
{ "Chewa", "ny" },
{ "Chichewa", "ny" },
{ "Chinese", "zh" },
{ "Chinese, Simplified", "zh_CN" },
{ "Chinese, Traditional", "zh_TW" },
{ "Chinese, Tradtional", "zh_TW" },
{ "Croatian", "hr" },
{ "Czech", "cs" },
{ "Danish", "da" },
{ "Dutch", "nl" },
{ "Dzongkha", "dz" },
{ "English", "en" },
{ "Esperanto", "eo" },
{ "Estonian", "et" },
{ "Faroese", "fo" },
{ "Farsi", "fa" },
{ "Finnish", "fi" },
{ "Flemish", "nl_BE" },
{ "French", "fr" },
{ "Galician", "gl" },
{ "Gallegan", "gl" },
{ "Georgian", "ka" },
{ "German", "de" },
{ "Greek", "el" },
{ "Greenlandic", "kl" },
{ "Guarani", "gn" },
{ "Gujarati", "gu" },
{ "Hawaiian", "haw" }, /* Yes, "haw", not "cpe". */
{ "Hebrew", "he" },
{ "Hindi", "hi" },
{ "Hungarian", "hu" },
{ "Icelandic", "is" },
{ "Indonesian", "id" },
{ "Inuktitut", "iu" },
{ "Irish", "ga" },
{ "Italian", "it" },
{ "Japanese", "ja" },
{ "Javanese", "jv" },
{ "Kalaallisut", "kl" },
{ "Kannada", "kn" },
{ "Kashmiri", "ks" },
{ "Kazakh", "kk" },
{ "Khmer", "km" },
{ "Kinyarwanda", "rw" },
{ "Kirghiz", "ky" },
{ "Korean", "ko" },
{ "Kurdish", "ku" },
{ "Latin", "la" },
{ "Latvian", "lv" },
{ "Lithuanian", "lt" },
{ "Macedonian", "mk" },
{ "Malagasy", "mg" },
{ "Malay", "ms" },
{ "Malayalam", "ml" },
{ "Maltese", "mt" },
{ "Manx", "gv" },
{ "Marathi", "mr" },
{ "Moldavian", "mo" },
{ "Mongolian", "mn" },
{ "Nepali", "ne" },
{ "Norwegian", "nb" }, /* Yes, "nb", not the obsolete "no". */
{ "Nyanja", "ny" },
{ "Nynorsk", "nn" },
{ "Oriya", "or" },
{ "Oromo", "om" },
{ "Panjabi", "pa" },
{ "Pashto", "ps" },
{ "Persian", "fa" },
{ "Polish", "pl" },
{ "Portuguese", "pt" },
{ "Portuguese, Brazilian", "pt_BR" },
{ "Punjabi", "pa" },
{ "Pushto", "ps" },
{ "Quechua", "qu" },
{ "Romanian", "ro" },
{ "Ruanda", "rw" },
{ "Rundi", "rn" },
{ "Russian", "ru" },
{ "Sami", "se_NO" }, /* Not just "se". */
{ "Sanskrit", "sa" },
{ "Scottish", "gd" },
{ "Serbian", "sr" },
{ "Simplified Chinese", "zh_CN" },
{ "Sindhi", "sd" },
{ "Sinhalese", "si" },
{ "Slovak", "sk" },
{ "Slovenian", "sl" },
{ "Somali", "so" },
{ "Spanish", "es" },
{ "Sundanese", "su" },
{ "Swahili", "sw" },
{ "Swedish", "sv" },
{ "Tagalog", "tl" },
{ "Tajik", "tg" },
{ "Tajiki", "tg" },
{ "Tamil", "ta" },
{ "Tatar", "tt" },
{ "Telugu", "te" },
{ "Thai", "th" },
{ "Tibetan", "bo" },
{ "Tigrinya", "ti" },
{ "Tongan", "to" },
{ "Traditional Chinese", "zh_TW" },
{ "Turkish", "tr" },
{ "Turkmen", "tk" },
{ "Uighur", "ug" },
{ "Ukrainian", "uk" },
{ "Urdu", "ur" },
{ "Uzbek", "uz" },
{ "Vietnamese", "vi" },
{ "Welsh", "cy" },
{ "Yiddish", "yi" }
};
/* Convert new-style locale names with language tags (ISO 639 and ISO 15924)
to Unix (ISO 639 and ISO 3166) names. */
typedef struct { const char langtag[7+1]; const char unixy[12+1]; }
langtag_entry;
static const langtag_entry langtag_table[] = {
/* MacOS X has "az-Arab", "az-Cyrl", "az-Latn".
The default script for az on Unix is Latin. */
{ "az-Latn", "az" },
/* MacOS X has "ga-dots". Does not yet exist on Unix. */
{ "ga-dots", "ga" },
/* MacOS X has "kk-Cyrl". Does not yet exist on Unix. */
/* MacOS X has "mn-Cyrl", "mn-Mong".
The default script for mn on Unix is Cyrillic. */
{ "mn-Cyrl", "mn" },
/* MacOS X has "ms-Arab", "ms-Latn".
The default script for ms on Unix is Latin. */
{ "ms-Latn", "ms" },
/* MacOS X has "tg-Cyrl".
The default script for tg on Unix is Cyrillic. */
{ "tg-Cyrl", "tg" },
/* MacOS X has "tk-Cyrl". Does not yet exist on Unix. */
/* MacOS X has "tt-Cyrl".
The default script for tt on Unix is Cyrillic. */
{ "tt-Cyrl", "tt" },
/* MacOS X has "zh-Hans", "zh-Hant".
Country codes are used to distinguish these on Unix. */
{ "zh-Hans", "zh_CN" },
{ "zh-Hant", "zh_TW" }
};
/* Convert script names (ISO 15924) to Unix conventions.
See http://www.unicode.org/iso15924/iso15924-codes.html */
typedef struct { const char script[4+1]; const char unixy[9+1]; }
script_entry;
static const script_entry script_table[] = {
{ "Arab", "arabic" },
{ "Cyrl", "cyrillic" },
{ "Mong", "mongolian" }
};
/* Step 1: Convert using legacy_table. */
if (name[0] >= 'A' && name[0] <= 'Z')
{
unsigned int i1, i2;
i1 = 0;
i2 = sizeof (legacy_table) / sizeof (legacy_entry);
while (i2 - i1 > 1)
{
/* At this point we know that if name occurs in legacy_table,
its index must be >= i1 and < i2. */
unsigned int i = (i1 + i2) >> 1;
const legacy_entry *p = &legacy_table[i];
if (strcmp (name, p->legacy) < 0)
i2 = i;
else
i1 = i;
}
if (strcmp (name, legacy_table[i1].legacy) == 0)
{
strcpy (name, legacy_table[i1].unixy);
return;
}
}
/* Step 2: Convert using langtag_table and script_table. */
if (strlen (name) == 7 && name[2] == '-')
{
unsigned int i1, i2;
i1 = 0;
i2 = sizeof (langtag_table) / sizeof (langtag_entry);
while (i2 - i1 > 1)
{
/* At this point we know that if name occurs in langtag_table,
its index must be >= i1 and < i2. */
unsigned int i = (i1 + i2) >> 1;
const langtag_entry *p = &langtag_table[i];
if (strcmp (name, p->langtag) < 0)
i2 = i;
else
i1 = i;
}
if (strcmp (name, langtag_table[i1].langtag) == 0)
{
strcpy (name, langtag_table[i1].unixy);
return;
}
i1 = 0;
i2 = sizeof (script_table) / sizeof (script_entry);
while (i2 - i1 > 1)
{
/* At this point we know that if (name + 3) occurs in script_table,
its index must be >= i1 and < i2. */
unsigned int i = (i1 + i2) >> 1;
const script_entry *p = &script_table[i];
if (strcmp (name + 3, p->script) < 0)
i2 = i;
else
i1 = i;
}
if (strcmp (name + 3, script_table[i1].script) == 0)
{
name[2] = '@';
strcpy (name + 3, script_table[i1].unixy);
return;
}
}
/* Step 3: Convert new-style dash to Unix underscore. */
{
char *p;
for (p = name; *p != '\0'; p++)
if (*p == '-')
*p = '_';
}
}
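/* Examples of the three steps (illustrative, not in the original source):
     "Portuguese, Brazilian" -> "pt_BR"      (step 1, legacy_table)
     "zh-Hant"               -> "zh_TW"      (step 2, langtag_table)
     "az-Arab"               -> "az@arabic"  (step 2, script_table)
     "en-US"                 -> "en_US"      (step 3, dash to underscore) */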
#endif
/* XPG3 defines the result of 'setlocale (category, NULL)' as:
"Directs 'setlocale()' to query 'category' and return the current
setting of 'local'."
However it does not specify the exact format. Neither do SUSV2 and
ISO C 99. So we can use this feature only on selected systems (e.g.
those using GNU C Library). */
#if defined _LIBC || (defined __GLIBC__ && __GLIBC__ >= 2)
# define HAVE_LOCALE_NULL
#endif
/* Determine the current locale's name, and canonicalize it into XPG syntax
language[_territory][.codeset][@modifier]
The codeset part in the result is not reliable; the locale_charset()
should be used for codeset information instead.
The result must not be freed; it is statically allocated. */
const char *
gl_locale_name_posix (int category, const char *categoryname)
{
/* Use the POSIX methods of looking to 'LC_ALL', 'LC_xxx', and 'LANG'.
On some systems this can be done by the 'setlocale' function itself. */
#if defined HAVE_SETLOCALE && defined HAVE_LC_MESSAGES && defined HAVE_LOCALE_NULL
return setlocale (category, NULL);
#else
const char *retval;
/* Setting of LC_ALL overrides all other. */
retval = getenv ("LC_ALL");
if (retval != NULL && retval[0] != '\0')
return retval;
/* Next comes the name of the desired category. */
retval = getenv (categoryname);
if (retval != NULL && retval[0] != '\0')
return retval;
/* Last possibility is the LANG environment variable. */
retval = getenv ("LANG");
if (retval != NULL && retval[0] != '\0')
return retval;
return NULL;
#endif
}
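/*
 * Example (illustrative): with LC_ALL unset, LC_MESSAGES="de_DE.UTF-8" and
 * LANG="en_US", gl_locale_name_posix (LC_MESSAGES, "LC_MESSAGES") yields
 * "de_DE.UTF-8"; LC_ALL would override both, and LANG is only the final
 * fallback.
 */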
const char *
gl_locale_name_default (void)
{
/* POSIX:2001 says:
"All implementations shall define a locale as the default locale, to be
invoked when no environment variables are set, or set to the empty
string. This default locale can be the POSIX locale or any other
implementation-defined locale. Some implementations may provide
facilities for local installation administrators to set the default
locale, customizing it for each location. POSIX:2001 does not require
such a facility. */
#if !(HAVE_CFLOCALECOPYCURRENT || HAVE_CFPREFERENCESCOPYAPPVALUE || defined(WIN32_NATIVE))
/* The system does not have a way of setting the locale, other than the
POSIX specified environment variables. We use C as default locale. */
return "C";
#else
/* Return an XPG style locale name language[_territory][@modifier].
Don't even bother determining the codeset; it's not useful in this
context, because message catalogs are not specific to a single
codeset. */
# if HAVE_CFLOCALECOPYCURRENT || HAVE_CFPREFERENCESCOPYAPPVALUE
/* MacOS X 10.2 or newer */
{
/* Cache the locale name, since CoreFoundation calls are expensive. */
static const char *cached_localename;
if (cached_localename == NULL)
{
char namebuf[256];
# if HAVE_CFLOCALECOPYCURRENT /* MacOS X 10.3 or newer */
CFLocaleRef locale = CFLocaleCopyCurrent ();
CFStringRef name = CFLocaleGetIdentifier (locale);
if (CFStringGetCString (name, namebuf, sizeof(namebuf),
kCFStringEncodingASCII))
{
gl_locale_name_canonicalize (namebuf);
cached_localename = strdup (namebuf);
}
CFRelease (locale);
# elif HAVE_CFPREFERENCESCOPYAPPVALUE /* MacOS X 10.2 or newer */
CFTypeRef value =
CFPreferencesCopyAppValue (CFSTR ("AppleLocale"),
kCFPreferencesCurrentApplication);
if (value != NULL
&& CFGetTypeID (value) == CFStringGetTypeID ()
&& CFStringGetCString ((CFStringRef)value, namebuf, sizeof(namebuf),
kCFStringEncodingASCII))
{
gl_locale_name_canonicalize (namebuf);
cached_localename = strdup (namebuf);
}
# endif
if (cached_localename == NULL)
cached_localename = "C";
}
return cached_localename;
}
# endif
# if defined(WIN32_NATIVE) /* WIN32, not Cygwin */
{
LCID lcid;
LANGID langid;
int primary, sub;
/* Use native Win32 API locale ID. */
lcid = GetThreadLocale ();
/* Strip off the sorting rules, keep only the language part. */
langid = LANGIDFROMLCID (lcid);
/* Split into language and territory part. */
primary = PRIMARYLANGID (langid);
sub = SUBLANGID (langid);
/* Dispatch on language.
See also http://www.unicode.org/unicode/onlinedat/languages.html .
For details about languages, see http://www.ethnologue.com/ . */
switch (primary)
{
case LANG_AFRIKAANS: return "af_ZA";
case LANG_ALBANIAN: return "sq_AL";
case LANG_AMHARIC: return "am_ET";
case LANG_ARABIC:
switch (sub)
{
case SUBLANG_ARABIC_SAUDI_ARABIA: return "ar_SA";
case SUBLANG_ARABIC_IRAQ: return "ar_IQ";
case SUBLANG_ARABIC_EGYPT: return "ar_EG";
case SUBLANG_ARABIC_LIBYA: return "ar_LY";
case SUBLANG_ARABIC_ALGERIA: return "ar_DZ";
case SUBLANG_ARABIC_MOROCCO: return "ar_MA";
case SUBLANG_ARABIC_TUNISIA: return "ar_TN";
case SUBLANG_ARABIC_OMAN: return "ar_OM";
case SUBLANG_ARABIC_YEMEN: return "ar_YE";
case SUBLANG_ARABIC_SYRIA: return "ar_SY";
case SUBLANG_ARABIC_JORDAN: return "ar_JO";
case SUBLANG_ARABIC_LEBANON: return "ar_LB";
case SUBLANG_ARABIC_KUWAIT: return "ar_KW";
case SUBLANG_ARABIC_UAE: return "ar_AE";
case SUBLANG_ARABIC_BAHRAIN: return "ar_BH";
case SUBLANG_ARABIC_QATAR: return "ar_QA";
}
return "ar";
case LANG_ARMENIAN: return "hy_AM";
case LANG_ASSAMESE: return "as_IN";
case LANG_AZERI:
switch (sub)
{
/* FIXME: Adjust this when Azerbaijani locales appear on Unix. */
case SUBLANG_AZERI_LATIN: return "az_AZ@latin";
case SUBLANG_AZERI_CYRILLIC: return "az_AZ@cyrillic";
}
return "az";
case LANG_BASQUE:
switch (sub)
{
case SUBLANG_DEFAULT: return "eu_ES";
}
return "eu"; /* Ambiguous: could be "eu_ES" or "eu_FR". */
case LANG_BELARUSIAN: return "be_BY";
case LANG_BENGALI:
switch (sub)
{
case SUBLANG_BENGALI_INDIA: return "bn_IN";
case SUBLANG_BENGALI_BANGLADESH: return "bn_BD";
}
return "bn";
case LANG_BULGARIAN: return "bg_BG";
case LANG_BURMESE: return "my_MM";
case LANG_CAMBODIAN: return "km_KH";
case LANG_CATALAN: return "ca_ES";
case LANG_CHEROKEE: return "chr_US";
case LANG_CHINESE:
switch (sub)
{
case SUBLANG_CHINESE_TRADITIONAL: return "zh_TW";
case SUBLANG_CHINESE_SIMPLIFIED: return "zh_CN";
case SUBLANG_CHINESE_HONGKONG: return "zh_HK";
case SUBLANG_CHINESE_SINGAPORE: return "zh_SG";
case SUBLANG_CHINESE_MACAU: return "zh_MO";
}
return "zh";
case LANG_CROATIAN: /* LANG_CROATIAN == LANG_SERBIAN
* What used to be called Serbo-Croatian
* should really now be two separate
* languages because of political reasons.
* (Says tml, who knows nothing about Serbian
* or Croatian.)
* (I can feel those flames coming already.)
*/
switch (sub)
{
case SUBLANG_DEFAULT: return "hr_HR";
case SUBLANG_SERBIAN_LATIN: return "sr_CS";
case SUBLANG_SERBIAN_CYRILLIC: return "sr_CS@cyrillic";
}
return "hr";
case LANG_CZECH: return "cs_CZ";
case LANG_DANISH: return "da_DK";
case LANG_DIVEHI: return "dv_MV";
case LANG_DUTCH:
switch (sub)
{
case SUBLANG_DUTCH: return "nl_NL";
case SUBLANG_DUTCH_BELGIAN: /* FLEMISH, VLAAMS */ return "nl_BE";
}
return "nl";
case LANG_EDO: return "bin_NG";
case LANG_ENGLISH:
switch (sub)
{
/* SUBLANG_ENGLISH_US == SUBLANG_DEFAULT. Heh. I thought
* English was the language spoken in England.
* Oh well.
*/
case SUBLANG_ENGLISH_US: return "en_US";
case SUBLANG_ENGLISH_UK: return "en_GB";
case SUBLANG_ENGLISH_AUS: return "en_AU";
case SUBLANG_ENGLISH_CAN: return "en_CA";
case SUBLANG_ENGLISH_NZ: return "en_NZ";
case SUBLANG_ENGLISH_EIRE: return "en_IE";
case SUBLANG_ENGLISH_SOUTH_AFRICA: return "en_ZA";
case SUBLANG_ENGLISH_JAMAICA: return "en_JM";
case SUBLANG_ENGLISH_CARIBBEAN: return "en_GD"; /* Grenada? */
case SUBLANG_ENGLISH_BELIZE: return "en_BZ";
case SUBLANG_ENGLISH_TRINIDAD: return "en_TT";
case SUBLANG_ENGLISH_ZIMBABWE: return "en_ZW";
case SUBLANG_ENGLISH_PHILIPPINES: return "en_PH";
case SUBLANG_ENGLISH_INDONESIA: return "en_ID";
case SUBLANG_ENGLISH_HONGKONG: return "en_HK";
case SUBLANG_ENGLISH_INDIA: return "en_IN";
case SUBLANG_ENGLISH_MALAYSIA: return "en_MY";
case SUBLANG_ENGLISH_SINGAPORE: return "en_SG";
}
return "en";
case LANG_ESTONIAN: return "et_EE";
case LANG_FAEROESE: return "fo_FO";
case LANG_FARSI: return "fa_IR";
case LANG_FINNISH: return "fi_FI";
case LANG_FRENCH:
switch (sub)
{
case SUBLANG_FRENCH: return "fr_FR";
case SUBLANG_FRENCH_BELGIAN: /* WALLOON */ return "fr_BE";
case SUBLANG_FRENCH_CANADIAN: return "fr_CA";
case SUBLANG_FRENCH_SWISS: return "fr_CH";
case SUBLANG_FRENCH_LUXEMBOURG: return "fr_LU";
case SUBLANG_FRENCH_MONACO: return "fr_MC";
case SUBLANG_FRENCH_WESTINDIES: return "fr"; /* Caribbean? */
case SUBLANG_FRENCH_REUNION: return "fr_RE";
case SUBLANG_FRENCH_CONGO: return "fr_CG";
case SUBLANG_FRENCH_SENEGAL: return "fr_SN";
case SUBLANG_FRENCH_CAMEROON: return "fr_CM";
case SUBLANG_FRENCH_COTEDIVOIRE: return "fr_CI";
case SUBLANG_FRENCH_MALI: return "fr_ML";
case SUBLANG_FRENCH_MOROCCO: return "fr_MA";
case SUBLANG_FRENCH_HAITI: return "fr_HT";
}
return "fr";
case LANG_FRISIAN: return "fy_NL";
case LANG_FULFULDE:
/* Spoken in Nigeria, Guinea, Senegal, Mali, Niger, Cameroon, Benin. */
return "ff_NG";
case LANG_GAELIC:
switch (sub)
{
case 0x01: /* SCOTTISH */ return "gd_GB";
case 0x02: /* IRISH */ return "ga_IE";
}
return "C";
case LANG_GALICIAN: return "gl_ES";
case LANG_GEORGIAN: return "ka_GE";
case LANG_GERMAN:
switch (sub)
{
case SUBLANG_GERMAN: return "de_DE";
case SUBLANG_GERMAN_SWISS: return "de_CH";
case SUBLANG_GERMAN_AUSTRIAN: return "de_AT";
case SUBLANG_GERMAN_LUXEMBOURG: return "de_LU";
case SUBLANG_GERMAN_LIECHTENSTEIN: return "de_LI";
}
return "de";
case LANG_GREEK: return "el_GR";
case LANG_GUARANI: return "gn_PY";
case LANG_GUJARATI: return "gu_IN";
case LANG_HAUSA: return "ha_NG";
case LANG_HAWAIIAN:
/* FIXME: Do they mean Hawaiian ("haw_US", 1000 speakers)
or Hawaii Creole English ("cpe_US", 600000 speakers)? */
return "cpe_US";
case LANG_HEBREW: return "he_IL";
case LANG_HINDI: return "hi_IN";
case LANG_HUNGARIAN: return "hu_HU";
case LANG_IBIBIO: return "nic_NG";
case LANG_ICELANDIC: return "is_IS";
case LANG_IGBO: return "ig_NG";
case LANG_INDONESIAN: return "id_ID";
case LANG_INUKTITUT: return "iu_CA";
case LANG_ITALIAN:
switch (sub)
{
case SUBLANG_ITALIAN: return "it_IT";
case SUBLANG_ITALIAN_SWISS: return "it_CH";
}
return "it";
case LANG_JAPANESE: return "ja_JP";
case LANG_KANNADA: return "kn_IN";
case LANG_KANURI: return "kr_NG";
case LANG_KASHMIRI:
switch (sub)
{
case SUBLANG_DEFAULT: return "ks_PK";
case SUBLANG_KASHMIRI_INDIA: return "ks_IN";
}
return "ks";
case LANG_KAZAK: return "kk_KZ";
case LANG_KONKANI:
/* FIXME: Adjust this when such locales appear on Unix. */
return "kok_IN";
case LANG_KOREAN: return "ko_KR";
case LANG_KYRGYZ: return "ky_KG";
case LANG_LAO: return "lo_LA";
case LANG_LATIN: return "la_VA";
case LANG_LATVIAN: return "lv_LV";
case LANG_LITHUANIAN: return "lt_LT";
case LANG_MACEDONIAN: return "mk_MK";
case LANG_MALAY:
switch (sub)
{
case SUBLANG_MALAY_MALAYSIA: return "ms_MY";
case SUBLANG_MALAY_BRUNEI_DARUSSALAM: return "ms_BN";
}
return "ms";
case LANG_MALAYALAM: return "ml_IN";
case LANG_MALTESE: return "mt_MT";
case LANG_MANIPURI:
/* FIXME: Adjust this when such locales appear on Unix. */
return "mni_IN";
case LANG_MARATHI: return "mr_IN";
case LANG_MONGOLIAN:
switch (sub)
{
case SUBLANG_DEFAULT: return "mn_MN";
}
return "mn"; /* Ambiguous: could be "mn_CN" or "mn_MN". */
case LANG_NEPALI:
switch (sub)
{
case SUBLANG_DEFAULT: return "ne_NP";
case SUBLANG_NEPALI_INDIA: return "ne_IN";
}
return "ne";
case LANG_NORWEGIAN:
switch (sub)
{
case SUBLANG_NORWEGIAN_BOKMAL: return "nb_NO";
case SUBLANG_NORWEGIAN_NYNORSK: return "nn_NO";
}
return "no";
case LANG_ORIYA: return "or_IN";
case LANG_OROMO: return "om_ET";
case LANG_PAPIAMENTU: return "pap_AN";
case LANG_PASHTO:
return "ps"; /* Ambiguous: could be "ps_PK" or "ps_AF". */
case LANG_POLISH: return "pl_PL";
case LANG_PORTUGUESE:
switch (sub)
{
case SUBLANG_PORTUGUESE: return "pt_PT";
/* Hmm. SUBLANG_PORTUGUESE_BRAZILIAN == SUBLANG_DEFAULT.
Same phenomenon as SUBLANG_ENGLISH_US == SUBLANG_DEFAULT. */
case SUBLANG_PORTUGUESE_BRAZILIAN: return "pt_BR";
}
return "pt";
case LANG_PUNJABI:
switch (sub)
{
case SUBLANG_PUNJABI_INDIA: return "pa_IN"; /* Gurmukhi script */
case SUBLANG_PUNJABI_PAKISTAN: return "pa_PK"; /* Arabic script */
}
return "pa";
case LANG_RHAETO_ROMANCE: return "rm_CH";
case LANG_ROMANIAN:
switch (sub)
{
case SUBLANG_ROMANIAN_ROMANIA: return "ro_RO";
case SUBLANG_ROMANIAN_MOLDOVA: return "ro_MD";
}
return "ro";
case LANG_RUSSIAN:
switch (sub)
{
case SUBLANG_DEFAULT: return "ru_RU";
}
return "ru"; /* Ambiguous: could be "ru_RU" or "ru_UA" or "ru_MD". */
case LANG_SAAMI: /* actually Northern Sami */ return "se_NO";
case LANG_SANSKRIT: return "sa_IN";
case LANG_SINDHI:
switch (sub)
{
case SUBLANG_SINDHI_PAKISTAN: return "sd_PK";
/* case SUBLANG_SINDHI_AFGHANISTAN: return "sd_AF"; */
}
return "sd";
case LANG_SINHALESE: return "si_LK";
case LANG_SLOVAK: return "sk_SK";
case LANG_SLOVENIAN: return "sl_SI";
case LANG_SOMALI: return "so_SO";
case LANG_SORBIAN:
/* FIXME: Adjust this when such locales appear on Unix. */
return "wen_DE";
case LANG_SPANISH:
switch (sub)
{
case SUBLANG_SPANISH: return "es_ES";
case SUBLANG_SPANISH_MEXICAN: return "es_MX";
case SUBLANG_SPANISH_MODERN:
return "es_ES@modern"; /* not seen on Unix */
case SUBLANG_SPANISH_GUATEMALA: return "es_GT";
case SUBLANG_SPANISH_COSTA_RICA: return "es_CR";
case SUBLANG_SPANISH_PANAMA: return "es_PA";
case SUBLANG_SPANISH_DOMINICAN_REPUBLIC: return "es_DO";
case SUBLANG_SPANISH_VENEZUELA: return "es_VE";
case SUBLANG_SPANISH_COLOMBIA: return "es_CO";
case SUBLANG_SPANISH_PERU: return "es_PE";
case SUBLANG_SPANISH_ARGENTINA: return "es_AR";
case SUBLANG_SPANISH_ECUADOR: return "es_EC";
case SUBLANG_SPANISH_CHILE: return "es_CL";
case SUBLANG_SPANISH_URUGUAY: return "es_UY";
case SUBLANG_SPANISH_PARAGUAY: return "es_PY";
case SUBLANG_SPANISH_BOLIVIA: return "es_BO";
case SUBLANG_SPANISH_EL_SALVADOR: return "es_SV";
case SUBLANG_SPANISH_HONDURAS: return "es_HN";
case SUBLANG_SPANISH_NICARAGUA: return "es_NI";
case SUBLANG_SPANISH_PUERTO_RICO: return "es_PR";
}
return "es";
case LANG_SUTU: return "bnt_TZ"; /* or "st_LS" or "nso_ZA"? */
case LANG_SWAHILI: return "sw_KE";
case LANG_SWEDISH:
switch (sub)
{
case SUBLANG_DEFAULT: return "sv_SE";
case SUBLANG_SWEDISH_FINLAND: return "sv_FI";
}
return "sv";
case LANG_SYRIAC: return "syr_TR"; /* An extinct language. */
case LANG_TAGALOG: return "tl_PH";
case LANG_TAJIK: return "tg_TJ";
case LANG_TAMAZIGHT:
switch (sub)
{
/* FIXME: Adjust this when Tamazight locales appear on Unix. */
case SUBLANG_TAMAZIGHT_ARABIC: return "ber_MA@arabic";
case SUBLANG_TAMAZIGHT_ALGERIA_LATIN: return "ber_DZ@latin";
}
return "ber_MA";
case LANG_TAMIL:
switch (sub)
{
case SUBLANG_DEFAULT: return "ta_IN";
}
return "ta"; /* Ambiguous: could be "ta_IN" or "ta_LK" or "ta_SG". */
case LANG_TATAR: return "tt_RU";
case LANG_TELUGU: return "te_IN";
case LANG_THAI: return "th_TH";
case LANG_TIBETAN: return "bo_CN";
case LANG_TIGRINYA:
switch (sub)
{
case SUBLANG_TIGRINYA_ETHIOPIA: return "ti_ET";
case SUBLANG_TIGRINYA_ERITREA: return "ti_ER";
}
return "ti";
case LANG_TSONGA: return "ts_ZA";
case LANG_TSWANA: return "tn_BW";
case LANG_TURKISH: return "tr_TR";
case LANG_TURKMEN: return "tk_TM";
case LANG_UKRAINIAN: return "uk_UA";
case LANG_URDU:
switch (sub)
{
case SUBLANG_URDU_PAKISTAN: return "ur_PK";
case SUBLANG_URDU_INDIA: return "ur_IN";
}
return "ur";
case LANG_UZBEK:
switch (sub)
{
case SUBLANG_UZBEK_LATIN: return "uz_UZ";
case SUBLANG_UZBEK_CYRILLIC: return "uz_UZ@cyrillic";
}
return "uz";
case LANG_VENDA: return "ve_ZA";
case LANG_VIETNAMESE: return "vi_VN";
case LANG_WELSH: return "cy_GB";
case LANG_XHOSA: return "xh_ZA";
case LANG_YI: return "sit_CN";
case LANG_YIDDISH: return "yi_IL";
case LANG_YORUBA: return "yo_NG";
case LANG_ZULU: return "zu_ZA";
default: return "C";
}
}
# endif
#endif
}
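/* Determine the current locale's name for CATEGORY, preferring the POSIX
   environment / setlocale() result and falling back to the system's
   default locale when none is set. */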
const char *
gl_locale_name (int category, const char *categoryname)
{
const char *retval;
retval = gl_locale_name_posix (category, categoryname);
if (retval != NULL)
return retval;
return gl_locale_name_default ();
}
| gpl-2.0 |
MoKee/android_kernel_htc_m7 | drivers/thermal/msm8960_tsens.c | 42 | 35478 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/msm_tsens.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/pm.h>
#include <linux/mfd/pm8xxx/pm8xxx-adc.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
enum tsens_trip_type {
TSENS_TRIP_STAGE3 = 0,
TSENS_TRIP_STAGE2,
TSENS_TRIP_STAGE1,
TSENS_TRIP_STAGE0,
TSENS_TRIP_NUM,
};
#define TSENS_CAL_DEGC 30
#define TSENS_MAIN_SENSOR 0
#define TSENS_8960_QFPROM_ADDR0 (MSM_QFPROM_BASE + 0x00000404)
#define TSENS_8960_QFPROM_SPARE_ADDR0 (MSM_QFPROM_BASE + 0x00000414)
#define TSENS_8960_CONFIG 0x9b
#define TSENS_8960_CONFIG_SHIFT 0
#define TSENS_8960_CONFIG_MASK (0xf << TSENS_8960_CONFIG_SHIFT)
#define TSENS_CNTL_ADDR (MSM_CLK_CTL_BASE + 0x00003620)
#define TSENS_EN BIT(0)
#define TSENS_SW_RST BIT(1)
#define TSENS_ADC_CLK_SEL BIT(2)
#define SENSOR0_EN BIT(3)
#define SENSOR1_EN BIT(4)
#define SENSOR2_EN BIT(5)
#define SENSOR3_EN BIT(6)
#define SENSOR4_EN BIT(7)
#define SENSORS_EN (SENSOR0_EN | SENSOR1_EN | \
SENSOR2_EN | SENSOR3_EN | SENSOR4_EN)
#define TSENS_STATUS_CNTL_OFFSET 8
#define TSENS_MIN_STATUS_MASK BIT((tsens_status_cntl_start))
#define TSENS_LOWER_STATUS_CLR BIT((tsens_status_cntl_start + 1))
#define TSENS_UPPER_STATUS_CLR BIT((tsens_status_cntl_start + 2))
#define TSENS_MAX_STATUS_MASK BIT((tsens_status_cntl_start + 3))
#define TSENS_MEASURE_PERIOD 4
#define TSENS_8960_SLP_CLK_ENA BIT(26)
#define TSENS_THRESHOLD_ADDR (MSM_CLK_CTL_BASE + 0x00003624)
#define TSENS_THRESHOLD_MAX_CODE 0xff
#define TSENS_THRESHOLD_MIN_CODE 0
#define TSENS_THRESHOLD_MAX_LIMIT_SHIFT 24
#define TSENS_THRESHOLD_MIN_LIMIT_SHIFT 16
#define TSENS_THRESHOLD_UPPER_LIMIT_SHIFT 8
#define TSENS_THRESHOLD_LOWER_LIMIT_SHIFT 0
#define TSENS_THRESHOLD_MAX_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << \
TSENS_THRESHOLD_MAX_LIMIT_SHIFT)
#define TSENS_THRESHOLD_MIN_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << \
TSENS_THRESHOLD_MIN_LIMIT_SHIFT)
#define TSENS_THRESHOLD_UPPER_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << \
TSENS_THRESHOLD_UPPER_LIMIT_SHIFT)
#define TSENS_THRESHOLD_LOWER_LIMIT_MASK (TSENS_THRESHOLD_MAX_CODE << \
TSENS_THRESHOLD_LOWER_LIMIT_SHIFT)
#define TSENS_LOWER_LIMIT_TH 0x50
#define TSENS_UPPER_LIMIT_TH 0xdf
#define TSENS_MIN_LIMIT_TH 0x0
#define TSENS_MAX_LIMIT_TH 0xff
#define TSENS_MIN_LIMIT_TEMP -60
#define TSENS_MAX_LIMIT_TEMP 120
#define TSENS_S0_STATUS_ADDR (MSM_CLK_CTL_BASE + 0x00003628)
#define TSENS_STATUS_ADDR_OFFSET 2
#define TSENS_SENSOR_STATUS_SIZE 4
#define TSENS_INT_STATUS_ADDR (MSM_CLK_CTL_BASE + 0x0000363c)
#define TSENS_LOWER_INT_MASK BIT(1)
#define TSENS_UPPER_INT_MASK BIT(2)
#define TSENS_MAX_INT_MASK BIT(3)
#define TSENS_TRDY_MASK BIT(7)
#define TSENS_8960_CONFIG_ADDR (MSM_CLK_CTL_BASE + 0x00003640)
#define TSENS_TRDY_RDY_MIN_TIME 1000
#define TSENS_TRDY_RDY_MAX_TIME 1100
#define TSENS_SENSOR_SHIFT 16
#define TSENS_RED_SHIFT 8
#define TSENS_8960_QFPROM_SHIFT 4
#define TSENS_SENSOR_QFPROM_SHIFT 2
#define TSENS_SENSOR0_SHIFT 3
#define TSENS_MASK1 1
#define TSENS_8660_QFPROM_ADDR (MSM_QFPROM_BASE + 0x000000bc)
#define TSENS_8660_QFPROM_RED_TEMP_SENSOR0_SHIFT 24
#define TSENS_8660_QFPROM_TEMP_SENSOR0_SHIFT 16
#define TSENS_8660_QFPROM_TEMP_SENSOR0_MASK (255 \
<< TSENS_8660_QFPROM_TEMP_SENSOR0_SHIFT)
#define TSENS_8660_CONFIG 01
#define TSENS_8660_CONFIG_SHIFT 28
#define TSENS_8660_CONFIG_MASK (3 << TSENS_8660_CONFIG_SHIFT)
#define TSENS_8660_SLP_CLK_ENA BIT(24)
#define TSENS_8064_SENSOR5_EN BIT(8)
#define TSENS_8064_SENSOR6_EN BIT(9)
#define TSENS_8064_SENSOR7_EN BIT(10)
#define TSENS_8064_SENSOR8_EN BIT(11)
#define TSENS_8064_SENSOR9_EN BIT(12)
#define TSENS_8064_SENSOR10_EN BIT(13)
#define TSENS_8064_SENSORS_EN (SENSORS_EN | \
TSENS_8064_SENSOR5_EN | \
TSENS_8064_SENSOR6_EN | \
TSENS_8064_SENSOR7_EN | \
TSENS_8064_SENSOR8_EN | \
TSENS_8064_SENSOR9_EN | \
TSENS_8064_SENSOR10_EN)
#define TSENS_8064_STATUS_CNTL (MSM_CLK_CTL_BASE + 0x00003660)
#define TSENS_8064_S5_STATUS_ADDR (MSM_CLK_CTL_BASE + 0x00003664)
#define TSENS_8064_SEQ_SENSORS 5
#define TSENS_8064_S4_S5_OFFSET 40
#define TSENS_CNTL_RESUME_MASK 0xfffffff9
#define TSENS_8960_SENSOR_MASK 0xf8
#define TSENS_8064_SENSOR_MASK 0x3ff8
static int tsens_status_cntl_start;
struct tsens_tm_device_sensor {
struct thermal_zone_device *tz_dev;
enum thermal_device_mode mode;
unsigned int sensor_num;
struct work_struct work;
int offset;
int calib_data;
int calib_data_backup;
uint32_t slope_mul_tsens_factor;
};
struct tsens_tm_device {
bool prev_reading_avail;
int tsens_factor;
int patherm0;
int patherm1;
uint32_t tsens_num_sensor;
enum platform_type hw_type;
int pm_tsens_thr_data;
int pm_tsens_cntl;
struct work_struct tsens_work;
struct tsens_tm_device_sensor sensor[0];
};
struct tsens_tm_device *tmdev;
static struct workqueue_struct *monitor_tsense_wq = NULL;
struct delayed_work monitor_tsens_status_worker;
static void monitor_tsens_status(struct work_struct *work);
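/*
 * Convert a raw ADC code to degrees Celsius using the sensor's calibrated
 * slope and offset (both scaled by tsens_factor), rounding the final
 * division to the nearest degree.
 */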
static int tsens_tz_code_to_degC(int adc_code, int sensor_num)
{
int degcbeforefactor, degc;
degcbeforefactor = (adc_code *
tmdev->sensor[sensor_num].slope_mul_tsens_factor
+ tmdev->sensor[sensor_num].offset);
if (degcbeforefactor == 0)
degc = degcbeforefactor;
else if (degcbeforefactor > 0)
degc = (degcbeforefactor + tmdev->tsens_factor/2)
/ tmdev->tsens_factor;
else
degc = (degcbeforefactor - tmdev->tsens_factor/2)
/ tmdev->tsens_factor;
return degc;
}
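/*
 * Inverse conversion: degrees Celsius to a raw ADC code, rounded to the
 * nearest code and clamped to the valid threshold code range.
 */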
static int tsens_tz_degC_to_code(int degC, int sensor_num)
{
int code = (degC * tmdev->tsens_factor -
tmdev->sensor[sensor_num].offset
+ tmdev->sensor[sensor_num].slope_mul_tsens_factor/2)
/ tmdev->sensor[sensor_num].slope_mul_tsens_factor;
if (code > TSENS_THRESHOLD_MAX_CODE)
code = TSENS_THRESHOLD_MAX_CODE;
else if (code < TSENS_THRESHOLD_MIN_CODE)
code = TSENS_THRESHOLD_MIN_CODE;
return code;
}
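/*
 * Read the raw code from a sensor's status register and convert it to
 * degrees Celsius. Waits for the first ready reading (TRDY) after the
 * block has been (re)enabled; sensors 5 and above on APQ8064 sit in a
 * separate register bank.
 */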
static void tsens8960_get_temp(int sensor_num, unsigned long *temp)
{
unsigned int code, offset = 0, sensor_addr;
if (!tmdev->prev_reading_avail) {
while (!(readl_relaxed(TSENS_INT_STATUS_ADDR)
& TSENS_TRDY_MASK))
usleep_range(TSENS_TRDY_RDY_MIN_TIME,
TSENS_TRDY_RDY_MAX_TIME);
tmdev->prev_reading_avail = true;
}
sensor_addr = (unsigned int)TSENS_S0_STATUS_ADDR;
if (tmdev->hw_type == APQ_8064 &&
sensor_num >= TSENS_8064_SEQ_SENSORS)
offset = TSENS_8064_S4_S5_OFFSET;
code = readl_relaxed(sensor_addr + offset +
(sensor_num << TSENS_STATUS_ADDR_OFFSET));
*temp = tsens_tz_code_to_degC(code, sensor_num);
}
static int tsens_tz_get_temp(struct thermal_zone_device *thermal,
unsigned long *temp)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
if (!tm_sensor || tm_sensor->mode != THERMAL_DEVICE_ENABLED || !temp)
return -EINVAL;
tsens8960_get_temp(tm_sensor->sensor_num, temp);
return 0;
}
int tsens_get_sensor_temp(int sensor_num, unsigned long *temp)
{
if (!tmdev)
return -ENODEV;
if (sensor_num < 0 || sensor_num >= TSENS_MAX_SENSORS || !temp)
return -EINVAL;
tsens8960_get_temp(sensor_num, temp);
return 0;
}
EXPORT_SYMBOL(tsens_get_sensor_temp);
int tsens_get_temp(struct tsens_device *device, unsigned long *temp)
{
if (!tmdev)
return -ENODEV;
if (!temp)
return -EINVAL;
tsens8960_get_temp(device->sensor_num, temp);
return 0;
}
EXPORT_SYMBOL(tsens_get_temp);
static int tsens_tz_get_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode *mode)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
if (!tm_sensor || !mode)
return -EINVAL;
*mode = tm_sensor->mode;
return 0;
}
static int tsens_tz_set_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode mode)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg, mask, i;
if (!tm_sensor)
return -EINVAL;
if (mode != tm_sensor->mode) {
reg = readl_relaxed(TSENS_CNTL_ADDR);
mask = 1 << (tm_sensor->sensor_num + TSENS_SENSOR0_SHIFT);
if (mode == THERMAL_DEVICE_ENABLED) {
if ((mask != SENSOR0_EN) && !(reg & SENSOR0_EN)) {
pr_info("Main sensor not enabled\n");
return -EINVAL;
}
writel_relaxed(reg | TSENS_SW_RST, TSENS_CNTL_ADDR);
if (tmdev->hw_type == MSM_8960 ||
tmdev->hw_type == MDM_9615 ||
tmdev->hw_type == APQ_8064)
reg |= mask | TSENS_8960_SLP_CLK_ENA
| TSENS_EN;
else
reg |= mask | TSENS_8660_SLP_CLK_ENA
| TSENS_EN;
tmdev->prev_reading_avail = false;
} else {
reg &= ~mask;
if (!(reg & SENSOR0_EN)) {
if (tmdev->hw_type == APQ_8064)
reg &= ~(TSENS_8064_SENSORS_EN |
TSENS_8960_SLP_CLK_ENA |
TSENS_EN);
else if (tmdev->hw_type == MSM_8960 ||
tmdev->hw_type == MDM_9615)
reg &= ~(SENSORS_EN |
TSENS_8960_SLP_CLK_ENA |
TSENS_EN);
else
reg &= ~(SENSORS_EN |
TSENS_8660_SLP_CLK_ENA |
TSENS_EN);
for (i = 1; i < tmdev->tsens_num_sensor; i++)
tmdev->sensor[i].mode = mode;
}
}
writel_relaxed(reg, TSENS_CNTL_ADDR);
}
tm_sensor->mode = mode;
return 0;
}
static int tsens_tz_get_trip_type(struct thermal_zone_device *thermal,
int trip, enum thermal_trip_type *type)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
if (!tm_sensor || trip < 0 || !type)
return -EINVAL;
switch (trip) {
case TSENS_TRIP_STAGE3:
*type = THERMAL_TRIP_CRITICAL;
break;
case TSENS_TRIP_STAGE2:
*type = THERMAL_TRIP_CONFIGURABLE_HI;
break;
case TSENS_TRIP_STAGE1:
*type = THERMAL_TRIP_CONFIGURABLE_LOW;
break;
case TSENS_TRIP_STAGE0:
*type = THERMAL_TRIP_CRITICAL_LOW;
break;
default:
return -EINVAL;
}
return 0;
}
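/*
 * Enable or disable a trip point. Before unmasking a threshold, verify
 * that its programmed code still lies between the neighbouring enabled
 * thresholds, otherwise reject the request.
 */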
static int tsens_tz_activate_trip_type(struct thermal_zone_device *thermal,
int trip, enum thermal_trip_activation_mode mode)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg_cntl, reg_th, code, hi_code, lo_code, mask;
if (!tm_sensor || trip < 0)
return -EINVAL;
lo_code = TSENS_THRESHOLD_MIN_CODE;
hi_code = TSENS_THRESHOLD_MAX_CODE;
if (tmdev->hw_type == APQ_8064)
reg_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
else
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
reg_th = readl_relaxed(TSENS_THRESHOLD_ADDR);
switch (trip) {
case TSENS_TRIP_STAGE3:
code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
mask = TSENS_MAX_STATUS_MASK;
if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
lo_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE2:
code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
mask = TSENS_UPPER_STATUS_CLR;
if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE1:
code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
mask = TSENS_LOWER_STATUS_CLR;
if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE0:
code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
mask = TSENS_MIN_STATUS_MASK;
if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
hi_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
break;
default:
return -EINVAL;
}
if (mode == THERMAL_TRIP_ACTIVATION_DISABLED) {
if (tmdev->hw_type == APQ_8064)
writel_relaxed(reg_cntl | mask, TSENS_8064_STATUS_CNTL);
else
writel_relaxed(reg_cntl | mask, TSENS_CNTL_ADDR);
} else {
if (code < lo_code || code > hi_code) {
pr_info("%s with invalid code %x\n", __func__, code);
return -EINVAL;
}
if (tmdev->hw_type == APQ_8064)
writel_relaxed(reg_cntl & ~mask,
TSENS_8064_STATUS_CNTL);
else
writel_relaxed(reg_cntl & ~mask, TSENS_CNTL_ADDR);
}
mb();
return 0;
}
static int tsens_tz_get_trip_temp(struct thermal_zone_device *thermal,
int trip, unsigned long *temp)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg;
if (!tm_sensor || trip < 0 || !temp)
return -EINVAL;
reg = readl_relaxed(TSENS_THRESHOLD_ADDR);
switch (trip) {
case TSENS_TRIP_STAGE3:
reg = (reg & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE2:
reg = (reg & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE1:
reg = (reg & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE0:
reg = (reg & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
break;
default:
return -EINVAL;
}
*temp = tsens_tz_code_to_degC(reg, tm_sensor->sensor_num);
return 0;
}
static int tsens_tz_get_crit_temp(struct thermal_zone_device *thermal,
unsigned long *temp)
{
return tsens_tz_get_trip_temp(thermal, TSENS_TRIP_STAGE3, temp);
}
static int tsens_tz_notify(struct thermal_zone_device *thermal,
int count, enum thermal_trip_type type)
{
return 1;
}
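/*
 * Program a new trip temperature. The new code must not cross the
 * neighbouring enabled thresholds; -EINVAL is returned if it would.
 */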
static int tsens_tz_set_trip_temp(struct thermal_zone_device *thermal,
int trip, long temp)
{
struct tsens_tm_device_sensor *tm_sensor = thermal->devdata;
unsigned int reg_th, reg_cntl;
int code, hi_code, lo_code, code_err_chk;
if (!tm_sensor || trip < 0)
return -EINVAL;
code_err_chk = code = tsens_tz_degC_to_code(temp,
tm_sensor->sensor_num);
lo_code = TSENS_THRESHOLD_MIN_CODE;
hi_code = TSENS_THRESHOLD_MAX_CODE;
if (tmdev->hw_type == APQ_8064)
reg_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
else
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
reg_th = readl_relaxed(TSENS_THRESHOLD_ADDR);
switch (trip) {
case TSENS_TRIP_STAGE3:
code <<= TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
reg_th &= ~TSENS_THRESHOLD_MAX_LIMIT_MASK;
if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
lo_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE2:
code <<= TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
reg_th &= ~TSENS_THRESHOLD_UPPER_LIMIT_MASK;
if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
lo_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE1:
code <<= TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
reg_th &= ~TSENS_THRESHOLD_LOWER_LIMIT_MASK;
if (!(reg_cntl & TSENS_MIN_STATUS_MASK))
lo_code = (reg_th & TSENS_THRESHOLD_MIN_LIMIT_MASK)
>> TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
break;
case TSENS_TRIP_STAGE0:
code <<= TSENS_THRESHOLD_MIN_LIMIT_SHIFT;
reg_th &= ~TSENS_THRESHOLD_MIN_LIMIT_MASK;
if (!(reg_cntl & TSENS_LOWER_STATUS_CLR))
hi_code = (reg_th & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_UPPER_STATUS_CLR))
hi_code = (reg_th & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
else if (!(reg_cntl & TSENS_MAX_STATUS_MASK))
hi_code = (reg_th & TSENS_THRESHOLD_MAX_LIMIT_MASK)
>> TSENS_THRESHOLD_MAX_LIMIT_SHIFT;
break;
default:
return -EINVAL;
}
if (code_err_chk < lo_code || code_err_chk > hi_code)
return -EINVAL;
writel_relaxed(reg_th | code, TSENS_THRESHOLD_ADDR);
return 0;
}
static struct thermal_zone_device_ops tsens_thermal_zone_ops = {
.get_temp = tsens_tz_get_temp,
.get_mode = tsens_tz_get_mode,
.set_mode = tsens_tz_set_mode,
.get_trip_type = tsens_tz_get_trip_type,
.activate_trip_type = tsens_tz_activate_trip_type,
.get_trip_temp = tsens_tz_get_trip_temp,
.set_trip_temp = tsens_tz_set_trip_temp,
.get_crit_temp = tsens_tz_get_crit_temp,
.notify = tsens_tz_notify,
};
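/*
 * Periodic worker: dump the TSENS control/threshold/status registers and
 * the temperature of every enabled sensor (plus the PMIC pa_therm ADC
 * channels when configured), then re-arm itself after 60 seconds.
 */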
static void monitor_tsens_status(struct work_struct *work)
{
unsigned int i, j, cntl, threshold, int_status, config;
int code;
int enable = 0;
struct pm8xxx_adc_chan_result result;
int rc = -1;
cntl = readl_relaxed(TSENS_CNTL_ADDR);
threshold = readl_relaxed(TSENS_THRESHOLD_ADDR);
int_status = readl_relaxed(TSENS_INT_STATUS_ADDR);
config = readl_relaxed(TSENS_8960_CONFIG_ADDR);
pr_info("TSENS_CNTL_ADDR[0x%08X], TSENS_THRESHOLD_ADDR[0x%08X], TSENS_INT_STATUS_ADDR[0x%08X], TSENS_8960_CONFIG_ADDR[0x%08X]\n", cntl, threshold, int_status, config);
if (tmdev->hw_type == APQ_8064)
cntl &= (uint32_t) TSENS_8064_SENSORS_EN;
else
cntl &= (uint32_t) SENSORS_EN;
cntl >>= TSENS_SENSOR0_SHIFT;
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
if (i < 5)
code = readl_relaxed(TSENS_S0_STATUS_ADDR
+ (i << TSENS_STATUS_ADDR_OFFSET));
else {
j = i - 5;
code = readl_relaxed(TSENS_8064_S5_STATUS_ADDR
+ (j << TSENS_STATUS_ADDR_OFFSET));
}
enable = cntl & (0x1 << i);
if (enable > 0)
pr_info("Sensor %d = %d C\n", i, tsens_tz_code_to_degC(code, i));
}
if (tmdev->patherm0 > 0) {
rc = pm8xxx_adc_read(tmdev->patherm0, &result);
pr_info("pa_therm0 = %lld C\n", result.physical);
}
if (tmdev->patherm1 > 0) {
rc = pm8xxx_adc_read(tmdev->patherm1, &result);
pr_info("pa_therm1 = %lld C\n", result.physical);
}
if (monitor_tsense_wq) {
queue_delayed_work(monitor_tsense_wq, &monitor_tsens_status_worker, msecs_to_jiffies(60000));
}
}
static void notify_uspace_tsens_fn(struct work_struct *work)
{
struct tsens_tm_device_sensor *tm = container_of(work,
struct tsens_tm_device_sensor, work);
sysfs_notify(&tm->tz_dev->device.kobj,
NULL, "type");
}
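/*
 * Work triggered by the upper/lower threshold interrupt: mask both status
 * bits, find which enabled sensors crossed a threshold, notify user space
 * for each of them, and re-enable only the thresholds that did not trip.
 */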
static void tsens_scheduler_fn(struct work_struct *work)
{
struct tsens_tm_device *tm = container_of(work, struct tsens_tm_device,
tsens_work);
unsigned int threshold, threshold_low, i, code, reg, sensor, mask;
unsigned int sensor_addr;
bool upper_th_x, lower_th_x;
int adc_code;
if (tmdev->hw_type == APQ_8064) {
reg = readl_relaxed(TSENS_8064_STATUS_CNTL);
writel_relaxed(reg | TSENS_LOWER_STATUS_CLR |
TSENS_UPPER_STATUS_CLR, TSENS_8064_STATUS_CNTL);
} else {
reg = readl_relaxed(TSENS_CNTL_ADDR);
writel_relaxed(reg | TSENS_LOWER_STATUS_CLR |
TSENS_UPPER_STATUS_CLR, TSENS_CNTL_ADDR);
}
mask = ~(TSENS_LOWER_STATUS_CLR | TSENS_UPPER_STATUS_CLR);
threshold = readl_relaxed(TSENS_THRESHOLD_ADDR);
threshold_low = (threshold & TSENS_THRESHOLD_LOWER_LIMIT_MASK)
>> TSENS_THRESHOLD_LOWER_LIMIT_SHIFT;
threshold = (threshold & TSENS_THRESHOLD_UPPER_LIMIT_MASK)
>> TSENS_THRESHOLD_UPPER_LIMIT_SHIFT;
sensor = readl_relaxed(TSENS_CNTL_ADDR);
if (tmdev->hw_type == APQ_8064) {
reg = readl_relaxed(TSENS_8064_STATUS_CNTL);
sensor &= (uint32_t) TSENS_8064_SENSORS_EN;
} else {
reg = sensor;
sensor &= (uint32_t) SENSORS_EN;
}
sensor >>= TSENS_SENSOR0_SHIFT;
sensor_addr = (unsigned int)TSENS_S0_STATUS_ADDR;
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
if (i == TSENS_8064_SEQ_SENSORS)
sensor_addr += TSENS_8064_S4_S5_OFFSET;
if (sensor & TSENS_MASK1) {
code = readl_relaxed(sensor_addr);
upper_th_x = code >= threshold;
lower_th_x = code <= threshold_low;
if (upper_th_x)
mask |= TSENS_UPPER_STATUS_CLR;
if (lower_th_x)
mask |= TSENS_LOWER_STATUS_CLR;
if (upper_th_x || lower_th_x) {
schedule_work(&tm->sensor[i].work);
adc_code = readl_relaxed(sensor_addr);
pr_debug("Trigger (%d degrees) for sensor %d\n",
tsens_tz_code_to_degC(adc_code, i), i);
}
}
sensor >>= 1;
sensor_addr += TSENS_SENSOR_STATUS_SIZE;
}
if (tmdev->hw_type == APQ_8064)
writel_relaxed(reg & mask, TSENS_8064_STATUS_CNTL);
else
writel_relaxed(reg & mask, TSENS_CNTL_ADDR);
mb();
}
static irqreturn_t tsens_isr(int irq, void *data)
{
schedule_work(&tmdev->tsens_work);
return IRQ_HANDLED;
}
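/*
 * Leave only the main sensor enabled after probe; the remaining sensors
 * stay disabled until their thermal zones are explicitly enabled.
 */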
static void tsens8960_sensor_mode_init(void)
{
unsigned int reg_cntl = 0;
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
if (tmdev->hw_type == MSM_8960 || tmdev->hw_type == MDM_9615 ||
tmdev->hw_type == APQ_8064) {
writel_relaxed(reg_cntl &
~((((1 << tmdev->tsens_num_sensor) - 1) >> 1)
<< (TSENS_SENSOR0_SHIFT + 1)), TSENS_CNTL_ADDR);
tmdev->sensor[TSENS_MAIN_SENSOR].mode = THERMAL_DEVICE_ENABLED;
}
}
#ifdef CONFIG_PM
static int tsens_suspend(struct device *dev)
{
int i = 0;
tmdev->pm_tsens_thr_data = readl_relaxed(TSENS_THRESHOLD_ADDR);
tmdev->pm_tsens_cntl = readl_relaxed(TSENS_CNTL_ADDR);
writel_relaxed(tmdev->pm_tsens_cntl &
~(TSENS_8960_SLP_CLK_ENA | TSENS_EN), TSENS_CNTL_ADDR);
tmdev->prev_reading_avail = 0;
for (i = 0; i < tmdev->tsens_num_sensor; i++)
tmdev->sensor[i].mode = THERMAL_DEVICE_DISABLED;
disable_irq_nosync(TSENS_UPPER_LOWER_INT);
mb();
return 0;
}
static int tsens_resume(struct device *dev)
{
unsigned int reg_cntl = 0, reg_cfg = 0, reg_sensor_mask = 0;
unsigned int reg_status_cntl = 0, reg_thr_data = 0, i = 0;
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
writel_relaxed(reg_cntl | TSENS_SW_RST, TSENS_CNTL_ADDR);
if (tmdev->hw_type == MSM_8960 || tmdev->hw_type == MDM_9615) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
TSENS_MIN_STATUS_MASK | TSENS_MAX_STATUS_MASK |
SENSORS_EN;
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
} else if (tmdev->hw_type == APQ_8064) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
(((1 << tmdev->tsens_num_sensor) - 1)
<< TSENS_SENSOR0_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_status_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
reg_status_cntl |= TSENS_MIN_STATUS_MASK |
TSENS_MAX_STATUS_MASK;
writel_relaxed(reg_status_cntl, TSENS_8064_STATUS_CNTL);
}
reg_cfg = readl_relaxed(TSENS_8960_CONFIG_ADDR);
reg_cfg = (reg_cfg & ~TSENS_8960_CONFIG_MASK) |
(TSENS_8960_CONFIG << TSENS_8960_CONFIG_SHIFT);
writel_relaxed(reg_cfg, TSENS_8960_CONFIG_ADDR);
writel_relaxed((tmdev->pm_tsens_cntl & TSENS_CNTL_RESUME_MASK),
TSENS_CNTL_ADDR);
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
writel_relaxed(tmdev->pm_tsens_thr_data, TSENS_THRESHOLD_ADDR);
reg_thr_data = readl_relaxed(TSENS_THRESHOLD_ADDR);
if (tmdev->hw_type == MSM_8960 || tmdev->hw_type == MDM_9615)
reg_sensor_mask = ((reg_cntl & TSENS_8960_SENSOR_MASK)
>> TSENS_SENSOR0_SHIFT);
else {
reg_sensor_mask = ((reg_cntl & TSENS_8064_SENSOR_MASK)
>> TSENS_SENSOR0_SHIFT);
}
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
if (reg_sensor_mask & TSENS_MASK1)
tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
reg_sensor_mask >>= 1;
}
enable_irq(TSENS_UPPER_LOWER_INT);
mb();
return 0;
}
static const struct dev_pm_ops tsens_pm_ops = {
.suspend = tsens_suspend,
.resume = tsens_resume,
};
#endif
static void tsens_disable_mode(void)
{
unsigned int reg_cntl = 0;
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
if (tmdev->hw_type == MSM_8960 || tmdev->hw_type == MDM_9615 ||
tmdev->hw_type == APQ_8064)
writel_relaxed(reg_cntl &
~((((1 << tmdev->tsens_num_sensor) - 1) <<
TSENS_SENSOR0_SHIFT) | TSENS_8960_SLP_CLK_ENA
| TSENS_EN), TSENS_CNTL_ADDR);
else if (tmdev->hw_type == MSM_8660)
writel_relaxed(reg_cntl &
~((((1 << tmdev->tsens_num_sensor) - 1) <<
TSENS_SENSOR0_SHIFT) | TSENS_8660_SLP_CLK_ENA
| TSENS_EN), TSENS_CNTL_ADDR);
}
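/*
 * One-time hardware setup: soft reset, then program the sleep clock,
 * measurement period, sensor enables and per-SoC configuration, and write
 * the initial lower/upper/min/max threshold codes.
 */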
static void tsens_hw_init(void)
{
unsigned int reg_cntl = 0, reg_cfg = 0, reg_thr = 0;
unsigned int reg_status_cntl = 0;
int tsens_min_limit_th = 0, tsens_max_limit_th = 0;
int i, sort_max = 0, sort_min = 0;
reg_cntl = readl_relaxed(TSENS_CNTL_ADDR);
writel_relaxed(reg_cntl | TSENS_SW_RST, TSENS_CNTL_ADDR);
if (tmdev->hw_type == MSM_8960 || tmdev->hw_type == MDM_9615) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
TSENS_LOWER_STATUS_CLR | TSENS_UPPER_STATUS_CLR |
TSENS_MIN_STATUS_MASK | TSENS_MAX_STATUS_MASK |
SENSORS_EN;
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_cntl |= TSENS_EN;
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_cfg = readl_relaxed(TSENS_8960_CONFIG_ADDR);
reg_cfg = (reg_cfg & ~TSENS_8960_CONFIG_MASK) |
(TSENS_8960_CONFIG << TSENS_8960_CONFIG_SHIFT);
writel_relaxed(reg_cfg, TSENS_8960_CONFIG_ADDR);
if (tmdev->tsens_num_sensor) {
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
if (tmdev->sensor[i].offset > tmdev->sensor[sort_max].offset)
sort_max = i;
else if (tmdev->sensor[i].offset < tmdev->sensor[sort_min].offset)
sort_min = i;
}
tsens_min_limit_th = tsens_tz_degC_to_code(TSENS_MIN_LIMIT_TEMP, sort_min);
tsens_max_limit_th = tsens_tz_degC_to_code(TSENS_MAX_LIMIT_TEMP, sort_max);
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
pr_info("%s: sensor[%d] min_threshold %d, max_threshold %d\n", __func__, i,
tsens_tz_code_to_degC(tsens_min_limit_th, i), tsens_tz_code_to_degC(tsens_max_limit_th,i));
}
}
else {
tsens_min_limit_th = TSENS_MIN_LIMIT_TH;
tsens_max_limit_th = TSENS_MAX_LIMIT_TH;
}
} else if (tmdev->hw_type == MSM_8660) {
reg_cntl |= TSENS_8660_SLP_CLK_ENA | TSENS_EN |
(TSENS_MEASURE_PERIOD << 16) |
TSENS_LOWER_STATUS_CLR | TSENS_UPPER_STATUS_CLR |
TSENS_MIN_STATUS_MASK | TSENS_MAX_STATUS_MASK |
(((1 << tmdev->tsens_num_sensor) - 1) <<
TSENS_SENSOR0_SHIFT);
reg_cntl = (reg_cntl & ~TSENS_8660_CONFIG_MASK) |
(TSENS_8660_CONFIG << TSENS_8660_CONFIG_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
} else if (tmdev->hw_type == APQ_8064) {
reg_cntl |= TSENS_8960_SLP_CLK_ENA |
(TSENS_MEASURE_PERIOD << 18) |
(((1 << tmdev->tsens_num_sensor) - 1)
<< TSENS_SENSOR0_SHIFT);
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_status_cntl = readl_relaxed(TSENS_8064_STATUS_CNTL);
reg_status_cntl |= TSENS_LOWER_STATUS_CLR |
TSENS_UPPER_STATUS_CLR |
TSENS_MIN_STATUS_MASK |
TSENS_MAX_STATUS_MASK;
writel_relaxed(reg_status_cntl, TSENS_8064_STATUS_CNTL);
reg_cntl |= TSENS_EN;
writel_relaxed(reg_cntl, TSENS_CNTL_ADDR);
reg_cfg = readl_relaxed(TSENS_8960_CONFIG_ADDR);
reg_cfg = (reg_cfg & ~TSENS_8960_CONFIG_MASK) |
(TSENS_8960_CONFIG << TSENS_8960_CONFIG_SHIFT);
writel_relaxed(reg_cfg, TSENS_8960_CONFIG_ADDR);
}
reg_thr |= (TSENS_LOWER_LIMIT_TH << TSENS_THRESHOLD_LOWER_LIMIT_SHIFT) |
(TSENS_UPPER_LIMIT_TH << TSENS_THRESHOLD_UPPER_LIMIT_SHIFT) |
(tsens_min_limit_th << TSENS_THRESHOLD_MIN_LIMIT_SHIFT) |
(tsens_max_limit_th << TSENS_THRESHOLD_MAX_LIMIT_SHIFT);
writel_relaxed(reg_thr, TSENS_THRESHOLD_ADDR);
}
static int tsens_calib_sensors8660(void)
{
uint32_t *main_sensor_addr, sensor_shift, red_sensor_shift;
uint32_t sensor_mask, red_sensor_mask;
main_sensor_addr = TSENS_8660_QFPROM_ADDR;
sensor_shift = TSENS_SENSOR_SHIFT;
red_sensor_shift = sensor_shift + TSENS_RED_SHIFT;
sensor_mask = TSENS_THRESHOLD_MAX_CODE << sensor_shift;
red_sensor_mask = TSENS_THRESHOLD_MAX_CODE << red_sensor_shift;
tmdev->sensor[TSENS_MAIN_SENSOR].calib_data =
(readl_relaxed(main_sensor_addr) & sensor_mask)
>> sensor_shift;
tmdev->sensor[TSENS_MAIN_SENSOR].calib_data_backup =
(readl_relaxed(main_sensor_addr)
& red_sensor_mask) >> red_sensor_shift;
if (tmdev->sensor[TSENS_MAIN_SENSOR].calib_data_backup)
tmdev->sensor[TSENS_MAIN_SENSOR].calib_data =
tmdev->sensor[TSENS_MAIN_SENSOR].calib_data_backup;
if (!tmdev->sensor[TSENS_MAIN_SENSOR].calib_data) {
pr_err("QFPROM TSENS calibration data not present\n");
return -ENODEV;
}
tmdev->sensor[TSENS_MAIN_SENSOR].offset = tmdev->tsens_factor *
TSENS_CAL_DEGC -
tmdev->sensor[TSENS_MAIN_SENSOR].slope_mul_tsens_factor *
tmdev->sensor[TSENS_MAIN_SENSOR].calib_data;
tmdev->prev_reading_avail = false;
INIT_WORK(&tmdev->sensor[TSENS_MAIN_SENSOR].work,
notify_uspace_tsens_fn);
return 0;
}
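/*
 * Read each sensor's one-point calibration code from QFPROM (preferring
 * the backup fuses when blown) and derive the per-sensor offset at the
 * 30 degC calibration point.
 */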
static int tsens_calib_sensors8960(void)
{
uint32_t i;
uint8_t *main_sensor_addr, *backup_sensor_addr;
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
main_sensor_addr = TSENS_8960_QFPROM_ADDR0 + i;
backup_sensor_addr = TSENS_8960_QFPROM_SPARE_ADDR0 + i;
tmdev->sensor[i].calib_data = readb_relaxed(main_sensor_addr);
tmdev->sensor[i].calib_data_backup =
readb_relaxed(backup_sensor_addr);
if (tmdev->sensor[i].calib_data_backup)
tmdev->sensor[i].calib_data =
tmdev->sensor[i].calib_data_backup;
if (!tmdev->sensor[i].calib_data) {
pr_err("QFPROM TSENS calibration data not present\n");
return -ENODEV;
}
tmdev->sensor[i].offset = (TSENS_CAL_DEGC *
tmdev->tsens_factor)
- (tmdev->sensor[i].calib_data *
tmdev->sensor[i].slope_mul_tsens_factor);
tmdev->prev_reading_avail = false;
INIT_WORK(&tmdev->sensor[i].work, notify_uspace_tsens_fn);
}
return 0;
}
static int tsens_check_version_support(void)
{
int rc = 0;
if (tmdev->hw_type == MSM_8960)
if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1)
rc = -ENODEV;
return rc;
}
static int tsens_calib_sensors(void)
{
int rc = -ENODEV;
if (tmdev->hw_type == MSM_8660)
rc = tsens_calib_sensors8660();
else if (tmdev->hw_type == MSM_8960 || tmdev->hw_type == MDM_9615 ||
tmdev->hw_type == APQ_8064)
rc = tsens_calib_sensors8960();
return rc;
}
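/*
 * Early init called from board code: allocate the driver state from
 * platform data, calibrate the sensors, program the hardware and start
 * the periodic status-monitoring work.
 */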
int msm_tsens_early_init(struct tsens_platform_data *pdata)
{
int rc = 0, i;
if (!pdata) {
pr_err("No TSENS Platform data\n");
return -EINVAL;
}
tmdev = kzalloc(sizeof(struct tsens_tm_device) +
pdata->tsens_num_sensor *
sizeof(struct tsens_tm_device_sensor),
GFP_ATOMIC);
if (tmdev == NULL) {
pr_err("%s: kzalloc() failed.\n", __func__);
return -ENOMEM;
}
for (i = 0; i < pdata->tsens_num_sensor; i++)
tmdev->sensor[i].slope_mul_tsens_factor = pdata->slope[i];
tmdev->tsens_factor = pdata->tsens_factor;
tmdev->tsens_num_sensor = pdata->tsens_num_sensor;
tmdev->hw_type = pdata->hw_type;
tmdev->patherm0 = pdata->patherm0;
tmdev->patherm1 = pdata->patherm1;
rc = tsens_check_version_support();
if (rc < 0) {
kfree(tmdev);
tmdev = NULL;
return rc;
}
rc = tsens_calib_sensors();
if (rc < 0) {
kfree(tmdev);
tmdev = NULL;
return rc;
}
if (tmdev->hw_type == APQ_8064)
tsens_status_cntl_start = 0;
else
tsens_status_cntl_start = TSENS_STATUS_CNTL_OFFSET;
tsens_hw_init();
if (monitor_tsense_wq == NULL) {
monitor_tsense_wq = create_workqueue("monitor_tsense_wq");
printk(KERN_INFO "Create monitor tsense workqueue(0x%x)...\n", (unsigned int)monitor_tsense_wq);
}
if (monitor_tsense_wq) {
INIT_DELAYED_WORK(&monitor_tsens_status_worker, monitor_tsens_status);
queue_delayed_work(monitor_tsense_wq, &monitor_tsens_status_worker, msecs_to_jiffies(0));
}
pr_debug("msm_tsens_early_init: done\n");
return rc;
}
static int __devinit tsens_tm_probe(struct platform_device *pdev)
{
int rc, i;
if (!tmdev) {
pr_info("%s : TSENS early init not done.\n", __func__);
return -EFAULT;
}
for (i = 0; i < tmdev->tsens_num_sensor; i++) {
char name[18];
snprintf(name, sizeof(name), "tsens_tz_sensor%d", i);
tmdev->sensor[i].mode = THERMAL_DEVICE_ENABLED;
tmdev->sensor[i].sensor_num = i;
tmdev->sensor[i].tz_dev = thermal_zone_device_register(name,
TSENS_TRIP_NUM, &tmdev->sensor[i],
&tsens_thermal_zone_ops, 0, 0, 0, 0);
if (IS_ERR(tmdev->sensor[i].tz_dev)) {
pr_err("%s: thermal_zone_device_register() failed.\n",
__func__);
rc = -ENODEV;
goto fail;
}
tmdev->sensor[i].mode = THERMAL_DEVICE_DISABLED;
}
tsens8960_sensor_mode_init();
rc = request_irq(TSENS_UPPER_LOWER_INT, tsens_isr,
IRQF_TRIGGER_RISING, "tsens_interrupt", tmdev);
if (rc < 0) {
pr_err("%s: request_irq FAIL: %d\n", __func__, rc);
for (i = 0; i < tmdev->tsens_num_sensor; i++)
thermal_zone_device_unregister(tmdev->sensor[i].tz_dev);
goto fail;
}
INIT_WORK(&tmdev->tsens_work, tsens_scheduler_fn);
pr_debug("%s: OK\n", __func__);
mb();
return 0;
fail:
tsens_disable_mode();
kfree(tmdev);
tmdev = NULL;
mb();
return rc;
}
static int __devexit tsens_tm_remove(struct platform_device *pdev)
{
int i;
tsens_disable_mode();
mb();
free_irq(TSENS_UPPER_LOWER_INT, tmdev);
for (i = 0; i < tmdev->tsens_num_sensor; i++)
thermal_zone_device_unregister(tmdev->sensor[i].tz_dev);
kfree(tmdev);
tmdev = NULL;
return 0;
}
static struct platform_driver tsens_tm_driver = {
.probe = tsens_tm_probe,
.remove = tsens_tm_remove,
.driver = {
.name = "tsens8960-tm",
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &tsens_pm_ops,
#endif
},
};
static int __init _tsens_tm_init(void)
{
return platform_driver_register(&tsens_tm_driver);
}
module_init(_tsens_tm_init);
static void __exit _tsens_tm_remove(void)
{
platform_driver_unregister(&tsens_tm_driver);
}
module_exit(_tsens_tm_remove);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM8960 Temperature Sensor driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:tsens8960-tm");
| gpl-2.0 |
pershoot/kernel-2634 | drivers/media/video/sh_mobile_ceu_camera.c | 42 | 54204 | /*
* V4L2 Driver for SuperH Mobile CEU interface
*
* Copyright (C) 2008 Magnus Damm
*
* Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
*
* Copyright (C) 2006, Sascha Hauer, Pengutronix
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>
/* register offsets for sh7722 / sh7723 */
#define CAPSR 0x00 /* Capture start register */
#define CAPCR 0x04 /* Capture control register */
#define CAMCR 0x08 /* Capture interface control register */
#define CMCYR 0x0c /* Capture interface cycle register */
#define CAMOR 0x10 /* Capture interface offset register */
#define CAPWR 0x14 /* Capture interface width register */
#define CAIFR 0x18 /* Capture interface input format register */
#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */
#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */
#define CRCNTR 0x28 /* CEU register control register */
#define CRCMPR 0x2c /* CEU register forcible control register */
#define CFLCR 0x30 /* Capture filter control register */
#define CFSZR 0x34 /* Capture filter size clip register */
#define CDWDR 0x38 /* Capture destination width register */
#define CDAYR 0x3c /* Capture data address Y register */
#define CDACR 0x40 /* Capture data address C register */
#define CDBYR 0x44 /* Capture data bottom-field address Y register */
#define CDBCR 0x48 /* Capture data bottom-field address C register */
#define CBDSR 0x4c /* Capture bundle destination size register */
#define CFWCR 0x5c /* Firewall operation control register */
#define CLFCR 0x60 /* Capture low-pass filter control register */
#define CDOCR 0x64 /* Capture data output control register */
#define CDDCR 0x68 /* Capture data complexity level register */
#define CDDAR 0x6c /* Capture data complexity level address register */
#define CEIER 0x70 /* Capture event interrupt enable register */
#define CETCR 0x74 /* Capture event flag clear register */
#define CSTSR 0x7c /* Capture status register */
#define CSRTR 0x80 /* Capture software reset register */
#define CDSSR 0x84 /* Capture data size register */
#define CDAYR2 0x90 /* Capture data address Y register 2 */
#define CDACR2 0x94 /* Capture data address C register 2 */
#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
#undef DEBUG_GEOMETRY
#ifdef DEBUG_GEOMETRY
#define dev_geo dev_info
#else
#define dev_geo dev_dbg
#endif
/* per video frame buffer */
struct sh_mobile_ceu_buffer {
struct videobuf_buffer vb; /* v4l buffer must be first */
enum v4l2_mbus_pixelcode code;
};
struct sh_mobile_ceu_dev {
struct soc_camera_host ici;
struct soc_camera_device *icd;
unsigned int irq;
void __iomem *base;
unsigned long video_limit;
/* lock used to protect videobuf */
spinlock_t lock;
struct list_head capture;
struct videobuf_buffer *active;
struct sh_mobile_ceu_info *pdata;
u32 cflcr;
enum v4l2_field field;
unsigned int image_mode:1;
unsigned int is_16bit:1;
};
struct sh_mobile_ceu_cam {
struct v4l2_rect ceu_rect;
unsigned int cam_width;
unsigned int cam_height;
const struct soc_mbus_pixelfmt *extra_fmt;
enum v4l2_mbus_pixelcode code;
};
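/*
 * Report the signal polarities and bus widths the CEU can accept,
 * narrowed by the 8/16-bit bus flags from the platform data.
 */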
static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev)
{
unsigned long flags;
flags = SOCAM_MASTER |
SOCAM_PCLK_SAMPLE_RISING |
SOCAM_HSYNC_ACTIVE_HIGH |
SOCAM_HSYNC_ACTIVE_LOW |
SOCAM_VSYNC_ACTIVE_HIGH |
SOCAM_VSYNC_ACTIVE_LOW |
SOCAM_DATA_ACTIVE_HIGH;
if (pcdev->pdata->flags & SH_CEU_FLAG_USE_8BIT_BUS)
flags |= SOCAM_DATAWIDTH_8;
if (pcdev->pdata->flags & SH_CEU_FLAG_USE_16BIT_BUS)
flags |= SOCAM_DATAWIDTH_16;
if (flags & SOCAM_DATAWIDTH_MASK)
return flags;
return 0;
}
static void ceu_write(struct sh_mobile_ceu_dev *priv,
unsigned long reg_offs, u32 data)
{
iowrite32(data, priv->base + reg_offs);
}
static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
{
return ioread32(priv->base + reg_offs);
}
static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
{
int i, success = 0;
struct soc_camera_device *icd = pcdev->icd;
ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
/* wait CSTSR.CPTON bit */
for (i = 0; i < 1000; i++) {
if (!(ceu_read(pcdev, CSTSR) & 1)) {
success++;
break;
}
udelay(1);
}
/* wait CAPSR.CPKIL bit */
for (i = 0; i < 1000; i++) {
if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
success++;
break;
}
udelay(1);
}
if (2 != success) {
dev_warn(&icd->dev, "soft reset time out\n");
return -EIO;
}
return 0;
}
/*
* Videobuf operations
*/
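/*
 * Calculate the per-buffer size from the current format and clamp the
 * buffer count so the total stays within the reserved memory limit.
 */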
static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq,
unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
if (bytes_per_line < 0)
return bytes_per_line;
*size = bytes_per_line * icd->user_height;
if (0 == *count)
*count = 2;
if (pcdev->video_limit) {
while (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
(*count)--;
}
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
return 0;
}
static void free_buffer(struct videobuf_queue *vq,
struct sh_mobile_ceu_buffer *buf)
{
struct soc_camera_device *icd = vq->priv_data;
struct device *dev = icd->dev.parent;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
&buf->vb, buf->vb.baddr, buf->vb.bsize);
if (in_interrupt())
BUG();
videobuf_waiton(&buf->vb, 0, 0);
videobuf_dma_contig_free(vq, &buf->vb);
dev_dbg(dev, "%s freed\n", __func__);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
#define CEU_CEIER_VBP (1 << 20) /* vbp error */
#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
/*
* return value doesn't reflect the success/failure to queue the new buffer,
* but rather the status of the previous buffer.
*/
static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
struct soc_camera_device *icd = pcdev->icd;
dma_addr_t phys_addr_top, phys_addr_bottom;
unsigned long top1, top2;
unsigned long bottom1, bottom2;
u32 status;
int ret = 0;
/*
* The hardware is _very_ picky about this sequence. Especially
* the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
* several not-so-well documented interrupt sources in CETCR.
*/
ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
status = ceu_read(pcdev, CETCR);
ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);
/*
* When a VBP interrupt occurs, a capture end interrupt does not occur
* and the image of that frame is not captured correctly. So, soft reset
* is needed here.
*/
if (status & CEU_CEIER_VBP) {
sh_mobile_ceu_soft_reset(pcdev);
ret = -EIO;
}
if (!pcdev->active)
return ret;
if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
top1 = CDBYR;
top2 = CDBCR;
bottom1 = CDAYR;
bottom2 = CDACR;
} else {
top1 = CDAYR;
top2 = CDACR;
bottom1 = CDBYR;
bottom2 = CDBCR;
}
phys_addr_top = videobuf_to_dma_contig(pcdev->active);
ceu_write(pcdev, top1, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
phys_addr_bottom = phys_addr_top + icd->user_width;
ceu_write(pcdev, bottom1, phys_addr_bottom);
}
switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
phys_addr_top += icd->user_width *
icd->user_height;
ceu_write(pcdev, top2, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
phys_addr_bottom = phys_addr_top + icd->user_width;
ceu_write(pcdev, bottom2, phys_addr_bottom);
}
}
pcdev->active->state = VIDEOBUF_ACTIVE;
ceu_write(pcdev, CAPSR, 0x1); /* start capture */
return ret;
}
static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq,
struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct soc_camera_device *icd = vq->priv_data;
struct sh_mobile_ceu_buffer *buf;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
int ret;
if (bytes_per_line < 0)
return bytes_per_line;
buf = container_of(vb, struct sh_mobile_ceu_buffer, vb);
dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
vb, vb->baddr, vb->bsize);
/* Added list head initialization on alloc */
WARN_ON(!list_empty(&vb->queue));
#ifdef DEBUG
/*
* This can be useful if you want to see if we actually fill
* the buffer with something
*/
memset((void *)vb->baddr, 0xaa, vb->bsize);
#endif
BUG_ON(NULL == icd->current_fmt);
if (buf->code != icd->current_fmt->code ||
vb->width != icd->user_width ||
vb->height != icd->user_height ||
vb->field != field) {
buf->code = icd->current_fmt->code;
vb->width = icd->user_width;
vb->height = icd->user_height;
vb->field = field;
vb->state = VIDEOBUF_NEEDS_INIT;
}
vb->size = vb->height * bytes_per_line;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
}
if (vb->state == VIDEOBUF_NEEDS_INIT) {
ret = videobuf_iolock(vq, vb, NULL);
if (ret)
goto fail;
vb->state = VIDEOBUF_PREPARED;
}
return 0;
fail:
free_buffer(vq, buf);
out:
return ret;
}
/* Called under spinlock_irqsave(&pcdev->lock, ...) */
static void sh_mobile_ceu_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
vb, vb->baddr, vb->bsize);
vb->state = VIDEOBUF_QUEUED;
list_add_tail(&vb->queue, &pcdev->capture);
if (!pcdev->active) {
/*
* Because there was no active buffer at this moment,
* we are not interested in the return value of
* sh_mobile_ceu_capture here.
*/
pcdev->active = vb;
sh_mobile_ceu_capture(pcdev);
}
}
static void sh_mobile_ceu_videobuf_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned long flags;
spin_lock_irqsave(&pcdev->lock, flags);
if (pcdev->active == vb) {
/* disable capture (release DMA buffer), reset */
ceu_write(pcdev, CAPSR, 1 << 16);
pcdev->active = NULL;
}
if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED) &&
!list_empty(&vb->queue)) {
vb->state = VIDEOBUF_ERROR;
list_del_init(&vb->queue);
}
spin_unlock_irqrestore(&pcdev->lock, flags);
free_buffer(vq, container_of(vb, struct sh_mobile_ceu_buffer, vb));
}
static struct videobuf_queue_ops sh_mobile_ceu_videobuf_ops = {
.buf_setup = sh_mobile_ceu_videobuf_setup,
.buf_prepare = sh_mobile_ceu_videobuf_prepare,
.buf_queue = sh_mobile_ceu_videobuf_queue,
.buf_release = sh_mobile_ceu_videobuf_release,
};
static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
{
struct sh_mobile_ceu_dev *pcdev = data;
struct videobuf_buffer *vb;
unsigned long flags;
spin_lock_irqsave(&pcdev->lock, flags);
vb = pcdev->active;
if (!vb)
/* Stale interrupt from a released buffer */
goto out;
list_del_init(&vb->queue);
if (!list_empty(&pcdev->capture))
pcdev->active = list_entry(pcdev->capture.next,
struct videobuf_buffer, queue);
else
pcdev->active = NULL;
vb->state = (sh_mobile_ceu_capture(pcdev) < 0) ?
VIDEOBUF_ERROR : VIDEOBUF_DONE;
do_gettimeofday(&vb->ts);
vb->field_count++;
wake_up(&vb->done);
out:
spin_unlock_irqrestore(&pcdev->lock, flags);
return IRQ_HANDLED;
}
/* Called with .video_lock held */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int ret;
if (pcdev->icd)
return -EBUSY;
dev_info(icd->dev.parent,
"SuperH Mobile CEU driver attached to camera %d\n",
icd->devnum);
pm_runtime_get_sync(ici->v4l2_dev.dev);
ret = sh_mobile_ceu_soft_reset(pcdev);
if (!ret)
pcdev->icd = icd;
return ret;
}
/* Called with .video_lock held */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned long flags;
BUG_ON(icd != pcdev->icd);
/* disable capture, disable interrupts */
ceu_write(pcdev, CEIER, 0);
sh_mobile_ceu_soft_reset(pcdev);
/* make sure active buffer is canceled */
spin_lock_irqsave(&pcdev->lock, flags);
if (pcdev->active) {
list_del(&pcdev->active->queue);
pcdev->active->state = VIDEOBUF_ERROR;
wake_up_all(&pcdev->active->done);
pcdev->active = NULL;
}
spin_unlock_irqrestore(&pcdev->lock, flags);
pm_runtime_put_sync(ici->v4l2_dev.dev);
dev_info(icd->dev.parent,
"SuperH Mobile CEU driver detached from camera %d\n",
icd->devnum);
pcdev->icd = NULL;
}
/*
* See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
* in SH7722 Hardware Manual
*/
static unsigned int size_dst(unsigned int src, unsigned int scale)
{
unsigned int mant_pre = scale >> 12;
if (!src || !scale)
return src;
return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
mant_pre * 4096 / scale + 1;
}
static u16 calc_scale(unsigned int src, unsigned int *dst)
{
u16 scale;
if (src == *dst)
return 0;
scale = (src * 4096 / *dst) & ~7;
while (scale > 4096 && size_dst(src, scale) < *dst)
scale -= 8;
*dst = size_dst(src, scale);
return scale;
}
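/*
* Illustration: the CFLCR scale factors are 4.12 fixed-point ratios, so
* 4096 means "no scaling". For a hypothetical 640-pixel source line scaled
* to a requested 320 pixels, calc_scale(640, &dst) computes
* 640 * 4096 / 320 = 8192 (i.e. 2.0); size_dst(640, 8192) evaluates back to
* exactly 320, so *dst stays at 320 and 0x2000 is returned.
*/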
/* rect is guaranteed to not exceed the scaled camera rectangle */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
unsigned int out_width,
unsigned int out_height)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_rect *rect = &cam->ceu_rect;
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned int height, width, cdwdr_width, in_width, in_height;
unsigned int left_offset, top_offset;
u32 camor;
dev_dbg(icd->dev.parent, "Crop %ux%u@%u:%u\n",
rect->width, rect->height, rect->left, rect->top);
left_offset = rect->left;
top_offset = rect->top;
if (pcdev->image_mode) {
in_width = rect->width;
if (!pcdev->is_16bit) {
in_width *= 2;
left_offset *= 2;
}
width = out_width;
cdwdr_width = out_width;
} else {
int bytes_per_line = soc_mbus_bytes_per_line(out_width,
icd->current_fmt->host_fmt);
unsigned int w_factor;
width = out_width;
switch (icd->current_fmt->host_fmt->packing) {
case SOC_MBUS_PACKING_2X8_PADHI:
w_factor = 2;
break;
default:
w_factor = 1;
}
in_width = rect->width * w_factor;
left_offset = left_offset * w_factor;
if (bytes_per_line < 0)
cdwdr_width = out_width;
else
cdwdr_width = bytes_per_line;
}
height = out_height;
in_height = rect->height;
if (V4L2_FIELD_NONE != pcdev->field) {
height /= 2;
in_height /= 2;
top_offset /= 2;
cdwdr_width *= 2;
}
/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
camor = left_offset | (top_offset << 16);
dev_geo(icd->dev.parent,
"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
(in_height << 16) | in_width, (height << 16) | width,
cdwdr_width);
ceu_write(pcdev, CAMOR, camor);
ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
ceu_write(pcdev, CFSZR, (height << 16) | width);
ceu_write(pcdev, CDWDR, cdwdr_width);
}
static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
{
u32 capsr = ceu_read(pcdev, CAPSR);
ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
return capsr;
}
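/*
* CAPSR bit 16 is used throughout this driver as "reset / stop capture" and
* bit 0 as "start capture" (see sh_mobile_ceu_capture() above), so the value
* saved here lets capture_restore() re-arm a capture that was in progress.
*/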
static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
{
unsigned long timeout = jiffies + 10 * HZ;
/*
* Wait until the end of the current frame. It can take a long time,
* but if it has been aborted by a CAPSR reset, it should exit sooner.
*/
while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
msleep(1);
if (time_after(jiffies, timeout)) {
dev_err(pcdev->ici.v4l2_dev.dev,
"Timeout waiting for frame end! Interface problem?\n");
return;
}
/* Wait until reset clears, this shall not hang... */
while (ceu_read(pcdev, CAPSR) & (1 << 16))
udelay(10);
/* Anything to restore? */
if (capsr & ~(1 << 16))
ceu_write(pcdev, CAPSR, capsr);
}
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int ret;
unsigned long camera_flags, common_flags, value;
int yuv_lineskip;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
u32 capsr = capture_save_reset(pcdev);
camera_flags = icd->ops->query_bus_param(icd);
common_flags = soc_camera_bus_param_compatible(camera_flags,
make_bus_param(pcdev));
if (!common_flags)
return -EINVAL;
/* Make choices, based on platform preferences */
if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
(common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
else
common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
}
if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
(common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
else
common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
}
ret = icd->ops->set_bus_param(icd, common_flags);
if (ret < 0)
return ret;
switch (common_flags & SOCAM_DATAWIDTH_MASK) {
case SOCAM_DATAWIDTH_8:
pcdev->is_16bit = 0;
break;
case SOCAM_DATAWIDTH_16:
pcdev->is_16bit = 1;
break;
default:
return -EINVAL;
}
ceu_write(pcdev, CRCNTR, 0);
ceu_write(pcdev, CRCMPR, 0);
value = 0x00000010; /* data fetch by default */
yuv_lineskip = 0;
switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
/* fall-through */
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
switch (cam->code) {
case V4L2_MBUS_FMT_YUYV8_2X8_BE:
value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
break;
case V4L2_MBUS_FMT_YVYU8_2X8_BE:
value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
break;
case V4L2_MBUS_FMT_YUYV8_2X8_LE:
value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
break;
case V4L2_MBUS_FMT_YVYU8_2X8_LE:
value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
break;
default:
BUG();
}
}
if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
value |= common_flags & SOCAM_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
value |= pcdev->is_16bit ? 1 << 12 : 0;
ceu_write(pcdev, CAMCR, value);
ceu_write(pcdev, CAPCR, 0x00300000);
switch (pcdev->field) {
case V4L2_FIELD_INTERLACED_TB:
value = 0x101;
break;
case V4L2_FIELD_INTERLACED_BT:
value = 0x102;
break;
default:
value = 0;
break;
}
ceu_write(pcdev, CAIFR, value);
sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height);
mdelay(1);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
/*
* A few words about byte order (observed in Big Endian mode)
*
* In data fetch mode bytes are received in chunks of 8 bytes.
* D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
*
* The data is however by default written to memory in reverse order:
* D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
*
* The lowest three bits of CDOCR allow us to do swapping,
* using 7 we swap the data bytes to match the incoming order:
* D0, D1, D2, D3, D4, D5, D6, D7
*/
value = 0x00000017;
if (yuv_lineskip)
value &= ~0x00000010; /* convert 4:2:2 -> 4:2:0 */
ceu_write(pcdev, CDOCR, value);
ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n",
pixfmt & 0xff, (pixfmt >> 8) & 0xff,
(pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
icd->user_width, icd->user_height);
capture_restore(pcdev, capsr);
/* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
return 0;
}
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
camera_flags = icd->ops->query_bus_param(icd);
common_flags = soc_camera_bus_param_compatible(camera_flags,
make_bus_param(pcdev));
if (!common_flags || buswidth > 16 ||
(buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16)))
return -EINVAL;
return 0;
}
static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.name = "NV12",
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
}, {
.fourcc = V4L2_PIX_FMT_NV21,
.name = "NV21",
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.name = "NV16",
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.name = "NV61",
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
},
};
/* This will be corrected as we get more formats */
static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
{
return fmt->packing == SOC_MBUS_PACKING_NONE ||
(fmt->bits_per_sample == 8 &&
fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
(fmt->bits_per_sample > 8 &&
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
int ret, k, n;
int formats = 0;
struct sh_mobile_ceu_cam *cam;
enum v4l2_mbus_pixelcode code;
const struct soc_mbus_pixelfmt *fmt;
ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
if (ret < 0)
/* No more formats */
return 0;
fmt = soc_mbus_get_fmtdesc(code);
if (!fmt) {
dev_err(icd->dev.parent,
"Invalid format code #%d: %d\n", idx, code);
return -EINVAL;
}
ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
if (!icd->host_priv) {
cam = kzalloc(sizeof(*cam), GFP_KERNEL);
if (!cam)
return -ENOMEM;
icd->host_priv = cam;
} else {
cam = icd->host_priv;
}
/* Beginning of a pass */
if (!idx)
cam->extra_fmt = NULL;
switch (code) {
case V4L2_MBUS_FMT_YUYV8_2X8_BE:
case V4L2_MBUS_FMT_YVYU8_2X8_BE:
case V4L2_MBUS_FMT_YUYV8_2X8_LE:
case V4L2_MBUS_FMT_YVYU8_2X8_LE:
if (cam->extra_fmt)
break;
/*
* Our case is simple so far: for any of the above four camera
* formats we add all our four synthesized NV* formats, so,
* just marking the device with a single flag suffices. If
* the format generation rules are more complex, you would have
* to actually hang your already added / counted formats onto
* the host_priv pointer and check whether the format you're
* going to add now is already there.
*/
cam->extra_fmt = sh_mobile_ceu_formats;
n = ARRAY_SIZE(sh_mobile_ceu_formats);
formats += n;
for (k = 0; xlate && k < n; k++) {
xlate->host_fmt = &sh_mobile_ceu_formats[k];
xlate->code = code;
xlate++;
dev_dbg(dev, "Providing format %s using code %d\n",
sh_mobile_ceu_formats[k].name, code);
}
break;
default:
if (!sh_mobile_ceu_packing_supported(fmt))
return 0;
}
/* Generic pass-through */
formats++;
if (xlate) {
xlate->host_fmt = fmt;
xlate->code = code;
xlate++;
dev_dbg(dev, "Providing format %s in pass-through mode\n",
fmt->name);
}
return formats;
}
static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
{
kfree(icd->host_priv);
icd->host_priv = NULL;
}
/* Check if any dimension of r1 is smaller than respective one of r2 */
static bool is_smaller(struct v4l2_rect *r1, struct v4l2_rect *r2)
{
return r1->width < r2->width || r1->height < r2->height;
}
/* Check if r1 fails to cover r2 */
static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2)
{
return r1->left > r2->left || r1->top > r2->top ||
r1->left + r1->width < r2->left + r2->width ||
r1->top + r1->height < r2->top + r2->height;
}
static unsigned int scale_down(unsigned int size, unsigned int scale)
{
return (size * 4096 + scale / 2) / scale;
}
static unsigned int scale_up(unsigned int size, unsigned int scale)
{
return (size * scale + 2048) / 4096;
}
static unsigned int calc_generic_scale(unsigned int input, unsigned int output)
{
return (input * 4096 + output / 2) / output;
}
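/*
* Illustration: these helpers use the same 4096-based fixed point. For a
* hypothetical 480-line window scaled by a factor of 8192 (i.e. 2.0),
* calc_generic_scale(480, 240) yields 8192, scale_down(480, 8192) gives back
* 240 and scale_up(240, 8192) restores 480, so sizes can be mapped between
* the camera and CEU domains in either direction.
*/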
static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
{
struct v4l2_crop crop;
struct v4l2_cropcap cap;
int ret;
crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, g_crop, &crop);
if (!ret) {
*rect = crop.c;
return ret;
}
/* Camera driver doesn't support .g_crop(), assume default rectangle */
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
if (ret < 0)
return ret;
*rect = cap.defrect;
return ret;
}
/*
* The common for both scaling and cropping iterative approach is:
* 1. try if the client can produce exactly what requested by the user
* 2. if (1) failed, try to double the client image until we get one big enough
* 3. if (2) failed, try to request the maximum image
*/
static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
struct v4l2_crop *cam_crop)
{
struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
struct device *dev = sd->v4l2_dev->dev;
struct v4l2_cropcap cap;
int ret;
unsigned int width, height;
v4l2_subdev_call(sd, video, s_crop, crop);
ret = client_g_rect(sd, cam_rect);
if (ret < 0)
return ret;
/*
* Now cam_crop contains the current camera input rectangle, and it must
* be within camera cropcap bounds
*/
if (!memcmp(rect, cam_rect, sizeof(*rect))) {
/* Even if camera S_CROP failed, the camera rectangle matches the request */
dev_dbg(dev, "Camera S_CROP successful for %ux%u@%u:%u\n",
rect->width, rect->height, rect->left, rect->top);
return 0;
}
/* Try to fix the cropping that the camera hasn't managed to set */
dev_geo(dev, "Fix camera S_CROP for %ux%u@%u:%u to %ux%u@%u:%u\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top,
rect->width, rect->height, rect->left, rect->top);
/* We need sensor maximum rectangle */
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
if (ret < 0)
return ret;
soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
cap.bounds.width);
soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
cap.bounds.height);
/*
* Popular special case - some cameras can only handle fixed sizes like
* QVGA, VGA,... Take care to avoid infinite loop.
*/
width = max(cam_rect->width, 2);
height = max(cam_rect->height, 2);
while (!ret && (is_smaller(cam_rect, rect) ||
is_inside(cam_rect, rect)) &&
(cap.bounds.width > width || cap.bounds.height > height)) {
width *= 2;
height *= 2;
cam_rect->width = width;
cam_rect->height = height;
/*
* We do not know what capabilities the camera has to set up
* left and top borders. We could try to be smarter in iterating
* them, e.g., if camera current left is to the right of the
* target left, set it to the middle point between the current
* left and minimum left. But that would add too much
* complexity: we would have to iterate each border separately.
*/
if (cam_rect->left > rect->left)
cam_rect->left = cap.bounds.left;
if (cam_rect->left + cam_rect->width < rect->left + rect->width)
cam_rect->width = rect->left + rect->width -
cam_rect->left;
if (cam_rect->top > rect->top)
cam_rect->top = cap.bounds.top;
if (cam_rect->top + cam_rect->height < rect->top + rect->height)
cam_rect->height = rect->top + rect->height -
cam_rect->top;
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
dev_geo(dev, "Camera S_CROP %d for %ux%u@%u:%u\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
/* S_CROP must not modify the rectangle */
if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
/*
* The camera failed to configure suitable cropping,
* so we cannot use the current rectangle; set it to the maximum
*/
*cam_rect = cap.bounds;
v4l2_subdev_call(sd, video, s_crop, cam_crop);
ret = client_g_rect(sd, cam_rect);
dev_geo(dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
return ret;
}
static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect,
unsigned int *scale_h, unsigned int *scale_v)
{
struct v4l2_mbus_framefmt mf;
int ret;
ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
if (ret < 0)
return ret;
*scale_h = calc_generic_scale(rect->width, mf.width);
*scale_v = calc_generic_scale(rect->height, mf.height);
return 0;
}
static int get_camera_subwin(struct soc_camera_device *icd,
struct v4l2_rect *cam_subrect,
unsigned int cam_hscale, unsigned int cam_vscale)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_rect *ceu_rect = &cam->ceu_rect;
if (!ceu_rect->width) {
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
int ret;
/* First time */
ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
if (ret < 0)
return ret;
dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
if (mf.width > 2560) {
ceu_rect->width = 2560;
ceu_rect->left = (mf.width - 2560) / 2;
} else {
ceu_rect->width = mf.width;
ceu_rect->left = 0;
}
if (mf.height > 1920) {
ceu_rect->height = 1920;
ceu_rect->top = (mf.height - 1920) / 2;
} else {
ceu_rect->height = mf.height;
ceu_rect->top = 0;
}
dev_geo(dev, "initialised CEU rect %ux%u@%u:%u\n",
ceu_rect->width, ceu_rect->height,
ceu_rect->left, ceu_rect->top);
}
cam_subrect->width = scale_up(ceu_rect->width, cam_hscale);
cam_subrect->left = scale_up(ceu_rect->left, cam_hscale);
cam_subrect->height = scale_up(ceu_rect->height, cam_vscale);
cam_subrect->top = scale_up(ceu_rect->top, cam_vscale);
return 0;
}
static int client_s_fmt(struct soc_camera_device *icd,
struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
unsigned int max_width, max_height;
struct v4l2_cropcap cap;
int ret;
cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = v4l2_subdev_call(sd, video, cropcap, &cap);
if (ret < 0)
return ret;
max_width = min(cap.bounds.width, 2560);
max_height = min(cap.bounds.height, 1920);
ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
if (ret < 0)
return ret;
dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
if ((width == mf->width && height == mf->height) || !ceu_can_scale)
return 0;
/* Camera set a format, but geometry is not precise, try to improve */
tmp_w = mf->width;
tmp_h = mf->height;
/* width <= max_width && height <= max_height - guaranteed by try_fmt */
while ((width > tmp_w || height > tmp_h) &&
tmp_w < max_width && tmp_h < max_height) {
tmp_w = min(2 * tmp_w, max_width);
tmp_h = min(2 * tmp_h, max_height);
mf->width = tmp_w;
mf->height = tmp_h;
ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
mf->width, mf->height);
if (ret < 0) {
/* This shouldn't happen */
dev_err(dev, "Client failed to set format: %d\n", ret);
return ret;
}
}
return 0;
}
/**
* @rect - camera cropped rectangle
* @sub_rect - CEU cropped rectangle, mapped back to camera input area
* @ceu_rect - calculated CEU crop rectangle (output)
*/
static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
/* 5. Apply iterative camera S_FMT for camera user window. */
ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
if (ret < 0)
return ret;
dev_geo(dev, "5: camera scaled to %ux%u\n",
mf_tmp.width, mf_tmp.height);
/* 6. Retrieve camera output window (g_fmt) */
/* unneeded - it is already in "mf_tmp" */
/* 7. Calculate new camera scales. */
ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
if (ret < 0)
return ret;
dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);
cam->cam_width = mf_tmp.width;
cam->cam_height = mf_tmp.height;
mf->width = mf_tmp.width;
mf->height = mf_tmp.height;
mf->colorspace = mf_tmp.colorspace;
/*
* 8. Calculate new CEU crop - apply camera scales to previously
* calculated "effective" crop.
*/
ceu_rect->left = scale_down(sub_rect->left, scale_h);
ceu_rect->width = scale_down(sub_rect->width, scale_h);
ceu_rect->top = scale_down(sub_rect->top, scale_v);
ceu_rect->height = scale_down(sub_rect->height, scale_v);
dev_geo(dev, "8: new CEU rect %ux%u@%u:%u\n",
ceu_rect->width, ceu_rect->height,
ceu_rect->left, ceu_rect->top);
return 0;
}
/* Get combined scales */
static int get_scales(struct soc_camera_device *icd,
unsigned int *scale_h, unsigned int *scale_v)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_crop cam_crop;
unsigned int width_in, height_in;
int ret;
cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = client_g_rect(sd, &cam_crop.c);
if (ret < 0)
return ret;
ret = get_camera_scales(sd, &cam_crop.c, scale_h, scale_v);
if (ret < 0)
return ret;
width_in = scale_up(cam->ceu_rect.width, *scale_h);
height_in = scale_up(cam->ceu_rect.height, *scale_v);
*scale_h = calc_generic_scale(width_in, icd->user_width);
*scale_v = calc_generic_scale(height_in, icd->user_height);
return 0;
}
/*
* CEU can scale and crop, but we don't want to waste bandwidth and kill the
* framerate by always requesting the maximum image from the client. See
* Documentation/video4linux/sh_mobile_camera_ceu.txt for a description of
* scaling and cropping algorithms and for the meaning of the steps referenced here.
*/
static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct v4l2_crop cam_crop;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
out_width, out_height;
u32 capsr, cflcr;
int ret;
/* 1. Calculate current combined scales. */
ret = get_scales(icd, &scale_comb_h, &scale_comb_v);
if (ret < 0)
return ret;
dev_geo(dev, "1: combined scales %u:%u\n", scale_comb_h, scale_comb_v);
/* 2. Apply iterative camera S_CROP for new input window. */
ret = client_s_crop(sd, a, &cam_crop);
if (ret < 0)
return ret;
dev_geo(dev, "2: camera cropped to %ux%u@%u:%u\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
/* On success cam_crop contains current camera crop */
/*
* 3. If old combined scales applied to new crop produce an impossible
* user window, adjust scales to produce nearest possible window.
*/
out_width = scale_down(rect->width, scale_comb_h);
out_height = scale_down(rect->height, scale_comb_v);
if (out_width > 2560)
out_width = 2560;
else if (out_width < 2)
out_width = 2;
if (out_height > 1920)
out_height = 1920;
else if (out_height < 4)
out_height = 4;
dev_geo(dev, "3: Adjusted output %ux%u\n", out_width, out_height);
/* 4. Use G_CROP to retrieve actual input window: already in cam_crop */
/*
* 5. Using the actual input window and the calculated combined scales,
* calculate the camera target output window.
*/
mf.width = scale_down(cam_rect->width, scale_comb_h);
mf.height = scale_down(cam_rect->height, scale_comb_v);
dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
/* 6. - 9. */
mf.code = cam->code;
mf.field = pcdev->field;
capsr = capture_save_reset(pcdev);
dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
/* Make relative to camera rectangle */
rect->left -= cam_rect->left;
rect->top -= cam_rect->top;
ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
pcdev->image_mode &&
V4L2_FIELD_NONE == pcdev->field);
dev_geo(dev, "6-9: %d\n", ret);
/* 10. Use CEU cropping to crop to the new window. */
sh_mobile_ceu_set_rect(icd, out_width, out_height);
dev_geo(dev, "10: CEU cropped to %ux%u@%u:%u\n",
ceu_rect->width, ceu_rect->height,
ceu_rect->left, ceu_rect->top);
/*
* 11. Calculate CEU scales from the CEU crop rectangle of (10) and the
* user window from (3)
*/
scale_ceu_h = calc_scale(ceu_rect->width, &out_width);
scale_ceu_v = calc_scale(ceu_rect->height, &out_height);
dev_geo(dev, "11: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);
/* 12. Apply CEU scales. */
cflcr = scale_ceu_h | (scale_ceu_v << 16);
if (cflcr != pcdev->cflcr) {
pcdev->cflcr = cflcr;
ceu_write(pcdev, CFLCR, cflcr);
}
/* Restore capture */
if (pcdev->active)
capsr |= 1;
capture_restore(pcdev, capsr);
icd->user_width = out_width;
icd->user_height = out_height;
/* Even if only camera cropping succeeded */
return ret;
}
/* Similar to set_crop multistage iterative algorithm */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
const struct soc_camera_format_xlate *xlate;
struct v4l2_crop cam_crop;
struct v4l2_rect *cam_rect = &cam_crop.c, cam_subrect, ceu_rect;
unsigned int scale_cam_h, scale_cam_v;
u16 scale_v, scale_h;
int ret;
bool image_mode;
enum v4l2_field field;
switch (pix->field) {
default:
pix->field = V4L2_FIELD_NONE;
/* fall-through */
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_NONE:
field = pix->field;
break;
case V4L2_FIELD_INTERLACED:
field = V4L2_FIELD_INTERLACED_TB;
break;
}
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
dev_warn(dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
/* 1. Calculate current camera scales. */
cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = client_g_rect(sd, cam_rect);
if (ret < 0)
return ret;
ret = get_camera_scales(sd, cam_rect, &scale_cam_h, &scale_cam_v);
if (ret < 0)
return ret;
dev_geo(dev, "1: camera scales %u:%u\n", scale_cam_h, scale_cam_v);
/*
* 2. Calculate "effective" input crop (sensor subwindow) - CEU crop
* scaled back at current camera scales onto input window.
*/
ret = get_camera_subwin(icd, &cam_subrect, scale_cam_h, scale_cam_v);
if (ret < 0)
return ret;
dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
cam_subrect.width, cam_subrect.height,
cam_subrect.left, cam_subrect.top);
/*
* 3. Calculate new combined scales from "effective" input window to
* requested user window.
*/
scale_h = calc_generic_scale(cam_subrect.width, pix->width);
scale_v = calc_generic_scale(cam_subrect.height, pix->height);
dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);
/*
* 4. Calculate camera output window by applying combined scales to real
* input window.
*/
mf.width = scale_down(cam_rect->width, scale_h);
mf.height = scale_down(cam_rect->height, scale_v);
mf.field = pix->field;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
switch (pixfmt) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
image_mode = true;
break;
default:
image_mode = false;
}
dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
/* 5. - 9. */
ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
image_mode && V4L2_FIELD_NONE == field);
dev_geo(dev, "5-9: client scale %d\n", ret);
/* Done with the camera. Now see if we can improve the result */
dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
ret, mf.width, mf.height, pix->width, pix->height);
if (ret < 0)
return ret;
if (mf.code != xlate->code)
return -EINVAL;
/* 10. Use CEU scaling to scale to the requested user window. */
/* We cannot scale up */
if (pix->width > mf.width)
pix->width = mf.width;
if (pix->width > ceu_rect.width)
pix->width = ceu_rect.width;
if (pix->height > mf.height)
pix->height = mf.height;
if (pix->height > ceu_rect.height)
pix->height = ceu_rect.height;
pix->colorspace = mf.colorspace;
if (image_mode) {
/* Scale pix->{width x height} down to width x height */
scale_h = calc_scale(ceu_rect.width, &pix->width);
scale_v = calc_scale(ceu_rect.height, &pix->height);
pcdev->cflcr = scale_h | (scale_v << 16);
} else {
pix->width = ceu_rect.width;
pix->height = ceu_rect.height;
scale_h = scale_v = 0;
pcdev->cflcr = 0;
}
dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
ceu_rect.width, scale_h, pix->width,
ceu_rect.height, scale_v, pix->height);
cam->code = xlate->code;
cam->ceu_rect = ceu_rect;
icd->current_fmt = xlate;
pcdev->field = field;
pcdev->image_mode = image_mode;
return 0;
}
static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_framefmt mf;
__u32 pixfmt = pix->pixelformat;
int width, height;
int ret;
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
/* FIXME: calculate using depth and bus width */
v4l_bound_align_image(&pix->width, 2, 2560, 1,
&pix->height, 4, 1920, 2, 0);
width = pix->width;
height = pix->height;
pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
if ((int)pix->bytesperline < 0)
return pix->bytesperline;
pix->sizeimage = height * pix->bytesperline;
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
mf.field = pix->field;
mf.code = xlate->code;
mf.colorspace = pix->colorspace;
ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
pix->width = mf.width;
pix->height = mf.height;
pix->field = mf.field;
pix->colorspace = mf.colorspace;
switch (pixfmt) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
/* FIXME: check against rect_max after converting soc-camera */
/* We can scale precisely, need a bigger image from camera */
if (pix->width < width || pix->height < height) {
/*
* We presume the sensor behaves sanely, i.e., if asked
* for a bigger rectangle, it will not return a
* smaller one.
*/
mf.width = 2560;
mf.height = 1920;
ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->dev.parent,
"FIXME: client try_fmt() = %d\n", ret);
return ret;
}
}
/* We will scale exactly */
if (mf.width > width)
pix->width = width;
if (mf.height > height)
pix->height = height;
}
return ret;
}
static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
struct v4l2_requestbuffers *p)
{
int i;
/*
* This is for locking debugging only. I removed spinlocks and now I
* check whether .prepare is ever called on a linked buffer, or whether
* a DMA IRQ can occur for an in-work or unlinked buffer. So far
* this has not triggered.
*/
for (i = 0; i < p->count; i++) {
struct sh_mobile_ceu_buffer *buf;
buf = container_of(icf->vb_vidq.bufs[i],
struct sh_mobile_ceu_buffer, vb);
INIT_LIST_HEAD(&buf->vb.queue);
}
return 0;
}
static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
struct soc_camera_file *icf = file->private_data;
struct sh_mobile_ceu_buffer *buf;
buf = list_entry(icf->vb_vidq.stream.next,
struct sh_mobile_ceu_buffer, vb.stream);
poll_wait(file, &buf->vb.done, pt);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
return POLLIN|POLLRDNORM;
return 0;
}
static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
cap->version = KERNEL_VERSION(0, 0, 5);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
videobuf_queue_dma_contig_init(q,
&sh_mobile_ceu_videobuf_ops,
icd->dev.parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
pcdev->field,
sizeof(struct sh_mobile_ceu_buffer),
icd);
}
static int sh_mobile_ceu_get_parm(struct soc_camera_device *icd,
struct v4l2_streamparm *parm)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, video, g_parm, parm);
}
static int sh_mobile_ceu_set_parm(struct soc_camera_device *icd,
struct v4l2_streamparm *parm)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
return v4l2_subdev_call(sd, video, s_parm, parm);
}
static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
u32 val;
switch (ctrl->id) {
case V4L2_CID_SHARPNESS:
val = ceu_read(pcdev, CLFCR);
ctrl->value = val ^ 1;
return 0;
}
return -ENOIOCTLCMD;
}
static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
switch (ctrl->id) {
case V4L2_CID_SHARPNESS:
switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
ceu_write(pcdev, CLFCR, !ctrl->value);
return 0;
}
return -EINVAL;
}
return -ENOIOCTLCMD;
}
static const struct v4l2_queryctrl sh_mobile_ceu_controls[] = {
{
.id = V4L2_CID_SHARPNESS,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Low-pass filter",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
};
static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.owner = THIS_MODULE,
.add = sh_mobile_ceu_add_device,
.remove = sh_mobile_ceu_remove_device,
.get_formats = sh_mobile_ceu_get_formats,
.put_formats = sh_mobile_ceu_put_formats,
.set_crop = sh_mobile_ceu_set_crop,
.set_fmt = sh_mobile_ceu_set_fmt,
.try_fmt = sh_mobile_ceu_try_fmt,
.set_ctrl = sh_mobile_ceu_set_ctrl,
.get_ctrl = sh_mobile_ceu_get_ctrl,
.set_parm = sh_mobile_ceu_set_parm,
.get_parm = sh_mobile_ceu_get_parm,
.reqbufs = sh_mobile_ceu_reqbufs,
.poll = sh_mobile_ceu_poll,
.querycap = sh_mobile_ceu_querycap,
.set_bus_param = sh_mobile_ceu_set_bus_param,
.init_videobuf = sh_mobile_ceu_init_videobuf,
.controls = sh_mobile_ceu_controls,
.num_controls = ARRAY_SIZE(sh_mobile_ceu_controls),
};
static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
{
struct sh_mobile_ceu_dev *pcdev;
struct resource *res;
void __iomem *base;
unsigned int irq;
int err = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!res || (int)irq <= 0) {
dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
err = -ENODEV;
goto exit;
}
pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL);
if (!pcdev) {
dev_err(&pdev->dev, "Could not allocate pcdev\n");
err = -ENOMEM;
goto exit;
}
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
pcdev->pdata = pdev->dev.platform_data;
if (!pcdev->pdata) {
err = -EINVAL;
dev_err(&pdev->dev, "CEU platform data not set.\n");
goto exit_kfree;
}
base = ioremap_nocache(res->start, resource_size(res));
if (!base) {
err = -ENXIO;
dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
goto exit_kfree;
}
pcdev->irq = irq;
pcdev->base = base;
pcdev->video_limit = 0; /* only enabled if second resource exists */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
err = dma_declare_coherent_memory(&pdev->dev, res->start,
res->start,
resource_size(res),
DMA_MEMORY_MAP |
DMA_MEMORY_EXCLUSIVE);
if (!err) {
dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
err = -ENXIO;
goto exit_iounmap;
}
pcdev->video_limit = resource_size(res);
}
/* request irq */
err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED,
dev_name(&pdev->dev), pcdev);
if (err) {
dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
goto exit_release_mem;
}
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
pcdev->ici.priv = pcdev;
pcdev->ici.v4l2_dev.dev = &pdev->dev;
pcdev->ici.nr = pdev->id;
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
err = soc_camera_host_register(&pcdev->ici);
if (err)
goto exit_free_clk;
return 0;
exit_free_clk:
pm_runtime_disable(&pdev->dev);
free_irq(pcdev->irq, pcdev);
exit_release_mem:
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
exit_iounmap:
iounmap(base);
exit_kfree:
kfree(pcdev);
exit:
return err;
}
static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
struct sh_mobile_ceu_dev, ici);
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
free_irq(pcdev->irq, pcdev);
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
iounmap(pcdev->base);
kfree(pcdev);
return 0;
}
static int sh_mobile_ceu_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
* pm_runtime_get_sync() anyway so there is no need
* to save and restore registers here.
*/
return 0;
}
static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
.runtime_suspend = sh_mobile_ceu_runtime_nop,
.runtime_resume = sh_mobile_ceu_runtime_nop,
};
static struct platform_driver sh_mobile_ceu_driver = {
.driver = {
.name = "sh_mobile_ceu",
.pm = &sh_mobile_ceu_dev_pm_ops,
},
.probe = sh_mobile_ceu_probe,
.remove = __devexit_p(sh_mobile_ceu_remove),
};
static int __init sh_mobile_ceu_init(void)
{
return platform_driver_register(&sh_mobile_ceu_driver);
}
static void __exit sh_mobile_ceu_exit(void)
{
platform_driver_unregister(&sh_mobile_ceu_driver);
}
module_init(sh_mobile_ceu_init);
module_exit(sh_mobile_ceu_exit);
MODULE_DESCRIPTION("SuperH Mobile CEU driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh_mobile_ceu");
| gpl-2.0 |
embeddedarm/linux-2.6.34-ts471x | drivers/platform/x86/eeepc-wmi.c | 42 | 9963 | /*
* Eee PC WMI hotkey driver
*
* Copyright(C) 2010 Intel Corporation.
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
* Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
* Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#define EEEPC_WMI_FILE "eeepc-wmi"
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");
#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
#define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
#define EEEPC_WMI_METHODID_DEVS 0x53564544
#define EEEPC_WMI_METHODID_DSTS 0x53544344
#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x5d, { KEY_WLAN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } },
{ KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
{ KE_END, 0},
};
struct bios_args {
u32 dev_id;
u32 ctrl_param;
};
struct eeepc_wmi {
struct input_dev *inputdev;
struct backlight_device *backlight_device;
};
static struct platform_device *platform_device;
static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
{
int err;
eeepc->inputdev = input_allocate_device();
if (!eeepc->inputdev)
return -ENOMEM;
eeepc->inputdev->name = "Eee PC WMI hotkeys";
eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
eeepc->inputdev->id.bustype = BUS_HOST;
eeepc->inputdev->dev.parent = &platform_device->dev;
err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
if (err)
goto err_free_dev;
err = input_register_device(eeepc->inputdev);
if (err)
goto err_free_keymap;
return 0;
err_free_keymap:
sparse_keymap_free(eeepc->inputdev);
err_free_dev:
input_free_device(eeepc->inputdev);
return err;
}
static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc)
{
if (eeepc->inputdev) {
sparse_keymap_free(eeepc->inputdev);
input_unregister_device(eeepc->inputdev);
}
eeepc->inputdev = NULL;
}
static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
{
struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
u32 tmp;
status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
1, EEEPC_WMI_METHODID_DSTS, &input, &output);
if (ACPI_FAILURE(status))
return status;
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
tmp = (u32)obj->integer.value;
else
tmp = 0;
if (ctrl_param)
*ctrl_param = tmp;
kfree(obj);
return status;
}
static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
{
struct bios_args args = {
.dev_id = dev_id,
.ctrl_param = ctrl_param,
};
struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
acpi_status status;
status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
1, EEEPC_WMI_METHODID_DEVS, &input, NULL);
return status;
}
static int read_brightness(struct backlight_device *bd)
{
static u32 ctrl_param;
acpi_status status;
status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param);
if (ACPI_FAILURE(status))
return -1;
else
return ctrl_param & 0xFF;
}
static int update_bl_status(struct backlight_device *bd)
{
static u32 ctrl_param;
acpi_status status;
ctrl_param = bd->props.brightness;
status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param);
if (ACPI_FAILURE(status))
return -1;
else
return 0;
}
static const struct backlight_ops eeepc_wmi_bl_ops = {
.get_brightness = read_brightness,
.update_status = update_bl_status,
};
static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code)
{
struct backlight_device *bd = eeepc->backlight_device;
int old = bd->props.brightness;
int new = old; /* keep the current level for out-of-range codes */
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
new = code - NOTIFY_BRNUP_MIN + 1;
else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
new = code - NOTIFY_BRNDOWN_MIN;
bd->props.brightness = new;
backlight_update_status(bd);
backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
return old;
}
static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc)
{
struct backlight_device *bd;
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = 15;
bd = backlight_device_register(EEEPC_WMI_FILE,
&platform_device->dev, eeepc,
&eeepc_wmi_bl_ops, &props);
if (IS_ERR(bd)) {
pr_err("Could not register backlight device\n");
return PTR_ERR(bd);
}
eeepc->backlight_device = bd;
bd->props.brightness = read_brightness(bd);
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
return 0;
}
static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc)
{
if (eeepc->backlight_device)
backlight_device_unregister(eeepc->backlight_device);
eeepc->backlight_device = NULL;
}
static void eeepc_wmi_notify(u32 value, void *context)
{
struct eeepc_wmi *eeepc = context;
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int code;
int orig_code;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
pr_err("bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER) {
code = obj->integer.value;
orig_code = code;
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = NOTIFY_BRNUP_MIN;
else if (code >= NOTIFY_BRNDOWN_MIN &&
code <= NOTIFY_BRNDOWN_MAX)
code = NOTIFY_BRNDOWN_MIN;
if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
if (!acpi_video_backlight_support())
eeepc_wmi_backlight_notify(eeepc, orig_code);
}
if (!sparse_keymap_report_event(eeepc->inputdev,
code, 1, true))
pr_info("Unknown key %x pressed\n", code);
}
kfree(obj);
}
static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
{
struct eeepc_wmi *eeepc;
int err;
acpi_status status;
eeepc = platform_get_drvdata(device);
err = eeepc_wmi_input_init(eeepc);
if (err)
goto error_input;
if (!acpi_video_backlight_support()) {
err = eeepc_wmi_backlight_init(eeepc);
if (err)
goto error_backlight;
} else
pr_info("Backlight controlled by ACPI video driver\n");
status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
eeepc_wmi_notify, eeepc);
if (ACPI_FAILURE(status)) {
pr_err("Unable to register notify handler - %d\n",
status);
err = -ENODEV;
goto error_wmi;
}
return 0;
error_wmi:
eeepc_wmi_backlight_exit(eeepc);
error_backlight:
eeepc_wmi_input_exit(eeepc);
error_input:
return err;
}
static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
{
struct eeepc_wmi *eeepc;
eeepc = platform_get_drvdata(device);
wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
eeepc_wmi_backlight_exit(eeepc);
eeepc_wmi_input_exit(eeepc);
return 0;
}
static struct platform_driver platform_driver = {
.driver = {
.name = EEEPC_WMI_FILE,
.owner = THIS_MODULE,
},
.probe = eeepc_wmi_platform_probe,
.remove = __devexit_p(eeepc_wmi_platform_remove),
};
static int __init eeepc_wmi_init(void)
{
struct eeepc_wmi *eeepc;
int err;
if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
!wmi_has_guid(EEEPC_WMI_MGMT_GUID)) {
pr_warning("No known WMI GUID found\n");
return -ENODEV;
}
eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
if (!eeepc)
return -ENOMEM;
platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
if (!platform_device) {
pr_warning("Unable to allocate platform device\n");
err = -ENOMEM;
goto fail_platform;
}
err = platform_device_add(platform_device);
if (err) {
pr_warning("Unable to add platform device\n");
goto put_dev;
}
platform_set_drvdata(platform_device, eeepc);
err = platform_driver_register(&platform_driver);
if (err) {
pr_warning("Unable to register platform driver\n");
goto del_dev;
}
return 0;
del_dev:
platform_device_del(platform_device);
put_dev:
platform_device_put(platform_device);
fail_platform:
kfree(eeepc);
return err;
}
static void __exit eeepc_wmi_exit(void)
{
struct eeepc_wmi *eeepc;
eeepc = platform_get_drvdata(platform_device);
platform_driver_unregister(&platform_driver);
platform_device_unregister(platform_device);
kfree(eeepc);
}
module_init(eeepc_wmi_init);
module_exit(eeepc_wmi_exit);
| gpl-2.0 |
shimaore/yate | libs/miniwebrtc/audio/coding_ilbc/xcorr_coef.c | 42 | 4816 | /*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/******************************************************************
iLBC Speech Coder ANSI-C Source Code
WebRtcIlbcfix_XcorrCoef.c
******************************************************************/
#include "defines.h"
/*----------------------------------------------------------------*
* cross correlation which finds the optimal lag for the
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
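/*
* In floating point the criterion maximised below would read
*
*   C(lag) = ( sum_n target[n]*regressor[lag+n] )^2 / sum_n regressor[lag+n]^2
*
* evaluated for searchLen candidate lags; the fixed-point code computes the
* same quantity with dynamic rescaling to avoid 32-bit overflow.
*/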
int WebRtcIlbcfix_XcorrCoef(
WebRtc_Word16 *target, /* (i) first array */
WebRtc_Word16 *regressor, /* (i) second array */
WebRtc_Word16 subl, /* (i) dimension arrays */
WebRtc_Word16 searchLen, /* (i) the search length */
WebRtc_Word16 offset, /* (i) samples offset between arrays */
WebRtc_Word16 step /* (i) +1 or -1 */
){
int k;
WebRtc_Word16 maxlag;
WebRtc_Word16 pos;
WebRtc_Word16 max;
WebRtc_Word16 crossCorrScale, Energyscale;
WebRtc_Word16 crossCorrSqMod, crossCorrSqMod_Max;
WebRtc_Word32 crossCorr, Energy;
WebRtc_Word16 crossCorrmod, EnergyMod, EnergyMod_Max;
WebRtc_Word16 *tp, *rp;
WebRtc_Word16 *rp_beg, *rp_end;
WebRtc_Word16 totscale, totscale_max;
WebRtc_Word16 scalediff;
WebRtc_Word32 newCrit, maxCrit;
int shifts;
/* Initializations, to make sure that the first one is selected */
crossCorrSqMod_Max=0;
EnergyMod_Max=WEBRTC_SPL_WORD16_MAX;
totscale_max=-500;
maxlag=0;
pos=0;
/* Find scale value and start position */
if (step==1) {
max=WebRtcSpl_MaxAbsValueW16(regressor, (WebRtc_Word16)(subl+searchLen-1));
rp_beg = regressor;
rp_end = &regressor[subl];
} else { /* step==-1 */
max=WebRtcSpl_MaxAbsValueW16(&regressor[-searchLen], (WebRtc_Word16)(subl+searchLen-1));
rp_beg = &regressor[-1];
rp_end = &regressor[subl-1];
}
/* Introduce a scale factor on the Energy in WebRtc_Word32 in
order to make sure that the calculation does not
overflow */
if (max>5000) {
shifts=2;
} else {
shifts=0;
}
/* Calculate the first energy, then do a +/- to get the other energies */
Energy=WebRtcSpl_DotProductWithScale(regressor, regressor, subl, shifts);
for (k=0;k<searchLen;k++) {
tp = target;
rp = &regressor[pos];
crossCorr=WebRtcSpl_DotProductWithScale(tp, rp, subl, shifts);
if ((Energy>0)&&(crossCorr>0)) {
/* Put cross correlation and energy on 16 bit word */
crossCorrScale=(WebRtc_Word16)WebRtcSpl_NormW32(crossCorr)-16;
crossCorrmod=(WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(crossCorr, crossCorrScale);
Energyscale=(WebRtc_Word16)WebRtcSpl_NormW32(Energy)-16;
EnergyMod=(WebRtc_Word16)WEBRTC_SPL_SHIFT_W32(Energy, Energyscale);
/* Square cross correlation and store upper WebRtc_Word16 */
crossCorrSqMod=(WebRtc_Word16)WEBRTC_SPL_MUL_16_16_RSFT(crossCorrmod, crossCorrmod, 16);
/* Calculate the total number of (dynamic) right shifts that have
been performed on (crossCorr*crossCorr)/energy
*/
totscale=Energyscale-(crossCorrScale<<1);
/* Calculate the shift difference in order to be able to compare the two
(crossCorr*crossCorr)/energy in the same domain
*/
scalediff=totscale-totscale_max;
scalediff=WEBRTC_SPL_MIN(scalediff,31);
scalediff=WEBRTC_SPL_MAX(scalediff,-31);
/* Compute the cross multiplication between the old best criteria
and the new one to be able to compare them without using a
division */
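/* That is, the fractions crossCorrSqMod/EnergyMod (new) and
   crossCorrSqMod_Max/EnergyMod_Max (old best) are compared by
   cross-multiplying them, with scalediff shifting one product so that
   both candidates end up at the same dynamic scaling */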
if (scalediff<0) {
newCrit = ((WebRtc_Word32)crossCorrSqMod*EnergyMod_Max)>>(-scalediff);
maxCrit = ((WebRtc_Word32)crossCorrSqMod_Max*EnergyMod);
} else {
newCrit = ((WebRtc_Word32)crossCorrSqMod*EnergyMod_Max);
maxCrit = ((WebRtc_Word32)crossCorrSqMod_Max*EnergyMod)>>scalediff;
}
/* Store the new lag value if the new criteria is larger
than previous largest criteria */
if (newCrit > maxCrit) {
crossCorrSqMod_Max = crossCorrSqMod;
EnergyMod_Max = EnergyMod;
totscale_max = totscale;
maxlag = k;
}
}
pos+=step;
/* Do a +/- to get the next energy */
Energy += step*(WEBRTC_SPL_RSHIFT_W32(
((WebRtc_Word32)(*rp_end)*(*rp_end)) - ((WebRtc_Word32)(*rp_beg)*(*rp_beg)),
shifts));
rp_beg+=step;
rp_end+=step;
}
return(maxlag+offset);
}
| gpl-2.0 |
Core2idiot/Kernel-Samsung-3.0...- | drivers/clk/clkdev.c | 298 | 3838 | /*
* drivers/clk/clkdev.c
*
* Copyright (C) 2008 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Helper for the clk API to assist looking up a struct clk.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
/*
* Find the correct struct clk for the device and connection ID.
* We do slightly fuzzy matching here:
* An entry with a NULL ID is assumed to be a wildcard.
* If an entry has a device ID, it must match
* If an entry has a connection ID, it must match
* Then we take the most specific entry - with the following
* order of precedence: dev+con > dev only > con only.
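* In clk_find() below a dev_id match scores 2 and a con_id match scores 1,
* so this precedence follows from simply keeping the highest-scoring entry.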
*/
static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
{
struct clk_lookup *p, *cl = NULL;
int match, best = 0;
list_for_each_entry(p, &clocks, node) {
match = 0;
if (p->dev_id) {
if (!dev_id || strcmp(p->dev_id, dev_id))
continue;
match += 2;
}
if (p->con_id) {
if (!con_id || strcmp(p->con_id, con_id))
continue;
match += 1;
}
if (match > best) {
cl = p;
if (match != 3)
best = match;
else
break;
}
}
return cl;
}
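/*
 * Worked example of the precedence above (hypothetical table entries):
 *
 *   { .dev_id = "uart.0", .con_id = "baud" }  ->  match = 3
 *   { .dev_id = "uart.0", .con_id = NULL   }  ->  match = 2
 *   { .dev_id = NULL,     .con_id = "baud" }  ->  match = 1
 *
 * clk_find("uart.0", "baud") picks the first entry and breaks out early,
 * while clk_find("uart.1", "baud") can only match the con_id-only entry.
 */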
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
struct clk_lookup *cl;
mutex_lock(&clocks_mutex);
cl = clk_find(dev_id, con_id);
if (cl && !__clk_get(cl->clk))
cl = NULL;
mutex_unlock(&clocks_mutex);
return cl ? cl->clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(clk_get_sys);
struct clk *clk_get(struct device *dev, const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
#if !defined(CONFIG_REMOVE_EBI1_FIXED_CLK) && defined(CONFIG_ARCH_MSM7X30)
if (strncmp(con_id, "ebi1_clk", strlen("ebi1_clk")) == 0)
BUG();
#endif
return clk_get_sys(dev_id, con_id);
}
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
{
__clk_put(clk);
}
EXPORT_SYMBOL(clk_put);
void clkdev_add(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_add_tail(&cl->node, &clocks);
mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clkdev_add);
void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
{
mutex_lock(&clocks_mutex);
while (num--) {
list_add_tail(&cl->node, &clocks);
cl++;
}
mutex_unlock(&clocks_mutex);
}
#define MAX_DEV_ID 20
#define MAX_CON_ID 16
struct clk_lookup_alloc {
struct clk_lookup cl;
char dev_id[MAX_DEV_ID];
char con_id[MAX_CON_ID];
};
struct clk_lookup * __init_refok
clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
{
struct clk_lookup_alloc *cla;
cla = __clkdev_alloc(sizeof(*cla));
if (!cla)
return NULL;
cla->cl.clk = clk;
if (con_id) {
strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
cla->cl.con_id = cla->con_id;
}
if (dev_fmt) {
va_list ap;
va_start(ap, dev_fmt);
vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
cla->cl.dev_id = cla->dev_id;
va_end(ap);
}
return &cla->cl;
}
EXPORT_SYMBOL(clkdev_alloc);
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device *dev)
{
struct clk *r = clk_get(dev, id);
struct clk_lookup *l;
if (IS_ERR(r))
return PTR_ERR(r);
l = clkdev_alloc(r, alias, alias_dev_name);
clk_put(r);
if (!l)
return -ENODEV;
clkdev_add(l);
return 0;
}
EXPORT_SYMBOL(clk_add_alias);
/*
* clkdev_drop - remove a clock dynamically allocated
*/
void clkdev_drop(struct clk_lookup *cl)
{
mutex_lock(&clocks_mutex);
list_del(&cl->node);
mutex_unlock(&clocks_mutex);
kfree(cl);
}
EXPORT_SYMBOL(clkdev_drop);
| gpl-2.0 |
n8ohu/android_kernel_pantech_lgvr | virt/kvm/eventfd.c | 554 | 13721 | /*
* kvm eventfd support - use eventfd objects to signal various KVM events
*
* Copyright 2009 Novell. All Rights Reserved.
*
* Author:
* Gregory Haskins <ghaskins@novell.com>
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "iodev.h"
/*
* --------------------------------------------------------------------
* irqfd: Allows an fd to be used to inject an interrupt to the guest
*
* Credit goes to Avi Kivity for the original idea.
* --------------------------------------------------------------------
*/
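/*
 * Rough sketch of the userspace side (illustrative only; the gsi value is
 * made up and error handling is omitted):
 *
 *   struct kvm_irqfd req = {
 *       .fd  = eventfd(0, 0),   <- eventfd KVM should poll
 *       .gsi = 10,              <- guest interrupt line to pulse
 *   };
 *   ioctl(vm_fd, KVM_IRQFD, &req);
 *
 * A write to that eventfd then lands in irqfd_wakeup()/irqfd_inject()
 * below, which raises and immediately lowers the configured GSI.
 */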
struct _irqfd {
struct kvm *kvm;
struct eventfd_ctx *eventfd;
int gsi;
struct list_head list;
poll_table pt;
wait_queue_t wait;
struct work_struct inject;
struct work_struct shutdown;
};
static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
{
struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
struct kvm *kvm = irqfd->kvm;
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
}
/*
* Race-free decouple logic (ordering is critical)
*/
static void
irqfd_shutdown(struct work_struct *work)
{
struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
u64 cnt;
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
* further events.
*/
eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
/*
* We know no new events will be scheduled at this point, so block
* until all previously outstanding events have completed
*/
flush_work(&irqfd->inject);
/*
* It is now safe to release the object's resources
*/
eventfd_ctx_put(irqfd->eventfd);
kfree(irqfd);
}
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
return list_empty(&irqfd->list) ? false : true;
}
/*
* Mark the irqfd as inactive and schedule it for removal
*
* assumes kvm->irqfds.lock is held
*/
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
BUG_ON(!irqfd_is_active(irqfd));
list_del_init(&irqfd->list);
queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
/*
* Called with wqh->lock held and interrupts disabled
*/
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
unsigned long flags = (unsigned long)key;
if (flags & POLLIN)
/* An event has been signaled, inject an interrupt */
schedule_work(&irqfd->inject);
if (flags & POLLHUP) {
/* The eventfd is closing, detach from KVM */
struct kvm *kvm = irqfd->kvm;
unsigned long flags;
spin_lock_irqsave(&kvm->irqfds.lock, flags);
/*
* We must check if someone deactivated the irqfd before
* we could acquire the irqfds.lock since the item is
* deactivated from the KVM side before it is unhooked from
* the wait-queue. If it is already deactivated, we can
* simply return knowing the other side will cleanup for us.
* We cannot race against the irqfd going away since the
* other side is required to acquire wqh->lock, which we hold
*/
if (irqfd_is_active(irqfd))
irqfd_deactivate(irqfd);
spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
}
return 0;
}
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
add_wait_queue(wqh, &irqfd->wait);
}
static int
kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
{
struct _irqfd *irqfd, *tmp;
struct file *file = NULL;
struct eventfd_ctx *eventfd = NULL;
int ret;
unsigned int events;
irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
if (!irqfd)
return -ENOMEM;
irqfd->kvm = kvm;
irqfd->gsi = gsi;
INIT_LIST_HEAD(&irqfd->list);
INIT_WORK(&irqfd->inject, irqfd_inject);
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
file = eventfd_fget(fd);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto fail;
}
eventfd = eventfd_ctx_fileget(file);
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
}
irqfd->eventfd = eventfd;
/*
* Install our own custom wake-up handling so we are notified via
* a callback whenever someone signals the underlying eventfd
*/
init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
spin_lock_irq(&kvm->irqfds.lock);
ret = 0;
list_for_each_entry(tmp, &kvm->irqfds.items, list) {
if (irqfd->eventfd != tmp->eventfd)
continue;
/* This fd is used for another irq already. */
ret = -EBUSY;
spin_unlock_irq(&kvm->irqfds.lock);
goto fail;
}
events = file->f_op->poll(file, &irqfd->pt);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
spin_unlock_irq(&kvm->irqfds.lock);
/*
* Check if there was an event already pending on the eventfd
* before we registered, and trigger it as if we didn't miss it.
*/
if (events & POLLIN)
schedule_work(&irqfd->inject);
/*
* do not drop the file until the irqfd is fully initialized, otherwise
* we might race against the POLLHUP
*/
fput(file);
return 0;
fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
if (!IS_ERR(file))
fput(file);
kfree(irqfd);
return ret;
}
void
kvm_eventfd_init(struct kvm *kvm)
{
spin_lock_init(&kvm->irqfds.lock);
INIT_LIST_HEAD(&kvm->irqfds.items);
INIT_LIST_HEAD(&kvm->ioeventfds);
}
/*
* shutdown any irqfd's that match fd+gsi
*/
static int
kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
{
struct _irqfd *irqfd, *tmp;
struct eventfd_ctx *eventfd;
eventfd = eventfd_ctx_fdget(fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
irqfd_deactivate(irqfd);
}
spin_unlock_irq(&kvm->irqfds.lock);
eventfd_ctx_put(eventfd);
/*
* Block until we know all outstanding shutdown jobs have completed
* so that we guarantee there will not be any more interrupts on this
* gsi once this deassign function returns.
*/
flush_workqueue(irqfd_cleanup_wq);
return 0;
}
int
kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
if (flags & KVM_IRQFD_FLAG_DEASSIGN)
return kvm_irqfd_deassign(kvm, fd, gsi);
return kvm_irqfd_assign(kvm, fd, gsi);
}
/*
* This function is called as the kvm VM fd is being released. Shutdown all
* irqfds that still remain open
*/
void
kvm_irqfd_release(struct kvm *kvm)
{
struct _irqfd *irqfd, *tmp;
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
irqfd_deactivate(irqfd);
spin_unlock_irq(&kvm->irqfds.lock);
/*
* Block until we know all outstanding shutdown jobs have completed
* since we do not take a kvm* reference.
*/
flush_workqueue(irqfd_cleanup_wq);
}
/*
* create a host-wide workqueue for issuing deferred shutdown requests
* aggregated from all vm* instances. We need our own isolated single-thread
* queue to prevent deadlock against flushing the normal work-queue.
*/
static int __init irqfd_module_init(void)
{
irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
if (!irqfd_cleanup_wq)
return -ENOMEM;
return 0;
}
static void __exit irqfd_module_exit(void)
{
destroy_workqueue(irqfd_cleanup_wq);
}
module_init(irqfd_module_init);
module_exit(irqfd_module_exit);
/*
* --------------------------------------------------------------------
* ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
*
* userspace can register a PIO/MMIO address with an eventfd for receiving
* notification when the memory has been touched.
* --------------------------------------------------------------------
*/
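/*
 * Rough sketch of the userspace side (illustrative only; the address,
 * length and match value are made up):
 *
 *   struct kvm_ioeventfd io = {
 *       .addr      = 0xc000,
 *       .len       = 4,                            <- must be 1, 2, 4 or 8
 *       .fd        = eventfd(0, 0),
 *       .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *       .datamatch = 0x1,
 *   };
 *   ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 * A guest write of 0x1 to that MMIO address then signals the eventfd via
 * ioeventfd_write() below instead of bouncing out to userspace.
 */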
struct _ioeventfd {
struct list_head list;
u64 addr;
int length;
struct eventfd_ctx *eventfd;
u64 datamatch;
struct kvm_io_device dev;
bool wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
return container_of(dev, struct _ioeventfd, dev);
}
static void
ioeventfd_release(struct _ioeventfd *p)
{
eventfd_ctx_put(p->eventfd);
list_del(&p->list);
kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
u64 _val;
if (!(addr == p->addr && len == p->length))
/* address-range must be precise for a hit */
return false;
if (p->wildcard)
/* all else equal, wildcard is always a hit */
return true;
/* otherwise, we have to actually compare the data */
BUG_ON(!IS_ALIGNED((unsigned long)val, len));
switch (len) {
case 1:
_val = *(u8 *)val;
break;
case 2:
_val = *(u16 *)val;
break;
case 4:
_val = *(u32 *)val;
break;
case 8:
_val = *(u64 *)val;
break;
default:
return false;
}
return _val == p->datamatch ? true : false;
}
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
const void *val)
{
struct _ioeventfd *p = to_ioeventfd(this);
if (!ioeventfd_in_range(p, addr, len, val))
return -EOPNOTSUPP;
eventfd_signal(p->eventfd, 1);
return 0;
}
/*
* This function is called as KVM is completely shutting down. We do not
* need to worry about locking; just nuke anything we have as quickly as possible
*/
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
struct _ioeventfd *p = to_ioeventfd(this);
ioeventfd_release(p);
}
static const struct kvm_io_device_ops ioeventfd_ops = {
.write = ioeventfd_write,
.destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
struct _ioeventfd *_p;
list_for_each_entry(_p, &kvm->ioeventfds, list)
if (_p->addr == p->addr && _p->length == p->length &&
(_p->wildcard || p->wildcard ||
_p->datamatch == p->datamatch))
return true;
return false;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
int ret;
/* must be natural-word sized */
switch (args->len) {
case 1:
case 2:
case 4:
case 8:
break;
default:
return -EINVAL;
}
/* check for range overflow */
if (args->addr + args->len < args->addr)
return -EINVAL;
/* check for extra flags that we don't understand */
if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
return -EINVAL;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&p->list);
p->addr = args->addr;
p->length = args->len;
p->eventfd = eventfd;
/* The datamatch feature is optional, otherwise this is a wildcard */
if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
p->datamatch = args->datamatch;
else
p->wildcard = true;
mutex_lock(&kvm->slots_lock);
/* Verify that there isn't a match already */
if (ioeventfd_check_collision(kvm, p)) {
ret = -EEXIST;
goto unlock_fail;
}
kvm_iodevice_init(&p->dev, &ioeventfd_ops);
ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
if (ret < 0)
goto unlock_fail;
list_add_tail(&p->list, &kvm->ioeventfds);
mutex_unlock(&kvm->slots_lock);
return 0;
unlock_fail:
mutex_unlock(&kvm->slots_lock);
fail:
kfree(p);
eventfd_ctx_put(eventfd);
return ret;
}
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
int ret = -ENOENT;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
mutex_lock(&kvm->slots_lock);
list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
if (p->eventfd != eventfd ||
p->addr != args->addr ||
p->length != args->len ||
p->wildcard != wildcard)
continue;
if (!p->wildcard && p->datamatch != args->datamatch)
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
ioeventfd_release(p);
ret = 0;
break;
}
mutex_unlock(&kvm->slots_lock);
eventfd_ctx_put(eventfd);
return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
return kvm_deassign_ioeventfd(kvm, args);
return kvm_assign_ioeventfd(kvm, args);
}
| gpl-2.0 |
tellapart/ubuntu-precise | drivers/watchdog/lantiq_wdt.c | 554 | 6201 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
* Based on EP93xx wdt driver
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <lantiq.h>
/* Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
* essentially the following two magic passwords need to be written to allow
* IO access to the WDT core
*/
#define LTQ_WDT_PW1 0x00BE0000
#define LTQ_WDT_PW2 0x00DC0000
#define LTQ_WDT_CR 0x0 /* watchdog control register */
#define LTQ_WDT_SR 0x8 /* watchdog status register */
#define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */
#define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */
#define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */
/* divider to 0x40000 */
#define LTQ_WDT_DIVIDER 0x40000
#define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */
static int nowayout = WATCHDOG_NOWAYOUT;
static void __iomem *ltq_wdt_membase;
static unsigned long ltq_io_region_clk_rate;
static unsigned long ltq_wdt_bootstatus;
static unsigned long ltq_wdt_in_use;
static int ltq_wdt_timeout = 30;
static int ltq_wdt_ok_to_close;
static void
ltq_wdt_enable(void)
{
unsigned long int timeout = ltq_wdt_timeout *
(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
if (timeout > LTQ_MAX_TIMEOUT)
timeout = LTQ_MAX_TIMEOUT;
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
/* write the second magic plus the configuration and new timeout */
ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
}
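/*
 * Example of the reload arithmetic above (the io clock rate is an assumed
 * figure, not something this driver hard-codes): with a ~133 MHz io clock
 * and the 0x40000 divider the counter ticks roughly 507 times per second,
 * so the default 30 second timeout programs about 30 * 507 + 0x1000, i.e.
 * around 19300, comfortably below LTQ_MAX_TIMEOUT (65535).
 */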
static void
ltq_wdt_disable(void)
{
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
/* write the second password magic with no config
* this turns the watchdog off
*/
ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
}
static ssize_t
ltq_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
size_t i;
ltq_wdt_ok_to_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
ltq_wdt_ok_to_close = 1;
else
ltq_wdt_ok_to_close = 0;
}
}
ltq_wdt_enable();
}
return len;
}
static struct watchdog_info ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_CARDRESET,
.identity = "ltq_wdt",
};
static long
ltq_wdt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int ret = -ENOTTY;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(ltq_wdt_bootstatus, (int __user *)arg);
break;
case WDIOC_GETSTATUS:
ret = put_user(0, (int __user *)arg);
break;
case WDIOC_SETTIMEOUT:
ret = get_user(ltq_wdt_timeout, (int __user *)arg);
if (!ret)
ltq_wdt_enable();
/* intentional drop through */
case WDIOC_GETTIMEOUT:
ret = put_user(ltq_wdt_timeout, (int __user *)arg);
break;
case WDIOC_KEEPALIVE:
ltq_wdt_enable();
ret = 0;
break;
}
return ret;
}
static int
ltq_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &ltq_wdt_in_use))
return -EBUSY;
ltq_wdt_in_use = 1;
ltq_wdt_enable();
return nonseekable_open(inode, file);
}
static int
ltq_wdt_release(struct inode *inode, struct file *file)
{
if (ltq_wdt_ok_to_close)
ltq_wdt_disable();
else
pr_err("ltq_wdt: watchdog closed without warning\n");
ltq_wdt_ok_to_close = 0;
clear_bit(0, &ltq_wdt_in_use);
return 0;
}
static const struct file_operations ltq_wdt_fops = {
.owner = THIS_MODULE,
.write = ltq_wdt_write,
.unlocked_ioctl = ltq_wdt_ioctl,
.open = ltq_wdt_open,
.release = ltq_wdt_release,
.llseek = no_llseek,
};
static struct miscdevice ltq_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &ltq_wdt_fops,
};
static int __init
ltq_wdt_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct clk *clk;
if (!res) {
dev_err(&pdev->dev, "cannot obtain I/O memory region");
return -ENOENT;
}
res = devm_request_mem_region(&pdev->dev, res->start,
resource_size(res), dev_name(&pdev->dev));
if (!res) {
dev_err(&pdev->dev, "cannot request I/O memory region");
return -EBUSY;
}
ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!ltq_wdt_membase) {
dev_err(&pdev->dev, "cannot remap I/O memory region\n");
return -ENOMEM;
}
/* we do not need to enable the clock as it is always running */
clk = clk_get(&pdev->dev, "io");
WARN_ON(!clk);
ltq_io_region_clk_rate = clk_get_rate(clk);
clk_put(clk);
if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
ltq_wdt_bootstatus = WDIOF_CARDRESET;
return misc_register(&ltq_wdt_miscdev);
}
static int __devexit
ltq_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ltq_wdt_miscdev);
if (ltq_wdt_membase)
iounmap(ltq_wdt_membase);
return 0;
}
static struct platform_driver ltq_wdt_driver = {
.remove = __devexit_p(ltq_wdt_remove),
.driver = {
.name = "ltq_wdt",
.owner = THIS_MODULE,
},
};
static int __init
init_ltq_wdt(void)
{
return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
}
static void __exit
exit_ltq_wdt(void)
{
return platform_driver_unregister(&ltq_wdt_driver);
}
module_init(init_ltq_wdt);
module_exit(exit_ltq_wdt);
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
crdroid-devices/android_kernel_lge_msm8992 | drivers/staging/comedi/drivers/pcl711.c | 2090 | 14383 | /*
comedi/drivers/pcl711.c
hardware driver for PC-LabCard PCL-711 and AdSys ACL-8112
and compatibles
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998 David A. Schleef <ds@schleef.org>
Janne Jalkanen <jalkanen@cs.hut.fi>
Eric Bunn <ebu@cs.hut.fi>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: pcl711
Description: Advantech PCL-711 and 711b, ADLink ACL-8112
Author: ds, Janne Jalkanen <jalkanen@cs.hut.fi>, Eric Bunn <ebu@cs.hut.fi>
Status: mostly complete
Devices: [Advantech] PCL-711 (pcl711), PCL-711B (pcl711b),
[AdLink] ACL-8112HG (acl8112hg), ACL-8112DG (acl8112dg)
Since these boards do not have DMA or FIFOs, only immediate mode is
supported.
*/
/*
Dave Andruczyk <dave@tech.buffalostate.edu> also wrote a
driver for the PCL-711. I used a few ideas from his driver
here. His driver also has more comments, if you are
interested in understanding how this driver works.
http://tech.buffalostate.edu/~dave/driver/
The ACL-8112 driver was hacked from the sources of the PCL-711
driver (the 744 chip used on the 8112 is almost the same as
the 711b chip, but it has more I/O channels) by
Janne Jalkanen (jalkanen@cs.hut.fi) and
Erik Bunn (ebu@cs.hut.fi). Remerged with the PCL-711 driver
by ds.
[acl-8112]
This driver supports both TRIGNOW and TRIGCLK,
but does not yet support DMA transfers. It also supports
both high (HG) and low (DG) versions of the card, though
the HG version has been untested.
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/ioport.h>
#include <linux/delay.h>
#include "comedi_fc.h"
#include "8253.h"
#define PCL711_SIZE 16
#define PCL711_CTR0 0
#define PCL711_CTR1 1
#define PCL711_CTR2 2
#define PCL711_CTRCTL 3
#define PCL711_AD_LO 4
#define PCL711_DA0_LO 4
#define PCL711_AD_HI 5
#define PCL711_DA0_HI 5
#define PCL711_DI_LO 6
#define PCL711_DA1_LO 6
#define PCL711_DI_HI 7
#define PCL711_DA1_HI 7
#define PCL711_CLRINTR 8
#define PCL711_GAIN 9
#define PCL711_MUX 10
#define PCL711_MODE 11
#define PCL711_SOFTTRIG 12
#define PCL711_DO_LO 13
#define PCL711_DO_HI 14
static const struct comedi_lrange range_pcl711b_ai = { 5, {
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
BIP_RANGE(0.3125)
}
};
static const struct comedi_lrange range_acl8112hg_ai = { 12, {
BIP_RANGE(5),
BIP_RANGE(0.5),
BIP_RANGE(0.05),
BIP_RANGE(0.005),
UNI_RANGE(10),
UNI_RANGE(1),
UNI_RANGE(0.1),
UNI_RANGE(0.01),
BIP_RANGE(10),
BIP_RANGE(1),
BIP_RANGE(0.1),
BIP_RANGE(0.01)
}
};
static const struct comedi_lrange range_acl8112dg_ai = { 9, {
BIP_RANGE(5),
BIP_RANGE(2.5),
BIP_RANGE(1.25),
BIP_RANGE(0.625),
UNI_RANGE(10),
UNI_RANGE(5),
UNI_RANGE(2.5),
UNI_RANGE(1.25),
BIP_RANGE(10)
}
};
/*
* flags
*/
#define PCL711_TIMEOUT 100
#define PCL711_DRDY 0x10
static const int i8253_osc_base = 500; /* 2 MHz */
struct pcl711_board {
const char *name;
int is_pcl711b;
int is_8112;
int is_dg;
int n_ranges;
int n_aichan;
int n_aochan;
int maxirq;
const struct comedi_lrange *ai_range_type;
};
struct pcl711_private {
int board;
int adchan;
int ntrig;
int aip[8];
int mode;
unsigned int ao_readback[2];
unsigned int divisor1;
unsigned int divisor2;
};
static irqreturn_t pcl711_interrupt(int irq, void *d)
{
int lo, hi;
int data;
struct comedi_device *dev = d;
const struct pcl711_board *board = comedi_board(dev);
struct pcl711_private *devpriv = dev->private;
struct comedi_subdevice *s = &dev->subdevices[0];
if (!dev->attached) {
comedi_error(dev, "spurious interrupt");
return IRQ_HANDLED;
}
hi = inb(dev->iobase + PCL711_AD_HI);
lo = inb(dev->iobase + PCL711_AD_LO);
outb(0, dev->iobase + PCL711_CLRINTR);
data = (hi << 8) | lo;
/* FIXME! Nothing else sets ntrig! */
if (!(--devpriv->ntrig)) {
if (board->is_8112)
outb(1, dev->iobase + PCL711_MODE);
else
outb(0, dev->iobase + PCL711_MODE);
s->async->events |= COMEDI_CB_EOA;
}
comedi_event(dev, s);
return IRQ_HANDLED;
}
static void pcl711_set_changain(struct comedi_device *dev, int chan)
{
const struct pcl711_board *board = comedi_board(dev);
int chan_register;
outb(CR_RANGE(chan), dev->iobase + PCL711_GAIN);
chan_register = CR_CHAN(chan);
if (board->is_8112) {
/*
* Set the correct channel. The two channel banks are switched
* using the mask value.
* NB: To use differential channels, you should use
* mask = 0x30, but I haven't written the support for this
* yet. /JJ
*/
if (chan_register >= 8)
chan_register = 0x20 | (chan_register & 0x7);
else
chan_register |= 0x10;
} else {
outb(chan_register, dev->iobase + PCL711_MUX);
}
}
static int pcl711_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
const struct pcl711_board *board = comedi_board(dev);
int i, n;
int hi, lo;
pcl711_set_changain(dev, insn->chanspec);
for (n = 0; n < insn->n; n++) {
/*
* Write the correct mode (software polling) and start polling
* by writing to the trigger register
*/
outb(1, dev->iobase + PCL711_MODE);
if (!board->is_8112)
outb(0, dev->iobase + PCL711_SOFTTRIG);
i = PCL711_TIMEOUT;
while (--i) {
hi = inb(dev->iobase + PCL711_AD_HI);
if (!(hi & PCL711_DRDY))
goto ok;
udelay(1);
}
printk(KERN_ERR "comedi%d: pcl711: A/D timeout\n", dev->minor);
return -ETIME;
ok:
lo = inb(dev->iobase + PCL711_AD_LO);
data[n] = ((hi & 0xf) << 8) | lo;
}
return n;
}
static int pcl711_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
struct pcl711_private *devpriv = dev->private;
int tmp;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
TRIG_TIMER | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
err |= cfc_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->scan_begin_src == TRIG_EXT) {
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
} else {
#define MAX_SPEED 1000
#define TIMER_BASE 100
err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
MAX_SPEED);
}
err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
if (cmd->stop_src == TRIG_NONE) {
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
} else {
/* ignore */
}
if (err)
return 3;
/* step 4 */
if (cmd->scan_begin_src == TRIG_TIMER) {
tmp = cmd->scan_begin_arg;
i8253_cascade_ns_to_timer_2div(TIMER_BASE,
&devpriv->divisor1,
&devpriv->divisor2,
&cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
if (tmp != cmd->scan_begin_arg)
err++;
}
if (err)
return 4;
return 0;
}
static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct pcl711_private *devpriv = dev->private;
int timer1, timer2;
struct comedi_cmd *cmd = &s->async->cmd;
pcl711_set_changain(dev, cmd->chanlist[0]);
if (cmd->scan_begin_src == TRIG_TIMER) {
/*
* Set timers
* timer chip is an 8253, with timers 1 and 2
* cascaded
* 0x74 = Select Counter 1 | LSB/MSB | Mode=2 | Binary
* Mode 2 = Rate generator
*
* 0xb4 = Select Counter 2 | LSB/MSB | Mode=2 | Binary
*/
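/*
 * Illustrative split only: with the 2 MHz (500 ns) oscillator base used
 * here, a 1 ms scan period needs a combined divisor of 2000, which the
 * cascade helper could realise as, say, timer1 = 2 and timer2 = 1000.
 */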
timer1 = timer2 = 0;
i8253_cascade_ns_to_timer(i8253_osc_base, &timer1, &timer2,
&cmd->scan_begin_arg,
TRIG_ROUND_NEAREST);
outb(0x74, dev->iobase + PCL711_CTRCTL);
outb(timer1 & 0xff, dev->iobase + PCL711_CTR1);
outb((timer1 >> 8) & 0xff, dev->iobase + PCL711_CTR1);
outb(0xb4, dev->iobase + PCL711_CTRCTL);
outb(timer2 & 0xff, dev->iobase + PCL711_CTR2);
outb((timer2 >> 8) & 0xff, dev->iobase + PCL711_CTR2);
/* clear pending interrupts (just in case) */
outb(0, dev->iobase + PCL711_CLRINTR);
/*
* Set mode to IRQ transfer
*/
outb(devpriv->mode | 6, dev->iobase + PCL711_MODE);
} else {
/* external trigger */
outb(devpriv->mode | 3, dev->iobase + PCL711_MODE);
}
return 0;
}
/*
analog output
*/
static int pcl711_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct pcl711_private *devpriv = dev->private;
int n;
int chan = CR_CHAN(insn->chanspec);
for (n = 0; n < insn->n; n++) {
outb((data[n] & 0xff),
dev->iobase + (chan ? PCL711_DA1_LO : PCL711_DA0_LO));
outb((data[n] >> 8),
dev->iobase + (chan ? PCL711_DA1_HI : PCL711_DA0_HI));
devpriv->ao_readback[chan] = data[n];
}
return n;
}
static int pcl711_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct pcl711_private *devpriv = dev->private;
int n;
int chan = CR_CHAN(insn->chanspec);
for (n = 0; n < insn->n; n++)
data[n] = devpriv->ao_readback[chan];
return n;
}
/* Digital port read - Untested on 8112 */
static int pcl711_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
data[1] = inb(dev->iobase + PCL711_DI_LO) |
(inb(dev->iobase + PCL711_DI_HI) << 8);
return insn->n;
}
/* Digital port write - Untested on 8112 */
static int pcl711_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
if (data[0]) {
s->state &= ~data[0];
s->state |= data[0] & data[1];
}
if (data[0] & 0x00ff)
outb(s->state & 0xff, dev->iobase + PCL711_DO_LO);
if (data[0] & 0xff00)
outb((s->state >> 8), dev->iobase + PCL711_DO_HI);
data[1] = s->state;
return insn->n;
}
static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcl711_board *board = comedi_board(dev);
struct pcl711_private *devpriv;
int ret;
unsigned int irq;
struct comedi_subdevice *s;
ret = comedi_request_region(dev, it->options[0], PCL711_SIZE);
if (ret)
return ret;
/* grab our IRQ */
irq = it->options[1];
if (irq > board->maxirq) {
printk(KERN_ERR "irq out of range\n");
return -EINVAL;
}
if (irq) {
if (request_irq(irq, pcl711_interrupt, 0, dev->board_name,
dev)) {
printk(KERN_ERR "unable to allocate irq %u\n", irq);
return -EINVAL;
} else {
printk(KERN_INFO "( irq = %u )\n", irq);
}
}
dev->irq = irq;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
if (!devpriv)
return -ENOMEM;
dev->private = devpriv;
s = &dev->subdevices[0];
/* AI subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = board->n_aichan;
s->maxdata = 0xfff;
s->len_chanlist = 1;
s->range_table = board->ai_range_type;
s->insn_read = pcl711_ai_insn;
if (irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->do_cmdtest = pcl711_ai_cmdtest;
s->do_cmd = pcl711_ai_cmd;
}
s = &dev->subdevices[1];
/* AO subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = board->n_aochan;
s->maxdata = 0xfff;
s->len_chanlist = 1;
s->range_table = &range_bipolar5;
s->insn_write = pcl711_ao_insn;
s->insn_read = pcl711_ao_insn_read;
s = &dev->subdevices[2];
/* 16-bit digital input */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->maxdata = 1;
s->len_chanlist = 16;
s->range_table = &range_digital;
s->insn_bits = pcl711_di_insn_bits;
s = &dev->subdevices[3];
/* 16-bit digital out */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->maxdata = 1;
s->len_chanlist = 16;
s->range_table = &range_digital;
s->state = 0;
s->insn_bits = pcl711_do_insn_bits;
/*
this is the "base value" for the mode register, which is
used for the irq on the PCL711
*/
if (board->is_pcl711b)
devpriv->mode = (dev->irq << 4);
/* clear DAC */
outb(0, dev->iobase + PCL711_DA0_LO);
outb(0, dev->iobase + PCL711_DA0_HI);
outb(0, dev->iobase + PCL711_DA1_LO);
outb(0, dev->iobase + PCL711_DA1_HI);
printk(KERN_INFO "\n");
return 0;
}
static const struct pcl711_board boardtypes[] = {
{ "pcl711", 0, 0, 0, 5, 8, 1, 0, &range_bipolar5 },
{ "pcl711b", 1, 0, 0, 5, 8, 1, 7, &range_pcl711b_ai },
{ "acl8112hg", 0, 1, 0, 12, 16, 2, 15, &range_acl8112hg_ai },
{ "acl8112dg", 0, 1, 1, 9, 16, 2, 15, &range_acl8112dg_ai },
};
static struct comedi_driver pcl711_driver = {
.driver_name = "pcl711",
.module = THIS_MODULE,
.attach = pcl711_attach,
.detach = comedi_legacy_detach,
.board_name = &boardtypes[0].name,
.num_names = ARRAY_SIZE(boardtypes),
.offset = sizeof(struct pcl711_board),
};
module_comedi_driver(pcl711_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
StefanescuCristian/shamu | drivers/staging/comedi/drivers/c6xdigio.c | 2090 | 12811 | /*
comedi/drivers/c6xdigio.c
Hardware driver for Mechatronic Systems Inc. C6x_DIGIO DSP daughter card.
(http://robot0.ge.uiuc.edu/~spong/mecha/)
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1999 Dan Block
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: c6xdigio
Description: Mechatronic Systems Inc. C6x_DIGIO DSP daughter card
Author: Dan Block
Status: unknown
Devices: [Mechatronic Systems Inc.] C6x_DIGIO DSP daughter card (c6xdigio)
Updated: Sun Nov 20 20:18:34 EST 2005
This driver will not work with a 2.4 kernel.
http://robot0.ge.uiuc.edu/~spong/mecha/
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/pnp.h>
#include "../comedidev.h"
static u8 ReadByteFromHwPort(unsigned long addr)
{
u8 result = inb(addr);
return result;
}
static void WriteByteToHwPort(unsigned long addr, u8 val)
{
outb_p(val, addr);
}
#define C6XDIGIO_SIZE 3
/*
* port offsets
*/
#define C6XDIGIO_PARALLEL_DATA 0
#define C6XDIGIO_PARALLEL_STATUS 1
#define C6XDIGIO_PARALLEL_CONTROL 2
struct pwmbitstype {
unsigned sb0:2;
unsigned sb1:2;
unsigned sb2:2;
unsigned sb3:2;
unsigned sb4:2;
};
union pwmcmdtype {
unsigned cmd; /* assuming here that int is 32bit */
struct pwmbitstype bits;
};
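/* Example of the packing above (the exact bit order assumes the usual
 * little-endian bitfield layout): the maximum command value 498 is
 * 01 11 11 00 10 in binary, i.e. sb4 = 1, sb3 = 3, sb2 = 3, sb1 = 0 and
 * sb0 = 2, and C6X_pwmOutput() clocks those five 2-bit pieces out over
 * the parallel port one handshake at a time.
 */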
struct encbitstype {
unsigned sb0:3;
unsigned sb1:3;
unsigned sb2:3;
unsigned sb3:3;
unsigned sb4:3;
unsigned sb5:3;
unsigned sb6:3;
unsigned sb7:3;
};
union encvaluetype {
unsigned value;
struct encbitstype bits;
};
#define C6XDIGIO_TIME_OUT 20
static void C6X_pwmInit(unsigned long baseAddr)
{
int timeout = 0;
/* printk("Inside C6X_pwmInit\n"); */
WriteByteToHwPort(baseAddr, 0x70);
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
WriteByteToHwPort(baseAddr, 0x74);
timeout = 0;
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
WriteByteToHwPort(baseAddr, 0x70);
timeout = 0;
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
WriteByteToHwPort(baseAddr, 0x0);
timeout = 0;
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
}
static void C6X_pwmOutput(unsigned long baseAddr, unsigned channel, int value)
{
unsigned ppcmd;
union pwmcmdtype pwm;
int timeout = 0;
unsigned tmp;
/* printk("Inside C6X_pwmOutput\n"); */
pwm.cmd = value;
if (pwm.cmd > 498)
pwm.cmd = 498;
if (pwm.cmd < 2)
pwm.cmd = 2;
if (channel == 0) {
ppcmd = 0x28;
} else { /* if channel == 1 */
ppcmd = 0x30;
} /* endif */
WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb0);
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb1 + 0x4);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb2);
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb3 + 0x4);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
WriteByteToHwPort(baseAddr, ppcmd + pwm.bits.sb4);
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
WriteByteToHwPort(baseAddr, 0x0);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
}
static int C6X_encInput(unsigned long baseAddr, unsigned channel)
{
unsigned ppcmd;
union encvaluetype enc;
int timeout = 0;
int tmp;
/* printk("Inside C6X_encInput\n"); */
enc.value = 0;
if (channel == 0)
ppcmd = 0x48;
else
ppcmd = 0x50;
WriteByteToHwPort(baseAddr, ppcmd);
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb0 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd + 0x4);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb1 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb2 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd + 0x4);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb3 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb4 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd + 0x4);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb5 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb6 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd + 0x4);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
enc.bits.sb7 = ((ReadByteFromHwPort(baseAddr + 1) >> 3) & 0x7);
WriteByteToHwPort(baseAddr, ppcmd);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x0) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
WriteByteToHwPort(baseAddr, 0x0);
timeout = 0;
tmp = ReadByteFromHwPort(baseAddr + 1);
while (((tmp & 0x80) == 0x80) && (timeout < C6XDIGIO_TIME_OUT)) {
tmp = ReadByteFromHwPort(baseAddr + 1);
timeout++;
}
return enc.value ^ 0x800000;
}
static void C6X_encResetAll(unsigned long baseAddr)
{
unsigned timeout = 0;
/* printk("Inside C6X_encResetAll\n"); */
WriteByteToHwPort(baseAddr, 0x68);
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
WriteByteToHwPort(baseAddr, 0x6C);
timeout = 0;
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
WriteByteToHwPort(baseAddr, 0x68);
timeout = 0;
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x0)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
WriteByteToHwPort(baseAddr, 0x0);
timeout = 0;
while (((ReadByteFromHwPort(baseAddr + 1) & 0x80) == 0x80)
&& (timeout < C6XDIGIO_TIME_OUT)) {
timeout++;
}
}
static int c6xdigio_pwmo_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
printk(KERN_DEBUG "c6xdigio_pwmo_insn_read %x\n", insn->n);
return insn->n;
}
static int c6xdigio_pwmo_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int i;
int chan = CR_CHAN(insn->chanspec);
/* printk("c6xdigio_pwmo_insn_write %x\n", insn->n); */
for (i = 0; i < insn->n; i++) {
C6X_pwmOutput(dev->iobase, chan, data[i]);
/* devpriv->ao_readback[chan] = data[i]; */
}
return i;
}
/* static int c6xdigio_ei_init_insn_read(struct comedi_device *dev, */
/* struct comedi_subdevice *s, */
/* struct comedi_insn *insn, */
/* unsigned int *data) */
/* { */
/* printk("c6xdigio_ei_init_insn_read %x\n", insn->n); */
/* return insn->n; */
/* } */
/* static int c6xdigio_ei_init_insn_write(struct comedi_device *dev, */
/* struct comedi_subdevice *s, */
/* struct comedi_insn *insn, */
/* unsigned int *data) */
/* { */
/* int i; */
/* int chan = CR_CHAN(insn->chanspec); */
/* *//* C6X_encResetAll( dev->iobase ); */
/* *//* return insn->n; */
/* } */
static int c6xdigio_ei_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
/* printk("c6xdigio_ei__insn_read %x\n", insn->n); */
int n;
int chan = CR_CHAN(insn->chanspec);
for (n = 0; n < insn->n; n++)
data[n] = (C6X_encInput(dev->iobase, chan) & 0xffffff);
return n;
}
static void board_init(struct comedi_device *dev)
{
/* printk("Inside board_init\n"); */
C6X_pwmInit(dev->iobase);
C6X_encResetAll(dev->iobase);
}
static const struct pnp_device_id c6xdigio_pnp_tbl[] = {
/* Standard LPT Printer Port */
{.id = "PNP0400", .driver_data = 0},
/* ECP Printer Port */
{.id = "PNP0401", .driver_data = 0},
{}
};
static struct pnp_driver c6xdigio_pnp_driver = {
.name = "c6xdigio",
.id_table = c6xdigio_pnp_tbl,
};
static int c6xdigio_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret;
ret = comedi_request_region(dev, it->options[0], C6XDIGIO_SIZE);
if (ret)
return ret;
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
/* Make sure that PnP ports get activated */
pnp_register_driver(&c6xdigio_pnp_driver);
s = &dev->subdevices[0];
/* pwm output subdevice */
s->type = COMEDI_SUBD_AO; /* Not sure what to put here */
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 2;
/* s->trig[0] = c6xdigio_pwmo; */
s->insn_read = c6xdigio_pwmo_insn_read;
s->insn_write = c6xdigio_pwmo_insn_write;
s->maxdata = 500;
s->range_table = &range_bipolar10; /* A suitable lie */
s = &dev->subdevices[1];
/* encoder (counter) subdevice */
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
s->n_chan = 2;
/* s->trig[0] = c6xdigio_ei; */
s->insn_read = c6xdigio_ei_insn_read;
s->maxdata = 0xffffff;
s->range_table = &range_unknown;
/* s = &dev->subdevices[2]; */
/* pwm output subdevice */
/* s->type = COMEDI_SUBD_COUNTER; // Not sure what to put here */
/* s->subdev_flags = SDF_WRITEABLE; */
/* s->n_chan = 1; */
/* s->trig[0] = c6xdigio_ei_init; */
/* s->insn_read = c6xdigio_ei_init_insn_read; */
/* s->insn_write = c6xdigio_ei_init_insn_write; */
/* s->maxdata = 0xFFFF; // Really just a don't care */
/* s->range_table = &range_unknown; // Not sure what to put here */
/* I will call this init anyway but more than likely the DSP board */
/* will not be connected when device driver is loaded. */
board_init(dev);
return 0;
}
static void c6xdigio_detach(struct comedi_device *dev)
{
comedi_legacy_detach(dev);
pnp_unregister_driver(&c6xdigio_pnp_driver);
}
static struct comedi_driver c6xdigio_driver = {
.driver_name = "c6xdigio",
.module = THIS_MODULE,
.attach = c6xdigio_attach,
.detach = c6xdigio_detach,
};
module_comedi_driver(c6xdigio_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
GuneetAtwal/kernel_a210 | drivers/leds/leds-gpio.c | 4650 | 7604 | /*
* LEDs driver for GPIOs
*
* Copyright (C) 2007 8D Technologies inc.
* Raphael Assenat <raph@8d.com>
* Copyright (C) 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>
struct gpio_led_data {
struct led_classdev cdev;
unsigned gpio;
struct work_struct work;
u8 new_level;
u8 can_sleep;
u8 active_low;
u8 blinking;
int (*platform_gpio_blink_set)(unsigned gpio, int state,
unsigned long *delay_on, unsigned long *delay_off);
};
static void gpio_led_work(struct work_struct *work)
{
struct gpio_led_data *led_dat =
container_of(work, struct gpio_led_data, work);
if (led_dat->blinking) {
led_dat->platform_gpio_blink_set(led_dat->gpio,
led_dat->new_level,
NULL, NULL);
led_dat->blinking = 0;
} else
gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
}
static void gpio_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct gpio_led_data *led_dat =
container_of(led_cdev, struct gpio_led_data, cdev);
int level;
if (value == LED_OFF)
level = 0;
else
level = 1;
if (led_dat->active_low)
level = !level;
/* Setting GPIOs with I2C/etc requires a task context, and we don't
* seem to have a reliable way to know if we're already in one; so
* let's just assume the worst.
*/
if (led_dat->can_sleep) {
led_dat->new_level = level;
schedule_work(&led_dat->work);
} else {
if (led_dat->blinking) {
led_dat->platform_gpio_blink_set(led_dat->gpio, level,
NULL, NULL);
led_dat->blinking = 0;
} else
gpio_set_value(led_dat->gpio, level);
}
}
static int gpio_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off)
{
struct gpio_led_data *led_dat =
container_of(led_cdev, struct gpio_led_data, cdev);
led_dat->blinking = 1;
return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK,
delay_on, delay_off);
}
static int __devinit create_gpio_led(const struct gpio_led *template,
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, int, unsigned long *, unsigned long *))
{
int ret, state;
led_dat->gpio = -1;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
template->gpio, template->name);
return 0;
}
ret = gpio_request(template->gpio, template->name);
if (ret < 0)
return ret;
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->gpio = template->gpio;
led_dat->can_sleep = gpio_cansleep(template->gpio);
led_dat->active_low = template->active_low;
led_dat->blinking = 0;
if (blink_set) {
led_dat->platform_gpio_blink_set = blink_set;
led_dat->cdev.blink_set = gpio_blink_set;
}
led_dat->cdev.brightness_set = gpio_led_set;
if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
state = !!gpio_get_value_cansleep(led_dat->gpio) ^ led_dat->active_low;
else
state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
INIT_WORK(&led_dat->work, gpio_led_work);
ret = led_classdev_register(parent, &led_dat->cdev);
if (ret < 0)
goto err;
return 0;
err:
gpio_free(led_dat->gpio);
return ret;
}
static void delete_gpio_led(struct gpio_led_data *led)
{
if (!gpio_is_valid(led->gpio))
return;
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
gpio_free(led->gpio);
}
struct gpio_leds_priv {
int num_leds;
struct gpio_led_data leds[];
};
static inline int sizeof_gpio_leds_priv(int num_leds)
{
return sizeof(struct gpio_leds_priv) +
(sizeof(struct gpio_led_data) * num_leds);
}
/* Code to create from OpenFirmware platform devices */
#ifdef CONFIG_OF_GPIO
static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *child;
struct gpio_leds_priv *priv;
int count = 0, ret;
/* count LEDs in this device, so we know how much to allocate */
for_each_child_of_node(np, child)
count++;
if (!count)
return NULL;
priv = kzalloc(sizeof_gpio_leds_priv(count), GFP_KERNEL);
if (!priv)
return NULL;
for_each_child_of_node(np, child) {
struct gpio_led led = {};
enum of_gpio_flags flags;
const char *state;
led.gpio = of_get_gpio_flags(child, 0, &flags);
led.active_low = flags & OF_GPIO_ACTIVE_LOW;
led.name = of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
state = of_get_property(child, "default-state", NULL);
if (state) {
if (!strcmp(state, "keep"))
led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
else if (!strcmp(state, "on"))
led.default_state = LEDS_GPIO_DEFSTATE_ON;
else
led.default_state = LEDS_GPIO_DEFSTATE_OFF;
}
ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
&pdev->dev, NULL);
if (ret < 0) {
of_node_put(child);
goto err;
}
}
return priv;
err:
for (count = priv->num_leds - 2; count >= 0; count--)
delete_gpio_led(&priv->leds[count]);
kfree(priv);
return NULL;
}
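/*
 * Illustrative devicetree fragment for the parser above (node name, label
 * and GPIO numbers are hypothetical):
 *
 *   leds {
 *       compatible = "gpio-leds";
 *       status {
 *           label = "board:green:status";
 *           gpios = <&gpio0 5 1>;                 <- last cell: active-low flag
 *           linux,default-trigger = "heartbeat";
 *           default-state = "off";
 *       };
 *   };
 */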
static const struct of_device_id of_gpio_leds_match[] = {
{ .compatible = "gpio-leds", },
{},
};
#else /* CONFIG_OF_GPIO */
static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
return NULL;
}
#define of_gpio_leds_match NULL
#endif /* CONFIG_OF_GPIO */
static int __devinit gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_leds_priv *priv;
int i, ret = 0;
if (pdata && pdata->num_leds) {
priv = kzalloc(sizeof_gpio_leds_priv(pdata->num_leds),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->num_leds = pdata->num_leds;
for (i = 0; i < priv->num_leds; i++) {
ret = create_gpio_led(&pdata->leds[i],
&priv->leds[i],
&pdev->dev, pdata->gpio_blink_set);
if (ret < 0) {
/* On failure: unwind the led creations */
for (i = i - 1; i >= 0; i--)
delete_gpio_led(&priv->leds[i]);
kfree(priv);
return ret;
}
}
} else {
priv = gpio_leds_create_of(pdev);
if (!priv)
return -ENODEV;
}
platform_set_drvdata(pdev, priv);
return 0;
}
static int __devexit gpio_led_remove(struct platform_device *pdev)
{
struct gpio_leds_priv *priv = dev_get_drvdata(&pdev->dev);
int i;
for (i = 0; i < priv->num_leds; i++)
delete_gpio_led(&priv->leds[i]);
dev_set_drvdata(&pdev->dev, NULL);
kfree(priv);
return 0;
}
static struct platform_driver gpio_led_driver = {
.probe = gpio_led_probe,
.remove = __devexit_p(gpio_led_remove),
.driver = {
.name = "leds-gpio",
.owner = THIS_MODULE,
.of_match_table = of_gpio_leds_match,
},
};
module_platform_driver(gpio_led_driver);
MODULE_AUTHOR("Raphael Assenat <raph@8d.com>, Trent Piepho <tpiepho@freescale.com>");
MODULE_DESCRIPTION("GPIO LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-gpio");
| gpl-2.0 |
Klozz/kernel_asus_grouper_kitkat | drivers/atm/ambassador.c | 4650 | 67858 | /*
Madge Ambassador ATM Adapter driver.
Copyright (C) 1995-1999 Madge Networks Ltd.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian
system and in the file COPYING in the Linux kernel source.
*/
/* * dedicated to the memory of Graham Gordon 1971-1998 * */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/atmdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/poison.h>
#include <linux/bitrev.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include "ambassador.h"
#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
#define description_string "Madge ATM Ambassador driver"
#define version_string "1.2.4"
static inline void __init show_version (void) {
printk ("%s version %s\n", description_string, version_string);
}
/*
Theory of Operation
I Hardware, detection, initialisation and shutdown.
1. Supported Hardware
This driver is for the PCI ATMizer-based Ambassador card (except
very early versions). It is not suitable for the similar EISA "TR7"
card. Commercially, both cards are known as Collage Server ATM
adapters.
The loader supports image transfer to the card, image start and few
other miscellaneous commands.
Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023.
The cards are big-endian.
2. Detection
Standard PCI stuff, the early cards are detected and rejected.
3. Initialisation
The cards are reset and the self-test results are checked. The
microcode image is then transferred and started. This waits for a
pointer to a descriptor containing details of the host-based queues
and buffers and various parameters etc. Once they are processed
normal operations may begin. The BIA is read using a microcode
command.
4. Shutdown
This may be accomplished either by a card reset or via the microcode
shutdown command. Further investigation required.
5. Persistent state
The card reset does not affect PCI configuration (good) or the
contents of several other "shared run-time registers" (bad) which
include doorbell and interrupt control as well as EEPROM and PCI
control. The driver must be careful when modifying these registers
not to touch bits it does not use and to undo any changes at exit.
II Driver software
0. Generalities
The adapter is quite intelligent (fast) and has a simple interface
(few features). VPI is always zero, 1024 VCIs are supported. There
is limited cell rate support. UBR channels can be capped and ABR
(explicit rate, but not EFCI) is supported. There is no CBR or VBR
support.
1. Driver <-> Adapter Communication
Apart from the basic loader commands, the driver communicates
through three entities: the command queue (CQ), the transmit queue
pair (TXQ) and the receive queue pairs (RXQ). These three entities
are set up by the host and passed to the microcode just after it has
been started.
All queues are host-based circular queues. They are contiguous and
(due to hardware limitations) have some restrictions as to their
locations in (bus) memory. They are of the "full means the same as
empty so don't do that" variety since the adapter uses pointers
internally.
The queue pairs work as follows: one queue is for supply to the
adapter, items in it are pending and are owned by the adapter; the
other is the queue for return from the adapter, items in it have
been dealt with by the adapter. The host adds items to the supply
(TX descriptors and free RX buffer descriptors) and removes items
from the return (TX and RX completions). The adapter deals with out
of order completions.
Interrupts (card to host) and the doorbell (host to card) are used
for signalling.
1. CQ
This is to communicate "open VC", "close VC", "get stats" etc. to
the adapter. At most one command is retired every millisecond by the
card. There is no out of order completion or notification. The
driver needs to check the return code of the command, waiting as
appropriate.
2. TXQ
TX supply items are of variable length (scatter gather support) and
so the queue items are (more or less) pointers to the real thing.
Each TX supply item contains a unique, host-supplied handle (the skb
bus address seems most sensible as this works for Alphas as well,
there is no need to do any endian conversions on the handles).
TX return items consist of just the handles above.
3. RXQ (up to 4 of these with different lengths and buffer sizes)
RX supply items consist of a unique, host-supplied handle (the skb
bus address again) and a pointer to the buffer data area.
RX return items consist of the handle above, the VC, length and a
status word. This just screams "oh so easy" doesn't it?
Note on RX pool sizes:
Each pool should have enough buffers to handle a back-to-back stream
of minimum sized frames on a single VC. For example:
frame spacing = 3us (about right)
delay = IRQ lat + RX handling + RX buffer replenish = 20 (us) (a guess)
min number of buffers for one VC = 1 + delay/spacing (buffers)
delay/spacing = latency = (20+2)/3 = 7 (buffers) (integer division; the +2 rounds 20/3 up)
The 20us delay assumes that there is no need to sleep; if we need to
sleep to get buffers we are going to drop frames anyway.
In fact, each pool should have enough buffers to support the
simultaneous reassembly of a separate frame on each VC and cope with
the case in which frames complete in round robin cell fashion on
each VC.
Only one frame can complete at each cell arrival, so if "n" VCs are
open, the worst case is to have them all complete frames together
followed by all starting new frames together.
desired number of buffers = n + delay/spacing
These are the extreme requirements; however, they are "n+k" for some
"k", so we have only the constant to choose. This is the argument
rx_lats, which currently defaults to 7.
Actually, "n ? n+k : 0" is better and this is what is implemented,
subject to the limit given by the pool size.
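For illustration (invented numbers): with rx_lats = 7 and three VCs open
on one pool, the driver asks for 3 + 7 = 10 buffers on that pool; when
the last VC using the pool closes, buffers_wanted falls back to 0 and
the pool is drained.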
4. Driver locking
Simple spinlocks are used around the TX and RX queue mechanisms.
Anyone with a faster, working method is welcome to implement it.
The adapter command queue is protected with a spinlock. We always
wait for commands to complete.
A more complex form of locking is used around parts of the VC open
and close functions. There are three reasons for a lock: 1. we need
to do atomic rate reservation and release (not used yet), 2. Opening
sometimes involves two adapter commands which must not be separated
by another command on the same VC, 3. the changes to RX pool size
must be atomic. The lock needs to work over context switches, so we
use a semaphore.
III Hardware Features and Microcode Bugs
1. Byte Ordering
*%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*!
2. Memory access
All structures that are not accessed using DMA must be 4-byte
aligned (not a problem) and must not cross 4MB boundaries.
There is a DMA memory hole at E0000000-E00000FF (groan).
TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB
but for a hardware bug).
RX buffers (DMA write) must not cross 16MB boundaries and must
include spare trailing bytes up to the next 4-byte boundary; they
will be written with rubbish.
The PLX likes to prefetch; if reading up to 4 u32 past the end of
each TX fragment is not a problem, then TX can be made to go a
little faster by passing a flag at init that disables a prefetch
workaround. We do not pass this flag. (new microcode only)
Now we:
. Note that alloc_skb rounds up size to a 16byte boundary.
. Ensure all areas do not traverse 4MB boundaries.
. Ensure all areas do not start at a E00000xx bus address.
(I cannot be certain, but this may always hold with Linux)
. Make all failures cause a loud message.
. Discard non-conforming SKBs (causes TX failure or RX fill delay).
. Discard non-conforming TX fragment descriptors (the TX fails).
In the future we could:
. Allow RX areas that traverse 4MB (but not 16MB) boundaries.
. Segment TX areas into some/more fragments, when necessary.
. Relax checks for non-DMA items (ignore hole).
. Give scatter-gather (iovec) requirements using ???. (?)
3. VC close is broken (only for new microcode)
The VC close adapter microcode command fails to do anything if any
frames have been received on the VC but none have been transmitted.
Frames continue to be reassembled and passed (with IRQ) to the
driver.
IV To Do List
. Fix bugs!
. Timer code may be broken.
. Deal with buggy VC close (somehow) in microcode 12.
. Handle interrupted and/or non-blocking writes - is this a job for
the protocol layer?
. Add code to break up TX fragments when they span 4MB boundaries.
. Add SUNI phy layer (need to know where SUNI lives on card).
. Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b)
leave extra headroom space for Ambassador TX descriptors.
. Understand these elements of struct atm_vcc: recvq (proto?),
sleep, callback, listenq, backlog_quota, reply and user_back.
. Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable).
. Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow.
. Decide whether RX buffer recycling is or can be made completely safe;
turn it back on. It looks like Werner is going to axe this.
. Implement QoS changes on open VCs (involves extracting parts of VC open
and close into separate functions and using them to make changes).
. Hack on command queue so that someone can issue multiple commands and wait
on the last one (OR only "no-op" or "wait" commands are waited for).
. Eliminate need for while-schedule around do_command.
*/
static void do_housekeeping (unsigned long arg);
/********** globals **********/
static unsigned short debug = 0;
static unsigned int cmds = 8;
static unsigned int txs = 32;
static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 };
static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 };
static unsigned int rx_lats = 7;
static unsigned char pci_lat = 0;
static const unsigned long onegigmask = -1 << 30;
/********** access to adapter **********/
static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) {
PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data);
#ifdef AMB_MMIO
dev->membase[addr / sizeof(u32)] = data;
#else
outl (data, dev->iobase + addr);
#endif
}
static inline u32 rd_plain (const amb_dev * dev, size_t addr) {
#ifdef AMB_MMIO
u32 data = dev->membase[addr / sizeof(u32)];
#else
u32 data = inl (dev->iobase + addr);
#endif
PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data);
return data;
}
static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) {
__be32 be = cpu_to_be32 (data);
PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be);
#ifdef AMB_MMIO
dev->membase[addr / sizeof(u32)] = be;
#else
outl (be, dev->iobase + addr);
#endif
}
static inline u32 rd_mem (const amb_dev * dev, size_t addr) {
#ifdef AMB_MMIO
__be32 be = dev->membase[addr / sizeof(u32)];
#else
__be32 be = inl (dev->iobase + addr);
#endif
u32 data = be32_to_cpu (be);
PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be);
return data;
}
/********** dump routines **********/
static inline void dump_registers (const amb_dev * dev) {
#ifdef DEBUG_AMBASSADOR
if (debug & DBG_REGS) {
size_t i;
PRINTD (DBG_REGS, "reading PLX control: ");
for (i = 0x00; i < 0x30; i += sizeof(u32))
rd_mem (dev, i);
PRINTD (DBG_REGS, "reading mailboxes: ");
for (i = 0x40; i < 0x60; i += sizeof(u32))
rd_mem (dev, i);
PRINTD (DBG_REGS, "reading doorb irqev irqen reset:");
for (i = 0x60; i < 0x70; i += sizeof(u32))
rd_mem (dev, i);
}
#else
(void) dev;
#endif
return;
}
static inline void dump_loader_block (volatile loader_block * lb) {
#ifdef DEBUG_AMBASSADOR
unsigned int i;
PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:",
lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command));
for (i = 0; i < MAX_COMMAND_DATA; ++i)
PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i]));
PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid));
#else
(void) lb;
#endif
return;
}
static inline void dump_command (command * cmd) {
#ifdef DEBUG_AMBASSADOR
unsigned int i;
PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:",
cmd, /*be32_to_cpu*/ (cmd->request));
for (i = 0; i < 3; ++i)
PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i]));
PRINTDE (DBG_CMD, "");
#else
(void) cmd;
#endif
return;
}
static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
#ifdef DEBUG_AMBASSADOR
unsigned int i;
unsigned char * data = skb->data;
PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
for (i = 0; i < skb->len && i < 256; i++)
PRINTDM (DBG_DATA, "%02x ", data[i]);
PRINTDE (DBG_DATA,"");
#else
(void) prefix;
(void) vc;
(void) skb;
#endif
return;
}
/********** check memory areas for use by Ambassador **********/
/* see limitations under Hardware Features */
static int check_area (void * start, size_t length) {
// assumes length > 0
const u32 fourmegmask = -1 << 22;
const u32 twofivesixmask = -1 << 8;
const u32 starthole = 0xE0000000;
u32 startaddress = virt_to_bus (start);
u32 lastaddress = startaddress+length-1;
if ((startaddress ^ lastaddress) & fourmegmask ||
(startaddress & twofivesixmask) == starthole) {
PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!",
startaddress, lastaddress);
return -1;
} else {
return 0;
}
}
/********** free an skb (as per ATM device driver documentation) **********/
static void amb_kfree_skb (struct sk_buff * skb) {
if (ATM_SKB(skb)->vcc->pop) {
ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
} else {
dev_kfree_skb_any (skb);
}
}
/********** TX completion **********/
static void tx_complete (amb_dev * dev, tx_out * tx) {
tx_simple * tx_descr = bus_to_virt (tx->handle);
struct sk_buff * skb = tx_descr->skb;
PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
// VC layer stats
atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
// free the descriptor
kfree (tx_descr);
// free the skb
amb_kfree_skb (skb);
dev->stats.tx_ok++;
return;
}
/********** RX completion **********/
static void rx_complete (amb_dev * dev, rx_out * rx) {
struct sk_buff * skb = bus_to_virt (rx->handle);
u16 vc = be16_to_cpu (rx->vc);
// unused: u16 lec_id = be16_to_cpu (rx->lec_id);
u16 status = be16_to_cpu (rx->status);
u16 rx_len = be16_to_cpu (rx->length);
PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len);
// XXX move this in and add to VC stats ???
if (!status) {
struct atm_vcc * atm_vcc = dev->rxer[vc];
dev->stats.rx.ok++;
if (atm_vcc) {
if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
if (atm_charge (atm_vcc, skb->truesize)) {
// prepare socket buffer
ATM_SKB(skb)->vcc = atm_vcc;
skb_put (skb, rx_len);
dump_skb ("<<<", vc, skb);
// VC layer stats
atomic_inc(&atm_vcc->stats->rx);
__net_timestamp(skb);
// end of our responsibility
atm_vcc->push (atm_vcc, skb);
return;
} else {
// someone fix this (message), please!
PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
// drop stats incremented in atm_charge
}
} else {
PRINTK (KERN_INFO, "dropped over-size frame");
// should we count this?
atomic_inc(&atm_vcc->stats->rx_drop);
}
} else {
PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc);
// this is an adapter bug, only in new version of microcode
}
} else {
dev->stats.rx.error++;
if (status & CRC_ERR)
dev->stats.rx.badcrc++;
if (status & LEN_ERR)
dev->stats.rx.toolong++;
if (status & ABORT_ERR)
dev->stats.rx.aborted++;
if (status & UNUSED_ERR)
dev->stats.rx.unused++;
}
dev_kfree_skb_any (skb);
return;
}
/*
Note on queue handling.
Here "give" and "take" refer to queue entries and a queue (pair)
rather than frames to or from the host or adapter. Empty frame
buffers are given to the RX queue pair and returned unused or
containing RX frames. TX frames (well, pointers to TX fragment
lists) are given to the TX queue pair, completions are returned.
*/
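/*
Illustrative sketch only: NEXTQ (defined in ambassador.h) advances a queue
pointer with wrap-around over [start, limit). A plausible definition,
assuming the real macro matches how it is used below:
#define NEXTQ(ptr, start, limit) ( (ptr) + 1 == (limit) ? (start) : (ptr) + 1 )
One slot is always left unused (maximum = size - 1 in create_queues), so
"in == out" can only mean empty, never full.
*/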
/********** command queue **********/
// I really don't like this, but it's the best I can do at the moment
// also, the callers are responsible for byte order as the microcode
// sometimes does 16-bit accesses (yuk yuk yuk)
static int command_do (amb_dev * dev, command * cmd) {
amb_cq * cq = &dev->cq;
volatile amb_cq_ptrs * ptrs = &cq->ptrs;
command * my_slot;
PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev);
if (test_bit (dead, &dev->flags))
return 0;
spin_lock (&cq->lock);
// if not full...
if (cq->pending < cq->maximum) {
// remember my slot for later
my_slot = ptrs->in;
PRINTD (DBG_CMD, "command in slot %p", my_slot);
dump_command (cmd);
// copy command in
*ptrs->in = *cmd;
cq->pending++;
ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);
// mail the command
wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));
if (cq->pending > cq->high)
cq->high = cq->pending;
spin_unlock (&cq->lock);
// these comments were in a while-loop before, msleep removes the loop
// go to sleep
// PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
msleep(cq->pending);
// wait for my slot to be reached (all waiters are here or above, until...)
while (ptrs->out != my_slot) {
PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
// wait on my slot (... one gets to its slot, and... )
while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
PRINTD (DBG_CMD, "wait: command slot completion");
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
PRINTD (DBG_CMD, "command complete");
// update queue (... moves the queue along to the next slot)
spin_lock (&cq->lock);
cq->pending--;
// copy command out
*cmd = *ptrs->out;
ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
spin_unlock (&cq->lock);
return 0;
} else {
cq->filled++;
spin_unlock (&cq->lock);
return -EAGAIN;
}
}
/********** TX queue pair **********/
static int tx_give (amb_dev * dev, tx_in * tx) {
amb_txq * txq = &dev->txq;
unsigned long flags;
PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);
if (test_bit (dead, &dev->flags))
return 0;
spin_lock_irqsave (&txq->lock, flags);
if (txq->pending < txq->maximum) {
PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);
*txq->in.ptr = *tx;
txq->pending++;
txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
// hand over the TX and ring the bell
wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);
if (txq->pending > txq->high)
txq->high = txq->pending;
spin_unlock_irqrestore (&txq->lock, flags);
return 0;
} else {
txq->filled++;
spin_unlock_irqrestore (&txq->lock, flags);
return -EAGAIN;
}
}
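// Reap at most one TX completion: returns 0 if a completion was processed,
// -1 if the TX return queue had nothing for us.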
static int tx_take (amb_dev * dev) {
amb_txq * txq = &dev->txq;
unsigned long flags;
PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);
spin_lock_irqsave (&txq->lock, flags);
if (txq->pending && txq->out.ptr->handle) {
// deal with TX completion
tx_complete (dev, txq->out.ptr);
// mark unused again
txq->out.ptr->handle = 0;
// remove item
txq->pending--;
txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);
spin_unlock_irqrestore (&txq->lock, flags);
return 0;
} else {
spin_unlock_irqrestore (&txq->lock, flags);
return -1;
}
}
/********** RX queue pairs **********/
static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
amb_rxq * rxq = &dev->rxq[pool];
unsigned long flags;
PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);
spin_lock_irqsave (&rxq->lock, flags);
if (rxq->pending < rxq->maximum) {
PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);
*rxq->in.ptr = *rx;
rxq->pending++;
rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
// hand over the RX buffer
wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
spin_unlock_irqrestore (&rxq->lock, flags);
return 0;
} else {
spin_unlock_irqrestore (&rxq->lock, flags);
return -1;
}
}
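// Reap at most one RX completion: returns 0 if the adapter returned a buffer
// and it was processed, -1 if the RX return queue was empty.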
static int rx_take (amb_dev * dev, unsigned char pool) {
amb_rxq * rxq = &dev->rxq[pool];
unsigned long flags;
PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);
spin_lock_irqsave (&rxq->lock, flags);
if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
// deal with RX completion
rx_complete (dev, rxq->out.ptr);
// mark unused again
rxq->out.ptr->status = 0;
rxq->out.ptr->length = 0;
// remove item
rxq->pending--;
rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);
if (rxq->pending < rxq->low)
rxq->low = rxq->pending;
spin_unlock_irqrestore (&rxq->lock, flags);
return 0;
} else {
if (!rxq->pending && rxq->buffers_wanted)
rxq->emptied++;
spin_unlock_irqrestore (&rxq->lock, flags);
return -1;
}
}
/********** RX Pool handling **********/
/* pre: buffers_wanted = 0, post: pending = 0 */
static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
amb_rxq * rxq = &dev->rxq[pool];
PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
if (test_bit (dead, &dev->flags))
return;
/* we are not quite like the fill pool routines as we cannot just
remove one buffer, we have to remove all of them, but we might as
well pretend... */
if (rxq->pending > rxq->buffers_wanted) {
command cmd;
cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
while (command_do (dev, &cmd))
schedule();
/* the pool may also be emptied via the interrupt handler */
while (rxq->pending > rxq->buffers_wanted)
if (rx_take (dev, pool))
schedule();
}
return;
}
static void drain_rx_pools (amb_dev * dev) {
unsigned char pool;
PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
drain_rx_pool (dev, pool);
}
static void fill_rx_pool (amb_dev * dev, unsigned char pool,
gfp_t priority)
{
rx_in rx;
amb_rxq * rxq;
PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);
if (test_bit (dead, &dev->flags))
return;
rxq = &dev->rxq[pool];
while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {
struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
if (!skb) {
PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
return;
}
if (check_area (skb->data, skb->truesize)) {
dev_kfree_skb_any (skb);
return;
}
// cast needed as there is no %? for pointer differences
PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
rx.handle = virt_to_bus (skb);
rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
if (rx_give (dev, &rx, pool))
dev_kfree_skb_any (skb);
}
return;
}
// top up all RX pools
static void fill_rx_pools (amb_dev * dev) {
unsigned char pool;
PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev);
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
fill_rx_pool (dev, pool, GFP_ATOMIC);
return;
}
/********** enable host interrupts **********/
static void interrupts_on (amb_dev * dev) {
wr_plain (dev, offsetof(amb_mem, interrupt_control),
rd_plain (dev, offsetof(amb_mem, interrupt_control))
| AMB_INTERRUPT_BITS);
}
/********** disable host interrupts **********/
static void interrupts_off (amb_dev * dev) {
wr_plain (dev, offsetof(amb_mem, interrupt_control),
rd_plain (dev, offsetof(amb_mem, interrupt_control))
&~ AMB_INTERRUPT_BITS);
}
/********** interrupt handling **********/
static irqreturn_t interrupt_handler(int irq, void *dev_id) {
amb_dev * dev = dev_id;
PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id);
{
u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt));
// for us or someone else sharing the same interrupt
if (!interrupt) {
PRINTD (DBG_IRQ, "irq not for me: %d", irq);
return IRQ_NONE;
}
// definitely for us
PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt);
wr_plain (dev, offsetof(amb_mem, interrupt), -1);
}
{
unsigned int irq_work = 0;
unsigned char pool;
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
while (!rx_take (dev, pool))
++irq_work;
while (!tx_take (dev))
++irq_work;
if (irq_work) {
fill_rx_pools (dev);
PRINTD (DBG_IRQ, "work done: %u", irq_work);
} else {
PRINTD (DBG_IRQ|DBG_WARN, "no work done");
}
}
PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
return IRQ_HANDLED;
}
/********** make rate (not quite as much fun as Horizon) **********/
static int make_rate (unsigned int rate, rounding r,
u16 * bits, unsigned int * actual) {
unsigned char exp = -1; // hush gcc
unsigned int man = -1; // hush gcc
PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);
// rates in cells per second, ITU format (nasty 16-bit floating-point)
// given 5-bit e and 9-bit m:
// rate = EITHER (1+m/2^9)*2^e OR 0
// bits = EITHER 1<<14 | e<<9 | m OR 0
// (bit 15 is "reserved", bit 14 "non-zero")
// smallest rate is 0 (special representation)
// largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
// smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
// simple algorithm:
// find position of top bit, this gives e
// remove top bit and shift (rounding if feeling clever) by 9-e
// ucode bug: please don't set bit 14! so 0 rate not representable
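// worked example (illustrative): rate = 1536 cells/s = 1.5*2^10, so e = 10
// and m = 256; with bit 14 left clear (ucode bug above) bits = (10<<9)|256 = 0x1500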
if (rate > 0xffc00000U) {
// larger than largest representable rate
if (r == round_up) {
return -EINVAL;
} else {
exp = 31;
man = 511;
}
} else if (rate) {
// representable rate
exp = 31;
man = rate;
// invariant: rate = man*2^(exp-31)
while (!(man & (1<<31))) {
exp = exp - 1;
man = man<<1;
}
// man has top bit set
// rate = (2^31+(man-2^31))*2^(exp-31)
// rate = (1+(man-2^31)/2^31)*2^exp
man = man<<1;
man &= 0xffffffffU; // a nop on 32-bit systems
// rate = (1+man/2^32)*2^exp
// exp is in the range 0 to 31, man is in the range 0 to 2^32-1
// time to lose significance... we want m in the range 0 to 2^9-1
// rounding presents a minor problem... we first decide which way
// we are rounding (based on given rounding direction and possibly
// the bits of the mantissa that are to be discarded).
switch (r) {
case round_down: {
// just truncate
man = man>>(32-9);
break;
}
case round_up: {
// check all bits that we are discarding
if (man & (~0U>>9)) {
man = (man>>(32-9)) + 1;
if (man == (1<<9)) {
// no need to check for round up outside of range
man = 0;
exp += 1;
}
} else {
man = (man>>(32-9));
}
break;
}
case round_nearest: {
// check msb that we are discarding
if (man & (1<<(32-9-1))) {
man = (man>>(32-9)) + 1;
if (man == (1<<9)) {
// no need to check for round up outside of range
man = 0;
exp += 1;
}
} else {
man = (man>>(32-9));
}
break;
}
}
} else {
// zero rate - not representable
if (r == round_down) {
return -EINVAL;
} else {
exp = 0;
man = 0;
}
}
PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);
if (bits)
*bits = /* (1<<14) | */ (exp<<9) | man;
if (actual)
*actual = (exp >= 9)
? (1 << exp) + (man << (exp-9))
: (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
return 0;
}
/********** Linux ATM Operations **********/
// some are not yet implemented while others do not make sense for
// this device
/********** Open a VC **********/
static int amb_open (struct atm_vcc * atm_vcc)
{
int error;
struct atm_qos * qos;
struct atm_trafprm * txtp;
struct atm_trafprm * rxtp;
u16 tx_rate_bits = -1; // hush gcc
u16 tx_vc_bits = -1; // hush gcc
u16 tx_frame_bits = -1; // hush gcc
amb_dev * dev = AMB_DEV(atm_vcc->dev);
amb_vcc * vcc;
unsigned char pool = -1; // hush gcc
short vpi = atm_vcc->vpi;
int vci = atm_vcc->vci;
PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);
#ifdef ATM_VPI_UNSPEC
// UNSPEC is deprecated, remove this code eventually
if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
return -EINVAL;
}
#endif
if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
0 <= vci && vci < (1<<NUM_VCI_BITS))) {
PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
return -EINVAL;
}
qos = &atm_vcc->qos;
if (qos->aal != ATM_AAL5) {
PRINTD (DBG_QOS, "AAL not supported");
return -EINVAL;
}
// traffic parameters
PRINTD (DBG_QOS, "TX:");
txtp = &qos->txtp;
if (txtp->traffic_class != ATM_NONE) {
switch (txtp->traffic_class) {
case ATM_UBR: {
// we take "the PCR" as a rate-cap
int pcr = atm_pcr_goal (txtp);
if (!pcr) {
// no rate cap
tx_rate_bits = 0;
tx_vc_bits = TX_UBR;
tx_frame_bits = TX_FRAME_NOTCAP;
} else {
rounding r;
if (pcr < 0) {
r = round_down;
pcr = -pcr;
} else {
r = round_up;
}
error = make_rate (pcr, r, &tx_rate_bits, NULL);
if (error)
return error;
tx_vc_bits = TX_UBR_CAPPED;
tx_frame_bits = TX_FRAME_CAPPED;
}
break;
}
#if 0
case ATM_ABR: {
pcr = atm_pcr_goal (txtp);
PRINTD (DBG_QOS, "pcr goal = %d", pcr);
break;
}
#endif
default: {
// PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
PRINTD (DBG_QOS, "request for non-UBR denied");
return -EINVAL;
}
}
PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
tx_rate_bits, tx_vc_bits);
}
PRINTD (DBG_QOS, "RX:");
rxtp = &qos->rxtp;
if (rxtp->traffic_class == ATM_NONE) {
// do nothing
} else {
// choose an RX pool (arranged in increasing size)
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
break;
}
if (pool == NUM_RX_POOLS) {
PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
"no pool suitable for VC (RX max_sdu %d is too large)",
rxtp->max_sdu);
return -EINVAL;
}
switch (rxtp->traffic_class) {
case ATM_UBR: {
break;
}
#if 0
case ATM_ABR: {
pcr = atm_pcr_goal (rxtp);
PRINTD (DBG_QOS, "pcr goal = %d", pcr);
break;
}
#endif
default: {
// PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
PRINTD (DBG_QOS, "request for non-UBR denied");
return -EINVAL;
}
}
}
// get space for our vcc stuff
vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
if (!vcc) {
PRINTK (KERN_ERR, "out of memory!");
return -ENOMEM;
}
atm_vcc->dev_data = (void *) vcc;
// no failures beyond this point
// we are not really "immediately before allocating the connection
// identifier in hardware", but it will just have to do!
set_bit(ATM_VF_ADDR,&atm_vcc->flags);
if (txtp->traffic_class != ATM_NONE) {
command cmd;
vcc->tx_frame_bits = tx_frame_bits;
mutex_lock(&dev->vcc_sf);
if (dev->rxer[vci]) {
// RXer on the channel already, just modify rate...
cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
while (command_do (dev, &cmd))
schedule();
// ... and TX flags, preserving the RX pool
cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.modify_flags.flags = cpu_to_be32
( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
| (tx_vc_bits << SRB_FLAGS_SHIFT) );
while (command_do (dev, &cmd))
schedule();
} else {
// no RXer on the channel, just open (with pool zero)
cmd.request = cpu_to_be32 (SRB_OPEN_VC);
cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
while (command_do (dev, &cmd))
schedule();
}
dev->txer[vci].tx_present = 1;
mutex_unlock(&dev->vcc_sf);
}
if (rxtp->traffic_class != ATM_NONE) {
command cmd;
vcc->rx_info.pool = pool;
mutex_lock(&dev->vcc_sf);
/* grow RX buffer pool */
if (!dev->rxq[pool].buffers_wanted)
dev->rxq[pool].buffers_wanted = rx_lats;
dev->rxq[pool].buffers_wanted += 1;
fill_rx_pool (dev, pool, GFP_KERNEL);
if (dev->txer[vci].tx_present) {
// TXer on the channel already
// switch (from pool zero) to this pool, preserving the TX bits
cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.modify_flags.flags = cpu_to_be32
( (pool << SRB_POOL_SHIFT)
| (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
} else {
// no TXer on the channel, open the VC (with no rate info)
cmd.request = cpu_to_be32 (SRB_OPEN_VC);
cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
cmd.args.open.rate = cpu_to_be32 (0);
}
while (command_do (dev, &cmd))
schedule();
// this link allows RX frames through
dev->rxer[vci] = atm_vcc;
mutex_unlock(&dev->vcc_sf);
}
// indicate readiness
set_bit(ATM_VF_READY,&atm_vcc->flags);
return 0;
}
/********** Close a VC **********/
static void amb_close (struct atm_vcc * atm_vcc) {
amb_dev * dev = AMB_DEV (atm_vcc->dev);
amb_vcc * vcc = AMB_VCC (atm_vcc);
u16 vci = atm_vcc->vci;
PRINTD (DBG_VCC|DBG_FLOW, "amb_close");
// indicate unreadiness
clear_bit(ATM_VF_READY,&atm_vcc->flags);
// disable TXing
if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
command cmd;
mutex_lock(&dev->vcc_sf);
if (dev->rxer[vci]) {
// RXer still on the channel, just modify rate... XXX not really needed
cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.modify_rate.rate = cpu_to_be32 (0);
// ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool
} else {
// no RXer on the channel, close channel
cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
}
dev->txer[vci].tx_present = 0;
while (command_do (dev, &cmd))
schedule();
mutex_unlock(&dev->vcc_sf);
}
// disable RXing
if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
command cmd;
// this is (the?) one reason why we need the amb_vcc struct
unsigned char pool = vcc->rx_info.pool;
mutex_lock(&dev->vcc_sf);
if (dev->txer[vci].tx_present) {
// TXer still on the channel, just go to pool zero XXX not really needed
cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0
cmd.args.modify_flags.flags = cpu_to_be32
(dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT);
} else {
// no TXer on the channel, close the VC
cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
}
// forget the rxer - no more skbs will be pushed
if (atm_vcc != dev->rxer[vci])
PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p",
"arghhh! we're going to die!",
vcc, dev->rxer[vci]);
dev->rxer[vci] = NULL;
while (command_do (dev, &cmd))
schedule();
/* shrink RX buffer pool */
dev->rxq[pool].buffers_wanted -= 1;
if (dev->rxq[pool].buffers_wanted == rx_lats) {
dev->rxq[pool].buffers_wanted = 0;
drain_rx_pool (dev, pool);
}
mutex_unlock(&dev->vcc_sf);
}
// free our structure
kfree (vcc);
// say the VPI/VCI is free again
clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
return;
}
/********** Send **********/
static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
amb_dev * dev = AMB_DEV(atm_vcc->dev);
amb_vcc * vcc = AMB_VCC(atm_vcc);
u16 vc = atm_vcc->vci;
unsigned int tx_len = skb->len;
unsigned char * tx_data = skb->data;
tx_simple * tx_descr;
tx_in tx;
if (test_bit (dead, &dev->flags))
return -EIO;
PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u",
vc, tx_data, tx_len);
dump_skb (">>>", vc, skb);
if (!dev->txer[vc].tx_present) {
PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc);
return -EBADFD;
}
// this is a driver private field so we have to set it ourselves,
// despite the fact that we are _required_ to use it to check for a
// pop function
ATM_SKB(skb)->vcc = atm_vcc;
if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
return -EIO;
}
if (check_area (skb->data, skb->len)) {
atomic_inc(&atm_vcc->stats->tx_err);
return -ENOMEM; // ?
}
// allocate memory for fragments
tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL);
if (!tx_descr) {
PRINTK (KERN_ERR, "could not allocate TX descriptor");
return -ENOMEM;
}
if (check_area (tx_descr, sizeof(tx_simple))) {
kfree (tx_descr);
return -ENOMEM;
}
PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr);
tx_descr->skb = skb;
tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len);
tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data));
tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr);
tx_descr->tx_frag_end.vc = 0;
tx_descr->tx_frag_end.next_descriptor_length = 0;
tx_descr->tx_frag_end.next_descriptor = 0;
#ifdef AMB_NEW_MICROCODE
tx_descr->tx_frag_end.cpcs_uu = 0;
tx_descr->tx_frag_end.cpi = 0;
tx_descr->tx_frag_end.pad = 0;
#endif
tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc);
tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end));
tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag));
while (tx_give (dev, &tx))
schedule();
return 0;
}
/********** Change QoS on a VC **********/
// int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags);
/********** Free RX Socket Buffer **********/
#if 0
static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
amb_dev * dev = AMB_DEV (atm_vcc->dev);
amb_vcc * vcc = AMB_VCC (atm_vcc);
unsigned char pool = vcc->rx_info.pool;
rx_in rx;
// This may be unsafe for various reasons that I cannot really guess
// at. However, I note that the ATM layer calls kfree_skb rather
// than dev_kfree_skb at this point so we are least covered as far
// as buffer locking goes. There may be bugs if pcap clones RX skbs.
PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)",
skb, atm_vcc, vcc);
rx.handle = virt_to_bus (skb);
rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
skb->data = skb->head;
skb->tail = skb->head;
skb->len = 0;
if (!rx_give (dev, &rx, pool)) {
// success
PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
return;
}
// just do what the ATM layer would have done
dev_kfree_skb_any (skb);
return;
}
#endif
/********** Proc File Output **********/
static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
amb_dev * dev = AMB_DEV (atm_dev);
int left = *pos;
unsigned char pool;
PRINTD (DBG_FLOW, "amb_proc_read");
/* more diagnostics here? */
if (!left--) {
amb_stats * s = &dev->stats;
return sprintf (page,
"frames: TX OK %lu, RX OK %lu, RX bad %lu "
"(CRC %lu, long %lu, aborted %lu, unused %lu).\n",
s->tx_ok, s->rx.ok, s->rx.error,
s->rx.badcrc, s->rx.toolong,
s->rx.aborted, s->rx.unused);
}
if (!left--) {
amb_cq * c = &dev->cq;
return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
c->pending, c->high, c->maximum);
}
if (!left--) {
amb_txq * t = &dev->txq;
return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
t->pending, t->maximum, t->high, t->filled);
}
if (!left--) {
unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
amb_rxq * r = &dev->rxq[pool];
count += sprintf (page+count, " %u/%u/%u %u %u",
r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied);
}
count += sprintf (page+count, ".\n");
return count;
}
if (!left--) {
unsigned int count = sprintf (page, "RX buffer sizes:");
for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
amb_rxq * r = &dev->rxq[pool];
count += sprintf (page+count, " %u", r->buffer_size);
}
count += sprintf (page+count, ".\n");
return count;
}
#if 0
if (!left--) {
// suni block etc?
}
#endif
return 0;
}
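/*
Sample of the first line produced above (numbers purely illustrative; the
format is the sprintf at the top of amb_proc_read):
frames: TX OK 42, RX OK 17, RX bad 0 (CRC 0, long 0, aborted 0, unused 0).
*/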
/********** Operation Structure **********/
static const struct atmdev_ops amb_ops = {
.open = amb_open,
.close = amb_close,
.send = amb_send,
.proc_read = amb_proc_read,
.owner = THIS_MODULE,
};
/********** housekeeping **********/
static void do_housekeeping (unsigned long arg) {
amb_dev * dev = (amb_dev *) arg;
// could collect device-specific (not driver/atm-linux) stats here
// last resort refill once every ten seconds
fill_rx_pools (dev);
mod_timer(&dev->housekeeping, jiffies + 10*HZ);
return;
}
/********** creation of communication queues **********/
static int __devinit create_queues (amb_dev * dev, unsigned int cmds,
unsigned int txs, unsigned int * rxs,
unsigned int * rx_buffer_sizes) {
unsigned char pool;
size_t total = 0;
void * memory;
void * limit;
PRINTD (DBG_FLOW, "create_queues %p", dev);
total += cmds * sizeof(command);
total += txs * (sizeof(tx_in) + sizeof(tx_out));
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));
memory = kmalloc (total, GFP_KERNEL);
if (!memory) {
PRINTK (KERN_ERR, "could not allocate queues");
return -ENOMEM;
}
if (check_area (memory, total)) {
PRINTK (KERN_ERR, "queues allocated in nasty area");
kfree (memory);
return -ENOMEM;
}
limit = memory + total;
PRINTD (DBG_INIT, "queues from %p to %p", memory, limit);
PRINTD (DBG_CMD, "command queue at %p", memory);
{
command * cmd = memory;
amb_cq * cq = &dev->cq;
cq->pending = 0;
cq->high = 0;
cq->maximum = cmds - 1;
cq->ptrs.start = cmd;
cq->ptrs.in = cmd;
cq->ptrs.out = cmd;
cq->ptrs.limit = cmd + cmds;
memory = cq->ptrs.limit;
}
PRINTD (DBG_TX, "TX queue pair at %p", memory);
{
tx_in * in = memory;
tx_out * out;
amb_txq * txq = &dev->txq;
txq->pending = 0;
txq->high = 0;
txq->filled = 0;
txq->maximum = txs - 1;
txq->in.start = in;
txq->in.ptr = in;
txq->in.limit = in + txs;
memory = txq->in.limit;
out = memory;
txq->out.start = out;
txq->out.ptr = out;
txq->out.limit = out + txs;
memory = txq->out.limit;
}
PRINTD (DBG_RX, "RX queue pairs at %p", memory);
for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
rx_in * in = memory;
rx_out * out;
amb_rxq * rxq = &dev->rxq[pool];
rxq->buffer_size = rx_buffer_sizes[pool];
rxq->buffers_wanted = 0;
rxq->pending = 0;
rxq->low = rxs[pool] - 1;
rxq->emptied = 0;
rxq->maximum = rxs[pool] - 1;
rxq->in.start = in;
rxq->in.ptr = in;
rxq->in.limit = in + rxs[pool];
memory = rxq->in.limit;
out = memory;
rxq->out.start = out;
rxq->out.ptr = out;
rxq->out.limit = out + rxs[pool];
memory = rxq->out.limit;
}
if (memory == limit) {
return 0;
} else {
PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit);
kfree (limit - total);
return -ENOMEM;
}
}
/********** destruction of communication queues **********/
static void destroy_queues (amb_dev * dev) {
// all queues assumed empty
void * memory = dev->cq.ptrs.start;
// includes txq.in, txq.out, rxq[].in and rxq[].out
PRINTD (DBG_FLOW, "destroy_queues %p", dev);
PRINTD (DBG_INIT, "freeing queues at %p", memory);
kfree (memory);
return;
}
/********** basic loader commands and error handling **********/
// centisecond timeouts - guessing away here
static unsigned int command_timeouts [] = {
[host_memory_test] = 15,
[read_adapter_memory] = 2,
[write_adapter_memory] = 2,
[adapter_start] = 50,
[get_version_number] = 10,
[interrupt_host] = 1,
[flash_erase_sector] = 1,
[adap_download_block] = 1,
[adap_erase_flash] = 1,
[adap_run_in_iram] = 1,
[adap_end_download] = 1
};
static unsigned int command_successes [] = {
[host_memory_test] = COMMAND_PASSED_TEST,
[read_adapter_memory] = COMMAND_READ_DATA_OK,
[write_adapter_memory] = COMMAND_WRITE_DATA_OK,
[adapter_start] = COMMAND_COMPLETE,
[get_version_number] = COMMAND_COMPLETE,
[interrupt_host] = COMMAND_COMPLETE,
[flash_erase_sector] = COMMAND_COMPLETE,
[adap_download_block] = COMMAND_COMPLETE,
[adap_erase_flash] = COMMAND_COMPLETE,
[adap_run_in_iram] = COMMAND_COMPLETE,
[adap_end_download] = COMMAND_COMPLETE
};
static int decode_loader_result (loader_command cmd, u32 result)
{
int res;
const char *msg;
if (result == command_successes[cmd])
return 0;
switch (result) {
case BAD_COMMAND:
res = -EINVAL;
msg = "bad command";
break;
case COMMAND_IN_PROGRESS:
res = -ETIMEDOUT;
msg = "command in progress";
break;
case COMMAND_PASSED_TEST:
res = 0;
msg = "command passed test";
break;
case COMMAND_FAILED_TEST:
res = -EIO;
msg = "command failed test";
break;
case COMMAND_READ_DATA_OK:
res = 0;
msg = "command read data ok";
break;
case COMMAND_READ_BAD_ADDRESS:
res = -EINVAL;
msg = "command read bad address";
break;
case COMMAND_WRITE_DATA_OK:
res = 0;
msg = "command write data ok";
break;
case COMMAND_WRITE_BAD_ADDRESS:
res = -EINVAL;
msg = "command write bad address";
break;
case COMMAND_WRITE_FLASH_FAILURE:
res = -EIO;
msg = "command write flash failure";
break;
case COMMAND_COMPLETE:
res = 0;
msg = "command complete";
break;
case COMMAND_FLASH_ERASE_FAILURE:
res = -EIO;
msg = "command flash erase failure";
break;
case COMMAND_WRITE_BAD_DATA:
res = -EINVAL;
msg = "command write bad data";
break;
default:
res = -EINVAL;
msg = "unknown error";
PRINTD (DBG_LOAD|DBG_ERR,
"decode_loader_result got %d=%x !",
result, result);
break;
}
PRINTK (KERN_ERR, "%s", msg);
return res;
}
static int __devinit do_loader_command (volatile loader_block * lb,
const amb_dev * dev, loader_command cmd) {
unsigned long timeout;
PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command");
/* do a command
Set the return value to zero, set the command type and set the
valid entry to the right magic value. The payload is already
correctly byte-ordered so we leave it alone. Hit the doorbell
with the bus address of this structure.
*/
lb->result = 0;
lb->command = cpu_to_be32 (cmd);
lb->valid = cpu_to_be32 (DMA_VALID);
// dump_registers (dev);
// dump_loader_block (lb);
wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask);
timeout = command_timeouts[cmd] * 10;
while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS))
if (timeout) {
timeout = msleep_interruptible(timeout);
} else {
PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd);
dump_registers (dev);
dump_loader_block (lb);
return -ETIMEDOUT;
}
if (cmd == adapter_start) {
// wait for start command to acknowledge...
timeout = 100;
while (rd_plain (dev, offsetof(amb_mem, doorbell)))
if (timeout) {
timeout = msleep_interruptible(timeout);
} else {
PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x",
be32_to_cpu (lb->result));
dump_registers (dev);
return -ETIMEDOUT;
}
return 0;
} else {
return decode_loader_result (cmd, be32_to_cpu (lb->result));
}
}
/* loader: determine loader version */
static int __devinit get_loader_version (loader_block * lb,
const amb_dev * dev, u32 * version) {
int res;
PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
res = do_loader_command (lb, dev, get_version_number);
if (res)
return res;
if (version)
*version = be32_to_cpu (lb->payload.version);
return 0;
}
/* loader: write memory data blocks */
static int __devinit loader_write (loader_block* lb,
const amb_dev *dev,
const struct ihex_binrec *rec) {
transfer_block * tb = &lb->payload.transfer;
PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
tb->address = rec->addr;
tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
memcpy(tb->data, rec->data, be16_to_cpu(rec->len));
return do_loader_command (lb, dev, write_adapter_memory);
}
/* loader: verify memory data blocks */
static int __devinit loader_verify (loader_block * lb,
const amb_dev *dev,
const struct ihex_binrec *rec) {
transfer_block * tb = &lb->payload.transfer;
int res;
PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify");
tb->address = rec->addr;
tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4);
res = do_loader_command (lb, dev, read_adapter_memory);
if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len)))
res = -EINVAL;
return res;
}
/* loader: start microcode */
static int __devinit loader_start (loader_block * lb,
const amb_dev * dev, u32 address) {
PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
lb->payload.start = cpu_to_be32 (address);
return do_loader_command (lb, dev, adapter_start);
}
/********** reset card **********/
static inline void sf (const char * msg)
{
PRINTK (KERN_ERR, "self-test failed: %s", msg);
}
static int amb_reset (amb_dev * dev, int diags) {
u32 word;
PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset");
word = rd_plain (dev, offsetof(amb_mem, reset_control));
// put card into reset state
wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS);
// wait a short while
udelay (10);
#if 1
// put card into known good state
wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS);
// clear all interrupts just in case
wr_plain (dev, offsetof(amb_mem, interrupt), -1);
#endif
// clear self-test done flag
wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0);
// take card out of reset state
wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS);
if (diags) {
unsigned long timeout;
// 4.2 second wait
msleep(4200);
// half second time-out
timeout = 500;
while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready)))
if (timeout) {
timeout = msleep_interruptible(timeout);
} else {
PRINTD (DBG_LOAD|DBG_ERR, "reset timed out");
return -ETIMEDOUT;
}
// get results of self-test
// XXX double check byte-order
word = rd_mem (dev, offsetof(amb_mem, mb.loader.result));
if (word & SELF_TEST_FAILURE) {
if (word & GPINT_TST_FAILURE)
sf ("interrupt");
if (word & SUNI_DATA_PATTERN_FAILURE)
sf ("SUNI data pattern");
if (word & SUNI_DATA_BITS_FAILURE)
sf ("SUNI data bits");
if (word & SUNI_UTOPIA_FAILURE)
sf ("SUNI UTOPIA interface");
if (word & SUNI_FIFO_FAILURE)
sf ("SUNI cell buffer FIFO");
if (word & SRAM_FAILURE)
sf ("bad SRAM");
// better return value?
return -EIO;
}
}
return 0;
}
/********** transfer and start the microcode **********/
static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
const char *errmsg = NULL;
int res;
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
if (res) {
PRINTK (KERN_ERR, "Cannot load microcode data");
return res;
}
/* First record contains just the start address */
rec = (const struct ihex_binrec *)fw->data;
if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
errmsg = "no start record";
goto fail;
}
start_address = be32_to_cpup((__be32 *)rec->data);
rec = ihex_next_binrec(rec);
PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init");
while (rec) {
PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
be16_to_cpu(rec->len));
if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
errmsg = "record too long";
goto fail;
}
if (be16_to_cpu(rec->len) & 3) {
errmsg = "odd number of bytes";
goto fail;
}
res = loader_write(lb, dev, rec);
if (res)
break;
res = loader_verify(lb, dev, rec);
if (res)
break;
rec = ihex_next_binrec(rec);
}
release_firmware(fw);
if (!res)
res = loader_start(lb, dev, start_address);
return res;
fail:
release_firmware(fw);
PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
return -EINVAL;
}
/********** give adapter parameters **********/
static inline __be32 bus_addr(void * addr) {
return cpu_to_be32 (virt_to_bus (addr));
}
static int __devinit amb_talk (amb_dev * dev) {
adap_talk_block a;
unsigned char pool;
unsigned long timeout;
PRINTD (DBG_FLOW, "amb_talk %p", dev);
a.command_start = bus_addr (dev->cq.ptrs.start);
a.command_end = bus_addr (dev->cq.ptrs.limit);
a.tx_start = bus_addr (dev->txq.in.start);
a.tx_end = bus_addr (dev->txq.in.limit);
a.txcom_start = bus_addr (dev->txq.out.start);
a.txcom_end = bus_addr (dev->txq.out.limit);
for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
// the other "a" items are set up by the adapter
a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start);
a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit);
a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start);
a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit);
a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
}
#ifdef AMB_NEW_MICROCODE
// disable fast PLX prefetching
a.init_flags = 0;
#endif
// pass the structure
wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a));
// 2.2 second wait (must not touch doorbell during 2 second DMA test)
msleep(2200);
// give the adapter another half second?
timeout = 500;
while (rd_plain (dev, offsetof(amb_mem, doorbell)))
if (timeout) {
timeout = msleep_interruptible(timeout);
} else {
PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out");
return -ETIMEDOUT;
}
return 0;
}
// get microcode version
static void __devinit amb_ucode_version (amb_dev * dev) {
u32 major;
u32 minor;
command cmd;
cmd.request = cpu_to_be32 (SRB_GET_VERSION);
while (command_do (dev, &cmd)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
major = be32_to_cpu (cmd.args.version.major);
minor = be32_to_cpu (cmd.args.version.minor);
PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor);
}
// get end station address
static void __devinit amb_esi (amb_dev * dev, u8 * esi) {
u32 lower4;
u16 upper2;
command cmd;
cmd.request = cpu_to_be32 (SRB_GET_BIA);
while (command_do (dev, &cmd)) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
lower4 = be32_to_cpu (cmd.args.bia.lower4);
upper2 = be32_to_cpu (cmd.args.bia.upper2);
PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2);
if (esi) {
unsigned int i;
PRINTDB (DBG_INIT, "ESI:");
for (i = 0; i < ESI_LEN; ++i) {
if (i < 4)
esi[i] = bitrev8(lower4>>(8*i));
else
esi[i] = bitrev8(upper2>>(8*(i-4)));
PRINTDM (DBG_INIT, " %02x", esi[i]);
}
PRINTDE (DBG_INIT, "");
}
return;
}
static void fixup_plx_window (amb_dev *dev, loader_block *lb)
{
// fix up the PLX-mapped window base address to match the block
unsigned long blb;
u32 mapreg;
blb = virt_to_bus(lb);
// the kernel stack had better not ever cross a 1Gb boundary!
mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10]));
mapreg &= ~onegigmask;
mapreg |= blb & onegigmask;
wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg);
return;
}
static int __devinit amb_init (amb_dev * dev)
{
loader_block lb;
u32 version;
if (amb_reset (dev, 1)) {
PRINTK (KERN_ERR, "card reset failed!");
} else {
fixup_plx_window (dev, &lb);
if (get_loader_version (&lb, dev, &version)) {
PRINTK (KERN_INFO, "failed to get loader version");
} else {
PRINTK (KERN_INFO, "loader version is %08x", version);
if (ucode_init (&lb, dev)) {
PRINTK (KERN_ERR, "microcode failure");
} else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) {
PRINTK (KERN_ERR, "failed to get memory for queues");
} else {
if (amb_talk (dev)) {
PRINTK (KERN_ERR, "adapter did not accept queues");
} else {
amb_ucode_version (dev);
return 0;
} /* amb_talk */
destroy_queues (dev);
} /* create_queues, ucode_init */
amb_reset (dev, 0);
} /* get_loader_version */
} /* amb_reset */
return -EINVAL;
}
static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
{
unsigned char pool;
// set up known dev items straight away
dev->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, dev);
dev->iobase = pci_resource_start (pci_dev, 1);
dev->irq = pci_dev->irq;
dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0));
// flags (currently only dead)
dev->flags = 0;
// Allocate cell rates (fibre)
// ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53 (= 353207 cells/s)
// to be really pedantic, this should be ATM_OC3c_PCR
dev->tx_avail = ATM_OC3_PCR;
dev->rx_avail = ATM_OC3_PCR;
// semaphore for txer/rxer modifications - we cannot use a
// spinlock as the critical region needs to switch processes
mutex_init(&dev->vcc_sf);
// queue manipulation spinlocks; we want atomic reads and
// writes to the queue descriptors (handles IRQ and SMP)
// consider replacing "int pending" -> "atomic_t available"
// => problem related to who gets to move queue pointers
spin_lock_init (&dev->cq.lock);
spin_lock_init (&dev->txq.lock);
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
spin_lock_init (&dev->rxq[pool].lock);
}
static void setup_pci_dev(struct pci_dev *pci_dev)
{
unsigned char lat;
// enable bus master accesses
pci_set_master(pci_dev);
// frobnicate latency (upwards, usually)
pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
if (!pci_lat)
pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat;
if (lat != pci_lat) {
PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu",
lat, pci_lat);
pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
}
}
static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
amb_dev * dev;
int err;
unsigned int irq;
err = pci_enable_device(pci_dev);
if (err < 0) {
PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
goto out;
}
// read resources from PCI configuration space
irq = pci_dev->irq;
if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) {
PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
err = -EINVAL;
goto out_disable;
}
PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at"
" IO %llx, IRQ %u, MEM %p",
(unsigned long long)pci_resource_start(pci_dev, 1),
irq, bus_to_virt(pci_resource_start(pci_dev, 0)));
// check IO region
err = pci_request_region(pci_dev, 1, DEV_LABEL);
if (err < 0) {
PRINTK (KERN_ERR, "IO range already in use!");
goto out_disable;
}
dev = kzalloc(sizeof(amb_dev), GFP_KERNEL);
if (!dev) {
PRINTK (KERN_ERR, "out of memory!");
err = -ENOMEM;
goto out_release;
}
setup_dev(dev, pci_dev);
err = amb_init(dev);
if (err < 0) {
PRINTK (KERN_ERR, "adapter initialisation failure");
goto out_free;
}
setup_pci_dev(pci_dev);
// grab (but share) IRQ and install handler
err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev);
if (err < 0) {
PRINTK (KERN_ERR, "request IRQ failed!");
goto out_reset;
}
dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1,
NULL);
if (!dev->atm_dev) {
PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
err = -EINVAL;
goto out_free_irq;
}
PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
dev->atm_dev->number, dev, dev->atm_dev);
dev->atm_dev->dev_data = (void *) dev;
// register our address
amb_esi (dev, dev->atm_dev->esi);
// 0 bits for vpi, 10 bits for vci
dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
init_timer(&dev->housekeeping);
dev->housekeeping.function = do_housekeeping;
dev->housekeeping.data = (unsigned long) dev;
mod_timer(&dev->housekeeping, jiffies);
// enable host interrupts
interrupts_on (dev);
out:
return err;
out_free_irq:
free_irq(irq, dev);
out_reset:
amb_reset(dev, 0);
out_free:
kfree(dev);
out_release:
pci_release_region(pci_dev, 1);
out_disable:
pci_disable_device(pci_dev);
goto out;
}
static void __devexit amb_remove_one(struct pci_dev *pci_dev)
{
struct amb_dev *dev;
dev = pci_get_drvdata(pci_dev);
PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
del_timer_sync(&dev->housekeeping);
// the drain should not be necessary
drain_rx_pools(dev);
interrupts_off(dev);
amb_reset(dev, 0);
free_irq(dev->irq, dev);
pci_disable_device(pci_dev);
destroy_queues(dev);
atm_dev_deregister(dev->atm_dev);
kfree(dev);
pci_release_region(pci_dev, 1);
}
static void __init amb_check_args (void) {
unsigned char pool;
unsigned int max_rx_size;
#ifdef DEBUG_AMBASSADOR
PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
#else
if (debug)
PRINTK (KERN_NOTICE, "no debugging support");
#endif
if (cmds < MIN_QUEUE_SIZE)
PRINTK (KERN_NOTICE, "cmds has been raised to %u",
cmds = MIN_QUEUE_SIZE);
if (txs < MIN_QUEUE_SIZE)
PRINTK (KERN_NOTICE, "txs has been raised to %u",
txs = MIN_QUEUE_SIZE);
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
if (rxs[pool] < MIN_QUEUE_SIZE)
PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u",
pool, rxs[pool] = MIN_QUEUE_SIZE);
// buffers sizes should be greater than zero and strictly increasing
max_rx_size = 0;
for (pool = 0; pool < NUM_RX_POOLS; ++pool)
if (rxs_bs[pool] <= max_rx_size)
PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)",
pool, rxs_bs[pool]);
else
max_rx_size = rxs_bs[pool];
if (rx_lats < MIN_RX_BUFFERS)
PRINTK (KERN_NOTICE, "rx_lats has been raised to %u",
rx_lats = MIN_RX_BUFFERS);
return;
}
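/*
 * Worked example of the buffer-size rule enforced above (illustrative
 * values only, assuming four RX pools): with rxs_bs = { 64, 64, 512,
 * 2048 } the second pool is reported as useless because 64 <= 64 is
 * not strictly increasing, while the third and fourth pools pass
 * because 512 > 64 and 2048 > 512.
 */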
/********** module stuff **********/
MODULE_AUTHOR(maintainer_string);
MODULE_DESCRIPTION(description_string);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("atmsar11.fw");
module_param(debug, ushort, 0644);
module_param(cmds, uint, 0);
module_param(txs, uint, 0);
module_param_array(rxs, uint, NULL, 0);
module_param_array(rxs_bs, uint, NULL, 0);
module_param(rx_lats, uint, 0);
module_param(pci_lat, byte, 0);
MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
MODULE_PARM_DESC(cmds, "number of command queue entries");
MODULE_PARM_DESC(txs, "number of TX queue entries");
MODULE_PARM_DESC(rxs, "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]");
MODULE_PARM_DESC(rxs_bs, "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]");
MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies");
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
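/*
 * A minimal load-time sketch of the parameters declared above (the
 * module name and the numeric values are assumptions for illustration,
 * not recommended settings):
 *
 *   modprobe ambassador debug=0x04 cmds=16 txs=32 \
 *            rxs=64,64,64,64 rxs_bs=128,256,512,1024 rx_lats=10
 *
 * rxs and rxs_bs take one comma-separated entry per RX pool.
 */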
/********** module entry **********/
static struct pci_device_id amb_pci_tbl[] = {
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, amb_pci_tbl);
static struct pci_driver amb_driver = {
.name = "amb",
.probe = amb_probe,
.remove = __devexit_p(amb_remove_one),
.id_table = amb_pci_tbl,
};
static int __init amb_module_init (void)
{
PRINTD (DBG_FLOW|DBG_INIT, "init_module");
// sanity check - cast needed as printk does not support %Zu
if (sizeof(amb_mem) != 4*16 + 4*12) {
PRINTK (KERN_ERR, "Fix amb_mem (is %lu words).",
(unsigned long) sizeof(amb_mem));
return -ENOMEM;
}
show_version();
amb_check_args();
// get the juice
return pci_register_driver(&amb_driver);
}
/********** module exit **********/
static void __exit amb_module_exit (void)
{
PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module");
pci_unregister_driver(&amb_driver);
}
module_init(amb_module_init);
module_exit(amb_module_exit);
| gpl-2.0 |
godmachine81/AsusTF300-Enhanced-Kernel | arch/mips/pmc-sierra/msp71xx/gpio_extended.c | 11306 | 4761 | /*
* Generic PMC MSP71xx EXTENDED (EXD) GPIO handling. The extended gpios
* are controlled through a set of hardware registers that need no
* explicit locking, because each update is a single write of individual
* set/clear bits.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* @author Patrick Glass <patrickglass@gmail.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/io.h>
#define MSP71XX_DATA_OFFSET(gpio) (2 * (gpio))
#define MSP71XX_READ_OFFSET(gpio) (MSP71XX_DATA_OFFSET(gpio) + 1)
#define MSP71XX_CFG_OUT_OFFSET(gpio) (MSP71XX_DATA_OFFSET(gpio) + 16)
#define MSP71XX_CFG_IN_OFFSET(gpio) (MSP71XX_CFG_OUT_OFFSET(gpio) + 1)
#define MSP71XX_EXD_GPIO_BASE 0x0BC000000L
#define to_msp71xx_exd_gpio_chip(c) \
container_of(c, struct msp71xx_exd_gpio_chip, chip)
/*
* struct msp71xx_exd_gpio_chip - container for gpio chip and registers
* @chip: chip structure for the specified gpio bank
* @reg: register for control and data of gpio pin
*/
struct msp71xx_exd_gpio_chip {
struct gpio_chip chip;
void __iomem *reg;
};
/*
* msp71xx_exd_gpio_get() - return the chip's gpio value
* @chip: chip structure which controls the specified gpio
* @offset: gpio whose value will be returned
*
* It returns 0 if the gpio value is low and non-zero if it is high.
*/
static int msp71xx_exd_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct msp71xx_exd_gpio_chip *msp71xx_chip =
to_msp71xx_exd_gpio_chip(chip);
const unsigned bit = MSP71XX_READ_OFFSET(offset);
return __raw_readl(msp71xx_chip->reg) & (1 << bit);
}
/*
* msp71xx_exd_gpio_set() - set the output value for the gpio
* @chip: chip structure which controls the specified gpio
* @offset: gpio whose value will be assigned
* @value: logic level to assign to the gpio initially
*
* This will set the gpio bit specified to the desired value. It will set the
* gpio pin low if value is 0 otherwise it will be high.
*/
static void msp71xx_exd_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
struct msp71xx_exd_gpio_chip *msp71xx_chip =
to_msp71xx_exd_gpio_chip(chip);
const unsigned bit = MSP71XX_DATA_OFFSET(offset);
__raw_writel(1 << (bit + (value ? 1 : 0)), msp71xx_chip->reg);
}
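/*
 * Worked example of the set/clear encoding used above: for offset 3,
 * MSP71XX_DATA_OFFSET(3) is 6, so a non-zero value writes (1 << 7)
 * and a zero value writes (1 << 6) - each a single register write,
 * with no read-modify-write cycle and hence no need for a lock.
 */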
/*
* msp71xx_exd_direction_output() - declare the direction mode for a gpio
* @chip: chip structure which controls the specified gpio
* @offset: gpio whose value will be assigned
* @value: logic level to assign to the gpio initially
*
* This call will set the mode for the @gpio to output. It will set the
* gpio pin low if value is 0 otherwise it will be high.
*/
static int msp71xx_exd_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct msp71xx_exd_gpio_chip *msp71xx_chip =
to_msp71xx_exd_gpio_chip(chip);
msp71xx_exd_gpio_set(chip, offset, value);
__raw_writel(1 << MSP71XX_CFG_OUT_OFFSET(offset), msp71xx_chip->reg);
return 0;
}
/*
* msp71xx_exd_direction_input() - declare the direction mode for a gpio
* @chip: chip structure which controls the specified gpio
* @offset: gpio whose direction will be set to input
*
* This call will set the mode for the @gpio to input.
*/
static int msp71xx_exd_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct msp71xx_exd_gpio_chip *msp71xx_chip =
to_msp71xx_exd_gpio_chip(chip);
__raw_writel(1 << MSP71XX_CFG_IN_OFFSET(offset), msp71xx_chip->reg);
return 0;
}
#define MSP71XX_EXD_GPIO_BANK(name, exd_reg, base_gpio, num_gpio) \
{ \
.chip = { \
.label = name, \
.direction_input = msp71xx_exd_direction_input, \
.direction_output = msp71xx_exd_direction_output, \
.get = msp71xx_exd_gpio_get, \
.set = msp71xx_exd_gpio_set, \
.base = base_gpio, \
.ngpio = num_gpio, \
}, \
.reg = (void __iomem *)(MSP71XX_EXD_GPIO_BASE + exd_reg), \
}
/*
* struct msp71xx_exd_gpio_banks[] - container array of gpio banks
* @chip: chip structure for the specified gpio bank
* @reg: register for reading and writing the gpio pin value
*
* This array defines the extended gpio banks for the PMC MIPS
* processor. Each entry specifies the bank name, the data/config
* register offset, the base gpio number, and the number of gpios
* exposed by the bank.
*/
static struct msp71xx_exd_gpio_chip msp71xx_exd_gpio_banks[] = {
MSP71XX_EXD_GPIO_BANK("GPIO_23_16", 0x188, 16, 8),
MSP71XX_EXD_GPIO_BANK("GPIO_27_24", 0x18C, 24, 4),
};
void __init msp71xx_init_gpio_extended(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(msp71xx_exd_gpio_banks); i++)
gpiochip_add(&msp71xx_exd_gpio_banks[i].chip);
}
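/*
 * A minimal consumer sketch using the legacy gpiolib calls of this
 * kernel generation (gpio number 17 is only an example; it falls in
 * the GPIO_23_16 bank registered above):
 *
 *	if (gpio_request(17, "example") == 0) {
 *		gpio_direction_output(17, 1);	// drive the pin high
 *		gpio_set_value(17, 0);		// then drive it low
 *		gpio_free(17);
 *	}
 */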
| gpl-2.0 |
DennisBold/CodeAurora-MSM-Kernel | arch/sh/boards/mach-landisk/gio.c | 12074 | 3422 | /*
* arch/sh/boards/landisk/gio.c - driver for landisk
*
* This driver will also support the I-O DATA Device, Inc. LANDISK Board.
* LANDISK and USL-5P Button, LED and GIO driver drive function.
*
* Copyright (C) 2006 kogiidena
* Copyright (C) 2002 Atom Create Engineering Co., Ltd. *
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <mach-landisk/mach/gio.h>
#include <mach-landisk/mach/iodata_landisk.h>
#define DEVCOUNT 4
#define GIO_MINOR 2 /* GIO minor no. */
static dev_t dev;
static struct cdev *cdev_p;
static int openCnt;
static int gio_open(struct inode *inode, struct file *filp)
{
int minor;
int ret = -ENOENT;
preempt_disable();
minor = MINOR(inode->i_rdev);
if (minor < DEVCOUNT) {
if (openCnt > 0) {
ret = -EALREADY;
} else {
openCnt++;
ret = 0;
}
}
preempt_enable();
return ret;
}
static int gio_close(struct inode *inode, struct file *filp)
{
int minor;
minor = MINOR(inode->i_rdev);
if (minor < DEVCOUNT) {
openCnt--;
}
return 0;
}
static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int data;
static unsigned int addr = 0;
if (cmd & 0x01) { /* write */
if (copy_from_user(&data, (int *)arg, sizeof(int))) {
return -EFAULT;
}
}
switch (cmd) {
case GIODRV_IOCSGIOSETADDR: /* address set */
addr = data;
break;
case GIODRV_IOCSGIODATA1: /* write byte */
__raw_writeb((unsigned char)(0x0ff & data), addr);
break;
case GIODRV_IOCSGIODATA2: /* write word */
if (addr & 0x01) {
return -EFAULT;
}
__raw_writew((unsigned short int)(0x0ffff & data), addr);
break;
case GIODRV_IOCSGIODATA4: /* write long */
if (addr & 0x03) {
return -EFAULT;
}
__raw_writel(data, addr);
break;
case GIODRV_IOCGGIODATA1: /* read byte */
data = __raw_readb(addr);
break;
case GIODRV_IOCGGIODATA2: /* read word */
if (addr & 0x01) {
return -EFAULT;
}
data = __raw_readw(addr);
break;
case GIODRV_IOCGGIODATA4: /* read long */
if (addr & 0x03) {
return -EFAULT;
}
data = __raw_readl(addr);
break;
default:
return -EFAULT;
break;
}
if ((cmd & 0x01) == 0) { /* read */
if (copy_to_user((int *)arg, &data, sizeof(int))) {
return -EFAULT;
}
}
return 0;
}
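/*
 * A minimal user-space sketch of the ioctl interface above. The device
 * path and the register address are assumptions for illustration; the
 * GIODRV_* names come from the board's gio.h header.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/dev/gio0", O_RDWR);		// path is an assumption
 *	unsigned int addr = 0xb0000000;			// example register address
 *	unsigned int val;
 *	ioctl(fd, GIODRV_IOCSGIOSETADDR, &addr);	// latch the address
 *	ioctl(fd, GIODRV_IOCGGIODATA1, &val);		// read one byte back
 *	close(fd);
 */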
static const struct file_operations gio_fops = {
.owner = THIS_MODULE,
.open = gio_open, /* open */
.release = gio_close, /* release */
.unlocked_ioctl = gio_ioctl,
.llseek = noop_llseek,
};
static int __init gio_init(void)
{
int error;
printk(KERN_INFO "gio: driver initialized\n");
openCnt = 0;
if ((error = alloc_chrdev_region(&dev, 0, DEVCOUNT, "gio")) < 0) {
printk(KERN_ERR
"gio: Couldn't alloc_chrdev_region, error=%d\n",
error);
return 1;
}
cdev_p = cdev_alloc();
cdev_p->ops = &gio_fops;
error = cdev_add(cdev_p, dev, DEVCOUNT);
if (error) {
printk(KERN_ERR
"gio: Couldn't cdev_add, error=%d\n", error);
return 1;
}
return 0;
}
static void __exit gio_exit(void)
{
cdev_del(cdev_p);
unregister_chrdev_region(dev, DEVCOUNT);
}
module_init(gio_init);
module_exit(gio_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
a0u/binutils-gdb | gold/testsuite/start_lib_test_main.c | 43 | 1118 | /* start_lib_test_main.c -- test --start-lib/--end-lib.
Copyright (C) 2010-2015 Free Software Foundation, Inc.
Written by Cary Coutant <ccoutant@google.com>
This file is part of gold.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
MA 02110-1301, USA.
This is a test of the --start-lib and --end-lib options. */
extern void t1 (void);
int
main (int argc __attribute__ ((unused)),
char** argv __attribute__ ((unused)))
{
t1 ();
return 0;
}
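/* An illustrative link line for this test (object file names are
   placeholders): the objects between the two options are treated like
   archive members, so a definition of t1 is only pulled in because
   main references it.

     gcc -fuse-ld=gold start_lib_test_main.o \
         -Wl,--start-lib start_lib_test_1.o start_lib_test_2.o -Wl,--end-lib \
         -o start_lib_test  */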
| gpl-2.0 |
x86-8/linux-3.7 | sound/soc/fsl/eukrea-tlv320.c | 43 | 4641 | /*
* eukrea-tlv320.c -- SoC audio for eukrea_cpuimxXX in I2S mode
*
* Copyright 2010 Eric Bénard, Eukréa Electromatique <eric@eukrea.com>
*
* based on sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
* which is Copyright 2009 Simtec Electronics
* and on sound/soc/imx/phycore-ac97.c which is
* Copyright 2009 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include "../codecs/tlv320aic23.h"
#include "imx-ssi.h"
#include "imx-audmux.h"
#define CODEC_CLOCK 12000000
static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret;
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret) {
pr_err("%s: failed set cpu dai format\n", __func__);
return ret;
}
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret) {
pr_err("%s: failed set codec dai format\n", __func__);
return ret;
}
ret = snd_soc_dai_set_sysclk(codec_dai, 0,
CODEC_CLOCK, SND_SOC_CLOCK_OUT);
if (ret) {
pr_err("%s: failed setting codec sysclk\n", __func__);
return ret;
}
snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
ret = snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0,
SND_SOC_CLOCK_IN);
if (ret) {
pr_err("can't set CPU system clock IMX_SSP_SYS_CLK\n");
return ret;
}
return 0;
}
static struct snd_soc_ops eukrea_tlv320_snd_ops = {
.hw_params = eukrea_tlv320_hw_params,
};
static struct snd_soc_dai_link eukrea_tlv320_dai = {
.name = "tlv320aic23",
.stream_name = "TLV320AIC23",
.codec_dai_name = "tlv320aic23-hifi",
.platform_name = "imx-fiq-pcm-audio.0",
.codec_name = "tlv320aic23-codec.0-001a",
.cpu_dai_name = "imx-ssi.0",
.ops = &eukrea_tlv320_snd_ops,
};
static struct snd_soc_card eukrea_tlv320 = {
.name = "cpuimx-audio",
.owner = THIS_MODULE,
.dai_link = &eukrea_tlv320_dai,
.num_links = 1,
};
static int __devinit eukrea_tlv320_probe(struct platform_device *pdev)
{
int ret;
int int_port = 0, ext_port;
if (machine_is_eukrea_cpuimx27()) {
imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
IMX_AUDMUX_V1_PCR_SYN |
IMX_AUDMUX_V1_PCR_TFSDIR |
IMX_AUDMUX_V1_PCR_TCLKDIR |
IMX_AUDMUX_V1_PCR_RFSDIR |
IMX_AUDMUX_V1_PCR_RCLKDIR |
IMX_AUDMUX_V1_PCR_TFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) |
IMX_AUDMUX_V1_PCR_RFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) |
IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4)
);
imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR3_SSI_PINS_4,
IMX_AUDMUX_V1_PCR_SYN |
IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0)
);
} else if (machine_is_eukrea_cpuimx25sd() ||
machine_is_eukrea_cpuimx35sd() ||
machine_is_eukrea_cpuimx51sd()) {
ext_port = machine_is_eukrea_cpuimx25sd() ? 4 : 3;
imx_audmux_v2_configure_port(int_port,
IMX_AUDMUX_V2_PTCR_SYN |
IMX_AUDMUX_V2_PTCR_TFSDIR |
IMX_AUDMUX_V2_PTCR_TFSEL(ext_port) |
IMX_AUDMUX_V2_PTCR_TCLKDIR |
IMX_AUDMUX_V2_PTCR_TCSEL(ext_port),
IMX_AUDMUX_V2_PDCR_RXDSEL(ext_port)
);
imx_audmux_v2_configure_port(ext_port,
IMX_AUDMUX_V2_PTCR_SYN,
IMX_AUDMUX_V2_PDCR_RXDSEL(int_port)
);
} else {
/* return happy. We might run on a totally different machine */
return 0;
}
eukrea_tlv320.dev = &pdev->dev;
ret = snd_soc_register_card(&eukrea_tlv320);
if (ret)
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
return ret;
}
static int __devexit eukrea_tlv320_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&eukrea_tlv320);
return 0;
}
static struct platform_driver eukrea_tlv320_driver = {
.driver = {
.name = "eukrea_tlv320",
.owner = THIS_MODULE,
},
.probe = eukrea_tlv320_probe,
.remove = __devexit_p(eukrea_tlv320_remove),
};
module_platform_driver(eukrea_tlv320_driver);
MODULE_AUTHOR("Eric Bénard <eric@eukrea.com>");
MODULE_DESCRIPTION("CPUIMX ALSA SoC driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:eukrea_tlv320");
| gpl-2.0 |
shelan/jdk9-mirror | jdk/src/java.desktop/unix/native/libjsound/PLATFORM_API_LinuxOS_ALSA_MidiUtils.c | 43 | 17607 | /*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#define USE_ERROR
#define USE_TRACE
#include "PLATFORM_API_LinuxOS_ALSA_MidiUtils.h"
#include "PLATFORM_API_LinuxOS_ALSA_CommonUtils.h"
#include <string.h>
#include <sys/time.h>
static INT64 getTimeInMicroseconds() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000UL) + tv.tv_usec;
}
const char* getErrorStr(INT32 err) {
return snd_strerror((int) err);
}
// callback for iteration through devices
// returns TRUE if iteration should continue
typedef int (*DeviceIteratorPtr)(UINT32 deviceID,
snd_rawmidi_info_t* rawmidi_info,
snd_ctl_card_info_t* cardinfo,
void *userData);
// for each ALSA device, call iterator. userData is passed to the iterator
// returns total number of iterations
static int iterateRawmidiDevices(snd_rawmidi_stream_t direction,
DeviceIteratorPtr iterator,
void* userData) {
int count = 0;
int subdeviceCount;
int card, dev, subDev;
char devname[16];
int err;
snd_ctl_t *handle;
snd_rawmidi_t *rawmidi;
snd_rawmidi_info_t *rawmidi_info;
snd_ctl_card_info_t *card_info, *defcardinfo = NULL;
UINT32 deviceID;
int doContinue = TRUE;
snd_rawmidi_info_malloc(&rawmidi_info);
snd_ctl_card_info_malloc(&card_info);
// 1st try "default" device
if (direction == SND_RAWMIDI_STREAM_INPUT) {
err = snd_rawmidi_open(&rawmidi, NULL, ALSA_DEFAULT_DEVICE_NAME,
SND_RAWMIDI_NONBLOCK);
} else if (direction == SND_RAWMIDI_STREAM_OUTPUT) {
err = snd_rawmidi_open(NULL, &rawmidi, ALSA_DEFAULT_DEVICE_NAME,
SND_RAWMIDI_NONBLOCK);
} else {
ERROR0("ERROR: iterateRawmidiDevices(): direction is neither"
" SND_RAWMIDI_STREAM_INPUT nor SND_RAWMIDI_STREAM_OUTPUT\n");
err = MIDI_INVALID_ARGUMENT;
}
if (err < 0) {
ERROR1("ERROR: snd_rawmidi_open (\"default\"): %s\n",
snd_strerror(err));
} else {
err = snd_rawmidi_info(rawmidi, rawmidi_info);
snd_rawmidi_close(rawmidi);
if (err < 0) {
ERROR1("ERROR: snd_rawmidi_info (\"default\"): %s\n",
snd_strerror(err));
} else {
// try to get card info
card = snd_rawmidi_info_get_card(rawmidi_info);
if (card >= 0) {
sprintf(devname, ALSA_HARDWARE_CARD, card);
if (snd_ctl_open(&handle, devname, SND_CTL_NONBLOCK) >= 0) {
if (snd_ctl_card_info(handle, card_info) >= 0) {
defcardinfo = card_info;
}
snd_ctl_close(handle);
}
}
// call callback function for the device
if (iterator != NULL) {
doContinue = (*iterator)(ALSA_DEFAULT_DEVICE_ID, rawmidi_info,
defcardinfo, userData);
}
count++;
}
}
// iterate cards
card = -1;
TRACE0("testing for cards...\n");
if (snd_card_next(&card) >= 0) {
TRACE1("Found card %d\n", card);
while (doContinue && (card >= 0)) {
sprintf(devname, ALSA_HARDWARE_CARD, card);
TRACE1("Opening control for alsa rawmidi device \"%s\"...\n", devname);
err = snd_ctl_open(&handle, devname, SND_CTL_NONBLOCK);
if (err < 0) {
ERROR2("ERROR: snd_ctl_open, card=%d: %s\n", card, snd_strerror(err));
} else {
TRACE0("snd_ctl_open() SUCCESS\n");
err = snd_ctl_card_info(handle, card_info);
if (err < 0) {
ERROR2("ERROR: snd_ctl_card_info, card=%d: %s\n", card, snd_strerror(err));
} else {
TRACE0("snd_ctl_card_info() SUCCESS\n");
dev = -1;
while (doContinue) {
if (snd_ctl_rawmidi_next_device(handle, &dev) < 0) {
ERROR0("snd_ctl_rawmidi_next_device\n");
}
TRACE0("snd_ctl_rawmidi_next_device() SUCCESS\n");
if (dev < 0) {
break;
}
snd_rawmidi_info_set_device(rawmidi_info, dev);
snd_rawmidi_info_set_subdevice(rawmidi_info, 0);
snd_rawmidi_info_set_stream(rawmidi_info, direction);
err = snd_ctl_rawmidi_info(handle, rawmidi_info);
TRACE0("after snd_ctl_rawmidi_info()\n");
if (err < 0) {
if (err != -ENOENT) {
ERROR2("ERROR: snd_ctl_rawmidi_info, card=%d: %s", card, snd_strerror(err));
}
} else {
TRACE0("snd_ctl_rawmidi_info() SUCCESS\n");
subdeviceCount = needEnumerateSubdevices(ALSA_RAWMIDI)
? snd_rawmidi_info_get_subdevices_count(rawmidi_info)
: 1;
if (iterator!=NULL) {
for (subDev = 0; subDev < subdeviceCount; subDev++) {
TRACE3(" Iterating %d,%d,%d\n", card, dev, subDev);
deviceID = encodeDeviceID(card, dev, subDev);
doContinue = (*iterator)(deviceID, rawmidi_info,
card_info, userData);
count++;
TRACE0("returned from iterator\n");
if (!doContinue) {
break;
}
}
} else {
count += subdeviceCount;
}
}
} // of while(doContinue)
}
snd_ctl_close(handle);
}
if (snd_card_next(&card) < 0) {
break;
}
}
} else {
ERROR0("No cards found!\n");
}
snd_ctl_card_info_free(card_info);
snd_rawmidi_info_free(rawmidi_info);
return count;
}
int getMidiDeviceCount(snd_rawmidi_stream_t direction) {
int deviceCount;
TRACE0("> getMidiDeviceCount()\n");
initAlsaSupport();
deviceCount = iterateRawmidiDevices(direction, NULL, NULL);
TRACE0("< getMidiDeviceCount()\n");
return deviceCount;
}
/*
userData is assumed to be a pointer to ALSA_MIDIDeviceDescription.
ALSA_MIDIDeviceDescription->index has to be set to the index of the device
we want to get information of before this method is called the first time via
iterateRawmidiDevices(). On each call of this method,
ALSA_MIDIDeviceDescription->index is decremented. If it is equal to zero,
we have reached the desired device, so action is taken.
So after successful completion of iterateRawmidiDevices(),
ALSA_MIDIDeviceDescription->index is zero. If it isn't, this is an
indication of an error.
*/
static int deviceInfoIterator(UINT32 deviceID, snd_rawmidi_info_t *rawmidi_info,
snd_ctl_card_info_t *cardinfo, void *userData) {
char buffer[300];
ALSA_MIDIDeviceDescription* desc = (ALSA_MIDIDeviceDescription*)userData;
#ifdef ALSA_MIDI_USE_PLUGHW
int usePlugHw = 1;
#else
int usePlugHw = 0;
#endif
TRACE0("deviceInfoIterator\n");
initAlsaSupport();
if (desc->index == 0) {
// we found the device with correct index
desc->deviceID = deviceID;
buffer[0]=' '; buffer[1]='[';
// buffer[300] is enough to store the actual device string w/o overrun
getDeviceStringFromDeviceID(&buffer[2], deviceID, usePlugHw, ALSA_RAWMIDI);
strncat(buffer, "]", sizeof(buffer) - strlen(buffer) - 1);
strncpy(desc->name,
(cardinfo != NULL)
? snd_ctl_card_info_get_id(cardinfo)
: snd_rawmidi_info_get_id(rawmidi_info),
desc->strLen - strlen(buffer));
strncat(desc->name, buffer, desc->strLen - strlen(desc->name));
desc->description[0] = 0;
if (cardinfo != NULL) {
strncpy(desc->description, snd_ctl_card_info_get_name(cardinfo),
desc->strLen);
strncat(desc->description, ", ",
desc->strLen - strlen(desc->description));
}
strncat(desc->description, snd_rawmidi_info_get_id(rawmidi_info),
desc->strLen - strlen(desc->description));
strncat(desc->description, ", ", desc->strLen - strlen(desc->description));
strncat(desc->description, snd_rawmidi_info_get_name(rawmidi_info),
desc->strLen - strlen(desc->description));
TRACE2("Returning %s, %s\n", desc->name, desc->description);
return FALSE; // do not continue iteration
}
desc->index--;
return TRUE;
}
static int getMIDIDeviceDescriptionByIndex(snd_rawmidi_stream_t direction,
ALSA_MIDIDeviceDescription* desc) {
initAlsaSupport();
TRACE1(" getMIDIDeviceDescriptionByIndex (index = %d)\n", desc->index);
iterateRawmidiDevices(direction, &deviceInfoIterator, desc);
return (desc->index == 0) ? MIDI_SUCCESS : MIDI_INVALID_DEVICEID;
}
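/*
 A worked example of the index mechanism described above: with
 desc->index == 2, deviceInfoIterator() is called for the first device
 (index drops to 1, returns TRUE), for the second device (index drops
 to 0, returns TRUE), and for the third device it fills in desc and
 returns FALSE, so iteration stops with desc->index == 0 and
 getMIDIDeviceDescriptionByIndex() reports MIDI_SUCCESS.
*/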
int initMIDIDeviceDescription(ALSA_MIDIDeviceDescription* desc, int index) {
int ret = MIDI_SUCCESS;
desc->index = index;
desc->strLen = 200;
desc->name = (char*) calloc(desc->strLen + 1, 1);
desc->description = (char*) calloc(desc->strLen + 1, 1);
if (! desc->name ||
! desc->description) {
ret = MIDI_OUT_OF_MEMORY;
}
return ret;
}
void freeMIDIDeviceDescription(ALSA_MIDIDeviceDescription* desc) {
if (desc->name) {
free(desc->name);
}
if (desc->description) {
free(desc->description);
}
}
int getMidiDeviceName(snd_rawmidi_stream_t direction, int index, char *name,
UINT32 nameLength) {
ALSA_MIDIDeviceDescription desc;
int ret;
TRACE1("getMidiDeviceName: nameLength: %d\n", (int) nameLength);
ret = initMIDIDeviceDescription(&desc, index);
if (ret == MIDI_SUCCESS) {
TRACE0("getMidiDeviceName: initMIDIDeviceDescription() SUCCESS\n");
ret = getMIDIDeviceDescriptionByIndex(direction, &desc);
if (ret == MIDI_SUCCESS) {
TRACE1("getMidiDeviceName: desc.name: %s\n", desc.name);
strncpy(name, desc.name, nameLength - 1);
name[nameLength - 1] = 0;
}
}
freeMIDIDeviceDescription(&desc);
return ret;
}
int getMidiDeviceVendor(int index, char *name, UINT32 nameLength) {
strncpy(name, ALSA_VENDOR, nameLength - 1);
name[nameLength - 1] = 0;
return MIDI_SUCCESS;
}
int getMidiDeviceDescription(snd_rawmidi_stream_t direction,
int index, char *name, UINT32 nameLength) {
ALSA_MIDIDeviceDescription desc;
int ret;
ret = initMIDIDeviceDescription(&desc, index);
if (ret == MIDI_SUCCESS) {
ret = getMIDIDeviceDescriptionByIndex(direction, &desc);
if (ret == MIDI_SUCCESS) {
strncpy(name, desc.description, nameLength - 1);
name[nameLength - 1] = 0;
}
}
freeMIDIDeviceDescription(&desc);
return ret;
}
int getMidiDeviceVersion(int index, char *name, UINT32 nameLength) {
getALSAVersion(name, nameLength);
return MIDI_SUCCESS;
}
static int getMidiDeviceID(snd_rawmidi_stream_t direction, int index,
UINT32* deviceID) {
ALSA_MIDIDeviceDescription desc;
int ret;
ret = initMIDIDeviceDescription(&desc, index);
if (ret == MIDI_SUCCESS) {
ret = getMIDIDeviceDescriptionByIndex(direction, &desc);
if (ret == MIDI_SUCCESS) {
// TRACE1("getMidiDeviceName: desc.name: %s\n", desc.name);
*deviceID = desc.deviceID;
}
}
freeMIDIDeviceDescription(&desc);
return ret;
}
/*
direction has to be either SND_RAWMIDI_STREAM_INPUT or
SND_RAWMIDI_STREAM_OUTPUT.
Returns 0 on success. Otherwise, MIDI_OUT_OF_MEMORY, MIDI_INVALID_ARGUMENT
or a negative ALSA error code is returned.
*/
INT32 openMidiDevice(snd_rawmidi_stream_t direction, INT32 deviceIndex,
MidiDeviceHandle** handle) {
snd_rawmidi_t* native_handle;
snd_midi_event_t* event_parser = NULL;
int err;
UINT32 deviceID = 0;
char devicename[100];
#ifdef ALSA_MIDI_USE_PLUGHW
int usePlugHw = 1;
#else
int usePlugHw = 0;
#endif
TRACE0("> openMidiDevice()\n");
(*handle) = (MidiDeviceHandle*) calloc(sizeof(MidiDeviceHandle), 1);
if (!(*handle)) {
ERROR0("ERROR: openDevice: out of memory\n");
return MIDI_OUT_OF_MEMORY;
}
// TODO: iterate to get dev ID from index
err = getMidiDeviceID(direction, deviceIndex, &deviceID);
TRACE1(" openMidiDevice(): deviceID: %d\n", (int) deviceID);
getDeviceStringFromDeviceID(devicename, deviceID,
usePlugHw, ALSA_RAWMIDI);
TRACE1(" openMidiDevice(): deviceString: %s\n", devicename);
// finally open the device
if (direction == SND_RAWMIDI_STREAM_INPUT) {
err = snd_rawmidi_open(&native_handle, NULL, devicename,
SND_RAWMIDI_NONBLOCK);
} else if (direction == SND_RAWMIDI_STREAM_OUTPUT) {
err = snd_rawmidi_open(NULL, &native_handle, devicename,
SND_RAWMIDI_NONBLOCK);
} else {
ERROR0(" ERROR: openMidiDevice(): direction is neither SND_RAWMIDI_STREAM_INPUT nor SND_RAWMIDI_STREAM_OUTPUT\n");
err = MIDI_INVALID_ARGUMENT;
}
if (err < 0) {
ERROR1("< ERROR: openMidiDevice(): snd_rawmidi_open() returned %d\n", err);
free(*handle);
(*handle) = NULL;
return err;
}
/* We opened with non-blocking behaviour to not get hung if the device
is used by a different process. Writing, however, should
be blocking. So we change it here. */
if (direction == SND_RAWMIDI_STREAM_OUTPUT) {
err = snd_rawmidi_nonblock(native_handle, 0);
if (err < 0) {
ERROR1(" ERROR: openMidiDevice(): snd_rawmidi_nonblock() returned %d\n", err);
snd_rawmidi_close(native_handle);
free(*handle);
(*handle) = NULL;
return err;
}
}
if (direction == SND_RAWMIDI_STREAM_INPUT) {
err = snd_midi_event_new(EVENT_PARSER_BUFSIZE, &event_parser);
if (err < 0) {
ERROR1(" ERROR: openMidiDevice(): snd_midi_event_new() returned %d\n", err);
snd_rawmidi_close(native_handle);
free(*handle);
(*handle) = NULL;
return err;
}
}
(*handle)->deviceHandle = (void*) native_handle;
(*handle)->startTime = getTimeInMicroseconds();
(*handle)->platformData = event_parser;
TRACE0("< openMidiDevice(): succeeded\n");
return err;
}
INT32 closeMidiDevice(MidiDeviceHandle* handle) {
int err;
TRACE0("> closeMidiDevice()\n");
if (!handle) {
ERROR0("< ERROR: closeMidiDevice(): handle is NULL\n");
return MIDI_INVALID_HANDLE;
}
if (!handle->deviceHandle) {
ERROR0("< ERROR: closeMidiDevice(): native handle is NULL\n");
return MIDI_INVALID_HANDLE;
}
err = snd_rawmidi_close((snd_rawmidi_t*) handle->deviceHandle);
TRACE1(" snd_rawmidi_close() returns %d\n", err);
if (handle->platformData) {
snd_midi_event_free((snd_midi_event_t*) handle->platformData);
}
free(handle);
TRACE0("< closeMidiDevice: succeeded\n");
return err;
}
INT64 getMidiTimestamp(MidiDeviceHandle* handle) {
if (!handle) {
ERROR0("< ERROR: closeMidiDevice(): handle is NULL\n");
return MIDI_INVALID_HANDLE;
}
return getTimeInMicroseconds() - handle->startTime;
}
/* end */
| gpl-2.0 |
weolar/src | WebCore/editing/MarkupAccumulator.cpp | 43 | 16818 | /*
* Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
* Copyright (C) 2009, 2010 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "MarkupAccumulator.h"
#include "CDATASection.h"
#include "Comment.h"
#include "DocumentFragment.h"
#include "DocumentType.h"
#include "Editor.h"
#include "HTMLElement.h"
#include "HTMLNames.h"
#include "KURL.h"
#include "ProcessingInstruction.h"
#include "XMLNSNames.h"
#include <wtf/unicode/CharacterNames.h>
namespace WebCore {
using namespace HTMLNames;
void appendCharactersReplacingEntities(Vector<UChar>& out, const UChar* content, size_t length, EntityMask entityMask)
{
DEFINE_STATIC_LOCAL(const String, ampReference, ("&amp;"));
DEFINE_STATIC_LOCAL(const String, ltReference, ("&lt;"));
DEFINE_STATIC_LOCAL(const String, gtReference, ("&gt;"));
DEFINE_STATIC_LOCAL(const String, quotReference, ("&quot;"));
DEFINE_STATIC_LOCAL(const String, nbspReference, ("&nbsp;"));
static const EntityDescription entityMaps[] = {
{ '&', ampReference, EntityAmp },
{ '<', ltReference, EntityLt },
{ '>', gtReference, EntityGt },
{ '"', quotReference, EntityQuot },
{ noBreakSpace, nbspReference, EntityNbsp },
};
size_t positionAfterLastEntity = 0;
for (size_t i = 0; i < length; ++i) {
for (size_t m = 0; m < WTF_ARRAY_LENGTH(entityMaps); ++m) {
if (content[i] == entityMaps[m].entity && entityMaps[m].mask & entityMask) {
out.append(content + positionAfterLastEntity, i - positionAfterLastEntity);
append(out, entityMaps[m].reference);
positionAfterLastEntity = i + 1;
break;
}
}
}
out.append(content + positionAfterLastEntity, length - positionAfterLastEntity);
}
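// Illustrative behaviour of the function above: with an entityMask that
// includes EntityLt and EntityAmp, the input "a < b & c" is emitted as
// "a &lt; b &amp; c", while characters whose entity is not in the mask
// are copied through unchanged.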
MarkupAccumulator::MarkupAccumulator(Vector<Node*>* nodes, EAbsoluteURLs shouldResolveURLs, const Range* range)
: m_nodes(nodes)
, m_range(range)
, m_shouldResolveURLs(shouldResolveURLs)
{
}
MarkupAccumulator::~MarkupAccumulator()
{
}
String MarkupAccumulator::serializeNodes(Node* node, Node* nodeToSkip, EChildrenOnly childrenOnly)
{
Vector<UChar> out;
serializeNodesWithNamespaces(node, nodeToSkip, childrenOnly, 0);
out.reserveInitialCapacity(length());
concatenateMarkup(out);
return String::adopt(out);
}
void MarkupAccumulator::serializeNodesWithNamespaces(Node* node, Node* nodeToSkip, EChildrenOnly childrenOnly, const Namespaces* namespaces)
{
if (node == nodeToSkip)
return;
Namespaces namespaceHash;
if (namespaces)
namespaceHash = *namespaces;
if (!childrenOnly)
appendStartTag(node, &namespaceHash);
if (!(node->document()->isHTMLDocument() && elementCannotHaveEndTag(node))) {
for (Node* current = node->firstChild(); current; current = current->nextSibling())
serializeNodesWithNamespaces(current, nodeToSkip, IncludeNode, &namespaceHash);
}
if (!childrenOnly)
appendEndTag(node);
}
void MarkupAccumulator::appendString(const String& string)
{
m_succeedingMarkup.append(string);
}
void MarkupAccumulator::appendStartTag(Node* node, Namespaces* namespaces)
{
Vector<UChar> markup;
appendStartMarkup(markup, node, namespaces);
appendString(String::adopt(markup));
if (m_nodes)
m_nodes->append(node);
}
void MarkupAccumulator::appendEndTag(Node* node)
{
Vector<UChar> markup;
appendEndMarkup(markup, node);
appendString(String::adopt(markup));
}
size_t MarkupAccumulator::totalLength(const Vector<String>& strings)
{
size_t length = 0;
for (size_t i = 0; i < strings.size(); ++i)
length += strings[i].length();
return length;
}
// FIXME: This is a very inefficient way of accumulating the markup.
// We're converting results of appendStartMarkup and appendEndMarkup from Vector<UChar> to String
// and then back to Vector<UChar> and again to String here.
void MarkupAccumulator::concatenateMarkup(Vector<UChar>& out)
{
for (size_t i = 0; i < m_succeedingMarkup.size(); ++i)
append(out, m_succeedingMarkup[i]);
}
void MarkupAccumulator::appendAttributeValue(Vector<UChar>& result, const String& attribute, bool documentIsHTML)
{
appendCharactersReplacingEntities(result, attribute.characters(), attribute.length(),
documentIsHTML ? EntityMaskInHTMLAttributeValue : EntityMaskInAttributeValue);
}
void MarkupAccumulator::appendCustomAttributes(Vector<UChar>&, Element*, Namespaces*)
{
}
void MarkupAccumulator::appendQuotedURLAttributeValue(Vector<UChar>& result, const String& urlString)
{
UChar quoteChar = '\"';
String strippedURLString = urlString.stripWhiteSpace();
if (protocolIsJavaScript(strippedURLString)) {
// minimal escaping for javascript urls
if (strippedURLString.contains('"')) {
if (strippedURLString.contains('\''))
strippedURLString.replace('\"', "&quot;");
else
quoteChar = '\'';
}
result.append(quoteChar);
append(result, strippedURLString);
result.append(quoteChar);
return;
}
// FIXME: This does not fully match other browsers. Firefox percent-escapes non-ASCII characters for innerHTML.
result.append(quoteChar);
appendAttributeValue(result, urlString, false);
result.append(quoteChar);
}
void MarkupAccumulator::appendNodeValue(Vector<UChar>& out, const Node* node, const Range* range, EntityMask entityMask)
{
String str = node->nodeValue();
const UChar* characters = str.characters();
size_t length = str.length();
if (range) {
ExceptionCode ec;
if (node == range->endContainer(ec))
length = range->endOffset(ec);
if (node == range->startContainer(ec)) {
size_t start = range->startOffset(ec);
characters += start;
length -= start;
}
}
appendCharactersReplacingEntities(out, characters, length, entityMask);
}
bool MarkupAccumulator::shouldAddNamespaceElement(const Element* element)
{
// Don't add namespace attribute if it is already defined for this elem.
const AtomicString& prefix = element->prefix();
AtomicString attr = !prefix.isEmpty() ? "xmlns:" + prefix : "xmlns";
return !element->hasAttribute(attr);
}
bool MarkupAccumulator::shouldAddNamespaceAttribute(const Attribute& attribute, Namespaces& namespaces)
{
namespaces.checkConsistency();
// Don't add namespace attributes twice
if (attribute.name() == XMLNSNames::xmlnsAttr) {
namespaces.set(emptyAtom.impl(), attribute.value().impl());
return false;
}
QualifiedName xmlnsPrefixAttr(xmlnsAtom, attribute.localName(), XMLNSNames::xmlnsNamespaceURI);
if (attribute.name() == xmlnsPrefixAttr) {
namespaces.set(attribute.localName().impl(), attribute.value().impl());
return false;
}
return true;
}
void MarkupAccumulator::appendNamespace(Vector<UChar>& result, const AtomicString& prefix, const AtomicString& namespaceURI, Namespaces& namespaces)
{
namespaces.checkConsistency();
if (namespaceURI.isEmpty())
return;
// Use emptyAtoms's impl() for both null and empty strings since the HashMap can't handle 0 as a key
AtomicStringImpl* pre = prefix.isEmpty() ? emptyAtom.impl() : prefix.impl();
AtomicStringImpl* foundNS = namespaces.get(pre);
if (foundNS != namespaceURI.impl()) {
namespaces.set(pre, namespaceURI.impl());
result.append(' ');
append(result, xmlnsAtom.string());
if (!prefix.isEmpty()) {
result.append(':');
append(result, prefix);
}
result.append('=');
result.append('"');
appendAttributeValue(result, namespaceURI, false);
result.append('"');
}
}
EntityMask MarkupAccumulator::entityMaskForText(Text* text) const
{
const QualifiedName* parentName = 0;
if (text->parentElement())
parentName = &static_cast<Element*>(text->parentElement())->tagQName();
if (parentName && (*parentName == scriptTag || *parentName == styleTag || *parentName == xmpTag))
return EntityMaskInCDATA;
return text->document()->isHTMLDocument() ? EntityMaskInHTMLPCDATA : EntityMaskInPCDATA;
}
void MarkupAccumulator::appendText(Vector<UChar>& out, Text* text)
{
appendNodeValue(out, text, m_range, entityMaskForText(text));
}
void MarkupAccumulator::appendComment(Vector<UChar>& out, const String& comment)
{
// FIXME: Comment content is not escaped, but XMLSerializer (and possibly other callers) should raise an exception if it includes "-->".
append(out, "<!--");
append(out, comment);
append(out, "-->");
}
void MarkupAccumulator::appendDocumentType(Vector<UChar>& result, const DocumentType* n)
{
if (n->name().isEmpty())
return;
append(result, "<!DOCTYPE ");
append(result, n->name());
if (!n->publicId().isEmpty()) {
append(result, " PUBLIC \"");
append(result, n->publicId());
append(result, "\"");
if (!n->systemId().isEmpty()) {
append(result, " \"");
append(result, n->systemId());
append(result, "\"");
}
} else if (!n->systemId().isEmpty()) {
append(result, " SYSTEM \"");
append(result, n->systemId());
append(result, "\"");
}
if (!n->internalSubset().isEmpty()) {
append(result, " [");
append(result, n->internalSubset());
append(result, "]");
}
append(result, ">");
}
void MarkupAccumulator::appendProcessingInstruction(Vector<UChar>& out, const String& target, const String& data)
{
// FIXME: PI data is not escaped, but XMLSerializer (and possibly other callers) should raise an exception if it includes "?>".
append(out, "<?");
append(out, target);
append(out, " ");
append(out, data);
append(out, "?>");
}
void MarkupAccumulator::appendElement(Vector<UChar>& out, Element* element, Namespaces* namespaces)
{
appendOpenTag(out, element, namespaces);
NamedNodeMap* attributes = element->attributes();
unsigned length = attributes->length();
for (unsigned int i = 0; i < length; i++)
appendAttribute(out, element, *attributes->attributeItem(i), namespaces);
// Give an opportunity to subclasses to add their own attributes.
appendCustomAttributes(out, element, namespaces);
appendCloseTag(out, element);
}
void MarkupAccumulator::appendOpenTag(Vector<UChar>& out, Element* element, Namespaces* namespaces)
{
out.append('<');
append(out, element->nodeNamePreservingCase());
if (!element->document()->isHTMLDocument() && namespaces && shouldAddNamespaceElement(element))
appendNamespace(out, element->prefix(), element->namespaceURI(), *namespaces);
}
void MarkupAccumulator::appendCloseTag(Vector<UChar>& out, Element* element)
{
if (shouldSelfClose(element)) {
if (element->isHTMLElement())
out.append(' '); // XHTML 1.0 <-> HTML compatibility.
out.append('/');
}
out.append('>');
}
void MarkupAccumulator::appendAttribute(Vector<UChar>& out, Element* element, const Attribute& attribute, Namespaces* namespaces)
{
bool documentIsHTML = element->document()->isHTMLDocument();
out.append(' ');
if (documentIsHTML)
append(out, attribute.name().localName());
else
append(out, attribute.name().toString());
out.append('=');
if (element->isURLAttribute(const_cast<Attribute*>(&attribute))) {
// We don't want to complete file:/// URLs because it may contain sensitive information
// about the user's system.
if (shouldResolveURLs() && !element->document()->url().isLocalFile())
appendQuotedURLAttributeValue(out, element->document()->completeURL(attribute.value()).string());
else
appendQuotedURLAttributeValue(out, attribute.value());
} else {
out.append('\"');
appendAttributeValue(out, attribute.value(), documentIsHTML);
out.append('\"');
}
if (!documentIsHTML && namespaces && shouldAddNamespaceAttribute(attribute, *namespaces))
appendNamespace(out, attribute.prefix(), attribute.namespaceURI(), *namespaces);
}
void MarkupAccumulator::appendCDATASection(Vector<UChar>& out, const String& section)
{
// FIXME: CDATA content is not escaped, but XMLSerializer (and possibly other callers) should raise an exception if it includes "]]>".
append(out, "<![CDATA[");
append(out, section);
append(out, "]]>");
}
void MarkupAccumulator::appendStartMarkup(Vector<UChar>& result, const Node* node, Namespaces* namespaces)
{
if (namespaces)
namespaces->checkConsistency();
switch (node->nodeType()) {
case Node::TEXT_NODE:
appendText(result, static_cast<Text*>(const_cast<Node*>(node)));
break;
case Node::COMMENT_NODE:
appendComment(result, static_cast<const Comment*>(node)->data());
break;
case Node::DOCUMENT_NODE:
case Node::DOCUMENT_FRAGMENT_NODE:
break;
case Node::DOCUMENT_TYPE_NODE:
appendDocumentType(result, static_cast<const DocumentType*>(node));
break;
case Node::PROCESSING_INSTRUCTION_NODE:
appendProcessingInstruction(result, static_cast<const ProcessingInstruction*>(node)->target(), static_cast<const ProcessingInstruction*>(node)->data());
break;
case Node::ELEMENT_NODE:
appendElement(result, static_cast<Element*>(const_cast<Node*>(node)), namespaces);
break;
case Node::CDATA_SECTION_NODE:
appendCDATASection(result, static_cast<const CDATASection*>(node)->data());
break;
case Node::ATTRIBUTE_NODE:
case Node::ENTITY_NODE:
case Node::ENTITY_REFERENCE_NODE:
case Node::NOTATION_NODE:
case Node::XPATH_NAMESPACE_NODE:
case Node::SHADOW_ROOT_NODE:
ASSERT_NOT_REACHED();
break;
}
}
// Rules of self-closure
// 1. No elements in HTML documents use the self-closing syntax.
// 2. Elements w/ children never self-close because they use a separate end tag.
// 3. HTML elements which do not have a "forbidden" end tag will close with a separate end tag.
// 4. Other elements self-close.
bool MarkupAccumulator::shouldSelfClose(const Node* node)
{
if (node->document()->isHTMLDocument())
return false;
if (node->hasChildNodes())
return false;
if (node->isHTMLElement() && !elementCannotHaveEndTag(node))
return false;
return true;
}
bool MarkupAccumulator::elementCannotHaveEndTag(const Node* node)
{
if (!node->isHTMLElement())
return false;
// FIXME: ieForbidsInsertHTML may not be the right function to call here
// ieForbidsInsertHTML is used to disallow setting innerHTML/outerHTML
// or createContextualFragment. It does not necessarily align with
// which elements should be serialized w/o end tags.
return static_cast<const HTMLElement*>(node)->ieForbidsInsertHTML();
}
void MarkupAccumulator::appendEndMarkup(Vector<UChar>& result, const Node* node)
{
if (!node->isElementNode() || shouldSelfClose(node) || (!node->hasChildNodes() && elementCannotHaveEndTag(node)))
return;
result.append('<');
result.append('/');
append(result, static_cast<const Element*>(node)->nodeNamePreservingCase());
result.append('>');
}
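// Illustrative outcomes of the rules above: an empty <div> in an HTML
// document serializes as "<div></div>" (rule 1), an HTML <br> element
// serializes as "<br>" with no end tag because it cannot have one, and
// an empty element outside the HTML namespace in an XML document
// self-closes, e.g. "<foo/>".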
}
| gpl-2.0 |
nitdroid/kernel-ng | drivers/input/mousedev.c | 555 | 26451 | /*
* Input driver to ExplorerPS/2 device driver module.
*
* Copyright (c) 1999-2002 Vojtech Pavlik
* Copyright (c) 2004 Dmitry Torokhov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#define MOUSEDEV_MINOR_BASE 32
#define MOUSEDEV_MINORS 32
#define MOUSEDEV_MIX 31
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/device.h>
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
#include <linux/miscdevice.h>
#endif
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces");
MODULE_LICENSE("GPL");
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_X
#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
#endif
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_Y
#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
#endif
static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X;
module_param(xres, uint, 0644);
MODULE_PARM_DESC(xres, "Horizontal screen resolution");
static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y;
module_param(yres, uint, 0644);
MODULE_PARM_DESC(yres, "Vertical screen resolution");
static unsigned tap_time = 200;
module_param(tap_time, uint, 0644);
MODULE_PARM_DESC(tap_time, "Tap time for touchpads in absolute mode (msecs)");
struct mousedev_hw_data {
int dx, dy, dz;
int x, y;
int abs_event;
unsigned long buttons;
};
struct mousedev {
int exist;
int open;
int minor;
struct input_handle handle;
wait_queue_head_t wait;
struct list_head client_list;
spinlock_t client_lock; /* protects client_list */
struct mutex mutex;
struct device dev;
struct list_head mixdev_node;
int mixdev_open;
struct mousedev_hw_data packet;
unsigned int pkt_count;
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
};
enum mousedev_emul {
MOUSEDEV_EMUL_PS2,
MOUSEDEV_EMUL_IMPS,
MOUSEDEV_EMUL_EXPS
};
struct mousedev_motion {
int dx, dy, dz;
unsigned long buttons;
};
#define PACKET_QUEUE_LEN 16
struct mousedev_client {
struct fasync_struct *fasync;
struct mousedev *mousedev;
struct list_head node;
struct mousedev_motion packets[PACKET_QUEUE_LEN];
unsigned int head, tail;
spinlock_t packet_lock;
int pos_x, pos_y;
signed char ps2[6];
unsigned char ready, buffer, bufsiz;
unsigned char imexseq, impsseq;
enum mousedev_emul mode;
unsigned long last_buttons;
};
#define MOUSEDEV_SEQ_LEN 6
static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 };
static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
static struct input_handler mousedev_handler;
static struct mousedev *mousedev_table[MOUSEDEV_MINORS];
static DEFINE_MUTEX(mousedev_table_mutex);
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
static void mixdev_open_devices(void);
static void mixdev_close_devices(void);
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
static void mousedev_touchpad_event(struct input_dev *dev,
struct mousedev *mousedev,
unsigned int code, int value)
{
int size, tmp;
enum { FRACTION_DENOM = 128 };
switch (code) {
case ABS_X:
fx(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
if (size == 0)
size = 256 * 2;
tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size;
tmp += mousedev->frac_dx;
mousedev->packet.dx = tmp / FRACTION_DENOM;
mousedev->frac_dx =
tmp - mousedev->packet.dx * FRACTION_DENOM;
}
break;
case ABS_Y:
fy(0) = value;
if (mousedev->touch && mousedev->pkt_count >= 2) {
/* use X size to keep the same scale */
size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
if (size == 0)
size = 256 * 2;
tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size;
tmp += mousedev->frac_dy;
mousedev->packet.dy = tmp / FRACTION_DENOM;
mousedev->frac_dy = tmp -
mousedev->packet.dy * FRACTION_DENOM;
}
break;
}
}
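/*
 * Worked example of the fixed-point scaling above: with an absolute X
 * range (size) of 1024 units and a finger movement of
 * value - fx(2) = 8, tmp = (8 * 256 * 128) / 1024 = 256, so
 * packet.dx becomes 256 / 128 = 2 and frac_dx keeps the remainder
 * (0 here) for the next report.
 */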
static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
unsigned int code, int value)
{
int size;
switch (code) {
case ABS_X:
size = dev->absmax[ABS_X] - dev->absmin[ABS_X];
if (size == 0)
size = xres ? : 1;
if (value > dev->absmax[ABS_X])
value = dev->absmax[ABS_X];
if (value < dev->absmin[ABS_X])
value = dev->absmin[ABS_X];
mousedev->packet.x =
((value - dev->absmin[ABS_X]) * xres) / size;
mousedev->packet.abs_event = 1;
break;
case ABS_Y:
size = dev->absmax[ABS_Y] - dev->absmin[ABS_Y];
if (size == 0)
size = yres ? : 1;
if (value > dev->absmax[ABS_Y])
value = dev->absmax[ABS_Y];
if (value < dev->absmin[ABS_Y])
value = dev->absmin[ABS_Y];
mousedev->packet.y = yres -
((value - dev->absmin[ABS_Y]) * yres) / size;
mousedev->packet.abs_event = 1;
break;
}
}
static void mousedev_rel_event(struct mousedev *mousedev,
unsigned int code, int value)
{
switch (code) {
case REL_X:
mousedev->packet.dx += value;
break;
case REL_Y:
mousedev->packet.dy -= value;
break;
case REL_WHEEL:
mousedev->packet.dz -= value;
break;
}
}
static void mousedev_key_event(struct mousedev *mousedev,
unsigned int code, int value)
{
int index;
switch (code) {
case BTN_TOUCH:
case BTN_0:
case BTN_LEFT: index = 0; break;
case BTN_STYLUS:
case BTN_1:
case BTN_RIGHT: index = 1; break;
case BTN_2:
case BTN_FORWARD:
case BTN_STYLUS2:
case BTN_MIDDLE: index = 2; break;
case BTN_3:
case BTN_BACK:
case BTN_SIDE: index = 3; break;
case BTN_4:
case BTN_EXTRA: index = 4; break;
default: return;
}
if (value) {
set_bit(index, &mousedev->packet.buttons);
set_bit(index, &mousedev_mix->packet.buttons);
} else {
clear_bit(index, &mousedev->packet.buttons);
clear_bit(index, &mousedev_mix->packet.buttons);
}
}
static void mousedev_notify_readers(struct mousedev *mousedev,
struct mousedev_hw_data *packet)
{
struct mousedev_client *client;
struct mousedev_motion *p;
unsigned int new_head;
int wake_readers = 0;
rcu_read_lock();
list_for_each_entry_rcu(client, &mousedev->client_list, node) {
/* Just acquire the lock, interrupts already disabled */
spin_lock(&client->packet_lock);
p = &client->packets[client->head];
if (client->ready && p->buttons != mousedev->packet.buttons) {
new_head = (client->head + 1) % PACKET_QUEUE_LEN;
if (new_head != client->tail) {
p = &client->packets[client->head = new_head];
memset(p, 0, sizeof(struct mousedev_motion));
}
}
if (packet->abs_event) {
p->dx += packet->x - client->pos_x;
p->dy += packet->y - client->pos_y;
client->pos_x = packet->x;
client->pos_y = packet->y;
}
client->pos_x += packet->dx;
client->pos_x = client->pos_x < 0 ?
0 : (client->pos_x >= xres ? xres : client->pos_x);
client->pos_y += packet->dy;
client->pos_y = client->pos_y < 0 ?
0 : (client->pos_y >= yres ? yres : client->pos_y);
p->dx += packet->dx;
p->dy += packet->dy;
p->dz += packet->dz;
p->buttons = mousedev->packet.buttons;
if (p->dx || p->dy || p->dz ||
p->buttons != client->last_buttons)
client->ready = 1;
spin_unlock(&client->packet_lock);
if (client->ready) {
kill_fasync(&client->fasync, SIGIO, POLL_IN);
wake_readers = 1;
}
}
rcu_read_unlock();
if (wake_readers)
wake_up_interruptible(&mousedev->wait);
}
static void mousedev_touchpad_touch(struct mousedev *mousedev, int value)
{
if (!value) {
if (mousedev->touch &&
time_before(jiffies,
mousedev->touch + msecs_to_jiffies(tap_time))) {
/*
* Toggle left button to emulate tap.
* We rely on the fact that mousedev_mix always has 0
* motion packet so we won't mess current position.
*/
set_bit(0, &mousedev->packet.buttons);
set_bit(0, &mousedev_mix->packet.buttons);
mousedev_notify_readers(mousedev, &mousedev_mix->packet);
mousedev_notify_readers(mousedev_mix,
&mousedev_mix->packet);
clear_bit(0, &mousedev->packet.buttons);
clear_bit(0, &mousedev_mix->packet.buttons);
}
mousedev->touch = mousedev->pkt_count = 0;
mousedev->frac_dx = 0;
mousedev->frac_dy = 0;
} else if (!mousedev->touch)
mousedev->touch = jiffies;
}
static void mousedev_event(struct input_handle *handle,
unsigned int type, unsigned int code, int value)
{
struct mousedev *mousedev = handle->private;
switch (type) {
case EV_ABS:
/* Ignore joysticks */
if (test_bit(BTN_TRIGGER, handle->dev->keybit))
return;
if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
mousedev_touchpad_event(handle->dev,
mousedev, code, value);
else
mousedev_abs_event(handle->dev, mousedev, code, value);
break;
case EV_REL:
mousedev_rel_event(mousedev, code, value);
break;
case EV_KEY:
if (value != 2) {
if (code == BTN_TOUCH &&
test_bit(BTN_TOOL_FINGER, handle->dev->keybit))
mousedev_touchpad_touch(mousedev, value);
else
mousedev_key_event(mousedev, code, value);
}
break;
case EV_SYN:
if (code == SYN_REPORT) {
if (mousedev->touch) {
mousedev->pkt_count++;
/*
* Input system eats duplicate events,
* but we need all of them to do correct
* averaging so apply present one forward
*/
fx(0) = fx(1);
fy(0) = fy(1);
}
mousedev_notify_readers(mousedev, &mousedev->packet);
mousedev_notify_readers(mousedev_mix, &mousedev->packet);
mousedev->packet.dx = mousedev->packet.dy =
mousedev->packet.dz = 0;
mousedev->packet.abs_event = 0;
}
break;
}
}
static int mousedev_fasync(int fd, struct file *file, int on)
{
struct mousedev_client *client = file->private_data;
return fasync_helper(fd, file, on, &client->fasync);
}
static void mousedev_free(struct device *dev)
{
struct mousedev *mousedev = container_of(dev, struct mousedev, dev);
input_put_device(mousedev->handle.dev);
kfree(mousedev);
}
static int mousedev_open_device(struct mousedev *mousedev)
{
int retval;
retval = mutex_lock_interruptible(&mousedev->mutex);
if (retval)
return retval;
if (mousedev->minor == MOUSEDEV_MIX)
mixdev_open_devices();
else if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
if (retval)
mousedev->open--;
}
mutex_unlock(&mousedev->mutex);
return retval;
}
static void mousedev_close_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
if (mousedev->minor == MOUSEDEV_MIX)
mixdev_close_devices();
else if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
}
/*
* Open all available devices so they can all be multiplexed in one.
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
static void mixdev_open_devices(void)
{
struct mousedev *mousedev;
if (mousedev_mix->open++)
return;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (!mousedev->mixdev_open) {
if (mousedev_open_device(mousedev))
continue;
mousedev->mixdev_open = 1;
}
}
}
/*
* Close all devices that were opened as part of multiplexed
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
static void mixdev_close_devices(void)
{
struct mousedev *mousedev;
if (--mousedev_mix->open)
return;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (mousedev->mixdev_open) {
mousedev->mixdev_open = 0;
mousedev_close_device(mousedev);
}
}
}
static void mousedev_attach_client(struct mousedev *mousedev,
struct mousedev_client *client)
{
spin_lock(&mousedev->client_lock);
list_add_tail_rcu(&client->node, &mousedev->client_list);
spin_unlock(&mousedev->client_lock);
synchronize_rcu();
}
static void mousedev_detach_client(struct mousedev *mousedev,
struct mousedev_client *client)
{
spin_lock(&mousedev->client_lock);
list_del_rcu(&client->node);
spin_unlock(&mousedev->client_lock);
synchronize_rcu();
}
static int mousedev_release(struct inode *inode, struct file *file)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
mousedev_detach_client(mousedev, client);
kfree(client);
mousedev_close_device(mousedev);
put_device(&mousedev->dev);
return 0;
}
static int mousedev_open(struct inode *inode, struct file *file)
{
struct mousedev_client *client;
struct mousedev *mousedev;
int error;
int i;
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
if (imajor(inode) == MISC_MAJOR)
i = MOUSEDEV_MIX;
else
#endif
i = iminor(inode) - MOUSEDEV_MINOR_BASE;
if (i >= MOUSEDEV_MINORS)
return -ENODEV;
lock_kernel();
error = mutex_lock_interruptible(&mousedev_table_mutex);
if (error) {
unlock_kernel();
return error;
}
mousedev = mousedev_table[i];
if (mousedev)
get_device(&mousedev->dev);
mutex_unlock(&mousedev_table_mutex);
if (!mousedev) {
unlock_kernel();
return -ENODEV;
}
client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL);
if (!client) {
error = -ENOMEM;
goto err_put_mousedev;
}
spin_lock_init(&client->packet_lock);
client->pos_x = xres / 2;
client->pos_y = yres / 2;
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
error = mousedev_open_device(mousedev);
if (error)
goto err_free_client;
file->private_data = client;
unlock_kernel();
return 0;
err_free_client:
mousedev_detach_client(mousedev, client);
kfree(client);
err_put_mousedev:
put_device(&mousedev->dev);
unlock_kernel();
return error;
}
static inline int mousedev_limit_delta(int delta, int limit)
{
return delta > limit ? limit : (delta < -limit ? -limit : delta);
}
static void mousedev_packet(struct mousedev_client *client,
signed char *ps2_data)
{
struct mousedev_motion *p = &client->packets[client->tail];
ps2_data[0] = 0x08 |
((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
ps2_data[1] = mousedev_limit_delta(p->dx, 127);
ps2_data[2] = mousedev_limit_delta(p->dy, 127);
p->dx -= ps2_data[1];
p->dy -= ps2_data[2];
switch (client->mode) {
case MOUSEDEV_EMUL_EXPS:
ps2_data[3] = mousedev_limit_delta(p->dz, 7);
p->dz -= ps2_data[3];
ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_IMPS:
ps2_data[0] |=
((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
ps2_data[3] = mousedev_limit_delta(p->dz, 127);
p->dz -= ps2_data[3];
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_PS2:
default:
ps2_data[0] |=
((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
p->dz = 0;
client->bufsiz = 3;
break;
}
if (!p->dx && !p->dy && !p->dz) {
if (client->tail == client->head) {
client->ready = 0;
client->last_buttons = p->buttons;
} else
client->tail = (client->tail + 1) % PACKET_QUEUE_LEN;
}
}
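/*
 * A minimal consumer-side sketch (hypothetical helper, not part of this
 * driver): decoding the 3-byte PS/2 packets that mousedev_packet() builds
 * above. Byte 0 always has bit 3 set, carries the button state in bits 0-2,
 * and bits 4/5 are the sign bits that extend bytes 1 and 2 (dx and dy) to
 * 9-bit signed values.
 */
struct decoded_ps2 {
	int buttons;		/* left/right/middle in bits 0-2 */
	int dx, dy;
};
static void decode_ps2_packet(const unsigned char d[3], struct decoded_ps2 *out)
{
	out->buttons = d[0] & 0x07;
	out->dx = d[1] - ((d[0] & 0x10) ? 256 : 0);	/* sign-extend via bit 4 */
	out->dy = d[2] - ((d[0] & 0x20) ? 256 : 0);	/* sign-extend via bit 5 */
}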
static void mousedev_generate_response(struct mousedev_client *client,
int command)
{
client->ps2[0] = 0xfa; /* ACK */
switch (command) {
case 0xeb: /* Poll */
mousedev_packet(client, &client->ps2[1]);
client->bufsiz++; /* account for leading ACK */
break;
case 0xf2: /* Get ID */
switch (client->mode) {
case MOUSEDEV_EMUL_PS2:
client->ps2[1] = 0;
break;
case MOUSEDEV_EMUL_IMPS:
client->ps2[1] = 3;
break;
case MOUSEDEV_EMUL_EXPS:
client->ps2[1] = 4;
break;
}
client->bufsiz = 2;
break;
case 0xe9: /* Get info */
client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200;
client->bufsiz = 4;
break;
case 0xff: /* Reset */
client->impsseq = client->imexseq = 0;
client->mode = MOUSEDEV_EMUL_PS2;
client->ps2[1] = 0xaa; client->ps2[2] = 0x00;
client->bufsiz = 3;
break;
default:
client->bufsiz = 1;
break;
}
client->buffer = client->bufsiz;
}
static ssize_t mousedev_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct mousedev_client *client = file->private_data;
unsigned char c;
unsigned int i;
for (i = 0; i < count; i++) {
if (get_user(c, buffer + i))
return -EFAULT;
spin_lock_irq(&client->packet_lock);
if (c == mousedev_imex_seq[client->imexseq]) {
if (++client->imexseq == MOUSEDEV_SEQ_LEN) {
client->imexseq = 0;
client->mode = MOUSEDEV_EMUL_EXPS;
}
} else
client->imexseq = 0;
if (c == mousedev_imps_seq[client->impsseq]) {
if (++client->impsseq == MOUSEDEV_SEQ_LEN) {
client->impsseq = 0;
client->mode = MOUSEDEV_EMUL_IMPS;
}
} else
client->impsseq = 0;
mousedev_generate_response(client, c);
spin_unlock_irq(&client->packet_lock);
}
kill_fasync(&client->fasync, SIGIO, POLL_IN);
wake_up_interruptible(&client->mousedev->wait);
return count;
}
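/*
 * A minimal userspace sketch (hypothetical, not part of this driver) of the
 * protocol negotiation handled by mousedev_write() above: writing a byte
 * sequence that matches mousedev_imps_seq switches the client to ImPS/2
 * emulation, and the 0xf2 "Get ID" command then reports 3. The byte values
 * below assume the conventional ImPS/2 knock (set sample rate 200, 100, 80);
 * the authoritative sequence tables are defined earlier in this file.
 */
#include <fcntl.h>
#include <unistd.h>
static int enable_imps2(const char *node)	/* e.g. "/dev/input/mice" */
{
	static const unsigned char knock[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 };
	unsigned char get_id = 0xf2;
	unsigned char id[2] = { 0, 0 };		/* 0xfa ACK + device ID */
	int fd = open(node, O_RDWR);
	if (fd < 0)
		return -1;
	if (write(fd, knock, sizeof(knock)) != sizeof(knock) ||
	    write(fd, &get_id, 1) != 1 ||
	    read(fd, id, sizeof(id)) != sizeof(id)) {
		close(fd);
		return -1;
	}
	close(fd);
	return id[1];				/* expect 3 once ImPS/2 is active */
}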
static ssize_t mousedev_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
signed char data[sizeof(client->ps2)];
int retval = 0;
if (!client->ready && !client->buffer && mousedev->exist &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
retval = wait_event_interruptible(mousedev->wait,
!mousedev->exist || client->ready || client->buffer);
if (retval)
return retval;
if (!mousedev->exist)
return -ENODEV;
spin_lock_irq(&client->packet_lock);
if (!client->buffer && client->ready) {
mousedev_packet(client, client->ps2);
client->buffer = client->bufsiz;
}
if (count > client->buffer)
count = client->buffer;
memcpy(data, client->ps2 + client->bufsiz - client->buffer, count);
client->buffer -= count;
spin_unlock_irq(&client->packet_lock);
if (copy_to_user(buffer, data, count))
return -EFAULT;
return count;
}
/* No kernel lock - fine */
static unsigned int mousedev_poll(struct file *file, poll_table *wait)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
poll_wait(file, &mousedev->wait, wait);
return ((client->ready || client->buffer) ? (POLLIN | POLLRDNORM) : 0) |
(mousedev->exist ? 0 : (POLLHUP | POLLERR));
}
static const struct file_operations mousedev_fops = {
.owner = THIS_MODULE,
.read = mousedev_read,
.write = mousedev_write,
.poll = mousedev_poll,
.open = mousedev_open,
.release = mousedev_release,
.fasync = mousedev_fasync,
};
static int mousedev_install_chrdev(struct mousedev *mousedev)
{
mousedev_table[mousedev->minor] = mousedev;
return 0;
}
static void mousedev_remove_chrdev(struct mousedev *mousedev)
{
mutex_lock(&mousedev_table_mutex);
mousedev_table[mousedev->minor] = NULL;
mutex_unlock(&mousedev_table_mutex);
}
/*
* Mark device non-existent. This disables writes, ioctls and
* prevents new users from opening the device. Already posted
* blocking reads will stay, however new ones will fail.
*/
static void mousedev_mark_dead(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
mousedev->exist = 0;
mutex_unlock(&mousedev->mutex);
}
/*
* Wake up users waiting for IO so they can disconnect from
* dead device.
*/
static void mousedev_hangup(struct mousedev *mousedev)
{
struct mousedev_client *client;
spin_lock(&mousedev->client_lock);
list_for_each_entry(client, &mousedev->client_list, node)
kill_fasync(&client->fasync, SIGIO, POLL_HUP);
spin_unlock(&mousedev->client_lock);
wake_up_interruptible(&mousedev->wait);
}
static void mousedev_cleanup(struct mousedev *mousedev)
{
struct input_handle *handle = &mousedev->handle;
mousedev_mark_dead(mousedev);
mousedev_hangup(mousedev);
mousedev_remove_chrdev(mousedev);
/* mousedev is marked dead so no one else accesses mousedev->open */
if (mousedev->open)
input_close_device(handle);
}
static struct mousedev *mousedev_create(struct input_dev *dev,
struct input_handler *handler,
int minor)
{
struct mousedev *mousedev;
int error;
mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL);
if (!mousedev) {
error = -ENOMEM;
goto err_out;
}
INIT_LIST_HEAD(&mousedev->client_list);
INIT_LIST_HEAD(&mousedev->mixdev_node);
spin_lock_init(&mousedev->client_lock);
mutex_init(&mousedev->mutex);
lockdep_set_subclass(&mousedev->mutex,
minor == MOUSEDEV_MIX ? MOUSEDEV_MIX : 0);
init_waitqueue_head(&mousedev->wait);
if (minor == MOUSEDEV_MIX)
dev_set_name(&mousedev->dev, "mice");
else
dev_set_name(&mousedev->dev, "mouse%d", minor);
mousedev->minor = minor;
mousedev->exist = 1;
mousedev->handle.dev = input_get_device(dev);
mousedev->handle.name = dev_name(&mousedev->dev);
mousedev->handle.handler = handler;
mousedev->handle.private = mousedev;
mousedev->dev.class = &input_class;
if (dev)
mousedev->dev.parent = &dev->dev;
mousedev->dev.devt = MKDEV(INPUT_MAJOR, MOUSEDEV_MINOR_BASE + minor);
mousedev->dev.release = mousedev_free;
device_initialize(&mousedev->dev);
if (minor != MOUSEDEV_MIX) {
error = input_register_handle(&mousedev->handle);
if (error)
goto err_free_mousedev;
}
error = mousedev_install_chrdev(mousedev);
if (error)
goto err_unregister_handle;
error = device_add(&mousedev->dev);
if (error)
goto err_cleanup_mousedev;
return mousedev;
err_cleanup_mousedev:
mousedev_cleanup(mousedev);
err_unregister_handle:
if (minor != MOUSEDEV_MIX)
input_unregister_handle(&mousedev->handle);
err_free_mousedev:
put_device(&mousedev->dev);
err_out:
return ERR_PTR(error);
}
static void mousedev_destroy(struct mousedev *mousedev)
{
device_del(&mousedev->dev);
mousedev_cleanup(mousedev);
if (mousedev->minor != MOUSEDEV_MIX)
input_unregister_handle(&mousedev->handle);
put_device(&mousedev->dev);
}
static int mixdev_add_device(struct mousedev *mousedev)
{
int retval;
retval = mutex_lock_interruptible(&mousedev_mix->mutex);
if (retval)
return retval;
if (mousedev_mix->open) {
retval = mousedev_open_device(mousedev);
if (retval)
goto out;
mousedev->mixdev_open = 1;
}
get_device(&mousedev->dev);
list_add_tail(&mousedev->mixdev_node, &mousedev_mix_list);
out:
mutex_unlock(&mousedev_mix->mutex);
return retval;
}
static void mixdev_remove_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev_mix->mutex);
if (mousedev->mixdev_open) {
mousedev->mixdev_open = 0;
mousedev_close_device(mousedev);
}
list_del_init(&mousedev->mixdev_node);
mutex_unlock(&mousedev_mix->mutex);
put_device(&mousedev->dev);
}
static int mousedev_connect(struct input_handler *handler,
struct input_dev *dev,
const struct input_device_id *id)
{
struct mousedev *mousedev;
int minor;
int error;
for (minor = 0; minor < MOUSEDEV_MINORS; minor++)
if (!mousedev_table[minor])
break;
if (minor == MOUSEDEV_MINORS) {
printk(KERN_ERR "mousedev: no more free mousedev devices\n");
return -ENFILE;
}
mousedev = mousedev_create(dev, handler, minor);
if (IS_ERR(mousedev))
return PTR_ERR(mousedev);
error = mixdev_add_device(mousedev);
if (error) {
mousedev_destroy(mousedev);
return error;
}
return 0;
}
static void mousedev_disconnect(struct input_handle *handle)
{
struct mousedev *mousedev = handle->private;
mixdev_remove_device(mousedev);
mousedev_destroy(mousedev);
}
static const struct input_device_id mousedev_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_RELBIT,
.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
.relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) },
}, /* A mouse like device, at least one button,
two relative axes */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_RELBIT,
.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
.relbit = { BIT_MASK(REL_WHEEL) },
}, /* A separate scrollwheel */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
}, /* A tablet like device, at least touch detection,
two absolute axes */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
.keybit = { [BIT_WORD(BTN_TOOL_FINGER)] =
BIT_MASK(BTN_TOOL_FINGER) },
.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
BIT_MASK(ABS_PRESSURE) |
BIT_MASK(ABS_TOOL_WIDTH) },
}, /* A touchpad */
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
}, /* Mouse-like device with absolute X and Y but ordinary
clicks, like hp ILO2 High Performance mouse */
{ }, /* Terminating entry */
};
MODULE_DEVICE_TABLE(input, mousedev_ids);
static struct input_handler mousedev_handler = {
.event = mousedev_event,
.connect = mousedev_connect,
.disconnect = mousedev_disconnect,
.fops = &mousedev_fops,
.minor = MOUSEDEV_MINOR_BASE,
.name = "mousedev",
.id_table = mousedev_ids,
};
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
static struct miscdevice psaux_mouse = {
PSMOUSE_MINOR, "psaux", &mousedev_fops
};
static int psaux_registered;
#endif
static int __init mousedev_init(void)
{
int error;
mousedev_mix = mousedev_create(NULL, &mousedev_handler, MOUSEDEV_MIX);
if (IS_ERR(mousedev_mix))
return PTR_ERR(mousedev_mix);
error = input_register_handler(&mousedev_handler);
if (error) {
mousedev_destroy(mousedev_mix);
return error;
}
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
error = misc_register(&psaux_mouse);
if (error)
printk(KERN_WARNING "mice: could not register psaux device, "
"error: %d\n", error);
else
psaux_registered = 1;
#endif
printk(KERN_INFO "mice: PS/2 mouse device common for all mice\n");
return 0;
}
static void __exit mousedev_exit(void)
{
#ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
if (psaux_registered)
misc_deregister(&psaux_mouse);
#endif
input_unregister_handler(&mousedev_handler);
mousedev_destroy(mousedev_mix);
}
module_init(mousedev_init);
module_exit(mousedev_exit);
| gpl-2.0 |
stelios97/sony-kernel-msm7x27a | arch/arm/mach-msm/board-msm7627-regulator.c | 811 | 6601 | /*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "board-msm7627-regulator.h"
#define PCOM_VREG_CONSUMERS(name) \
static struct regulator_consumer_supply __pcom_vreg_supply_##name[]
#define PCOM_VREG_CONSTRAINT_LVSW(_name, _always_on, _boot_on, _supply_uV) \
{ \
.name = #_name, \
.min_uV = 0, \
.max_uV = 0, \
.input_uV = _supply_uV, \
.valid_modes_mask = REGULATOR_MODE_NORMAL, \
.valid_ops_mask = REGULATOR_CHANGE_STATUS, \
.apply_uV = 0, \
.boot_on = _boot_on, \
.always_on = _always_on \
}
#define PCOM_VREG_CONSTRAINT_DYN(_name, _min_uV, _max_uV, _always_on, \
_boot_on, _apply_uV, _supply_uV) \
{ \
.name = #_name, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
.valid_modes_mask = REGULATOR_MODE_NORMAL, \
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS, \
.input_uV = _supply_uV, \
.apply_uV = _apply_uV, \
.boot_on = _boot_on, \
.always_on = _always_on \
}
#define PCOM_VREG_INIT(_name, _supply, _constraints)\
{ \
.supply_regulator = _supply, \
.consumer_supplies = __pcom_vreg_supply_##_name, \
.num_consumer_supplies = ARRAY_SIZE(__pcom_vreg_supply_##_name), \
.constraints = _constraints \
}
#define PCOM_VREG_SMP(_name, _id, _supply, _min_uV, _max_uV, _rise_time, \
_pulldown, _always_on, _boot_on, _apply_uV, _supply_uV) \
{ \
.init_data = PCOM_VREG_INIT(_name, _supply, \
PCOM_VREG_CONSTRAINT_DYN(_name, _min_uV, _max_uV, _always_on, \
_boot_on, _apply_uV, _supply_uV)), \
.id = _id, \
.rise_time = _rise_time, \
.pulldown = _pulldown, \
.negative = 0, \
}
#define PCOM_VREG_LDO PCOM_VREG_SMP
PCOM_VREG_CONSUMERS(smps0) = {
REGULATOR_SUPPLY("smps0", NULL),
REGULATOR_SUPPLY("msmc1", NULL),
};
PCOM_VREG_CONSUMERS(smps1) = {
REGULATOR_SUPPLY("smps1", NULL),
REGULATOR_SUPPLY("msmc2", NULL),
};
PCOM_VREG_CONSUMERS(smps2) = {
REGULATOR_SUPPLY("smps2", NULL),
REGULATOR_SUPPLY("pa", NULL),
};
PCOM_VREG_CONSUMERS(smps3) = {
REGULATOR_SUPPLY("smps3", NULL),
REGULATOR_SUPPLY("msme1", NULL),
};
PCOM_VREG_CONSUMERS(ldo00) = {
REGULATOR_SUPPLY("ldo00", NULL),
REGULATOR_SUPPLY("gp3", NULL),
};
PCOM_VREG_CONSUMERS(ldo01) = {
REGULATOR_SUPPLY("ldo01", NULL),
REGULATOR_SUPPLY("msma", NULL),
};
PCOM_VREG_CONSUMERS(ldo02) = {
REGULATOR_SUPPLY("ldo02", NULL),
REGULATOR_SUPPLY("msmp", NULL),
};
PCOM_VREG_CONSUMERS(ldo03) = {
REGULATOR_SUPPLY("ldo03", NULL),
REGULATOR_SUPPLY("ruim", NULL),
};
PCOM_VREG_CONSUMERS(ldo04) = {
REGULATOR_SUPPLY("ldo04", NULL),
REGULATOR_SUPPLY("tcxo", NULL),
};
PCOM_VREG_CONSUMERS(ldo05) = {
REGULATOR_SUPPLY("ldo05", NULL),
REGULATOR_SUPPLY("mmc", NULL),
};
PCOM_VREG_CONSUMERS(ldo06) = {
REGULATOR_SUPPLY("ldo06", NULL),
REGULATOR_SUPPLY("usb", NULL),
};
PCOM_VREG_CONSUMERS(ldo07) = {
REGULATOR_SUPPLY("ldo07", NULL),
REGULATOR_SUPPLY("rfrx1", NULL),
};
PCOM_VREG_CONSUMERS(ldo08) = {
REGULATOR_SUPPLY("ldo08", NULL),
REGULATOR_SUPPLY("synt", NULL),
};
PCOM_VREG_CONSUMERS(ldo09) = {
REGULATOR_SUPPLY("ldo09", NULL),
REGULATOR_SUPPLY("gp1", NULL),
};
PCOM_VREG_CONSUMERS(ldo10) = {
REGULATOR_SUPPLY("ldo10", NULL),
REGULATOR_SUPPLY("gp4", NULL),
};
PCOM_VREG_CONSUMERS(ldo11) = {
REGULATOR_SUPPLY("ldo11", NULL),
REGULATOR_SUPPLY("gp2", NULL),
};
PCOM_VREG_CONSUMERS(ldo12) = {
REGULATOR_SUPPLY("ldo12", NULL),
REGULATOR_SUPPLY("rftx", NULL),
};
PCOM_VREG_CONSUMERS(ldo13) = {
REGULATOR_SUPPLY("ldo13", NULL),
REGULATOR_SUPPLY("wlan", NULL),
};
PCOM_VREG_CONSUMERS(ldo14) = {
REGULATOR_SUPPLY("ldo14", NULL),
REGULATOR_SUPPLY("rf", NULL),
};
PCOM_VREG_CONSUMERS(ldo15) = {
REGULATOR_SUPPLY("ldo15", NULL),
REGULATOR_SUPPLY("gp6", NULL),
};
PCOM_VREG_CONSUMERS(ldo16) = {
REGULATOR_SUPPLY("ldo16", NULL),
REGULATOR_SUPPLY("gp5", NULL),
};
PCOM_VREG_CONSUMERS(ldo17) = {
REGULATOR_SUPPLY("ldo17", NULL),
REGULATOR_SUPPLY("msme2", NULL),
};
/**
* The minimum and maximum ranges for the regulators are as per the
* device datasheet. The actual value used by a consumer lies within
* the provided range.
*/
static struct proccomm_regulator_info msm7627_pcom_vreg_info[] = {
/* Standard regulators (SMPS and LDO)
* R = rise time (us)
* P = pulldown (1 = pull down, 0 = float, -1 = don't care)
* A = always on
* B = boot on
* V = automatic voltage set (meaningful for single-voltage regs only)
* S = supply voltage (uV)
* name id supp min uV max uV R P A B V S */
PCOM_VREG_SMP(smps0, 3, NULL, 750000, 3050000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_SMP(smps1, 4, NULL, 750000, 3050000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_SMP(smps2, 10, NULL, 750000, 3050000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_SMP(smps3, 2, NULL, 750000, 3050000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo00, 5, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo01, 0, NULL, 2600000, 2600000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo02, 1, NULL, 2600000, 2600000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo03, 19, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo04, 9, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo05, 18, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo06, 16, NULL, 3300000, 3300000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo07, 12, NULL, 2700000, 2700000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo08, 14, NULL, 2700000, 2700000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo09, 8, NULL, 2900000, 2900000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo10, 7, NULL, 2600000, 2600000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo11, 21, NULL, 1800000, 1800000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo12, 11, NULL, 1800000, 1800000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo13, 15, NULL, 1800000, 2850000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo14, 24, NULL, 2700000, 2700000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo15, 23, NULL, 2600000, 2600000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo16, 22, NULL, 2850000, 3000000, 0, -1, 0, 0, 0, 0),
PCOM_VREG_LDO(ldo17, 6, NULL, 1300000, 1300000, 0, -1, 0, 0, 0, 0),
};
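/*
 * A minimal consumer-side sketch (hypothetical device pointer, not part of
 * this board file): grabbing one of the supplies declared above through the
 * standard regulator API. "mmc" maps to ldo05, whose range is fixed at
 * 2850000 uV in msm7627_pcom_vreg_info.
 */
#include <linux/err.h>
#include <linux/regulator/consumer.h>
static int example_enable_mmc_supply(struct device *dev)
{
	struct regulator *vreg;
	int ret;
	vreg = regulator_get(dev, "mmc");	/* matches REGULATOR_SUPPLY("mmc", NULL) */
	if (IS_ERR(vreg))
		return PTR_ERR(vreg);
	ret = regulator_set_voltage(vreg, 2850000, 2850000);
	if (!ret)
		ret = regulator_enable(vreg);
	if (ret)
		regulator_put(vreg);
	return ret;
}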
struct proccomm_regulator_platform_data msm7627_proccomm_regulator_data = {
.regs = msm7627_pcom_vreg_info,
.nregs = ARRAY_SIZE(msm7627_pcom_vreg_info)
};
| gpl-2.0 |
ma34s/so03c_kernel | fs/sysfs/group.c | 811 | 3902 | /*
* fs/sysfs/group.c - Operations for adding/removing multiple files at once.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* This file is released under the GPL v2.
*
*/
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>
#include "sysfs.h"
static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
const struct attribute_group *grp)
{
struct attribute *const* attr;
int i;
for (i = 0, attr = grp->attrs; *attr; i++, attr++)
sysfs_hash_and_remove(dir_sd, (*attr)->name);
}
static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
const struct attribute_group *grp, int update)
{
struct attribute *const* attr;
int error = 0, i;
for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
mode_t mode = 0;
/* in update mode, we're changing the permissions or
* visibility. Do this by first removing then
* re-adding (if required) the file */
if (update)
sysfs_hash_and_remove(dir_sd, (*attr)->name);
if (grp->is_visible) {
mode = grp->is_visible(kobj, *attr, i);
if (!mode)
continue;
}
error = sysfs_add_file_mode(dir_sd, *attr, SYSFS_KOBJ_ATTR,
(*attr)->mode | mode);
if (unlikely(error))
break;
}
if (error)
remove_files(dir_sd, kobj, grp);
return error;
}
static int internal_create_group(struct kobject *kobj, int update,
const struct attribute_group *grp)
{
struct sysfs_dirent *sd;
int error;
BUG_ON(!kobj || (!update && !kobj->sd));
/* Updates may happen before the object has been instantiated */
if (unlikely(update && !kobj->sd))
return -EINVAL;
if (grp->name) {
error = sysfs_create_subdir(kobj, grp->name, &sd);
if (error)
return error;
} else
sd = kobj->sd;
sysfs_get(sd);
error = create_files(sd, kobj, grp, update);
if (error) {
if (grp->name)
sysfs_remove_subdir(sd);
}
sysfs_put(sd);
return error;
}
/**
* sysfs_create_group - given a directory kobject, create an attribute group
* @kobj: The kobject to create the group on
* @grp: The attribute group to create
*
* This function creates a group for the first time. It will explicitly
* warn and error if any of the attribute files being created already exist.
*
* Returns 0 on success or error.
*/
int sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return internal_create_group(kobj, 0, grp);
}
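/*
 * A minimal sketch of typical driver-side usage (hypothetical attribute
 * names and show routine, not part of this file): declare the attributes,
 * collect them in an attribute_group, and create the group on an existing
 * kobject with sysfs_create_group().
 */
static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n", attr->attr.name);
}
static struct kobj_attribute example_foo = __ATTR(foo, S_IRUGO, example_show, NULL);
static struct kobj_attribute example_bar = __ATTR(bar, S_IRUGO, example_show, NULL);
static struct attribute *example_attrs[] = {
	&example_foo.attr,
	&example_bar.attr,
	NULL,			/* attrs arrays must be NULL terminated */
};
static struct attribute_group example_group = {
	.name = "example",	/* optional sub-directory; NULL places files in kobj itself */
	.attrs = example_attrs,
};
/* ...later, with a live kobject: err = sysfs_create_group(kobj, &example_group); */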
/**
* sysfs_update_group - given a directory kobject, update an attribute group
* @kobj: The kobject to update the group on
* @grp: The attribute group to update
*
* This function updates an attribute group. Unlike
* sysfs_create_group(), it will explicitly not warn or error if any
* of the attribute files being created already exist. Furthermore,
* if the visibility of the files has changed through the is_visible()
* callback, it will update the permissions and add or remove the
* relevant files.
*
* The primary use for this function is to call it after making a change
* that affects group visibility.
*
* Returns 0 on success or error.
*/
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return internal_create_group(kobj, 1, grp);
}
void sysfs_remove_group(struct kobject * kobj,
const struct attribute_group * grp)
{
struct sysfs_dirent *dir_sd = kobj->sd;
struct sysfs_dirent *sd;
if (grp->name) {
sd = sysfs_get_dirent(dir_sd, grp->name);
if (!sd) {
WARN(!sd, KERN_WARNING "sysfs group %p not found for "
"kobject '%s'\n", grp, kobject_name(kobj));
return;
}
} else
sd = sysfs_get(dir_sd);
remove_files(sd, kobj, grp);
if (grp->name)
sysfs_remove_subdir(sd);
sysfs_put(sd);
}
EXPORT_SYMBOL_GPL(sysfs_create_group);
EXPORT_SYMBOL_GPL(sysfs_update_group);
EXPORT_SYMBOL_GPL(sysfs_remove_group);
| gpl-2.0 |
bagnz0r/GT-I8160_Kernel | drivers/mtd/ubi/build.c | 1579 | 41323 | /*
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2007
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём),
* Frank Haverkamp
*/
/*
* This file includes UBI initialization and building of UBI devices.
*
* When UBI is initialized, it attaches all the MTD devices specified as the
* module load parameters or the kernel boot parameters. If MTD devices were
* not specified, UBI does not attach any MTD device, but it is possible to do
* later using the "UBI control device".
*
* At the moment we only attach UBI devices by scanning, which will become a
* bottleneck when flashes reach a certain large size. Then one may improve UBI
* and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif
/**
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD character device node path, MTD device name, or MTD device number
* string
* @vid_hdr_offs: VID header offset
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int vid_hdr_offs;
};
/* Numbers of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;
/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ubi_ctrl",
.fops = &ubi_ctrl_cdev_operations,
};
/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
struct class_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", UBI_VERSION);
}
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
__ATTR(version, S_IRUGO, ubi_version_show, NULL);
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
* @ubi: UBI device description object
* @vol: volume description object of the changed volume
* @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
*
* This is a helper function which notifies all subscribers about a volume
* change event (creation, removal, re-sizing, re-naming, updating). Returns
* zero in case of success and a negative error code in case of failure.
*/
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
struct ubi_notification nt;
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
/**
* ubi_notify_all - send a notification to all volumes.
* @ubi: UBI device description object
* @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
* @nb: the notifier to call
*
* This function walks all volumes of UBI device @ubi and sends the @ntype
* notification for each volume. If @nb is %NULL, then all registered notifiers
* are called, otherwise only the @nb notifier is called. Returns the number of
* sent notifications.
*/
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
struct ubi_notification nt;
int i, count = 0;
ubi_do_get_device_info(ubi, &nt.di);
mutex_lock(&ubi->device_mutex);
for (i = 0; i < ubi->vtbl_slots; i++) {
/*
* Since the @ubi->device is locked, and we are not going to
* change @ubi->volumes, we do not have to lock
* @ubi->volumes_lock.
*/
if (!ubi->volumes[i])
continue;
ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
if (nb)
nb->notifier_call(nb, ntype, &nt);
else
blocking_notifier_call_chain(&ubi_notifiers, ntype,
&nt);
count += 1;
}
mutex_unlock(&ubi->device_mutex);
return count;
}
/**
* ubi_enumerate_volumes - send "add" notification for all existing volumes.
* @nb: the notifier to call
*
* This function walks all UBI devices and volumes and sends the
* %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
* registered notifiers are called, otherwise only the @nb notifier is called.
* Returns the number of sent notifications.
*/
int ubi_enumerate_volumes(struct notifier_block *nb)
{
int i, count = 0;
/*
* Since the @ubi_devices_mutex is locked, and we are not going to
* change @ubi_devices, we do not have to lock @ubi_devices_lock.
*/
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
if (!ubi)
continue;
count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
}
return count;
}
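/*
 * A minimal sketch of a subscriber callback as invoked by ubi_notify_all()
 * above (hypothetical names; the registration helper itself lives elsewhere
 * in UBI). The payload passed to the notifier is a struct ubi_notification
 * carrying both the device info (.di) and the volume info (.vi).
 */
static int example_vol_notify(struct notifier_block *nb, unsigned long ntype,
			      void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;
	if (ntype == UBI_VOLUME_ADDED)
		pr_info("ubi%d: volume %d added\n", nt->di.ubi_num, nt->vi.vol_id);
	return NOTIFY_OK;
}
static struct notifier_block example_nb = {
	.notifier_call = example_vol_notify,
};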
/**
* ubi_get_device - get UBI device.
* @ubi_num: UBI device number
*
* This function returns UBI device description object for UBI device number
* @ubi_num, or %NULL if the device does not exist. This function increases the
* device reference count to prevent removal of the device. In other words, the
* device cannot be removed if its reference count is not zero.
*/
struct ubi_device *ubi_get_device(int ubi_num)
{
struct ubi_device *ubi;
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
}
spin_unlock(&ubi_devices_lock);
return ubi;
}
/**
* ubi_put_device - drop an UBI device reference.
* @ubi: UBI device description object
*/
void ubi_put_device(struct ubi_device *ubi)
{
spin_lock(&ubi_devices_lock);
ubi->ref_count -= 1;
put_device(&ubi->dev);
spin_unlock(&ubi_devices_lock);
}
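/*
 * A minimal sketch (hypothetical caller, device number 0 chosen only for
 * illustration): every successful ubi_get_device() must be balanced by a
 * ubi_put_device(), otherwise the reference count never drops back and the
 * device can never be detached.
 */
static void example_touch_ubi0(void)
{
	struct ubi_device *ubi = ubi_get_device(0);
	if (!ubi)
		return;		/* ubi0 does not exist */
	pr_info("%s has %d PEBs\n", ubi->ubi_name, ubi->peb_count);
	ubi_put_device(ubi);
}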
/**
* ubi_get_by_major - get UBI device by character device major number.
* @major: major number
*
* This function is similar to 'ubi_get_device()', but it searches the device
* by its major number.
*/
struct ubi_device *ubi_get_by_major(int major)
{
int i;
struct ubi_device *ubi;
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
spin_unlock(&ubi_devices_lock);
return ubi;
}
}
spin_unlock(&ubi_devices_lock);
return NULL;
}
/**
* ubi_major2num - get UBI device number by character device major number.
* @major: major number
*
* This function searches for the UBI device number by its major number. If the
* UBI device was not found, this function returns -ENODEV, otherwise the UBI device
* number is returned.
*/
int ubi_major2num(int major)
{
int i, ubi_num = -ENODEV;
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
if (ubi && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
}
spin_unlock(&ubi_devices_lock);
return ubi_num;
}
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct ubi_device *ubi;
/*
* The below code looks weird, but it actually makes sense. We get the
* UBI device reference from the contained 'struct ubi_device'. But it
* is unclear if the device was removed or not yet. Indeed, if the
* device was removed before we increased its reference count,
* 'ubi_get_device()' will return %NULL and we fail with -ENODEV.
*
* Remember, 'struct ubi_device' is freed in the release function, so
* we still can use 'ubi->ubi_num'.
*/
ubi = container_of(dev, struct ubi_device, dev);
ubi = ubi_get_device(ubi->ubi_num);
if (!ubi)
return -ENODEV;
if (attr == &dev_eraseblock_size)
ret = sprintf(buf, "%d\n", ubi->leb_size);
else if (attr == &dev_avail_eraseblocks)
ret = sprintf(buf, "%d\n", ubi->avail_pebs);
else if (attr == &dev_total_eraseblocks)
ret = sprintf(buf, "%d\n", ubi->good_peb_count);
else if (attr == &dev_volumes_count)
ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
ret = sprintf(buf, "%d\n", ubi->max_ec);
else if (attr == &dev_reserved_for_bad)
ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
else if (attr == &dev_max_vol_count)
ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
else if (attr == &dev_min_io_size)
ret = sprintf(buf, "%d\n", ubi->min_io_size);
else if (attr == &dev_bgt_enabled)
ret = sprintf(buf, "%d\n", ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = sprintf(buf, "%d\n", ubi->mtd->index);
else
ret = -EINVAL;
ubi_put_device(ubi);
return ret;
}
static void dev_release(struct device *dev)
{
struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
kfree(ubi);
}
/**
* ubi_sysfs_init - initialize sysfs for an UBI device.
* @ubi: UBI device description object
* @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
* taken
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
int err;
ubi->dev.release = dev_release;
ubi->dev.devt = ubi->cdev.dev;
ubi->dev.class = ubi_class;
dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
err = device_register(&ubi->dev);
if (err)
return err;
*ref = 1;
err = device_create_file(&ubi->dev, &dev_eraseblock_size);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_volumes_count);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_max_ec);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_bad_peb_count);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_max_vol_count);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_min_io_size);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_bgt_enabled);
if (err)
return err;
err = device_create_file(&ubi->dev, &dev_mtd_num);
return err;
}
/**
* ubi_sysfs_close - close sysfs for an UBI device.
* @ubi: UBI device description object
*/
static void ubi_sysfs_close(struct ubi_device *ubi)
{
device_remove_file(&ubi->dev, &dev_mtd_num);
device_remove_file(&ubi->dev, &dev_bgt_enabled);
device_remove_file(&ubi->dev, &dev_min_io_size);
device_remove_file(&ubi->dev, &dev_max_vol_count);
device_remove_file(&ubi->dev, &dev_bad_peb_count);
device_remove_file(&ubi->dev, &dev_reserved_for_bad);
device_remove_file(&ubi->dev, &dev_max_ec);
device_remove_file(&ubi->dev, &dev_volumes_count);
device_remove_file(&ubi->dev, &dev_total_eraseblocks);
device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
device_remove_file(&ubi->dev, &dev_eraseblock_size);
device_unregister(&ubi->dev);
}
/**
* kill_volumes - destroy all user volumes.
* @ubi: UBI device description object
*/
static void kill_volumes(struct ubi_device *ubi)
{
int i;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i])
ubi_free_volume(ubi, ubi->volumes[i]);
}
/**
* uif_init - initialize user interfaces for an UBI device.
* @ubi: UBI device description object
* @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
* taken, otherwise set to %0
*
* This function initializes various user interfaces for an UBI device. If the
* initialization fails at an early stage, this function frees all the
* resources it allocated, returns an error, and @ref is set to %0. However,
* if the initialization fails after the UBI device was registered in the
* driver core subsystem, this function takes a reference to @ubi->dev, because
* otherwise the release function ('dev_release()') would free the whole @ubi
* object. The @ref argument is set to %1 in this case. The caller has to put
* this reference.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int uif_init(struct ubi_device *ubi, int *ref)
{
int i, err;
dev_t dev;
*ref = 0;
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
/*
* Major numbers for the UBI character devices are allocated
* dynamically. Major numbers of volume character devices are
* equivalent to ones of the corresponding UBI character device. Minor
* numbers of UBI character devices are 0, while minor numbers of
* volume character devices start from 1. Thus, we allocate one major
* number and ubi->vtbl_slots + 1 minor numbers.
*/
err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
if (err) {
ubi_err("cannot register UBI character devices");
return err;
}
ubi_assert(MINOR(dev) == 0);
cdev_init(&ubi->cdev, &ubi_cdev_operations);
dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
ubi->cdev.owner = THIS_MODULE;
err = cdev_add(&ubi->cdev, dev, 1);
if (err) {
ubi_err("cannot add character device");
goto out_unreg;
}
err = ubi_sysfs_init(ubi, ref);
if (err)
goto out_sysfs;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i]) {
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err("cannot add volume %d", i);
goto out_volumes;
}
}
return 0;
out_volumes:
kill_volumes(ubi);
out_sysfs:
if (*ref)
get_device(&ubi->dev);
ubi_sysfs_close(ubi);
cdev_del(&ubi->cdev);
out_unreg:
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
return err;
}
/**
* uif_close - close user interfaces for an UBI device.
* @ubi: UBI device description object
*
* Note, since this function un-registers UBI volume device objects (@vol->dev),
* the memory allocated for the volumes is freed as well (in the release
* function).
*/
static void uif_close(struct ubi_device *ubi)
{
kill_volumes(ubi);
ubi_sysfs_close(ubi);
cdev_del(&ubi->cdev);
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
/**
* free_internal_volumes - free internal volumes.
* @ubi: UBI device description object
*/
static void free_internal_volumes(struct ubi_device *ubi)
{
int i;
for (i = ubi->vtbl_slots;
i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
kfree(ubi->volumes[i]->eba_tbl);
kfree(ubi->volumes[i]);
}
}
/**
* attach_by_scanning - attach an MTD device using scanning method.
* @ubi: UBI device descriptor
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*
* Note, currently this is the only method to attach UBI devices. Hopefully in
* the future we'll have more scalable attaching methods and avoid full media
* scanning. But even in this case scanning will be needed as a fall-back
* attaching method if there are some on-flash table corruptions.
*/
static int attach_by_scanning(struct ubi_device *ubi)
{
int err;
struct ubi_scan_info *si;
si = ubi_scan(ubi);
if (IS_ERR(si))
return PTR_ERR(si);
ubi->bad_peb_count = si->bad_peb_count;
ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
ubi->corr_peb_count = si->corr_peb_count;
ubi->max_ec = si->max_ec;
ubi->mean_ec = si->mean_ec;
ubi_msg("max. sequence number: %llu", si->max_sqnum);
err = ubi_read_volume_table(ubi, si);
if (err)
goto out_si;
err = ubi_wl_init_scan(ubi, si);
if (err)
goto out_vtbl;
err = ubi_eba_init_scan(ubi, si);
if (err)
goto out_wl;
ubi_scan_destroy_si(si);
return 0;
out_wl:
ubi_wl_close(ubi);
out_vtbl:
free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_si:
ubi_scan_destroy_si(si);
return err;
}
/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
* o EC header is always at offset zero - this cannot be changed;
* o VID header starts just after the EC header at the closest address
* aligned to @io->hdrs_min_io_size;
* o data starts just after the VID header at the closest address aligned to
* @io->min_io_size
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int io_init(struct ubi_device *ubi)
{
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
* may have different eraseblock size and other
* characteristics. It looks like mostly multi-region flashes
* have one "main" region and one or more small regions to
* store boot loader code or boot parameters or whatever. I
* guess we should just pick the largest region. But this is
* not implemented.
*/
ubi_err("multiple regions, not implemented");
return -EINVAL;
}
if (ubi->vid_hdr_offset < 0)
return -EINVAL;
/*
* Note, in this implementation we support MTD devices with 0x7FFFFFFF
* physical eraseblocks maximum.
*/
ubi->peb_size = ubi->mtd->erasesize;
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
ubi->bad_allowed = 1;
if (ubi->mtd->type == MTD_NORFLASH) {
ubi_assert(ubi->mtd->writesize == 1);
ubi->nor_flash = 1;
}
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
/*
* Make sure minimal I/O unit is power of 2. Note, there is no
* fundamental reason for this assumption. It is just an optimization
* which allows us to avoid costly division operations.
*/
if (!is_power_of_2(ubi->min_io_size)) {
ubi_err("min. I/O unit (%d) is not power of 2",
ubi->min_io_size);
return -EINVAL;
}
ubi_assert(ubi->hdrs_min_io_size > 0);
ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
ubi->max_write_size = ubi->mtd->writebufsize;
/*
* Maximum write size has to be greater than or equal to the min. I/O
* size, and be a multiple of the min. I/O size.
*/
if (ubi->max_write_size < ubi->min_io_size ||
ubi->max_write_size % ubi->min_io_size ||
!is_power_of_2(ubi->max_write_size)) {
ubi_err("bad write buffer size %d for %d min. I/O unit",
ubi->max_write_size, ubi->min_io_size);
return -EINVAL;
}
/* Calculate default aligned sizes of EC and VID headers */
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
dbg_msg("min_io_size %d", ubi->min_io_size);
dbg_msg("max_write_size %d", ubi->max_write_size);
dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
ubi->ec_hdr_alsize;
else {
ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
~(ubi->hdrs_min_io_size - 1);
ubi->vid_hdr_shift = ubi->vid_hdr_offset -
ubi->vid_hdr_aloffset;
}
/* Similar for the data offset */
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
dbg_msg("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
ubi_err("unaligned VID header shift %d",
ubi->vid_hdr_shift);
return -EINVAL;
}
/* Check sanity */
if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
ubi->leb_start & (ubi->min_io_size - 1)) {
ubi_err("bad VID header (%d) or data offsets (%d)",
ubi->vid_hdr_offset, ubi->leb_start);
return -EINVAL;
}
/*
* Set maximum amount of physical erroneous eraseblocks to be 10%.
* Erroneous PEB are those which have read errors.
*/
ubi->max_erroneous = ubi->peb_count / 10;
if (ubi->max_erroneous < 16)
ubi->max_erroneous = 16;
dbg_msg("max_erroneous %d", ubi->max_erroneous);
/*
* It may happen that EC and VID headers are situated in one minimal
* I/O unit. In this case we can only accept this UBI image in
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
ubi_warn("EC and VID headers are in the same minimal I/O unit, "
"switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
ubi_msg("MTD device %d is write-protected, attach in "
"read-only mode", ubi->mtd->index);
ubi->ro_mode = 1;
}
ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
ubi->peb_size, ubi->peb_size >> 10);
ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
if (ubi->hdrs_min_io_size != ubi->min_io_size)
ubi_msg("sub-page size: %d",
ubi->hdrs_min_io_size);
ubi_msg("VID header offset: %d (aligned %d)",
ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
ubi_msg("data offset: %d", ubi->leb_start);
/*
* Note, ideally, we have to initialize ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
* each physical eraseblock. So, we leave ubi->bad_peb_count
* uninitialized and initialize it after scanning.
*/
return 0;
}
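/*
 * A worked example for the offset logic above (illustrative numbers, chosen
 * to match a common 2 KiB page NAND with 512-byte sub-pages):
 *   min_io_size = 2048, hdrs_min_io_size = 512
 *   ec_hdr_alsize   = ALIGN(64, 512)        = 512
 *   vid_hdr_offset  = vid_hdr_aloffset      = 512 (default case, shift = 0)
 *   leb_start       = ALIGN(512 + 64, 2048) = 2048
 * so each PEB loses one minimal I/O unit to headers and
 * leb_size = peb_size - 2048.
 */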
/**
* autoresize - re-size the volume which has the "auto-resize" flag set.
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
* This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
*/
static int autoresize(struct ubi_device *ubi, int vol_id)
{
struct ubi_volume_desc desc;
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
/*
* Clear the auto-resize flag in the volume in-memory copy of the
* volume table, and 'ubi_resize_volume()' will propagate this change
* to the flash.
*/
ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
if (ubi->avail_pebs == 0) {
struct ubi_vtbl_record vtbl_rec;
/*
* No available PEBs to re-size the volume, clear the flag on
* flash and exit.
*/
memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
sizeof(struct ubi_vtbl_record));
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
ubi_err("cannot clean auto-resize flag for volume %d",
vol_id);
} else {
desc.vol = vol;
err = ubi_resize_volume(&desc,
old_reserved_pebs + ubi->avail_pebs);
if (err)
ubi_err("cannot auto-resize volume %d", vol_id);
}
if (err)
return err;
ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
vol->name, old_reserved_pebs, vol->reserved_pebs);
return 0;
}
/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
*
* This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
* which case this function finds a vacant device number and assigns it
* automatically. Returns the new UBI device number in case of success and a
* negative error code in case of failure.
*
* Note, the invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
struct ubi_device *ubi;
int i, err, ref = 0;
/*
* Check if we already have the same MTD device attached.
*
* Note, this function assumes that UBI devices creations and deletions
* are serialized, so it does not take the &ubi_devices_lock.
*/
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
dbg_err("mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
}
/*
* Make sure this MTD device is not emulated on top of an UBI volume
* already. Generally such recursion works fine, but it causes problems:
* for example, the UBI module takes a reference to itself
* by attaching (and thus, opening) the emulated MTD device. This
* results in inability to unload the module. And in general it makes
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
ubi_err("refuse attaching mtd%d - it is already emulated on "
"top of UBI", mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
dbg_err("only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
} else {
if (ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
dbg_err("ubi%d already exists", ubi_num);
return -EEXIST;
}
}
ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
if (!ubi)
return -ENOMEM;
ubi->mtd = mtd;
ubi->ubi_num = ubi_num;
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
err = io_init(ubi);
if (err)
goto out_free;
err = -ENOMEM;
ubi->peb_buf1 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf1)
goto out_free;
ubi->peb_buf2 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf2)
goto out_free;
err = attach_by_scanning(ubi);
if (err) {
dbg_err("failed to attach by scanning, error %d", err);
goto out_free;
}
if (ubi->autoresize_vol_id != -1) {
err = autoresize(ubi, ubi->autoresize_vol_id);
if (err)
goto out_detach;
}
err = uif_init(ubi, &ref);
if (err)
goto out_detach;
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
err);
goto out_uif;
}
ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
ubi_msg("MTD device name: \"%s\"", mtd->name);
ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
ubi_msg("number of user volumes: %d",
ubi->vol_count - UBI_INT_VOL_COUNT);
ubi_msg("available PEBs: %d", ubi->avail_pebs);
ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
ubi_msg("number of PEBs reserved for bad PEB handling: %d",
ubi->beb_rsvd_pebs);
ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
ubi_msg("image sequence number: %d", ubi->image_seq);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
* checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
*/
spin_lock(&ubi->wl_lock);
ubi->thread_enabled = 1;
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
out_uif:
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
if (ref)
put_device(&ubi->dev);
else
kfree(ubi);
return err;
}
/**
* ubi_detach_mtd_dev - detach an MTD device.
* @ubi_num: UBI device number to detach from
* @anyway: detach MTD even if device reference count is not zero
*
* This function destroys an UBI device number @ubi_num and detaches the
* underlying MTD device. Returns zero in case of success and %-EBUSY if the
* UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
* exist.
*
* Note, the invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
struct ubi_device *ubi;
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -EINVAL;
spin_lock(&ubi_devices_lock);
put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
spin_unlock(&ubi_devices_lock);
return -EBUSY;
}
/* This may only happen if there is a bug */
ubi_err("%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
/*
* Get a reference to the device in order to prevent 'dev_release()'
* from freeing the @ubi object.
*/
get_device(&ubi->dev);
uif_close(ubi);
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
}
/**
* open_mtd_by_chdev - open an MTD device by its character device node path.
* @mtd_dev: MTD character device node path
*
* This helper function opens an MTD device by its character node device path.
* Returns MTD device description object in case of success and a negative
* error code in case of failure.
*/
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
int err, major, minor, mode;
struct path path;
/* Probably this is an MTD character device node path */
err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
if (err)
return ERR_PTR(err);
/* MTD device number is defined by the major / minor numbers */
major = imajor(path.dentry->d_inode);
minor = iminor(path.dentry->d_inode);
mode = path.dentry->d_inode->i_mode;
path_put(&path);
if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
return ERR_PTR(-EINVAL);
if (minor & 1)
/*
* We just do not think support for the "/dev/mtdrX" devices is
* needed, so we do not support them to avoid doing extra work.
*/
return ERR_PTR(-EINVAL);
return get_mtd_device(NULL, minor / 2);
}
/**
* open_mtd_device - open MTD device by name, character device path, or number.
* @mtd_dev: name, character device node path, or MTD device device number
*
* This function tries to open an MTD device described by the @mtd_dev string,
* which is first treated as an ASCII MTD device number; if that fails, it is
* treated as an MTD device name, and if that also fails, it is treated
* as an MTD character device node path. Returns the MTD device description object in
* case of success and a negative error code in case of failure.
*/
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
struct mtd_info *mtd;
int mtd_num;
char *endp;
mtd_num = simple_strtoul(mtd_dev, &endp, 0);
if (*endp != '\0' || mtd_dev == endp) {
/*
* This does not look like an ASCII integer, probably this is
* MTD device name.
*/
mtd = get_mtd_device_nm(mtd_dev);
if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
/* Probably this is an MTD character device node path */
mtd = open_mtd_by_chdev(mtd_dev);
} else
mtd = get_mtd_device(NULL, mtd_num);
return mtd;
}
static int __init ubi_init(void)
{
int err, i, k;
/* Ensure that EC and VID headers have correct size */
BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
if (mtd_devs > UBI_MAX_DEVICES) {
ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
return -EINVAL;
}
/* Create base sysfs directory and sysfs files */
ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
if (IS_ERR(ubi_class)) {
err = PTR_ERR(ubi_class);
ubi_err("cannot create UBI class");
goto out;
}
err = class_create_file(ubi_class, &ubi_version);
if (err) {
ubi_err("cannot create sysfs file");
goto out_class;
}
err = misc_register(&ubi_ctrl_cdev);
if (err) {
ubi_err("cannot register device");
goto out_version;
}
ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
0, 0, NULL);
if (!ubi_wl_entry_slab)
goto out_dev_unreg;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
struct mtd_dev_param *p = &mtd_dev_param[i];
struct mtd_info *mtd;
cond_resched();
mtd = open_mtd_device(p->name);
if (IS_ERR(mtd)) {
err = PTR_ERR(mtd);
goto out_detach;
}
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
p->vid_hdr_offs);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
ubi_err("cannot attach mtd%d", mtd->index);
put_mtd_device(mtd);
/*
* Originally UBI stopped initializing on any error.
* However, later on it was found out that this
* behavior is not very good when UBI is compiled into
* the kernel and the MTD devices to attach are passed
* through the command line. Indeed, a UBI failure
* stopped the whole boot sequence.
*
* To fix this, we changed the behavior for the
* non-module case, but preserved the old behavior for
* the module case, just for compatibility. This is a
* little inconsistent, though.
*/
if (ubi_is_module())
goto out_detach;
}
}
return 0;
out_detach:
for (k = 0; k < i; k++)
if (ubi_devices[k]) {
mutex_lock(&ubi_devices_mutex);
ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
misc_deregister(&ubi_ctrl_cdev);
out_version:
class_remove_file(ubi_class, &ubi_version);
out_class:
class_destroy(ubi_class);
out:
ubi_err("UBI error: cannot initialize UBI, error %d", err);
return err;
}
module_init(ubi_init);
static void __exit ubi_exit(void)
{
int i;
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
mutex_lock(&ubi_devices_mutex);
ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
kmem_cache_destroy(ubi_wl_entry_slab);
misc_deregister(&ubi_ctrl_cdev);
class_remove_file(ubi_class, &ubi_version);
class_destroy(ubi_class);
}
module_exit(ubi_exit);
/**
* bytes_str_to_int - convert a number of bytes string into an integer.
* @str: the string to convert
*
* This function returns the resulting positive integer in case of success and
* a negative error code in case of failure.
*/
static int __init bytes_str_to_int(const char *str)
{
char *endp;
unsigned long result;
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
str);
return -EINVAL;
}
switch (*endp) {
case 'G':
result *= 1024;
/* fall through */
case 'M':
result *= 1024;
/* fall through */
case 'K':
result *= 1024;
if (endp[1] == 'i' && endp[2] == 'B')
endp += 2;
/* fall through */
case '\0':
break;
default:
printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
str);
return -EINVAL;
}
return result;
}
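/*
* Worked examples (illustrative only, not part of the original driver) of
* how the fall-through suffix handling above accumulates the result:
*
*   bytes_str_to_int("512")  -> 512
*   bytes_str_to_int("2KiB") -> 2 * 1024 = 2048
*   bytes_str_to_int("1M")   -> 1 * 1024 * 1024 = 1048576
*   bytes_str_to_int("1G")   -> 1 * 1024 * 1024 * 1024 = 1073741824
*   bytes_str_to_int("16Q")  -> -EINVAL (unknown suffix)
*/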
/**
* ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
* @val: the parameter value to parse
* @kp: not used
*
* This function returns zero in case of success and a negative error code in
* case of error.
*/
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
int i, len;
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
char *tokens[2] = {NULL, NULL};
if (!val)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
"max. is %d\n", val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
"ignored\n");
return 0;
}
strcpy(buf, val);
/* Get rid of the final newline */
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
for (i = 0; i < 2; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
val);
return -EINVAL;
}
p = &mtd_dev_param[mtd_devs];
strcpy(&p->name[0], tokens[0]);
if (tokens[1])
p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
if (p->vid_hdr_offs < 0)
return p->vid_hdr_offs;
mtd_devs += 1;
return 0;
}
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
"mtd=<name|num|path>[,<vid_hdr_offs>].\n"
"Multiple \"mtd\" parameters may be specified.\n"
"MTD devices may be specified by their number, name, or "
"path to the MTD character device node.\n"
"Optional \"vid_hdr_offs\" parameter specifies UBI VID "
"header position to be used by UBI.\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device "
"/dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device "
"with name \"content\" using VID header offset 1984, and "
"MTD device number 4 with default VID header offset.");
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jmztaylor/android_kernel_htc_a3ul_old | arch/arm/mach-msm/board-sapphire-panel.c | 3627 | 33725 | /* linux/arch/arm/mach-msm/board-sapphire-panel.c
* Copyright (C) 2007-2009 HTC Corporation.
* Author: Thomas Tsai <thomas_tsai@htc.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/leds.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <mach/msm_fb.h>
#include <mach/vreg.h>
#include <mach/htc_pwrsink.h>
#include <mach/proc_comm.h>
#include "gpio_chip.h"
#include "board-sapphire.h"
#include "devices.h"
#define DEBUG_SAPPHIRE_PANEL 0
#define userid 0xD10
#define VSYNC_GPIO 97
enum sapphire_panel_type {
SAPPHIRE_PANEL_SHARP = 0,
SAPPHIRE_PANEL_TOPPOLY,
NUM_OF_SAPPHIRE_PANELS,
};
static int g_panel_id = -1;
static int g_panel_inited = 0;
#define SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS 132
#define GOOGLE_DEFAULT_BACKLIGHT_BRIGHTNESS 102
#define SDBB SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS
#define GDBB GOOGLE_DEFAULT_BACKLIGHT_BRIGHTNESS
static int sapphire_backlight_off;
static int sapphire_backlight_brightness =
SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS;
static uint8_t sapphire_backlight_last_level = 33;
static DEFINE_MUTEX(sapphire_backlight_lock);
/* Divide dimming level into 12 sections, and restrict maximum level to 27 */
#define DIMMING_STEPS 12
static unsigned dimming_levels[NUM_OF_SAPPHIRE_PANELS][DIMMING_STEPS] = {
{0, 1, 2, 3, 6, 9, 11, 13, 16, 19, 22, 25}, /* Sharp */
{0, 1, 2, 4, 7, 10, 13, 15, 18, 21, 24, 27}, /* Toppoly */
};
static unsigned pwrsink_percents[] = {0, 6, 8, 15, 26, 34, 46, 54, 65, 77, 87,
100};
static void sapphire_set_backlight_level(uint8_t level)
{
unsigned dimming_factor = 255/DIMMING_STEPS + 1;
int index, new_level;
unsigned percent;
unsigned long flags;
int i = 0;
/* Non-linear transform for the difference between two
* kind of default backlight settings.
*/
new_level = level <= GDBB ?
level * SDBB / GDBB : (SDBB + (level - GDBB) * (255 - SDBB) / (255 - GDBB));
index = new_level / dimming_factor;
#if DEBUG_SAPPHIRE_PANEL
printk(KERN_INFO "level=%d, new level=%d, dimming_levels[%d]=%d\n",
level, new_level, index, dimming_levels[g_panel_id][index]);
#endif
percent = pwrsink_percents[index];
level = dimming_levels[g_panel_id][index];
if (sapphire_backlight_last_level == level)
return;
if (level == 0) {
gpio_set_value(27, 0);
msleep(2);
} else {
local_irq_save(flags);
if (sapphire_backlight_last_level == 0) {
gpio_set_value(27, 1);
udelay(40);
sapphire_backlight_last_level = 33;
}
i = (sapphire_backlight_last_level - level + 33) % 33;
while (i-- > 0) {
gpio_set_value(27, 0);
udelay(1);
gpio_set_value(27, 1);
udelay(1);
}
local_irq_restore(flags);
}
sapphire_backlight_last_level = level;
htc_pwrsink_set(PWRSINK_BACKLIGHT, percent);
}
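/*
* Worked example (illustrative only, not part of the original code) of the
* non-linear transform above, assuming the Sharp panel (g_panel_id == 0):
*
*   dimming_factor = 255 / 12 + 1 = 22
*   level = 102 (GDBB) -> new_level = 132, index = 6,
*                         dimming_levels[0][6] = 11, pwrsink percent = 46
*   level = 255        -> new_level = 255, index = 11,
*                         dimming_levels[0][11] = 25, pwrsink percent = 100
*/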
#define MDDI_CLIENT_CORE_BASE 0x108000
#define LCD_CONTROL_BLOCK_BASE 0x110000
#define SPI_BLOCK_BASE 0x120000
#define I2C_BLOCK_BASE 0x130000
#define PWM_BLOCK_BASE 0x140000
#define GPIO_BLOCK_BASE 0x150000
#define SYSTEM_BLOCK1_BASE 0x160000
#define SYSTEM_BLOCK2_BASE 0x170000
#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
#define SYSCLKENA (MDDI_CLIENT_CORE_BASE|0x2C)
#define PWM0OFF (PWM_BLOCK_BASE|0x1C)
#define V_VDDE2E_VDD2_GPIO 0
#define V_VDDE2E_VDD2_GPIO_5M 89
#define MDDI_RST_N 82
#define MDDICAP0 (MDDI_CLIENT_CORE_BASE|0x00)
#define MDDICAP1 (MDDI_CLIENT_CORE_BASE|0x04)
#define MDDICAP2 (MDDI_CLIENT_CORE_BASE|0x08)
#define MDDICAP3 (MDDI_CLIENT_CORE_BASE|0x0C)
#define MDCAPCHG (MDDI_CLIENT_CORE_BASE|0x10)
#define MDCRCERC (MDDI_CLIENT_CORE_BASE|0x14)
#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18)
#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C)
#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20)
#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
#define DPRUN (MDDI_CLIENT_CORE_BASE|0x28)
#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C)
#define TESTMODE (MDDI_CLIENT_CORE_BASE|0x30)
#define FIFOMONI (MDDI_CLIENT_CORE_BASE|0x34)
#define INTMONI (MDDI_CLIENT_CORE_BASE|0x38)
#define MDIOBIST (MDDI_CLIENT_CORE_BASE|0x3C)
#define MDIOPSET (MDDI_CLIENT_CORE_BASE|0x40)
#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44)
#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48)
#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C)
#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50)
#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54)
#define SRST (LCD_CONTROL_BLOCK_BASE|0x00)
#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04)
#define START (LCD_CONTROL_BLOCK_BASE|0x08)
#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C)
#define CMN (LCD_CONTROL_BLOCK_BASE|0x10)
#define GAMMA (LCD_CONTROL_BLOCK_BASE|0x14)
#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C)
#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
#define HDE_LEFT (LCD_CONTROL_BLOCK_BASE|0x24)
#define VDE_TOP (LCD_CONTROL_BLOCK_BASE|0x28)
#define PXL (LCD_CONTROL_BLOCK_BASE|0x30)
#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
#define HSW (LCD_CONTROL_BLOCK_BASE|0x38)
#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40)
#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44)
#define VSW (LCD_CONTROL_BLOCK_BASE|0x48)
#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C)
#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50)
#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
#define WSYN_DLY (LCD_CONTROL_BLOCK_BASE|0x58)
#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60)
#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64)
#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68)
#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C)
#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70)
#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74)
#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78)
#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C)
#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80)
#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84)
#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88)
#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C)
#define SSICTL (SPI_BLOCK_BASE|0x00)
#define SSITIME (SPI_BLOCK_BASE|0x04)
#define SSITX (SPI_BLOCK_BASE|0x08)
#define SSIRX (SPI_BLOCK_BASE|0x0C)
#define SSIINTC (SPI_BLOCK_BASE|0x10)
#define SSIINTS (SPI_BLOCK_BASE|0x14)
#define SSIDBG1 (SPI_BLOCK_BASE|0x18)
#define SSIDBG2 (SPI_BLOCK_BASE|0x1C)
#define SSIID (SPI_BLOCK_BASE|0x20)
#define WKREQ (SYSTEM_BLOCK1_BASE|0x00)
#define CLKENB (SYSTEM_BLOCK1_BASE|0x04)
#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08)
#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C)
#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00)
#define GPIODATA (GPIO_BLOCK_BASE|0x00)
#define GPIODIR (GPIO_BLOCK_BASE|0x04)
#define GPIOIS (GPIO_BLOCK_BASE|0x08)
#define GPIOIBE (GPIO_BLOCK_BASE|0x0C)
#define GPIOIEV (GPIO_BLOCK_BASE|0x10)
#define GPIOIE (GPIO_BLOCK_BASE|0x14)
#define GPIORIS (GPIO_BLOCK_BASE|0x18)
#define GPIOMIS (GPIO_BLOCK_BASE|0x1C)
#define GPIOIC (GPIO_BLOCK_BASE|0x20)
#define GPIOOMS (GPIO_BLOCK_BASE|0x24)
#define GPIOPC (GPIO_BLOCK_BASE|0x28)
#define GPIOID (GPIO_BLOCK_BASE|0x30)
#define SPI_WRITE(reg, val) \
{ SSITX, 0x00010000 | (((reg) & 0xff) << 8) | ((val) & 0xff) }, \
{ 0, 5 },
#define SPI_WRITE1(reg) \
{ SSITX, (reg) & 0xff }, \
{ 0, 5 },
struct mddi_table {
uint32_t reg;
uint32_t value;
};
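/*
* Illustrative expansion (not part of the original code): each SPI_WRITE()
* used in the tables below contributes two struct mddi_table entries, an
* SSITX register write followed by a 5 us delay pseudo-entry. For example,
*
*   SPI_WRITE(0x5f, 0x01)
*
* expands to
*
*   { SSITX, 0x00010000 | (0x5f << 8) | 0x01 },
*   { 0, 5 },
*/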
static struct mddi_table mddi_toshiba_init_table[] = {
{ DPSET0, 0x09e90046 },
{ DPSET1, 0x00000118 },
{ DPSUS, 0x00000000 },
{ DPRUN, 0x00000001 },
{ 1, 14 }, /* msleep 14 */
{ SYSCKENA, 0x00000001 },
/*{ CLKENB, 0x000000EF } */
{ CLKENB, 0x0000A1EF }, /* # SYS.CLKENB # Enable clocks for each module (without DCLK , i2cCLK) */
/*{ CLKENB, 0x000025CB }, Clock enable register */
{ GPIODATA, 0x02000200 }, /* # GPI .GPIODATA # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */
{ GPIODIR, 0x000030D }, /* 24D # GPI .GPIODIR # Select direction of GPIO port (0,2,3,6,9 output) */
{ GPIOSEL, 0/*0x00000173*/}, /* # SYS.GPIOSEL # GPIO port multiplexing control */
{ GPIOPC, 0x03C300C0 }, /* # GPI .GPIOPC # GPIO2,3 PD cut */
{ WKREQ, 0x00000000 }, /* # SYS.WKREQ # Wake-up request event is VSYNC alignment */
{ GPIOIBE, 0x000003FF },
{ GPIOIS, 0x00000000 },
{ GPIOIC, 0x000003FF },
{ GPIOIE, 0x00000000 },
{ GPIODATA, 0x00040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
{ 1, 1 }, /* msleep 1 */
{ GPIODATA, 0x02040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
{ DRAMPWR, 0x00000001 }, /* eDRAM power */
};
static struct mddi_table mddi_toshiba_panel_init_table[] = {
{ SRST, 0x00000003 }, /* FIFO/LCDC not reset */
{ PORT_ENB, 0x00000001 }, /* Enable sync. Port */
{ START, 0x00000000 }, /* To stop operation */
/*{ START, 0x00000001 }, To start operation */
{ PORT, 0x00000004 }, /* Polarity of VS/HS/DE. */
{ CMN, 0x00000000 },
{ GAMMA, 0x00000000 }, /* No Gamma correction */
{ INTFLG, 0x00000000 }, /* VSYNC interrupt flag clear/status */
{ INTMSK, 0x00000000 }, /* VSYNC interrupt mask is off. */
{ MPLFBUF, 0x00000000 }, /* Select frame buffer's base address. */
{ HDE_LEFT, 0x00000000 }, /* The value of HDE_LEFT. */
{ VDE_TOP, 0x00000000 }, /* The value of VDE_TOP. */
{ PXL, 0x00000001 }, /* 1. RGB666 */
/* 2. Data is valid from 1st frame of beginning. */
{ HDE_START, 0x00000006 }, /* HDE_START= 14 PCLK */
{ HDE_SIZE, 0x0000009F }, /* HDE_SIZE=320 PCLK */
{ HSW, 0x00000004 }, /* HSW= 10 PCLK */
{ VSW, 0x00000001 }, /* VSW=2 HCYCLE */
{ VDE_START, 0x00000003 }, /* VDE_START=4 HCYCLE */
{ VDE_SIZE, 0x000001DF }, /* VDE_SIZE=480 HCYCLE */
{ WAKEUP, 0x000001e2 }, /* Wakeup position in VSYNC mode. */
{ WSYN_DLY, 0x00000000 }, /* Wakeup position in VSIN mode. */
{ REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
{ CLKENB, 0x000025CB }, /* Clock enable register */
{ SSICTL, 0x00000170 }, /* SSI control register */
{ SSITIME, 0x00000250 }, /* SSI timing control register */
{ SSICTL, 0x00000172 }, /* SSI control register */
};
static struct mddi_table mddi_sharp_init_table[] = {
{ VCYCLE, 0x000001eb },
{ HCYCLE, 0x000000ae },
{ REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
{ GPIODATA, 0x00040000 }, /* GPIO2 low */
{ GPIODIR, 0x00000004 }, /* GPIO2 out */
{ 1, 1 }, /* msleep 1 */
{ GPIODATA, 0x00040004 }, /* GPIO2 high */
{ 1, 10 }, /* msleep 10 */
SPI_WRITE(0x5f, 0x01)
SPI_WRITE1(0x11)
{ 1, 200 }, /* msleep 200 */
SPI_WRITE1(0x29)
SPI_WRITE1(0xde)
{ START, 0x00000001 }, /* To start operation */
};
static struct mddi_table mddi_sharp_deinit_table[] = {
{ 1, 200 }, /* msleep 200 */
SPI_WRITE(0x10, 0x1)
{ 1, 100 }, /* msleep 100 */
{ GPIODATA, 0x00040004 }, /* GPIO2 high */
{ GPIODIR, 0x00000004 }, /* GPIO2 out */
{ GPIODATA, 0x00040000 }, /* GPIO2 low */
{ 1, 10 }, /* msleep 10 */
};
static struct mddi_table mddi_tpo_init_table[] = {
{ VCYCLE, 0x000001e5 },
{ HCYCLE, 0x000000ac },
{ REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
{ 0, 20 }, /* udelay 20 */
{ GPIODATA, 0x00000004 }, /* GPIO2 high */
{ GPIODIR, 0x00000004 }, /* GPIO2 out */
{ 0, 20 }, /* udelay 20 */
SPI_WRITE(0x08, 0x01)
{ 0, 500 }, /* udelay 500 */
SPI_WRITE(0x08, 0x00)
SPI_WRITE(0x02, 0x00)
SPI_WRITE(0x03, 0x04)
SPI_WRITE(0x04, 0x0e)
SPI_WRITE(0x09, 0x02)
SPI_WRITE(0x0b, 0x08)
SPI_WRITE(0x0c, 0x53)
SPI_WRITE(0x0d, 0x01)
SPI_WRITE(0x0e, 0xe0)
SPI_WRITE(0x0f, 0x01)
SPI_WRITE(0x10, 0x58)
SPI_WRITE(0x20, 0x1e)
SPI_WRITE(0x21, 0x0a)
SPI_WRITE(0x22, 0x0a)
SPI_WRITE(0x23, 0x1e)
SPI_WRITE(0x25, 0x32)
SPI_WRITE(0x26, 0x00)
SPI_WRITE(0x27, 0xac)
SPI_WRITE(0x29, 0x06)
SPI_WRITE(0x2a, 0xa4)
SPI_WRITE(0x2b, 0x45)
SPI_WRITE(0x2c, 0x45)
SPI_WRITE(0x2d, 0x15)
SPI_WRITE(0x2e, 0x5a)
SPI_WRITE(0x2f, 0xff)
SPI_WRITE(0x30, 0x6b)
SPI_WRITE(0x31, 0x0d)
SPI_WRITE(0x32, 0x48)
SPI_WRITE(0x33, 0x82)
SPI_WRITE(0x34, 0xbd)
SPI_WRITE(0x35, 0xe7)
SPI_WRITE(0x36, 0x18)
SPI_WRITE(0x37, 0x94)
SPI_WRITE(0x38, 0x01)
SPI_WRITE(0x39, 0x5d)
SPI_WRITE(0x3a, 0xae)
SPI_WRITE(0x3b, 0xff)
SPI_WRITE(0x07, 0x09)
{ 0, 10 }, /* udelay 10 */
{ START, 0x00000001 }, /* To start operation */
};
static struct mddi_table mddi_tpo_deinit_table[] = {
SPI_WRITE(0x07, 0x19)
{ START, 0x00000000 }, /* To stop operation */
{ GPIODATA, 0x00040004 }, /* GPIO2 high */
{ GPIODIR, 0x00000004 }, /* GPIO2 out */
{ GPIODATA, 0x00040000 }, /* GPIO2 low */
{ 0, 5 }, /* usleep 5 */
};
#define GPIOSEL_VWAKEINT (1U << 0)
#define INTMASK_VWAKEOUT (1U << 0)
static void sapphire_process_mddi_table(
struct msm_mddi_client_data *client_data,
const struct mddi_table *table,
size_t count)
{
int i;
for (i = 0; i < count; i++) {
uint32_t reg = table[i].reg;
uint32_t value = table[i].value;
if (reg == 0)
udelay(value);
else if (reg == 1)
msleep(value);
else
client_data->remote_write(client_data, value, reg);
}
}
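/*
* Illustrative note (not part of the original code): the reg field of a
* struct mddi_table entry doubles as an opcode, so the init/deinit tables
* above and below form a small command script:
*
*   { 0, 20 }      -> udelay(20)
*   { 1, 14 }      -> msleep(14)
*   { DPRUN, 0x1 } -> client_data->remote_write(client_data, 0x1, DPRUN)
*/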
static struct vreg *vreg_lcm_2v85;
static void sapphire_mddi_power_client(struct msm_mddi_client_data *client_data,
int on)
{
unsigned id, on_off;
#if DEBUG_SAPPHIRE_PANEL
printk(KERN_INFO "sapphire_mddi_client_power:%d\r\n", on);
#endif
if (on) {
on_off = 0;
id = PM_VREG_PDOWN_MDDI_ID;
msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
gpio_set_value(SAPPHIRE_MDDI_1V5_EN, 1);
mdelay(5); /* delay time >5ms and <10ms */
if (is_12pin_camera())
gpio_set_value(V_VDDE2E_VDD2_GPIO_5M, 1);
else
gpio_set_value(V_VDDE2E_VDD2_GPIO, 1);
gpio_set_value(SAPPHIRE_GPIO_MDDI_32K_EN, 1);
msleep(3);
id = PM_VREG_PDOWN_AUX_ID;
msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
vreg_enable(vreg_lcm_2v85);
msleep(3);
} else {
gpio_set_value(SAPPHIRE_GPIO_MDDI_32K_EN, 0);
gpio_set_value(MDDI_RST_N, 0);
msleep(10);
vreg_disable(vreg_lcm_2v85);
on_off = 1;
id = PM_VREG_PDOWN_AUX_ID;
msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
msleep(5);
if (is_12pin_camera())
gpio_set_value(V_VDDE2E_VDD2_GPIO_5M, 0);
else
gpio_set_value(V_VDDE2E_VDD2_GPIO, 0);
msleep(200);
gpio_set_value(SAPPHIRE_MDDI_1V5_EN, 0);
id = PM_VREG_PDOWN_MDDI_ID;
msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
}
}
static int sapphire_mddi_toshiba_client_init(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
int panel_id;
/* Set MDDI_RST_N according to the MDDI client respectively (it was
* originally set in sapphire_mddi_power_client())
*/
gpio_set_value(MDDI_RST_N, 1);
msleep(10);
client_data->auto_hibernate(client_data, 0);
sapphire_process_mddi_table(client_data, mddi_toshiba_init_table,
ARRAY_SIZE(mddi_toshiba_init_table));
client_data->auto_hibernate(client_data, 1);
g_panel_id = panel_id =
(client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
if (panel_id > 1) {
#if DEBUG_SAPPHIRE_PANEL
printk(KERN_ERR "unknown panel id at mddi_enable\n");
#endif
return -1;
}
return 0;
}
static int sapphire_mddi_toshiba_client_uninit(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
gpio_set_value(MDDI_RST_N, 0);
msleep(10);
return 0;
}
static int sapphire_mddi_panel_unblank(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
int panel_id, ret = 0;
sapphire_set_backlight_level(0);
client_data->auto_hibernate(client_data, 0);
sapphire_process_mddi_table(client_data, mddi_toshiba_panel_init_table,
ARRAY_SIZE(mddi_toshiba_panel_init_table));
panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
switch (panel_id) {
case 0:
#if DEBUG_SAPPHIRE_PANEL
printk(KERN_DEBUG "init sharp panel\n");
#endif
sapphire_process_mddi_table(client_data,
mddi_sharp_init_table,
ARRAY_SIZE(mddi_sharp_init_table));
break;
case 1:
#if DEBUG_SAPPHIRE_PANEL
printk(KERN_DEBUG "init tpo panel\n");
#endif
sapphire_process_mddi_table(client_data,
mddi_tpo_init_table,
ARRAY_SIZE(mddi_tpo_init_table));
break;
default:
printk(KERN_DEBUG "unknown panel_id: %d\n", panel_id);
ret = -1;
};
mutex_lock(&sapphire_backlight_lock);
sapphire_set_backlight_level(sapphire_backlight_brightness);
sapphire_backlight_off = 0;
mutex_unlock(&sapphire_backlight_lock);
client_data->auto_hibernate(client_data, 1);
/* reenable vsync */
client_data->remote_write(client_data, GPIOSEL_VWAKEINT,
GPIOSEL);
client_data->remote_write(client_data, INTMASK_VWAKEOUT,
INTMASK);
return ret;
}
static int sapphire_mddi_panel_blank(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
int panel_id, ret = 0;
panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
client_data->auto_hibernate(client_data, 0);
switch (panel_id) {
case 0:
printk(KERN_DEBUG "deinit sharp panel\n");
sapphire_process_mddi_table(client_data,
mddi_sharp_deinit_table,
ARRAY_SIZE(mddi_sharp_deinit_table));
break;
case 1:
printk(KERN_DEBUG "deinit tpo panel\n");
sapphire_process_mddi_table(client_data,
mddi_tpo_deinit_table,
ARRAY_SIZE(mddi_tpo_deinit_table));
break;
default:
printk(KERN_DEBUG "unknown panel_id: %d\n", panel_id);
ret = -1;
};
client_data->auto_hibernate(client_data, 1);
mutex_lock(&sapphire_backlight_lock);
sapphire_set_backlight_level(0);
sapphire_backlight_off = 1;
mutex_unlock(&sapphire_backlight_lock);
client_data->remote_write(client_data, 0, SYSCLKENA);
client_data->remote_write(client_data, 1, DPSUS);
return ret;
}
/* Initial sequence of sharp panel with Novatek NT35399 MDDI client */
static const struct mddi_table sharp2_init_table[] = {
{ 0x02A0, 0x00 },
{ 0x02A1, 0x00 },
{ 0x02A2, 0x3F },
{ 0x02A3, 0x01 },
{ 0x02B0, 0x00 },
{ 0x02B1, 0x00 },
{ 0x02B2, 0xDF },
{ 0x02B3, 0x01 },
{ 0x02D0, 0x00 },
{ 0x02D1, 0x00 },
{ 0x02D2, 0x00 },
{ 0x02D3, 0x00 },
{ 0x0350, 0x80 }, /* Set frame tearing effect(FTE) position */
{ 0x0351, 0x00 },
{ 0x0360, 0x30 },
{ 0x0361, 0xC1 },
{ 0x0362, 0x00 },
{ 0x0370, 0x00 },
{ 0x0371, 0xEF },
{ 0x0372, 0x01 },
{ 0x0B00, 0x10 },
{ 0x0B10, 0x00 },
{ 0x0B20, 0x22 },
{ 0x0B30, 0x46 },
{ 0x0B40, 0x07 },
{ 0x0B41, 0x1C },
{ 0x0B50, 0x0F },
{ 0x0B51, 0x7A },
{ 0x0B60, 0x16 },
{ 0x0B70, 0x0D },
{ 0x0B80, 0x04 },
{ 0x0B90, 0x07 },
{ 0x0BA0, 0x04 },
{ 0x0BA1, 0x86 },
{ 0x0BB0, 0xFF },
{ 0x0BB1, 0x01 },
{ 0x0BB2, 0xF7 },
{ 0x0BB3, 0x01 },
{ 0x0BC0, 0x00 },
{ 0x0BC1, 0x00 },
{ 0x0BC2, 0x00 },
{ 0x0BC3, 0x00 },
{ 0x0BE0, 0x01 },
{ 0x0BE1, 0x3F },
{ 0x0BF0, 0x03 },
{ 0x0C10, 0x02 },
{ 0x0C30, 0x22 },
{ 0x0C31, 0x20 },
{ 0x0C40, 0x48 },
{ 0x0C41, 0x06 },
{ 0xE00, 0x0028},
{ 0xE01, 0x002F},
{ 0xE02, 0x0032},
{ 0xE03, 0x000A},
{ 0xE04, 0x0023},
{ 0xE05, 0x0024},
{ 0xE06, 0x0022},
{ 0xE07, 0x0012},
{ 0xE08, 0x000D},
{ 0xE09, 0x0035},
{ 0xE0A, 0x000E},
{ 0xE0B, 0x001A},
{ 0xE0C, 0x003C},
{ 0xE0D, 0x003A},
{ 0xE0E, 0x0050},
{ 0xE0F, 0x0069},
{ 0xE10, 0x0006},
{ 0xE11, 0x001F},
{ 0xE12, 0x0035},
{ 0xE13, 0x0020},
{ 0xE14, 0x0043},
{ 0xE15, 0x0030},
{ 0xE16, 0x003C},
{ 0xE17, 0x0010},
{ 0xE18, 0x0009},
{ 0xE19, 0x0051},
{ 0xE1A, 0x001D},
{ 0xE1B, 0x003C},
{ 0xE1C, 0x0053},
{ 0xE1D, 0x0041},
{ 0xE1E, 0x0045},
{ 0xE1F, 0x004B},
{ 0xE20, 0x000A},
{ 0xE21, 0x0014},
{ 0xE22, 0x001C},
{ 0xE23, 0x0013},
{ 0xE24, 0x002E},
{ 0xE25, 0x0029},
{ 0xE26, 0x001B},
{ 0xE27, 0x0014},
{ 0xE28, 0x000E},
{ 0xE29, 0x0032},
{ 0xE2A, 0x000D},
{ 0xE2B, 0x001B},
{ 0xE2C, 0x0033},
{ 0xE2D, 0x0033},
{ 0xE2E, 0x005B},
{ 0xE2F, 0x0069},
{ 0xE30, 0x0006},
{ 0xE31, 0x0014},
{ 0xE32, 0x003D},
{ 0xE33, 0x0029},
{ 0xE34, 0x0042},
{ 0xE35, 0x0032},
{ 0xE36, 0x003F},
{ 0xE37, 0x000E},
{ 0xE38, 0x0008},
{ 0xE39, 0x0059},
{ 0xE3A, 0x0015},
{ 0xE3B, 0x002E},
{ 0xE3C, 0x0049},
{ 0xE3D, 0x0058},
{ 0xE3E, 0x0061},
{ 0xE3F, 0x006B},
{ 0xE40, 0x000A},
{ 0xE41, 0x001A},
{ 0xE42, 0x0022},
{ 0xE43, 0x0014},
{ 0xE44, 0x002F},
{ 0xE45, 0x002A},
{ 0xE46, 0x001A},
{ 0xE47, 0x0014},
{ 0xE48, 0x000E},
{ 0xE49, 0x002F},
{ 0xE4A, 0x000F},
{ 0xE4B, 0x001B},
{ 0xE4C, 0x0030},
{ 0xE4D, 0x002C},
{ 0xE4E, 0x0051},
{ 0xE4F, 0x0069},
{ 0xE50, 0x0006},
{ 0xE51, 0x001E},
{ 0xE52, 0x0043},
{ 0xE53, 0x002F},
{ 0xE54, 0x0043},
{ 0xE55, 0x0032},
{ 0xE56, 0x0043},
{ 0xE57, 0x000D},
{ 0xE58, 0x0008},
{ 0xE59, 0x0059},
{ 0xE5A, 0x0016},
{ 0xE5B, 0x0030},
{ 0xE5C, 0x004B},
{ 0xE5D, 0x0051},
{ 0xE5E, 0x005A},
{ 0xE5F, 0x006B},
{ 0x0290, 0x01 },
};
#undef TPO2_ONE_GAMMA
/* Initial sequence of TPO panel with Novatek NT35399 MDDI client */
static const struct mddi_table tpo2_init_table[] = {
/* Panel interface control */
{ 0xB30, 0x44 },
{ 0xB40, 0x00 },
{ 0xB41, 0x87 },
{ 0xB50, 0x06 },
{ 0xB51, 0x7B },
{ 0xB60, 0x0E },
{ 0xB70, 0x0F },
{ 0xB80, 0x03 },
{ 0xB90, 0x00 },
{ 0x350, 0x70 }, /* FTE is at line 0x70 */
/* Entry Mode */
{ 0x360, 0x30 },
{ 0x361, 0xC1 },
{ 0x362, 0x04 },
/* 0x2 for gray scale gamma correction, 0x12 for RGB gamma correction */
#ifdef TPO2_ONE_GAMMA
{ 0xB00, 0x02 },
#else
{ 0xB00, 0x12 },
#endif
/* Driver output control */
{ 0x371, 0xEF },
{ 0x372, 0x03 },
/* DCDC on glass control */
{ 0xC31, 0x10 },
{ 0xBA0, 0x00 },
{ 0xBA1, 0x86 },
/* VCOMH voltage control */
{ 0xC50, 0x3b },
/* Special function control */
{ 0xC10, 0x82 },
/* Power control */
{ 0xC40, 0x44 },
{ 0xC41, 0x02 },
/* Source output control */
{ 0xBE0, 0x01 },
{ 0xBE1, 0x00 },
/* Windows address setting */
{ 0x2A0, 0x00 },
{ 0x2A1, 0x00 },
{ 0x2A2, 0x3F },
{ 0x2A3, 0x01 },
{ 0x2B0, 0x00 },
{ 0x2B1, 0x00 },
{ 0x2B2, 0xDF },
{ 0x2B3, 0x01 },
/* RAM address setting */
{ 0x2D0, 0x00 },
{ 0x2D1, 0x00 },
{ 0x2D2, 0x00 },
{ 0x2D3, 0x00 },
{ 0xF20, 0x55 },
{ 0xF21, 0xAA },
{ 0xF22, 0x66 },
{ 0xF57, 0x45 },
/*
* The NT35399 provides either a gray-scale or an RGB gamma correction
* table, selected by register 0xB00 and by the tables below.
*/
#ifdef TPO2_ONE_GAMMA
/* Positive Gamma setting */
{ 0xE00, 0x04 },
{ 0xE01, 0x12 },
{ 0xE02, 0x18 },
{ 0xE03, 0x10 },
{ 0xE04, 0x29 },
{ 0xE05, 0x26 },
{ 0xE06, 0x1f },
{ 0xE07, 0x11 },
{ 0xE08, 0x0c },
{ 0xE09, 0x3a },
{ 0xE0A, 0x0d },
{ 0xE0B, 0x28 },
{ 0xE0C, 0x40 },
{ 0xE0D, 0x4e },
{ 0xE0E, 0x6f },
{ 0xE0F, 0x5E },
/* Negative Gamma setting */
{ 0xE10, 0x0B },
{ 0xE11, 0x00 },
{ 0xE12, 0x00 },
{ 0xE13, 0x1F },
{ 0xE14, 0x4b },
{ 0xE15, 0x33 },
{ 0xE16, 0x13 },
{ 0xE17, 0x12 },
{ 0xE18, 0x0d },
{ 0xE19, 0x2f },
{ 0xE1A, 0x16 },
{ 0xE1B, 0x2e },
{ 0xE1C, 0x49 },
{ 0xE1D, 0x41 },
{ 0xE1E, 0x46 },
{ 0xE1F, 0x55 },
#else
/* Red Positive Gamma */
{ 0xE00, 0x0f },
{ 0xE01, 0x19 },
{ 0xE02, 0x22 },
{ 0xE03, 0x0b },
{ 0xE04, 0x23 },
{ 0xE05, 0x23 },
{ 0xE06, 0x14 },
{ 0xE07, 0x13 },
{ 0xE08, 0x0f },
{ 0xE09, 0x2a },
{ 0xE0A, 0x0d },
{ 0xE0B, 0x26 },
{ 0xE0C, 0x43 },
{ 0xE0D, 0x20 },
{ 0xE0E, 0x2a },
{ 0xE0F, 0x5c },
/* Red Negative Gamma */
{ 0xE10, 0x0d },
{ 0xE11, 0x45 },
{ 0xE12, 0x4c },
{ 0xE13, 0x1c },
{ 0xE14, 0x4d },
{ 0xE15, 0x33 },
{ 0xE16, 0x23 },
{ 0xE17, 0x0f },
{ 0xE18, 0x0b },
{ 0xE19, 0x3a },
{ 0xE1A, 0x19 },
{ 0xE1B, 0x32 },
{ 0xE1C, 0x4e },
{ 0xE1D, 0x37 },
{ 0xE1E, 0x38 },
{ 0xE1F, 0x3b },
/* Green Positive Gamma */
{ 0xE20, 0x00 },
{ 0xE21, 0x09 },
{ 0xE22, 0x10 },
{ 0xE23, 0x0f },
{ 0xE24, 0x29 },
{ 0xE25, 0x23 },
{ 0xE26, 0x0b },
{ 0xE27, 0x14 },
{ 0xE28, 0x12 },
{ 0xE29, 0x25 },
{ 0xE2A, 0x12 },
{ 0xE2B, 0x2f },
{ 0xE2C, 0x43 },
{ 0xE2D, 0x2d },
{ 0xE2E, 0x52 },
{ 0xE2F, 0x61 },
/* Green Negative Gamma */
{ 0xE30, 0x08 },
{ 0xE31, 0x1d },
{ 0xE32, 0x3f },
{ 0xE33, 0x1c },
{ 0xE34, 0x44 },
{ 0xE35, 0x2e },
{ 0xE36, 0x28 },
{ 0xE37, 0x0c },
{ 0xE38, 0x0a },
{ 0xE39, 0x42 },
{ 0xE3A, 0x17 },
{ 0xE3B, 0x30 },
{ 0xE3C, 0x4b },
{ 0xE3D, 0x3f },
{ 0xE3E, 0x43 },
{ 0xE3F, 0x45 },
/* Blue Positive Gamma */
{ 0xE40, 0x32 },
{ 0xE41, 0x32 },
{ 0xE42, 0x31 },
{ 0xE43, 0x06 },
{ 0xE44, 0x08 },
{ 0xE45, 0x0d },
{ 0xE46, 0x04 },
{ 0xE47, 0x14 },
{ 0xE48, 0x0f },
{ 0xE49, 0x1d },
{ 0xE4A, 0x1a },
{ 0xE4B, 0x39 },
{ 0xE4C, 0x4c },
{ 0xE4D, 0x1e },
{ 0xE4E, 0x43 },
{ 0xE4F, 0x61 },
/* Blue Negative Gamma */
{ 0xE50, 0x08 },
{ 0xE51, 0x2c },
{ 0xE52, 0x4e },
{ 0xE53, 0x13 },
{ 0xE54, 0x3a },
{ 0xE55, 0x26 },
{ 0xE56, 0x30 },
{ 0xE57, 0x0f },
{ 0xE58, 0x0a },
{ 0xE59, 0x49 },
{ 0xE5A, 0x34 },
{ 0xE5B, 0x4a },
{ 0xE5C, 0x53 },
{ 0xE5D, 0x28 },
{ 0xE5E, 0x26 },
{ 0xE5F, 0x27 },
#endif
/* Sleep in mode */
{ 0x110, 0x00 },
{ 0x1, 0x23 },
/* Display on mode */
{ 0x290, 0x00 },
{ 0x1, 0x27 },
/* Driver output control */
{ 0x372, 0x01 },
{ 0x1, 0x40 },
/* Display on mode */
{ 0x290, 0x01 },
};
static const struct mddi_table tpo2_display_on[] = {
{ 0x290, 0x01 },
};
static const struct mddi_table tpo2_display_off[] = {
{ 0x110, 0x01 },
{ 0x290, 0x00 },
{ 0x1, 100 },
};
static const struct mddi_table tpo2_power_off[] = {
{ 0x0110, 0x01 },
};
static int nt35399_detect_panel(struct msm_mddi_client_data *client_data)
{
int id = -1, i;
/* If the MDDI client fails to report the panel ID,
* retry up to 5 times.
*/
for (i = 0; i < 5; i++) {
client_data->remote_write(client_data, 0, 0x110);
msleep(5);
id = client_data->remote_read(client_data, userid) ;
if (id == 0 || id == 1) {
if (i == 0) {
printk(KERN_ERR "%s: got valid panel ID=%d, "
"without retry\n",
__FUNCTION__, id);
} else {
printk(KERN_ERR "%s: got valid panel ID=%d, "
"after %d retry\n",
__FUNCTION__, id, i+1);
}
break;
}
printk(KERN_ERR "%s: got invalid panel ID:%d, trial #%d\n",
__FUNCTION__, id, i+1);
gpio_set_value(MDDI_RST_N, 0);
msleep(5);
gpio_set_value(MDDI_RST_N, 1);
msleep(10);
gpio_set_value(MDDI_RST_N, 0);
udelay(100);
gpio_set_value(MDDI_RST_N, 1);
mdelay(10);
}
printk(KERN_INFO "%s: final panel id=%d\n", __FUNCTION__, id);
switch (id) {
case 0:
return SAPPHIRE_PANEL_TOPPOLY;
case 1:
return SAPPHIRE_PANEL_SHARP;
default:
printk(KERN_ERR "%s(): Invalid panel ID: %d, "
"treat as sharp panel.", __FUNCTION__, id);
return SAPPHIRE_PANEL_SHARP;
}
}
static int nt35399_client_init(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
int panel_id;
if (g_panel_inited == 0) {
g_panel_id = panel_id = nt35399_detect_panel(client_data);
g_panel_inited = 1;
} else {
gpio_set_value(MDDI_RST_N, 1);
msleep(10);
gpio_set_value(MDDI_RST_N, 0);
udelay(100);
gpio_set_value(MDDI_RST_N, 1);
mdelay(10);
g_panel_id = panel_id = nt35399_detect_panel(client_data);
if (panel_id == -1) {
printk(KERN_ERR "Invalid panel id\n");
return -1;
}
client_data->auto_hibernate(client_data, 0);
if (panel_id == SAPPHIRE_PANEL_TOPPOLY) {
sapphire_process_mddi_table(client_data, tpo2_init_table,
ARRAY_SIZE(tpo2_init_table));
} else if (panel_id == SAPPHIRE_PANEL_SHARP) {
sapphire_process_mddi_table(client_data, sharp2_init_table,
ARRAY_SIZE(sharp2_init_table));
}
client_data->auto_hibernate(client_data, 1);
}
return 0;
}
static int nt35399_client_uninit(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *cdata)
{
return 0;
}
static int nt35399_panel_unblank(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
int ret = 0;
mdelay(20);
sapphire_set_backlight_level(0);
client_data->auto_hibernate(client_data, 0);
mutex_lock(&sapphire_backlight_lock);
sapphire_set_backlight_level(sapphire_backlight_brightness);
sapphire_backlight_off = 0;
mutex_unlock(&sapphire_backlight_lock);
client_data->auto_hibernate(client_data, 1);
return ret;
}
static int nt35399_panel_blank(
struct msm_mddi_bridge_platform_data *bridge_data,
struct msm_mddi_client_data *client_data)
{
int ret = 0;
client_data->auto_hibernate(client_data, 0);
sapphire_process_mddi_table(client_data, tpo2_display_off,
ARRAY_SIZE(tpo2_display_off));
client_data->auto_hibernate(client_data, 1);
mutex_lock(&sapphire_backlight_lock);
sapphire_set_backlight_level(0);
sapphire_backlight_off = 1;
mutex_unlock(&sapphire_backlight_lock);
return ret;
}
static void sapphire_brightness_set(struct led_classdev *led_cdev, enum led_brightness value)
{
mutex_lock(&sapphire_backlight_lock);
sapphire_backlight_brightness = value;
if (!sapphire_backlight_off)
sapphire_set_backlight_level(sapphire_backlight_brightness);
mutex_unlock(&sapphire_backlight_lock);
}
static struct led_classdev sapphire_backlight_led = {
.name = "lcd-backlight",
.brightness = SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS,
.brightness_set = sapphire_brightness_set,
};
static int sapphire_backlight_probe(struct platform_device *pdev)
{
led_classdev_register(&pdev->dev, &sapphire_backlight_led);
return 0;
}
static int sapphire_backlight_remove(struct platform_device *pdev)
{
led_classdev_unregister(&sapphire_backlight_led);
return 0;
}
static struct platform_driver sapphire_backlight_driver = {
.probe = sapphire_backlight_probe,
.remove = sapphire_backlight_remove,
.driver = {
.name = "sapphire-backlight",
.owner = THIS_MODULE,
},
};
static struct resource resources_msm_fb[] = {
{
.start = SMI64_MSM_FB_BASE,
.end = SMI64_MSM_FB_BASE + SMI64_MSM_FB_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
static struct msm_mddi_bridge_platform_data toshiba_client_data = {
.init = sapphire_mddi_toshiba_client_init,
.uninit = sapphire_mddi_toshiba_client_uninit,
.blank = sapphire_mddi_panel_blank,
.unblank = sapphire_mddi_panel_unblank,
.fb_data = {
.xres = 320,
.yres = 480,
.width = 45,
.height = 67,
.output_format = 0,
},
};
#define NT35399_MFR_NAME 0x0bda
#define NT35399_PRODUCT_CODE 0x8a47
static void nt35399_fixup(uint16_t *mfr_name, uint16_t *product_code)
{
printk(KERN_DEBUG "%s: enter.\n", __func__);
*mfr_name = NT35399_MFR_NAME;
*product_code = NT35399_PRODUCT_CODE;
}
static struct msm_mddi_bridge_platform_data nt35399_client_data = {
.init = nt35399_client_init,
.uninit = nt35399_client_uninit,
.blank = nt35399_panel_blank,
.unblank = nt35399_panel_unblank,
.fb_data = {
.xres = 320,
.yres = 480,
.output_format = 0,
},
};
static struct msm_mddi_platform_data mddi_pdata = {
.clk_rate = 122880000,
.power_client = sapphire_mddi_power_client,
.fixup = nt35399_fixup,
.vsync_irq = MSM_GPIO_TO_INT(VSYNC_GPIO),
.fb_resource = resources_msm_fb,
.num_clients = 2,
.client_platform_data = {
{
.product_id = (0xd263 << 16 | 0),
.name = "mddi_c_d263_0000",
.id = 0,
.client_data = &toshiba_client_data,
.clk_rate = 0,
},
{
.product_id =
(NT35399_MFR_NAME << 16 | NT35399_PRODUCT_CODE),
.name = "mddi_c_simple",
.id = 0,
.client_data = &nt35399_client_data,
.clk_rate = 0,
},
},
};
static struct platform_device sapphire_backlight = {
.name = "sapphire-backlight",
};
int __init sapphire_init_panel(void)
{
int rc = -1;
uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA); /* GPIO27 */
if (!machine_is_sapphire())
return 0;
/* checking board as soon as possible */
printk("sapphire_init_panel:machine_is_sapphire=%d, machine_arch_type=%d, MACH_TYPE_SAPPHIRE=%d\r\n", machine_is_sapphire(), machine_arch_type, MACH_TYPE_SAPPHIRE);
if (!machine_is_sapphire())
return 0;
vreg_lcm_2v85 = vreg_get(0, "gp4");
if (IS_ERR(vreg_lcm_2v85))
return PTR_ERR(vreg_lcm_2v85);
msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
/* setup FB by SMI size */
if (sapphire_get_smi_size() == 32) {
resources_msm_fb[0].start = SMI32_MSM_FB_BASE;
resources_msm_fb[0].end = SMI32_MSM_FB_BASE + SMI32_MSM_FB_SIZE - 1;
}
rc = gpio_request(VSYNC_GPIO, "vsync");
if (rc)
return rc;
rc = gpio_direction_input(VSYNC_GPIO);
if (rc)
return rc;
rc = platform_device_register(&msm_device_mdp);
if (rc)
return rc;
msm_device_mddi0.dev.platform_data = &mddi_pdata;
rc = platform_device_register(&msm_device_mddi0);
if (rc)
return rc;
platform_device_register(&sapphire_backlight);
return platform_driver_register(&sapphire_backlight_driver);
}
device_initcall(sapphire_init_panel);
| gpl-2.0 |
marek-g/kobo-kernel-2.6.35.3-android | drivers/watchdog/pcwd_usb.c | 4139 | 22745 | /*
* Berkshire USB-PC Watchdog Card Driver
*
* (c) Copyright 2004-2007 Wim Van Sebroeck <wim@iguana.be>.
*
* Based on source code of the following authors:
* Ken Hollis <kenji@bitgate.com>,
* Alan Cox <alan@lxorguk.ukuu.org.uk>,
* Matt Domsch <Matt_Domsch@dell.com>,
* Rob Radez <rob@osinvestor.com>,
* Greg Kroah-Hartman <greg@kroah.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
* Thanks also to Simon Machell at Berkshire Products Inc. for
* providing the test hardware. More info is available at
* http://www.berkprod.com/ or http://www.pcwatchdog.com/
*/
#include <linux/module.h> /* For module specific items */
#include <linux/moduleparam.h> /* For new moduleparam's */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/delay.h> /* For mdelay function */
#include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
#include <linux/watchdog.h> /* For the watchdog specific items */
#include <linux/notifier.h> /* For notifier support */
#include <linux/reboot.h> /* For reboot_notifier stuff */
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/fs.h> /* For file operations */
#include <linux/usb.h> /* For USB functions */
#include <linux/slab.h> /* For kmalloc, ... */
#include <linux/mutex.h> /* For mutex locking */
#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#ifdef CONFIG_USB_DEBUG
static int debug = 1;
#else
static int debug;
#endif
/* Use our own dbg macro */
#undef dbg
#define dbg(format, arg...) \
do { if (debug) printk(KERN_DEBUG PFX format "\n" , ## arg); } while (0)
/* Module and Version Information */
#define DRIVER_VERSION "1.02"
#define DRIVER_AUTHOR "Wim Van Sebroeck <wim@iguana.be>"
#define DRIVER_DESC "Berkshire USB-PC Watchdog driver"
#define DRIVER_LICENSE "GPL"
#define DRIVER_NAME "pcwd_usb"
#define PFX DRIVER_NAME ": "
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS_MISCDEV(TEMP_MINOR);
/* Module Parameters */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug enabled or not");
#define WATCHDOG_HEARTBEAT 0 /* default heartbeat =
delay-time from dip-switches */
static int heartbeat = WATCHDOG_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
"(0<heartbeat<65536 or 0=delay-time from dip-switches, default="
__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/* The vendor and product id's for the USB-PC Watchdog card */
#define USB_PCWD_VENDOR_ID 0x0c98
#define USB_PCWD_PRODUCT_ID 0x1140
/* table of devices that work with this driver */
static struct usb_device_id usb_pcwd_table[] = {
{ USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usb_pcwd_table);
/* according to the documentation, the max. time to process a command for the
* USB watchdog card is 100 or 200 ms, so we give it 250 ms to do its job */
#define USB_COMMAND_TIMEOUT 250
/* Watchdog's internal commands */
#define CMD_READ_TEMP 0x02 /* Read Temperature;
Re-trigger Watchdog */
#define CMD_TRIGGER CMD_READ_TEMP
#define CMD_GET_STATUS 0x04 /* Get Status Information */
#define CMD_GET_FIRMWARE_VERSION 0x08 /* Get Firmware Version */
#define CMD_GET_DIP_SWITCH_SETTINGS 0x0c /* Get Dip Switch Settings */
#define CMD_READ_WATCHDOG_TIMEOUT 0x18 /* Read Current Watchdog Time */
#define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 /* Write Current WatchdogTime */
#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */
#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG
/* Watchdog's Dip Switch heartbeat values */
static const int heartbeat_tbl[] = {
5, /* OFF-OFF-OFF = 5 Sec */
10, /* OFF-OFF-ON = 10 Sec */
30, /* OFF-ON-OFF = 30 Sec */
60, /* OFF-ON-ON = 1 Min */
300, /* ON-OFF-OFF = 5 Min */
600, /* ON-OFF-ON = 10 Min */
1800, /* ON-ON-OFF = 30 Min */
3600, /* ON-ON-ON = 1 hour */
};
/* We can only use 1 card due to the /dev/watchdog restriction */
static int cards_found;
/* some internal variables */
static unsigned long is_active;
static char expect_release;
/* Structure to hold all of our device specific stuff */
struct usb_pcwd_private {
/* save off the usb device pointer */
struct usb_device *udev;
/* the interface for this device */
struct usb_interface *interface;
/* the interface number used for cmd's */
unsigned int interface_number;
/* the buffer to intr data */
unsigned char *intr_buffer;
/* the dma address for the intr buffer */
dma_addr_t intr_dma;
/* the size of the intr buffer */
size_t intr_size;
/* the urb used for the intr pipe */
struct urb *intr_urb;
/* The command that is reported back */
unsigned char cmd_command;
/* The data MSB that is reported back */
unsigned char cmd_data_msb;
/* The data LSB that is reported back */
unsigned char cmd_data_lsb;
/* true if we received a report after a command */
atomic_t cmd_received;
/* Whether or not the device exists */
int exists;
/* locks this structure */
struct mutex mtx;
};
static struct usb_pcwd_private *usb_pcwd_device;
/* prevent races between open() and disconnect() */
static DEFINE_MUTEX(disconnect_mutex);
/* local function prototypes */
static int usb_pcwd_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void usb_pcwd_disconnect(struct usb_interface *interface);
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver usb_pcwd_driver = {
.name = DRIVER_NAME,
.probe = usb_pcwd_probe,
.disconnect = usb_pcwd_disconnect,
.id_table = usb_pcwd_table,
};
static void usb_pcwd_intr_done(struct urb *urb)
{
struct usb_pcwd_private *usb_pcwd =
(struct usb_pcwd_private *)urb->context;
unsigned char *data = usb_pcwd->intr_buffer;
int retval;
switch (urb->status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __func__,
urb->status);
return;
/* -EPIPE: should clear the halt */
default: /* error */
dbg("%s - nonzero urb status received: %d", __func__,
urb->status);
goto resubmit;
}
dbg("received following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
data[0], data[1], data[2]);
usb_pcwd->cmd_command = data[0];
usb_pcwd->cmd_data_msb = data[1];
usb_pcwd->cmd_data_lsb = data[2];
/* notify anyone waiting that the cmd has finished */
atomic_set(&usb_pcwd->cmd_received, 1);
resubmit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
printk(KERN_ERR PFX "can't resubmit intr, "
"usb_submit_urb failed with result %d\n", retval);
}
static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd,
unsigned char cmd, unsigned char *msb, unsigned char *lsb)
{
int got_response, count;
unsigned char buf[6];
/* We will not send any commands if the USB PCWD device does
* not exist */
if ((!usb_pcwd) || (!usb_pcwd->exists))
return -1;
/* The USB PC Watchdog uses a 6 byte report format.
* The board currently uses only 3 of the six bytes of the report. */
buf[0] = cmd; /* Byte 0 = CMD */
buf[1] = *msb; /* Byte 1 = Data MSB */
buf[2] = *lsb; /* Byte 2 = Data LSB */
buf[3] = buf[4] = buf[5] = 0; /* All other bytes not used */
dbg("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x",
buf[0], buf[1], buf[2]);
atomic_set(&usb_pcwd->cmd_received, 0);
if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0),
HID_REQ_SET_REPORT, HID_DT_REPORT,
0x0200, usb_pcwd->interface_number, buf, sizeof(buf),
USB_COMMAND_TIMEOUT) != sizeof(buf)) {
dbg("usb_pcwd_send_command: error in usb_control_msg for "
"cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb);
}
/* wait till the usb card processed the command,
* with a max. timeout of USB_COMMAND_TIMEOUT */
got_response = 0;
for (count = 0; (count < USB_COMMAND_TIMEOUT) && (!got_response);
count++) {
mdelay(1);
if (atomic_read(&usb_pcwd->cmd_received))
got_response = 1;
}
if ((got_response) && (cmd == usb_pcwd->cmd_command)) {
/* read back response */
*msb = usb_pcwd->cmd_data_msb;
*lsb = usb_pcwd->cmd_data_lsb;
}
return got_response;
}
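/*
* Illustrative example (not part of the original driver) of the 6-byte
* report built above: programming a 60 second heartbeat sends
*
*   buf[] = { CMD_WRITE_WATCHDOG_TIMEOUT, 0x00, 0x3C, 0, 0, 0 }
*
* i.e. byte 0 = command, byte 1 = data MSB (60 / 256), byte 2 = data LSB
* (60 % 256); the card echoes the command and data back on the interrupt
* endpoint, which sets cmd_received.
*/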
static int usb_pcwd_start(struct usb_pcwd_private *usb_pcwd)
{
unsigned char msb = 0x00;
unsigned char lsb = 0x00;
int retval;
/* Enable Watchdog */
retval = usb_pcwd_send_command(usb_pcwd, CMD_ENABLE_WATCHDOG,
&msb, &lsb);
if ((retval == 0) || (lsb == 0)) {
printk(KERN_ERR PFX
"Card did not acknowledge enable attempt\n");
return -1;
}
return 0;
}
static int usb_pcwd_stop(struct usb_pcwd_private *usb_pcwd)
{
unsigned char msb = 0xA5;
unsigned char lsb = 0xC3;
int retval;
/* Disable Watchdog */
retval = usb_pcwd_send_command(usb_pcwd, CMD_DISABLE_WATCHDOG,
&msb, &lsb);
if ((retval == 0) || (lsb != 0)) {
printk(KERN_ERR PFX
"Card did not acknowledge disable attempt\n");
return -1;
}
return 0;
}
static int usb_pcwd_keepalive(struct usb_pcwd_private *usb_pcwd)
{
unsigned char dummy;
/* Re-trigger Watchdog */
usb_pcwd_send_command(usb_pcwd, CMD_TRIGGER, &dummy, &dummy);
return 0;
}
static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
{
unsigned char msb = t / 256;
unsigned char lsb = t % 256;
if ((t < 0x0001) || (t > 0xFFFF))
return -EINVAL;
/* Write new heartbeat to watchdog */
usb_pcwd_send_command(usb_pcwd, CMD_WRITE_WATCHDOG_TIMEOUT, &msb, &lsb);
heartbeat = t;
return 0;
}
static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
int *temperature)
{
unsigned char msb, lsb;
usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb);
/*
* Convert celsius to fahrenheit, since this was
* the decided 'standard' for this return value.
*/
*temperature = (lsb * 9 / 5) + 32;
return 0;
}
static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd,
int *time_left)
{
unsigned char msb, lsb;
/* Read the time that's left before rebooting */
/* Note: if the board is not yet armed then we will read 0xFFFF */
usb_pcwd_send_command(usb_pcwd, CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb);
*time_left = (msb << 8) + lsb;
return 0;
}
/*
* /dev/watchdog handling
*/
static ssize_t usb_pcwd_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
/* See if we got the magic character 'V' and reload the timer */
if (len) {
if (!nowayout) {
size_t i;
/* note: just in case someone wrote the magic character
* five months ago... */
expect_release = 0;
/* scan to see whether or not we got the
* magic character */
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
expect_release = 42;
}
}
/* someone wrote to us, we should reload the timer */
usb_pcwd_keepalive(usb_pcwd_device);
}
return len;
}
static long usb_pcwd_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = DRIVER_NAME,
};
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_GETTEMP:
{
int temperature;
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
return put_user(temperature, p);
}
case WDIOC_SETOPTIONS:
{
int new_options, retval = -EINVAL;
if (get_user(new_options, p))
return -EFAULT;
if (new_options & WDIOS_DISABLECARD) {
usb_pcwd_stop(usb_pcwd_device);
retval = 0;
}
if (new_options & WDIOS_ENABLECARD) {
usb_pcwd_start(usb_pcwd_device);
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
usb_pcwd_keepalive(usb_pcwd_device);
return 0;
case WDIOC_SETTIMEOUT:
{
int new_heartbeat;
if (get_user(new_heartbeat, p))
return -EFAULT;
if (usb_pcwd_set_heartbeat(usb_pcwd_device, new_heartbeat))
return -EINVAL;
usb_pcwd_keepalive(usb_pcwd_device);
/* Fall through */
}
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
case WDIOC_GETTIMELEFT:
{
int time_left;
if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left))
return -EFAULT;
return put_user(time_left, p);
}
default:
return -ENOTTY;
}
}
static int usb_pcwd_open(struct inode *inode, struct file *file)
{
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &is_active))
return -EBUSY;
/* Activate */
usb_pcwd_start(usb_pcwd_device);
usb_pcwd_keepalive(usb_pcwd_device);
return nonseekable_open(inode, file);
}
static int usb_pcwd_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
*/
if (expect_release == 42) {
usb_pcwd_stop(usb_pcwd_device);
} else {
printk(KERN_CRIT PFX
"Unexpected close, not stopping watchdog!\n");
usb_pcwd_keepalive(usb_pcwd_device);
}
expect_release = 0;
clear_bit(0, &is_active);
return 0;
}
/*
* /dev/temperature handling
*/
static ssize_t usb_pcwd_temperature_read(struct file *file, char __user *data,
size_t len, loff_t *ppos)
{
int temperature;
if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature))
return -EFAULT;
if (copy_to_user(data, &temperature, 1))
return -EFAULT;
return 1;
}
static int usb_pcwd_temperature_open(struct inode *inode, struct file *file)
{
return nonseekable_open(inode, file);
}
static int usb_pcwd_temperature_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* Notify system
*/
static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
usb_pcwd_stop(usb_pcwd_device); /* Turn the WDT off */
return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
static const struct file_operations usb_pcwd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = usb_pcwd_write,
.unlocked_ioctl = usb_pcwd_ioctl,
.open = usb_pcwd_open,
.release = usb_pcwd_release,
};
static struct miscdevice usb_pcwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &usb_pcwd_fops,
};
static const struct file_operations usb_pcwd_temperature_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = usb_pcwd_temperature_read,
.open = usb_pcwd_temperature_open,
.release = usb_pcwd_temperature_release,
};
static struct miscdevice usb_pcwd_temperature_miscdev = {
.minor = TEMP_MINOR,
.name = "temperature",
.fops = &usb_pcwd_temperature_fops,
};
static struct notifier_block usb_pcwd_notifier = {
.notifier_call = usb_pcwd_notify_sys,
};
/**
* usb_pcwd_delete
*/
static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd)
{
usb_free_urb(usb_pcwd->intr_urb);
if (usb_pcwd->intr_buffer != NULL)
usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size,
usb_pcwd->intr_buffer, usb_pcwd->intr_dma);
kfree(usb_pcwd);
}
/**
* usb_pcwd_probe
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int usb_pcwd_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
struct usb_pcwd_private *usb_pcwd = NULL;
int pipe, maxp;
int retval = -ENOMEM;
int got_fw_rev;
unsigned char fw_rev_major, fw_rev_minor;
char fw_ver_str[20];
unsigned char option_switches, dummy;
cards_found++;
if (cards_found > 1) {
printk(KERN_ERR PFX "This driver only supports 1 device\n");
return -ENODEV;
}
/* get the active interface descriptor */
iface_desc = interface->cur_altsetting;
/* check out that we have a HID device */
if (!(iface_desc->desc.bInterfaceClass == USB_CLASS_HID)) {
printk(KERN_ERR PFX
"The device isn't a Human Interface Device\n");
return -ENODEV;
}
/* check out the endpoint: it has to be Interrupt & IN */
endpoint = &iface_desc->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint)) {
/* we didn't find a Interrupt endpoint with direction IN */
printk(KERN_ERR PFX "Couldn't find an INTR & IN endpoint\n");
return -ENODEV;
}
/* get a handle to the interrupt data pipe */
pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
/* allocate memory for our device and initialize it */
usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
if (usb_pcwd == NULL) {
printk(KERN_ERR PFX "Out of memory\n");
goto error;
}
usb_pcwd_device = usb_pcwd;
mutex_init(&usb_pcwd->mtx);
usb_pcwd->udev = udev;
usb_pcwd->interface = interface;
usb_pcwd->interface_number = iface_desc->desc.bInterfaceNumber;
usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ?
le16_to_cpu(endpoint->wMaxPacketSize) : 8);
/* set up the memory buffer's */
usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size,
GFP_ATOMIC, &usb_pcwd->intr_dma);
if (!usb_pcwd->intr_buffer) {
printk(KERN_ERR PFX "Out of memory\n");
goto error;
}
/* allocate the urb's */
usb_pcwd->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!usb_pcwd->intr_urb) {
printk(KERN_ERR PFX "Out of memory\n");
goto error;
}
/* initialise the intr urb's */
usb_fill_int_urb(usb_pcwd->intr_urb, udev, pipe,
usb_pcwd->intr_buffer, usb_pcwd->intr_size,
usb_pcwd_intr_done, usb_pcwd, endpoint->bInterval);
usb_pcwd->intr_urb->transfer_dma = usb_pcwd->intr_dma;
usb_pcwd->intr_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* register our interrupt URB with the USB system */
if (usb_submit_urb(usb_pcwd->intr_urb, GFP_KERNEL)) {
printk(KERN_ERR PFX "Problem registering interrupt URB\n");
retval = -EIO; /* failure */
goto error;
}
/* The device exists and can be communicated with */
usb_pcwd->exists = 1;
/* disable card */
usb_pcwd_stop(usb_pcwd);
/* Get the Firmware Version */
got_fw_rev = usb_pcwd_send_command(usb_pcwd, CMD_GET_FIRMWARE_VERSION,
&fw_rev_major, &fw_rev_minor);
if (got_fw_rev)
sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor);
else
sprintf(fw_ver_str, "<card no answer>");
printk(KERN_INFO PFX "Found card (Firmware: %s) with temp option\n",
fw_ver_str);
/* Get switch settings */
usb_pcwd_send_command(usb_pcwd, CMD_GET_DIP_SWITCH_SETTINGS, &dummy,
&option_switches);
printk(KERN_INFO PFX "Option switches (0x%02x): "
"Temperature Reset Enable=%s, Power On Delay=%s\n",
option_switches,
((option_switches & 0x10) ? "ON" : "OFF"),
((option_switches & 0x08) ? "ON" : "OFF"));
/* If heartbeat = 0 then we use the heartbeat from the dip-switches */
if (heartbeat == 0)
heartbeat = heartbeat_tbl[(option_switches & 0x07)];
/* Check that the heartbeat value is within its range;
* if not, reset to the default */
if (usb_pcwd_set_heartbeat(usb_pcwd, heartbeat)) {
usb_pcwd_set_heartbeat(usb_pcwd, WATCHDOG_HEARTBEAT);
printk(KERN_INFO PFX
"heartbeat value must be 0<heartbeat<65536, using %d\n",
WATCHDOG_HEARTBEAT);
}
retval = register_reboot_notifier(&usb_pcwd_notifier);
if (retval != 0) {
printk(KERN_ERR PFX
"cannot register reboot notifier (err=%d)\n",
retval);
goto error;
}
retval = misc_register(&usb_pcwd_temperature_miscdev);
if (retval != 0) {
printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
TEMP_MINOR, retval);
goto err_out_unregister_reboot;
}
retval = misc_register(&usb_pcwd_miscdev);
if (retval != 0) {
printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, retval);
goto err_out_misc_deregister;
}
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, usb_pcwd);
printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
return 0;
err_out_misc_deregister:
misc_deregister(&usb_pcwd_temperature_miscdev);
err_out_unregister_reboot:
unregister_reboot_notifier(&usb_pcwd_notifier);
error:
if (usb_pcwd)
usb_pcwd_delete(usb_pcwd);
usb_pcwd_device = NULL;
return retval;
}
/**
* usb_pcwd_disconnect
*
* Called by the usb core when the device is removed from the system.
*
* This routine guarantees that the driver will not submit any more urbs
* by clearing dev->udev.
*/
static void usb_pcwd_disconnect(struct usb_interface *interface)
{
struct usb_pcwd_private *usb_pcwd;
/* prevent races with open() */
mutex_lock(&disconnect_mutex);
usb_pcwd = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
mutex_lock(&usb_pcwd->mtx);
/* Stop the timer before we leave */
if (!nowayout)
usb_pcwd_stop(usb_pcwd);
/* We should now stop communicating with the USB PCWD device */
usb_pcwd->exists = 0;
/* Deregister */
misc_deregister(&usb_pcwd_miscdev);
misc_deregister(&usb_pcwd_temperature_miscdev);
unregister_reboot_notifier(&usb_pcwd_notifier);
mutex_unlock(&usb_pcwd->mtx);
/* Delete the USB PCWD device */
usb_pcwd_delete(usb_pcwd);
cards_found--;
mutex_unlock(&disconnect_mutex);
printk(KERN_INFO PFX "USB PC Watchdog disconnected\n");
}
/**
* usb_pcwd_init
*/
static int __init usb_pcwd_init(void)
{
int result;
/* register this driver with the USB subsystem */
result = usb_register(&usb_pcwd_driver);
if (result) {
printk(KERN_ERR PFX "usb_register failed. Error number %d\n",
result);
return result;
}
printk(KERN_INFO PFX DRIVER_DESC " v" DRIVER_VERSION "\n");
return 0;
}
/**
* usb_pcwd_exit
*/
static void __exit usb_pcwd_exit(void)
{
/* deregister this driver with the USB subsystem */
usb_deregister(&usb_pcwd_driver);
}
module_init(usb_pcwd_init);
module_exit(usb_pcwd_exit);
| gpl-2.0 |
alexpotter1/Neutron_msm8974_hammerhead | drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 5419 | 3785 | /*******************************************************************************
Specialised functions for managing Chained mode
Copyright(C) 2011 STMicroelectronics Ltd
It defines all the functions used to handle the normal/enhanced
descriptors when the DMA is configured to work in chained or
in ring mode.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include "stmmac.h"
unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *) p;
unsigned int txsize = priv->dma_tx_size;
unsigned int entry = priv->cur_tx % txsize;
struct dma_desc *desc = priv->dma_tx + entry;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax;
unsigned int i = 1, len;
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
else
bmax = BUF_SIZE_2KiB;
len = nopaged_len - bmax;
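/* The first descriptor carries the first bmax bytes of the linear
 * (non-paged) data; len is what remains to be spread over the
 * following descriptors in the chain. */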
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
while (len != 0) {
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
if (len > bmax) {
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
csum);
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
len -= bmax;
i++;
} else {
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
priv->hw->desc->prepare_tx_desc(desc, 0, len,
csum);
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
len = 0;
}
}
return entry;
}
static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
{
unsigned int ret = 0;
if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
(!enh_desc && (len > BUF_SIZE_2KiB))) {
ret = 1;
}
return ret;
}
static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
{
}
static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
{
}
static void stmmac_clean_desc3(struct dma_desc *p)
{
}
static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
unsigned int size)
{
/*
* In chained mode, des3 points to the next element in the ring.
* The last element has to point back to the head.
*/
int i;
struct dma_desc *p = des;
dma_addr_t dma_phy = phy_addr;
for (i = 0; i < (size - 1); i++) {
dma_phy += sizeof(struct dma_desc);
p->des3 = (unsigned int)dma_phy;
p++;
}
p->des3 = (unsigned int)phy_addr;
}
static int stmmac_set_16kib_bfsize(int mtu)
{
/* Not supported */
return 0;
}
const struct stmmac_ring_mode_ops ring_mode_ops = {
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
.init_desc3 = stmmac_init_desc3,
.init_dma_chain = stmmac_init_dma_chain,
.clean_desc3 = stmmac_clean_desc3,
.set_16kib_bfsize = stmmac_set_16kib_bfsize,
};
| gpl-2.0 |
proto-dev/Proto-dev_kernel | net/unix/garbage.c | 7723 | 10621 | /*
* NET3: Garbage Collector For AF_UNIX sockets
*
* Garbage Collector:
* Copyright (C) Barak A. Pearlmutter.
* Released under the GPL version 2 or later.
*
* Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
* If it doesn't work blame me, it worked when Barak sent it.
*
* Assumptions:
*
* - object w/ a bit
* - free list
*
* Current optimizations:
*
* - explicit stack instead of recursion
* - tail recurse on first born instead of immediate push/pop
* - we gather the stuff that should not be killed into tree
* and stack is just a path from root to the current pointer.
*
* Future optimizations:
*
* - don't just push entire root set; process in place
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Alan Cox 07 Sept 1997 Vmalloc internal stack as needed.
* Cope with changing max_files.
* Al Viro 11 Oct 1998
* Graph may have cycles. That is, we can send the descriptor
* of foo to bar and vice versa. Current code chokes on that.
* Fix: move SCM_RIGHTS ones into the separate list and then
* skb_free() them all instead of doing explicit fput's.
* Another problem: since fput() may block somebody may
* create a new unix_socket when we are in the middle of sweep
* phase. Fix: revert the logic wrt MARKED. Mark everything
* upon the beginning and unmark non-junk ones.
*
* [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
* sent to connect()'ed but still not accept()'ed sockets.
* Fixed. Old code had slightly different problem here:
* extra fput() in situation when we passed the descriptor via
* such socket and closed it (descriptor). That would happen on
* each unix_gc() until the accept(). Since the struct file in
* question would go to the free list and might be reused...
* That might be the reason of random oopses on filp_close()
* in unrelated processes.
*
* AV 28 Feb 1999
* Kill the explicit allocation of stack. Now we keep the tree
* with root in dummy + pointer (gc_current) to one of the nodes.
* Stack is represented as path from gc_current to dummy. Unmark
* now means "add to tree". Push == "make it a son of gc_current".
* Pop == "move gc_current to parent". We keep only pointers to
* parents (->gc_tree).
* AV 1 Mar 1999
* Damn. Added missing check for ->dead in listen queues scanning.
*
* Miklos Szeredi 25 Jun 2007
* Reimplement with a cycle collecting algorithm. This should
* solve several problems with the previous code, like being racy
* wrt receive and holding up unrelated socket operations.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
/* Internal data structures and random procedures: */
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
unsigned int unix_tot_inflight;
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = filp->f_path.dentry->d_inode;
/*
* Socket ?
*/
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
struct sock *s = sock->sk;
/*
* PF_UNIX ?
*/
if (s && sock->ops && sock->ops->family == PF_UNIX)
u_sock = s;
}
return u_sock;
}
/*
* Keep the number of times in flight count for the file
* descriptor if it is for an AF_UNIX socket.
*/
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
BUG_ON(list_empty(&u->link));
}
unix_tot_inflight++;
spin_unlock(&unix_gc_lock);
}
}
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
spin_unlock(&unix_gc_lock);
}
}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/*
* Do we have file descriptors ?
*/
if (UNIXCB(skb).fp) {
bool hit = false;
/*
* Process the descriptors of this socket
*/
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
while (nfd--) {
/*
* Get the socket the fd matches
* if it indeed does so
*/
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
struct unix_sock *u = unix_sk(sk);
/*
* Ignore non-candidates, they could
* have been added to the queues after
* starting the garbage collection
*/
if (u->gc_candidate) {
hit = true;
func(u);
}
}
}
if (hit && hitlist != NULL) {
__skb_unlink(skb, &x->sk_receive_queue);
__skb_queue_tail(hitlist, skb);
}
}
}
spin_unlock(&x->sk_receive_queue.lock);
}
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
if (x->sk_state != TCP_LISTEN)
scan_inflight(x, func, hitlist);
else {
struct sk_buff *skb;
struct sk_buff *next;
struct unix_sock *u;
LIST_HEAD(embryos);
/*
* For a listening socket collect the queued embryos
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/*
* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
while (!list_empty(&embryos)) {
u = list_entry(embryos.next, struct unix_sock, link);
scan_inflight(&u->sk, func, hitlist);
list_del_init(&u->link);
}
}
}
static void dec_inflight(struct unix_sock *usk)
{
atomic_long_dec(&usk->inflight);
}
static void inc_inflight(struct unix_sock *usk)
{
atomic_long_inc(&usk->inflight);
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/*
* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
*/
if (u->gc_maybe_cycle)
list_move_tail(&u->link, &gc_candidates);
}
static bool gc_in_progress = false;
#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
{
/*
* If number of inflight sockets is insane,
* force a garbage collect right now.
*/
if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
gc_in_progress = true;
/*
* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
* reference. Since there are no possible receivers, all
* buffers currently on the candidates' queues stay there
* during the garbage collection.
*
* We also know that no new candidate can be added onto the
* receive queues. Other, non candidate sockets _can_ be
* added to queue, so we must make sure only to touch
* candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
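/*
 * If every reference to the file is an in-flight one, no user-space
 * descriptor refers to this socket any more; it is kept alive only by
 * SCM_RIGHTS messages and is therefore a candidate for collection.
 */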
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
u->gc_maybe_cycle = 1;
}
}
/*
* Now remove all internal in-flight references to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
/*
* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
* Use a "cursor" link, to make the list traversal safe, even
* though elements might be moved about.
*/
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
u = list_entry(cursor.next, struct unix_sock, link);
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
list_move_tail(&u->link, ¬_cycle_list);
u->gc_maybe_cycle = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(¬_cycle_list)) {
u = list_entry(not_cycle_list.next, struct unix_sock, link);
u->gc_candidate = 0;
list_move_tail(&u->link, &gc_inflight_list);
}
/*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;
wake_up(&unix_gc_wait);
out:
spin_unlock(&unix_gc_lock);
}
| gpl-2.0 |
samno1607/XyZ | drivers/video/jz4740_fb.c | 8235 | 19873 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 SoC LCD framebuffer driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/dma-mapping.h>
#include <asm/mach-jz4740/jz4740_fb.h>
#include <asm/mach-jz4740/gpio.h>
#define JZ_REG_LCD_CFG 0x00
#define JZ_REG_LCD_VSYNC 0x04
#define JZ_REG_LCD_HSYNC 0x08
#define JZ_REG_LCD_VAT 0x0C
#define JZ_REG_LCD_DAH 0x10
#define JZ_REG_LCD_DAV 0x14
#define JZ_REG_LCD_PS 0x18
#define JZ_REG_LCD_CLS 0x1C
#define JZ_REG_LCD_SPL 0x20
#define JZ_REG_LCD_REV 0x24
#define JZ_REG_LCD_CTRL 0x30
#define JZ_REG_LCD_STATE 0x34
#define JZ_REG_LCD_IID 0x38
#define JZ_REG_LCD_DA0 0x40
#define JZ_REG_LCD_SA0 0x44
#define JZ_REG_LCD_FID0 0x48
#define JZ_REG_LCD_CMD0 0x4C
#define JZ_REG_LCD_DA1 0x50
#define JZ_REG_LCD_SA1 0x54
#define JZ_REG_LCD_FID1 0x58
#define JZ_REG_LCD_CMD1 0x5C
#define JZ_LCD_CFG_SLCD BIT(31)
#define JZ_LCD_CFG_PS_DISABLE BIT(23)
#define JZ_LCD_CFG_CLS_DISABLE BIT(22)
#define JZ_LCD_CFG_SPL_DISABLE BIT(21)
#define JZ_LCD_CFG_REV_DISABLE BIT(20)
#define JZ_LCD_CFG_HSYNCM BIT(19)
#define JZ_LCD_CFG_PCLKM BIT(18)
#define JZ_LCD_CFG_INV BIT(17)
#define JZ_LCD_CFG_SYNC_DIR BIT(16)
#define JZ_LCD_CFG_PS_POLARITY BIT(15)
#define JZ_LCD_CFG_CLS_POLARITY BIT(14)
#define JZ_LCD_CFG_SPL_POLARITY BIT(13)
#define JZ_LCD_CFG_REV_POLARITY BIT(12)
#define JZ_LCD_CFG_HSYNC_ACTIVE_LOW BIT(11)
#define JZ_LCD_CFG_PCLK_FALLING_EDGE BIT(10)
#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9)
#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8)
#define JZ_LCD_CFG_18_BIT BIT(7)
#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4))
#define JZ_LCD_CFG_MODE_MASK 0xf
#define JZ_LCD_CTRL_BURST_4 (0x0 << 28)
#define JZ_LCD_CTRL_BURST_8 (0x1 << 28)
#define JZ_LCD_CTRL_BURST_16 (0x2 << 28)
#define JZ_LCD_CTRL_RGB555 BIT(27)
#define JZ_LCD_CTRL_OFUP BIT(26)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_4 (0x1 << 24)
#define JZ_LCD_CTRL_FRC_GRAYSCALE_2 (0x2 << 24)
#define JZ_LCD_CTRL_PDD_MASK (0xff << 16)
#define JZ_LCD_CTRL_EOF_IRQ BIT(13)
#define JZ_LCD_CTRL_SOF_IRQ BIT(12)
#define JZ_LCD_CTRL_OFU_IRQ BIT(11)
#define JZ_LCD_CTRL_IFU0_IRQ BIT(10)
#define JZ_LCD_CTRL_IFU1_IRQ BIT(9)
#define JZ_LCD_CTRL_DD_IRQ BIT(8)
#define JZ_LCD_CTRL_QDD_IRQ BIT(7)
#define JZ_LCD_CTRL_REVERSE_ENDIAN BIT(6)
#define JZ_LCD_CTRL_LSB_FISRT BIT(5)
#define JZ_LCD_CTRL_DISABLE BIT(4)
#define JZ_LCD_CTRL_ENABLE BIT(3)
#define JZ_LCD_CTRL_BPP_1 0x0
#define JZ_LCD_CTRL_BPP_2 0x1
#define JZ_LCD_CTRL_BPP_4 0x2
#define JZ_LCD_CTRL_BPP_8 0x3
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
#define JZ_LCD_CMD_SOF_IRQ BIT(15)
#define JZ_LCD_CMD_EOF_IRQ BIT(16)
#define JZ_LCD_CMD_ENABLE_PAL BIT(12)
#define JZ_LCD_SYNC_MASK 0x3ff
#define JZ_LCD_STATE_DISABLED BIT(0)
struct jzfb_framedesc {
uint32_t next;
uint32_t addr;
uint32_t id;
uint32_t cmd;
} __packed;
struct jzfb {
struct fb_info *fb;
struct platform_device *pdev;
void __iomem *base;
struct resource *mem;
struct jz4740_fb_platform_data *pdata;
size_t vidmem_size;
void *vidmem;
dma_addr_t vidmem_phys;
struct jzfb_framedesc *framedesc;
dma_addr_t framedesc_phys;
struct clk *ldclk;
struct clk *lpclk;
unsigned is_enabled:1;
struct mutex lock;
uint32_t pseudo_palette[16];
};
static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
.id = "JZ4740 FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 0,
.ypanstep = 0,
.ywrapstep = 0,
.accel = FB_ACCEL_NONE,
};
static const struct jz_gpio_bulk_request jz_lcd_ctrl_pins[] = {
JZ_GPIO_BULK_PIN(LCD_PCLK),
JZ_GPIO_BULK_PIN(LCD_HSYNC),
JZ_GPIO_BULK_PIN(LCD_VSYNC),
JZ_GPIO_BULK_PIN(LCD_DE),
JZ_GPIO_BULK_PIN(LCD_PS),
JZ_GPIO_BULK_PIN(LCD_REV),
JZ_GPIO_BULK_PIN(LCD_CLS),
JZ_GPIO_BULK_PIN(LCD_SPL),
};
static const struct jz_gpio_bulk_request jz_lcd_data_pins[] = {
JZ_GPIO_BULK_PIN(LCD_DATA0),
JZ_GPIO_BULK_PIN(LCD_DATA1),
JZ_GPIO_BULK_PIN(LCD_DATA2),
JZ_GPIO_BULK_PIN(LCD_DATA3),
JZ_GPIO_BULK_PIN(LCD_DATA4),
JZ_GPIO_BULK_PIN(LCD_DATA5),
JZ_GPIO_BULK_PIN(LCD_DATA6),
JZ_GPIO_BULK_PIN(LCD_DATA7),
JZ_GPIO_BULK_PIN(LCD_DATA8),
JZ_GPIO_BULK_PIN(LCD_DATA9),
JZ_GPIO_BULK_PIN(LCD_DATA10),
JZ_GPIO_BULK_PIN(LCD_DATA11),
JZ_GPIO_BULK_PIN(LCD_DATA12),
JZ_GPIO_BULK_PIN(LCD_DATA13),
JZ_GPIO_BULK_PIN(LCD_DATA14),
JZ_GPIO_BULK_PIN(LCD_DATA15),
JZ_GPIO_BULK_PIN(LCD_DATA16),
JZ_GPIO_BULK_PIN(LCD_DATA17),
};
static unsigned int jzfb_num_ctrl_pins(struct jzfb *jzfb)
{
unsigned int num;
switch (jzfb->pdata->lcd_type) {
case JZ_LCD_TYPE_GENERIC_16_BIT:
num = 4;
break;
case JZ_LCD_TYPE_GENERIC_18_BIT:
num = 4;
break;
case JZ_LCD_TYPE_8BIT_SERIAL:
num = 3;
break;
case JZ_LCD_TYPE_SPECIAL_TFT_1:
case JZ_LCD_TYPE_SPECIAL_TFT_2:
case JZ_LCD_TYPE_SPECIAL_TFT_3:
num = 8;
break;
default:
num = 0;
break;
}
return num;
}
static unsigned int jzfb_num_data_pins(struct jzfb *jzfb)
{
unsigned int num;
switch (jzfb->pdata->lcd_type) {
case JZ_LCD_TYPE_GENERIC_16_BIT:
num = 16;
break;
case JZ_LCD_TYPE_GENERIC_18_BIT:
num = 18;
break;
case JZ_LCD_TYPE_8BIT_SERIAL:
num = 8;
break;
case JZ_LCD_TYPE_SPECIAL_TFT_1:
case JZ_LCD_TYPE_SPECIAL_TFT_2:
case JZ_LCD_TYPE_SPECIAL_TFT_3:
if (jzfb->pdata->bpp == 18)
num = 18;
else
num = 16;
break;
default:
num = 0;
break;
}
return num;
}
/* Based on CNVT_TOHW macro from skeletonfb.c */
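/* Scales a 16-bit colour component down to bf->length bits and shifts it
 * into the field's bit position; e.g. 0xffff in a 5-bit field yields 0x1f. */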
static inline uint32_t jzfb_convert_color_to_hw(unsigned val,
struct fb_bitfield *bf)
{
return (((val << bf->length) + 0x7FFF - val) >> 16) << bf->offset;
}
static int jzfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *fb)
{
uint32_t color;
if (regno >= 16)
return -EINVAL;
color = jzfb_convert_color_to_hw(red, &fb->var.red);
color |= jzfb_convert_color_to_hw(green, &fb->var.green);
color |= jzfb_convert_color_to_hw(blue, &fb->var.blue);
color |= jzfb_convert_color_to_hw(transp, &fb->var.transp);
((uint32_t *)(fb->pseudo_palette))[regno] = color;
return 0;
}
static int jzfb_get_controller_bpp(struct jzfb *jzfb)
{
switch (jzfb->pdata->bpp) {
case 18:
case 24:
return 32;
case 15:
return 16;
default:
return jzfb->pdata->bpp;
}
}
static struct fb_videomode *jzfb_get_mode(struct jzfb *jzfb,
struct fb_var_screeninfo *var)
{
size_t i;
struct fb_videomode *mode = jzfb->pdata->modes;
for (i = 0; i < jzfb->pdata->num_modes; ++i, ++mode) {
if (mode->xres == var->xres && mode->yres == var->yres)
return mode;
}
return NULL;
}
static int jzfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fb)
{
struct jzfb *jzfb = fb->par;
struct fb_videomode *mode;
if (var->bits_per_pixel != jzfb_get_controller_bpp(jzfb) &&
var->bits_per_pixel != jzfb->pdata->bpp)
return -EINVAL;
mode = jzfb_get_mode(jzfb, var);
if (mode == NULL)
return -EINVAL;
fb_videomode_to_var(var, mode);
switch (jzfb->pdata->bpp) {
case 8:
break;
case 15:
var->red.offset = 10;
var->red.length = 5;
var->green.offset = 6;
var->green.length = 5;
var->blue.offset = 0;
var->blue.length = 5;
break;
case 16:
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
break;
case 18:
var->red.offset = 16;
var->red.length = 6;
var->green.offset = 8;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 6;
var->bits_per_pixel = 32;
break;
case 32:
case 24:
var->transp.offset = 24;
var->transp.length = 8;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->bits_per_pixel = 32;
break;
default:
break;
}
return 0;
}
static int jzfb_set_par(struct fb_info *info)
{
struct jzfb *jzfb = info->par;
struct jz4740_fb_platform_data *pdata = jzfb->pdata;
struct fb_var_screeninfo *var = &info->var;
struct fb_videomode *mode;
uint16_t hds, vds;
uint16_t hde, vde;
uint16_t ht, vt;
uint32_t ctrl;
uint32_t cfg;
unsigned long rate;
mode = jzfb_get_mode(jzfb, var);
if (mode == NULL)
return -EINVAL;
if (mode == info->mode)
return 0;
info->mode = mode;
hds = mode->hsync_len + mode->left_margin;
hde = hds + mode->xres;
ht = hde + mode->right_margin;
vds = mode->vsync_len + mode->upper_margin;
vde = vds + mode->yres;
vt = vde + mode->lower_margin;
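/*
 * hds/vds mark where the active picture starts (sync length plus back
 * porch), hde/vde where it ends, and ht/vt give the total line/frame
 * length including the front porch.
 */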
ctrl = JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_16;
switch (pdata->bpp) {
case 1:
ctrl |= JZ_LCD_CTRL_BPP_1;
break;
case 2:
ctrl |= JZ_LCD_CTRL_BPP_2;
break;
case 4:
ctrl |= JZ_LCD_CTRL_BPP_4;
break;
case 8:
ctrl |= JZ_LCD_CTRL_BPP_8;
break;
case 15:
ctrl |= JZ_LCD_CTRL_RGB555; /* Fallthrough */
case 16:
ctrl |= JZ_LCD_CTRL_BPP_15_16;
break;
case 18:
case 24:
case 32:
ctrl |= JZ_LCD_CTRL_BPP_18_24;
break;
default:
break;
}
cfg = pdata->lcd_type & 0xf;
if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT))
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT))
cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW;
if (pdata->pixclk_falling_edge)
cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;
if (pdata->date_enable_active_low)
cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW;
if (pdata->lcd_type == JZ_LCD_TYPE_GENERIC_18_BIT)
cfg |= JZ_LCD_CFG_18_BIT;
if (mode->pixclock) {
rate = PICOS2KHZ(mode->pixclock) * 1000;
mode->refresh = rate / vt / ht;
} else {
if (pdata->lcd_type == JZ_LCD_TYPE_8BIT_SERIAL)
rate = mode->refresh * (vt + 2 * mode->xres) * ht;
else
rate = mode->refresh * vt * ht;
mode->pixclock = KHZ2PICOS(rate / 1000);
}
mutex_lock(&jzfb->lock);
if (!jzfb->is_enabled)
clk_enable(jzfb->ldclk);
else
ctrl |= JZ_LCD_CTRL_ENABLE;
switch (pdata->lcd_type) {
case JZ_LCD_TYPE_SPECIAL_TFT_1:
case JZ_LCD_TYPE_SPECIAL_TFT_2:
case JZ_LCD_TYPE_SPECIAL_TFT_3:
writel(pdata->special_tft_config.spl, jzfb->base + JZ_REG_LCD_SPL);
writel(pdata->special_tft_config.cls, jzfb->base + JZ_REG_LCD_CLS);
writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_PS);
writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_REV);
break;
default:
cfg |= JZ_LCD_CFG_PS_DISABLE;
cfg |= JZ_LCD_CFG_CLS_DISABLE;
cfg |= JZ_LCD_CFG_SPL_DISABLE;
cfg |= JZ_LCD_CFG_REV_DISABLE;
break;
}
writel(mode->hsync_len, jzfb->base + JZ_REG_LCD_HSYNC);
writel(mode->vsync_len, jzfb->base + JZ_REG_LCD_VSYNC);
writel((ht << 16) | vt, jzfb->base + JZ_REG_LCD_VAT);
writel((hds << 16) | hde, jzfb->base + JZ_REG_LCD_DAH);
writel((vds << 16) | vde, jzfb->base + JZ_REG_LCD_DAV);
writel(cfg, jzfb->base + JZ_REG_LCD_CFG);
writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
if (!jzfb->is_enabled)
clk_disable(jzfb->ldclk);
mutex_unlock(&jzfb->lock);
clk_set_rate(jzfb->lpclk, rate);
clk_set_rate(jzfb->ldclk, rate * 3);
return 0;
}
static void jzfb_enable(struct jzfb *jzfb)
{
uint32_t ctrl;
clk_enable(jzfb->ldclk);
jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
writel(0, jzfb->base + JZ_REG_LCD_STATE);
writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL);
ctrl |= JZ_LCD_CTRL_ENABLE;
ctrl &= ~JZ_LCD_CTRL_DISABLE;
writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
}
static void jzfb_disable(struct jzfb *jzfb)
{
uint32_t ctrl;
ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL);
ctrl |= JZ_LCD_CTRL_DISABLE;
writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL);
do {
ctrl = readl(jzfb->base + JZ_REG_LCD_STATE);
} while (!(ctrl & JZ_LCD_STATE_DISABLED));
jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
clk_disable(jzfb->ldclk);
}
static int jzfb_blank(int blank_mode, struct fb_info *info)
{
struct jzfb *jzfb = info->par;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
mutex_lock(&jzfb->lock);
if (jzfb->is_enabled) {
mutex_unlock(&jzfb->lock);
return 0;
}
jzfb_enable(jzfb);
jzfb->is_enabled = 1;
mutex_unlock(&jzfb->lock);
break;
default:
mutex_lock(&jzfb->lock);
if (!jzfb->is_enabled) {
mutex_unlock(&jzfb->lock);
return 0;
}
jzfb_disable(jzfb);
jzfb->is_enabled = 0;
mutex_unlock(&jzfb->lock);
break;
}
return 0;
}
static int jzfb_alloc_devmem(struct jzfb *jzfb)
{
int max_videosize = 0;
struct fb_videomode *mode = jzfb->pdata->modes;
void *page;
int i;
for (i = 0; i < jzfb->pdata->num_modes; ++mode, ++i) {
if (max_videosize < mode->xres * mode->yres)
max_videosize = mode->xres * mode->yres;
}
max_videosize *= jzfb_get_controller_bpp(jzfb) >> 3;
jzfb->framedesc = dma_alloc_coherent(&jzfb->pdev->dev,
sizeof(*jzfb->framedesc),
&jzfb->framedesc_phys, GFP_KERNEL);
if (!jzfb->framedesc)
return -ENOMEM;
jzfb->vidmem_size = PAGE_ALIGN(max_videosize);
jzfb->vidmem = dma_alloc_coherent(&jzfb->pdev->dev,
jzfb->vidmem_size,
&jzfb->vidmem_phys, GFP_KERNEL);
if (!jzfb->vidmem)
goto err_free_framedesc;
for (page = jzfb->vidmem;
page < jzfb->vidmem + PAGE_ALIGN(jzfb->vidmem_size);
page += PAGE_SIZE) {
SetPageReserved(virt_to_page(page));
}
jzfb->framedesc->next = jzfb->framedesc_phys;
jzfb->framedesc->addr = jzfb->vidmem_phys;
jzfb->framedesc->id = 0xdeafbead;
jzfb->framedesc->cmd = 0;
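/* The descriptor's length field appears to be given in 32-bit words,
 * hence the divide by four below. */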
jzfb->framedesc->cmd |= max_videosize / 4;
return 0;
err_free_framedesc:
dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc),
jzfb->framedesc, jzfb->framedesc_phys);
return -ENOMEM;
}
static void jzfb_free_devmem(struct jzfb *jzfb)
{
dma_free_coherent(&jzfb->pdev->dev, jzfb->vidmem_size,
jzfb->vidmem, jzfb->vidmem_phys);
dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc),
jzfb->framedesc, jzfb->framedesc_phys);
}
static struct fb_ops jzfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = jzfb_check_var,
.fb_set_par = jzfb_set_par,
.fb_blank = jzfb_blank,
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
.fb_setcolreg = jzfb_setcolreg,
};
static int __devinit jzfb_probe(struct platform_device *pdev)
{
int ret;
struct jzfb *jzfb;
struct fb_info *fb;
struct jz4740_fb_platform_data *pdata = pdev->dev.platform_data;
struct resource *mem;
if (!pdata) {
dev_err(&pdev->dev, "Missing platform data\n");
return -ENXIO;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "Failed to get register memory resource\n");
return -ENXIO;
}
mem = request_mem_region(mem->start, resource_size(mem), pdev->name);
if (!mem) {
dev_err(&pdev->dev, "Failed to request register memory region\n");
return -EBUSY;
}
fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev);
if (!fb) {
dev_err(&pdev->dev, "Failed to allocate framebuffer device\n");
ret = -ENOMEM;
goto err_release_mem_region;
}
fb->fbops = &jzfb_ops;
fb->flags = FBINFO_DEFAULT;
jzfb = fb->par;
jzfb->pdev = pdev;
jzfb->pdata = pdata;
jzfb->mem = mem;
jzfb->ldclk = clk_get(&pdev->dev, "lcd");
if (IS_ERR(jzfb->ldclk)) {
ret = PTR_ERR(jzfb->ldclk);
dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret);
goto err_framebuffer_release;
}
jzfb->lpclk = clk_get(&pdev->dev, "lcd_pclk");
if (IS_ERR(jzfb->lpclk)) {
ret = PTR_ERR(jzfb->lpclk);
dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret);
goto err_put_ldclk;
}
jzfb->base = ioremap(mem->start, resource_size(mem));
if (!jzfb->base) {
dev_err(&pdev->dev, "Failed to ioremap register memory region\n");
ret = -EBUSY;
goto err_put_lpclk;
}
platform_set_drvdata(pdev, jzfb);
mutex_init(&jzfb->lock);
fb_videomode_to_modelist(pdata->modes, pdata->num_modes,
&fb->modelist);
fb_videomode_to_var(&fb->var, pdata->modes);
fb->var.bits_per_pixel = pdata->bpp;
jzfb_check_var(&fb->var, fb);
ret = jzfb_alloc_devmem(jzfb);
if (ret) {
dev_err(&pdev->dev, "Failed to allocate video memory\n");
goto err_iounmap;
}
fb->fix = jzfb_fix;
fb->fix.line_length = fb->var.bits_per_pixel * fb->var.xres / 8;
fb->fix.mmio_start = mem->start;
fb->fix.mmio_len = resource_size(mem);
fb->fix.smem_start = jzfb->vidmem_phys;
fb->fix.smem_len = fb->fix.line_length * fb->var.yres;
fb->screen_base = jzfb->vidmem;
fb->pseudo_palette = jzfb->pseudo_palette;
fb_alloc_cmap(&fb->cmap, 256, 0);
clk_enable(jzfb->ldclk);
jzfb->is_enabled = 1;
writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0);
fb->mode = NULL;
jzfb_set_par(fb);
jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
ret = register_framebuffer(fb);
if (ret) {
dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret);
goto err_free_devmem;
}
jzfb->fb = fb;
return 0;
err_free_devmem:
jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
fb_dealloc_cmap(&fb->cmap);
jzfb_free_devmem(jzfb);
err_iounmap:
iounmap(jzfb->base);
err_put_lpclk:
clk_put(jzfb->lpclk);
err_put_ldclk:
clk_put(jzfb->ldclk);
err_framebuffer_release:
framebuffer_release(fb);
err_release_mem_region:
release_mem_region(mem->start, resource_size(mem));
return ret;
}
static int __devexit jzfb_remove(struct platform_device *pdev)
{
struct jzfb *jzfb = platform_get_drvdata(pdev);
jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb);
jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
iounmap(jzfb->base);
release_mem_region(jzfb->mem->start, resource_size(jzfb->mem));
fb_dealloc_cmap(&jzfb->fb->cmap);
jzfb_free_devmem(jzfb);
platform_set_drvdata(pdev, NULL);
clk_put(jzfb->lpclk);
clk_put(jzfb->ldclk);
framebuffer_release(jzfb->fb);
return 0;
}
#ifdef CONFIG_PM
static int jzfb_suspend(struct device *dev)
{
struct jzfb *jzfb = dev_get_drvdata(dev);
console_lock();
fb_set_suspend(jzfb->fb, 1);
console_unlock();
mutex_lock(&jzfb->lock);
if (jzfb->is_enabled)
jzfb_disable(jzfb);
mutex_unlock(&jzfb->lock);
return 0;
}
static int jzfb_resume(struct device *dev)
{
struct jzfb *jzfb = dev_get_drvdata(dev);
clk_enable(jzfb->ldclk);
mutex_lock(&jzfb->lock);
if (jzfb->is_enabled)
jzfb_enable(jzfb);
mutex_unlock(&jzfb->lock);
console_lock();
fb_set_suspend(jzfb->fb, 0);
console_unlock();
return 0;
}
static const struct dev_pm_ops jzfb_pm_ops = {
.suspend = jzfb_suspend,
.resume = jzfb_resume,
.poweroff = jzfb_suspend,
.restore = jzfb_resume,
};
#define JZFB_PM_OPS (&jzfb_pm_ops)
#else
#define JZFB_PM_OPS NULL
#endif
static struct platform_driver jzfb_driver = {
.probe = jzfb_probe,
.remove = __devexit_p(jzfb_remove),
.driver = {
.name = "jz4740-fb",
.pm = JZFB_PM_OPS,
},
};
static int __init jzfb_init(void)
{
return platform_driver_register(&jzfb_driver);
}
module_init(jzfb_init);
static void __exit jzfb_exit(void)
{
platform_driver_unregister(&jzfb_driver);
}
module_exit(jzfb_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 SoC LCD framebuffer driver");
MODULE_ALIAS("platform:jz4740-fb");
| gpl-2.0 |
jeboo/zte_a2017U_B27 | arch/powerpc/boot/mpc52xx-psc.c | 13867 | 1467 | /*
* MPC5200 PSC serial console support.
*
* Author: Grant Likely <grant.likely@secretlab.ca>
*
* Copyright (c) 2007 Secret Lab Technologies Ltd.
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* It is assumed that the firmware (or the platform file) has already set
* up the port.
*/
#include "types.h"
#include "io.h"
#include "ops.h"
/* Programmable Serial Controller (PSC) status register bits */
#define MPC52xx_PSC_SR 0x04
#define MPC52xx_PSC_SR_RXRDY 0x0100
#define MPC52xx_PSC_SR_RXFULL 0x0200
#define MPC52xx_PSC_SR_TXRDY 0x0400
#define MPC52xx_PSC_SR_TXEMP 0x0800
#define MPC52xx_PSC_BUFFER 0x0C
static void *psc;
static int psc_open(void)
{
/* Assume the firmware has already configured the PSC into
* uart mode */
return 0;
}
static void psc_putc(unsigned char c)
{
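/* Busy-wait until the transmitter is ready, then write the character. */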
while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_TXRDY)) ;
out_8(psc + MPC52xx_PSC_BUFFER, c);
}
static unsigned char psc_tstc(void)
{
return (in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY) != 0;
}
static unsigned char psc_getc(void)
{
while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY)) ;
return in_8(psc + MPC52xx_PSC_BUFFER);
}
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp)
{
/* Get the base address of the psc registers */
if (dt_get_virtual_reg(devp, &psc, 1) < 1)
return -1;
scdp->open = psc_open;
scdp->putc = psc_putc;
scdp->getc = psc_getc;
scdp->tstc = psc_tstc;
return 0;
}
| gpl-2.0 |
tepelmann/linux-perf-cumulate | security/yama/yama_lsm.c | 44 | 9915 | /*
* Yama Linux Security Module
*
* Author: Kees Cook <keescook@chromium.org>
*
* Copyright (C) 2010 Canonical, Ltd.
* Copyright (C) 2011 The Chromium OS Authors.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*
*/
#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#define YAMA_SCOPE_DISABLED 0
#define YAMA_SCOPE_RELATIONAL 1
#define YAMA_SCOPE_CAPABILITY 2
#define YAMA_SCOPE_NO_ATTACH 3
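/*
 * ptrace_scope: 0 - classic ptrace permissions, 1 - restricted to
 * descendants or explicitly declared ptracers, 2 - attach requires
 * CAP_SYS_PTRACE, 3 - no ptrace attach at all.
 */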
static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
struct task_struct *tracer;
struct task_struct *tracee;
struct list_head node;
};
static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);
/**
* yama_ptracer_add - add/replace an exception for this tracer/tracee pair
* @tracer: the task_struct of the process doing the ptrace
* @tracee: the task_struct of the process to be ptraced
*
* Each tracee can have, at most, one tracer registered. Each time this
* is called, the prior registered tracer will be replaced for the tracee.
*
* Returns 0 if relationship was added, -ve on error.
*/
static int yama_ptracer_add(struct task_struct *tracer,
struct task_struct *tracee)
{
int rc = 0;
struct ptrace_relation *added;
struct ptrace_relation *entry, *relation = NULL;
added = kmalloc(sizeof(*added), GFP_KERNEL);
if (!added)
return -ENOMEM;
spin_lock_bh(&ptracer_relations_lock);
list_for_each_entry(entry, &ptracer_relations, node)
if (entry->tracee == tracee) {
relation = entry;
break;
}
if (!relation) {
relation = added;
relation->tracee = tracee;
list_add(&relation->node, &ptracer_relations);
}
relation->tracer = tracer;
spin_unlock_bh(&ptracer_relations_lock);
if (added != relation)
kfree(added);
return rc;
}
/**
* yama_ptracer_del - remove exceptions related to the given tasks
* @tracer: remove any relation where tracer task matches
* @tracee: remove any relation where tracee task matches
*/
static void yama_ptracer_del(struct task_struct *tracer,
struct task_struct *tracee)
{
struct ptrace_relation *relation, *safe;
spin_lock_bh(&ptracer_relations_lock);
list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
if (relation->tracee == tracee ||
(tracer && relation->tracer == tracer)) {
list_del(&relation->node);
kfree(relation);
}
spin_unlock_bh(&ptracer_relations_lock);
}
/**
* yama_task_free - check for task_pid to remove from exception list
* @task: task being removed
*/
void yama_task_free(struct task_struct *task)
{
yama_ptracer_del(task, task);
}
/**
* yama_task_prctl - check for Yama-specific prctl operations
* @option: operation
* @arg2: argument
* @arg3: argument
* @arg4: argument
* @arg5: argument
*
* Return 0 on success, -ve on error. -ENOSYS is returned when Yama
* does not handle the given option.
*/
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int rc;
struct task_struct *myself = current;
rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
if (rc != -ENOSYS)
return rc;
switch (option) {
case PR_SET_PTRACER:
/* Since a thread can call prctl(), find the group leader
* before calling _add() or _del() on it, since we want
* process-level granularity of control. The tracer group
* leader checking is handled later when walking the ancestry
* at the time of PTRACE_ATTACH check.
*/
rcu_read_lock();
if (!thread_group_leader(myself))
myself = rcu_dereference(myself->group_leader);
get_task_struct(myself);
rcu_read_unlock();
if (arg2 == 0) {
yama_ptracer_del(NULL, myself);
rc = 0;
} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
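/* A NULL tracer records an "any process may attach" exception. */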
rc = yama_ptracer_add(NULL, myself);
} else {
struct task_struct *tracer;
rcu_read_lock();
tracer = find_task_by_vpid(arg2);
if (tracer)
get_task_struct(tracer);
else
rc = -EINVAL;
rcu_read_unlock();
if (tracer) {
rc = yama_ptracer_add(tracer, myself);
put_task_struct(tracer);
}
}
put_task_struct(myself);
break;
}
return rc;
}
/**
* task_is_descendant - walk up a process family tree looking for a match
* @parent: the process to compare against while walking up from child
* @child: the process to start from while looking upwards for parent
*
* Returns 1 if child is a descendant of parent, 0 if not.
*/
static int task_is_descendant(struct task_struct *parent,
struct task_struct *child)
{
int rc = 0;
struct task_struct *walker = child;
if (!parent || !child)
return 0;
rcu_read_lock();
if (!thread_group_leader(parent))
parent = rcu_dereference(parent->group_leader);
while (walker->pid > 0) {
if (!thread_group_leader(walker))
walker = rcu_dereference(walker->group_leader);
if (walker == parent) {
rc = 1;
break;
}
walker = rcu_dereference(walker->real_parent);
}
rcu_read_unlock();
return rc;
}
/**
* ptracer_exception_found - tracer registered as exception for this tracee
* @tracer: the task_struct of the process attempting ptrace
* @tracee: the task_struct of the process to be ptraced
*
* Returns 1 if tracer has a ptracer exception ancestor for tracee.
*/
static int ptracer_exception_found(struct task_struct *tracer,
struct task_struct *tracee)
{
int rc = 0;
struct ptrace_relation *relation;
struct task_struct *parent = NULL;
bool found = false;
spin_lock_bh(&ptracer_relations_lock);
rcu_read_lock();
if (!thread_group_leader(tracee))
tracee = rcu_dereference(tracee->group_leader);
list_for_each_entry(relation, &ptracer_relations, node)
if (relation->tracee == tracee) {
parent = relation->tracer;
found = true;
break;
}
if (found && (parent == NULL || task_is_descendant(parent, tracer)))
rc = 1;
rcu_read_unlock();
spin_unlock_bh(&ptracer_relations_lock);
return rc;
}
/**
* yama_ptrace_access_check - validate PTRACE_ATTACH calls
* @child: task that current task is attempting to ptrace
* @mode: ptrace attach mode
*
* Returns 0 if following the ptrace is allowed, -ve on error.
*/
int yama_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int rc;
/* If standard caps disallows it, so does Yama. We should
* only tighten restrictions further.
*/
rc = cap_ptrace_access_check(child, mode);
if (rc)
return rc;
/* require ptrace target be a child of ptracer on attach */
if (mode == PTRACE_MODE_ATTACH) {
switch (ptrace_scope) {
case YAMA_SCOPE_DISABLED:
/* No additional restrictions. */
break;
case YAMA_SCOPE_RELATIONAL:
if (!task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
rc = -EPERM;
break;
case YAMA_SCOPE_CAPABILITY:
if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
rc = -EPERM;
break;
case YAMA_SCOPE_NO_ATTACH:
default:
rc = -EPERM;
break;
}
}
if (rc) {
printk_ratelimited(KERN_NOTICE
"ptrace of pid %d was attempted by: %s (pid %d)\n",
child->pid, current->comm, current->pid);
}
return rc;
}
/**
* yama_ptrace_traceme - validate PTRACE_TRACEME calls
* @parent: task that will become the ptracer of the current task
*
* Returns 0 if following the ptrace is allowed, -ve on error.
*/
int yama_ptrace_traceme(struct task_struct *parent)
{
int rc;
/* If standard caps disallows it, so does Yama. We should
* only tighten restrictions further.
*/
rc = cap_ptrace_traceme(parent);
if (rc)
return rc;
/* Only disallow PTRACE_TRACEME on more aggressive settings. */
switch (ptrace_scope) {
case YAMA_SCOPE_CAPABILITY:
if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
rc = -EPERM;
break;
case YAMA_SCOPE_NO_ATTACH:
rc = -EPERM;
break;
}
if (rc) {
printk_ratelimited(KERN_NOTICE
"ptraceme of pid %d was attempted by: %s (pid %d)\n",
current->pid, parent->comm, parent->pid);
}
return rc;
}
#ifndef CONFIG_SECURITY_YAMA_STACKED
static struct security_operations yama_ops = {
.name = "yama",
.ptrace_access_check = yama_ptrace_access_check,
.ptrace_traceme = yama_ptrace_traceme,
.task_prctl = yama_task_prctl,
.task_free = yama_task_free,
};
#endif
#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int rc;
if (write && !capable(CAP_SYS_PTRACE))
return -EPERM;
rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (rc)
return rc;
/* Lock the max value if it ever gets set. */
if (write && *(int *)table->data == *(int *)table->extra2)
table->extra1 = table->extra2;
return rc;
}
static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;
struct ctl_path yama_sysctl_path[] = {
{ .procname = "kernel", },
{ .procname = "yama", },
{ }
};
static struct ctl_table yama_sysctl_table[] = {
{
.procname = "ptrace_scope",
.data = &ptrace_scope,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = yama_dointvec_minmax,
.extra1 = &zero,
.extra2 = &max_scope,
},
{ }
};
#endif /* CONFIG_SYSCTL */
static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
if (!security_module_enable(&yama_ops))
return 0;
#endif
printk(KERN_INFO "Yama: becoming mindful.\n");
#ifndef CONFIG_SECURITY_YAMA_STACKED
if (register_security(&yama_ops))
panic("Yama: kernel registration failed.\n");
#endif
#ifdef CONFIG_SYSCTL
if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
panic("Yama: sysctl registration failed.\n");
#endif
return 0;
}
security_initcall(yama_init);
| gpl-2.0 |
NuxiNL/linux | tools/perf/util/session.c | 44 | 58998 | #include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread-stack.h"
#include "stat.h"
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset);
static int perf_session__open(struct perf_session *session)
{
struct perf_data_file *file = session->file;
if (perf_session__read_header(session) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)\n");
return -1;
}
if (perf_data_file__is_pipe(file))
return 0;
if (perf_header__has_feat(&session->header, HEADER_STAT))
return 0;
if (!perf_evlist__valid_sample_type(session->evlist)) {
pr_err("non matching sample_type\n");
return -1;
}
if (!perf_evlist__valid_sample_id_all(session->evlist)) {
pr_err("non matching sample_id_all\n");
return -1;
}
if (!perf_evlist__valid_read_format(session->evlist)) {
pr_err("non matching read_format\n");
return -1;
}
return 0;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *session)
{
int ret = machine__create_kernel_maps(&session->machines.host);
if (ret >= 0)
ret = machines__create_guest_kernel_maps(&session->machines);
return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
machines__destroy_kernel_maps(&session->machines);
}
static bool perf_session__has_comm_exec(struct perf_session *session)
{
struct perf_evsel *evsel;
evlist__for_each(session->evlist, evsel) {
if (evsel->attr.comm_exec)
return true;
}
return false;
}
static void perf_session__set_comm_exec(struct perf_session *session)
{
bool comm_exec = perf_session__has_comm_exec(session);
machines__set_comm_exec(&session->machines, comm_exec);
}
static int ordered_events__deliver_event(struct ordered_events *oe,
struct ordered_event *event)
{
struct perf_sample sample;
struct perf_session *session = container_of(oe, struct perf_session,
ordered_events);
int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
return ret;
}
return perf_session__deliver_event(session, event->event, &sample,
session->tool, event->file_offset);
}
struct perf_session *perf_session__new(struct perf_data_file *file,
bool repipe, struct perf_tool *tool)
{
struct perf_session *session = zalloc(sizeof(*session));
if (!session)
goto out;
session->repipe = repipe;
session->tool = tool;
INIT_LIST_HEAD(&session->auxtrace_index);
machines__init(&session->machines);
ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
if (file) {
if (perf_data_file__open(file))
goto out_delete;
session->file = file;
if (perf_data_file__is_read(file)) {
if (perf_session__open(session) < 0)
goto out_close;
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
} else {
session->machines.host.env = &perf_env;
}
if (!file || perf_data_file__is_write(file)) {
/*
* In O_RDONLY mode this will be performed when reading the
* kernel MMAP event, in perf_event__process_mmap().
*/
if (perf_session__create_kernel_maps(session) < 0)
pr_warning("Cannot read kernel map\n");
}
if (tool && tool->ordering_requires_timestamps &&
tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_events = false;
}
return session;
out_close:
perf_data_file__close(file);
out_delete:
perf_session__delete(session);
out:
return NULL;
}
static void perf_session__delete_threads(struct perf_session *session)
{
machine__delete_threads(&session->machines.host);
}
void perf_session__delete(struct perf_session *session)
{
auxtrace__free(session);
auxtrace_index__free(&session->auxtrace_index);
perf_session__destroy_kernel_maps(session);
perf_session__delete_threads(session);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
if (session->file)
perf_data_file__close(session->file);
free(session);
}
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
__maybe_unused,
union perf_event *event
__maybe_unused,
struct perf_session *session
__maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_evlist **pevlist
__maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_evlist **pevlist
__maybe_unused)
{
if (dump_trace)
perf_event__fprintf_event_update(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_event_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe);
static int skipn(int fd, off_t n)
{
char buf[4096];
ssize_t ret;
while (n > 0) {
ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
if (ret <= 0)
return ret;
n -= ret;
}
return 0;
}
static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session
__maybe_unused)
{
dump_printf(": unhandled!\n");
if (perf_data_file__is_pipe(session->file))
skipn(perf_data_file__fd(session->file), event->auxtrace.size);
return event->auxtrace.size;
}
static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
}
static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_thread_map(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_cpu_map(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *session __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_stat_config(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_stat_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *perf_session
__maybe_unused)
{
if (dump_trace)
perf_event__fprintf_stat(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_session *perf_session
__maybe_unused)
{
if (dump_trace)
perf_event__fprintf_stat_round(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
tool->sample = process_event_sample_stub;
if (tool->mmap == NULL)
tool->mmap = process_event_stub;
if (tool->mmap2 == NULL)
tool->mmap2 = process_event_stub;
if (tool->comm == NULL)
tool->comm = process_event_stub;
if (tool->fork == NULL)
tool->fork = process_event_stub;
if (tool->exit == NULL)
tool->exit = process_event_stub;
if (tool->lost == NULL)
tool->lost = perf_event__process_lost;
if (tool->lost_samples == NULL)
tool->lost_samples = perf_event__process_lost_samples;
if (tool->aux == NULL)
tool->aux = perf_event__process_aux;
if (tool->itrace_start == NULL)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->context_switch == NULL)
tool->context_switch = perf_event__process_switch;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
tool->throttle = process_event_stub;
if (tool->unthrottle == NULL)
tool->unthrottle = process_event_stub;
if (tool->attr == NULL)
tool->attr = process_event_synth_attr_stub;
if (tool->event_update == NULL)
tool->event_update = process_event_synth_event_update_stub;
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
tool->build_id = process_event_op2_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_events)
tool->finished_round = process_finished_round;
else
tool->finished_round = process_finished_round_stub;
}
if (tool->id_index == NULL)
tool->id_index = process_event_op2_stub;
if (tool->auxtrace_info == NULL)
tool->auxtrace_info = process_event_op2_stub;
if (tool->auxtrace == NULL)
tool->auxtrace = process_event_auxtrace_stub;
if (tool->auxtrace_error == NULL)
tool->auxtrace_error = process_event_op2_stub;
if (tool->thread_map == NULL)
tool->thread_map = process_event_thread_map_stub;
if (tool->cpu_map == NULL)
tool->cpu_map = process_event_cpu_map_stub;
if (tool->stat_config == NULL)
tool->stat_config = process_event_stat_config_stub;
if (tool->stat == NULL)
tool->stat = process_stat_stub;
if (tool->stat_round == NULL)
tool->stat_round = process_stat_round_stub;
if (tool->time_conv == NULL)
tool->time_conv = process_event_op2_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
int size = end - data;
BUG_ON(size % sizeof(u64));
mem_bswap_64(data, size);
}
static void perf_event__all64_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct perf_event_header *hdr = &event->header;
mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
event->comm.pid = bswap_32(event->comm.pid);
event->comm.tid = bswap_32(event->comm.tid);
if (sample_id_all) {
void *data = &event->comm.comm;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__mmap_swap(union perf_event *event,
bool sample_id_all)
{
event->mmap.pid = bswap_32(event->mmap.pid);
event->mmap.tid = bswap_32(event->mmap.tid);
event->mmap.start = bswap_64(event->mmap.start);
event->mmap.len = bswap_64(event->mmap.len);
event->mmap.pgoff = bswap_64(event->mmap.pgoff);
if (sample_id_all) {
void *data = &event->mmap.filename;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__mmap2_swap(union perf_event *event,
bool sample_id_all)
{
event->mmap2.pid = bswap_32(event->mmap2.pid);
event->mmap2.tid = bswap_32(event->mmap2.tid);
event->mmap2.start = bswap_64(event->mmap2.start);
event->mmap2.len = bswap_64(event->mmap2.len);
event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
event->mmap2.maj = bswap_32(event->mmap2.maj);
event->mmap2.min = bswap_32(event->mmap2.min);
event->mmap2.ino = bswap_64(event->mmap2.ino);
if (sample_id_all) {
void *data = &event->mmap2.filename;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
event->fork.pid = bswap_32(event->fork.pid);
event->fork.tid = bswap_32(event->fork.tid);
event->fork.ppid = bswap_32(event->fork.ppid);
event->fork.ptid = bswap_32(event->fork.ptid);
event->fork.time = bswap_64(event->fork.time);
if (sample_id_all)
swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
event->read.pid = bswap_32(event->read.pid);
event->read.tid = bswap_32(event->read.tid);
event->read.value = bswap_64(event->read.value);
event->read.time_enabled = bswap_64(event->read.time_enabled);
event->read.time_running = bswap_64(event->read.time_running);
event->read.id = bswap_64(event->read.id);
if (sample_id_all)
swap_sample_id_all(event, &event->read + 1);
}
static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
event->aux.aux_offset = bswap_64(event->aux.aux_offset);
event->aux.aux_size = bswap_64(event->aux.aux_size);
event->aux.flags = bswap_64(event->aux.flags);
if (sample_id_all)
swap_sample_id_all(event, &event->aux + 1);
}
static void perf_event__itrace_start_swap(union perf_event *event,
bool sample_id_all)
{
event->itrace_start.pid = bswap_32(event->itrace_start.pid);
event->itrace_start.tid = bswap_32(event->itrace_start.tid);
if (sample_id_all)
swap_sample_id_all(event, &event->itrace_start + 1);
}
static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
event->context_switch.next_prev_pid =
bswap_32(event->context_switch.next_prev_pid);
event->context_switch.next_prev_tid =
bswap_32(event->context_switch.next_prev_tid);
}
if (sample_id_all)
swap_sample_id_all(event, &event->context_switch + 1);
}
static void perf_event__throttle_swap(union perf_event *event,
bool sample_id_all)
{
event->throttle.time = bswap_64(event->throttle.time);
event->throttle.id = bswap_64(event->throttle.id);
event->throttle.stream_id = bswap_64(event->throttle.stream_id);
if (sample_id_all)
swap_sample_id_all(event, &event->throttle + 1);
}
static u8 revbyte(u8 b)
{
int rev = (b >> 4) | ((b & 0xf) << 4);
rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
return (u8) rev;
}
/*
* XXX this is a hack in an attempt to carry the flags bitfield
* through the endian village. The ABI says:
*
* Bit-fields are allocated from right to left (least to most significant)
* on little-endian implementations and from left to right (most to least
* significant) on big-endian implementations.
*
* The above seems to be byte specific, so we need to reverse each
* byte of the bitfield. The 'Internet' also says this might be
* implementation specific and we probably need a proper fix: carry the
* perf_event_attr bitfield flags in a separate data file FEAT_ section.
* Though this seems to work for now.
*/
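/*
* Worked example (illustrative only): revbyte() above reverses the bit
* order within a single byte, e.g. revbyte(0x01) == 0x80 and
* revbyte(0xA0) == 0x05; swap_bitfield() below applies it to each byte
* of the bitfield region.
*/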
static void swap_bitfield(u8 *p, unsigned len)
{
unsigned i;
for (i = 0; i < len; i++) {
*p = revbyte(*p);
p++;
}
}
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
attr->type = bswap_32(attr->type);
attr->size = bswap_32(attr->size);
#define bswap_safe(f, n) \
(attr->size > (offsetof(struct perf_event_attr, f) + \
sizeof(attr->f) * (n)))
#define bswap_field(f, sz) \
do { \
if (bswap_safe(f, 0)) \
attr->f = bswap_##sz(attr->f); \
} while(0)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)
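/*
* For example: if the file was written by an older perf whose
* perf_event_attr ended before aux_watermark, attr->size will not exceed
* offsetof(struct perf_event_attr, aux_watermark), so bswap_safe() is
* false and bswap_field_32(aux_watermark) below becomes a no-op instead
* of touching bytes beyond the recorded struct.
*/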
bswap_field_64(config);
bswap_field_64(sample_period);
bswap_field_64(sample_type);
bswap_field_64(read_format);
bswap_field_32(wakeup_events);
bswap_field_32(bp_type);
bswap_field_64(bp_addr);
bswap_field_64(bp_len);
bswap_field_64(branch_sample_type);
bswap_field_64(sample_regs_user);
bswap_field_32(sample_stack_user);
bswap_field_32(aux_watermark);
/*
* After read_format are bitfields. Check read_format because
* we are unable to use offsetof on bitfield.
*/
if (bswap_safe(read_format, 1))
swap_bitfield((u8 *) (&attr->read_format + 1),
sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
static void perf_event__hdr_attr_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
size_t size;
perf_event__attr_swap(&event->attr.attr);
size = event->header.size;
size -= (void *)&event->attr.id - (void *)event;
mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_update_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->event_update.type = bswap_64(event->event_update.type);
event->event_update.id = bswap_64(event->event_update.id);
}
static void perf_event__event_type_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->event_type.event_type.event_id =
bswap_64(event->event_type.event_type.event_id);
}
static void perf_event__tracing_data_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->tracing_data.size = bswap_32(event->tracing_data.size);
}
static void perf_event__auxtrace_info_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
size_t size;
event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
size = event->header.size;
size -= (void *)&event->auxtrace_info.priv - (void *)event;
mem_bswap_64(event->auxtrace_info.priv, size);
}
static void perf_event__auxtrace_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->auxtrace.size = bswap_64(event->auxtrace.size);
event->auxtrace.offset = bswap_64(event->auxtrace.offset);
event->auxtrace.reference = bswap_64(event->auxtrace.reference);
event->auxtrace.idx = bswap_32(event->auxtrace.idx);
event->auxtrace.tid = bswap_32(event->auxtrace.tid);
event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}
static void perf_event__auxtrace_error_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}
static void perf_event__thread_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
unsigned i;
event->thread_map.nr = bswap_64(event->thread_map.nr);
for (i = 0; i < event->thread_map.nr; i++)
event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}
static void perf_event__cpu_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct cpu_map_data *data = &event->cpu_map.data;
struct cpu_map_entries *cpus;
struct cpu_map_mask *mask;
unsigned i;
data->type = bswap_64(data->type);
switch (data->type) {
case PERF_CPU_MAP__CPUS:
cpus = (struct cpu_map_entries *)data->data;
cpus->nr = bswap_16(cpus->nr);
for (i = 0; i < cpus->nr; i++)
cpus->cpu[i] = bswap_16(cpus->cpu[i]);
break;
case PERF_CPU_MAP__MASK:
mask = (struct cpu_map_mask *) data->data;
mask->nr = bswap_16(mask->nr);
mask->long_size = bswap_16(mask->long_size);
switch (mask->long_size) {
case 4: mem_bswap_32(&mask->mask, mask->nr); break;
case 8: mem_bswap_64(&mask->mask, mask->nr); break;
default:
pr_err("cpu_map swap: unsupported long size\n");
}
break;
default:
break;
}
}
static void perf_event__stat_config_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
u64 size;
size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
size += 1; /* nr item itself */
mem_bswap_64(&event->stat_config.nr, size);
}
static void perf_event__stat_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->stat.id = bswap_64(event->stat.id);
event->stat.thread = bswap_32(event->stat.thread);
event->stat.cpu = bswap_32(event->stat.cpu);
event->stat.val = bswap_64(event->stat.val);
event->stat.ena = bswap_64(event->stat.ena);
event->stat.run = bswap_64(event->stat.run);
}
static void perf_event__stat_round_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
event->stat_round.type = bswap_64(event->stat_round.type);
event->stat_round.time = bswap_64(event->stat_round.time);
}
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_MMAP] = perf_event__mmap_swap,
[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
[PERF_RECORD_COMM] = perf_event__comm_swap,
[PERF_RECORD_FORK] = perf_event__task_swap,
[PERF_RECORD_EXIT] = perf_event__task_swap,
[PERF_RECORD_LOST] = perf_event__all64_swap,
[PERF_RECORD_READ] = perf_event__read_swap,
[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
[PERF_RECORD_AUX] = perf_event__aux_swap,
[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
[PERF_RECORD_SWITCH] = perf_event__switch_swap,
[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
[PERF_RECORD_STAT] = perf_event__stat_swap,
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
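/*
* event_swap() further below indexes this table by event->header.type;
* entries left NULL (e.g. PERF_RECORD_HEADER_BUILD_ID) get no body swap
* beyond the already-swapped header.
*/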
/*
* When perf record finishes a pass over every buffer, it records this pseudo
* event.
* We record the max timestamp t found in pass n.
* Assuming these timestamps are monotonic across cpus, we know that if
* a buffer still has events with timestamps below t, they will all be
* available and read in pass n + 1.
* Hence when we start to read pass n + 2, we can safely flush all
* events with timestamps below t.
*
* ============ PASS n =================
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 1 | 2
* 2 | 3
* - | 4 <--- max recorded
*
* ============ PASS n + 1 ==============
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 3 | 5
* 4 | 6
* 5 | 7 <---- max recorded
*
* Flush all events below timestamp 4
*
* ============ PASS n + 2 ==============
* CPU 0 | CPU 1
* |
* cnt1 timestamps | cnt2 timestamps
* 6 | 8
* 7 | 9
* - | 10
*
* Flush all events below timestamp 7
* etc...
*/
static int process_finished_round(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe)
{
if (dump_trace)
fprintf(stdout, "\n");
return ordered_events__flush(oe, OE_FLUSH__ROUND);
}
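/*
* Sketch of how this is wired up (assumed usage, following
* perf_tool__fill_defaults() above): a tool that sets .ordered_events = true
* gets tool->finished_round = process_finished_round, so every
* PERF_RECORD_FINISHED_ROUND triggers an OE_FLUSH__ROUND flush of the
* queue filled by perf_session__queue_event() below.
*/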
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset)
{
return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
struct ip_callchain *callchain = sample->callchain;
struct branch_stack *lbr_stack = sample->branch_stack;
u64 kernel_callchain_nr = callchain->nr;
unsigned int i;
for (i = 0; i < kernel_callchain_nr; i++) {
if (callchain->ips[i] == PERF_CONTEXT_USER)
break;
}
if ((i != kernel_callchain_nr) && lbr_stack->nr) {
u64 total_nr;
/*
* The LBR callstack can only capture the user call chain;
* i is the number of kernel call chain entries,
* and 1 accounts for PERF_CONTEXT_USER.
*
* The user call chain is stored in LBR registers.
* LBR registers come in pairs: the caller is stored
* in the "from" register, while the callee is stored
* in the "to" register.
* For example, given a call stack
* "A"->"B"->"C"->"D",
* the LBR registers will record
* "C"->"D", "B"->"C", "A"->"B".
* So only the first "to" register and all the "from"
* registers are needed to reconstruct the whole stack.
*/
total_nr = i + 1 + lbr_stack->nr + 1;
kernel_callchain_nr = i + 1;
printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
for (i = 0; i < kernel_callchain_nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
i, callchain->ips[i]);
printf("..... %2d: %016" PRIx64 "\n",
(int)(kernel_callchain_nr), lbr_stack->entries[0].to);
for (i = 0; i < lbr_stack->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
(int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
}
}
static void callchain__printf(struct perf_evsel *evsel,
struct perf_sample *sample)
{
unsigned int i;
struct ip_callchain *callchain = sample->callchain;
if (perf_evsel__has_branch_callstack(evsel))
callchain__lbr_callstack_printf(sample);
printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
for (i = 0; i < callchain->nr; i++)
printf("..... %2d: %016" PRIx64 "\n",
i, callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
{
uint64_t i;
printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
for (i = 0; i < sample->branch_stack->nr; i++) {
struct branch_entry *e = &sample->branch_stack->entries[i];
printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
i, e->from, e->to,
e->flags.cycles,
e->flags.mispred ? "M" : " ",
e->flags.predicted ? "P" : " ",
e->flags.abort ? "A" : " ",
e->flags.in_tx ? "T" : " ",
(unsigned)e->flags.reserved);
}
}
static void regs_dump__printf(u64 mask, u64 *regs)
{
unsigned rid, i = 0;
for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs[i++];
printf(".... %-5s 0x%" PRIx64 "\n",
perf_reg_name(rid), val);
}
}
static const char *regs_abi[] = {
[PERF_SAMPLE_REGS_ABI_NONE] = "none",
[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};
static inline const char *regs_dump_abi(struct regs_dump *d)
{
if (d->abi > PERF_SAMPLE_REGS_ABI_64)
return "unknown";
return regs_abi[d->abi];
}
static void regs__printf(const char *type, struct regs_dump *regs)
{
u64 mask = regs->mask;
printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
type,
mask,
regs_dump_abi(regs));
regs_dump__printf(mask, regs->regs);
}
static void regs_user__printf(struct perf_sample *sample)
{
struct regs_dump *user_regs = &sample->user_regs;
if (user_regs->regs)
regs__printf("user", user_regs);
}
static void regs_intr__printf(struct perf_sample *sample)
{
struct regs_dump *intr_regs = &sample->intr_regs;
if (intr_regs->regs)
regs__printf("intr", intr_regs);
}
static void stack_user__printf(struct stack_dump *dump)
{
printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
dump->size, dump->offset);
}
static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
union perf_event *event,
struct perf_sample *sample)
{
u64 sample_type = __perf_evlist__combined_sample_type(evlist);
if (event->header.type != PERF_RECORD_SAMPLE &&
!perf_evlist__sample_id_all(evlist)) {
fputs("-1 -1 ", stdout);
return;
}
if ((sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);
if (sample_type & PERF_SAMPLE_TIME)
printf("%" PRIu64 " ", sample->time);
}
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
printf("... sample_read:\n");
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
printf("...... time enabled %016" PRIx64 "\n",
sample->read.time_enabled);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
printf("...... time running %016" PRIx64 "\n",
sample->read.time_running);
if (read_format & PERF_FORMAT_GROUP) {
u64 i;
printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
for (i = 0; i < sample->read.group.nr; i++) {
struct sample_read_value *value;
value = &sample->read.group.values[i];
printf("..... id %016" PRIx64
", value %016" PRIx64 "\n",
value->id, value->value);
}
} else
printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
sample->read.one.id, sample->read.one.value);
}
static void dump_event(struct perf_evlist *evlist, union perf_event *event,
u64 file_offset, struct perf_sample *sample)
{
if (!dump_trace)
return;
printf("\n%#" PRIx64 " [%#x]: event: %d\n",
file_offset, event->header.size, event->header.type);
trace_event(event);
if (sample)
perf_evlist__print_tstamp(evlist, event, sample);
printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
event->header.size, perf_event__name(event->header.type));
}
static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample)
{
u64 sample_type;
if (!dump_trace)
return;
printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
sample_type = evsel->attr.sample_type;
if (sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(evsel, sample);
if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
branch_stack__printf(sample);
if (sample_type & PERF_SAMPLE_REGS_USER)
regs_user__printf(sample);
if (sample_type & PERF_SAMPLE_REGS_INTR)
regs_intr__printf(sample);
if (sample_type & PERF_SAMPLE_STACK_USER)
stack_user__printf(&sample->user_stack);
if (sample_type & PERF_SAMPLE_WEIGHT)
printf("... weight: %" PRIu64 "\n", sample->weight);
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
if (sample_type & PERF_SAMPLE_TRANSACTION)
printf("... transaction: %" PRIx64 "\n", sample->transaction);
if (sample_type & PERF_SAMPLE_READ)
sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
union perf_event *event,
struct perf_sample *sample)
{
struct machine *machine;
if (perf_guest &&
((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
(sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
u32 pid;
if (event->header.type == PERF_RECORD_MMAP
|| event->header.type == PERF_RECORD_MMAP2)
pid = event->mmap.pid;
else
pid = sample->pid;
machine = machines__find(machines, pid);
if (!machine)
machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
return &machines->host;
}
static int deliver_sample_value(struct perf_evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct sample_read_value *v,
struct machine *machine)
{
struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
if (sid) {
sample->id = v->id;
sample->period = v->value - sid->period;
sid->period = v->value;
}
if (!sid || sid->evsel == NULL) {
++evlist->stats.nr_unknown_id;
return 0;
}
return tool->sample(tool, event, sample, sid->evsel, machine);
}
static int deliver_sample_group(struct perf_evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int ret = -EINVAL;
u64 i;
for (i = 0; i < sample->read.group.nr; i++) {
ret = deliver_sample_value(evlist, tool, event, sample,
&sample->read.group.values[i],
machine);
if (ret)
break;
}
return ret;
}
static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
/* We know evsel != NULL. */
u64 sample_type = evsel->attr.sample_type;
u64 read_format = evsel->attr.read_format;
/* Standard sample delivery. */
if (!(sample_type & PERF_SAMPLE_READ))
return tool->sample(tool, event, sample, evsel, machine);
/* For PERF_SAMPLE_READ we have either single or group mode. */
if (read_format & PERF_FORMAT_GROUP)
return deliver_sample_group(evlist, tool, event, sample,
machine);
else
return deliver_sample_value(evlist, tool, event, sample,
&sample->read.one, machine);
}
static int machines__deliver_event(struct machines *machines,
struct perf_evlist *evlist,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool, u64 file_offset)
{
struct perf_evsel *evsel;
struct machine *machine;
dump_event(evlist, event, file_offset, sample);
evsel = perf_evlist__id2evsel(evlist, sample->id);
machine = machines__find_for_cpumode(machines, event, sample);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
if (evsel == NULL) {
++evlist->stats.nr_unknown_id;
return 0;
}
dump_sample(evsel, event, sample);
if (machine == NULL) {
++evlist->stats.nr_unprocessable_samples;
return 0;
}
return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_MMAP2:
if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
++evlist->stats.nr_proc_map_timeout;
return tool->mmap2(tool, event, sample, machine);
case PERF_RECORD_COMM:
return tool->comm(tool, event, sample, machine);
case PERF_RECORD_FORK:
return tool->fork(tool, event, sample, machine);
case PERF_RECORD_EXIT:
return tool->exit(tool, event, sample, machine);
case PERF_RECORD_LOST:
if (tool->lost == perf_event__process_lost)
evlist->stats.total_lost += event->lost.lost;
return tool->lost(tool, event, sample, machine);
case PERF_RECORD_LOST_SAMPLES:
if (tool->lost_samples == perf_event__process_lost_samples)
evlist->stats.total_lost_samples += event->lost_samples.lost;
return tool->lost_samples(tool, event, sample, machine);
case PERF_RECORD_READ:
return tool->read(tool, event, sample, evsel, machine);
case PERF_RECORD_THROTTLE:
return tool->throttle(tool, event, sample, machine);
case PERF_RECORD_UNTHROTTLE:
return tool->unthrottle(tool, event, sample, machine);
case PERF_RECORD_AUX:
if (tool->aux == perf_event__process_aux &&
(event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
evlist->stats.total_aux_lost += 1;
return tool->aux(tool, event, sample, machine);
case PERF_RECORD_ITRACE_START:
return tool->itrace_start(tool, event, sample, machine);
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
}
}
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset)
{
int ret;
ret = auxtrace__process_event(session, event, sample, tool);
if (ret < 0)
return ret;
if (ret > 0)
return 0;
return machines__deliver_event(&session->machines, session->evlist,
event, sample, tool, file_offset);
}
static s64 perf_session__process_user_event(struct perf_session *session,
union perf_event *event,
u64 file_offset)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
int fd = perf_data_file__fd(session->file);
int err;
dump_event(session->evlist, event, file_offset, NULL);
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(tool, event, &session->evlist);
if (err == 0) {
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
return err;
case PERF_RECORD_EVENT_UPDATE:
return tool->event_update(tool, event, &session->evlist);
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
* Deprecated, but we need to handle it for the sake
* of old data files created in pipe mode.
*/
return 0;
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(fd, file_offset, SEEK_SET);
return tool->tracing_data(tool, event, session);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
return tool->finished_round(tool, event, oe);
case PERF_RECORD_ID_INDEX:
return tool->id_index(tool, event, session);
case PERF_RECORD_AUXTRACE_INFO:
return tool->auxtrace_info(tool, event, session);
case PERF_RECORD_AUXTRACE:
/* setup for reading amidst mmap */
lseek(fd, file_offset + event->header.size, SEEK_SET);
return tool->auxtrace(tool, event, session);
case PERF_RECORD_AUXTRACE_ERROR:
perf_session__auxtrace_error_inc(session, event);
return tool->auxtrace_error(tool, event, session);
case PERF_RECORD_THREAD_MAP:
return tool->thread_map(tool, event, session);
case PERF_RECORD_CPU_MAP:
return tool->cpu_map(tool, event, session);
case PERF_RECORD_STAT_CONFIG:
return tool->stat_config(tool, event, session);
case PERF_RECORD_STAT:
return tool->stat(tool, event, session);
case PERF_RECORD_STAT_ROUND:
return tool->stat_round(tool, event, session);
case PERF_RECORD_TIME_CONV:
session->time_conv = event->time_conv;
return tool->time_conv(tool, event, session);
default:
return -EINVAL;
}
}
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
struct perf_evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
events_stats__inc(&evlist->stats, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, 0);
return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
perf_event__swap_op swap;
swap = perf_event__swap_ops[event->header.type];
if (swap)
swap(event, sample_id_all);
}
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
void *buf, size_t buf_sz,
union perf_event **event_ptr,
struct perf_sample *sample)
{
union perf_event *event;
size_t hdr_sz, rest;
int fd;
if (session->one_mmap && !session->header.needs_swap) {
event = file_offset - session->one_mmap_offset +
session->one_mmap_addr;
goto out_parse_sample;
}
if (perf_data_file__is_pipe(session->file))
return -1;
fd = perf_data_file__fd(session->file);
hdr_sz = sizeof(struct perf_event_header);
if (buf_sz < hdr_sz)
return -1;
if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
return -1;
event = (union perf_event *)buf;
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
if (event->header.size < hdr_sz || event->header.size > buf_sz)
return -1;
rest = event->header.size - hdr_sz;
if (readn(fd, buf, rest) != (ssize_t)rest)
return -1;
if (session->header.needs_swap)
event_swap(event, perf_evlist__sample_id_all(session->evlist));
out_parse_sample:
if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
perf_evlist__parse_sample(session->evlist, event, sample))
return -1;
*event_ptr = event;
return 0;
}
static s64 perf_session__process_event(struct perf_session *session,
union perf_event *event, u64 file_offset)
{
struct perf_evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
struct perf_sample sample;
int ret;
if (session->header.needs_swap)
event_swap(event, perf_evlist__sample_id_all(evlist));
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
events_stats__inc(&evlist->stats, event->header.type);
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, file_offset);
/*
* For all kernel events we get the sample data
*/
ret = perf_evlist__parse_sample(evlist, event, &sample);
if (ret)
return ret;
if (tool->ordered_events) {
ret = perf_session__queue_event(session, event, &sample, file_offset);
if (ret != -ETIME)
return ret;
}
return perf_session__deliver_event(session, event, &sample, tool,
file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
{
hdr->type = bswap_32(hdr->type);
hdr->misc = bswap_16(hdr->misc);
hdr->size = bswap_16(hdr->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
return machine__findnew_thread(&session->machines.host, -1, pid);
}
int perf_session__register_idle_thread(struct perf_session *session)
{
struct thread *thread;
int err = 0;
thread = machine__findnew_thread(&session->machines.host, 0, 0);
if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
pr_err("problem inserting idle task.\n");
err = -1;
}
/* machine__findnew_thread() got the thread, so put it */
thread__put(thread);
return err;
}
static void perf_session__warn_about_errors(const struct perf_session *session)
{
const struct events_stats *stats = &session->evlist->stats;
const struct ordered_events *oe = &session->ordered_events;
if (session->tool->lost == perf_event__process_lost &&
stats->nr_events[PERF_RECORD_LOST] != 0) {
ui__warning("Processed %d events and lost %d chunks!\n\n"
"Check IO/CPU overload!\n\n",
stats->nr_events[0],
stats->nr_events[PERF_RECORD_LOST]);
}
if (session->tool->lost_samples == perf_event__process_lost_samples) {
double drop_rate;
drop_rate = (double)stats->total_lost_samples /
(double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
if (drop_rate > 0.05) {
ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
drop_rate * 100.0);
}
}
if (session->tool->aux == perf_event__process_aux &&
stats->total_aux_lost != 0) {
ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
stats->total_aux_lost,
stats->nr_events[PERF_RECORD_AUX]);
}
if (stats->nr_unknown_events != 0) {
ui__warning("Found %u unknown events!\n\n"
"Is this an older tool processing a perf.data "
"file generated by a more recent tool?\n\n"
"If that is not the case, consider "
"reporting to linux-kernel@vger.kernel.org.\n\n",
stats->nr_unknown_events);
}
if (stats->nr_unknown_id != 0) {
ui__warning("%u samples with id not present in the header\n",
stats->nr_unknown_id);
}
if (stats->nr_invalid_chains != 0) {
ui__warning("Found invalid callchains!\n\n"
"%u out of %u events were discarded for this reason.\n\n"
"Consider reporting to linux-kernel@vger.kernel.org.\n\n",
stats->nr_invalid_chains,
stats->nr_events[PERF_RECORD_SAMPLE]);
}
if (stats->nr_unprocessable_samples != 0) {
ui__warning("%u unprocessable samples recorded.\n"
"Do you have a KVM guest running and not using 'perf kvm'?\n",
stats->nr_unprocessable_samples);
}
if (oe->nr_unordered_events != 0)
ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
events_stats__auxtrace_error_warn(stats);
if (stats->nr_proc_map_timeout != 0) {
ui__warning("%d map information files for pre-existing threads were\n"
"not processed, if there are samples for addresses they\n"
"will not be resolved, you may find out which are these\n"
"threads by running with -v and redirecting the output\n"
"to a file.\n"
"The time limit to process proc map is too short?\n"
"Increase it by --proc-map-timeout\n",
stats->nr_proc_map_timeout);
}
}
static int perf_session__flush_thread_stack(struct thread *thread,
void *p __maybe_unused)
{
return thread_stack__flush(thread);
}
static int perf_session__flush_thread_stacks(struct perf_session *session)
{
return machines__for_each_thread(&session->machines,
perf_session__flush_thread_stack,
NULL);
}
volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
int fd = perf_data_file__fd(session->file);
union perf_event *event;
uint32_t size, cur_size = 0;
void *buf = NULL;
s64 skip = 0;
u64 head;
ssize_t err;
void *p;
perf_tool__fill_defaults(tool);
head = 0;
cur_size = sizeof(union perf_event);
buf = malloc(cur_size);
if (!buf)
return -errno;
more:
event = buf;
err = readn(fd, event, sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0)
goto done;
pr_err("failed to read event header\n");
goto out_err;
}
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
size = event->header.size;
if (size < sizeof(struct perf_event_header)) {
pr_err("bad event header size\n");
goto out_err;
}
if (size > cur_size) {
void *new = realloc(buf, size);
if (!new) {
pr_err("failed to allocate memory to read event\n");
goto out_err;
}
buf = new;
cur_size = size;
event = buf;
}
p = event;
p += sizeof(struct perf_event_header);
if (size - sizeof(struct perf_event_header)) {
err = readn(fd, p, size - sizeof(struct perf_event_header));
if (err <= 0) {
if (err == 0) {
pr_err("unexpected end of event stream\n");
goto done;
}
pr_err("failed to read event data\n");
goto out_err;
}
}
if ((skip = perf_session__process_event(session, event, head)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
head, event->header.size, event->header.type);
err = -EINVAL;
goto out_err;
}
head += size;
if (skip > 0)
head += skip;
if (!session_done())
goto more;
done:
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
err = auxtrace__flush_events(session, tool);
if (err)
goto out_err;
err = perf_session__flush_thread_stacks(session);
out_err:
free(buf);
perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
u64 head, size_t mmap_size, char *buf)
{
union perf_event *event;
/*
* Ensure we have enough space remaining to read
* the size of the event in the headers.
*/
if (head + sizeof(event->header) > mmap_size)
return NULL;
event = (union perf_event *)(buf + head);
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
if (head + event->header.size > mmap_size) {
/* We're not fetching the event so swap back again */
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
return NULL;
}
return event;
}
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
*/
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
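/*
* Example (sketch): on a 32-bit build a 1 GiB perf.data file is walked
* through 32 MiB windows, keeping up to NUM_MMAPS of them mapped at a
* time; when an event crosses the current window, fetch_mmaped_event()
* returns NULL and the remap: path below maps a fresh window starting at
* the page containing 'head'.
*/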
static int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
u64 file_size)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
int fd = perf_data_file__fd(session->file);
u64 head, page_offset, file_offset, file_pos, size;
int err, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
struct ui_progress prog;
s64 skip;
perf_tool__fill_defaults(tool);
page_offset = page_size * (data_offset / page_size);
file_offset = page_offset;
head = data_offset - page_offset;
if (data_size == 0)
goto out;
if (data_offset + data_size < file_size)
file_size = data_offset + data_size;
ui_progress__init(&prog, file_size, "Processing events...");
mmap_size = MMAP_SIZE;
if (mmap_size > file_size) {
mmap_size = file_size;
session->one_mmap = true;
}
memset(mmaps, 0, sizeof(mmaps));
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
if (session->header.needs_swap) {
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
remap:
buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
goto out_err;
}
mmaps[map_idx] = buf;
map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
file_pos = file_offset + head;
if (session->one_mmap) {
session->one_mmap_addr = buf;
session->one_mmap_offset = file_offset;
}
more:
event = fetch_mmaped_event(session, head, mmap_size, buf);
if (!event) {
if (mmaps[map_idx]) {
munmap(mmaps[map_idx], mmap_size);
mmaps[map_idx] = NULL;
}
page_offset = page_size * (head / page_size);
file_offset += page_offset;
head -= page_offset;
goto remap;
}
size = event->header.size;
if (size < sizeof(struct perf_event_header) ||
(skip = perf_session__process_event(session, event, file_pos)) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
file_offset + head, event->header.size,
event->header.type);
err = -EINVAL;
goto out_err;
}
if (skip)
size += skip;
head += size;
file_pos += size;
ui_progress__update(&prog, size);
if (session_done())
goto out;
if (file_pos < file_size)
goto more;
out:
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
goto out_err;
err = auxtrace__flush_events(session, tool);
if (err)
goto out_err;
err = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
perf_session__warn_about_errors(session);
/*
* We may be switching perf.data output; make ordered_events
* reusable.
*/
ordered_events__reinit(&session->ordered_events);
auxtrace__free_events(session);
session->one_mmap = false;
return err;
}
int perf_session__process_events(struct perf_session *session)
{
u64 size = perf_data_file__size(session->file);
int err;
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
if (!perf_data_file__is_pipe(session->file))
err = __perf_session__process_events(session,
session->header.data_offset,
session->header.data_size, size);
else
err = __perf_session__process_pipe_events(session);
return err;
}
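/*
* Typical usage (sketch; relies on the perf_session__new() and
* perf_session__delete() helpers defined elsewhere in this file):
*
* struct perf_session *session = perf_session__new(file, false, &tool);
* if (session != NULL)
* err = perf_session__process_events(session);
* perf_session__delete(session);
*/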
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
struct perf_evsel *evsel;
evlist__for_each(session->evlist, evsel) {
if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
return true;
}
pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
return false;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
const char *symbol_name, u64 addr)
{
char *bracket;
enum map_type i;
struct ref_reloc_sym *ref;
ref = zalloc(sizeof(struct ref_reloc_sym));
if (ref == NULL)
return -ENOMEM;
ref->name = strdup(symbol_name);
if (ref->name == NULL) {
free(ref);
return -ENOMEM;
}
bracket = strchr(ref->name, ']');
if (bracket)
*bracket = '\0';
ref->addr = addr;
for (i = 0; i < MAP__NR_TYPES; ++i) {
struct kmap *kmap = map__kmap(maps[i]);
if (!kmap)
continue;
kmap->ref_reloc_sym = ref;
}
return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
return machines__fprintf_dsos(&session->machines, fp);
}
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
size_t ret;
const char *msg = "";
if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
ret += events_stats__fprintf(&session->evlist->stats, fp);
return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
/*
* FIXME: Here we have to actually print all the machines in this
* session, not just the host...
*/
return machine__fprintf(&session->machines.host, fp);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type)
{
struct perf_evsel *pos;
evlist__for_each(session->evlist, pos) {
if (pos->attr.type == type)
return pos;
}
return NULL;
}
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap)
{
int i, err = -1;
struct cpu_map *map;
for (i = 0; i < PERF_TYPE_MAX; ++i) {
struct perf_evsel *evsel;
evsel = perf_session__find_first_evtype(session, i);
if (!evsel)
continue;
if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
pr_err("File does not contain CPU events. "
"Remove -c option to proceed.\n");
return -1;
}
}
map = cpu_map__new(cpu_list);
if (map == NULL) {
pr_err("Invalid cpu_list\n");
return -1;
}
for (i = 0; i < map->nr; i++) {
int cpu = map->map[i];
if (cpu >= MAX_NR_CPUS) {
pr_err("Requested CPU %d too large. "
"Consider raising MAX_NR_CPUS\n", cpu);
goto out_delete_map;
}
set_bit(cpu, cpu_bitmap);
}
err = 0;
out_delete_map:
cpu_map__put(map);
return err;
}
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
bool full)
{
struct stat st;
int fd, ret;
if (session == NULL || fp == NULL)
return;
fd = perf_data_file__fd(session->file);
ret = fstat(fd, &st);
if (ret == -1)
return;
fprintf(fp, "# ========\n");
fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
perf_header__fprintf_info(session, fp, full);
fprintf(fp, "# ========\n#\n");
}
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
const struct perf_evsel_str_handler *assocs,
size_t nr_assocs)
{
struct perf_evsel *evsel;
size_t i;
int err;
for (i = 0; i < nr_assocs; i++) {
/*
* When adding a handler for an event that is not in the session,
* just ignore it.
*/
evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
if (evsel == NULL)
continue;
err = -EEXIST;
if (evsel->handler != NULL)
goto out;
evsel->handler = assocs[i].handler;
}
err = 0;
out:
return err;
}
int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session)
{
struct perf_evlist *evlist = session->evlist;
struct id_index_event *ie = &event->id_index;
size_t i, nr, max_nr;
max_nr = (ie->header.size - sizeof(struct id_index_event)) /
sizeof(struct id_index_entry);
nr = ie->nr;
if (nr > max_nr)
return -EINVAL;
if (dump_trace)
fprintf(stdout, " nr: %zu\n", nr);
for (i = 0; i < nr; i++) {
struct id_index_entry *e = &ie->entries[i];
struct perf_sample_id *sid;
if (dump_trace) {
fprintf(stdout, " ... id: %"PRIu64, e->id);
fprintf(stdout, " idx: %"PRIu64, e->idx);
fprintf(stdout, " cpu: %"PRId64, e->cpu);
fprintf(stdout, " tid: %"PRId64"\n", e->tid);
}
sid = perf_evlist__id2sid(evlist, e->id);
if (!sid)
return -ENOENT;
sid->idx = e->idx;
sid->cpu = e->cpu;
sid->tid = e->tid;
}
return 0;
}
int perf_event__synthesize_id_index(struct perf_tool *tool,
perf_event__handler_t process,
struct perf_evlist *evlist,
struct machine *machine)
{
union perf_event *ev;
struct perf_evsel *evsel;
size_t nr = 0, i = 0, sz, max_nr, n;
int err;
pr_debug2("Synthesizing id index\n");
max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
sizeof(struct id_index_entry);
evlist__for_each(evlist, evsel)
nr += evsel->ids;
n = nr > max_nr ? max_nr : nr;
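/*
* Chunking example: when the evlist carries more ids than fit in one
* event (nr > max_nr), the loop below emits full events of n entries
* each time 'i' wraps, and the remaining partial chunk is resized and
* sent after the loop.
*/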
sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
ev = zalloc(sz);
if (!ev)
return -ENOMEM;
ev->id_index.header.type = PERF_RECORD_ID_INDEX;
ev->id_index.header.size = sz;
ev->id_index.nr = n;
evlist__for_each(evlist, evsel) {
u32 j;
for (j = 0; j < evsel->ids; j++) {
struct id_index_entry *e;
struct perf_sample_id *sid;
if (i >= n) {
err = process(tool, ev, NULL, machine);
if (err)
goto out_err;
nr -= n;
i = 0;
}
e = &ev->id_index.entries[i++];
e->id = evsel->id[j];
sid = perf_evlist__id2sid(evlist, e->id);
if (!sid) {
free(ev);
return -ENOENT;
}
e->idx = sid->idx;
e->cpu = sid->cpu;
e->tid = sid->tid;
}
}
sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
ev->id_index.header.size = sz;
ev->id_index.nr = nr;
err = process(tool, ev, NULL, machine);
out_err:
free(ev);
return err;
}
| gpl-2.0 |
hallabro/hallabro-trinity | src/server/game/Handlers/AddonHandler.cpp | 44 | 5618 | /*
* Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "zlib.h"
#include "AddonHandler.h"
#include "DatabaseEnv.h"
#include "Opcodes.h"
#include "Log.h"
AddonHandler::AddonHandler()
{
}
AddonHandler::~AddonHandler()
{
}
bool AddonHandler::BuildAddonPacket(WorldPacket* Source, WorldPacket* Target)
{
ByteBuffer AddOnPacked;
uLongf AddonRealSize;
uint32 CurrentPosition;
uint32 TempValue;
unsigned char tdata[256] =
{
0xC3, 0x5B, 0x50, 0x84, 0xB9, 0x3E, 0x32, 0x42, 0x8C, 0xD0, 0xC7, 0x48, 0xFA, 0x0E, 0x5D, 0x54,
0x5A, 0xA3, 0x0E, 0x14, 0xBA, 0x9E, 0x0D, 0xB9, 0x5D, 0x8B, 0xEE, 0xB6, 0x84, 0x93, 0x45, 0x75,
0xFF, 0x31, 0xFE, 0x2F, 0x64, 0x3F, 0x3D, 0x6D, 0x07, 0xD9, 0x44, 0x9B, 0x40, 0x85, 0x59, 0x34,
0x4E, 0x10, 0xE1, 0xE7, 0x43, 0x69, 0xEF, 0x7C, 0x16, 0xFC, 0xB4, 0xED, 0x1B, 0x95, 0x28, 0xA8,
0x23, 0x76, 0x51, 0x31, 0x57, 0x30, 0x2B, 0x79, 0x08, 0x50, 0x10, 0x1C, 0x4A, 0x1A, 0x2C, 0xC8,
0x8B, 0x8F, 0x05, 0x2D, 0x22, 0x3D, 0xDB, 0x5A, 0x24, 0x7A, 0x0F, 0x13, 0x50, 0x37, 0x8F, 0x5A,
0xCC, 0x9E, 0x04, 0x44, 0x0E, 0x87, 0x01, 0xD4, 0xA3, 0x15, 0x94, 0x16, 0x34, 0xC6, 0xC2, 0xC3,
0xFB, 0x49, 0xFE, 0xE1, 0xF9, 0xDA, 0x8C, 0x50, 0x3C, 0xBE, 0x2C, 0xBB, 0x57, 0xED, 0x46, 0xB9,
0xAD, 0x8B, 0xC6, 0xDF, 0x0E, 0xD6, 0x0F, 0xBE, 0x80, 0xB3, 0x8B, 0x1E, 0x77, 0xCF, 0xAD, 0x22,
0xCF, 0xB7, 0x4B, 0xCF, 0xFB, 0xF0, 0x6B, 0x11, 0x45, 0x2D, 0x7A, 0x81, 0x18, 0xF2, 0x92, 0x7E,
0x98, 0x56, 0x5D, 0x5E, 0x69, 0x72, 0x0A, 0x0D, 0x03, 0x0A, 0x85, 0xA2, 0x85, 0x9C, 0xCB, 0xFB,
0x56, 0x6E, 0x8F, 0x44, 0xBB, 0x8F, 0x02, 0x22, 0x68, 0x63, 0x97, 0xBC, 0x85, 0xBA, 0xA8, 0xF7,
0xB5, 0x40, 0x68, 0x3C, 0x77, 0x86, 0x6F, 0x4B, 0xD7, 0x88, 0xCA, 0x8A, 0xD7, 0xCE, 0x36, 0xF0,
0x45, 0x6E, 0xD5, 0x64, 0x79, 0x0F, 0x17, 0xFC, 0x64, 0xDD, 0x10, 0x6F, 0xF3, 0xF5, 0xE0, 0xA6,
0xC3, 0xFB, 0x1B, 0x8C, 0x29, 0xEF, 0x8E, 0xE5, 0x34, 0xCB, 0xD1, 0x2A, 0xCE, 0x79, 0xC3, 0x9A,
0x0D, 0x36, 0xEA, 0x01, 0xE0, 0xAA, 0x91, 0x20, 0x54, 0xF0, 0x72, 0xD8, 0x1E, 0xC7, 0x89, 0xD2
};
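// 256-byte blob appended below for addons whose CRC differs from the
// standard one; presumably the addon signature public key expected by
// the client (assumption, not confirmed by this file).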
// broken addon packet, can't be received from real client
if (Source->rpos() + 4 > Source->size())
return false;
*Source >> TempValue; // get real size of the packed structure
// empty addon packet, nothing to process; can't be received from a real client
if (!TempValue)
return false;
AddonRealSize = TempValue; // temp value because zlib only accepts uLongf
CurrentPosition = Source->rpos(); // get the position of the pointer in the structure
AddOnPacked.resize(AddonRealSize); // resize target for zlib action
if (uncompress(const_cast<uint8*>(AddOnPacked.contents()), &AddonRealSize, const_cast<uint8*>((*Source).contents() + CurrentPosition), (*Source).size() - CurrentPosition) == Z_OK)
{
Target->Initialize(SMSG_ADDON_INFO);
uint32 addonsCount;
AddOnPacked >> addonsCount; // addons count?
for (uint32 i = 0; i < addonsCount; ++i)
{
std::string addonName;
uint8 enabled;
uint32 crc, unk2;
// check next addon data format correctness
if (AddOnPacked.rpos()+1 > AddOnPacked.size())
return false;
AddOnPacked >> addonName;
// recheck next addon data format correctness
if (AddOnPacked.rpos()+1+4+4 > AddOnPacked.size())
return false;
AddOnPacked >> enabled >> crc >> unk2;
sLog->outDebug(LOG_FILTER_NETWORKIO, "ADDON: Name: %s, Enabled: 0x%x, CRC: 0x%x, Unknown2: 0x%x", addonName.c_str(), enabled, crc, unk2);
uint8 state = (enabled ? 2 : 1);
*Target << uint8(state);
uint8 unk1 = (enabled ? 1 : 0);
*Target << uint8(unk1);
if (unk1)
{
uint8 unk = (crc != 0x4c1c776d); // set if the CRC differs from the standard addon CRC
*Target << uint8(unk);
if (unk)
Target->append(tdata, sizeof(tdata));
*Target << uint32(0);
}
uint8 unk3 = (enabled ? 0 : 1);
*Target << uint8(unk3);
if (unk3)
{
// String, 256 (null terminated?)
*Target << uint8(0);
}
}
uint32 unk4;
AddOnPacked >> unk4;
uint32 count = 0;
*Target << uint32(count);
if (AddOnPacked.rpos() != AddOnPacked.size())
sLog->outDebug(LOG_FILTER_NETWORKIO, "packet under read!");
}
else
{
sLog->outError("Addon packet uncompress error :(");
return false;
}
return true;
}
| gpl-2.0 |
EmcraftSystems/linux-emcraft | drivers/net/wireless/rtl8189es/core/rtw_recv.c | 44 | 115890 | /******************************************************************************
*
* Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _RTW_RECV_C_
#include <drv_conf.h>
#include <osdep_service.h>
#include <drv_types.h>
#include <recv_osdep.h>
#include <mlme_osdep.h>
#include <ip.h>
#include <if_ether.h>
#include <ethernet.h>
#ifdef CONFIG_USB_HCI
#include <usb_ops.h>
#endif
#if defined (PLATFORM_LINUX) && defined (PLATFORM_WINDOWS)
#error "Shall be Linux or Windows, but not both!\n"
#endif
#include <wifi.h>
#include <circ_buf.h>
#ifdef CONFIG_NEW_SIGNAL_STAT_PROCESS
void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
#endif //CONFIG_NEW_SIGNAL_STAT_PROCESS
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
_func_enter_;
_rtw_memset((u8 *)psta_recvpriv, 0, sizeof (struct sta_recv_priv));
_rtw_spinlock_init(&psta_recvpriv->lock);
//for(i=0; i<MAX_RX_NUMBLKS; i++)
// _rtw_init_queue(&psta_recvpriv->blk_strms[i]);
_rtw_init_queue(&psta_recvpriv->defrag_q);
_func_exit_;
}
sint _rtw_init_recv_priv(struct recv_priv *precvpriv, _adapter *padapter)
{
sint i;
union recv_frame *precvframe;
sint res=_SUCCESS;
_func_enter_;
// We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc().
//_rtw_memset((unsigned char *)precvpriv, 0, sizeof (struct recv_priv));
_rtw_spinlock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
_rtw_init_queue(&precvpriv->recv_pending_queue);
_rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
precvpriv->adapter = padapter;
precvpriv->free_recvframe_cnt = NR_RECVFRAME;
rtw_os_recv_resource_init(precvpriv, padapter);
precvpriv->pallocated_frame_buf = rtw_zvmalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
if(precvpriv->pallocated_frame_buf==NULL){
res= _FAIL;
goto exit;
}
//_rtw_memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
//precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ -
// ((SIZE_PTR) (precvpriv->pallocated_frame_buf) &(RXFRAME_ALIGN_SZ-1));
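/*
* Alignment example (concrete RXFRAME_ALIGN_SZ value assumed, e.g. 8): if
* rtw_zvmalloc() returned 0x1004, precv_frame_buf becomes 0x1008; the extra
* RXFRAME_ALIGN_SZ bytes in the allocation above guarantee the aligned
* start still leaves room for all NR_RECVFRAME frames.
*/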
precvframe = (union recv_frame*) precvpriv->precv_frame_buf;
for(i=0; i < NR_RECVFRAME ; i++)
{
_rtw_init_listhead(&(precvframe->u.list));
rtw_list_insert_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
res = rtw_os_recv_resource_alloc(padapter, precvframe);
precvframe->u.hdr.len = 0;
precvframe->u.hdr.adapter =padapter;
precvframe++;
}
#ifdef CONFIG_USB_HCI
precvpriv->rx_pending_cnt=1;
_rtw_init_sema(&precvpriv->allrxreturnevt, 0);
#endif
res = rtw_hal_init_recv_priv(padapter);
#ifdef CONFIG_NEW_SIGNAL_STAT_PROCESS
#ifdef PLATFORM_LINUX
_init_timer(&precvpriv->signal_stat_timer, padapter->pnetdev, RTW_TIMER_HDL_NAME(signal_stat), padapter);
#elif defined(PLATFORM_OS_CE) || defined(PLATFORM_WINDOWS)
_init_timer(&precvpriv->signal_stat_timer, padapter->hndis_adapter, RTW_TIMER_HDL_NAME(signal_stat), padapter);
#endif
precvpriv->signal_stat_sampling_interval = 1000; //ms
//precvpriv->signal_stat_converging_constant = 5000; //ms
rtw_set_signal_stat_timer(precvpriv);
#endif //CONFIG_NEW_SIGNAL_STAT_PROCESS
exit:
_func_exit_;
return res;
}
void rtw_mfree_recv_priv_lock(struct recv_priv *precvpriv);
void rtw_mfree_recv_priv_lock(struct recv_priv *precvpriv)
{
_rtw_spinlock_free(&precvpriv->lock);
#ifdef CONFIG_RECV_THREAD_MODE
_rtw_free_sema(&precvpriv->recv_sema);
_rtw_free_sema(&precvpriv->terminate_recvthread_sema);
#endif
_rtw_spinlock_free(&precvpriv->free_recv_queue.lock);
_rtw_spinlock_free(&precvpriv->recv_pending_queue.lock);
_rtw_spinlock_free(&precvpriv->free_recv_buf_queue.lock);
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
_rtw_spinlock_free(&precvpriv->recv_buf_pending_queue.lock);
#endif // CONFIG_USE_USB_BUFFER_ALLOC_RX
}
void _rtw_free_recv_priv (struct recv_priv *precvpriv)
{
_adapter *padapter = precvpriv->adapter;
_func_enter_;
rtw_free_uc_swdec_pending_queue(padapter);
rtw_mfree_recv_priv_lock(precvpriv);
rtw_os_recv_resource_free(precvpriv);
if(precvpriv->pallocated_frame_buf) {
rtw_vmfree(precvpriv->pallocated_frame_buf, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
}
rtw_hal_free_recv_priv(padapter);
_func_exit_;
}
union recv_frame *_rtw_alloc_recvframe (_queue *pfree_recv_queue)
{
union recv_frame *precvframe;
_list *plist, *phead;
_adapter *padapter;
struct recv_priv *precvpriv;
_func_enter_;
if(_rtw_queue_empty(pfree_recv_queue) == _TRUE)
{
precvframe = NULL;
}
else
{
phead = get_list_head(pfree_recv_queue);
plist = get_next(phead);
precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
rtw_list_delete(&precvframe->u.hdr.list);
padapter=precvframe->u.hdr.adapter;
if(padapter !=NULL){
precvpriv=&padapter->recvpriv;
if(pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt--;
}
}
_func_exit_;
return precvframe;
}
union recv_frame *rtw_alloc_recvframe (_queue *pfree_recv_queue)
{
_irqL irqL;
union recv_frame *precvframe;
_enter_critical_bh(&pfree_recv_queue->lock, &irqL);
precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
_exit_critical_bh(&pfree_recv_queue->lock, &irqL);
return precvframe;
}
void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpriv)
{
/* Perry: This can be removed */
_rtw_init_listhead(&precvframe->u.hdr.list);
precvframe->u.hdr.len=0;
}
int rtw_free_recvframe(union recv_frame *precvframe, _queue *pfree_recv_queue)
{
_irqL irqL;
_adapter *padapter=precvframe->u.hdr.adapter;
struct recv_priv *precvpriv = &padapter->recvpriv;
_func_enter_;
#ifdef CONFIG_CONCURRENT_MODE
if(padapter->adapter_type > PRIMARY_ADAPTER)
{
padapter = padapter->pbuddy_adapter;//get primary_padapter
precvpriv = &padapter->recvpriv;
pfree_recv_queue = &precvpriv->free_recv_queue;
precvframe->u.hdr.adapter = padapter;
}
#endif
#ifdef PLATFORM_WINDOWS
rtw_os_read_port(padapter, precvframe->u.hdr.precvbuf);
#endif
#if defined(PLATFORM_LINUX) || defined(PLATFORM_FREEBSD)
if(precvframe->u.hdr.pkt)
{
#ifdef CONFIG_BSD_RX_USE_MBUF
m_freem(precvframe->u.hdr.pkt);
#else // CONFIG_BSD_RX_USE_MBUF
dev_kfree_skb_any(precvframe->u.hdr.pkt);//free skb by driver
#endif // CONFIG_BSD_RX_USE_MBUF
precvframe->u.hdr.pkt = NULL;
}
#endif //defined(PLATFORM_LINUX) || defined(PLATFORM_FREEBSD)
_enter_critical_bh(&pfree_recv_queue->lock, &irqL);
rtw_list_delete(&(precvframe->u.hdr.list));
precvframe->u.hdr.len = 0;
rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));
if(padapter !=NULL){
if(pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
}
_exit_critical_bh(&pfree_recv_queue->lock, &irqL);
_func_exit_;
return _SUCCESS;
}
sint _rtw_enqueue_recvframe(union recv_frame *precvframe, _queue *queue)
{
_adapter *padapter=precvframe->u.hdr.adapter;
struct recv_priv *precvpriv = &padapter->recvpriv;
_func_enter_;
//_rtw_init_listhead(&(precvframe->u.hdr.list));
rtw_list_delete(&(precvframe->u.hdr.list));
rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(queue));
if (padapter != NULL) {
if (queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt++;
}
_func_exit_;
return _SUCCESS;
}
sint rtw_enqueue_recvframe(union recv_frame *precvframe, _queue *queue)
{
sint ret;
_irqL irqL;
//_spinlock(&pfree_recv_queue->lock);
_enter_critical_bh(&queue->lock, &irqL);
ret = _rtw_enqueue_recvframe(precvframe, queue);
//_rtw_spinunlock(&pfree_recv_queue->lock);
_exit_critical_bh(&queue->lock, &irqL);
return ret;
}
/*
sint rtw_enqueue_recvframe(union recv_frame *precvframe, _queue *queue)
{
return rtw_free_recvframe(precvframe, queue);
}
*/
/*
* Caller: defrag; recvframe_chk_defrag in recv_thread (passive).
* pframequeue: defrag_queue; it will be accessed in recv_thread (passive),
* so a spinlock is used to protect it.
*/
void rtw_free_recvframe_queue(_queue *pframequeue, _queue *pfree_recv_queue)
{
union recv_frame *precvframe;
_list *plist, *phead;
_func_enter_;
_rtw_spinlock(&pframequeue->lock);
phead = get_list_head(pframequeue);
plist = get_next(phead);
while(rtw_end_of_queue_search(phead, plist) == _FALSE)
{
precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
plist = get_next(plist);
//rtw_list_delete(&precvframe->u.hdr.list); // will do this in rtw_free_recvframe()
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
_rtw_spinunlock(&pframequeue->lock);
_func_exit_;
}
u32 rtw_free_uc_swdec_pending_queue(_adapter *adapter)
{
u32 cnt = 0;
union recv_frame *pending_frame;
while((pending_frame=rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
DBG_871X("%s: dequeue uc_swdec_pending_queue\n", __func__);
cnt++;
}
return cnt;
}
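// recv_buf queue helpers: enqueue at head or tail and dequeue, all under the
// queue lock (the locking flavor depends on CONFIG_SDIO_HCI, see the #ifdefs).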
sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, _queue *queue)
{
_irqL irqL;
_enter_critical_bh(&queue->lock, &irqL);
rtw_list_delete(&precvbuf->list);
rtw_list_insert_head(&precvbuf->list, get_list_head(queue));
_exit_critical_bh(&queue->lock, &irqL);
return _SUCCESS;
}
sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, _queue *queue)
{
_irqL irqL;
#ifdef CONFIG_SDIO_HCI
_enter_critical_bh(&queue->lock, &irqL);
#else
_enter_critical_ex(&queue->lock, &irqL);
#endif/*#ifdef CONFIG_SDIO_HCI*/
rtw_list_delete(&precvbuf->list);
rtw_list_insert_tail(&precvbuf->list, get_list_head(queue));
#ifdef CONFIG_SDIO_HCI
_exit_critical_bh(&queue->lock, &irqL);
#else
_exit_critical_ex(&queue->lock, &irqL);
#endif/*#ifdef CONFIG_SDIO_HCI*/
return _SUCCESS;
}
struct recv_buf *rtw_dequeue_recvbuf (_queue *queue)
{
_irqL irqL;
struct recv_buf *precvbuf;
_list *plist, *phead;
#ifdef CONFIG_SDIO_HCI
_enter_critical_bh(&queue->lock, &irqL);
#else
_enter_critical_ex(&queue->lock, &irqL);
#endif/*#ifdef CONFIG_SDIO_HCI*/
if(_rtw_queue_empty(queue) == _TRUE)
{
precvbuf = NULL;
}
else
{
phead = get_list_head(queue);
plist = get_next(phead);
precvbuf = LIST_CONTAINOR(plist, struct recv_buf, list);
rtw_list_delete(&precvbuf->list);
}
#ifdef CONFIG_SDIO_HCI
_exit_critical_bh(&queue->lock, &irqL);
#else
_exit_critical_ex(&queue->lock, &irqL);
#endif/*#ifdef CONFIG_SDIO_HCI*/
return precvbuf;
}
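/*
 * recvframe_chkmic: for TKIP frames, recompute the Michael MIC over the
 * payload with the per-station (or group) rx MIC key and compare it against
 * the 8-byte MIC at the end of the frame. On mismatch the error is reported
 * via rtw_handle_tkip_mic_err() and _FAIL is returned; the 8-byte MIC trailer
 * is stripped with recvframe_pull_tail() before leaving the TKIP branch.
 */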
sint recvframe_chkmic(_adapter *adapter, union recv_frame *precvframe);
sint recvframe_chkmic(_adapter *adapter, union recv_frame *precvframe){
sint i,res=_SUCCESS;
u32 datalen;
u8 miccode[8];
u8 bmic_err=_FALSE,brpt_micerror = _TRUE;
u8 *pframe, *payload,*pframemic;
u8 *mickey;
//u8 *iv,rxdata_key_idx=0;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib=&precvframe->u.hdr.attrib;
struct security_priv *psecuritypriv=&adapter->securitypriv;
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
_func_enter_;
stainfo=rtw_get_stainfo(&adapter->stapriv ,&prxattrib->ta[0]);
if(prxattrib->encrypt ==_TKIP_)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n recvframe_chkmic:prxattrib->encrypt ==_TKIP_\n"));
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n recvframe_chkmic:da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
prxattrib->ra[0],prxattrib->ra[1],prxattrib->ra[2],prxattrib->ra[3],prxattrib->ra[4],prxattrib->ra[5]));
//calculate mic code
if(stainfo!= NULL)
{
if(IS_MCAST(prxattrib->ra))
{
//mickey=&psecuritypriv->dot118021XGrprxmickey.skey[0];
//iv = precvframe->u.hdr.rx_data+prxattrib->hdrlen;
//rxdata_key_idx =( ((iv[3])>>6)&0x3) ;
mickey=&psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n recvframe_chkmic: bcmc key \n"));
//DBG_871X("\n recvframe_chkmic: bcmc key psecuritypriv->dot118021XGrpKeyid(%d),pmlmeinfo->key_index(%d) ,recv key_id(%d)\n",
// psecuritypriv->dot118021XGrpKeyid,pmlmeinfo->key_index,rxdata_key_idx);
if(psecuritypriv->binstallGrpkey==_FALSE)
{
res=_FAIL;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
DBG_871X("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
goto exit;
}
}
else{
mickey=&stainfo->dot11tkiprxmickey.skey[0];
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n recvframe_chkmic: unicast key \n"));
}
datalen=precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;//exclude the 8-byte TKIP MIC at the end of the payload
pframe=precvframe->u.hdr.rx_data;
payload=pframe+prxattrib->hdrlen+prxattrib->iv_len;
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n",prxattrib->iv_len,prxattrib->icv_len));
//rtw_seccalctkipmic(&stainfo->dot11tkiprxmickey.skey[0],pframe,payload, datalen ,&miccode[0],(unsigned char)prxattrib->priority); //care the length of the data
rtw_seccalctkipmic(mickey,pframe,payload, datalen ,&miccode[0],(unsigned char)prxattrib->priority); //care the length of the data
pframemic=payload+datalen;
bmic_err=_FALSE;
for(i=0;i<8;i++){
if(miccode[i] != *(pframemic+i)){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("recvframe_chkmic:miccode[%d](%02x) != *(pframemic+%d)(%02x) ",i,miccode[i],i,*(pframemic+i)));
bmic_err=_TRUE;
}
}
if(bmic_err==_TRUE){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
*(pframemic-8),*(pframemic-7),*(pframemic-6),*(pframemic-5),*(pframemic-4),*(pframemic-3),*(pframemic-2),*(pframemic-1)));
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
*(pframemic-16),*(pframemic-15),*(pframemic-14),*(pframemic-13),*(pframemic-12),*(pframemic-11),*(pframemic-10),*(pframemic-9)));
{
uint i;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n ======dump packet (len=%d)======\n",precvframe->u.hdr.len));
for(i=0;i<precvframe->u.hdr.len;i=i+8){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
*(precvframe->u.hdr.rx_data+i),*(precvframe->u.hdr.rx_data+i+1),
*(precvframe->u.hdr.rx_data+i+2),*(precvframe->u.hdr.rx_data+i+3),
*(precvframe->u.hdr.rx_data+i+4),*(precvframe->u.hdr.rx_data+i+5),
*(precvframe->u.hdr.rx_data+i+6),*(precvframe->u.hdr.rx_data+i+7)));
}
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n ======dump packet end [len=%d]======\n",precvframe->u.hdr.len));
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("\n hdrlen=%d, \n",prxattrib->hdrlen));
}
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
prxattrib->ra[0],prxattrib->ra[1],prxattrib->ra[2],
prxattrib->ra[3],prxattrib->ra[4],prxattrib->ra[5],psecuritypriv->binstallGrpkey));
// double-check key_index because of a possible timing issue;
// comparing against psecuritypriv->dot118021XGrpKeyid can hit the same race
if((IS_MCAST(prxattrib->ra)==_TRUE) && (prxattrib->key_index != pmlmeinfo->key_index ))
brpt_micerror = _FALSE;
if((prxattrib->bdecrypted ==_TRUE)&& (brpt_micerror == _TRUE))
{
rtw_handle_tkip_mic_err(adapter,(u8)IS_MCAST(prxattrib->ra));
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" mic error :prxattrib->bdecrypted=%d ",prxattrib->bdecrypted));
DBG_871X(" mic error :prxattrib->bdecrypted=%d\n",prxattrib->bdecrypted);
}
else
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" mic error :prxattrib->bdecrypted=%d ",prxattrib->bdecrypted));
DBG_871X(" mic error :prxattrib->bdecrypted=%d\n",prxattrib->bdecrypted);
}
res=_FAIL;
}
else{
//mic checked ok
if((psecuritypriv->bcheck_grpkey ==_FALSE)&&(IS_MCAST(prxattrib->ra)==_TRUE)){
psecuritypriv->bcheck_grpkey =_TRUE;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("psecuritypriv->bcheck_grpkey =_TRUE"));
}
}
}
else
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("recvframe_chkmic: rtw_get_stainfo==NULL!!!\n"));
}
recvframe_pull_tail(precvframe, 8);
}
exit:
_func_exit_;
return res;
}
//decrypt and set the ivlen,icvlen of the recv_frame
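//The key index is recovered from bits 6-7 of the 4th IV byte; when it looks
//invalid (> WEP_KEYS) it falls back to the configured default/group key id.
//Software decryption is used when the frame was not decrypted by hardware
//(or sw_decrypt is forced); otherwise the frame is marked hw_decrypted.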
union recv_frame * decryptor(_adapter *padapter,union recv_frame *precv_frame);
union recv_frame * decryptor(_adapter *padapter,union recv_frame *precv_frame)
{
struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
struct security_priv *psecuritypriv=&padapter->securitypriv;
union recv_frame *return_packet=precv_frame;
u32 res=_SUCCESS;
_func_enter_;
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("prxstat->decrypted=%x prxattrib->encrypt = 0x%03x\n",prxattrib->bdecrypted,prxattrib->encrypt));
if(prxattrib->encrypt>0)
{
u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
prxattrib->key_index = ( ((iv[3])>>6)&0x3) ;
if(prxattrib->key_index > WEP_KEYS)
{
DBG_871X("prxattrib->key_index(%d) > WEP_KEYS \n", prxattrib->key_index);
switch(prxattrib->encrypt){
case _WEP40_:
case _WEP104_:
prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex;
break;
case _TKIP_:
case _AES_:
default:
prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid;
break;
}
}
}
if((prxattrib->encrypt>0) && ((prxattrib->bdecrypted==0) ||(psecuritypriv->sw_decrypt==_TRUE)))
{
#ifdef CONFIG_CONCURRENT_MODE
if(!IS_MCAST(prxattrib->ra))//bc/mc packets use sw decryption for concurrent mode
#endif
psecuritypriv->hw_decrypted=_FALSE;
#ifdef DBG_RX_DECRYPTOR
DBG_871X("prxstat->bdecrypted:%d, prxattrib->encrypt:%d, Setting psecuritypriv->hw_decrypted = %d\n"
, prxattrib->bdecrypted ,prxattrib->encrypt, psecuritypriv->hw_decrypted);
#endif
switch(prxattrib->encrypt){
case _WEP40_:
case _WEP104_:
rtw_wep_decrypt(padapter, (u8 *)precv_frame);
break;
case _TKIP_:
res = rtw_tkip_decrypt(padapter, (u8 *)precv_frame);
break;
case _AES_:
res = rtw_aes_decrypt(padapter, (u8 * )precv_frame);
break;
#ifdef CONFIG_WAPI_SUPPORT
case _SMS4_:
rtw_sms4_decrypt(padapter, (u8 * )precv_frame);
break;
#endif
default:
break;
}
}
else if(prxattrib->bdecrypted==1
&& prxattrib->encrypt >0
&& (psecuritypriv->busetkipkey==1 || prxattrib->encrypt !=_TKIP_ )
)
{
#if 0
if((prxstat->icv==1)&&(prxattrib->encrypt!=_AES_))
{
psecuritypriv->hw_decrypted=_FALSE;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("psecuritypriv->hw_decrypted=_FALSE"));
rtw_free_recvframe(precv_frame, &padapter->recvpriv.free_recv_queue);
return_packet=NULL;
}
else
#endif
{
psecuritypriv->hw_decrypted=_TRUE;
#ifdef DBG_RX_DECRYPTOR
DBG_871X("prxstat->bdecrypted:%d, prxattrib->encrypt:%d, Setting psecuritypriv->hw_decrypted = %d\n"
, prxattrib->bdecrypted ,prxattrib->encrypt, psecuritypriv->hw_decrypted);
#endif
}
}
else {
#ifdef DBG_RX_DECRYPTOR
DBG_871X("prxstat->bdecrypted:%d, prxattrib->encrypt:%d, psecuritypriv->hw_decrypted:%d\n"
, prxattrib->bdecrypted ,prxattrib->encrypt, psecuritypriv->hw_decrypted);
#endif
}
if(res == _FAIL)
{
rtw_free_recvframe(return_packet,&padapter->recvpriv.free_recv_queue);
return_packet = NULL;
}
//recvframe_chkmic(adapter, precv_frame); //move to recvframme_defrag function
_func_exit_;
return return_packet;
}
//802.1X port control: when the port is blocked, only EAPOL frames are passed up
union recv_frame * portctrl(_adapter *adapter,union recv_frame * precv_frame);
union recv_frame * portctrl(_adapter *adapter,union recv_frame * precv_frame)
{
u8 *psta_addr, *ptr;
uint auth_alg;
struct recv_frame_hdr *pfhdr;
struct sta_info *psta;
struct sta_priv *pstapriv ;
union recv_frame *prtnframe;
u16 ether_type=0;
u16 eapol_type = 0x888e;//for Funia BD's WPA issue
struct rx_pkt_attrib *pattrib;
_func_enter_;
pstapriv = &adapter->stapriv;
auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
ptr = get_recvframe_data(precv_frame);
pfhdr = &precv_frame->u.hdr;
pattrib = &pfhdr->attrib;
psta_addr = pattrib->ta;
psta = rtw_get_stainfo(pstapriv, psta_addr); //look up the station only after psta_addr is set
prtnframe = NULL;
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("########portctrl:adapter->securitypriv.dot11AuthAlgrthm=%d\n",adapter->securitypriv.dot11AuthAlgrthm));
if(auth_alg==2)
{
if ((psta!=NULL) && (psta->ieee8021x_blocked))
{
//blocked
//only accept EAPOL frame
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("########portctrl:psta->ieee8021x_blocked==1\n"));
prtnframe=precv_frame;
//get ether_type
ptr=ptr+pfhdr->attrib.hdrlen+pfhdr->attrib.iv_len+LLC_HEADER_SIZE;
_rtw_memcpy(ðer_type,ptr, 2);
ether_type= ntohs((unsigned short )ether_type);
if (ether_type == eapol_type) {
prtnframe=precv_frame;
}
else {
//free this frame
rtw_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue);
prtnframe=NULL;
}
}
else
{
//allowed
//check decryption status, and decrypt the frame if needed
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("########portctrl:psta->ieee8021x_blocked==0\n"));
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("portctrl:precv_frame->hdr.attrib.privacy=%x\n",precv_frame->u.hdr.attrib.privacy));
if (pattrib->bdecrypted == 0)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("portctrl:prxstat->decrypted=%x\n", pattrib->bdecrypted));
}
prtnframe=precv_frame;
//check whether this is an EAPOL frame (rekey);
//note: ether_type is only parsed in the blocked branch above, so it is still 0 here
if(ether_type == eapol_type){
RT_TRACE(_module_rtl871x_recv_c_,_drv_notice_,("########portctrl:ether_type == 0x888e\n"));
//check Rekey
prtnframe=precv_frame;
}
else{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("########portctrl:ether_type=0x%04x\n", ether_type));
}
}
}
else
{
prtnframe=precv_frame;
}
_func_exit_;
return prtnframe;
}
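// recv_decache: drop duplicate frames by caching the last seen
// (seq_num << 4 | frag_num) value per TID in the station's rx cache;
// the cache entry is refreshed for every accepted frame.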
sint recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache);
sint recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
{
sint tid = precv_frame->u.hdr.attrib.priority;
u16 seq_ctrl = ( (precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
(precv_frame->u.hdr.attrib.frag_num & 0xf);
_func_enter_;
if(tid>15)
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
return _FAIL;
}
if(1)//if(bretry)
{
if(seq_ctrl == prxcache->tid_rxseq[tid])
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, seq_ctrl=0x%x, tid=0x%x, tid_rxseq=0x%x\n", seq_ctrl, tid, prxcache->tid_rxseq[tid]));
return _FAIL;
}
}
prxcache->tid_rxseq[tid] = seq_ctrl;
_func_exit_;
return _SUCCESS;
}
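// process_pwrbit_data (AP mode): track the power-management bit of the
// transmitting station and stop or resume its tx queue accordingly.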
void process_pwrbit_data(_adapter *padapter, union recv_frame *precv_frame);
void process_pwrbit_data(_adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_AP_MODE
unsigned char pwrbit;
u8 *ptr = precv_frame->u.hdr.rx_data;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta=NULL;
psta = rtw_get_stainfo(pstapriv, pattrib->src);
pwrbit = GetPwrMgt(ptr);
if(psta)
{
if(pwrbit)
{
if(!(psta->state & WIFI_SLEEP_STATE))
{
//psta->state |= WIFI_SLEEP_STATE;
//pstapriv->sta_dz_bitmap |= BIT(psta->aid);
stop_sta_xmit(padapter, psta);
//DBG_871X("to sleep, sta_dz_bitmap=%x\n", pstapriv->sta_dz_bitmap);
}
}
else
{
if(psta->state & WIFI_SLEEP_STATE)
{
//psta->state ^= WIFI_SLEEP_STATE;
//pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
wakeup_sta_to_xmit(padapter, psta);
//DBG_871X("to wakeup, sta_dz_bitmap=%x\n", pstapriv->sta_dz_bitmap);
}
}
}
#endif
}
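// process_wmmps_data (AP mode): handle WMM power-save (U-APSD) trigger
// frames from a sleeping station; deliver buffered frames for
// delivery-enabled ACs or answer with a QoS Null frame carrying EOSP
// when nothing is queued.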
void process_wmmps_data(_adapter *padapter, union recv_frame *precv_frame);
void process_wmmps_data(_adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_AP_MODE
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta=NULL;
psta = rtw_get_stainfo(pstapriv, pattrib->src);
if(!psta) return;
#ifdef CONFIG_TDLS
if( !(psta->tdls_sta_state & TDLS_LINKED_STATE ) )
{
#endif //CONFIG_TDLS
if(!psta->qos_option)
return;
if(!(psta->qos_info&0xf))
return;
#ifdef CONFIG_TDLS
}
#endif //CONFIG_TDLS
if(psta->state&WIFI_SLEEP_STATE)
{
u8 wmmps_ac=0;
switch(pattrib->priority)
{
case 1:
case 2:
wmmps_ac = psta->uapsd_bk&BIT(1);
break;
case 4:
case 5:
wmmps_ac = psta->uapsd_vi&BIT(1);
break;
case 6:
case 7:
wmmps_ac = psta->uapsd_vo&BIT(1);
break;
case 0:
case 3:
default:
wmmps_ac = psta->uapsd_be&BIT(1);
break;
}
if(wmmps_ac)
{
if(psta->sleepq_ac_len>0)
{
//process received triggered frame
xmit_delivery_enabled_frames(padapter, psta);
}
else
{
//issue one qos null frame with More data bit = 0 and the EOSP bit set (=1)
issue_qos_nulldata(padapter, psta->hwaddr, (u16)pattrib->priority, 0, 0);
}
}
}
#endif
}
#ifdef CONFIG_TDLS
sint OnTDLS(_adapter *adapter, union recv_frame *precv_frame)
{
struct rx_pkt_attrib *pattrib = & precv_frame->u.hdr.attrib;
sint ret = _SUCCESS;
u8 *paction = get_recvframe_data(precv_frame);
u8 category_field = 1;
#ifdef CONFIG_WFD
u8 WFA_OUI[3] = { 0x50, 0x6f, 0x9a };
#endif //CONFIG_WFD
struct tdls_info *ptdlsinfo = &(adapter->tdlsinfo);
//point to action field
paction+=pattrib->hdrlen
+ pattrib->iv_len
+ SNAP_SIZE
+ ETH_TYPE_LEN
+ PAYLOAD_TYPE_LEN
+ category_field;
if(ptdlsinfo->enable == 0)
{
DBG_871X("recv tdls frame, "
"but tdls haven't enabled\n");
ret = _FAIL;
return ret;
}
switch(*paction){
case TDLS_SETUP_REQUEST:
DBG_871X("recv tdls setup request frame\n");
ret=On_TDLS_Setup_Req(adapter, precv_frame);
break;
case TDLS_SETUP_RESPONSE:
DBG_871X("recv tdls setup response frame\n");
ret=On_TDLS_Setup_Rsp(adapter, precv_frame);
break;
case TDLS_SETUP_CONFIRM:
DBG_871X("recv tdls setup confirm frame\n");
ret=On_TDLS_Setup_Cfm(adapter, precv_frame);
break;
case TDLS_TEARDOWN:
DBG_871X("recv tdls teardown, free sta_info\n");
ret=On_TDLS_Teardown(adapter, precv_frame);
break;
case TDLS_DISCOVERY_REQUEST:
DBG_871X("recv tdls discovery request frame\n");
ret=On_TDLS_Dis_Req(adapter, precv_frame);
break;
case TDLS_PEER_TRAFFIC_RESPONSE:
DBG_871X("recv tdls peer traffic response frame\n");
ret=On_TDLS_Peer_Traffic_Rsp(adapter, precv_frame);
break;
case TDLS_CHANNEL_SWITCH_REQUEST:
DBG_871X("recv tdls channel switch request frame\n");
ret=On_TDLS_Ch_Switch_Req(adapter, precv_frame);
break;
case TDLS_CHANNEL_SWITCH_RESPONSE:
DBG_871X("recv tdls channel switch response frame\n");
ret=On_TDLS_Ch_Switch_Rsp(adapter, precv_frame);
break;
#ifdef CONFIG_WFD
case 0x50: //First byte of WFA OUI
if( _rtw_memcmp(WFA_OUI, (paction), 3) )
{
if( *(paction + 3) == 0x04) //Probe request frame
{
//WFDTDLS: for sigma test, do not setup direct link automatically
ptdlsinfo->dev_discovered = 1;
DBG_871X("recv tunneled probe request frame\n");
issue_tunneled_probe_rsp(adapter, precv_frame);
}
if( *(paction + 3) == 0x05) //Probe response frame
{
//WFDTDLS: for sigma test, do not setup direct link automatically
ptdlsinfo->dev_discovered = 1;
DBG_871X("recv tunneled probe response frame\n");
}
}
break;
#endif //CONFIG_WFD
default:
DBG_871X("receive TDLS frame but not supported\n");
ret=_FAIL;
break;
}
exit:
return ret;
}
#endif
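// count_rx_stats: update the per-adapter and per-station rx byte/packet
// counters for an accepted data frame.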
void count_rx_stats(_adapter *padapter, union recv_frame *prframe, struct sta_info*sta);
void count_rx_stats(_adapter *padapter, union recv_frame *prframe, struct sta_info*sta)
{
int sz;
struct sta_info *psta = NULL;
struct stainfo_stats *pstats = NULL;
struct rx_pkt_attrib *pattrib = & prframe->u.hdr.attrib;
struct recv_priv *precvpriv = &padapter->recvpriv;
sz = get_recvframe_len(prframe);
precvpriv->rx_bytes += sz;
padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
if( (!MacAddr_isBcst(pattrib->dst)) && (!IS_MCAST(pattrib->dst))){
padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
}
if(sta)
psta = sta;
else
psta = prframe->u.hdr.psta;
if(psta)
{
pstats = &psta->sta_stats;
pstats->rx_data_pkts++;
pstats->rx_bytes += sz;
}
}
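/*
 * sta2sta_data_frame: address checks for frames with to_ds=0/from_ds=0.
 * Validates SA/DA/BSSID according to the current mode (ad-hoc, station with
 * an optional TDLS link, AP, MP) and resolves the corresponding sta_info.
 */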
sint sta2sta_data_frame(
_adapter *adapter,
union recv_frame *precv_frame,
struct sta_info**psta
);
sint sta2sta_data_frame(
_adapter *adapter,
union recv_frame *precv_frame,
struct sta_info**psta
)
{
u8 *ptr = precv_frame->u.hdr.rx_data;
sint ret = _SUCCESS;
struct rx_pkt_attrib *pattrib = & precv_frame->u.hdr.attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *mybssid = get_bssid(pmlmepriv);
u8 *myhwaddr = myid(&adapter->eeprompriv);
u8 * sta_addr = NULL;
sint bmcast = IS_MCAST(pattrib->dst);
#ifdef CONFIG_TDLS
struct tdls_info *ptdlsinfo = &adapter->tdlsinfo;
struct sta_info *ptdls_sta=NULL;
u8 *psnap_type=ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
//frame body located after [+2]: ether-type, [+1]: payload type
u8 *pframe_body = psnap_type+2+1;
#endif
_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE))
{
// filter packets that SA is myself or multicast or broadcast
if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" SA==myself \n"));
ret= _FAIL;
goto exit;
}
if( (!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast) ){
ret= _FAIL;
goto exit;
}
if( _rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
_rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
(!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) ) {
ret= _FAIL;
goto exit;
}
sta_addr = pattrib->src;
}
else if(check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE)
{
#ifdef CONFIG_TDLS
//direct link data transfer
if(ptdlsinfo->setup_state == TDLS_LINKED_STATE){
ptdls_sta = rtw_get_stainfo(pstapriv, pattrib->src);
if(ptdls_sta==NULL)
{
ret=_FAIL;
goto exit;
}
else if(ptdls_sta->tdls_sta_state&TDLS_LINKED_STATE)
{
//drop QoS-SubType Data, including QoS NULL, excluding QoS-Data
if( (GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE )== WIFI_QOS_DATA_TYPE)
{
if(GetFrameSubType(ptr)&(BIT(4)|BIT(5)|BIT(6)))
{
DBG_871X("drop QoS-Sybtype Data\n");
ret= _FAIL;
goto exit;
}
}
// filter packets that SA is myself or multicast or broadcast
if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)){
ret= _FAIL;
goto exit;
}
// da should be for me
if((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN))&& (!bmcast))
{
ret= _FAIL;
goto exit;
}
// check BSSID
if( _rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
_rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
(!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) )
{
ret= _FAIL;
goto exit;
}
//process UAPSD tdls sta
process_pwrbit_data(adapter, precv_frame);
// if NULL-frame, check pwrbit
if ((GetFrameSubType(ptr)) == WIFI_DATA_NULL)
{
//NULL-frame with pwrbit=1, buffer_STA should buffer frames for sleep_STA
if(GetPwrMgt(ptr))
{
DBG_871X("TDLS: recv peer null frame with pwr bit 1\n");
ptdls_sta->tdls_sta_state|=TDLS_PEER_SLEEP_STATE;
// it would be triggered when we are off channel and receiving NULL DATA
// we can confirm that peer STA is at off channel
}
else if(ptdls_sta->tdls_sta_state&TDLS_CH_SWITCH_ON_STATE)
{
if((ptdls_sta->tdls_sta_state & TDLS_PEER_AT_OFF_STATE) != TDLS_PEER_AT_OFF_STATE)
{
issue_nulldata_to_TDLS_peer_STA(adapter, ptdls_sta, 0);
ptdls_sta->tdls_sta_state |= TDLS_PEER_AT_OFF_STATE;
On_TDLS_Peer_Traffic_Rsp(adapter, precv_frame);
}
}
ret= _FAIL;
goto exit;
}
//frames carrying the TDLS SNAP ethertype are handled by OnTDLS()
if((_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_TDLS, 2))){
ret= OnTDLS(adapter, precv_frame);
goto exit;
}
}
sta_addr = pattrib->src;
}
else
#endif //CONFIG_TDLS
{
// For Station mode: SA and BSSID should match, and DA should be my MAC address
if(!_rtw_memcmp(pattrib->bssid, pattrib->src, ETH_ALEN) )
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("bssid != TA under STATION_MODE; drop pkt\n"));
ret= _FAIL;
goto exit;
}
sta_addr = pattrib->bssid;
}
}
else if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
{
if (bmcast)
{
// For AP mode, if DA == MCAST, then BSSID should be also MCAST
if (!IS_MCAST(pattrib->bssid)){
ret= _FAIL;
goto exit;
}
}
else // not mc-frame
{
// For AP mode, if DA is not multicast then it must equal the BSSID
if(!_rtw_memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
ret= _FAIL;
goto exit;
}
sta_addr = pattrib->src;
}
}
else if(check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE)
{
_rtw_memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
_rtw_memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
sta_addr = mybssid;
}
else
{
ret = _FAIL;
}
if(bmcast)
*psta = rtw_get_bcmc_stainfo(adapter);
else
*psta = rtw_get_stainfo(pstapriv, sta_addr); // get ap_info
#ifdef CONFIG_TDLS
if(ptdls_sta != NULL)
*psta = ptdls_sta;
#endif //CONFIG_TDLS
if (*psta == NULL) {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("can't get psta under sta2sta_data_frame ; drop pkt\n"));
#ifdef CONFIG_MP_INCLUDED
if (adapter->registrypriv.mp_mode == 1)
{
if(check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE)
adapter->mppriv.rx_pktloss++;
}
#endif
ret= _FAIL;
goto exit;
}
exit:
_func_exit_;
return ret;
}
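/*
 * ap2sta_data_frame: address checks for from-DS frames received in station
 * (or MP) mode. Filters frames not addressed to us or coming from a foreign
 * BSSID (optionally answering with a deauth), then resolves the AP sta_info.
 */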
sint ap2sta_data_frame(
_adapter *adapter,
union recv_frame *precv_frame,
struct sta_info**psta );
sint ap2sta_data_frame(
_adapter *adapter,
union recv_frame *precv_frame,
struct sta_info**psta )
{
u8 *ptr = precv_frame->u.hdr.rx_data;
struct rx_pkt_attrib *pattrib = & precv_frame->u.hdr.attrib;
sint ret = _SUCCESS;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *mybssid = get_bssid(pmlmepriv);
u8 *myhwaddr = myid(&adapter->eeprompriv);
sint bmcast = IS_MCAST(pattrib->dst);
_func_enter_;
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE)
&& (check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE
|| check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == _TRUE )
)
{
// filter packets that SA is myself or multicast or broadcast
if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" SA==myself \n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s SA="MAC_FMT", myhwaddr="MAC_FMT"\n",
__FUNCTION__, MAC_ARG(pattrib->src), MAC_ARG(myhwaddr));
#endif
ret= _FAIL;
goto exit;
}
// da should be for me
if((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN))&& (!bmcast))
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,
(" ap2sta_data_frame: compare DA fail; DA="MAC_FMT"\n", MAC_ARG(pattrib->dst)));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s DA="MAC_FMT"\n", __func__, MAC_ARG(pattrib->dst));
#endif
ret= _FAIL;
goto exit;
}
// check BSSID
if( _rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
_rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
(!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) )
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,
(" ap2sta_data_frame: compare BSSID fail ; BSSID="MAC_FMT"\n", MAC_ARG(pattrib->bssid)));
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("mybssid="MAC_FMT"\n", MAC_ARG(mybssid)));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s BSSID="MAC_FMT", mybssid="MAC_FMT"\n",
__FUNCTION__, MAC_ARG(pattrib->bssid), MAC_ARG(mybssid));
DBG_871X( "this adapter = %d, buddy adapter = %d\n", adapter->adapter_type, adapter->pbuddy_adapter->adapter_type );
#endif
if(!bmcast)
{
DBG_871X("issue_deauth to the nonassociated ap=" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->bssid));
issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
}
ret= _FAIL;
goto exit;
}
if(bmcast)
*psta = rtw_get_bcmc_stainfo(adapter);
else
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); // get ap_info
if (*psta == NULL) {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("ap2sta: can't get psta under STATION_MODE ; drop pkt\n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s can't get psta under STATION_MODE ; drop pkt\n", __FUNCTION__);
#endif
ret= _FAIL;
goto exit;
}
//if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
//}
if (GetFrameSubType(ptr) & BIT(6)) {
/* No data payload; it will not be indicated to the upper layer, so temporarily count it here */
count_rx_stats(adapter, precv_frame, *psta);
ret = RTW_RX_HANDLED;
goto exit;
}
}
else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE) &&
(check_fwstate(pmlmepriv, _FW_LINKED) == _TRUE) )
{
_rtw_memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
_rtw_memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
//
_rtw_memcpy(pattrib->bssid, mybssid, ETH_ALEN);
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); // get sta_info
if (*psta == NULL) {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("can't get psta under MP_MODE ; drop pkt\n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s can't get psta under WIFI_MP_STATE ; drop pkt\n", __FUNCTION__);
#endif
ret= _FAIL;
goto exit;
}
}
else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
{
/* Special case */
ret = RTW_RX_HANDLED;
goto exit;
}
else
{
if(_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)&& (!bmcast))
{
*psta = rtw_get_stainfo(pstapriv, pattrib->bssid); // get sta_info
if (*psta == NULL)
{
DBG_871X("issue_deauth to the ap=" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->bssid));
issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
}
}
ret = _FAIL;
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s fw_state:0x%x\n", __FUNCTION__, get_fwstate(pmlmepriv));
#endif
}
exit:
_func_exit_;
return ret;
}
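/*
 * sta2ap_data_frame: address checks for to-DS frames received in AP mode;
 * also handles power-save and WMM-PS bookkeeping for the sending station.
 * Frames from unknown stations trigger a deauth with reason code 7.
 */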
sint sta2ap_data_frame(
_adapter *adapter,
union recv_frame *precv_frame,
struct sta_info**psta );
sint sta2ap_data_frame(
_adapter *adapter,
union recv_frame *precv_frame,
struct sta_info**psta )
{
u8 *ptr = precv_frame->u.hdr.rx_data;
struct rx_pkt_attrib *pattrib = & precv_frame->u.hdr.attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
unsigned char *mybssid = get_bssid(pmlmepriv);
sint ret=_SUCCESS;
_func_enter_;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
{
//For AP mode, RA=BSSID, TA=STA(SRC_ADDR), A3=DST_ADDR
if(!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN))
{
ret= _FAIL;
goto exit;
}
*psta = rtw_get_stainfo(pstapriv, pattrib->src);
if (*psta == NULL)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("can't get psta under AP_MODE; drop pkt\n"));
DBG_871X("issue_deauth to sta=" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->src));
issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
ret = RTW_RX_HANDLED;
goto exit;
}
process_pwrbit_data(adapter, precv_frame);
if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
process_wmmps_data(adapter, precv_frame);
}
if (GetFrameSubType(ptr) & BIT(6)) {
/* No data payload; it will not be indicated to the upper layer, so temporarily count it here */
count_rx_stats(adapter, precv_frame, *psta);
ret = RTW_RX_HANDLED;
goto exit;
}
}
else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
if (!_rtw_memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
ret = RTW_RX_HANDLED;
goto exit;
}
DBG_871X("issue_deauth to sta=" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->src));
issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
ret = RTW_RX_HANDLED;
goto exit;
}
exit:
_func_exit_;
return ret;
}
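/*
 * validate_recv_ctrl_frame (AP mode): only PS-Poll control frames addressed
 * to us are handled. A buffered frame from the station's sleep queue is
 * handed to the hal for transmission (More Data bit set when more frames
 * remain), or a Null frame is issued when the TIM bit was set but the queue
 * is empty. Always returns _FAIL so the frame is not indicated further.
 */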
sint validate_recv_ctrl_frame(_adapter *padapter, union recv_frame *precv_frame);
sint validate_recv_ctrl_frame(_adapter *padapter, union recv_frame *precv_frame)
{
#ifdef CONFIG_AP_MODE
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->u.hdr.rx_data;
//uint len = precv_frame->u.hdr.len;
//DBG_871X("+validate_recv_ctrl_frame\n");
if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
{
return _FAIL;
}
//only accept frames whose RA (addr1) is my address
if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
{
return _FAIL;
}
//only handle ps-poll
if(GetFrameSubType(pframe) == WIFI_PSPOLL)
{
u16 aid;
u8 wmmps_ac=0;
struct sta_info *psta=NULL;
aid = GetAid(pframe);
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if((psta==NULL) || (psta->aid!=aid))
{
return _FAIL;
}
//for rx pkt statistics
psta->sta_stats.rx_ctrl_pkts++;
switch(pattrib->priority)
{
case 1:
case 2:
wmmps_ac = psta->uapsd_bk&BIT(0);
break;
case 4:
case 5:
wmmps_ac = psta->uapsd_vi&BIT(0);
break;
case 6:
case 7:
wmmps_ac = psta->uapsd_vo&BIT(0);
break;
case 0:
case 3:
default:
wmmps_ac = psta->uapsd_be&BIT(0);
break;
}
if(wmmps_ac)
return _FAIL;
if(psta->state & WIFI_STA_ALIVE_CHK_STATE)
{
DBG_871X("%s alive check-rx ps-poll\n", __func__);
psta->expire_to = pstapriv->expire_to;
psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
}
if((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid)))
{
_irqL irqL;
_list *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe=NULL;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
//_enter_critical_bh(&psta->sleep_q.lock, &irqL);
_enter_critical_bh(&pxmitpriv->lock, &irqL);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
if ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE)
{
pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
xmitframe_plist = get_next(xmitframe_plist);
rtw_list_delete(&pxmitframe->list);
psta->sleepq_len--;
if(psta->sleepq_len>0)
pxmitframe->attrib.mdata = 1;
else
pxmitframe->attrib.mdata = 0;
pxmitframe->attrib.triggered = 1;
//DBG_871X("handling ps-poll, q_len=%d, tim=%x\n", psta->sleepq_len, pstapriv->tim_bitmap);
#if 0
_exit_critical_bh(&psta->sleep_q.lock, &irqL);
if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE)
{
rtw_os_xmit_complete(padapter, pxmitframe);
}
_enter_critical_bh(&psta->sleep_q.lock, &irqL);
#endif
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
if(psta->sleepq_len==0)
{
pstapriv->tim_bitmap &= ~BIT(psta->aid);
//DBG_871X("after handling ps-poll, tim=%x\n", pstapriv->tim_bitmap);
//update BCN for TIM IE
//update_BCNTIM(padapter);
update_beacon(padapter, _TIM_IE_, NULL, _FALSE);
}
//_exit_critical_bh(&psta->sleep_q.lock, &irqL);
_exit_critical_bh(&pxmitpriv->lock, &irqL);
}
else
{
//_exit_critical_bh(&psta->sleep_q.lock, &irqL);
_exit_critical_bh(&pxmitpriv->lock, &irqL);
//DBG_871X("no buffered packets to xmit\n");
if(pstapriv->tim_bitmap&BIT(psta->aid))
{
if(psta->sleepq_len==0)
{
DBG_871X("no buffered packets to xmit\n");
//issue nulldata with More data bit = 0 to indicate we have no buffered packets
issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
}
else
{
DBG_871X("error!psta->sleepq_len=%d\n", psta->sleepq_len);
psta->sleepq_len=0;
}
pstapriv->tim_bitmap &= ~BIT(psta->aid);
//update BCN for TIM IE
//update_BCNTIM(padapter);
update_beacon(padapter, _TIM_IE_, NULL, _FALSE);
}
}
}
}
#endif
return _FAIL;
}
union recv_frame* recvframe_chk_defrag(PADAPTER padapter, union recv_frame *precv_frame);
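/*
 * validate_recv_mgnt_frame: run management frames through defragmentation,
 * update per-station management rx counters (beacon/probe statistics) and
 * hand the frame to mgt_dispatcher().
 */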
sint validate_recv_mgnt_frame(PADAPTER padapter, union recv_frame *precv_frame);
sint validate_recv_mgnt_frame(PADAPTER padapter, union recv_frame *precv_frame)
{
//struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("+validate_recv_mgnt_frame\n"));
#if 0
if(check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
{
#ifdef CONFIG_NATIVEAP_MLME
mgt_dispatcher(padapter, precv_frame);
#else
rtw_hostapd_mlme_rx(padapter, precv_frame);
#endif
}
else
{
mgt_dispatcher(padapter, precv_frame);
}
#endif
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
if (precv_frame == NULL) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,("%s: fragment packet\n",__FUNCTION__));
return _SUCCESS;
}
{
//for rx pkt statistics
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON)
psta->sta_stats.rx_beacon_pkts++;
else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ)
psta->sta_stats.rx_probereq_pkts++;
else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
if (_rtw_memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN) == _TRUE)
psta->sta_stats.rx_probersp_pkts++;
else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data))
|| is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
psta->sta_stats.rx_probersp_bm_pkts++;
else
psta->sta_stats.rx_probersp_uo_pkts++;
}
}
}
#ifdef CONFIG_INTEL_PROXIM
if(padapter->proximity.proxim_on==_TRUE)
{
struct rx_pkt_attrib * pattrib=&precv_frame->u.hdr.attrib;
struct recv_stat* prxstat=( struct recv_stat * ) precv_frame->u.hdr.rx_head ;
u8 * pda,*psa,*pbssid,*ptr;
ptr=precv_frame->u.hdr.rx_data;
pda = get_da(ptr);
psa = get_sa(ptr);
pbssid = get_hdr_bssid(ptr);
_rtw_memcpy(pattrib->dst, pda, ETH_ALEN);
_rtw_memcpy(pattrib->src, psa, ETH_ALEN);
_rtw_memcpy(pattrib->bssid, pbssid, ETH_ALEN);
switch(pattrib->to_fr_ds)
{
case 0:
_rtw_memcpy(pattrib->ra, pda, ETH_ALEN);
_rtw_memcpy(pattrib->ta, psa, ETH_ALEN);
break;
case 1:
_rtw_memcpy(pattrib->ra, pda, ETH_ALEN);
_rtw_memcpy(pattrib->ta, pbssid, ETH_ALEN);
break;
case 2:
_rtw_memcpy(pattrib->ra, pbssid, ETH_ALEN);
_rtw_memcpy(pattrib->ta, psa, ETH_ALEN);
break;
case 3:
_rtw_memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" case 3\n"));
break;
default:
break;
}
pattrib->priority=0;
pattrib->hdrlen = pattrib->to_fr_ds==3 ? 30 : 24;
padapter->proximity.proxim_rx(padapter,precv_frame);
}
#endif
mgt_dispatcher(padapter, precv_frame);
return _SUCCESS;
}
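/*
 * validate_recv_data_frame: fill the rx attributes (addresses, QoS fields,
 * header length), dispatch the address checks by the to/from DS bits, drop
 * duplicates via recv_decache() and derive the encryption algorithm and
 * IV/ICV lengths for protected frames.
 */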
sint validate_recv_data_frame(_adapter *adapter, union recv_frame *precv_frame);
sint validate_recv_data_frame(_adapter *adapter, union recv_frame *precv_frame)
{
u8 bretry;
u8 *psa, *pda, *pbssid;
struct sta_info *psta = NULL;
u8 *ptr = precv_frame->u.hdr.rx_data;
struct rx_pkt_attrib *pattrib = & precv_frame->u.hdr.attrib;
struct sta_priv *pstapriv = &adapter->stapriv;
struct security_priv *psecuritypriv = &adapter->securitypriv;
sint ret = _SUCCESS;
#ifdef CONFIG_TDLS
struct tdls_info *ptdlsinfo = &adapter->tdlsinfo;
#endif //CONFIG_TDLS
_func_enter_;
bretry = GetRetry(ptr);
pda = get_da(ptr);
psa = get_sa(ptr);
pbssid = get_hdr_bssid(ptr);
if(pbssid == NULL){
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s pbssid == NULL\n", __func__);
#endif
ret= _FAIL;
goto exit;
}
_rtw_memcpy(pattrib->dst, pda, ETH_ALEN);
_rtw_memcpy(pattrib->src, psa, ETH_ALEN);
_rtw_memcpy(pattrib->bssid, pbssid, ETH_ALEN);
switch(pattrib->to_fr_ds)
{
case 0:
_rtw_memcpy(pattrib->ra, pda, ETH_ALEN);
_rtw_memcpy(pattrib->ta, psa, ETH_ALEN);
ret = sta2sta_data_frame(adapter, precv_frame, &psta);
break;
case 1:
_rtw_memcpy(pattrib->ra, pda, ETH_ALEN);
_rtw_memcpy(pattrib->ta, pbssid, ETH_ALEN);
ret = ap2sta_data_frame(adapter, precv_frame, &psta);
break;
case 2:
_rtw_memcpy(pattrib->ra, pbssid, ETH_ALEN);
_rtw_memcpy(pattrib->ta, psa, ETH_ALEN);
ret = sta2ap_data_frame(adapter, precv_frame, &psta);
break;
case 3:
_rtw_memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
_rtw_memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
ret =_FAIL;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" case 3\n"));
break;
default:
ret =_FAIL;
break;
}
if(ret ==_FAIL){
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s case:%d, res:%d\n", __FUNCTION__, pattrib->to_fr_ds, ret);
#endif
goto exit;
} else if (ret == RTW_RX_HANDLED) {
goto exit;
}
if(psta==NULL){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,(" after to_fr_ds_chk; psta==NULL \n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s psta == NULL\n", __func__);
#endif
ret= _FAIL;
goto exit;
}
//psta->rssi = prxcmd->rssi;
//psta->signal_quality= prxcmd->sq;
precv_frame->u.hdr.psta = psta;
pattrib->amsdu=0;
pattrib->ack_policy = 0;
//parsing QC field
if(pattrib->qos == 1)
{
pattrib->priority = GetPriority((ptr + 24));
pattrib->ack_policy = GetAckpolicy((ptr + 24));
pattrib->amsdu = GetAMsdu((ptr + 24));
pattrib->hdrlen = pattrib->to_fr_ds==3 ? 32 : 26;
if(pattrib->priority!=0 && pattrib->priority!=3)
{
adapter->recvpriv.bIsAnyNonBEPkts = _TRUE;
}
}
else
{
pattrib->priority=0;
pattrib->hdrlen = pattrib->to_fr_ds==3 ? 30 : 24;
}
if(pattrib->order)//HT-CTRL 11n
{
pattrib->hdrlen += 4;
}
precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
// decache, drop duplicate recv packets
if(recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("decache : drop pkt\n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s recv_decache return _FAIL\n", __func__);
#endif
ret= _FAIL;
goto exit;
}
#if 0
if(psta->tdls_sta_state & TDLS_LINKED_STATE )
{
if(psta->dot118021XPrivacy==_AES_)
pattrib->encrypt=psta->dot118021XPrivacy;
}
#endif //CONFIG_TDLS
if(pattrib->privacy){
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0],IS_MCAST(pattrib->ra)));
#ifdef CONFIG_TDLS
if((psta->tdls_sta_state & TDLS_LINKED_STATE) && (psta->dot118021XPrivacy==_AES_))
{
pattrib->encrypt=psta->dot118021XPrivacy;
}
else
#endif //CONFIG_TDLS
GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra));
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n pattrib->encrypt=%d\n",pattrib->encrypt));
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
}
else
{
pattrib->encrypt = 0;
pattrib->iv_len = pattrib->icv_len = 0;
}
exit:
_func_exit_;
return ret;
}
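/*
 * validate_recv_frame: top-level rx dispatcher. Parses the frame control
 * fields into the rx attributes, optionally dumps the packet, and hands the
 * frame to the management/control/data validators. Only accepted data frames
 * return _SUCCESS; everything else is dropped by the caller.
 */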
sint validate_recv_frame(_adapter *adapter, union recv_frame *precv_frame);
sint validate_recv_frame(_adapter *adapter, union recv_frame *precv_frame)
{
//check frame subtype, to/from DS, DA and BSSID,
//then check whether the rx seq/frag is duplicated
u8 type;
u8 subtype;
sint retval = _SUCCESS;
struct rx_pkt_attrib *pattrib = & precv_frame->u.hdr.attrib;
u8 *ptr = precv_frame->u.hdr.rx_data;
u8 ver =(unsigned char) (*ptr)&0x3 ;
#ifdef CONFIG_FIND_BEST_CHANNEL
struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
#endif
#ifdef CONFIG_TDLS
struct tdls_info *ptdlsinfo = &adapter->tdlsinfo;
#endif //CONFIG_TDLS
#ifdef CONFIG_WAPI_SUPPORT
PRT_WAPI_T pWapiInfo = &adapter->wapiInfo;
struct recv_frame_hdr *phdr = &precv_frame->u.hdr;
u8 wai_pkt = 0;
u16 sc;
u8 external_len = 0;
#endif
_func_enter_;
#ifdef CONFIG_FIND_BEST_CHANNEL
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
if (ch_set_idx >= 0)
pmlmeext->channel_set[ch_set_idx].rx_count++;
}
#endif
#ifdef CONFIG_TDLS
if(ptdlsinfo->ch_sensing==1 && ptdlsinfo->cur_channel !=0){
ptdlsinfo->collect_pkt_num[ptdlsinfo->cur_channel-1]++;
}
#endif //CONFIG_TDLS
#ifdef RTK_DMP_PLATFORM
if ( 0 )
{
DBG_871X("++\n");
{
int i;
for(i=0; i<64;i=i+8)
DBG_871X("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:", *(ptr+i),
*(ptr+i+1), *(ptr+i+2) ,*(ptr+i+3) ,*(ptr+i+4),*(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
}
DBG_871X("--\n");
}
#endif //RTK_DMP_PLATFORM
//add version chk
if(ver!=0){
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("validate_recv_data_frame fail! (ver!=0)\n"));
retval= _FAIL;
goto exit;
}
type = GetFrameType(ptr);
subtype = GetFrameSubType(ptr); //bit(7)~bit(2)
pattrib->to_fr_ds = get_tofr_ds(ptr);
pattrib->frag_num = GetFragNum(ptr);
pattrib->seq_num = GetSequence(ptr);
pattrib->pw_save = GetPwrMgt(ptr);
pattrib->mfrag = GetMFrag(ptr);
pattrib->mdata = GetMData(ptr);
pattrib->privacy = GetPrivacy(ptr);
pattrib->order = GetOrder(ptr);
#ifdef CONFIG_WAPI_SUPPORT
sc = (pattrib->seq_num<<4) | pattrib->frag_num;
#endif
#if 1 //Dump rx packets
{
u8 bDumpRxPkt;
rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
if(bDumpRxPkt ==1){//dump all rx packets
int i;
DBG_871X("############################# \n");
for(i=0; i<64;i=i+8)
DBG_871X("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
*(ptr+i+1), *(ptr+i+2) ,*(ptr+i+3) ,*(ptr+i+4),*(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
DBG_871X("############################# \n");
}
else if(bDumpRxPkt ==2){
if(type== WIFI_MGT_TYPE){
int i;
DBG_871X("############################# \n");
for(i=0; i<64;i=i+8)
DBG_871X("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
*(ptr+i+1), *(ptr+i+2) ,*(ptr+i+3) ,*(ptr+i+4),*(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
DBG_871X("############################# \n");
}
}
else if(bDumpRxPkt ==3){
if(type== WIFI_DATA_TYPE){
int i;
DBG_871X("############################# \n");
for(i=0; i<64;i=i+8)
DBG_871X("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
*(ptr+i+1), *(ptr+i+2) ,*(ptr+i+3) ,*(ptr+i+4),*(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
DBG_871X("############################# \n");
}
}
}
#endif
switch (type)
{
case WIFI_MGT_TYPE: //mgnt
retval = validate_recv_mgnt_frame(adapter, precv_frame);
if (retval == _FAIL)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("validate_recv_mgnt_frame fail\n"));
}
retval = _FAIL; // only data frame return _SUCCESS
break;
case WIFI_CTRL_TYPE: //ctrl
retval = validate_recv_ctrl_frame(adapter, precv_frame);
if (retval == _FAIL)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("validate_recv_ctrl_frame fail\n"));
}
retval = _FAIL; // only data frame return _SUCCESS
break;
case WIFI_DATA_TYPE: //data
#ifdef CONFIG_WAPI_SUPPORT
if(pattrib->qos)
external_len = 2;
else
external_len= 0;
wai_pkt = rtw_wapi_is_wai_packet(adapter,ptr);
phdr->bIsWaiPacket = wai_pkt;
if(wai_pkt !=0){
if(sc != adapter->wapiInfo.wapiSeqnumAndFragNum)
{
adapter->wapiInfo.wapiSeqnumAndFragNum = sc;
}
else
{
retval = _FAIL;
break;
}
}
else{
if(rtw_wapi_drop_for_key_absent(adapter,GetAddr2Ptr(ptr))){
retval=_FAIL;
WAPI_TRACE(WAPI_RX,"drop for key absent for rx \n");
break;
}
}
#endif
rtw_led_control(adapter, LED_CTL_RX);
pattrib->qos = (subtype & BIT(7))? 1:0;
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL)
{
struct recv_priv *precvpriv = &adapter->recvpriv;
//RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("validate_recv_data_frame fail\n"));
precvpriv->rx_drop++;
}
break;
default:
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("validate_recv_data_frame fail! type=0x%x\n", type));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME validate_recv_data_frame fail! type=0x%x\n", type);
#endif
retval = _FAIL;
break;
}
exit:
_func_exit_;
return retval;
}
//remove the wlanhdr and add the eth_hdr
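//Strips the 802.11 header, IV and (for RFC1042/Bridge-Tunnel frames) the SNAP
//header, then rebuilds an Ethernet header in place: DA, SA and either the
//original EtherType or the remaining length. MP-mode frames get a 0x8712
//EtherType and 24 bytes of rx status prepended instead.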
#if 1
sint wlanhdr_to_ethhdr ( union recv_frame *precvframe);
sint wlanhdr_to_ethhdr ( union recv_frame *precvframe)
{
sint rmv_len;
u16 eth_type, len;
u8 bsnaphdr;
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
sint ret=_SUCCESS;
_adapter *adapter =precvframe->u.hdr.adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *ptr = get_recvframe_data(precvframe) ; // point to frame_ctrl field
struct rx_pkt_attrib *pattrib = & precvframe->u.hdr.attrib;
_func_enter_;
if(pattrib->encrypt){
recvframe_pull_tail(precvframe, pattrib->icv_len);
}
psnap=(struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
psnap_type=ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
//eth_type = (psnap_type[0] << 8) | psnap_type[1];
if((_rtw_memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
(_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == _FALSE) &&
(_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2)==_FALSE) )||
//eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
_rtw_memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)){
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
bsnaphdr = _TRUE;
}
else {
/* Leave Ethernet header part of hdr and full payload */
bsnaphdr = _FALSE;
}
rmv_len = pattrib->hdrlen + pattrib->iv_len +(bsnaphdr?SNAP_SIZE:0);
len = precvframe->u.hdr.len - rmv_len;
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x ===\n\n", pattrib->hdrlen, pattrib->iv_len));
_rtw_memcpy(ð_type, ptr+rmv_len, 2);
eth_type= ntohs((unsigned short )eth_type); //pattrib->ether_type
pattrib->eth_type = eth_type;
if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE))
{
ptr += rmv_len ;
*ptr = 0x87;
*(ptr+1) = 0x12;
eth_type = 0x8712;
// append rx status for mp test packets
ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2)-24);
_rtw_memcpy(ptr, get_rxmem(precvframe), 24);
ptr+=24;
}
else {
ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+ (bsnaphdr?2:0)));
}
_rtw_memcpy(ptr, pattrib->dst, ETH_ALEN);
_rtw_memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
if(!bsnaphdr) {
len = htons(len);
_rtw_memcpy(ptr+12, &len, 2);
}
_func_exit_;
return ret;
}
#else
sint wlanhdr_to_ethhdr ( union recv_frame *precvframe)
{
sint rmv_len;
u16 eth_type;
u8 bsnaphdr;
u8 *psnap_type;
struct ieee80211_snap_hdr *psnap;
sint ret=_SUCCESS;
_adapter *adapter =precvframe->u.hdr.adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8* ptr = get_recvframe_data(precvframe) ; // point to frame_ctrl field
struct rx_pkt_attrib *pattrib = & precvframe->u.hdr.attrib;
struct _vlan *pvlan = NULL;
_func_enter_;
psnap=(struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
psnap_type=ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
if (psnap->dsap==0xaa && psnap->ssap==0xaa && psnap->ctrl==0x03)
{
if (_rtw_memcmp(psnap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN))
bsnaphdr=_TRUE;//wlan_pkt_format = WLAN_PKT_FORMAT_SNAP_RFC1042;
else if (_rtw_memcmp(psnap->oui, SNAP_HDR_APPLETALK_DDP, WLAN_IEEE_OUI_LEN) &&
_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_DDP, 2) )
bsnaphdr=_TRUE; //wlan_pkt_format = WLAN_PKT_FORMAT_APPLETALK;
else if (_rtw_memcmp( psnap->oui, oui_8021h, WLAN_IEEE_OUI_LEN))
bsnaphdr=_TRUE; //wlan_pkt_format = WLAN_PKT_FORMAT_SNAP_TUNNEL;
else {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("drop pkt due to invalid frame format!\n"));
ret= _FAIL;
goto exit;
}
} else
bsnaphdr=_FALSE;//wlan_pkt_format = WLAN_PKT_FORMAT_OTHERS;
rmv_len = pattrib->hdrlen + pattrib->iv_len +(bsnaphdr?SNAP_SIZE:0);
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("===pattrib->hdrlen: %x, pattrib->iv_len:%x ===\n", pattrib->hdrlen, pattrib->iv_len));
if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _TRUE)
{
ptr += rmv_len ;
*ptr = 0x87;
*(ptr+1) = 0x12;
//back to original pointer
ptr -= rmv_len;
}
ptr += rmv_len ;
_rtw_memcpy(ð_type, ptr, 2);
eth_type= ntohs((unsigned short )eth_type); //pattrib->ether_type
ptr +=2;
if(pattrib->encrypt){
recvframe_pull_tail(precvframe, pattrib->icv_len);
}
if(eth_type == 0x8100) //vlan
{
pvlan = (struct _vlan *) ptr;
//eth_type = get_vlan_encap_proto(pvlan);
//eth_type = pvlan->h_vlan_encapsulated_proto;//?
rmv_len += 4;
ptr+=4;
}
if(eth_type==0x0800)//ip
{
//struct iphdr* piphdr = (struct iphdr*) ptr;
//__u8 tos = (unsigned char)(pattrib->priority & 0xff);
//piphdr->tos = tos;
//if (piphdr->protocol == 0x06)
//{
// RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("@@@===recv tcp len:%d @@@===\n", precvframe->u.hdr.len));
//}
}
else if(eth_type==0x8712)// append rx status for mp test packets
{
//ptr -= 16;
//_rtw_memcpy(ptr, get_rxmem(precvframe), 16);
}
else
{
#ifdef PLATFORM_OS_XP
NDIS_PACKET_8021Q_INFO VlanPriInfo;
UINT32 UserPriority = precvframe->u.hdr.attrib.priority;
UINT32 VlanID = (pvlan!=NULL ? get_vlan_id(pvlan) : 0 );
VlanPriInfo.Value = // Get current value.
NDIS_PER_PACKET_INFO_FROM_PACKET(precvframe->u.hdr.pkt, Ieee8021QInfo);
VlanPriInfo.TagHeader.UserPriority = UserPriority;
VlanPriInfo.TagHeader.VlanId = VlanID ;
VlanPriInfo.TagHeader.CanonicalFormatId = 0; // Should be zero.
VlanPriInfo.TagHeader.Reserved = 0; // Should be zero.
NDIS_PER_PACKET_INFO_FROM_PACKET(precvframe->u.hdr.pkt, Ieee8021QInfo) = VlanPriInfo.Value;
#endif
}
if(eth_type==0x8712)// append rx status for mp test packets
{
ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2)-24);
_rtw_memcpy(ptr, get_rxmem(precvframe), 24);
ptr+=24;
}
else
ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2));
_rtw_memcpy(ptr, pattrib->dst, ETH_ALEN);
_rtw_memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
eth_type = htons((unsigned short)eth_type) ;
_rtw_memcpy(ptr+12, ð_type, 2);
exit:
_func_exit_;
return ret;
}
#endif
#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
#ifdef PLATFORM_LINUX
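// recvframe_expand_pkt: for SDIO/GSPI rx without CONFIG_SDIO_RX_COPY, copy
// the first fragment into a freshly allocated, larger skb (data and IP header
// kept 8-byte aligned) so the remaining fragments can be appended during
// defragmentation.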
static void recvframe_expand_pkt(
PADAPTER padapter,
union recv_frame *prframe)
{
struct recv_frame_hdr *pfhdr;
_pkt *ppkt;
u8 shift_sz;
u32 alloc_sz;
pfhdr = &prframe->u.hdr;
// 6 is for IP header 8 bytes alignment in QoS packet case.
if (pfhdr->attrib.qos)
shift_sz = 6;
else
shift_sz = 0;
// for first fragment packet, need to allocate
// (1536 + RXDESC_SIZE + drvinfo_sz) to reassemble packet
// 8 is for skb->data 8 bytes alignment.
// alloc_sz = _RND(1536 + RXDESC_SIZE + pfhdr->attrib.drvinfosize + shift_sz + 8, 128);
alloc_sz = 1664; // round (1536 + 24 + 32 + shift_sz + 8) to 128 bytes alignment
//3 1. alloc new skb
// prepare extra space for 4 bytes alignment
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/netdev@vger.kernel.org/msg17214.html
ppkt = dev_alloc_skb(alloc_sz);
if (ppkt) ppkt->dev = padapter->pnetdev;
#else
ppkt = netdev_alloc_skb(padapter->pnetdev, alloc_sz);
#endif
if (!ppkt) return; // no way to expand
//3 2. Prepare new skb to replace & release old skb
// force ppkt->data at 8-byte alignment address
skb_reserve(ppkt, 8 - ((SIZE_PTR)ppkt->data & 7));
// force ip_hdr at 8-byte alignment address according to shift_sz
skb_reserve(ppkt, shift_sz);
// copy data to new pkt
_rtw_memcpy(skb_put(ppkt, pfhdr->len), pfhdr->rx_data, pfhdr->len);
dev_kfree_skb_any(pfhdr->pkt);
// attach new pkt to recvframe
pfhdr->pkt = ppkt;
pfhdr->rx_head = ppkt->head;
pfhdr->rx_data = ppkt->data;
pfhdr->rx_tail = skb_tail_pointer(ppkt);
pfhdr->rx_end = skb_end_pointer(ppkt);
}
#else
#warning "recvframe_expand_pkt not implement, defrag may crash system"
#endif
#endif
//perform defrag
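//recvframe_defrag: the first entry of defrag_q must be fragment 0 and each
//following fragment must arrive in order. Their payloads (after the 802.11
//header and IV) are appended to the first frame, the accumulated frame's ICV
//is trimmed before each append, and the reassembled frame is returned (or
//NULL, with the whole queue freed, on any sequence error).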
union recv_frame * recvframe_defrag(_adapter *adapter,_queue *defrag_q);
union recv_frame * recvframe_defrag(_adapter *adapter,_queue *defrag_q)
{
_list *plist, *phead;
u8 *data,wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr,*pnfhdr;
union recv_frame* prframe, *pnextrframe;
_queue *pfree_recv_queue;
_func_enter_;
curfragnum=0;
pfree_recv_queue=&adapter->recvpriv.free_recv_queue;
phead = get_list_head(defrag_q);
plist = get_next(phead);
prframe = LIST_CONTAINOR(plist, union recv_frame, u);
pfhdr=&prframe->u.hdr;
rtw_list_delete(&(prframe->u.list));
if(curfragnum!=pfhdr->attrib.frag_num)
{
//the first fragment number must be 0
//free the whole queue
rtw_free_recvframe(prframe, pfree_recv_queue);
rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
return NULL;
}
#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
#ifndef CONFIG_SDIO_RX_COPY
recvframe_expand_pkt(adapter, prframe);
#endif
#endif
curfragnum++;
plist= get_list_head(defrag_q);
plist = get_next(plist);
data=get_recvframe_data(prframe);
while(rtw_end_of_queue_search(phead, plist) == _FALSE)
{
pnextrframe = LIST_CONTAINOR(plist, union recv_frame , u);
pnfhdr=&pnextrframe->u.hdr;
//check the fragment sequence (2nd ~n fragment frame)
if(curfragnum!=pnfhdr->attrib.frag_num)
{
//the fragment number must be increasing (after decache)
//release the defrag_q & prframe
rtw_free_recvframe(prframe, pfree_recv_queue);
rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
return NULL;
}
curfragnum++;
//copy the 2nd~n fragment frame's payload to the first fragment
//get the 2nd~last fragment frame's payload
wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
recvframe_pull(pnextrframe, wlanhdr_offset);
//append to first fragment frame's tail (if privacy frame, pull the ICV)
recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
//memcpy
_rtw_memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
recvframe_put(prframe, pnfhdr->len);
pfhdr->attrib.icv_len=pnfhdr->attrib.icv_len;
plist = get_next(plist);
}
//free the defrag_q queue and return the prframe
rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("Performance defrag!!!!!\n"));
_func_exit_;
return prframe;
}
//check if need to defrag, if needed queue the frame to defrag_q
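//Non-fragmented frames pass straight through. Fragments are queued on the
//sending station's defrag_q; when the last fragment (more-frag bit clear,
//nonzero fragment number) arrives, recvframe_defrag() reassembles the frame.
//Protected frames are MIC-checked after reassembly.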
union recv_frame* recvframe_chk_defrag(PADAPTER padapter, union recv_frame *precv_frame)
{
u8 ismfrag;
u8 fragnum;
u8 *psta_addr;
struct recv_frame_hdr *pfhdr;
struct sta_info *psta;
struct sta_priv *pstapriv;
_list *phead;
union recv_frame *prtnframe = NULL;
_queue *pfree_recv_queue, *pdefrag_q;
_func_enter_;
pstapriv = &padapter->stapriv;
pfhdr = &precv_frame->u.hdr;
pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
//need to define struct of wlan header frame ctrl
ismfrag = pfhdr->attrib.mfrag;
fragnum = pfhdr->attrib.frag_num;
psta_addr = pfhdr->attrib.ta;
psta = rtw_get_stainfo(pstapriv, psta_addr);
if (psta == NULL)
{
u8 type = GetFrameType(pfhdr->rx_data);
if (type != WIFI_DATA_TYPE) {
psta = rtw_get_bcmc_stainfo(padapter);
//guard against a missing bc/mc sta entry to avoid a NULL dereference
pdefrag_q = (psta != NULL) ? &psta->sta_recvpriv.defrag_q : NULL;
} else
pdefrag_q = NULL;
}
else
pdefrag_q = &psta->sta_recvpriv.defrag_q;
if ((ismfrag==0) && (fragnum==0))
{
prtnframe = precv_frame;//isn't a fragment frame
}
if (ismfrag==1)
{
//0~(n-1) fragment frame
//enqueue to defrag_q
if(pdefrag_q != NULL)
{
if(fragnum==0)
{
//the first fragment
if(_rtw_queue_empty(pdefrag_q) == _FALSE)
{
//free current defrag_q
rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
}
}
//Then enqueue the 0~(n-1) fragment into the defrag_q
//_rtw_spinlock(&pdefrag_q->lock);
phead = get_list_head(pdefrag_q);
rtw_list_insert_tail(&pfhdr->list, phead);
//_rtw_spinunlock(&pdefrag_q->lock);
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("Enqueuq: ismfrag = %d, fragnum= %d\n", ismfrag,fragnum));
prtnframe=NULL;
}
else
{
//can't find this ta's defrag_queue, so free this recv_frame
rtw_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe=NULL;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("Free because pdefrag_q ==NULL: ismfrag = %d, fragnum= %d\n", ismfrag, fragnum));
}
}
if((ismfrag==0)&&(fragnum!=0))
{
//the last fragment frame
//enqueue the last fragment
if(pdefrag_q != NULL)
{
//_rtw_spinlock(&pdefrag_q->lock);
phead = get_list_head(pdefrag_q);
rtw_list_insert_tail(&pfhdr->list,phead);
//_rtw_spinunlock(&pdefrag_q->lock);
//call recvframe_defrag to defrag
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("defrag: ismfrag = %d, fragnum= %d\n", ismfrag, fragnum));
precv_frame = recvframe_defrag(padapter, pdefrag_q);
prtnframe=precv_frame;
}
else
{
//can't find this ta's defrag_queue, so free this recv_frame
rtw_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe=NULL;
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("Free because pdefrag_q ==NULL: ismfrag = %d, fragnum= %d\n", ismfrag,fragnum));
}
}
if((prtnframe!=NULL)&&(prtnframe->u.hdr.attrib.privacy))
{
//after defrag we must check tkip mic code
if(recvframe_chkmic(padapter, prtnframe)==_FAIL)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("recvframe_chkmic(padapter, prtnframe)==_FAIL\n"));
rtw_free_recvframe(prtnframe,pfree_recv_queue);
prtnframe=NULL;
}
}
_func_exit_;
return prtnframe;
}
#define ENDIAN_FREE 1
int amsdu_to_msdu(_adapter *padapter, union recv_frame *prframe);
int amsdu_to_msdu(_adapter *padapter, union recv_frame *prframe)
{
#if defined (PLATFORM_LINUX) || defined (PLATFORM_FREEBSD) //for amsdu TP improvement,Creator: Thomas
int a_len, padding_len;
u16 eth_type, nSubframe_Length;
u8 nr_subframes, i;
unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
#ifndef PLATFORM_FREEBSD
unsigned char *data_ptr;
_pkt *sub_skb,*subframes[MAX_SUBFRAME_COUNT];
#endif //PLATFORM_FREEBSD
struct recv_priv *precvpriv = &padapter->recvpriv;
_queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
int ret = _SUCCESS;
#ifdef PLATFORM_FREEBSD
struct mbuf *sub_m=NULL, *subframes[MAX_SUBFRAME_COUNT];
u8 *ptr,offset;
#endif //PLATFORM_FREEBSD
nr_subframes = 0;
pattrib = &prframe->u.hdr.attrib;
recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
if(prframe->u.hdr.attrib.iv_len >0)
{
recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
}
a_len = prframe->u.hdr.len;
pdata = prframe->u.hdr.rx_data;
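//Each A-MSDU subframe starts with a 14-byte Ethernet-like header
//(DA[6], SA[6], 2-byte big-endian length) followed by the MSDU payload;
//all subframes except the last are padded to a 4-byte boundary.
//The loop below splits the aggregate into individual per-subframe packets.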
while(a_len > ETH_HLEN) {
/* Offset 12 denotes the 2 MAC addresses */
#ifdef ENDIAN_FREE
//nSubframe_Length = ntohs(*((u16*)(pdata + 12)));
nSubframe_Length = RTW_GET_BE16(pdata + 12);
#else // ENDIAN_FREE
nSubframe_Length = *((u16*)(pdata + 12));
//==m==>change the length order
nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8);
//ntohs(nSubframe_Length);
#endif // ENDIAN_FREE
if( a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length) ) {
DBG_871X("nRemain_Length is %d and nSubframe_Length is : %d\n",a_len,nSubframe_Length);
goto exit;
}
#ifndef PLATFORM_FREEBSD
/* move the data pointer to the data content */
pdata += ETH_HLEN;
a_len -= ETH_HLEN;
/* Allocate new skb for releasing to upper layer */
#ifdef CONFIG_SKB_COPY
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
if(sub_skb)
{
skb_reserve(sub_skb, 12);
data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
_rtw_memcpy(data_ptr, pdata, nSubframe_Length);
}
else
#endif // CONFIG_SKB_COPY
{
sub_skb = skb_clone(prframe->u.hdr.pkt, GFP_ATOMIC);
if(sub_skb)
{
sub_skb->data = pdata;
sub_skb->len = nSubframe_Length;
skb_set_tail_pointer(sub_skb, nSubframe_Length);
}
else
{
DBG_871X("skb_clone() Fail!!! , nr_subframes = %d\n",nr_subframes);
break;
}
}
#else // PLATFORM_FREEBSD
//PLATFORM_FREEBSD
//Allocate a mbuff,
//sub_m =m_devget(pdata, nSubframe_Length+12, 12, padapter->pifp,NULL);
sub_m =m_devget(pdata, nSubframe_Length+ETH_HLEN, ETHER_ALIGN, padapter->pifp,NULL);
pdata += ETH_HLEN;
a_len -= ETH_HLEN;
#endif // PLATFORM_FREEBSD
#ifndef PLATFORM_FREEBSD
//sub_skb->dev = padapter->pnetdev;
subframes[nr_subframes++] = sub_skb;
#else //PLATFORM_FREEBSD
//PLATFORM_FREEBSD
subframes[nr_subframes++] = sub_m;
#endif //PLATFORM_FREEBSD
if(nr_subframes >= MAX_SUBFRAME_COUNT) {
DBG_871X("ParseSubframe(): Too many Subframes! Packets dropped!\n");
break;
}
pdata += nSubframe_Length;
a_len -= nSubframe_Length;
if(a_len != 0) {
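//every subframe except the last is padded to a multiple of 4 bytes;
//e.g. a 39-byte payload plus the 14-byte subframe header is 53 bytes,
//so 3 padding bytes follow before the next subframe starts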
padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
if(padding_len == 4) {
padding_len = 0;
}
if(a_len < padding_len) {
goto exit;
}
pdata += padding_len;
a_len -= padding_len;
}
}
for(i=0; i<nr_subframes; i++){
#ifndef PLATFORM_FREEBSD
sub_skb = subframes[i];
/* convert hdr + possible LLC headers into Ethernet header */
#ifdef ENDIAN_FREE
//eth_type = ntohs(*(u16*)&sub_skb->data[6]);
eth_type = RTW_GET_BE16(&sub_skb->data[6]);
#else // ENDIAN_FREE
eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7];
#endif // ENDIAN_FREE
if (sub_skb->len >= 8 &&
((_rtw_memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
_rtw_memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE) )) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
skb_pull(sub_skb, SNAP_SIZE);
_rtw_memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
_rtw_memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_skb->len);
_rtw_memcpy(skb_push(sub_skb, 2), &len, 2);
_rtw_memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
_rtw_memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
}
/* Indicate the packets to the upper layer */
if (sub_skb) {
//memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
#ifdef CONFIG_BR_EXT
// Insert NAT2.5 RX here!
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
void *br_port = NULL;
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
br_port = padapter->pnetdev->br_port;
#else // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
rcu_read_lock();
br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
rcu_read_unlock();
#endif // (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
if( br_port && (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) )
{
int nat25_handle_frame(_adapter *priv, struct sk_buff *skb);
if (nat25_handle_frame(padapter, sub_skb) == -1) {
//priv->ext_stats.rx_data_drops++;
//DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n");
//return FAIL;
#if 1
// bypass this frame to upper layer!!
#else
dev_kfree_skb_any(sub_skb);
continue;
#endif
}
}
#endif // CONFIG_BR_EXT
sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
sub_skb->dev = padapter->pnetdev;
#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
sub_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
sub_skb->ip_summed = CHECKSUM_NONE;
}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
sub_skb->ip_summed = CHECKSUM_NONE;
#endif //CONFIG_TCP_CSUM_OFFLOAD_RX
netif_rx(sub_skb);
}
#else //PLATFORM_FREEBSD
//PLATFORM_FREEBSD
sub_m = subframes[i];
ptr=mtod(sub_m, u8 *);
offset=ETH_HLEN;
/* convert hdr + possible LLC headers into Ethernet header */
#ifdef ENDIAN_FREE
eth_type = ntohs(*(u16*)&ptr[offset+6]);
#else // ENDIAN_FREE
eth_type = ( ptr[offset+6] << 8) | ptr[offset+7];
#endif // ENDIAN_FREE
if (sub_m->m_pkthdr.len >= ETH_HLEN+8 &&
((_rtw_memcmp(ptr+ETH_HLEN, rtw_rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
_rtw_memcmp(ptr+ETH_HLEN, rtw_bridge_tunnel_header, SNAP_SIZE) )) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
offset+=SNAP_SIZE;
_rtw_memcpy(&ptr[offset-ETH_ALEN], pattrib->src, ETH_ALEN);
offset-=ETH_ALEN;
_rtw_memcpy(&ptr[offset-ETH_ALEN], pattrib->dst, ETH_ALEN);
offset-=ETH_ALEN;
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_m->m_pkthdr.len-offset);
_rtw_memcpy(&ptr[offset- 2], &len, 2);
offset-=2;
_rtw_memcpy(&ptr[offset-ETH_ALEN], pattrib->src, ETH_ALEN);
offset-=ETH_ALEN;
_rtw_memcpy(&ptr[offset-ETH_ALEN], pattrib->dst, ETH_ALEN);
offset-=ETH_ALEN;
}
m_adj(sub_m,offset);
/* Indicate the packets to the upper layer */
if (sub_m) {
#if 0
#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
if ( (pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1) ) {
sub_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
sub_skb->ip_summed = CHECKSUM_NONE;
}
#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
sub_skb->ip_summed = CHECKSUM_NONE;
#endif //CONFIG_TCP_CSUM_OFFLOAD_RX
#endif //0
if ( ((u32)(mtod(sub_m, caddr_t) + 14) % 4) != 0)
printf("%s()-%d: mtod(sub_m) = %p\n", __FUNCTION__, __LINE__, mtod(sub_m, caddr_t));
#ifdef CONFIG_RX_INDICATE_QUEUE
IF_ENQUEUE(&precvpriv->rx_indicate_queue, sub_m);
if (_IF_QLEN(&precvpriv->rx_indicate_queue) <= 1) {
taskqueue_enqueue(taskqueue_thread, &precvpriv->rx_indicate_tasklet);
}
#else // CONFIG_RX_INDICATE_QUEUE
(*padapter->pifp->if_input)(padapter->pifp, sub_m);
#endif // CONFIG_RX_INDICATE_QUEUE
}
#endif //PLATFORM_FREEBSD
}
exit:
prframe->u.hdr.len=0;
rtw_free_recvframe(prframe, pfree_recv_queue);//free this recv_frame
return ret;
#else // || defined (PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
#ifdef PLATFORM_WINDOWS
_irqL irql;
#endif //PLATFORM_WINDOWS
unsigned char *ptr, *pdata, *pbuf, *psnap_type;
union recv_frame *pnrframe, *pnrframe_new;
int a_len, mv_len, padding_len;
u16 eth_type, type_len;
u8 bsnaphdr;
struct ieee80211_snap_hdr *psnap;
struct _vlan *pvlan;
struct recv_priv *precvpriv = &padapter->recvpriv;
_queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
int ret = _SUCCESS;
#ifdef PLATFORM_WINDOWS
struct recv_buf *precvbuf = prframe->u.hdr.precvbuf;
#endif //PLATFORM_WINDOWS
a_len = prframe->u.hdr.len - prframe->u.hdr.attrib.hdrlen;
recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
if(prframe->u.hdr.attrib.iv_len >0)
{
recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
}
pdata = prframe->u.hdr.rx_data;
prframe->u.hdr.len=0;
pnrframe = prframe;
do{
mv_len=0;
pnrframe->u.hdr.rx_data = pnrframe->u.hdr.rx_tail = pdata;
ptr = pdata;
_rtw_memcpy(pnrframe->u.hdr.attrib.dst, ptr, ETH_ALEN);
ptr+=ETH_ALEN;
_rtw_memcpy(pnrframe->u.hdr.attrib.src, ptr, ETH_ALEN);
ptr+=ETH_ALEN;
_rtw_memcpy(&type_len, ptr, 2);
type_len= ntohs((unsigned short )type_len);
ptr +=2;
mv_len += ETH_HLEN;
recvframe_put(pnrframe, type_len+ETH_HLEN);//update tail;
if(pnrframe->u.hdr.rx_data >= pnrframe->u.hdr.rx_tail || type_len<8)
{
//panic("pnrframe->u.hdr.rx_data >= pnrframe->u.hdr.rx_tail || type_len<8\n");
rtw_free_recvframe(pnrframe, pfree_recv_queue);
goto exit;
}
psnap=(struct ieee80211_snap_hdr *)(ptr);
psnap_type=ptr+SNAP_SIZE;
if (psnap->dsap==0xaa && psnap->ssap==0xaa && psnap->ctrl==0x03)
{
if ( _rtw_memcmp(psnap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN))
{
bsnaphdr=_TRUE;//wlan_pkt_format = WLAN_PKT_FORMAT_SNAP_RFC1042;
}
else if (_rtw_memcmp(psnap->oui, SNAP_HDR_APPLETALK_DDP, WLAN_IEEE_OUI_LEN) &&
_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_DDP, 2) )
{
bsnaphdr=_TRUE; //wlan_pkt_format = WLAN_PKT_FORMAT_APPLETALK;
}
else if (_rtw_memcmp( psnap->oui, oui_8021h, WLAN_IEEE_OUI_LEN))
{
bsnaphdr=_TRUE; //wlan_pkt_format = WLAN_PKT_FORMAT_SNAP_TUNNEL;
}
else
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("drop pkt due to invalid frame format!\n"));
//KeBugCheckEx(0x87123333, 0xe0, 0x4c, 0x87, 0xdd);
//panic("0x87123333, 0xe0, 0x4c, 0x87, 0xdd\n");
rtw_free_recvframe(pnrframe, pfree_recv_queue);
goto exit;
}
}
else
{
bsnaphdr=_FALSE;//wlan_pkt_format = WLAN_PKT_FORMAT_OTHERS;
}
ptr += (bsnaphdr?SNAP_SIZE:0);
_rtw_memcpy(ð_type, ptr, 2);
eth_type= ntohs((unsigned short )eth_type); //pattrib->ether_type
mv_len+= 2+(bsnaphdr?SNAP_SIZE:0);
ptr += 2;//now move to iphdr;
pvlan = NULL;
if(eth_type == 0x8100) //vlan
{
pvlan = (struct _vlan *)ptr;
ptr+=4;
mv_len+=4;
}
if(eth_type==0x0800)//ip
{
struct iphdr* piphdr = (struct iphdr*)ptr;
if (piphdr->protocol == 0x06)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("@@@===recv tcp len:%d @@@===\n", pnrframe->u.hdr.len));
}
}
#ifdef PLATFORM_OS_XP
else
{
NDIS_PACKET_8021Q_INFO VlanPriInfo;
UINT32 UserPriority = pnrframe->u.hdr.attrib.priority;
UINT32 VlanID = (pvlan!=NULL ? get_vlan_id(pvlan) : 0 );
VlanPriInfo.Value = // Get current value.
NDIS_PER_PACKET_INFO_FROM_PACKET(pnrframe->u.hdr.pkt, Ieee8021QInfo);
VlanPriInfo.TagHeader.UserPriority = UserPriority;
VlanPriInfo.TagHeader.VlanId = VlanID;
VlanPriInfo.TagHeader.CanonicalFormatId = 0; // Should be zero.
VlanPriInfo.TagHeader.Reserved = 0; // Should be zero.
NDIS_PER_PACKET_INFO_FROM_PACKET(pnrframe->u.hdr.pkt, Ieee8021QInfo) = VlanPriInfo.Value;
}
#endif //PLATFORM_OS_XP
pbuf = recvframe_pull(pnrframe, (mv_len-sizeof(struct ethhdr)));
_rtw_memcpy(pbuf, pnrframe->u.hdr.attrib.dst, ETH_ALEN);
_rtw_memcpy(pbuf+ETH_ALEN, pnrframe->u.hdr.attrib.src, ETH_ALEN);
eth_type = htons((unsigned short)eth_type) ;
_rtw_memcpy(pbuf+12, ð_type, 2);
padding_len = (4) - ((type_len + ETH_HLEN)&(4-1));
a_len -= (type_len + ETH_HLEN + padding_len) ;
#if 0
if(a_len > ETH_HLEN)
{
pnrframe_new = rtw_alloc_recvframe(pfree_recv_queue);
if(pnrframe_new)
{
_pkt *pskb_copy;
unsigned int copy_len = pnrframe->u.hdr.len;
_rtw_init_listhead(&pnrframe_new->u.hdr.list);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/netdev@vger.kernel.org/msg17214.html
pskb_copy = dev_alloc_skb(copy_len+64);
#else
pskb_copy = netdev_alloc_skb(padapter->pnetdev, copy_len + 64);
#endif
if(pskb_copy==NULL)
{
DBG_871X("amsdu_to_msdu:can not all(ocate memory for skb copy\n");
}
pnrframe_new->u.hdr.pkt = pskb_copy;
_rtw_memcpy(pskb_copy->data, pnrframe->u.hdr.rx_data, copy_len);
pnrframe_new->u.hdr.rx_data = pnrframe->u.hdr.rx_data;
pnrframe_new->u.hdr.rx_tail = pnrframe->u.hdr.rx_data + copy_len;
if ((padapter->bDriverStopped ==_FALSE)&&( padapter->bSurpriseRemoved==_FALSE))
{
rtw_recv_indicatepkt(padapter, pnrframe_new);//indicate this recv_frame
}
else
{
rtw_free_recvframe(pnrframe_new, pfree_recv_queue);//free this recv_frame
}
}
else
{
DBG_871X("amsdu_to_msdu:can not allocate memory for pnrframe_new\n");
}
}
else
{
if ((padapter->bDriverStopped ==_FALSE)&&( padapter->bSurpriseRemoved==_FALSE))
{
rtw_recv_indicatepkt(padapter, pnrframe);//indicate this recv_frame
}
else
{
rtw_free_recvframe(pnrframe, pfree_recv_queue);//free this recv_frame
}
pnrframe = NULL;
}
#else // 0
//padding_len = (4) - ((type_len + ETH_HLEN)&(4-1));
//a_len -= (type_len + ETH_HLEN + padding_len) ;
pnrframe_new = NULL;
if(a_len > ETH_HLEN)
{
pnrframe_new = rtw_alloc_recvframe(pfree_recv_queue);
if(pnrframe_new)
{
//pnrframe_new->u.hdr.precvbuf = precvbuf;//precvbuf is assigned before call rtw_init_recvframe()
//rtw_init_recvframe(pnrframe_new, precvpriv);
{
#ifdef PLATFORM_LINUX
_pkt *pskb = pnrframe->u.hdr.pkt;
#endif //PLATFORM_LINUX
_rtw_init_listhead(&pnrframe_new->u.hdr.list);
pnrframe_new->u.hdr.len=0;
#ifdef PLATFORM_LINUX
if(pskb)
{
pnrframe_new->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
}
#endif //PLATFORM_LINUX
}
pdata += (type_len + ETH_HLEN + padding_len);
pnrframe_new->u.hdr.rx_head = pnrframe_new->u.hdr.rx_data = pnrframe_new->u.hdr.rx_tail = pdata;
pnrframe_new->u.hdr.rx_end = pdata + a_len + padding_len;//
#ifdef PLATFORM_WINDOWS
pnrframe_new->u.hdr.precvbuf=precvbuf;
_enter_critical_bh(&precvbuf->recvbuf_lock, &irql);
precvbuf->ref_cnt++;
_exit_critical_bh(&precvbuf->recvbuf_lock, &irql);
#endif //PLATFORM_WINDOWS
}
else
{
//panic("pnrframe_new=%x\n", pnrframe_new);
}
}
if ((padapter->bDriverStopped ==_FALSE)&&( padapter->bSurpriseRemoved==_FALSE) )
{
rtw_recv_indicatepkt(padapter, pnrframe);//indicate this recv_frame
}
else
{
rtw_free_recvframe(pnrframe, pfree_recv_queue);//free this recv_frame
}
pnrframe = NULL;
if(pnrframe_new)
{
pnrframe = pnrframe_new;
}
#endif // end defined (PLATFORM_LINUX) || defined (PLATFORM_FREEBSD)
}while(pnrframe);
exit:
return ret;
#endif
}
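//Block-ack Rx reorder helpers: indicate_seq is the window start (WinStart)
//and wsize_b the window size.  802.11 sequence numbers are 12 bits wide,
//so all window arithmetic below is done modulo 4096 (mask 0xFFF).
//check_indicate_seq() accepts or rejects a sequence number and slides the
//window when it equals WinStart or falls beyond WinEnd.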
int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num);
int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
{
u8 wsize = preorder_ctrl->wsize_b;
u16 wend = (preorder_ctrl->indicate_seq + wsize -1) & 0xFFF;//% 4096;
// Rx reorder initialization condition.
if (preorder_ctrl->indicate_seq == 0xFFFF)
{
preorder_ctrl->indicate_seq = seq_num;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d init IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, seq_num);
#endif
//DbgPrint("check_indicate_seq, 1st->indicate_seq=%d\n", precvpriv->indicate_seq);
}
//DbgPrint("enter->check_indicate_seq(): IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num);
// Drop the packet whose SeqNum is smaller than WinStart
if( SN_LESS(seq_num, preorder_ctrl->indicate_seq) )
{
//RT_TRACE(COMP_RX_REORDER, DBG_LOUD, ("CheckRxTsIndicateSeq(): Packet Drop! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, NewSeqNum));
//DbgPrint("CheckRxTsIndicateSeq(): Packet Drop! IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num);
#ifdef DBG_RX_DROP_FRAME
DBG_871X("%s IndicateSeq: %d > NewSeq: %d\n", __FUNCTION__,
preorder_ctrl->indicate_seq, seq_num);
#endif
return _FALSE;
}
//
// Sliding window manipulation. Conditions include:
// 1. Incoming SeqNum is equal to WinStart => window shifts by 1
// 2. Incoming SeqNum is larger than WinEnd => window shifts by N
//
if( SN_EQUAL(seq_num, preorder_ctrl->indicate_seq) )
{
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d SN_EQUAL IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, seq_num);
#endif
}
else if(SN_LESS(wend, seq_num))
{
//RT_TRACE(COMP_RX_REORDER, DBG_LOUD, ("CheckRxTsIndicateSeq(): Window Shift! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, NewSeqNum));
//DbgPrint("CheckRxTsIndicateSeq(): Window Shift! IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num);
// boundary situation, when seq_num crosses 0xFFF
if(seq_num >= (wsize - 1))
preorder_ctrl->indicate_seq = seq_num + 1 -wsize;
else
preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d SN_LESS(wend, seq_num) IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, seq_num);
#endif
}
//DbgPrint("exit->check_indicate_seq(): IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num);
return _TRUE;
}
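//Insert prframe into the pending reorder queue, which is kept sorted by
//ascending sequence number; a frame whose seq_num already exists in the
//queue is treated as a duplicate and rejected (_FALSE) so the caller drops it.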
int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe);
int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
{
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
_queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
_list *phead, *plist;
union recv_frame *pnextrframe;
struct rx_pkt_attrib *pnextattrib;
//DbgPrint("+enqueue_reorder_recvframe()\n");
//_enter_critical_ex(&ppending_recvframe_queue->lock, &irql);
//_rtw_spinlock_ex(&ppending_recvframe_queue->lock);
phead = get_list_head(ppending_recvframe_queue);
plist = get_next(phead);
while(rtw_end_of_queue_search(phead, plist) == _FALSE)
{
pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
pnextattrib = &pnextrframe->u.hdr.attrib;
if(SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
{
plist = get_next(plist);
}
else if( SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
{
//Duplicate entry is found!! Do not insert current entry.
//RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("InsertRxReorderList(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum));
//_exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
return _FALSE;
}
else
{
break;
}
//DbgPrint("enqueue_reorder_recvframe():while\n");
}
//_enter_critical_ex(&ppending_recvframe_queue->lock, &irql);
//_rtw_spinlock_ex(&ppending_recvframe_queue->lock);
rtw_list_delete(&(prframe->u.hdr.list));
rtw_list_insert_tail(&(prframe->u.hdr.list), plist);
//_rtw_spinunlock_ex(&ppending_recvframe_queue->lock);
//_exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
//RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("InsertRxReorderList(): Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum));
return _TRUE;
}
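//Walk the sorted pending queue and indicate every frame that is in order
//with respect to indicate_seq, advancing the window as frames go up.
//When bforced is _TRUE the window start is first reset to the head of the
//queue so indication restarts from the oldest buffered frame.  Returns
//_TRUE while frames are still buffered behind a missing sequence number,
//which tells the caller to (re)arm the reordering timer.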
int recv_indicatepkts_in_order(_adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced);
int recv_indicatepkts_in_order(_adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
{
//_irqL irql;
//u8 bcancelled;
_list *phead, *plist;
union recv_frame *prframe;
struct rx_pkt_attrib *pattrib;
//u8 index = 0;
int bPktInBuf = _FALSE;
struct recv_priv *precvpriv = &padapter->recvpriv;
_queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
//DbgPrint("+recv_indicatepkts_in_order\n");
//_enter_critical_ex(&ppending_recvframe_queue->lock, &irql);
//_rtw_spinlock_ex(&ppending_recvframe_queue->lock);
phead = get_list_head(ppending_recvframe_queue);
plist = get_next(phead);
#if 0
// Check if there is any other indication thread running.
if(pTS->RxIndicateState == RXTS_INDICATE_PROCESSING)
return;
#endif
// Handle the forced indicate case.
if(bforced==_TRUE)
{
if(rtw_is_list_empty(phead))
{
// _exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
//_rtw_spinunlock_ex(&ppending_recvframe_queue->lock);
return _TRUE;
}
prframe = LIST_CONTAINOR(plist, union recv_frame, u);
pattrib = &prframe->u.hdr.attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, pattrib->seq_num);
#endif
}
// Prepare the indication list and indicate.
// Check whether any packet needs to be indicated.
while(!rtw_is_list_empty(phead))
{
prframe = LIST_CONTAINOR(plist, union recv_frame, u);
pattrib = &prframe->u.hdr.attrib;
if(!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num))
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
("recv_indicatepkts_in_order: indicate=%d seq=%d amsdu=%d\n",
preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
#if 0
// This protect buffer from overflow.
if(index >= REORDER_WIN_SIZE)
{
RT_ASSERT(FALSE, ("IndicateRxReorderList(): Buffer overflow!! \n"));
bPktInBuf = TRUE;
break;
}
#endif
plist = get_next(plist);
rtw_list_delete(&(prframe->u.hdr.list));
if(SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
{
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, pattrib->seq_num);
#endif
}
#if 0
index++;
if(index==1)
{
//Cancel previous pending timer.
//PlatformCancelTimer(Adapter, &pTS->RxPktPendingTimer);
if(bforced!=_TRUE)
{
//DBG_871X("_cancel_timer(&preorder_ctrl->reordering_ctrl_timer, &bcancelled);\n");
_cancel_timer(&preorder_ctrl->reordering_ctrl_timer, &bcancelled);
}
}
#endif
//Set this as a lock to make sure that only one thread is indicating packet.
//pTS->RxIndicateState = RXTS_INDICATE_PROCESSING;
// Indicate packets
//RT_ASSERT((index<=REORDER_WIN_SIZE), ("RxReorderIndicatePacket(): Rx Reorder buffer full!! \n"));
//indicate this recv_frame
//DbgPrint("recv_indicatepkts_in_order, indicate_seq=%d, seq_num=%d\n", precvpriv->indicate_seq, pattrib->seq_num);
if(!pattrib->amsdu)
{
//DBG_871X("recv_indicatepkts_in_order, amsdu!=1, indicate_seq=%d, seq_num=%d\n", preorder_ctrl->indicate_seq, pattrib->seq_num);
if ((padapter->bDriverStopped == _FALSE) &&
(padapter->bSurpriseRemoved == _FALSE))
{
rtw_recv_indicatepkt(padapter, prframe);//indicate this recv_frame
}
}
else if(pattrib->amsdu==1)
{
if(amsdu_to_msdu(padapter, prframe)!=_SUCCESS)
{
rtw_free_recvframe(prframe, &precvpriv->free_recv_queue);
}
}
else
{
//error condition;
}
//Update local variables.
bPktInBuf = _FALSE;
}
else
{
bPktInBuf = _TRUE;
break;
}
//DbgPrint("recv_indicatepkts_in_order():while\n");
}
//_rtw_spinunlock_ex(&ppending_recvframe_queue->lock);
//_exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
/*
//Release the indication lock and set to new indication step.
if(bPktInBuf)
{
// Set new pending timer.
//pTS->RxIndicateState = RXTS_INDICATE_REORDER;
//PlatformSetTimer(Adapter, &pTS->RxPktPendingTimer, pHTInfo->RxReorderPendingTime);
//DBG_871X("_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME)\n");
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
}
else
{
//pTS->RxIndicateState = RXTS_INDICATE_IDLE;
}
*/
//_exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
//return _TRUE;
return bPktInBuf;
}
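//A-MPDU Rx reorder entry point:
// s1. convert the 802.11 header to an Ethernet header; non-QoS frames,
//     ARP (eth_type 0x0806), frames with a non-zero ack policy and TIDs
//     with reordering disabled are indicated immediately
// s2. validate/slide the window start via check_indicate_seq()
// s3. insert the frame into the sorted pending queue
// s4. indicate everything now in order; re-arm the reorder timer if frames
//     remain buffered, otherwise cancel it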
int recv_indicatepkt_reorder(_adapter *padapter, union recv_frame *prframe);
int recv_indicatepkt_reorder(_adapter *padapter, union recv_frame *prframe)
{
_irqL irql;
int retval = _SUCCESS;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
_queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
if(!pattrib->amsdu)
{
//s1.
wlanhdr_to_ethhdr(prframe);
if ((pattrib->qos!=1) /*|| pattrib->priority!=0 || IS_MCAST(pattrib->ra)*/
|| (pattrib->eth_type==0x0806) || (pattrib->ack_policy!=0))
{
if ((padapter->bDriverStopped == _FALSE) &&
(padapter->bSurpriseRemoved == _FALSE))
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ recv_indicatepkt_reorder -recv_func recv_indicatepkt\n" ));
rtw_recv_indicatepkt(padapter, prframe);
return _SUCCESS;
}
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s pattrib->qos !=1\n", __FUNCTION__);
#endif
return _FAIL;
}
if (preorder_ctrl->enable == _FALSE)
{
//indicate this recv_frame
preorder_ctrl->indicate_seq = pattrib->seq_num;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, pattrib->seq_num);
#endif
rtw_recv_indicatepkt(padapter, prframe);
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, pattrib->seq_num);
#endif
return _SUCCESS;
}
#ifndef CONFIG_RECV_REORDERING_CTRL
//indicate this recv_frame
rtw_recv_indicatepkt(padapter, prframe);
return _SUCCESS;
#endif
}
else if(pattrib->amsdu==1) //temp filter -> A-MSDUs inside an A-MPDU are not supported yet
{
if (preorder_ctrl->enable == _FALSE)
{
preorder_ctrl->indicate_seq = pattrib->seq_num;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, pattrib->seq_num);
#endif
retval = amsdu_to_msdu(padapter, prframe);
preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
#ifdef DBG_RX_SEQ
DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, __LINE__,
preorder_ctrl->indicate_seq, pattrib->seq_num);
#endif
if(retval != _SUCCESS){
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s amsdu_to_msdu fail\n", __FUNCTION__);
#endif
}
return retval;
}
}
else
{
}
_enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
("recv_indicatepkt_reorder: indicate=%d seq=%d\n",
preorder_ctrl->indicate_seq, pattrib->seq_num));
//s2. check if winstart_b (indicate_seq) needs to be updated
if(!check_indicate_seq(preorder_ctrl, pattrib->seq_num))
{
//pHTInfo->RxReorderDropCounter++;
//ReturnRFDList(Adapter, pRfd);
//RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("RxReorderIndicatePacket() ==> Packet Drop!!\n"));
//_exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
//return _FAIL;
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s check_indicate_seq fail\n", __FUNCTION__);
#endif
#if 0
rtw_recv_indicatepkt(padapter, prframe);
_exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
goto _success_exit;
#else
goto _err_exit;
#endif
}
//s3. Insert the packet into the reorder queue to maintain ordering.
if(!enqueue_reorder_recvframe(preorder_ctrl, prframe))
{
//DbgPrint("recv_indicatepkt_reorder, enqueue_reorder_recvframe fail!\n");
//_exit_critical_ex(&ppending_recvframe_queue->lock, &irql);
//return _FAIL;
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s enqueue_reorder_recvframe fail\n", __FUNCTION__);
#endif
goto _err_exit;
}
//s4.
// Indication process.
// After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets
// with the SeqNum smaller than latest WinStart and buffer other packets.
//
// For Rx Reorder condition:
// 1. All packets with SeqNum smaller than WinStart => Indicate
// 2. All packets with SeqNum larger than or equal to WinStart => Buffer it.
//
//recv_indicatepkts_in_order(padapter, preorder_ctrl, _TRUE);
if(recv_indicatepkts_in_order(padapter, preorder_ctrl, _FALSE)==_TRUE)
{
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
_exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
}
else
{
_exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
_cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
}
_success_exit:
return _SUCCESS;
_err_exit:
_exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
return _FAIL;
}
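//Reorder timer callback: once REORDER_WAIT_TIME elapses with a hole still
//open, force indication of the buffered frames and re-arm the timer only
//if something is still pending afterwards.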
void rtw_reordering_ctrl_timeout_handler(void *pcontext)
{
_irqL irql;
struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
_adapter *padapter = preorder_ctrl->padapter;
_queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
if(padapter->bDriverStopped ||padapter->bSurpriseRemoved)
{
return;
}
//DBG_871X("+rtw_reordering_ctrl_timeout_handler()=>\n");
_enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
if(recv_indicatepkts_in_order(padapter, preorder_ctrl, _TRUE)==_TRUE)
{
_set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
}
_exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
}
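//Hand a received data frame to the network stack.  On an 802.11n (HT)
//association the frame goes through the A-MPDU reorder path above;
//otherwise it is converted to an Ethernet frame and indicated directly.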
int process_recv_indicatepkts(_adapter *padapter, union recv_frame *prframe);
int process_recv_indicatepkts(_adapter *padapter, union recv_frame *prframe)
{
int retval = _SUCCESS;
//struct recv_priv *precvpriv = &padapter->recvpriv;
//struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
#ifdef CONFIG_TDLS
struct sta_info *psta = prframe->u.hdr.psta;
#endif //CONFIG_TDLS
#ifdef CONFIG_80211N_HT
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
#ifdef CONFIG_TDLS
if( (phtpriv->ht_option==_TRUE) ||
((psta->tdls_sta_state & TDLS_LINKED_STATE) &&
(psta->htpriv.ht_option==_TRUE) &&
(psta->htpriv.ampdu_enable==_TRUE))) //B/G/N Mode
#else
if(phtpriv->ht_option==_TRUE) //B/G/N Mode
#endif //CONFIG_TDLS
{
//prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority];
if(recv_indicatepkt_reorder(padapter, prframe)!=_SUCCESS)// including perform A-MPDU Rx Ordering Buffer Control
{
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s recv_indicatepkt_reorder error!\n", __FUNCTION__);
#endif
if ((padapter->bDriverStopped == _FALSE) &&
(padapter->bSurpriseRemoved == _FALSE))
{
retval = _FAIL;
return retval;
}
}
}
else //B/G mode
#endif
{
retval=wlanhdr_to_ethhdr (prframe);
if(retval != _SUCCESS)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("wlanhdr_to_ethhdr: drop pkt \n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s wlanhdr_to_ethhdr error!\n", __FUNCTION__);
#endif
return retval;
}
if ((padapter->bDriverStopped ==_FALSE)&&( padapter->bSurpriseRemoved==_FALSE))
{
//indicate this recv_frame
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func recv_indicatepkt\n" ));
rtw_recv_indicatepkt(padapter, prframe);
}
else
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func free_indicatepkt\n" ));
RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
retval = _FAIL;
return retval;
}
}
return retval;
}
int recv_func_prehandle(_adapter *padapter, union recv_frame *rframe)
{
int ret = _SUCCESS;
struct rx_pkt_attrib *pattrib = &rframe->u.hdr.attrib;
struct recv_priv *precvpriv = &padapter->recvpriv;
_queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
#ifdef CONFIG_MP_INCLUDED
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
#endif //CONFIG_MP_INCLUDED
#ifdef CONFIG_MP_INCLUDED
if (padapter->registrypriv.mp_mode == 1)
{
if (pattrib->crc_err == 1)
{
padapter->mppriv.rx_crcerrpktcount++;
}
else
{
padapter->mppriv.rx_pktcount++;
}
if (check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE) == _FALSE) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_alert_, ("MP - Not in loopback mode , drop pkt \n"));
ret = _FAIL;
rtw_free_recvframe(rframe, pfree_recv_queue);//free this recv_frame
goto exit;
}
}
#endif
//check the frame ctrl field and decache
ret = validate_recv_frame(padapter, rframe);
if (ret != _SUCCESS)
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
rtw_free_recvframe(rframe, pfree_recv_queue);//free this recv_frame
goto exit;
}
exit:
return ret;
}
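//Post-validation Rx pipeline: decrypt the frame, optionally intercept TDLS
//action frames, reassemble fragments, pass it through portctrl(), update
//Rx statistics and finally feed it to the reorder/indication logic below.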
int recv_func_posthandle(_adapter *padapter, union recv_frame *prframe)
{
int ret = _SUCCESS;
union recv_frame *orig_prframe = prframe;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
struct recv_priv *precvpriv = &padapter->recvpriv;
_queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
#ifdef CONFIG_TDLS
u8 *psnap_type, *pcategory;
struct sta_info *ptdls_sta = NULL;
#endif //CONFIG_TDLS
// DATA FRAME
rtw_led_control(padapter, LED_CTL_RX);
prframe = decryptor(padapter, prframe);
if (prframe == NULL) {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("decryptor: drop pkt\n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s decryptor: drop pkt\n", __FUNCTION__);
#endif
ret = _FAIL;
goto _recv_data_drop;
}
#if 0
if ( padapter->adapter_type == PRIMARY_ADAPTER )
{
DBG_871X("+++\n");
{
int i;
u8 *ptr = get_recvframe_data(prframe);
for(i=0; i<140;i=i+8)
DBG_871X("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:", *(ptr+i),
*(ptr+i+1), *(ptr+i+2) ,*(ptr+i+3) ,*(ptr+i+4),*(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
}
DBG_871X("---\n");
}
#endif
#ifdef CONFIG_TDLS
//check TDLS frame
psnap_type = get_recvframe_data(orig_prframe);
psnap_type+=pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
pcategory = psnap_type + ETH_TYPE_LEN + PAYLOAD_TYPE_LEN;
if((_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_TDLS, ETH_TYPE_LEN)) &&
((*pcategory==RTW_WLAN_CATEGORY_TDLS) || (*pcategory==RTW_WLAN_CATEGORY_P2P))){
ret = OnTDLS(padapter, prframe); //all of functions will return _FAIL
goto _exit_recv_func;
}
#endif //CONFIG_TDLS
prframe = recvframe_chk_defrag(padapter, prframe);
if(prframe==NULL) {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("recvframe_chk_defrag: drop pkt\n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s recvframe_chk_defrag: drop pkt\n", __FUNCTION__);
#endif
goto _recv_data_drop;
}
prframe=portctrl(padapter, prframe);
if (prframe == NULL) {
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("portctrl: drop pkt \n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s portctrl: drop pkt\n", __FUNCTION__);
#endif
ret = _FAIL;
goto _recv_data_drop;
}
#ifdef CONFIG_TDLS
if(padapter->tdlsinfo.setup_state == TDLS_LINKED_STATE)
ptdls_sta = rtw_get_stainfo(&padapter->stapriv, pattrib->src);
count_rx_stats(padapter, prframe, ptdls_sta);
#else
count_rx_stats(padapter, prframe, NULL);
#endif //CONFIG_TDLS
#ifdef CONFIG_WAPI_SUPPORT
rtw_wapi_update_info(padapter, prframe);
#endif
#ifdef CONFIG_80211N_HT
ret = process_recv_indicatepkts(padapter, prframe);
if (ret != _SUCCESS)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("recv_func: process_recv_indicatepkts fail! \n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s process_recv_indicatepkts fail!\n", __FUNCTION__);
#endif
rtw_free_recvframe(orig_prframe, pfree_recv_queue);//free this recv_frame
goto _recv_data_drop;
}
#else // CONFIG_80211N_HT
if (!pattrib->amsdu)
{
ret = wlanhdr_to_ethhdr (prframe);
if (ret != _SUCCESS)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("wlanhdr_to_ethhdr: drop pkt \n"));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s wlanhdr_to_ethhdr: drop pkt\n", __FUNCTION__);
#endif
rtw_free_recvframe(orig_prframe, pfree_recv_queue);//free this recv_frame
goto _recv_data_drop;
}
if ((padapter->bDriverStopped == _FALSE) && (padapter->bSurpriseRemoved == _FALSE))
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_alert_, ("@@@@ recv_func: recv_func rtw_recv_indicatepkt\n" ));
//indicate this recv_frame
ret = rtw_recv_indicatepkt(padapter, prframe);
if (ret != _SUCCESS)
{
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s rtw_recv_indicatepkt fail!\n", __FUNCTION__);
#endif
goto _recv_data_drop;
}
}
else
{
RT_TRACE(_module_rtl871x_recv_c_, _drv_alert_, ("@@@@ recv_func: rtw_free_recvframe\n" ));
RT_TRACE(_module_rtl871x_recv_c_, _drv_debug_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s ecv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", __FUNCTION__,
padapter->bDriverStopped, padapter->bSurpriseRemoved);
#endif
ret = _FAIL;
rtw_free_recvframe(orig_prframe, pfree_recv_queue); //free this recv_frame
}
}
else if(pattrib->amsdu==1)
{
ret = amsdu_to_msdu(padapter, prframe);
if(ret != _SUCCESS)
{
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s amsdu_to_msdu fail\n", __FUNCTION__);
#endif
rtw_free_recvframe(orig_prframe, pfree_recv_queue);
goto _recv_data_drop;
}
}
else
{
#ifdef DBG_RX_DROP_FRAME
DBG_871X("DBG_RX_DROP_FRAME %s what is this condition??\n", __FUNCTION__);
#endif
goto _recv_data_drop;
}
#endif // CONFIG_80211N_HT
_exit_recv_func:
return ret;
_recv_data_drop:
precvpriv->rx_drop++;
return ret;
}
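//Encrypted unicast frames that arrive before the TKIP key is installed
//(psecuritypriv->busetkipkey still clear) are parked on
//uc_swdec_pending_queue; once the key is in place they are pulled off the
//queue and run through recv_func_posthandle() before new frames.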
int recv_func(_adapter *padapter, union recv_frame *rframe);
int recv_func(_adapter *padapter, union recv_frame *rframe)
{
int ret;
struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
struct recv_priv *recvpriv = &padapter->recvpriv;
struct security_priv *psecuritypriv=&padapter->securitypriv;
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
/* check if the uc_swdec_pending_queue needs to be handled */
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey)
{
union recv_frame *pending_frame;
_irqL irqL;
while((pending_frame=rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
if (recv_func_posthandle(padapter, pending_frame) == _SUCCESS)
DBG_871X("%s: dequeue uc_swdec_pending_queue\n", __func__);
}
}
ret = recv_func_prehandle(padapter, rframe);
if(ret == _SUCCESS) {
/* check if the frame needs to be enqueued into uc_swdec_pending_queue */
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
!IS_MCAST(prxattrib->ra) && prxattrib->encrypt>0 &&
(prxattrib->bdecrypted == 0 ||psecuritypriv->sw_decrypt == _TRUE) &&
!is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
!psecuritypriv->busetkipkey) {
rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
DBG_871X("%s: no key, enqueue uc_swdec_pending_queue\n", __func__);
goto exit;
}
ret = recv_func_posthandle(padapter, rframe);
}
exit:
return ret;
}
s32 rtw_recv_entry(union recv_frame *precvframe)
{
_adapter *padapter;
struct recv_priv *precvpriv;
s32 ret=_SUCCESS;
_func_enter_;
// RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("+rtw_recv_entry\n"));
padapter = precvframe->u.hdr.adapter;
precvpriv = &padapter->recvpriv;
if ((ret = recv_func(padapter, precvframe)) == _FAIL)
{
RT_TRACE(_module_rtl871x_recv_c_,_drv_info_,("rtw_recv_entry: recv_func return fail!!!\n"));
goto _recv_entry_drop;
}
precvpriv->rx_pkts++;
_func_exit_;
return ret;
_recv_entry_drop:
#ifdef CONFIG_MP_INCLUDED
if (padapter->registrypriv.mp_mode == 1)
padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
#endif
//RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("_recv_entry_drop\n"));
_func_exit_;
return ret;
}
#ifdef CONFIG_NEW_SIGNAL_STAT_PROCESS
void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS){
_adapter *adapter = (_adapter *)FunctionContext;
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
u8 avg_signal_strength = 0;
u8 avg_signal_qual = 0;
u32 num_signal_strength = 0;
u32 num_signal_qual = 0;
u8 _alpha = 3; // this value is based on converging_constant = 5000 and sampling_interval = 1000
if(adapter->recvpriv.is_signal_dbg) {
//update the user specific value, signal_strength_dbg, to signal_strength, rssi
adapter->recvpriv.signal_strength= adapter->recvpriv.signal_strength_dbg;
adapter->recvpriv.rssi=(s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
} else {
if(recvpriv->signal_strength_data.update_req == 0) {// update_req is clear, means we got rx
avg_signal_strength = recvpriv->signal_strength_data.avg_val;
num_signal_strength = recvpriv->signal_strength_data.total_num;
// after avg_vals are acquired, we can re-stat the signal values
recvpriv->signal_strength_data.update_req = 1;
}
if(recvpriv->signal_qual_data.update_req == 0) {// update_req is clear, means we got rx
avg_signal_qual = recvpriv->signal_qual_data.avg_val;
num_signal_qual = recvpriv->signal_qual_data.total_num;
// after avg_vals are acquired, we can re-stat the signal values
recvpriv->signal_qual_data.update_req = 1;
}
if (num_signal_strength == 0) {
if (rtw_get_on_cur_ch_time(adapter) == 0
|| rtw_get_passing_time_ms(rtw_get_on_cur_ch_time(adapter)) < 2 * adapter->mlmeextpriv.mlmext_info.bcn_interval
) {
goto set_timer;
}
}
if(check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) == _TRUE
|| check_fwstate(&adapter->mlmepriv, _FW_LINKED) == _FALSE
) {
goto set_timer;
}
#ifdef CONFIG_CONCURRENT_MODE
if (check_buddy_fwstate(adapter, _FW_UNDER_SURVEY) == _TRUE)
goto set_timer;
#endif
//update value of signal_strength, rssi, signal_qual
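//exponential moving average with weight 1/_alpha (here 1/3) for the new sample:
//  new = (sample + (_alpha - 1) * old) / _alpha, rounded up on a remainder
//  and clamped to 100, e.g. old = 60, sample = 90 -> (90 + 2*60)/3 = 70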
tmp_s = (avg_signal_strength+(_alpha-1)*recvpriv->signal_strength);
if(tmp_s %_alpha)
tmp_s = tmp_s/_alpha + 1;
else
tmp_s = tmp_s/_alpha;
if(tmp_s>100)
tmp_s = 100;
tmp_q = (avg_signal_qual+(_alpha-1)*recvpriv->signal_qual);
if(tmp_q %_alpha)
tmp_q = tmp_q/_alpha + 1;
else
tmp_q = tmp_q/_alpha;
if(tmp_q>100)
tmp_q = 100;
recvpriv->signal_strength = tmp_s;
recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
recvpriv->signal_qual = tmp_q;
#if defined(DBG_RX_SIGNAL_DISPLAY_PROCESSING) && 1
DBG_871X(FUNC_ADPT_FMT" signal_strength:%3u, rssi:%3d, signal_qual:%3u"
", num_signal_strength:%u, num_signal_qual:%u"
", on_cur_ch_ms:%d"
"\n"
, FUNC_ADPT_ARG(adapter)
, recvpriv->signal_strength
, recvpriv->rssi
, recvpriv->signal_qual
, num_signal_strength, num_signal_qual
, rtw_get_on_cur_ch_time(adapter) ? rtw_get_passing_time_ms(rtw_get_on_cur_ch_time(adapter)) : 0
);
#endif
}
set_timer:
rtw_set_signal_stat_timer(recvpriv);
}
#endif //CONFIG_NEW_SIGNAL_STAT_PROCESS
| gpl-2.0 |
VincentS/glibc | resolv/ns_samedomain.c | 44 | 4965 | /*
* Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
* Copyright (c) 1995,1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
* ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
* CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#if !defined(_LIBC) && !defined(lint)
static const char rcsid[] = "$BINDId: ns_samedomain.c,v 8.9 1999/10/15 21:06:51 vixie Exp $";
#endif
#include <sys/types.h>
#include <arpa/nameser.h>
#include <errno.h>
#include <string.h>
/*%
* Check whether a name belongs to a domain.
*
* Inputs:
*\li a - the domain whose ancestry is being verified
*\li b - the potential ancestor we're checking against
*
* Return:
*\li boolean - is a at or below b?
*
* Notes:
*\li Trailing dots are first removed from name and domain.
* Always compare complete subdomains, not only whether the
* domain name is the trailing string of the given name.
*
*\li "host.foobar.top" lies in "foobar.top" and in "top" and in ""
* but NOT in "bar.top"
*/
int
ns_samedomain(const char *a, const char *b) {
size_t la, lb;
int diff, i, escaped;
const char *cp;
la = strlen(a);
lb = strlen(b);
/* Ignore a trailing label separator (i.e. an unescaped dot) in 'a'. */
if (la != 0U && a[la - 1] == '.') {
escaped = 0;
/* Note this loop doesn't get executed if la==1. */
for (i = la - 2; i >= 0; i--)
if (a[i] == '\\') {
if (escaped)
escaped = 0;
else
escaped = 1;
} else
break;
if (!escaped)
la--;
}
/* Ignore a trailing label separator (i.e. an unescaped dot) in 'b'. */
if (lb != 0U && b[lb - 1] == '.') {
escaped = 0;
/* note this loop doesn't get executed if lb==1 */
for (i = lb - 2; i >= 0; i--)
if (b[i] == '\\') {
if (escaped)
escaped = 0;
else
escaped = 1;
} else
break;
if (!escaped)
lb--;
}
/* lb == 0 means 'b' is the root domain, so 'a' must be in 'b'. */
if (lb == 0U)
return (1);
/* 'b' longer than 'a' means 'a' can't be in 'b'. */
if (lb > la)
return (0);
/* 'a' and 'b' being equal at this point indicates sameness. */
if (lb == la)
return (strncasecmp(a, b, lb) == 0);
/* Ok, we know la > lb. */
diff = la - lb;
/*
* If 'a' is only 1 character longer than 'b', then it can't be
* a subdomain of 'b' (because of the need for the '.' label
* separator).
*/
if (diff < 2)
return (0);
/*
* If the character before the last 'lb' characters of 'b'
* isn't '.', then it can't be a match (this lets us avoid
* having "foobar.com" match "bar.com").
*/
if (a[diff - 1] != '.')
return (0);
/*
* We're not sure about that '.', however. It could be escaped
* and thus not really a label separator.
*/
escaped = 0;
for (i = diff - 2; i >= 0; i--)
if (a[i] == '\\') {
if (escaped)
escaped = 0;
else
escaped = 1;
} else
break;
if (escaped)
return (0);
/* Now compare aligned trailing substring. */
cp = a + diff;
return (strncasecmp(cp, b, lb) == 0);
}
libresolv_hidden_def (ns_samedomain)
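/*
 * For example, following the description above:
 *	ns_samedomain("host.foobar.top", "foobar.top") == 1
 *	ns_samedomain("host.foobar.top", "top") == 1
 *	ns_samedomain("host.foobar.top", "") == 1
 *	ns_samedomain("host.foobar.top", "bar.top") == 0
 */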
/*%
* is "a" a subdomain of "b"?
*/
int
ns_subdomain(const char *a, const char *b) {
return (ns_samename(a, b) != 1 && ns_samedomain(a, b));
}
/*%
* make a canonical copy of domain name "src"
*
* notes:
* \code
* foo -> foo.
* foo. -> foo.
* foo.. -> foo.
* foo\. -> foo\..
* foo\\. -> foo\\.
* \endcode
*/
int
ns_makecanon(const char *src, char *dst, size_t dstsize) {
size_t n = strlen(src);
if (n + sizeof "." > dstsize) { /*%< Note: sizeof == 2 */
__set_errno (EMSGSIZE);
return (-1);
}
strcpy(dst, src);
while (n >= 1U && dst[n - 1] == '.') /*%< Ends in "." */
if (n >= 2U && dst[n - 2] == '\\' && /*%< Ends in "\." */
(n < 3U || dst[n - 3] != '\\')) /*%< But not "\\." */
break;
else
dst[--n] = '\0';
dst[n++] = '.';
dst[n] = '\0';
return (0);
}
libresolv_hidden_def (ns_makecanon)
/*%
* determine whether domain name "a" is the same as domain name "b"
*
* return:
*\li -1 on error
*\li 0 if names differ
*\li 1 if names are the same
*/
int
ns_samename(const char *a, const char *b) {
char ta[NS_MAXDNAME], tb[NS_MAXDNAME];
if (ns_makecanon(a, ta, sizeof ta) < 0 ||
ns_makecanon(b, tb, sizeof tb) < 0)
return (-1);
if (strcasecmp(ta, tb) == 0)
return (1);
else
return (0);
}
libresolv_hidden_def (ns_samename)
/*! \file */
| gpl-2.0 |
SOKP/kernel_yu_msm8916 | drivers/net/ethernet/xscale/ixp4xx_eth.c | 2092 | 39277 | /*
* Intel IXP4xx Ethernet driver for Linux
*
* Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* Ethernet port config (0x00 is not present on IXP42X):
*
* logical port 0x00 0x10 0x20
* NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C)
* physical PortId 2 0 1
* TX queue 23 24 25
* RX-free queue 26 27 28
* TX-done queue is always 31, per-port RX and TX-ready queues are configurable
*
*
* Queue entries:
* bits 0 -> 1 - NPE ID (RX and TX-done)
* bits 0 -> 2 - priority (TX, per 802.1D)
* bits 3 -> 4 - port ID (user-set?)
* bits 5 -> 31 - physical descriptor address
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
#define DEBUG_PKT_BYTES 0
#define DEBUG_MDIO 0
#define DEBUG_CLOSE 0
#define DRV_NAME "ixp4xx_eth"
#define MAX_NPES 3
#define RX_DESCS 64 /* also length of all RX queues */
#define TX_DESCS 16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN 64 /* dwords */
#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE 0x1000
#define MAX_MRU 1536 /* 0x600 */
#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
#define NAPI_WEIGHT 16
#define MDIO_INTERVAL (3 * HZ)
#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
#define NPE_ID(port_id) ((port_id) >> 4)
#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
#define TXDONE_QUEUE 31
#define PTP_SLAVE_MODE 1
#define PTP_MASTER_MODE 2
#define PORT2CHANNEL(p) NPE_ID(p->id)
/* TX Control Registers */
#define TX_CNTRL0_TX_EN 0x01
#define TX_CNTRL0_HALFDUPLEX 0x02
#define TX_CNTRL0_RETRY 0x04
#define TX_CNTRL0_PAD_EN 0x08
#define TX_CNTRL0_APPEND_FCS 0x10
#define TX_CNTRL0_2DEFER 0x20
#define TX_CNTRL0_RMII 0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
/* RX Control Registers */
#define RX_CNTRL0_RX_EN 0x01
#define RX_CNTRL0_PADSTRIP_EN 0x02
#define RX_CNTRL0_SEND_FCS 0x04
#define RX_CNTRL0_PAUSE_EN 0x08
#define RX_CNTRL0_LOOP_EN 0x10
#define RX_CNTRL0_ADDR_FLTR_EN 0x20
#define RX_CNTRL0_RX_RUNT_EN 0x40
#define RX_CNTRL0_BCAST_DIS 0x80
#define RX_CNTRL1_DEFER_EN 0x01
/* Core Control Register */
#define CORE_RESET 0x01
#define CORE_RX_FIFO_FLUSH 0x02
#define CORE_TX_FIFO_FLUSH 0x04
#define CORE_SEND_JAM 0x08
#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL CORE_MDC_EN
/* NPE message codes */
#define NPE_GETSTATUS 0x00
#define NPE_EDB_SETPORTADDRESS 0x01
#define NPE_EDB_GETMACADDRESSDATABASE 0x02
#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
#define NPE_GETSTATS 0x04
#define NPE_RESETSTATS 0x05
#define NPE_SETMAXFRAMELENGTHS 0x06
#define NPE_VLAN_SETRXTAGMODE 0x07
#define NPE_VLAN_SETDEFAULTRXVID 0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
#define NPE_VLAN_SETRXQOSENTRY 0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE 0x0D
#define NPE_FW_SETFIREWALLMODE 0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE 0x11
#define NPE_SETLOOPBACK_MODE 0x12
#define NPE_PC_SETBSSIDTABLE 0x13
#define NPE_ADDRESS_FILTER_CONFIG 0x14
#define NPE_APPENDFCSCONFIG 0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
#define NPE_MAC_RECOVERY_START 0x17
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
struct eth_regs {
u32 tx_control[2], __res1[2]; /* 000 */
u32 rx_control[2], __res2[2]; /* 010 */
u32 random_seed, __res3[3]; /* 020 */
u32 partial_empty_threshold, __res4; /* 030 */
u32 partial_full_threshold, __res5; /* 038 */
u32 tx_start_bytes, __res6[3]; /* 040 */
u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
u32 tx_2part_deferral[2], __res8[2]; /* 060 */
u32 slot_time, __res9[3]; /* 070 */
u32 mdio_command[4]; /* 080 */
u32 mdio_status[4]; /* 090 */
u32 mcast_mask[6], __res10[2]; /* 0A0 */
u32 mcast_addr[6], __res11[2]; /* 0C0 */
u32 int_clock_threshold, __res12[3]; /* 0E0 */
u32 hw_addr[6], __res13[61]; /* 0F0 */
u32 core_control; /* 1FC */
};
struct port {
struct resource *mem_res;
struct eth_regs __iomem *regs;
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
u32 desc_tab_phys;
int id; /* logical port ID */
int speed, duplex;
u8 firmware[4];
int hwts_tx_en;
int hwts_rx_en;
};
/* NPE message structure */
struct msg {
#ifdef __ARMEB__
u8 cmd, eth_id, byte2, byte3;
u8 byte4, byte5, byte6, byte7;
#else
u8 byte3, byte2, eth_id, cmd;
u8 byte7, byte6, byte5, byte4;
#endif
};
/* Ethernet packet descriptor */
struct desc {
u32 next; /* pointer to next buffer, unused */
#ifdef __ARMEB__
u16 buf_len; /* buffer length */
u16 pkt_len; /* packet length */
u32 data; /* pointer to data buffer in RAM */
u8 dest_id;
u8 src_id;
u16 flags;
u8 qos;
u8 padlen;
u16 vlan_tci;
#else
u16 pkt_len; /* packet length */
u16 buf_len; /* buffer length */
u32 data; /* pointer to data buffer in RAM */
u16 flags;
u8 src_id;
u8 dest_id;
u16 vlan_tci;
u8 padlen;
u8 qos;
#endif
#ifdef __ARMEB__
u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
(n) * sizeof(struct desc))
#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
int i;
for (i = 0; i < cnt; i++)
dest[i] = swab32(src[i]);
}
#endif
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
static struct sock_filter ptp_filter[] = {
PTP_FILTER
};
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
u8 *data = skb->data;
unsigned int offset;
u16 *hi, *id;
u32 lo;
if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
return 0;
offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
return 0;
hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
memcpy(&lo, &hi[1], sizeof(lo));
return (uid_hi == ntohs(*hi) &&
uid_lo == ntohl(lo) &&
seqid == ntohs(*id));
}
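/*
 * RX time stamping: the IXP46x time-sync unit latches the source UUID,
 * sequence id and a timestamp for the last PTP event frame seen on this
 * channel.  If the latched identifiers match this skb (ixp_ptp_match
 * above), the hardware timestamp is converted to nanoseconds
 * (ns <<= TICKS_NS_SHIFT) and attached to the skb's shared hwtstamps.
 */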
static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct ixp46x_ts_regs *regs;
u64 ns;
u32 ch, hi, lo, val;
u16 uid, seq;
if (!port->hwts_rx_en)
return;
ch = PORT2CHANNEL(port);
regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
val = __raw_readl(®s->channel[ch].ch_event);
if (!(val & RX_SNAPSHOT_LOCKED))
return;
lo = __raw_readl(®s->channel[ch].src_uuid_lo);
hi = __raw_readl(®s->channel[ch].src_uuid_hi);
uid = hi & 0xffff;
seq = (hi >> 16) & 0xffff;
if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
goto out;
lo = __raw_readl(®s->channel[ch].rx_snap_lo);
hi = __raw_readl(®s->channel[ch].rx_snap_hi);
ns = ((u64) hi) << 32;
ns |= lo;
ns <<= TICKS_NS_SHIFT;
shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamps;
struct ixp46x_ts_regs *regs;
struct skb_shared_info *shtx;
u64 ns;
u32 ch, cnt, hi, lo, val;
shtx = skb_shinfo(skb);
if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
shtx->tx_flags |= SKBTX_IN_PROGRESS;
else
return;
ch = PORT2CHANNEL(port);
regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
/*
* This really stinks, but we have to poll for the Tx time stamp.
* Usually, the time stamp is ready after 4 to 6 microseconds.
*/
for (cnt = 0; cnt < 100; cnt++) {
val = __raw_readl(®s->channel[ch].ch_event);
if (val & TX_SNAPSHOT_LOCKED)
break;
udelay(1);
}
if (!(val & TX_SNAPSHOT_LOCKED)) {
shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
return;
}
lo = __raw_readl(®s->channel[ch].tx_snap_lo);
hi = __raw_readl(®s->channel[ch].tx_snap_hi);
ns = ((u64) hi) << 32;
ns |= lo;
ns <<= TICKS_NS_SHIFT;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
struct port *port = netdev_priv(netdev);
int ch;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
if (cfg.flags) /* reserved for future extensions */
return -EINVAL;
ch = PORT2CHANNEL(port);
regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
port->hwts_tx_en = 0;
break;
case HWTSTAMP_TX_ON:
port->hwts_tx_en = 1;
break;
default:
return -ERANGE;
}
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->hwts_rx_en = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
port->hwts_rx_en = PTP_SLAVE_MODE;
__raw_writel(0, &regs->channel[ch].ch_control);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
port->hwts_rx_en = PTP_MASTER_MODE;
__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
break;
default:
return -ERANGE;
}
/* Clear out any old time stamps. */
__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
&regs->channel[ch].ch_event);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
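/*
* Issue one MDIO read or write through the NPE Ethernet MDIO registers
* and poll for completion. Returns the value read (0xFFFF on a failed
* read), 0 for a successful write, or -1 if the bus is busy or the
* command times out.
*/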
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
int write, u16 cmd)
{
int cycles = 0;
if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
return -1;
}
if (write) {
__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
}
__raw_writel(((phy_id << 5) | location) & 0xFF,
&mdio_regs->mdio_command[2]);
__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
&mdio_regs->mdio_command[3]);
while ((cycles < MAX_MDIO_RETRIES) &&
(__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
udelay(1);
cycles++;
}
if (cycles == MAX_MDIO_RETRIES) {
printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
phy_id);
return -1;
}
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
phy_id, write ? "write" : "read", cycles);
#endif
if (write)
return 0;
if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
phy_id);
#endif
return 0xFFFF; /* don't return error */
}
return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&mdio_lock, flags);
ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
phy_id, location, ret);
#endif
return ret;
}
static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
u16 val)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&mdio_lock, flags);
ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
bus->name, phy_id, location, val, ret);
#endif
return ret;
}
static int ixp4xx_mdio_register(void)
{
int err;
if (!(mdio_bus = mdiobus_alloc()))
return -ENOMEM;
if (cpu_is_ixp43x()) {
/* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
return -ENODEV;
mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
} else {
/* All MII PHY accesses use NPE-B Ethernet registers */
if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
return -ENODEV;
mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
}
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
if ((err = mdiobus_register(mdio_bus)))
mdiobus_free(mdio_bus);
return err;
}
static void ixp4xx_mdio_remove(void)
{
mdiobus_unregister(mdio_bus);
mdiobus_free(mdio_bus);
}
static void ixp4xx_adjust_link(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
struct phy_device *phydev = port->phydev;
if (!phydev->link) {
if (port->speed) {
port->speed = 0;
printk(KERN_INFO "%s: link down\n", dev->name);
}
return;
}
if (port->speed == phydev->speed && port->duplex == phydev->duplex)
return;
port->speed = phydev->speed;
port->duplex = phydev->duplex;
if (port->duplex)
__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
else
__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
&port->regs->tx_control[0]);
printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
dev->name, port->speed, port->duplex ? "full" : "half");
}
static inline void debug_pkt(struct net_device *dev, const char *func,
u8 *data, int len)
{
#if DEBUG_PKT_BYTES
int i;
printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
for (i = 0; i < len; i++) {
if (i >= DEBUG_PKT_BYTES)
break;
printk("%s%02X",
((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
data[i]);
}
printk("\n");
#endif
}
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
" %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
phys, desc->next, desc->buf_len, desc->pkt_len,
desc->data, desc->dest_id, desc->src_id, desc->flags,
desc->qos, desc->padlen, desc->vlan_tci,
desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}
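/*
* Pop a descriptor from the given hardware queue and convert its physical
* address into an index into the port's RX or TX descriptor table.
* Returns -1 if the queue is empty.
*/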
static inline int queue_get_desc(unsigned int queue, struct port *port,
int is_tx)
{
u32 phys, tab_phys, n_desc;
struct desc *tab;
if (!(phys = qmgr_get_entry(queue)))
return -1;
phys &= ~0x1F; /* mask out non-address bits */
tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
n_desc = (phys - tab_phys) / sizeof(struct desc);
BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
debug_desc(phys, &tab[n_desc]);
BUG_ON(tab[n_desc].next);
return n_desc;
}
static inline void queue_put_desc(unsigned int queue, u32 phys,
struct desc *desc)
{
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
/* Don't check for queue overflow here, we've allocated sufficient
length and queues >= 32 don't support this check anyway. */
}
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
dma_unmap_single(&port->netdev->dev, desc->data,
desc->buf_len, DMA_TO_DEVICE);
#else
dma_unmap_single(&port->netdev->dev, desc->data & ~3,
ALIGN((desc->data & 3) + desc->buf_len, 4),
DMA_TO_DEVICE);
#endif
}
static void eth_rx_irq(void *pdev)
{
struct net_device *dev = pdev;
struct port *port = netdev_priv(dev);
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(port->plat->rxq);
napi_schedule(&port->napi);
}
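/*
* NAPI poll handler: take received frames off the RX queue, hand them to
* the stack and return the buffers to the RX-free queue. The RX IRQ is
* re-enabled once the queue has been drained.
*/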
static int eth_poll(struct napi_struct *napi, int budget)
{
struct port *port = container_of(napi, struct port, napi);
struct net_device *dev = port->netdev;
unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
int received = 0;
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif
while (received < budget) {
struct sk_buff *skb;
struct desc *desc;
int n;
#ifdef __ARMEB__
struct sk_buff *temp;
u32 phys;
#endif
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
dev->name);
#endif
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_below_low_watermark(rxq) &&
napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll"
" napi_reschedule successed\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
continue;
}
#if DEBUG_RX
printk(KERN_DEBUG "%s: eth_poll all done\n",
dev->name);
#endif
return received; /* all work done */
}
desc = rx_desc_ptr(port, n);
#ifdef __ARMEB__
if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
phys = dma_map_single(&dev->dev, skb->data,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&dev->dev, phys)) {
dev_kfree_skb(skb);
skb = NULL;
}
}
#else
skb = netdev_alloc_skb(dev,
ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif
if (!skb) {
dev->stats.rx_dropped++;
/* put the desc back on RX-ready queue */
desc->buf_len = MAX_MRU;
desc->pkt_len = 0;
queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
continue;
}
/* process received frame */
#ifdef __ARMEB__
temp = skb;
skb = port->rx_buff_tab[n];
dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, desc->pkt_len);
debug_pkt(dev, "eth_poll", skb->data, skb->len);
ixp_rx_timestamp(port, skb);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
port->rx_buff_tab[n] = temp;
desc->data = phys + NET_IP_ALIGN;
#endif
desc->buf_len = MAX_MRU;
desc->pkt_len = 0;
queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
received++;
}
#if DEBUG_RX
printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
return received; /* not all work done */
}
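/*
* TX-done interrupt: reclaim descriptors from the shared TXDONE queue,
* free the transmitted buffers and wake the netif queue if it had been
* stopped because TX-ready ran empty.
*/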
static void eth_txdone_irq(void *unused)
{
u32 phys;
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
u32 npe_id, n_desc;
struct port *port;
struct desc *desc;
int start;
npe_id = phys & 3;
BUG_ON(npe_id >= MAX_NPES);
port = npe_port_tab[npe_id];
BUG_ON(!port);
phys &= ~0x1F; /* mask out non-address bits */
n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
BUG_ON(n_desc >= TX_DESCS);
desc = tx_desc_ptr(port, n_desc);
debug_desc(phys, desc);
if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
port->netdev->stats.tx_packets++;
port->netdev->stats.tx_bytes += desc->pkt_len;
dma_unmap_tx(port, desc);
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
free_buffer_irq(port->tx_buff_tab[n_desc]);
port->tx_buff_tab[n_desc] = NULL;
}
start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
queue_put_desc(port->plat->txreadyq, phys, desc);
if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
port->netdev->name);
#endif
netif_wake_queue(port->netdev);
}
}
}
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = netdev_priv(dev);
unsigned int txreadyq = port->plat->txreadyq;
int len, offset, bytes, n;
void *mem;
u32 phys;
struct desc *desc;
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif
if (unlikely(skb->len > MAX_MRU)) {
dev_kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
debug_pkt(dev, "eth_xmit", skb->data, skb->len);
len = skb->len;
#ifdef __ARMEB__
offset = 0; /* no need to keep alignment */
bytes = len;
mem = skb->data;
#else
offset = (int)skb->data & 3; /* keep 32-bit alignment */
bytes = ALIGN(offset + len, 4);
if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif
phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
if (dma_mapping_error(&dev->dev, phys)) {
dev_kfree_skb(skb);
#ifndef __ARMEB__
kfree(mem);
#endif
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
n = queue_get_desc(txreadyq, port, 1);
BUG_ON(n < 0);
desc = tx_desc_ptr(port, n);
#ifdef __ARMEB__
port->tx_buff_tab[n] = skb;
#else
port->tx_buff_tab[n] = mem;
#endif
desc->data = phys + offset;
desc->buf_len = desc->pkt_len = len;
/* NPE firmware pads short frames with zeros internally */
wmb();
queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
netif_stop_queue(dev);
/* we could have missed the TX-ready interrupt while stopping */
/* the queue; re-check and resume if it is no longer empty */
if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_xmit ready again\n",
dev->name);
#endif
netif_wake_queue(dev);
}
}
#if DEBUG_TX
printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
ixp_tx_timestamp(port, skb);
skb_tx_timestamp(skb);
#ifndef __ARMEB__
dev_kfree_skb(skb);
#endif
return NETDEV_TX_OK;
}
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
struct netdev_hw_addr *ha;
u8 diffs[ETH_ALEN], *addr;
int i;
static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
}
__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
&port->regs->rx_control[0]);
return;
}
if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
&port->regs->rx_control[0]);
return;
}
memset(diffs, 0, ETH_ALEN);
addr = NULL;
netdev_for_each_mc_addr(ha, dev) {
if (!addr)
addr = ha->addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
diffs[i] |= addr[i] ^ ha->addr[i];
}
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(addr[i], &port->regs->mcast_addr[i]);
__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
}
__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
&port->regs->rx_control[0]);
}
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct port *port = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
return hwtstamp_ioctl(dev, req, cmd);
return phy_mii_ioctl(port->phydev, req, cmd);
}
/* ethtool support */
static void ixp4xx_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct port *port = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct port *port = netdev_priv(dev);
return phy_ethtool_gset(port->phydev, cmd);
}
static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct port *port = netdev_priv(dev);
return phy_ethtool_sset(port->phydev, cmd);
}
static int ixp4xx_nway_reset(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
return phy_start_aneg(port->phydev);
}
int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);
static int ixp4xx_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
if (!cpu_is_ixp46x()) {
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
return 0;
}
info->so_timestamping =
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = ixp46x_phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
return 0;
}
static const struct ethtool_ops ixp4xx_ethtool_ops = {
.get_drvinfo = ixp4xx_get_drvinfo,
.get_settings = ixp4xx_get_settings,
.set_settings = ixp4xx_set_settings,
.nway_reset = ixp4xx_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ixp4xx_get_ts_info,
};
static int request_queues(struct port *port)
{
int err;
err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
"%s:RX-free", port->netdev->name);
if (err)
return err;
err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
"%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
"%s:TX", port->netdev->name);
if (err)
goto rel_rx;
err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
"%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
/* TX-done queue handles skbs sent out by the NPEs */
if (!ports_open) {
err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
"%s:TX-done", DRV_NAME);
if (err)
goto rel_txready;
}
return 0;
rel_txready:
qmgr_release_queue(port->plat->txreadyq);
rel_tx:
qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
qmgr_release_queue(port->plat->rxq);
rel_rxfree:
qmgr_release_queue(RXFREE_QUEUE(port->id));
printk(KERN_DEBUG "%s: unable to request hardware queues\n",
port->netdev->name);
return err;
}
static void release_queues(struct port *port)
{
qmgr_release_queue(RXFREE_QUEUE(port->id));
qmgr_release_queue(port->plat->rxq);
qmgr_release_queue(TX_QUEUE(port->id));
qmgr_release_queue(port->plat->txreadyq);
if (!ports_open)
qmgr_release_queue(TXDONE_QUEUE);
}
static int init_queues(struct port *port)
{
int i;
if (!ports_open) {
dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
POOL_ALLOC_SIZE, 32, 0);
if (!dma_pool)
return -ENOMEM;
}
if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys)))
return -ENOMEM;
memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
/* Setup RX buffers */
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff; /* skb or kmalloc()ated memory */
void *data;
#ifdef __ARMEB__
if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
return -ENOMEM;
data = buff->data;
#else
if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
return -ENOMEM;
data = buff;
#endif
desc->buf_len = MAX_MRU;
desc->data = dma_map_single(&port->netdev->dev, data,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&port->netdev->dev, desc->data)) {
free_buffer(buff);
return -EIO;
}
desc->data += NET_IP_ALIGN;
port->rx_buff_tab[i] = buff;
}
return 0;
}
static void destroy_queues(struct port *port)
{
int i;
if (port->desc_tab) {
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff = port->rx_buff_tab[i];
if (buff) {
dma_unmap_single(&port->netdev->dev,
desc->data - NET_IP_ALIGN,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
free_buffer(buff);
}
}
for (i = 0; i < TX_DESCS; i++) {
struct desc *desc = tx_desc_ptr(port, i);
buffer_t *buff = port->tx_buff_tab[i];
if (buff) {
dma_unmap_tx(port, desc);
free_buffer(buff);
}
}
dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
port->desc_tab = NULL;
}
if (!ports_open && dma_pool) {
dma_pool_destroy(dma_pool);
dma_pool = NULL;
}
}
static int eth_open(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
struct npe *npe = port->npe;
struct msg msg;
int i, err;
if (!npe_running(npe)) {
err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
if (err)
return err;
if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
printk(KERN_ERR "%s: %s not responding\n", dev->name,
npe_name(npe));
return -EIO;
}
port->firmware[0] = msg.byte4;
port->firmware[1] = msg.byte5;
port->firmware[2] = msg.byte6;
port->firmware[3] = msg.byte7;
}
memset(&msg, 0, sizeof(msg));
msg.cmd = NPE_VLAN_SETRXQOSENTRY;
msg.eth_id = port->id;
msg.byte5 = port->plat->rxq | 0x80;
msg.byte7 = port->plat->rxq << 4;
for (i = 0; i < 8; i++) {
msg.byte3 = i;
if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
return -EIO;
}
msg.cmd = NPE_EDB_SETPORTADDRESS;
msg.eth_id = PHYSICAL_ID(port->id);
msg.byte2 = dev->dev_addr[0];
msg.byte3 = dev->dev_addr[1];
msg.byte4 = dev->dev_addr[2];
msg.byte5 = dev->dev_addr[3];
msg.byte6 = dev->dev_addr[4];
msg.byte7 = dev->dev_addr[5];
if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
return -EIO;
memset(&msg, 0, sizeof(msg));
msg.cmd = NPE_FW_SETFIREWALLMODE;
msg.eth_id = port->id;
if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
return -EIO;
if ((err = request_queues(port)) != 0)
return err;
if ((err = init_queues(port)) != 0) {
destroy_queues(port);
release_queues(port);
return err;
}
port->speed = 0; /* force "link up" message */
phy_start(port->phydev);
for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
__raw_writel(0x08, &port->regs->random_seed);
__raw_writel(0x12, &port->regs->partial_empty_threshold);
__raw_writel(0x30, &port->regs->partial_full_threshold);
__raw_writel(0x08, &port->regs->tx_start_bytes);
__raw_writel(0x15, &port->regs->tx_deferral);
__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
__raw_writel(0x80, &port->regs->slot_time);
__raw_writel(0x01, &port->regs->int_clock_threshold);
/* Populate queues with buffers, no failure after this point */
for (i = 0; i < TX_DESCS; i++)
queue_put_desc(port->plat->txreadyq,
tx_desc_phys(port, i), tx_desc_ptr(port, i));
for (i = 0; i < RX_DESCS; i++)
queue_put_desc(RXFREE_QUEUE(port->id),
rx_desc_phys(port, i), rx_desc_ptr(port, i));
__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
__raw_writel(0, &port->regs->rx_control[1]);
__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
napi_enable(&port->napi);
eth_set_mcast_list(dev);
netif_start_queue(dev);
qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
eth_rx_irq, dev);
if (!ports_open) {
qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
eth_txdone_irq, NULL);
qmgr_enable_irq(TXDONE_QUEUE);
}
ports_open++;
/* we may already have RX data; the poll handler re-enables the IRQ */
napi_schedule(&port->napi);
return 0;
}
static int eth_close(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
struct msg msg;
int buffs = RX_DESCS; /* allocated RX buffers */
int i;
ports_open--;
qmgr_disable_irq(port->plat->rxq);
napi_disable(&port->napi);
netif_stop_queue(dev);
while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
buffs--;
memset(&msg, 0, sizeof(msg));
msg.cmd = NPE_SETLOOPBACK_MODE;
msg.eth_id = port->id;
msg.byte3 = 1;
if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
i = 0;
do { /* drain RX buffers */
while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
buffs--;
if (!buffs)
break;
if (qmgr_stat_empty(TX_QUEUE(port->id))) {
/* we have to inject some packet */
struct desc *desc;
u32 phys;
int n = queue_get_desc(port->plat->txreadyq, port, 1);
BUG_ON(n < 0);
desc = tx_desc_ptr(port, n);
phys = tx_desc_phys(port, n);
desc->buf_len = desc->pkt_len = 1;
wmb();
queue_put_desc(TX_QUEUE(port->id), phys, desc);
}
udelay(1);
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
" left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
if (!buffs)
printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif
buffs = TX_DESCS;
while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
buffs--; /* cancel TX */
i = 0;
do {
while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
buffs--;
if (!buffs)
break;
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
"left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
if (!buffs)
printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
msg.byte3 = 0;
if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);
phy_stop(port->phydev);
if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
destroy_queues(port);
release_queues(port);
return 0;
}
static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_open = eth_open,
.ndo_stop = eth_close,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
.ndo_do_ioctl = eth_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = pdev->dev.platform_data;
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
pr_err("ixp4xx_eth: bad ptp filter\n");
return -EINVAL;
}
if (!(dev = alloc_etherdev(sizeof(struct port))))
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
port = netdev_priv(dev);
port->netdev = dev;
port->id = pdev->id;
switch (port->id) {
case IXP4XX_ETH_NPEA:
port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
regs_phys = IXP4XX_EthA_BASE_PHYS;
break;
case IXP4XX_ETH_NPEB:
port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
regs_phys = IXP4XX_EthB_BASE_PHYS;
break;
case IXP4XX_ETH_NPEC:
port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
regs_phys = IXP4XX_EthC_BASE_PHYS;
break;
default:
err = -ENODEV;
goto err_free;
}
dev->netdev_ops = &ixp4xx_netdev_ops;
dev->ethtool_ops = &ixp4xx_ethtool_ops;
dev->tx_queue_len = 100;
netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
if (!(port->npe = npe_request(NPE_ID(port->id)))) {
err = -EIO;
goto err_free;
}
port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
if (!port->mem_res) {
err = -EBUSY;
goto err_npe_rel;
}
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
platform_set_drvdata(pdev, dev);
__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
&port->regs->core_control);
udelay(50);
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(port->phydev)) {
err = PTR_ERR(port->phydev);
goto err_free_mem;
}
port->phydev->irq = PHY_POLL;
if ((err = register_netdev(dev)))
goto err_phy_dis;
printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
npe_name(port->npe));
return 0;
err_phy_dis:
phy_disconnect(port->phydev);
err_free_mem:
npe_port_tab[NPE_ID(port->id)] = NULL;
platform_set_drvdata(pdev, NULL);
release_resource(port->mem_res);
err_npe_rel:
npe_release(port->npe);
err_free:
free_netdev(dev);
return err;
}
static int eth_remove_one(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct port *port = netdev_priv(dev);
unregister_netdev(dev);
phy_disconnect(port->phydev);
npe_port_tab[NPE_ID(port->id)] = NULL;
platform_set_drvdata(pdev, NULL);
npe_release(port->npe);
release_resource(port->mem_res);
free_netdev(dev);
return 0;
}
static struct platform_driver ixp4xx_eth_driver = {
.driver.name = DRV_NAME,
.probe = eth_init_one,
.remove = eth_remove_one,
};
static int __init eth_init_module(void)
{
int err;
if ((err = ixp4xx_mdio_register()))
return err;
return platform_driver_register(&ixp4xx_eth_driver);
}
static void __exit eth_cleanup_module(void)
{
platform_driver_unregister(&ixp4xx_eth_driver);
ixp4xx_mdio_remove();
}
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);
| gpl-2.0 |
jawad6233/zp998_kernel | net/ipv6/mip6.c | 2348 | 13525 | /*
* Copyright (C)2003-2006 Helsinki University of Technology
* Copyright (C)2003-2006 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Authors:
* Noriaki TAKAMIYA @USAGI
* Masahide NAKAMURA @USAGI
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
#include <net/mip6.h>
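/*
* calc_padlen() returns the number of padding octets needed after @len
* bytes so that the following option starts at offset @n within an
* 8-octet unit; mip6_padn() then emits the matching Pad1/PadN option.
*/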
static inline unsigned int calc_padlen(unsigned int len, unsigned int n)
{
return (n - len + 16) & 0x7;
}
static inline void *mip6_padn(__u8 *data, __u8 padlen)
{
if (!data)
return NULL;
if (padlen == 1) {
data[0] = IPV6_TLV_PAD1;
} else if (padlen > 1) {
data[0] = IPV6_TLV_PADN;
data[1] = padlen - 2;
if (padlen > 2)
memset(data+2, 0, data[1]);
}
return data + padlen;
}
static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
}
static int mip6_mh_len(int type)
{
int len = 0;
switch (type) {
case IP6_MH_TYPE_BRR:
len = 0;
break;
case IP6_MH_TYPE_HOTI:
case IP6_MH_TYPE_COTI:
case IP6_MH_TYPE_BU:
case IP6_MH_TYPE_BACK:
len = 1;
break;
case IP6_MH_TYPE_HOT:
case IP6_MH_TYPE_COT:
case IP6_MH_TYPE_BERROR:
len = 2;
break;
}
return len;
}
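/*
* Validate a received Mobility Header: it must fit in the packet, be at
* least as long as its type requires, and carry no payload
* (ip6mh_proto == IPPROTO_NONE). Malformed headers trigger an ICMPv6
* parameter problem.
*/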
static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{
struct ip6_mh _hdr;
const struct ip6_mh *mh;
mh = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_hdr), &_hdr);
if (!mh)
return -1;
if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
return -1;
if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
skb_network_header_len(skb));
return -1;
}
if (mh->ip6mh_proto != IPPROTO_NONE) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
mh->ip6mh_proto);
mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
skb_network_header_len(skb));
return -1;
}
return 0;
}
struct mip6_report_rate_limiter {
spinlock_t lock;
struct timeval stamp;
int iif;
struct in6_addr src;
struct in6_addr dst;
};
static struct mip6_report_rate_limiter mip6_report_rl = {
.lock = __SPIN_LOCK_UNLOCKED(mip6_report_rl.lock)
};
static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
int err = destopt->nexthdr;
spin_lock(&x->lock);
if (!ipv6_addr_equal(&iph->saddr, (struct in6_addr *)x->coaddr) &&
!ipv6_addr_any((struct in6_addr *)x->coaddr))
err = -ENOENT;
spin_unlock(&x->lock);
return err;
}
/* A destination options header is inserted.  The IP header's source
* address (the home address) is moved into the Home Address option and
* replaced with the care-of address.
*/
static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *iph;
struct ipv6_destopt_hdr *dstopt;
struct ipv6_destopt_hao *hao;
u8 nexthdr;
int len;
skb_push(skb, -skb_network_offset(skb));
iph = ipv6_hdr(skb);
nexthdr = *skb_mac_header(skb);
*skb_mac_header(skb) = IPPROTO_DSTOPTS;
dstopt = (struct ipv6_destopt_hdr *)skb_transport_header(skb);
dstopt->nexthdr = nexthdr;
hao = mip6_padn((char *)(dstopt + 1),
calc_padlen(sizeof(*dstopt), 6));
hao->type = IPV6_TLV_HAO;
BUILD_BUG_ON(sizeof(*hao) != 18);
hao->length = sizeof(*hao) - 2;
len = ((char *)hao - (char *)dstopt) + sizeof(*hao);
memcpy(&hao->addr, &iph->saddr, sizeof(hao->addr));
spin_lock_bh(&x->lock);
memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr));
spin_unlock_bh(&x->lock);
WARN_ON(len != x->props.header_len);
dstopt->hdrlen = (x->props.header_len >> 3) - 1;
return 0;
}
static inline int mip6_report_rl_allow(struct timeval *stamp,
const struct in6_addr *dst,
const struct in6_addr *src, int iif)
{
int allow = 0;
spin_lock_bh(&mip6_report_rl.lock);
if (mip6_report_rl.stamp.tv_sec != stamp->tv_sec ||
mip6_report_rl.stamp.tv_usec != stamp->tv_usec ||
mip6_report_rl.iif != iif ||
!ipv6_addr_equal(&mip6_report_rl.src, src) ||
!ipv6_addr_equal(&mip6_report_rl.dst, dst)) {
mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
mip6_report_rl.iif = iif;
mip6_report_rl.src = *src;
mip6_report_rl.dst = *dst;
allow = 1;
}
spin_unlock_bh(&mip6_report_rl.lock);
return allow;
}
static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
const struct flowi *fl)
{
struct net *net = xs_net(x);
struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
const struct flowi6 *fl6 = &fl->u.ip6;
struct ipv6_destopt_hao *hao = NULL;
struct xfrm_selector sel;
int offset;
struct timeval stamp;
int err = 0;
if (unlikely(fl6->flowi6_proto == IPPROTO_MH &&
fl6->fl6_mh_type <= IP6_MH_TYPE_MAX))
goto out;
if (likely(opt->dsthao)) {
offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
if (likely(offset >= 0))
hao = (struct ipv6_destopt_hao *)
(skb_network_header(skb) + offset);
}
skb_get_timestamp(skb, &stamp);
if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr,
hao ? &hao->addr : &ipv6_hdr(skb)->saddr,
opt->iif))
goto out;
memset(&sel, 0, sizeof(sel));
memcpy(&sel.daddr, (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
sizeof(sel.daddr));
sel.prefixlen_d = 128;
memcpy(&sel.saddr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
sizeof(sel.saddr));
sel.prefixlen_s = 128;
sel.family = AF_INET6;
sel.proto = fl6->flowi6_proto;
sel.dport = xfrm_flowi_dport(fl, &fl6->uli);
if (sel.dport)
sel.dport_mask = htons(~0);
sel.sport = xfrm_flowi_sport(fl, &fl6->uli);
if (sel.sport)
sel.sport_mask = htons(~0);
sel.ifindex = fl6->flowi6_oif;
err = km_report(net, IPPROTO_DSTOPTS, &sel,
(hao ? (xfrm_address_t *)&hao->addr : NULL));
out:
return err;
}
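/*
* Work out the offset at which the destination options header carrying
* the Home Address option should be inserted, skipping any hop-by-hop
* and routing headers that must precede it.
*/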
static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
struct ipv6_opt_hdr *exthdr =
(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
const unsigned char *nh = skb_network_header(skb);
unsigned int packet_len = skb->tail - skb->network_header;
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
while (offset + 1 <= packet_len) {
switch (**nexthdr) {
case NEXTHDR_HOP:
break;
case NEXTHDR_ROUTING:
found_rhdr = 1;
break;
case NEXTHDR_DEST:
/*
* HAO MUST NOT appear more than once.
* XXX: It would be better to scan to the end of the
* XXX: packet to check whether a HAO already exists.
*/
if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
LIMIT_NETDEBUG(KERN_WARNING "mip6: hao exists already, override\n");
return offset;
}
if (found_rhdr)
return offset;
break;
default:
return offset;
}
offset += ipv6_optlen(exthdr);
*nexthdr = &exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
}
return offset;
}
static int mip6_destopt_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
pr_info("%s: state's mode is not %u: %u\n",
__func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
return -EINVAL;
}
x->props.header_len = sizeof(struct ipv6_destopt_hdr) +
calc_padlen(sizeof(struct ipv6_destopt_hdr), 6) +
sizeof(struct ipv6_destopt_hao);
WARN_ON(x->props.header_len != 24);
return 0;
}
/*
* Nothing to do on destruction: unlike the IPsec protocols, the
* destination options header has no per-state resources to release.
*/
static void mip6_destopt_destroy(struct xfrm_state *x)
{
}
static const struct xfrm_type mip6_destopt_type =
{
.description = "MIP6DESTOPT",
.owner = THIS_MODULE,
.proto = IPPROTO_DSTOPTS,
.flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR,
.init_state = mip6_destopt_init_state,
.destructor = mip6_destopt_destroy,
.input = mip6_destopt_input,
.output = mip6_destopt_output,
.reject = mip6_destopt_reject,
.hdr_offset = mip6_destopt_offset,
};
static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
int err = rt2->rt_hdr.nexthdr;
spin_lock(&x->lock);
if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
!ipv6_addr_any((struct in6_addr *)x->coaddr))
err = -ENOENT;
spin_unlock(&x->lock);
return err;
}
/* A type 2 routing header is inserted.  The IP header's destination
* address (the home address) is moved into the routing header and
* replaced with the care-of address.
*/
static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *iph;
struct rt2_hdr *rt2;
u8 nexthdr;
skb_push(skb, -skb_network_offset(skb));
iph = ipv6_hdr(skb);
nexthdr = *skb_mac_header(skb);
*skb_mac_header(skb) = IPPROTO_ROUTING;
rt2 = (struct rt2_hdr *)skb_transport_header(skb);
rt2->rt_hdr.nexthdr = nexthdr;
rt2->rt_hdr.hdrlen = (x->props.header_len >> 3) - 1;
rt2->rt_hdr.type = IPV6_SRCRT_TYPE_2;
rt2->rt_hdr.segments_left = 1;
memset(&rt2->reserved, 0, sizeof(rt2->reserved));
WARN_ON(rt2->rt_hdr.hdrlen != 2);
memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr));
spin_lock_bh(&x->lock);
memcpy(&iph->daddr, x->coaddr, sizeof(iph->daddr));
spin_unlock_bh(&x->lock);
return 0;
}
static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
struct ipv6_opt_hdr *exthdr =
(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
const unsigned char *nh = skb_network_header(skb);
unsigned int packet_len = skb->tail - skb->network_header;
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
while (offset + 1 <= packet_len) {
switch (**nexthdr) {
case NEXTHDR_HOP:
break;
case NEXTHDR_ROUTING:
if (offset + 3 <= packet_len) {
struct ipv6_rt_hdr *rt;
rt = (struct ipv6_rt_hdr *)(nh + offset);
if (rt->type != 0)
return offset;
}
found_rhdr = 1;
break;
case NEXTHDR_DEST:
if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
return offset;
if (found_rhdr)
return offset;
break;
default:
return offset;
}
offset += ipv6_optlen(exthdr);
*nexthdr = &exthdr->nexthdr;
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
}
return offset;
}
static int mip6_rthdr_init_state(struct xfrm_state *x)
{
if (x->id.spi) {
pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
pr_info("%s: state's mode is not %u: %u\n",
__func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
return -EINVAL;
}
x->props.header_len = sizeof(struct rt2_hdr);
return 0;
}
/*
* Nothing to do on destruction: unlike the IPsec protocols, routing
* header type 2 has no per-state resources to release.
*/
static void mip6_rthdr_destroy(struct xfrm_state *x)
{
}
static const struct xfrm_type mip6_rthdr_type =
{
.description = "MIP6RT",
.owner = THIS_MODULE,
.proto = IPPROTO_ROUTING,
.flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR,
.init_state = mip6_rthdr_init_state,
.destructor = mip6_rthdr_destroy,
.input = mip6_rthdr_input,
.output = mip6_rthdr_output,
.hdr_offset = mip6_rthdr_offset,
};
static int __init mip6_init(void)
{
pr_info("Mobile IPv6\n");
if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
pr_info("%s: can't add xfrm type(destopt)\n", __func__);
goto mip6_destopt_xfrm_fail;
}
if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
pr_info("%s: can't add xfrm type(rthdr)\n", __func__);
goto mip6_rthdr_xfrm_fail;
}
if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
pr_info("%s: can't add rawv6 mh filter\n", __func__);
goto mip6_rawv6_mh_fail;
}
return 0;
mip6_rawv6_mh_fail:
xfrm_unregister_type(&mip6_rthdr_type, AF_INET6);
mip6_rthdr_xfrm_fail:
xfrm_unregister_type(&mip6_destopt_type, AF_INET6);
mip6_destopt_xfrm_fail:
return -EAGAIN;
}
static void __exit mip6_fini(void)
{
if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
pr_info("%s: can't remove rawv6 mh filter\n", __func__);
if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
pr_info("%s: can't remove xfrm type(rthdr)\n", __func__);
if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
pr_info("%s: can't remove xfrm type(destopt)\n", __func__);
}
module_init(mip6_init);
module_exit(mip6_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_DSTOPTS);
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ROUTING);
| gpl-2.0 |
pedja1/kernel-hammerhead-pedja | drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2860 | 83522 | /*
* Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "atl1c.h"
#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
#define L2CB_V10 0xc0
#define L2CB_V11 0xc1
/*
* atl1c_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
*
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
/* required last entry */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATL1C_DRV_VERSION);
static int atl1c_stop_mac(struct atl1c_hw *hw);
static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
static int atl1c_up(struct atl1c_adapter *adapter);
static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
};
static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
{
REG_MB_RFD0_PROD_IDX,
REG_MB_RFD1_PROD_IDX,
REG_MB_RFD2_PROD_IDX,
REG_MB_RFD3_PROD_IDX
};
static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
{
REG_RFD0_HEAD_ADDR_LO,
REG_RFD1_HEAD_ADDR_LO,
REG_RFD2_HEAD_ADDR_LO,
REG_RFD3_HEAD_ADDR_LO
};
static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
{
REG_RRD0_HEAD_ADDR_LO,
REG_RRD1_HEAD_ADDR_LO,
REG_RRD2_HEAD_ADDR_LO,
REG_RRD3_HEAD_ADDR_LO
};
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
static void atl1c_pcie_patch(struct atl1c_hw *hw)
{
u32 data;
AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
data |= PCIE_PHYMISC_FORCE_RCV_DET;
AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
PCIE_PHYMISC2_SERDES_CDR_SHIFT);
data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
PCIE_PHYMISC2_SERDES_TH_SHIFT);
data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
}
}
/* FIXME: no need any more ? */
/*
* atl1c_init_pcie - init PCIE module
*/
static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
{
u32 data;
u32 pci_cmd;
struct pci_dev *pdev = hw->adapter->pdev;
AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
PCI_COMMAND_IO);
AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
/*
* Clear any power-saving settings
*/
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
/*
* Mask some pcie error bits
*/
AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data);
data &= ~PCIE_UC_SERVRITY_DLP;
data &= ~PCIE_UC_SERVRITY_FCP;
AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
data &= ~LTSSM_ID_EN_WRO;
AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
atl1c_pcie_patch(hw);
if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
atl1c_disable_l0s_l1(hw);
if (flag & ATL1C_PCIE_PHY_RESET)
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
else
AT_WRITE_REG(hw, REG_GPHY_CTRL,
GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
msleep(5);
}
/*
* atl1c_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
{
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
AT_WRITE_FLUSH(&adapter->hw);
}
}
/*
* atl1c_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
AT_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
/*
* atl1c_irq_reset - reset the interrupt configuration on the NIC
* @adapter: board private structure
*/
static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
{
atomic_set(&adapter->irq_sem, 1);
atl1c_irq_enable(adapter);
}
/*
* atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
* of the idle status register until the device is actually idle
*/
static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
{
int timeout;
u32 data;
for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
AT_READ_REG(hw, REG_IDLE_STATUS, &data);
if ((data & IDLE_STATUS_MASK) == 0)
return 0;
msleep(1);
}
return data;
}
/*
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
static void atl1c_phy_config(unsigned long data)
{
struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
spin_lock_irqsave(&adapter->mdio_lock, flags);
atl1c_restart_autoneg(hw);
spin_unlock_irqrestore(&adapter->mdio_lock, flags);
}
void atl1c_reinit_locked(struct atl1c_adapter *adapter)
{
WARN_ON(in_interrupt());
atl1c_down(adapter);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
}
static void atl1c_check_link_status(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
int err;
unsigned long flags;
u16 speed, duplex, phy_data;
spin_lock_irqsave(&adapter->mdio_lock, flags);
/* MII_BMSR must be read twice */
atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
spin_unlock_irqrestore(&adapter->mdio_lock, flags);
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
hw->hibernate = true;
if (atl1c_stop_mac(hw) != 0)
if (netif_msg_hw(adapter))
dev_warn(&pdev->dev, "stop mac failed\n");
atl1c_set_aspm(hw, false);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
atl1c_phy_reset(hw);
atl1c_phy_init(&adapter->hw);
} else {
/* Link Up */
hw->hibernate = false;
spin_lock_irqsave(&adapter->mdio_lock, flags);
err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
spin_unlock_irqrestore(&adapter->mdio_lock, flags);
if (unlikely(err))
return;
/* link result is our setting */
if (adapter->link_speed != speed ||
adapter->link_duplex != duplex) {
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl1c_set_aspm(hw, true);
atl1c_enable_tx_ctrl(hw);
atl1c_enable_rx_ctrl(hw);
atl1c_setup_mac_ctrl(adapter);
if (netif_msg_link(adapter))
dev_info(&pdev->dev,
"%s: %s NIC Link is Up<%d Mbps %s>\n",
atl1c_driver_name, netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex");
}
if (!netif_carrier_ok(netdev))
netif_carrier_on(netdev);
}
}
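/*
* Link change interrupt: sample the PHY status, report link-down to the
* stack right away and hand the rest of the link handling to the common
* work task.
*/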
static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
u16 phy_data;
u16 link_up;
spin_lock(&adapter->mdio_lock);
atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
spin_unlock(&adapter->mdio_lock);
link_up = phy_data & BMSR_LSTATUS;
/* notify upper layer link down ASAP */
if (!link_up) {
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
netif_carrier_off(netdev);
if (netif_msg_link(adapter))
dev_info(&pdev->dev,
"%s: %s NIC Link is Down\n",
atl1c_driver_name, netdev->name);
adapter->link_speed = SPEED_0;
}
}
set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
schedule_work(&adapter->common_task);
}
static void atl1c_common_task(struct work_struct *work)
{
struct atl1c_adapter *adapter;
struct net_device *netdev;
adapter = container_of(work, struct atl1c_adapter, common_task);
netdev = adapter->netdev;
if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
netif_device_detach(netdev);
atl1c_down(adapter);
atl1c_up(adapter);
netif_device_attach(netdev);
}
if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
&adapter->work_event))
atl1c_check_link_status(adapter);
}
static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
del_timer_sync(&adapter->phy_config_timer);
}
/*
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
static void atl1c_tx_timeout(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
schedule_work(&adapter->common_task);
}
/*
* atl1c_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
* The set_multi entry point is called whenever the multicast address
* list or the network interface flags are updated. This routine is
* responsible for configuring the hardware for proper multicast,
* promiscuous mode, and all-multi behavior.
*/
static void atl1c_set_multi(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
struct netdev_hw_addr *ha;
u32 mac_ctrl_data;
u32 hash_value;
/* Check for Promiscuous and All Multicast modes */
AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
if (netdev->flags & IFF_PROMISC) {
mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
} else if (netdev->flags & IFF_ALLMULTI) {
mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
} else {
mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
}
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
/* clear the old settings from the multicast hash table */
AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* compute each mc address's hash value and put it into the hash table */
netdev_for_each_mc_addr(ha, netdev) {
hash_value = atl1c_hash_mc_addr(hw, ha->addr);
atl1c_hash_set(hw, hash_value);
}
}
static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
} else {
/* disable VLAN tag insert/strip */
*mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
}
}
static void atl1c_vlan_mode(struct net_device *netdev,
netdev_features_t features)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
u32 mac_ctrl_data = 0;
if (netif_msg_pktdata(adapter))
dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
atl1c_irq_disable(adapter);
AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
__atl1c_vlan_mode(features, &mac_ctrl_data);
AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
atl1c_irq_enable(adapter);
}
static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
if (netif_msg_pktdata(adapter))
dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
}
/*
* atl1c_set_mac_addr - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
*
* Returns 0 on success, negative on failure
*/
static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
if (netif_running(netdev))
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atl1c_hw_set_mac_addr(&adapter->hw);
return 0;
}
static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
struct net_device *dev)
{
int mtu = dev->mtu;
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
}
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable, make sure the tx flag is always in the same state as rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
else
features &= ~NETIF_F_HW_VLAN_TX;
if (netdev->mtu > MAX_TSO_FRAME_SIZE)
features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
return features;
}
static int atl1c_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1c_vlan_mode(netdev, features);
return 0;
}
/*
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns 0 on success, negative on failure
*/
static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
int old_mtu = netdev->mtu;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
if (old_mtu != new_mtu && netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
netdev->mtu = new_mtu;
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
atl1c_down(adapter);
netdev_update_features(netdev);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
u32 phy_data;
AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
phy_data |= 0x10000000;
AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
}
}
return 0;
}
/*
* caller should hold mdio_lock
*/
static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
u16 result;
atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
return result;
}
static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
int reg_num, int val)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}
/*
* atl1c_mii_ioctl - handle MII register ioctls
* @netdev: network interface device structure
* @ifr: ifreq structure carrying the MII register request
* @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
*/
static int atl1c_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int retval = 0;
if (!netif_running(netdev))
return -EINVAL;
spin_lock_irqsave(&adapter->mdio_lock, flags);
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = 0;
break;
case SIOCGMIIREG:
if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
&data->val_out)) {
retval = -EIO;
goto out;
}
break;
case SIOCSMIIREG:
if (data->reg_num & ~(0x1F)) {
retval = -EFAULT;
goto out;
}
dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x",
data->reg_num, data->val_in);
if (atl1c_write_phy_reg(&adapter->hw,
data->reg_num, data->val_in)) {
retval = -EIO;
goto out;
}
break;
default:
retval = -EOPNOTSUPP;
break;
}
out:
spin_unlock_irqrestore(&adapter->mdio_lock, flags);
return retval;
}
/*
* atl1c_ioctl - generic ioctl entry point
* @netdev: network interface device structure
* @ifr: ifreq structure from user space
* @cmd: ioctl command
*/
static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
return atl1c_mii_ioctl(netdev, ifr, cmd);
default:
return -EOPNOTSUPP;
}
}
/*
* atl1c_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
*/
static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter)
{
return 0;
}
static void atl1c_set_mac_type(struct atl1c_hw *hw)
{
switch (hw->device_id) {
case PCI_DEVICE_ID_ATTANSIC_L2C:
hw->nic_type = athr_l2c;
break;
case PCI_DEVICE_ID_ATTANSIC_L1C:
hw->nic_type = athr_l1c;
break;
case PCI_DEVICE_ID_ATHEROS_L2C_B:
hw->nic_type = athr_l2c_b;
break;
case PCI_DEVICE_ID_ATHEROS_L2C_B2:
hw->nic_type = athr_l2c_b2;
break;
case PCI_DEVICE_ID_ATHEROS_L1D:
hw->nic_type = athr_l1d;
break;
case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
hw->nic_type = athr_l1d_2;
break;
default:
break;
}
}
static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
{
u32 phy_status_data;
u32 link_ctrl_data;
atl1c_set_mac_type(hw);
AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
ATL1C_TXQ_MODE_ENHANCE;
if (link_ctrl_data & LINK_CTRL_L0S_EN)
hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
if (link_ctrl_data & LINK_CTRL_L1_EN)
hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
if (hw->nic_type == athr_l1c ||
hw->nic_type == athr_l1d ||
hw->nic_type == athr_l1d_2)
hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
return 0;
}
/*
* atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
* @adapter: board private structure to initialize
*
* atl1c_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
*/
static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
u32 revision;
adapter->wol = 0;
device_set_wakeup_enable(&pdev->dev, false);
adapter->link_speed = SPEED_0;
adapter->link_duplex = FULL_DUPLEX;
adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
adapter->tpd_ring[0].count = 1024;
adapter->rfd_ring[0].count = 512;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
hw->revision_id = revision & 0xFF;
/* before link up, we assume hibernate is true */
hw->hibernate = true;
hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
if (atl1c_setup_mac_funcs(hw) != 0) {
dev_err(&pdev->dev, "set mac function pointers failed\n");
return -1;
}
hw->intr_mask = IMR_NORMAL_MASK;
hw->phy_configured = false;
hw->preamble_len = 7;
hw->max_frame_size = adapter->netdev->mtu;
if (adapter->num_rx_queues < 2) {
hw->rss_type = atl1c_rss_disable;
hw->rss_mode = atl1c_rss_mode_disable;
} else {
hw->rss_type = atl1c_rss_ipv4;
hw->rss_mode = atl1c_rss_mul_que_mul_int;
hw->rss_hash_bits = 16;
}
hw->autoneg_advertised = ADVERTISED_Autoneg;
hw->indirect_tab = 0xE4E4E4E4;
hw->base_cpu = 0;
hw->ict = 50000; /* 100ms */
hw->smb_timer = 200000; /* 400ms */
hw->cmb_tpd = 4;
hw->cmb_tx_timer = 1; /* 2 us */
hw->rx_imt = 200;
hw->tx_imt = 1000;
hw->tpd_burst = 5;
hw->rfd_burst = 8;
hw->dma_order = atl1c_dma_ord_out;
hw->dmar_block = atl1c_dma_req_1024;
hw->dmaw_block = atl1c_dma_req_1024;
hw->dmar_dly_cnt = 15;
hw->dmaw_dly_cnt = 4;
if (atl1c_alloc_queues(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
/* TODO */
atl1c_set_rxbufsize(adapter, adapter->netdev);
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
return 0;
}
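/*
* Release a single DMA buffer: unmap it (single or page mapping, as
* recorded in the buffer flags), free the attached skb using the
* IRQ-safe variant when called from interrupt context, and mark the
* slot free again.
*/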
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
struct atl1c_buffer *buffer_info, int in_irq)
{
u16 pci_direction;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
return;
if (buffer_info->dma) {
if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
pci_direction = PCI_DMA_FROMDEVICE;
else
pci_direction = PCI_DMA_TODEVICE;
if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
pci_unmap_single(pdev, buffer_info->dma,
buffer_info->length, pci_direction);
else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
pci_unmap_page(pdev, buffer_info->dma,
buffer_info->length, pci_direction);
}
if (buffer_info->skb) {
if (in_irq)
dev_kfree_skb_irq(buffer_info->skb);
else
dev_kfree_skb(buffer_info->skb);
}
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
}
/*
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
atl1c_clean_buffer(pdev, buffer_info, 0);
}
/* Zero out the Tx descriptors */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
ring_count);
atomic_set(&tpd_ring->next_to_clean, 0);
tpd_ring->next_to_use = 0;
}
/*
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
{
struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
int i, j;
for (i = 0; i < adapter->num_rx_queues; i++) {
for (j = 0; j < rfd_ring[i].count; j++) {
buffer_info = &rfd_ring[i].buffer_info[j];
atl1c_clean_buffer(pdev, buffer_info, 0);
}
/* zero out the descriptor ring */
memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
rfd_ring[i].next_to_clean = 0;
rfd_ring[i].next_to_use = 0;
rrd_ring[i].next_to_use = 0;
rrd_ring[i].next_to_clean = 0;
}
}
/*
* Read / Write Ptr Initialize:
*/
static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
{
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_buffer *buffer_info;
int i, j;
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].next_to_use = 0;
atomic_set(&tpd_ring[i].next_to_clean, 0);
buffer_info = tpd_ring[i].buffer_info;
for (j = 0; j < tpd_ring->count; j++)
ATL1C_SET_BUFFER_STATE(&buffer_info[j],
ATL1C_BUFFER_FREE);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
rfd_ring[i].next_to_use = 0;
rfd_ring[i].next_to_clean = 0;
rrd_ring[i].next_to_use = 0;
rrd_ring[i].next_to_clean = 0;
for (j = 0; j < rfd_ring[i].count; j++) {
buffer_info = &rfd_ring[i].buffer_info[j];
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
}
}
}
/*
* atl1c_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
* Free all transmit and receive software resources
*/
static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
pci_free_consistent(pdev, adapter->ring_header.size,
adapter->ring_header.desc,
adapter->ring_header.dma);
adapter->ring_header.desc = NULL;
/* Note: just free tpd_ring.buffer_info;
* it contains rfd_ring.buffer_info as well, so do not free that separately */
if (adapter->tpd_ring[0].buffer_info) {
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
}
/*
* atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
* Return 0 on success, negative on failure
*/
static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_ring_header *ring_header = &adapter->ring_header;
int num_rx_queues = adapter->num_rx_queues;
int size;
int i;
int count = 0;
int rx_desc_count = 0;
u32 offset = 0;
rrd_ring[0].count = rfd_ring[0].count;
for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
tpd_ring[i].count = tpd_ring[0].count;
for (i = 1; i < adapter->num_rx_queues; i++)
rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
/* 2 TPD queues: one high priority queue,
* the other a normal priority queue */
size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
rfd_ring->count * num_rx_queues);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
if (unlikely(!tpd_ring->buffer_info)) {
dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
size);
goto err_nomem;
}
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
for (i = 0; i < num_rx_queues; i++) {
rfd_ring[i].buffer_info =
(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
count += rfd_ring[i].count;
rx_desc_count += rfd_ring[i].count;
}
/*
* real ring DMA buffer
* each ring/block may need up to 8 bytes for alignment, hence the
* additional bytes tacked onto the end.
*/
ring_header->size = size =
sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
sizeof(struct atl1c_hw_stats) +
8 * 4 + 8 * 2 * num_rx_queues;
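/*
* The block is carved up below in this order, each piece rounded up
* to an 8 byte boundary: the TPD rings (normal and high priority),
* then the RFD rings, then the RRD rings, then the SMB statistics
* block.
*/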
ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
&ring_header->dma);
if (unlikely(!ring_header->desc)) {
dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
goto err_nomem;
}
memset(ring_header->desc, 0, ring_header->size);
/* init TPD ring */
tpd_ring[0].dma = roundup(ring_header->dma, 8);
offset = tpd_ring[0].dma - ring_header->dma;
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].dma = ring_header->dma + offset;
tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
tpd_ring[i].size =
sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
offset += roundup(tpd_ring[i].size, 8);
}
/* init RFD ring */
for (i = 0; i < num_rx_queues; i++) {
rfd_ring[i].dma = ring_header->dma + offset;
rfd_ring[i].desc = (u8 *) ring_header->desc + offset;
rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
rfd_ring[i].count;
offset += roundup(rfd_ring[i].size, 8);
}
/* init RRD ring */
for (i = 0; i < num_rx_queues; i++) {
rrd_ring[i].dma = ring_header->dma + offset;
rrd_ring[i].desc = (u8 *) ring_header->desc + offset;
rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
rrd_ring[i].count;
offset += roundup(rrd_ring[i].size, 8);
}
adapter->smb.dma = ring_header->dma + offset;
adapter->smb.smb = (u8 *)ring_header->desc + offset;
return 0;
err_nomem:
kfree(tpd_ring->buffer_info);
return -ENOMEM;
}
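/*
* Program the DMA base addresses and sizes of the TPD, RFD and RRD
* rings plus the CMB/SMB blocks into the hardware, then latch all of
* them with the write to REG_LOAD_PTR at the end.
*/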
static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *)
adapter->rfd_ring;
struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
adapter->rrd_ring;
struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
adapter->tpd_ring;
struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
int i;
u32 data;
/* TPD */
AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
(u32)((tpd_ring[atl1c_trans_normal].dma &
AT_DMA_HI_ADDR_MASK) >> 32));
/* just enable normal priority TX queue */
AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO,
(u32)(tpd_ring[atl1c_trans_normal].dma &
AT_DMA_LO_ADDR_MASK));
AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO,
(u32)(tpd_ring[atl1c_trans_high].dma &
AT_DMA_LO_ADDR_MASK));
AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
(u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
/* RFD */
AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
(u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
for (i = 0; i < adapter->num_rx_queues; i++)
AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i],
(u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
rfd_ring[0].count & RFD_RING_SIZE_MASK);
AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
/* RRD */
for (i = 0; i < adapter->num_rx_queues; i++)
AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i],
(u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
(rrd_ring[0].count & RRD_RING_SIZE_MASK));
/* CMB */
AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
/* SMB */
AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
if (hw->nic_type == athr_l2c_b) {
AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
}
if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
/* Power Saving for L2c_B */
AT_READ_REG(hw, REG_SERDES_LOCK, &data);
data |= SERDES_MAC_CLK_SLOWDOWN;
data |= SERDES_PYH_CLK_SLOWDOWN;
AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
}
/* Load all of the base addresses above */
AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
}
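/*
* Set up the TX queue: clamp the DMA read/write burst sizes to what
* the PCIe device control register advertises, then program the TPD
* burst count, enhanced mode and TXF burst number.
*/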
static void atl1c_configure_tx(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
u32 dev_ctrl_data;
u32 max_pay_load;
u16 tx_offload_thresh;
u32 txq_ctrl_data;
u32 max_pay_load_data;
tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
(tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
TXQ_NUM_TPD_BURST_SHIFT;
if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
max_pay_load_data >>= 1;
txq_ctrl_data |= max_pay_load_data;
AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
}
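/*
* Set up the RX queue: RFD burst count, the RSS hash types and mode
* when RSS is enabled, and the ASPM throughput limit when ASPM
* monitoring is active.
*/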
static void atl1c_configure_rx(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
u32 rxq_ctrl_data;
rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
RXQ_RFD_BURST_NUM_SHIFT;
if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
if (hw->rss_type == atl1c_rss_ipv4)
rxq_ctrl_data |= RSS_HASH_IPV4;
if (hw->rss_type == atl1c_rss_ipv4_tcp)
rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
if (hw->rss_type == atl1c_rss_ipv6)
rxq_ctrl_data |= RSS_HASH_IPV6;
if (hw->rss_type == atl1c_rss_ipv6_tcp)
rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
if (hw->rss_type != atl1c_rss_disable)
rxq_ctrl_data |= RRS_HASH_CTRL_EN;
rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
RSS_MODE_SHIFT;
rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
RSS_HASH_BITS_SHIFT;
if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
}
static void atl1c_configure_rss(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
}
static void atl1c_configure_dma(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
u32 dma_ctrl_data;
dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI;
if (hw->ctrl_flags & ATL1C_CMB_ENABLE)
dma_ctrl_data |= DMA_CTRL_CMB_EN;
if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
dma_ctrl_data |= DMA_CTRL_SMB_EN;
else
dma_ctrl_data |= MAC_CTRL_SMB_DIS;
switch (hw->dma_order) {
case atl1c_dma_ord_in:
dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
break;
case atl1c_dma_ord_enh:
dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
break;
case atl1c_dma_ord_out:
dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
break;
default:
break;
}
dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
}
/*
* Stop the mac, transmit and receive units
* hw - Struct containing variables accessed by shared code
* return : 0 or idle status (if error)
*/
static int atl1c_stop_mac(struct atl1c_hw *hw)
{
u32 data;
AT_READ_REG(hw, REG_RXQ_CTRL, &data);
data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
RXQ3_CTRL_EN | RXQ_CTRL_EN);
AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
AT_READ_REG(hw, REG_TXQ_CTRL, &data);
data &= ~TXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
atl1c_wait_until_idle(hw);
AT_READ_REG(hw, REG_MAC_CTRL, &data);
data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
AT_WRITE_REG(hw, REG_MAC_CTRL, data);
return (int)atl1c_wait_until_idle(hw);
}
static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
{
u32 data;
AT_READ_REG(hw, REG_RXQ_CTRL, &data);
switch (hw->adapter->num_rx_queues) {
case 4:
data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
break;
case 3:
data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
break;
case 2:
data |= RXQ1_CTRL_EN;
break;
default:
break;
}
data |= RXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
}
static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw)
{
u32 data;
AT_READ_REG(hw, REG_TXQ_CTRL, &data);
data |= TXQ_CTRL_EN;
AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
}
/*
* Reset the transmit and receive units; mask and clear all interrupts.
* hw - Struct containing variables accessed by shared code
* return : 0 or idle status (if error)
*/
static int atl1c_reset_mac(struct atl1c_hw *hw)
{
struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 master_ctrl_data = 0;
AT_WRITE_REG(hw, REG_IMR, 0);
AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
atl1c_stop_mac(hw);
/*
* Issue Soft Reset to the MAC. This will reset the chip's
* transmit, receive and DMA engines. It will not affect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
& 0xFFFF));
AT_WRITE_FLUSH(hw);
msleep(10);
/* Wait at least 10ms for all modules to be idle */
if (atl1c_wait_until_idle(hw)) {
dev_err(&pdev->dev,
"MAC state machine did not become idle within 10ms\n");
return -1;
}
return 0;
}
static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
{
u32 pm_ctrl_data;
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
}
/*
* Set ASPM state.
* Enable/disable L0s/L1 depending on link state.
*/
static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
{
u32 pm_ctrl_data;
u32 link_ctrl_data;
u32 link_l1_timer = 0xF;
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
PM_CTRL_LCKDET_TIMER_SHIFT);
pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
link_ctrl_data |= LINK_CTRL_EXT_SYNC;
}
AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
PM_CTRL_PM_REQ_TIMER_SHIFT);
pm_ctrl_data |= AT_ASPM_L1_TIMER <<
PM_CTRL_PM_REQ_TIMER_SHIFT;
pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
pm_ctrl_data &= ~PM_CTRL_HOTRST;
pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
}
pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
if (linkup) {
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
if (hw->nic_type == athr_l2c_b)
if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
if (hw->adapter->link_speed == SPEED_100 ||
hw->adapter->link_speed == SPEED_1000) {
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
if (hw->nic_type == athr_l2c_b)
link_l1_timer = 7;
else if (hw->nic_type == athr_l2c_b2 ||
hw->nic_type == athr_l1d_2)
link_l1_timer = 4;
pm_ctrl_data |= link_l1_timer <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT;
}
} else {
pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
}
} else {
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
else
pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
}
AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
return;
}
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 mac_ctrl_data;
mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
if (adapter->link_duplex == FULL_DUPLEX) {
hw->mac_duplex = true;
mac_ctrl_data |= MAC_CTRL_DUPLX;
}
if (adapter->link_speed == SPEED_1000)
hw->mac_speed = atl1c_mac_speed_1000;
else
hw->mac_speed = atl1c_mac_speed_10_100;
mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
MAC_CTRL_SPEED_SHIFT;
mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
MAC_CTRL_PRMLEN_SHIFT);
__atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
mac_ctrl_data |= MAC_CTRL_BC_EN;
if (netdev->flags & IFF_PROMISC)
mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
if (netdev->flags & IFF_ALLMULTI)
mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
hw->nic_type == athr_l1d_2) {
mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
}
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
}
/*
* atl1c_configure - Configure Transmit & Receive Units after Reset
* @adapter: board private structure
*
* Configure the Tx/Rx units of the MAC after a reset.
*/
static int atl1c_configure(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
u32 master_ctrl_data = 0;
u32 intr_modrt_data;
u32 data;
/* clear interrupt status */
AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
/* Clear any WOL status */
AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
/* set Interrupt Clear Timer
* HW will re-assert the interrupt to the system if software has not
* acknowledged it within the configured time.
*/
data = CLK_GATING_EN_ALL;
if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
if (hw->nic_type == athr_l2c_b)
data &= ~CLK_GATING_RXMAC_EN;
} else
data = 0;
AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
hw->ict & INT_RETRIG_TIMER_MASK);
atl1c_configure_des_ring(adapter);
if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
IRQ_MODRT_TX_TIMER_SHIFT;
intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
IRQ_MODRT_RX_TIMER_SHIFT;
AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
master_ctrl_data |=
MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
}
if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
AT_WRITE_REG(hw, REG_CMB_TPD_THRESH,
hw->cmb_tpd & CMB_TPD_THRESH_MASK);
AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
}
if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
hw->smb_timer & SMB_STAT_TIMER_MASK);
/* set MTU */
AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
/* HDS (header-data split), disable it */
AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
atl1c_configure_tx(adapter);
atl1c_configure_rx(adapter);
atl1c_configure_rss(adapter);
atl1c_configure_dma(adapter);
return 0;
}
static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
{
u16 hw_reg_addr = 0;
unsigned long *stats_item = NULL;
u32 data;
/* update rx status */
hw_reg_addr = REG_MAC_RX_STATUS_BIN;
stats_item = &adapter->hw_stats.rx_ok;
while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
*stats_item += data;
stats_item++;
hw_reg_addr += 4;
}
/* update tx status */
hw_reg_addr = REG_MAC_TX_STATUS_BIN;
stats_item = &adapter->hw_stats.tx_ok;
while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
*stats_item += data;
stats_item++;
hw_reg_addr += 4;
}
}
/*
* atl1c_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
* Returns the address of the device statistics structure.
* The statistics are actually updated from the timer callback.
*/
static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
struct net_device_stats *net_stats = &netdev->stats;
atl1c_update_hw_stats(adapter);
net_stats->rx_packets = hw_stats->rx_ok;
net_stats->tx_packets = hw_stats->tx_ok;
net_stats->rx_bytes = hw_stats->rx_byte_cnt;
net_stats->tx_bytes = hw_stats->tx_byte_cnt;
net_stats->multicast = hw_stats->rx_mcast;
net_stats->collisions = hw_stats->tx_1_col +
hw_stats->tx_2_col * 2 +
hw_stats->tx_late_col + hw_stats->tx_abort_col;
net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err +
hw_stats->rx_len_err + hw_stats->rx_sz_ov +
hw_stats->rx_rrd_ov + hw_stats->rx_align_err;
net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov;
net_stats->rx_length_errors = hw_stats->rx_len_err;
net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
net_stats->rx_frame_errors = hw_stats->rx_align_err;
net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov;
net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col +
hw_stats->tx_underrun + hw_stats->tx_trunc;
net_stats->tx_fifo_errors = hw_stats->tx_underrun;
net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
net_stats->tx_window_errors = hw_stats->tx_late_col;
return net_stats;
}
static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
{
u16 phy_data;
spin_lock(&adapter->mdio_lock);
atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
spin_unlock(&adapter->mdio_lock);
}
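/*
* Reclaim completed transmit buffers: read the hardware consumer index
* for this queue from the mailbox register, free every buffer up to it,
* and wake the netdev queue if it was stopped while the link is up.
*/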
static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
&adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 hw_next_to_clean;
u16 shift;
u32 data;
if (type == atl1c_trans_high)
shift = MB_HTPD_CONS_IDX_SHIFT;
else
shift = MB_NTPD_CONS_IDX_SHIFT;
AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data);
hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
atl1c_clean_buffer(pdev, buffer_info, 1);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
}
if (netif_queue_stopped(adapter->netdev) &&
netif_carrier_ok(adapter->netdev)) {
netif_wake_queue(adapter->netdev);
}
return true;
}
/*
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
*/
static irqreturn_t atl1c_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct atl1c_hw *hw = &adapter->hw;
int max_ints = AT_MAX_INT_WORK;
int handled = IRQ_NONE;
u32 status;
u32 reg_data;
do {
AT_READ_REG(hw, REG_ISR, &reg_data);
status = reg_data & hw->intr_mask;
if (status == 0 || (status & ISR_DIS_INT) != 0) {
if (max_ints != AT_MAX_INT_WORK)
handled = IRQ_HANDLED;
break;
}
/* link event */
if (status & ISR_GPHY)
atl1c_clear_phy_int(adapter);
/* Ack ISR */
AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
if (status & ISR_RX_PKT) {
if (likely(napi_schedule_prep(&adapter->napi))) {
hw->intr_mask &= ~ISR_RX_PKT;
AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
__napi_schedule(&adapter->napi);
}
}
if (status & ISR_TX_PKT)
atl1c_clean_tx_irq(adapter, atl1c_trans_normal);
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
if (status & ISR_ERROR) {
if (netif_msg_hw(adapter))
dev_err(&pdev->dev,
"atl1c hardware error (status = 0x%x)\n",
status & ISR_ERROR);
/* reset MAC */
set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
schedule_work(&adapter->common_task);
return IRQ_HANDLED;
}
if (status & ISR_OVER)
if (netif_msg_intr(adapter))
dev_warn(&pdev->dev,
"TX/RX overflow (status = 0x%x)\n",
status & ISR_OVER);
/* link event */
if (status & (ISR_GPHY | ISR_MANUAL)) {
netdev->stats.tx_carrier_errors++;
atl1c_link_chg_event(adapter);
break;
}
} while (--max_ints > 0);
/* re-enable interrupts */
AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
return handled;
}
static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
{
/*
* The pid field in the RRS is not always correct, so we
* cannot tell whether the packet is fragmented or not;
* report CHECKSUM_NONE to the kernel instead.
*/
skb_checksum_none_assert(skb);
}
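/*
* Refill the RFD ring for one queue: allocate an skb for every free
* slot, map it for DMA and write its address into the free descriptor,
* then publish the new producer index to the hardware mailbox.
*/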
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid];
struct pci_dev *pdev = adapter->pdev;
struct atl1c_buffer *buffer_info, *next_info;
struct sk_buff *skb;
void *vir_addr = NULL;
u16 num_alloc = 0;
u16 rfd_next_to_use, next_next;
struct atl1c_rx_free_desc *rfd_desc;
next_next = rfd_next_to_use = rfd_ring->next_to_use;
if (++next_next == rfd_ring->count)
next_next = 0;
buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
next_info = &rfd_ring->buffer_info[next_next];
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
break;
}
/*
* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
* the 14 byte MAC header is removed
*/
vir_addr = skb->data;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
buffer_info->dma = pci_map_single(pdev, vir_addr,
buffer_info->length,
PCI_DMA_FROMDEVICE);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_FROMDEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_next_to_use = next_next;
if (++next_next == rfd_ring->count)
next_next = 0;
buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
next_info = &rfd_ring->buffer_info[next_next];
num_alloc++;
}
if (num_alloc) {
/* TODO: update mailbox here */
wmb();
rfd_ring->next_to_use = rfd_next_to_use;
AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid],
rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
}
return num_alloc;
}
static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
struct atl1c_recv_ret_status *rrs, u16 num)
{
u16 i;
/* the relationship between rrd and rfd is one-to-one */
for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
rrd_ring->next_to_clean)) {
rrs->word3 &= ~RRS_RXD_UPDATED;
if (++rrd_ring->next_to_clean == rrd_ring->count)
rrd_ring->next_to_clean = 0;
}
}
static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
struct atl1c_recv_ret_status *rrs, u16 num)
{
u16 i;
u16 rfd_index;
struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
RRS_RX_RFD_INDEX_MASK;
for (i = 0; i < num; i++) {
buffer_info[rfd_index].skb = NULL;
ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
ATL1C_BUFFER_FREE);
if (++rfd_index == rfd_ring->count)
rfd_index = 0;
}
rfd_ring->next_to_clean = rfd_index;
}
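/*
* NAPI receive path for one queue: walk the RRD ring while descriptors
* are valid, drop packets flagged with errors, unmap good buffers and
* hand the skbs (with any VLAN tag) to the stack, then refill the RFD
* ring with fresh buffers.
*/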
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
u16 rfd_num, rfd_index;
u16 count = 0;
u16 length;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que];
struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que];
struct sk_buff *skb;
struct atl1c_recv_ret_status *rrs;
struct atl1c_buffer *buffer_info;
while (1) {
if (*work_done >= work_to_do)
break;
rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
RRS_RX_RFD_CNT_MASK;
if (unlikely(rfd_num != 1))
/* TODO: support multiple RFDs */
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev,
"Multiple RFDs not supported yet!\n");
goto rrs_checked;
} else {
break;
}
rrs_checked:
atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev,
"wrong packet! rrs word3 is %x\n",
rrs->word3);
continue;
}
length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
RRS_PKT_SIZE_MASK);
/* Good Receive */
if (likely(rfd_num == 1)) {
rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
RRS_RX_RFD_INDEX_MASK;
buffer_info = &rfd_ring->buffer_info[rfd_index];
pci_unmap_single(pdev, buffer_info->dma,
buffer_info->length, PCI_DMA_FROMDEVICE);
skb = buffer_info->skb;
} else {
/* TODO */
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev,
"Multiple RFDs not supported yet!\n");
break;
}
atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
skb_put(skb, length - ETH_FCS_LEN);
skb->protocol = eth_type_trans(skb, netdev);
atl1c_rx_checksum(adapter, skb, rrs);
if (rrs->word3 & RRS_VLAN_INS) {
u16 vlan;
AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
vlan = le16_to_cpu(vlan);
__vlan_hwaccel_put_tag(skb, vlan);
}
netif_receive_skb(skb);
(*work_done)++;
count++;
}
if (count)
atl1c_alloc_rx_buffer(adapter, que);
}
/*
* atl1c_clean - NAPI Rx polling callback
* @napi: napi polling context
* @budget: maximum number of packets to process
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
struct atl1c_adapter *adapter =
container_of(napi, struct atl1c_adapter, napi);
int work_done = 0;
/* Keep link state information with original netdev */
if (!netif_carrier_ok(adapter->netdev))
goto quit_polling;
/* just enable one RXQ */
atl1c_clean_rx_irq(adapter, 0, &work_done, budget);
if (work_done < budget) {
quit_polling:
napi_complete(napi);
adapter->hw.intr_mask |= ISR_RX_PKT;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
}
return work_done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void atl1c_netpoll(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
disable_irq(adapter->pdev->irq);
atl1c_intr(adapter->pdev->irq, netdev);
enable_irq(adapter->pdev->irq);
}
#endif
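/*
* Number of free TPD slots in the ring. One slot is always left unused
* so that a full ring can be distinguished from an empty one when the
* producer and consumer indexes are equal.
*/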
static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
{
struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
u16 next_to_use = 0;
u16 next_to_clean = 0;
next_to_clean = atomic_read(&tpd_ring->next_to_clean);
next_to_use = tpd_ring->next_to_use;
return (u16)(next_to_clean > next_to_use) ?
(next_to_clean - next_to_use - 1) :
(tpd_ring->count + next_to_clean - next_to_use - 1);
}
/*
* get next usable tpd
* Note: the caller should call atl1c_tpd_avail() first to make sure
* there are enough TPDs available
*/
static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_tpd_desc *tpd_desc;
u16 next_to_use = 0;
next_to_use = tpd_ring->next_to_use;
if (++tpd_ring->next_to_use == tpd_ring->count)
tpd_ring->next_to_use = 0;
tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
return tpd_desc;
}
static struct atl1c_buffer *
atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
{
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
return &tpd_ring->buffer_info[tpd -
(struct atl1c_tpd_desc *)tpd_ring->desc];
}
/* Calculate the number of transmit packet descriptors (TPDs) needed */
static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
{
u16 tpd_req;
u16 proto_hdr_len = 0;
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
tpd_req++;
}
return tpd_req;
}
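/*
* Fill the TPD with offload information. For TCPv4 TSO the IP and TCP
* checksums are primed for hardware LSO; TCPv6 TSO additionally needs
* an extended TPD. For plain CHECKSUM_PARTIAL packets the payload and
* checksum offsets are written in units of two bytes.
*/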
static int atl1c_tso_csum(struct atl1c_adapter *adapter,
struct sk_buff *skb,
struct atl1c_tpd_desc **tpd,
enum atl1c_trans_queue type)
{
struct pci_dev *pdev = adapter->pdev;
u8 hdr_len;
u32 real_len;
unsigned short offload_type;
int err;
if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (unlikely(err))
return -1;
}
offload_type = skb_shinfo(skb)->gso_type;
if (offload_type & SKB_GSO_TCPV4) {
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only checksum offload is needed */
if (netif_msg_tx_queued(adapter))
dev_warn(&pdev->dev,
"IPV4 tso with zero data??\n");
goto check_sum;
} else {
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(
ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
(*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
}
}
if (offload_type & SKB_GSO_TCPV6) {
struct atl1c_tpd_ext_desc *etpd =
*(struct atl1c_tpd_ext_desc **)(tpd);
memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
*tpd = atl1c_get_tpd(adapter, type);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only checksum offload is needed */
if (netif_msg_tx_queued(adapter))
dev_warn(&pdev->dev,
"IPV6 tso with zero data??\n");
goto check_sum;
} else
tcp_hdr(skb)->check = ~csum_ipv6_magic(
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
etpd->pkt_len = cpu_to_le32(skb->len);
(*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
}
(*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
(*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
TPD_TCPHDR_OFFSET_SHIFT;
(*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
TPD_MSS_SHIFT;
return 0;
}
check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
if (netif_msg_tx_err(adapter))
dev_err(&adapter->pdev->dev,
"payload offset must be an even number\n");
return -1;
} else {
css = cso + skb->csum_offset;
(*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
TPD_PLOADOFFSET_SHIFT;
(*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
TPD_CCSUM_OFFSET_SHIFT;
(*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
}
}
return 0;
}
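/*
* Map the skb for DMA: with TSO the protocol headers get their own TPD,
* the remaining linear data gets another, and each page fragment gets
* one more. The last TPD is marked EOP and keeps the skb pointer so the
* skb is freed exactly once when the descriptor is cleaned.
*/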
static void atl1c_tx_map(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
enum atl1c_trans_queue type)
{
struct atl1c_tpd_desc *use_tpd = NULL;
struct atl1c_buffer *buffer_info = NULL;
u16 buf_len = skb_headlen(skb);
u16 map_len = 0;
u16 mapped_len = 0;
u16 hdr_len = 0;
u16 nr_frags;
u16 f;
int tso;
nr_frags = skb_shinfo(skb)->nr_frags;
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = map_len;
buffer_info->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
if (mapped_len < buf_len) {
/* mapped_len == 0 means we should use the first tpd,
which is given by the caller */
if (mapped_len == 0)
use_tpd = tpd;
else {
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
}
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = buf_len - mapped_len;
buffer_info->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
buffer_info->length, PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = skb_frag_size(frag);
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, 0,
buffer_info->length,
DMA_TO_DEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
/* The last tpd */
use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
/* The last buffer_info contains the skb address,
so the skb will be freed after unmapping */
buffer_info->skb = skb;
}
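/*
* Publish the new producer index through the shared mailbox register.
* Judging by the masks and shift below, the 32-bit REG_MB_PRIO_PROD_IDX
* register carries the high priority index in its low 16 bits and the
* normal priority index in its high 16 bits.
*/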
static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
{
struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
u32 prod_data;
AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
switch (type) {
case atl1c_trans_high:
prod_data &= 0xFFFF0000;
prod_data |= tpd_ring->next_to_use & 0xFFFF;
break;
case atl1c_trans_normal:
prod_data &= 0x0000FFFF;
prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
break;
default:
break;
}
wmb();
AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
}
static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
u16 tpd_req = 1;
struct atl1c_tpd_desc *tpd;
enum atl1c_trans_queue type = atl1c_trans_normal;
if (test_bit(__AT_DOWN, &adapter->flags)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
tpd_req = atl1c_cal_tpd_req(skb);
if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
if (netif_msg_pktdata(adapter))
dev_info(&adapter->pdev->dev, "tx locked\n");
return NETDEV_TX_LOCKED;
}
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
tpd = atl1c_get_tpd(adapter, type);
/* do TSO and check sum */
if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (unlikely(vlan_tx_tag_present(skb))) {
u16 vlan = vlan_tx_tag_get(skb);
__le16 tag;
vlan = cpu_to_le16(vlan);
AT_VLAN_TO_TAG(vlan, tag);
tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
tpd->vlan_tag = tag;
}
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
atl1c_tx_map(adapter, skb, tpd, type);
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
static void atl1c_free_irq(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
}
static int atl1c_request_irq(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
int flags = 0;
int err = 0;
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err) {
if (netif_msg_ifup(adapter))
dev_err(&pdev->dev,
"Unable to allocate MSI interrupt Error: %d\n",
err);
adapter->have_msi = false;
} else
netdev->irq = pdev->irq;
if (!adapter->have_msi)
flags |= IRQF_SHARED;
err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
netdev->name, netdev);
if (err) {
if (netif_msg_ifup(adapter))
dev_err(&pdev->dev,
"Unable to allocate interrupt Error: %d\n",
err);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
return err;
}
if (netif_msg_ifup(adapter))
dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
return err;
}
static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
int err;
int i;
netif_carrier_off(netdev);
atl1c_init_ring_ptrs(adapter);
atl1c_set_multi(netdev);
atl1c_restore_vlan(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
num = atl1c_alloc_rx_buffer(adapter, i);
if (unlikely(num == 0)) {
err = -ENOMEM;
goto err_alloc_rx;
}
}
if (atl1c_configure(adapter)) {
err = -EIO;
goto err_up;
}
err = atl1c_request_irq(adapter);
if (unlikely(err))
goto err_up;
clear_bit(__AT_DOWN, &adapter->flags);
napi_enable(&adapter->napi);
atl1c_irq_enable(adapter);
atl1c_check_link_status(adapter);
netif_start_queue(netdev);
return err;
err_up:
err_alloc_rx:
atl1c_clean_rx_ring(adapter);
return err;
}
static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
atl1c_del_timer(adapter);
adapter->work_event = 0; /* clear all events */
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
netif_carrier_off(netdev);
napi_disable(&adapter->napi);
atl1c_irq_disable(adapter);
atl1c_free_irq(adapter);
/* reset MAC to disable all RX/TX */
atl1c_reset_mac(&adapter->hw);
msleep(1);
adapter->link_speed = SPEED_0;
adapter->link_duplex = -1;
atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
atl1c_clean_tx_ring(adapter, atl1c_trans_high);
atl1c_clean_rx_ring(adapter);
}
/*
* atl1c_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
*/
static int atl1c_open(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
int err;
/* disallow open during test */
if (test_bit(__AT_TESTING, &adapter->flags))
return -EBUSY;
/* allocate rx/tx dma buffer & descriptors */
err = atl1c_setup_ring_resources(adapter);
if (unlikely(err))
return err;
err = atl1c_up(adapter);
if (unlikely(err))
goto err_up;
if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
u32 phy_data;
AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
phy_data |= MDIO_AP_EN;
AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
}
return 0;
err_up:
atl1c_free_irq(adapter);
atl1c_free_ring_resources(adapter);
atl1c_reset_mac(&adapter->hw);
return err;
}
/*
* atl1c_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
*
* The close entry point is called when an interface is de-activated
* by the OS. The hardware is still under the driver's control, but
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
*/
static int atl1c_close(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
atl1c_down(adapter);
atl1c_free_ring_resources(adapter);
return 0;
}
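/*
* Prepare the adapter for system suspend: stop the interface, then
* either arm the wake-up logic (magic packet and/or link change, with
* the MAC receiver left enabled) or, when no wake-up source is
* configured, put the PHY into power saving mode and disable WOL
* entirely.
*/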
static int atl1c_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
u32 mac_ctrl_data = 0;
u32 master_ctrl_data = 0;
u32 wol_ctrl_data = 0;
u16 mii_intr_status_data = 0;
u32 wufc = adapter->wol;
atl1c_disable_l0s_l1(hw);
if (netif_running(netdev)) {
WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
atl1c_down(adapter);
}
netif_device_detach(netdev);
if (wufc)
if (atl1c_phy_power_saving(hw) != 0)
dev_dbg(&pdev->dev, "phy power saving failed");
AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
MAC_CTRL_PRMLEN_MASK) <<
MAC_CTRL_PRMLEN_SHIFT);
mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
mac_ctrl_data &= ~MAC_CTRL_DUPLX;
if (wufc) {
mac_ctrl_data |= MAC_CTRL_RX_EN;
if (adapter->link_speed == SPEED_1000 ||
adapter->link_speed == SPEED_0) {
mac_ctrl_data |= atl1c_mac_speed_1000 <<
MAC_CTRL_SPEED_SHIFT;
mac_ctrl_data |= MAC_CTRL_DUPLX;
} else
mac_ctrl_data |= atl1c_mac_speed_10_100 <<
MAC_CTRL_SPEED_SHIFT;
if (adapter->link_duplex == DUPLEX_FULL)
mac_ctrl_data |= MAC_CTRL_DUPLX;
/* turn on magic packet wol */
if (wufc & AT_WUFC_MAG)
wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
if (wufc & AT_WUFC_LNKC) {
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only a link-up event can wake the system */
if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
dev_dbg(&pdev->dev, "%s: write to phy "
"register failed.\n",
atl1c_driver_name);
}
}
/* clear phy interrupt */
atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
/* Config MAC Ctrl register */
__atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
/* a magic packet may be a broadcast, multicast or unicast frame */
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
dev_dbg(&pdev->dev,
"%s: suspend MAC=0x%x\n",
atl1c_driver_name, mac_ctrl_data);
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
GPHY_CTRL_EXT_RESET);
} else {
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
mac_ctrl_data |= MAC_CTRL_DUPLX;
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
hw->phy_configured = false; /* re-init PHY when resume */
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int atl1c_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
atl1c_phy_reset(&adapter->hw);
atl1c_reset_mac(&adapter->hw);
atl1c_phy_init(&adapter->hw);
#if 0
AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
pm_data &= ~PM_CTRLSTAT_PME_EN;
AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
#endif
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
return 0;
}
#endif
static void atl1c_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
atl1c_suspend(&pdev->dev);
pci_wake_from_d3(pdev, adapter->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
static const struct net_device_ops atl1c_netdev_ops = {
.ndo_open = atl1c_open,
.ndo_stop = atl1c_close,
.ndo_validate_addr = eth_validate_addr,
.ndo_start_xmit = atl1c_xmit_frame,
.ndo_set_mac_address = atl1c_set_mac_addr,
.ndo_set_rx_mode = atl1c_set_multi,
.ndo_change_mtu = atl1c_change_mtu,
.ndo_fix_features = atl1c_fix_features,
.ndo_set_features = atl1c_set_features,
.ndo_do_ioctl = atl1c_ioctl,
.ndo_tx_timeout = atl1c_tx_timeout,
.ndo_get_stats = atl1c_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1c_netpoll,
#endif
};
static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
{
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
netdev->irq = pdev->irq;
netdev->netdev_ops = &atl1c_netdev_ops;
netdev->watchdog_timeo = AT_TX_WATCHDOG;
atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */
netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_RX |
NETIF_F_TSO |
NETIF_F_TSO6;
netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_TX;
return 0;
}
/*
* atl1c_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1c_pci_tbl
*
* Returns 0 on success, negative on failure
*
* atl1c_probe initializes an adapter identified by a pci_dev structure.
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
static int __devinit atl1c_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl1c_adapter *adapter;
static int cards_found;
int err = 0;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev, "cannot enable PCI device\n");
return err;
}
/*
* The atl1c chip can DMA to 64-bit addresses, but it uses a single
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used at a time.
*
* Supporting 64-bit DMA on this hardware is more trouble than it's
* worth. It is far easier to limit to 32-bit DMA than update
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
(pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
err = pci_request_regions(pdev, atl1c_driver_name);
if (err) {
dev_err(&pdev->dev, "cannot obtain PCI resources\n");
goto err_pci_reg;
}
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
if (netdev == NULL) {
err = -ENOMEM;
goto err_alloc_etherdev;
}
err = atl1c_init_netdev(netdev, pdev);
if (err) {
dev_err(&pdev->dev, "init netdevice failed\n");
goto err_init_netdev;
}
adapter = netdev_priv(netdev);
adapter->bd_number = cards_found;
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.adapter = adapter;
adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
if (!adapter->hw.hw_addr) {
err = -EIO;
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_ioremap;
}
netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
/* init mii data */
adapter->mii.dev = netdev;
adapter->mii.mdio_read = atl1c_mdio_read;
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
(unsigned long)adapter);
/* setup the private structure */
err = atl1c_sw_init(adapter);
if (err) {
dev_err(&pdev->dev, "net device private data init failed\n");
goto err_sw_init;
}
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
/* Init GPHY as early as possible due to power saving issue */
atl1c_phy_reset(&adapter->hw);
err = atl1c_reset_mac(&adapter->hw);
if (err) {
err = -EIO;
goto err_reset;
}
/* reset the controller to
* put the device in a known good starting state */
err = atl1c_phy_init(&adapter->hw);
if (err) {
err = -EIO;
goto err_reset;
}
if (atl1c_read_mac_addr(&adapter->hw)) {
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
adapter->work_event = 0;
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "register netdevice failed\n");
goto err_register;
}
if (netif_msg_probe(adapter))
dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
cards_found++;
return 0;
err_reset:
err_register:
err_sw_init:
iounmap(adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
return err;
}
/*
* atl1c_remove - Device Removal Routine
* @pdev: PCI device information struct
*
* atl1c_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
*/
static void __devexit atl1c_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
atl1c_phy_disable(&adapter->hw);
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(netdev);
}
/*
* atl1c_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
atl1c_down(adapter);
pci_disable_device(pdev);
	/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/*
* atl1c_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the e1000_resume routine.
*/
static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
if (netif_msg_hw(adapter))
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1c_reset_mac(&adapter->hw);
return PCI_ERS_RESULT_RECOVERED;
}
/*
* atl1c_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
* second-half of the atl1c_resume routine.
*/
static void atl1c_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
if (atl1c_up(adapter)) {
if (netif_msg_hw(adapter))
dev_err(&pdev->dev,
"Cannot bring device back up after reset\n");
return;
}
}
netif_device_attach(netdev);
}
static struct pci_error_handlers atl1c_err_handler = {
.error_detected = atl1c_io_error_detected,
.slot_reset = atl1c_io_slot_reset,
.resume = atl1c_io_resume,
};
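/*
 * Editor's note (descriptive, not in the original source): the PCI error
 * recovery core invokes these callbacks in order -- error_detected() when a
 * bus error is reported, slot_reset() after the slot/link has been reset,
 * and resume() once traffic may flow again -- which is why the handlers
 * above mirror the driver's down/reset/up sequence.
 */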
static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
static struct pci_driver atl1c_driver = {
.name = atl1c_driver_name,
.id_table = atl1c_pci_tbl,
.probe = atl1c_probe,
.remove = __devexit_p(atl1c_remove),
.shutdown = atl1c_shutdown,
.err_handler = &atl1c_err_handler,
.driver.pm = &atl1c_pm_ops,
};
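/*
 * Editor's note (illustrative, not in the original source): trees that
 * provide the module_pci_driver() helper can replace the init/exit
 * boilerplate below with a single line:
 *
 *	module_pci_driver(atl1c_driver);
 *
 * The explicit routines are kept here as found in this tree.
 */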
/*
* atl1c_init_module - Driver Registration Routine
*
* atl1c_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
*/
static int __init atl1c_init_module(void)
{
return pci_register_driver(&atl1c_driver);
}
/*
* atl1c_exit_module - Driver Exit Cleanup Routine
*
* atl1c_exit_module is called just before the driver is removed
* from memory.
*/
static void __exit atl1c_exit_module(void)
{
pci_unregister_driver(&atl1c_driver);
}
module_init(atl1c_init_module);
module_exit(atl1c_exit_module);
| gpl-2.0 |
mathur/rohan.kernel.op3 | drivers/ssb/driver_mipscore.c | 3116 | 8573 | /*
* Sonics Silicon Backplane
* Broadcom MIPS core driver
*
* Copyright 2005, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/mtd/physmap.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
#include "ssb_private.h"
static const char * const part_probes[] = { "bcm47xxpart", NULL };
static struct physmap_flash_data ssb_pflash_data = {
.part_probe_types = part_probes,
};
static struct resource ssb_pflash_resource = {
.name = "ssb_pflash",
.flags = IORESOURCE_MEM,
};
struct platform_device ssb_pflash_dev = {
.name = "physmap-flash",
.dev = {
.platform_data = &ssb_pflash_data,
},
.resource = &ssb_pflash_resource,
.num_resources = 1,
};
static inline u32 mips_read32(struct ssb_mipscore *mcore,
u16 offset)
{
return ssb_read32(mcore->dev, offset);
}
static inline void mips_write32(struct ssb_mipscore *mcore,
u16 offset,
u32 value)
{
ssb_write32(mcore->dev, offset, value);
}
static const u32 ipsflag_irq_mask[] = {
0,
SSB_IPSFLAG_IRQ1,
SSB_IPSFLAG_IRQ2,
SSB_IPSFLAG_IRQ3,
SSB_IPSFLAG_IRQ4,
};
static const u32 ipsflag_irq_shift[] = {
0,
SSB_IPSFLAG_IRQ1_SHIFT,
SSB_IPSFLAG_IRQ2_SHIFT,
SSB_IPSFLAG_IRQ3_SHIFT,
SSB_IPSFLAG_IRQ4_SHIFT,
};
static inline u32 ssb_irqflag(struct ssb_device *dev)
{
u32 tpsflag = ssb_read32(dev, SSB_TPSFLAG);
if (tpsflag)
return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG;
else
/* not irq supported */
return 0x3f;
}
static struct ssb_device *find_device(struct ssb_device *rdev, int irqflag)
{
struct ssb_bus *bus = rdev->bus;
int i;
for (i = 0; i < bus->nr_devices; i++) {
struct ssb_device *dev;
dev = &(bus->devices[i]);
if (ssb_irqflag(dev) == irqflag)
return dev;
}
return NULL;
}
/* Get the MIPS IRQ assignment for a specified device.
* If unassigned, 0 is returned.
* If disabled, 5 is returned.
* If not supported, 6 is returned.
*/
unsigned int ssb_mips_irq(struct ssb_device *dev)
{
struct ssb_bus *bus = dev->bus;
struct ssb_device *mdev = bus->mipscore.dev;
u32 irqflag;
u32 ipsflag;
u32 tmp;
unsigned int irq;
irqflag = ssb_irqflag(dev);
if (irqflag == 0x3f)
return 6;
ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG);
for (irq = 1; irq <= 4; irq++) {
tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]);
if (tmp == irqflag)
break;
}
if (irq == 5) {
if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
irq = 0;
}
return irq;
}
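/*
 * Illustrative sketch (not part of the original file): how a caller might
 * act on the return convention documented above (0 = unassigned/shared,
 * 5 = disabled, 6 = not supported, 1-4 = dedicated line). The function name
 * is hypothetical.
 */
static void __maybe_unused ssb_mips_irq_example(struct ssb_device *dev)
{
	unsigned int irq = ssb_mips_irq(dev);

	if (irq == 6)
		ssb_dbg("core 0x%04x: IRQs not supported\n", dev->id.coreid);
	else if (irq == 5)
		ssb_dbg("core 0x%04x: IRQ disabled\n", dev->id.coreid);
	else
		ssb_dbg("core 0x%04x: irq line %u (MIPS hw irq %u)\n",
			dev->id.coreid, irq, irq + 2);
}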
static void clear_irq(struct ssb_bus *bus, unsigned int irq)
{
struct ssb_device *dev = bus->mipscore.dev;
/* Clear the IRQ in the MIPScore backplane registers */
if (irq == 0) {
ssb_write32(dev, SSB_INTVEC, 0);
} else {
ssb_write32(dev, SSB_IPSFLAG,
ssb_read32(dev, SSB_IPSFLAG) |
ipsflag_irq_mask[irq]);
}
}
static void set_irq(struct ssb_device *dev, unsigned int irq)
{
unsigned int oldirq = ssb_mips_irq(dev);
struct ssb_bus *bus = dev->bus;
struct ssb_device *mdev = bus->mipscore.dev;
u32 irqflag = ssb_irqflag(dev);
BUG_ON(oldirq == 6);
dev->irq = irq + 2;
/* clear the old irq */
if (oldirq == 0)
ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
else if (oldirq != 5)
clear_irq(bus, oldirq);
/* assign the new one */
if (irq == 0) {
ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
} else {
u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
if ((ipsflag & ipsflag_irq_mask[irq]) != ipsflag_irq_mask[irq]) {
u32 oldipsflag = (ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq];
struct ssb_device *olddev = find_device(dev, oldipsflag);
if (olddev)
set_irq(olddev, 0);
}
irqflag <<= ipsflag_irq_shift[irq];
irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
ssb_write32(mdev, SSB_IPSFLAG, irqflag);
}
ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
dev->id.coreid, oldirq+2, irq+2);
}
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
dev->id.coreid,
irq_name[0], irq == 0 ? "*" : " ",
irq_name[1], irq == 1 ? "*" : " ",
irq_name[2], irq == 2 ? "*" : " ",
irq_name[3], irq == 3 ? "*" : " ",
irq_name[4], irq == 4 ? "*" : " ",
irq_name[5], irq == 5 ? "*" : " ",
irq_name[6], irq == 6 ? "*" : " ");
}
static void dump_irq(struct ssb_bus *bus)
{
int i;
for (i = 0; i < bus->nr_devices; i++) {
struct ssb_device *dev;
dev = &(bus->devices[i]);
print_irq(dev, ssb_mips_irq(dev));
}
}
static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
{
struct ssb_bus *bus = mcore->dev->bus;
if (ssb_extif_available(&bus->extif))
mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports);
else if (ssb_chipco_available(&bus->chipco))
mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports);
else
mcore->nr_serial_ports = 0;
}
static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
{
struct ssb_bus *bus = mcore->dev->bus;
struct ssb_pflash *pflash = &mcore->pflash;
	/* When there is no ChipCommon on the bus, there is a 4MB flash */
if (!ssb_chipco_available(&bus->chipco)) {
pflash->present = true;
pflash->buswidth = 2;
pflash->window = SSB_FLASH1;
pflash->window_size = SSB_FLASH1_SZ;
goto ssb_pflash;
}
/* There is ChipCommon, so use it to read info about flash */
switch (bus->chipco.capabilities & SSB_CHIPCO_CAP_FLASHT) {
case SSB_CHIPCO_FLASHT_STSER:
case SSB_CHIPCO_FLASHT_ATSER:
pr_debug("Found serial flash\n");
ssb_sflash_init(&bus->chipco);
break;
case SSB_CHIPCO_FLASHT_PARA:
pr_debug("Found parallel flash\n");
pflash->present = true;
pflash->window = SSB_FLASH2;
pflash->window_size = SSB_FLASH2_SZ;
if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
& SSB_CHIPCO_CFG_DS16) == 0)
pflash->buswidth = 1;
else
pflash->buswidth = 2;
break;
}
ssb_pflash:
if (pflash->present) {
ssb_pflash_data.width = pflash->buswidth;
ssb_pflash_resource.start = pflash->window;
ssb_pflash_resource.end = pflash->window + pflash->window_size;
}
}
u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
{
struct ssb_bus *bus = mcore->dev->bus;
u32 pll_type, n, m, rate = 0;
if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
return ssb_pmu_get_cpu_clock(&bus->chipco);
if (ssb_extif_available(&bus->extif)) {
ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
} else if (ssb_chipco_available(&bus->chipco)) {
ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m);
} else
return 0;
if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365)) {
rate = 200000000;
} else {
rate = ssb_calc_clock_rate(pll_type, n, m);
}
if (pll_type == SSB_PLLTYPE_6) {
rate *= 2;
}
return rate;
}
void ssb_mipscore_init(struct ssb_mipscore *mcore)
{
struct ssb_bus *bus;
struct ssb_device *dev;
unsigned long hz, ns;
unsigned int irq, i;
if (!mcore->dev)
return; /* We don't have a MIPS core */
ssb_dbg("Initializing MIPS core...\n");
bus = mcore->dev->bus;
hz = ssb_clockspeed(bus);
if (!hz)
hz = 100000000;
ns = 1000000000 / hz;
if (ssb_extif_available(&bus->extif))
ssb_extif_timing_init(&bus->extif, ns);
else if (ssb_chipco_available(&bus->chipco))
ssb_chipco_timing_init(&bus->chipco, ns);
/* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */
for (irq = 2, i = 0; i < bus->nr_devices; i++) {
int mips_irq;
dev = &(bus->devices[i]);
mips_irq = ssb_mips_irq(dev);
if (mips_irq > 4)
dev->irq = 0;
else
dev->irq = mips_irq + 2;
if (dev->irq > 5)
continue;
switch (dev->id.coreid) {
case SSB_DEV_USB11_HOST:
/* shouldn't need a separate irq line for non-4710, most of them have a proper
* external usb controller on the pci */
if ((bus->chip_id == 0x4710) && (irq <= 4)) {
set_irq(dev, irq++);
}
break;
case SSB_DEV_PCI:
case SSB_DEV_ETHERNET:
case SSB_DEV_ETHERNET_GBIT:
case SSB_DEV_80211:
case SSB_DEV_USB20_HOST:
/* These devices get their own IRQ line if available, the rest goes on IRQ0 */
if (irq <= 4) {
set_irq(dev, irq++);
break;
}
/* fallthrough */
case SSB_DEV_EXTIF:
set_irq(dev, 0);
break;
}
}
ssb_dbg("after irq reconfiguration\n");
dump_irq(bus);
ssb_mips_serial_init(mcore);
ssb_mips_flash_detect(mcore);
}
| gpl-2.0 |
davidevinavil/kernel_s500_jb | fs/pnode.c | 3628 | 9038 | /*
* linux/fs/pnode.c
*
* (C) Copyright IBM Corporation 2005.
* Released under GPL v2.
* Author : Ram Pai (linuxram@us.ibm.com)
*
*/
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include "internal.h"
#include "pnode.h"
/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
return list_entry(p->mnt_share.next, struct mount, mnt_share);
}
static inline struct mount *first_slave(struct mount *p)
{
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}
static struct mount *get_peer_under_root(struct mount *mnt,
struct mnt_namespace *ns,
const struct path *root)
{
struct mount *m = mnt;
do {
/* Check the namespace first for optimization */
if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
return m;
m = next_peer(m);
} while (m != mnt);
return NULL;
}
/*
* Get ID of closest dominating peer group having a representative
* under the given root.
*
* Caller must hold namespace_sem
*/
int get_dominating_id(struct mount *mnt, const struct path *root)
{
struct mount *m;
for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
if (d)
return d->mnt_group_id;
}
return 0;
}
static int do_make_slave(struct mount *mnt)
{
struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
struct mount *slave_mnt;
/*
* slave 'mnt' to a peer mount that has the
* same root dentry. If none is available then
* slave it to anything that is available.
*/
while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;
if (peer_mnt == mnt) {
peer_mnt = next_peer(mnt);
if (peer_mnt == mnt)
peer_mnt = NULL;
}
if (IS_MNT_SHARED(mnt) && list_empty(&mnt->mnt_share))
mnt_release_group_id(mnt);
list_del_init(&mnt->mnt_share);
mnt->mnt_group_id = 0;
if (peer_mnt)
master = peer_mnt;
if (master) {
list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
slave_mnt->mnt_master = master;
list_move(&mnt->mnt_slave, &master->mnt_slave_list);
list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
} else {
struct list_head *p = &mnt->mnt_slave_list;
while (!list_empty(p)) {
slave_mnt = list_first_entry(p,
struct mount, mnt_slave);
list_del_init(&slave_mnt->mnt_slave);
slave_mnt->mnt_master = NULL;
}
}
mnt->mnt_master = master;
CLEAR_MNT_SHARED(mnt);
return 0;
}
/*
* vfsmount lock must be held for write
*/
void change_mnt_propagation(struct mount *mnt, int type)
{
if (type == MS_SHARED) {
set_mnt_shared(mnt);
return;
}
do_make_slave(mnt);
if (type != MS_SLAVE) {
list_del_init(&mnt->mnt_slave);
mnt->mnt_master = NULL;
if (type == MS_UNBINDABLE)
mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
else
mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
}
}
/*
* get the next mount in the propagation tree.
* @m: the mount seen last
* @origin: the original mount from where the tree walk initiated
*
* Note that peer groups form contiguous segments of slave lists.
* We rely on that in get_source() to be able to find out if
* vfsmount found while iterating with propagation_next() is
* a peer of one we'd found earlier.
*/
static struct mount *propagation_next(struct mount *m,
struct mount *origin)
{
/* are there any slaves of this mount? */
if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
return first_slave(m);
while (1) {
struct mount *master = m->mnt_master;
if (master == origin->mnt_master) {
struct mount *next = next_peer(m);
return (next == origin) ? NULL : next;
} else if (m->mnt_slave.next != &master->mnt_slave_list)
return next_slave(m);
/* back at master */
m = master;
}
}
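/*
 * Illustrative sketch (not part of the original file): the canonical walk
 * over every mount that receives propagation from @origin, mirroring the
 * loops in propagate_mnt() and propagate_mount_busy() below. The function
 * name is hypothetical.
 */
static int __maybe_unused count_propagation_targets(struct mount *origin)
{
	struct mount *m;
	int n = 0;

	for (m = propagation_next(origin, origin); m;
	     m = propagation_next(m, origin))
		n++;

	return n;
}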
/*
* return the source mount to be used for cloning
*
* @dest the current destination mount
* @last_dest the last seen destination mount
* @last_src the last seen source mount
* @type return CL_SLAVE if the new mount has to be
* cloned as a slave.
*/
static struct mount *get_source(struct mount *dest,
struct mount *last_dest,
struct mount *last_src,
int *type)
{
struct mount *p_last_src = NULL;
struct mount *p_last_dest = NULL;
while (last_dest != dest->mnt_master) {
p_last_dest = last_dest;
p_last_src = last_src;
last_dest = last_dest->mnt_master;
last_src = last_src->mnt_master;
}
if (p_last_dest) {
do {
p_last_dest = next_peer(p_last_dest);
} while (IS_MNT_NEW(p_last_dest));
/* is that a peer of the earlier? */
if (dest == p_last_dest) {
*type = CL_MAKE_SHARED;
return p_last_src;
}
}
/* slave of the earlier, then */
*type = CL_SLAVE;
/* beginning of peer group among the slaves? */
if (IS_MNT_SHARED(dest))
*type |= CL_MAKE_SHARED;
return last_src;
}
/*
* mount 'source_mnt' under the destination 'dest_mnt' at
* dentry 'dest_dentry'. And propagate that mount to
* all the peer and slave mounts of 'dest_mnt'.
* Link all the new mounts into a propagation tree headed at
* source_mnt. Also link all the new mounts using ->mnt_list
* headed at source_mnt's ->mnt_list
*
* @dest_mnt: destination mount.
* @dest_dentry: destination dentry.
* @source_mnt: source mount.
* @tree_list : list of heads of trees to be attached.
*/
int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
struct mount *source_mnt, struct list_head *tree_list)
{
struct mount *m, *child;
int ret = 0;
struct mount *prev_dest_mnt = dest_mnt;
struct mount *prev_src_mnt = source_mnt;
LIST_HEAD(tmp_list);
LIST_HEAD(umount_list);
for (m = propagation_next(dest_mnt, dest_mnt); m;
m = propagation_next(m, dest_mnt)) {
int type;
struct mount *source;
if (IS_MNT_NEW(m))
continue;
source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
if (!(child = copy_tree(source, source->mnt.mnt_root, type))) {
ret = -ENOMEM;
list_splice(tree_list, tmp_list.prev);
goto out;
}
if (is_subdir(dest_dentry, m->mnt.mnt_root)) {
mnt_set_mountpoint(m, dest_dentry, child);
list_add_tail(&child->mnt_hash, tree_list);
} else {
/*
* This can happen if the parent mount was bind mounted
* on some subdirectory of a shared/slave mount.
*/
list_add_tail(&child->mnt_hash, &tmp_list);
}
prev_dest_mnt = m;
prev_src_mnt = child;
}
out:
br_write_lock(vfsmount_lock);
while (!list_empty(&tmp_list)) {
child = list_first_entry(&tmp_list, struct mount, mnt_hash);
umount_tree(child, 0, &umount_list);
}
br_write_unlock(vfsmount_lock);
release_mounts(&umount_list);
return ret;
}
/*
* return true if the refcount is greater than count
*/
static inline int do_refcount_check(struct mount *mnt, int count)
{
int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
return (mycount > count);
}
/*
* check if the mount 'mnt' can be unmounted successfully.
* @mnt: the mount to be checked for unmount
* NOTE: unmounting 'mnt' would naturally propagate to all
* other mounts its parent propagates to.
* Check if any of these mounts that **do not have submounts**
* have more references than 'refcnt'. If so return busy.
*
* vfsmount lock must be held for write
*/
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
struct mount *m, *child;
struct mount *parent = mnt->mnt_parent;
int ret = 0;
if (mnt == parent)
return do_refcount_check(mnt, refcnt);
/*
* quickly check if the current mount can be unmounted.
* If not, we don't have to go checking for all other
* mounts
*/
if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
return 1;
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint, 0);
if (child && list_empty(&child->mnt_mounts) &&
(ret = do_refcount_check(child, 1)))
break;
}
return ret;
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
static void __propagate_umount(struct mount *mnt)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
BUG_ON(parent == mnt);
for (m = propagation_next(parent, parent); m;
m = propagation_next(m, parent)) {
struct mount *child = __lookup_mnt(&m->mnt,
mnt->mnt_mountpoint, 0);
/*
* umount the child only if the child has no
* other children
*/
if (child && list_empty(&child->mnt_mounts))
list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
}
}
/*
* collect all mounts that receive propagation from the mount in @list,
* and return these additional mounts in the same list.
* @list: the list of mounts to be unmounted.
*
* vfsmount lock must be held for write
*/
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
list_for_each_entry(mnt, list, mnt_hash)
__propagate_umount(mnt);
return 0;
}
| gpl-2.0 |
percy-g2/android_kernel_motorola_msm8610 | arch/arm/oprofile/common.c | 3884 | 2967 | /**
* @file common.c
*
* @remark Copyright 2004 Oprofile Authors
* @remark Copyright 2010 ARM Ltd.
* @remark Read the file COPYING
*
* @author Zwane Mwaikambo
* @author Will Deacon [move to perf]
*/
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/stacktrace.h>
#include <linux/uaccess.h>
#include <asm/perf_event.h>
#include <asm/ptrace.h>
#ifdef CONFIG_HW_PERF_EVENTS
char *op_name_from_perf_id(void)
{
enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
switch (id) {
case ARM_PERF_PMU_ID_XSCALE1:
return "arm/xscale1";
case ARM_PERF_PMU_ID_XSCALE2:
return "arm/xscale2";
case ARM_PERF_PMU_ID_V6:
return "arm/armv6";
case ARM_PERF_PMU_ID_V6MP:
return "arm/mpcore";
case ARM_PERF_PMU_ID_CA5:
return "arm/armv7";
case ARM_PERF_PMU_ID_CA8:
return "arm/armv7";
case ARM_PERF_PMU_ID_CA9:
return "arm/armv7-ca9";
case ARM_PERF_PMU_ID_SCORPION:
return "arm/armv7-scorpion";
case ARM_PERF_PMU_ID_SCORPIONMP:
return "arm/armv7-scorpionmp";
case ARM_PERF_PMU_ID_KRAIT:
return "arm/armv7-krait";
default:
return NULL;
}
}
#endif
static int report_trace(struct stackframe *frame, void *d)
{
unsigned int *depth = d;
if (*depth) {
oprofile_add_trace(frame->pc);
(*depth)--;
}
return *depth == 0;
}
/*
* The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this
* structure so the address of this struct is:
* (struct frame_tail *)(xxx->fp)-1
*/
struct frame_tail {
struct frame_tail *fp;
unsigned long sp;
unsigned long lr;
} __attribute__((packed));
static struct frame_tail* user_backtrace(struct frame_tail *tail)
{
struct frame_tail buftail[2];
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
return NULL;
oprofile_add_trace(buftail[0].lr);
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
if (tail + 1 >= buftail[0].fp)
return NULL;
return buftail[0].fp-1;
}
static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
{
struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
if (!user_mode(regs)) {
struct stackframe frame;
frame.fp = regs->ARM_fp;
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
walk_stackframe(&frame, report_trace, &depth);
return;
}
while (depth-- && tail && !((unsigned long) tail & 3))
tail = user_backtrace(tail);
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
/* provide backtrace support also in timer mode: */
ops->backtrace = arm_backtrace;
return oprofile_perf_init(ops);
}
void oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
| gpl-2.0 |
CMRemix/android_kernel_samsung_hlte | arch/arm/oprofile/common.c | 3884 | 2967 | /**
* @file common.c
*
* @remark Copyright 2004 Oprofile Authors
* @remark Copyright 2010 ARM Ltd.
* @remark Read the file COPYING
*
* @author Zwane Mwaikambo
* @author Will Deacon [move to perf]
*/
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/oprofile.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/stacktrace.h>
#include <linux/uaccess.h>
#include <asm/perf_event.h>
#include <asm/ptrace.h>
#ifdef CONFIG_HW_PERF_EVENTS
char *op_name_from_perf_id(void)
{
enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
switch (id) {
case ARM_PERF_PMU_ID_XSCALE1:
return "arm/xscale1";
case ARM_PERF_PMU_ID_XSCALE2:
return "arm/xscale2";
case ARM_PERF_PMU_ID_V6:
return "arm/armv6";
case ARM_PERF_PMU_ID_V6MP:
return "arm/mpcore";
case ARM_PERF_PMU_ID_CA5:
return "arm/armv7";
case ARM_PERF_PMU_ID_CA8:
return "arm/armv7";
case ARM_PERF_PMU_ID_CA9:
return "arm/armv7-ca9";
case ARM_PERF_PMU_ID_SCORPION:
return "arm/armv7-scorpion";
case ARM_PERF_PMU_ID_SCORPIONMP:
return "arm/armv7-scorpionmp";
case ARM_PERF_PMU_ID_KRAIT:
return "arm/armv7-krait";
default:
return NULL;
}
}
#endif
static int report_trace(struct stackframe *frame, void *d)
{
unsigned int *depth = d;
if (*depth) {
oprofile_add_trace(frame->pc);
(*depth)--;
}
return *depth == 0;
}
/*
* The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this
* structure so the address of this struct is:
* (struct frame_tail *)(xxx->fp)-1
*/
struct frame_tail {
struct frame_tail *fp;
unsigned long sp;
unsigned long lr;
} __attribute__((packed));
static struct frame_tail* user_backtrace(struct frame_tail *tail)
{
struct frame_tail buftail[2];
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
return NULL;
oprofile_add_trace(buftail[0].lr);
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
if (tail + 1 >= buftail[0].fp)
return NULL;
return buftail[0].fp-1;
}
static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
{
struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
if (!user_mode(regs)) {
struct stackframe frame;
frame.fp = regs->ARM_fp;
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
walk_stackframe(&frame, report_trace, &depth);
return;
}
while (depth-- && tail && !((unsigned long) tail & 3))
tail = user_backtrace(tail);
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
/* provide backtrace support also in timer mode: */
ops->backtrace = arm_backtrace;
return oprofile_perf_init(ops);
}
void oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
| gpl-2.0 |
invisiblek/android_kernel_lge_g3 | drivers/mmc/host/ushc.c | 7724 | 13638 | /*
* USB SD Host Controller (USHC) controller driver.
*
* Copyright (C) 2010 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* Notes:
* - Only version 2 devices are supported.
* - Version 2 devices only support SDIO cards/devices (R2 response is
* unsupported).
*
* References:
* [USHC] USB SD Host Controller specification (CS-118793-SP)
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
enum ushc_request {
USHC_GET_CAPS = 0x00,
USHC_HOST_CTRL = 0x01,
USHC_PWR_CTRL = 0x02,
USHC_CLK_FREQ = 0x03,
USHC_EXEC_CMD = 0x04,
USHC_READ_RESP = 0x05,
USHC_RESET = 0x06,
};
enum ushc_request_type {
USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
};
#define USHC_GET_CAPS_VERSION_MASK 0xff
#define USHC_GET_CAPS_3V3 (1 << 8)
#define USHC_GET_CAPS_3V0 (1 << 9)
#define USHC_GET_CAPS_1V8 (1 << 10)
#define USHC_GET_CAPS_HIGH_SPD (1 << 16)
#define USHC_HOST_CTRL_4BIT (1 << 1)
#define USHC_HOST_CTRL_HIGH_SPD (1 << 0)
#define USHC_PWR_CTRL_OFF 0x00
#define USHC_PWR_CTRL_3V3 0x01
#define USHC_PWR_CTRL_3V0 0x02
#define USHC_PWR_CTRL_1V8 0x03
#define USHC_READ_RESP_BUSY (1 << 4)
#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3)
#define USHC_READ_RESP_ERR_CRC (1 << 2)
#define USHC_READ_RESP_ERR_DAT (1 << 1)
#define USHC_READ_RESP_ERR_CMD (1 << 0)
#define USHC_READ_RESP_ERR_MASK 0x0f
struct ushc_cbw {
__u8 signature;
__u8 cmd_idx;
__le16 block_size;
__le32 arg;
} __attribute__((packed));
#define USHC_CBW_SIGNATURE 'C'
struct ushc_csw {
__u8 signature;
__u8 status;
__le32 response;
} __attribute__((packed));
#define USHC_CSW_SIGNATURE 'S'
struct ushc_int_data {
u8 status;
u8 reserved[3];
};
#define USHC_INT_STATUS_SDIO_INT (1 << 1)
#define USHC_INT_STATUS_CARD_PRESENT (1 << 0)
struct ushc_data {
struct usb_device *usb_dev;
struct mmc_host *mmc;
struct urb *int_urb;
struct ushc_int_data *int_data;
struct urb *cbw_urb;
struct ushc_cbw *cbw;
struct urb *data_urb;
struct urb *csw_urb;
struct ushc_csw *csw;
spinlock_t lock;
struct mmc_request *current_req;
u32 caps;
u16 host_ctrl;
unsigned long flags;
u8 last_status;
int clock_freq;
};
#define DISCONNECTED 0
#define INT_EN 1
#define IGNORE_NEXT_INT 2
static void data_callback(struct urb *urb);
static int ushc_hw_reset(struct ushc_data *ushc)
{
return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_RESET, USHC_RESET_TYPE,
0, 0, NULL, 0, 100);
}
static int ushc_hw_get_caps(struct ushc_data *ushc)
{
int ret;
int version;
ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0),
USHC_GET_CAPS, USHC_GET_CAPS_TYPE,
0, 0, &ushc->caps, sizeof(ushc->caps), 100);
if (ret < 0)
return ret;
ushc->caps = le32_to_cpu(ushc->caps);
version = ushc->caps & USHC_GET_CAPS_VERSION_MASK;
if (version != 0x02) {
dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version);
return -EINVAL;
}
return 0;
}
static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val)
{
u16 host_ctrl;
int ret;
host_ctrl = (ushc->host_ctrl & ~mask) | val;
ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE,
host_ctrl, 0, NULL, 0, 100);
if (ret < 0)
return ret;
ushc->host_ctrl = host_ctrl;
return 0;
}
static void int_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
u8 status, last_status;
if (urb->status < 0)
return;
status = ushc->int_data->status;
last_status = ushc->last_status;
ushc->last_status = status;
/*
* Ignore the card interrupt status on interrupt transfers that
	 * were submitted while card interrupts were disabled.
*
	 * This avoids occasional spurious interrupts when enabling
* interrupts immediately after clearing the source on the card.
*/
if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags)
&& test_bit(INT_EN, &ushc->flags)
&& status & USHC_INT_STATUS_SDIO_INT) {
mmc_signal_sdio_irq(ushc->mmc);
}
if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT)
mmc_detect_change(ushc->mmc, msecs_to_jiffies(100));
if (!test_bit(INT_EN, &ushc->flags))
set_bit(IGNORE_NEXT_INT, &ushc->flags);
usb_submit_urb(ushc->int_urb, GFP_ATOMIC);
}
static void cbw_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
if (urb->status != 0) {
usb_unlink_urb(ushc->data_urb);
usb_unlink_urb(ushc->csw_urb);
}
}
static void data_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
if (urb->status != 0)
usb_unlink_urb(ushc->csw_urb);
}
static void csw_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
struct mmc_request *req = ushc->current_req;
int status;
status = ushc->csw->status;
if (urb->status != 0) {
req->cmd->error = urb->status;
} else if (status & USHC_READ_RESP_ERR_CMD) {
if (status & USHC_READ_RESP_ERR_CRC)
req->cmd->error = -EIO;
else
req->cmd->error = -ETIMEDOUT;
}
if (req->data) {
if (status & USHC_READ_RESP_ERR_DAT) {
if (status & USHC_READ_RESP_ERR_CRC)
req->data->error = -EIO;
else
req->data->error = -ETIMEDOUT;
req->data->bytes_xfered = 0;
} else {
req->data->bytes_xfered = req->data->blksz * req->data->blocks;
}
}
req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);
mmc_request_done(ushc->mmc, req);
}
static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct ushc_data *ushc = mmc_priv(mmc);
int ret;
unsigned long flags;
spin_lock_irqsave(&ushc->lock, flags);
if (test_bit(DISCONNECTED, &ushc->flags)) {
ret = -ENODEV;
goto out;
}
/* Version 2 firmware doesn't support the R2 response format. */
if (req->cmd->flags & MMC_RSP_136) {
ret = -EINVAL;
goto out;
}
/* The Astoria's data FIFOs don't work with clock speeds < 5MHz so
limit commands with data to 6MHz or more. */
if (req->data && ushc->clock_freq < 6000000) {
ret = -EINVAL;
goto out;
}
ushc->current_req = req;
/* Start cmd with CBW. */
ushc->cbw->cmd_idx = cpu_to_le16(req->cmd->opcode);
if (req->data)
ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
else
ushc->cbw->block_size = 0;
ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC);
if (ret < 0)
goto out;
/* Submit data (if any). */
if (req->data) {
struct mmc_data *data = req->data;
int pipe;
if (data->flags & MMC_DATA_READ)
pipe = usb_rcvbulkpipe(ushc->usb_dev, 6);
else
pipe = usb_sndbulkpipe(ushc->usb_dev, 2);
usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe,
sg_virt(data->sg), data->sg->length,
data_callback, ushc);
ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC);
if (ret < 0)
goto out;
}
/* Submit CSW. */
ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC);
if (ret < 0)
goto out;
out:
spin_unlock_irqrestore(&ushc->lock, flags);
if (ret < 0) {
usb_unlink_urb(ushc->cbw_urb);
usb_unlink_urb(ushc->data_urb);
req->cmd->error = ret;
mmc_request_done(mmc, req);
}
}
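/*
 * Editor's note (descriptive, not in the original source): a request is
 * issued as up to three chained URBs -- the CBW on the bulk-out endpoint,
 * an optional data transfer, and the CSW on the bulk-in endpoint.
 * cbw_callback() and data_callback() unlink the later URBs on error, and
 * csw_callback() completes the mmc request.
 */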
static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode)
{
u16 voltage;
switch (power_mode) {
case MMC_POWER_OFF:
voltage = USHC_PWR_CTRL_OFF;
break;
case MMC_POWER_UP:
case MMC_POWER_ON:
voltage = USHC_PWR_CTRL_3V3;
break;
default:
return -EINVAL;
}
return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE,
voltage, 0, NULL, 0, 100);
}
static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width)
{
return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT,
bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0);
}
static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs)
{
int ret;
/* Hardware can't detect interrupts while the clock is off. */
if (clk == 0)
clk = 400000;
ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD,
enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0);
if (ret < 0)
return ret;
ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE,
clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100);
if (ret < 0)
return ret;
ushc->clock_freq = clk;
return 0;
}
static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct ushc_data *ushc = mmc_priv(mmc);
ushc_set_power(ushc, ios->power_mode);
ushc_set_bus_width(ushc, 1 << ios->bus_width);
ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS);
}
static int ushc_get_cd(struct mmc_host *mmc)
{
struct ushc_data *ushc = mmc_priv(mmc);
return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT);
}
static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct ushc_data *ushc = mmc_priv(mmc);
if (enable)
set_bit(INT_EN, &ushc->flags);
else
clear_bit(INT_EN, &ushc->flags);
}
static void ushc_clean_up(struct ushc_data *ushc)
{
usb_free_urb(ushc->int_urb);
usb_free_urb(ushc->csw_urb);
usb_free_urb(ushc->data_urb);
usb_free_urb(ushc->cbw_urb);
kfree(ushc->int_data);
kfree(ushc->cbw);
kfree(ushc->csw);
mmc_free_host(ushc->mmc);
}
static const struct mmc_host_ops ushc_ops = {
.request = ushc_request,
.set_ios = ushc_set_ios,
.get_cd = ushc_get_cd,
.enable_sdio_irq = ushc_enable_sdio_irq,
};
static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct mmc_host *mmc;
struct ushc_data *ushc;
int ret;
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
ushc = mmc_priv(mmc);
usb_set_intfdata(intf, ushc);
ushc->usb_dev = usb_dev;
ushc->mmc = mmc;
spin_lock_init(&ushc->lock);
ret = ushc_hw_reset(ushc);
if (ret < 0)
goto err;
/* Read capabilities. */
ret = ushc_hw_get_caps(ushc);
if (ret < 0)
goto err;
mmc->ops = &ushc_ops;
mmc->f_min = 400000;
mmc->f_max = 50000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0;
mmc->max_seg_size = 512*511;
mmc->max_segs = 1;
mmc->max_req_size = 512*511;
mmc->max_blk_size = 512;
mmc->max_blk_count = 511;
ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->int_urb == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
if (ushc->int_data == NULL) {
ret = -ENOMEM;
goto err;
}
usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
usb_rcvintpipe(usb_dev,
intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
ushc->int_data, sizeof(struct ushc_int_data),
int_callback, ushc,
intf->cur_altsetting->endpoint[0].desc.bInterval);
ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->cbw_urb == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
if (ushc->cbw == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->cbw->signature = USHC_CBW_SIGNATURE;
usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
ushc->cbw, sizeof(struct ushc_cbw),
cbw_callback, ushc);
ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->data_urb == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->csw_urb == NULL) {
ret = -ENOMEM;
goto err;
}
	ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL);
if (ushc->csw == NULL) {
ret = -ENOMEM;
goto err;
}
usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
ushc->csw, sizeof(struct ushc_csw),
csw_callback, ushc);
ret = mmc_add_host(ushc->mmc);
if (ret)
goto err;
ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL);
if (ret < 0) {
mmc_remove_host(ushc->mmc);
goto err;
}
return 0;
err:
ushc_clean_up(ushc);
return ret;
}
static void ushc_disconnect(struct usb_interface *intf)
{
struct ushc_data *ushc = usb_get_intfdata(intf);
spin_lock_irq(&ushc->lock);
set_bit(DISCONNECTED, &ushc->flags);
spin_unlock_irq(&ushc->lock);
usb_kill_urb(ushc->int_urb);
usb_kill_urb(ushc->cbw_urb);
usb_kill_urb(ushc->data_urb);
usb_kill_urb(ushc->csw_urb);
mmc_remove_host(ushc->mmc);
ushc_clean_up(ushc);
}
static struct usb_device_id ushc_id_table[] = {
/* CSR USB SD Host Controller */
{ USB_DEVICE(0x0a12, 0x5d10) },
{ },
};
MODULE_DEVICE_TABLE(usb, ushc_id_table);
static struct usb_driver ushc_driver = {
.name = "ushc",
.id_table = ushc_id_table,
.probe = ushc_probe,
.disconnect = ushc_disconnect,
};
module_usb_driver(ushc_driver);
MODULE_DESCRIPTION("USB SD Host Controller driver");
MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
neobuddy89/hammerhead | drivers/mmc/host/ushc.c | 7724 | 13638 | /*
* USB SD Host Controller (USHC) controller driver.
*
* Copyright (C) 2010 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* Notes:
* - Only version 2 devices are supported.
* - Version 2 devices only support SDIO cards/devices (R2 response is
* unsupported).
*
* References:
* [USHC] USB SD Host Controller specification (CS-118793-SP)
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
enum ushc_request {
USHC_GET_CAPS = 0x00,
USHC_HOST_CTRL = 0x01,
USHC_PWR_CTRL = 0x02,
USHC_CLK_FREQ = 0x03,
USHC_EXEC_CMD = 0x04,
USHC_READ_RESP = 0x05,
USHC_RESET = 0x06,
};
enum ushc_request_type {
USHC_GET_CAPS_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_HOST_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_PWR_CTRL_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_CLK_FREQ_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_EXEC_CMD_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_READ_RESP_TYPE = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
USHC_RESET_TYPE = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
};
#define USHC_GET_CAPS_VERSION_MASK 0xff
#define USHC_GET_CAPS_3V3 (1 << 8)
#define USHC_GET_CAPS_3V0 (1 << 9)
#define USHC_GET_CAPS_1V8 (1 << 10)
#define USHC_GET_CAPS_HIGH_SPD (1 << 16)
#define USHC_HOST_CTRL_4BIT (1 << 1)
#define USHC_HOST_CTRL_HIGH_SPD (1 << 0)
#define USHC_PWR_CTRL_OFF 0x00
#define USHC_PWR_CTRL_3V3 0x01
#define USHC_PWR_CTRL_3V0 0x02
#define USHC_PWR_CTRL_1V8 0x03
#define USHC_READ_RESP_BUSY (1 << 4)
#define USHC_READ_RESP_ERR_TIMEOUT (1 << 3)
#define USHC_READ_RESP_ERR_CRC (1 << 2)
#define USHC_READ_RESP_ERR_DAT (1 << 1)
#define USHC_READ_RESP_ERR_CMD (1 << 0)
#define USHC_READ_RESP_ERR_MASK 0x0f
struct ushc_cbw {
__u8 signature;
__u8 cmd_idx;
__le16 block_size;
__le32 arg;
} __attribute__((packed));
#define USHC_CBW_SIGNATURE 'C'
struct ushc_csw {
__u8 signature;
__u8 status;
__le32 response;
} __attribute__((packed));
#define USHC_CSW_SIGNATURE 'S'
struct ushc_int_data {
u8 status;
u8 reserved[3];
};
#define USHC_INT_STATUS_SDIO_INT (1 << 1)
#define USHC_INT_STATUS_CARD_PRESENT (1 << 0)
struct ushc_data {
struct usb_device *usb_dev;
struct mmc_host *mmc;
struct urb *int_urb;
struct ushc_int_data *int_data;
struct urb *cbw_urb;
struct ushc_cbw *cbw;
struct urb *data_urb;
struct urb *csw_urb;
struct ushc_csw *csw;
spinlock_t lock;
struct mmc_request *current_req;
u32 caps;
u16 host_ctrl;
unsigned long flags;
u8 last_status;
int clock_freq;
};
#define DISCONNECTED 0
#define INT_EN 1
#define IGNORE_NEXT_INT 2
static void data_callback(struct urb *urb);
static int ushc_hw_reset(struct ushc_data *ushc)
{
return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_RESET, USHC_RESET_TYPE,
0, 0, NULL, 0, 100);
}
static int ushc_hw_get_caps(struct ushc_data *ushc)
{
int ret;
int version;
ret = usb_control_msg(ushc->usb_dev, usb_rcvctrlpipe(ushc->usb_dev, 0),
USHC_GET_CAPS, USHC_GET_CAPS_TYPE,
0, 0, &ushc->caps, sizeof(ushc->caps), 100);
if (ret < 0)
return ret;
ushc->caps = le32_to_cpu(ushc->caps);
version = ushc->caps & USHC_GET_CAPS_VERSION_MASK;
if (version != 0x02) {
dev_err(&ushc->usb_dev->dev, "controller version %d is not supported\n", version);
return -EINVAL;
}
return 0;
}
static int ushc_hw_set_host_ctrl(struct ushc_data *ushc, u16 mask, u16 val)
{
u16 host_ctrl;
int ret;
host_ctrl = (ushc->host_ctrl & ~mask) | val;
ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_HOST_CTRL, USHC_HOST_CTRL_TYPE,
host_ctrl, 0, NULL, 0, 100);
if (ret < 0)
return ret;
ushc->host_ctrl = host_ctrl;
return 0;
}
static void int_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
u8 status, last_status;
if (urb->status < 0)
return;
status = ushc->int_data->status;
last_status = ushc->last_status;
ushc->last_status = status;
/*
* Ignore the card interrupt status on interrupt transfers that
	 * were submitted while card interrupts were disabled.
*
	 * This avoids occasional spurious interrupts when enabling
* interrupts immediately after clearing the source on the card.
*/
if (!test_and_clear_bit(IGNORE_NEXT_INT, &ushc->flags)
&& test_bit(INT_EN, &ushc->flags)
&& status & USHC_INT_STATUS_SDIO_INT) {
mmc_signal_sdio_irq(ushc->mmc);
}
if ((status ^ last_status) & USHC_INT_STATUS_CARD_PRESENT)
mmc_detect_change(ushc->mmc, msecs_to_jiffies(100));
if (!test_bit(INT_EN, &ushc->flags))
set_bit(IGNORE_NEXT_INT, &ushc->flags);
usb_submit_urb(ushc->int_urb, GFP_ATOMIC);
}
static void cbw_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
if (urb->status != 0) {
usb_unlink_urb(ushc->data_urb);
usb_unlink_urb(ushc->csw_urb);
}
}
static void data_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
if (urb->status != 0)
usb_unlink_urb(ushc->csw_urb);
}
static void csw_callback(struct urb *urb)
{
struct ushc_data *ushc = urb->context;
struct mmc_request *req = ushc->current_req;
int status;
status = ushc->csw->status;
if (urb->status != 0) {
req->cmd->error = urb->status;
} else if (status & USHC_READ_RESP_ERR_CMD) {
if (status & USHC_READ_RESP_ERR_CRC)
req->cmd->error = -EIO;
else
req->cmd->error = -ETIMEDOUT;
}
if (req->data) {
if (status & USHC_READ_RESP_ERR_DAT) {
if (status & USHC_READ_RESP_ERR_CRC)
req->data->error = -EIO;
else
req->data->error = -ETIMEDOUT;
req->data->bytes_xfered = 0;
} else {
req->data->bytes_xfered = req->data->blksz * req->data->blocks;
}
}
req->cmd->resp[0] = le32_to_cpu(ushc->csw->response);
mmc_request_done(ushc->mmc, req);
}
static void ushc_request(struct mmc_host *mmc, struct mmc_request *req)
{
struct ushc_data *ushc = mmc_priv(mmc);
int ret;
unsigned long flags;
spin_lock_irqsave(&ushc->lock, flags);
if (test_bit(DISCONNECTED, &ushc->flags)) {
ret = -ENODEV;
goto out;
}
/* Version 2 firmware doesn't support the R2 response format. */
if (req->cmd->flags & MMC_RSP_136) {
ret = -EINVAL;
goto out;
}
/* The Astoria's data FIFOs don't work with clock speeds < 5MHz so
limit commands with data to 6MHz or more. */
if (req->data && ushc->clock_freq < 6000000) {
ret = -EINVAL;
goto out;
}
ushc->current_req = req;
/* Start cmd with CBW. */
ushc->cbw->cmd_idx = cpu_to_le16(req->cmd->opcode);
if (req->data)
ushc->cbw->block_size = cpu_to_le16(req->data->blksz);
else
ushc->cbw->block_size = 0;
ushc->cbw->arg = cpu_to_le32(req->cmd->arg);
ret = usb_submit_urb(ushc->cbw_urb, GFP_ATOMIC);
if (ret < 0)
goto out;
/* Submit data (if any). */
if (req->data) {
struct mmc_data *data = req->data;
int pipe;
if (data->flags & MMC_DATA_READ)
pipe = usb_rcvbulkpipe(ushc->usb_dev, 6);
else
pipe = usb_sndbulkpipe(ushc->usb_dev, 2);
usb_fill_bulk_urb(ushc->data_urb, ushc->usb_dev, pipe,
sg_virt(data->sg), data->sg->length,
data_callback, ushc);
ret = usb_submit_urb(ushc->data_urb, GFP_ATOMIC);
if (ret < 0)
goto out;
}
/* Submit CSW. */
ret = usb_submit_urb(ushc->csw_urb, GFP_ATOMIC);
if (ret < 0)
goto out;
out:
spin_unlock_irqrestore(&ushc->lock, flags);
if (ret < 0) {
usb_unlink_urb(ushc->cbw_urb);
usb_unlink_urb(ushc->data_urb);
req->cmd->error = ret;
mmc_request_done(mmc, req);
}
}
static int ushc_set_power(struct ushc_data *ushc, unsigned char power_mode)
{
u16 voltage;
switch (power_mode) {
case MMC_POWER_OFF:
voltage = USHC_PWR_CTRL_OFF;
break;
case MMC_POWER_UP:
case MMC_POWER_ON:
voltage = USHC_PWR_CTRL_3V3;
break;
default:
return -EINVAL;
}
return usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_PWR_CTRL, USHC_PWR_CTRL_TYPE,
voltage, 0, NULL, 0, 100);
}
static int ushc_set_bus_width(struct ushc_data *ushc, int bus_width)
{
return ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_4BIT,
bus_width == 4 ? USHC_HOST_CTRL_4BIT : 0);
}
static int ushc_set_bus_freq(struct ushc_data *ushc, int clk, bool enable_hs)
{
int ret;
/* Hardware can't detect interrupts while the clock is off. */
if (clk == 0)
clk = 400000;
ret = ushc_hw_set_host_ctrl(ushc, USHC_HOST_CTRL_HIGH_SPD,
enable_hs ? USHC_HOST_CTRL_HIGH_SPD : 0);
if (ret < 0)
return ret;
ret = usb_control_msg(ushc->usb_dev, usb_sndctrlpipe(ushc->usb_dev, 0),
USHC_CLK_FREQ, USHC_CLK_FREQ_TYPE,
clk & 0xffff, (clk >> 16) & 0xffff, NULL, 0, 100);
if (ret < 0)
return ret;
ushc->clock_freq = clk;
return 0;
}
static void ushc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct ushc_data *ushc = mmc_priv(mmc);
ushc_set_power(ushc, ios->power_mode);
ushc_set_bus_width(ushc, 1 << ios->bus_width);
ushc_set_bus_freq(ushc, ios->clock, ios->timing == MMC_TIMING_SD_HS);
}
static int ushc_get_cd(struct mmc_host *mmc)
{
struct ushc_data *ushc = mmc_priv(mmc);
return !!(ushc->last_status & USHC_INT_STATUS_CARD_PRESENT);
}
static void ushc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct ushc_data *ushc = mmc_priv(mmc);
if (enable)
set_bit(INT_EN, &ushc->flags);
else
clear_bit(INT_EN, &ushc->flags);
}
static void ushc_clean_up(struct ushc_data *ushc)
{
usb_free_urb(ushc->int_urb);
usb_free_urb(ushc->csw_urb);
usb_free_urb(ushc->data_urb);
usb_free_urb(ushc->cbw_urb);
kfree(ushc->int_data);
kfree(ushc->cbw);
kfree(ushc->csw);
mmc_free_host(ushc->mmc);
}
static const struct mmc_host_ops ushc_ops = {
.request = ushc_request,
.set_ios = ushc_set_ios,
.get_cd = ushc_get_cd,
.enable_sdio_irq = ushc_enable_sdio_irq,
};
static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct mmc_host *mmc;
struct ushc_data *ushc;
int ret;
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
ushc = mmc_priv(mmc);
usb_set_intfdata(intf, ushc);
ushc->usb_dev = usb_dev;
ushc->mmc = mmc;
spin_lock_init(&ushc->lock);
ret = ushc_hw_reset(ushc);
if (ret < 0)
goto err;
/* Read capabilities. */
ret = ushc_hw_get_caps(ushc);
if (ret < 0)
goto err;
mmc->ops = &ushc_ops;
mmc->f_min = 400000;
mmc->f_max = 50000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0;
mmc->max_seg_size = 512*511;
mmc->max_segs = 1;
mmc->max_req_size = 512*511;
mmc->max_blk_size = 512;
mmc->max_blk_count = 511;
ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->int_urb == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
if (ushc->int_data == NULL) {
ret = -ENOMEM;
goto err;
}
usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
usb_rcvintpipe(usb_dev,
intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
ushc->int_data, sizeof(struct ushc_int_data),
int_callback, ushc,
intf->cur_altsetting->endpoint[0].desc.bInterval);
ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->cbw_urb == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
if (ushc->cbw == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->cbw->signature = USHC_CBW_SIGNATURE;
usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
ushc->cbw, sizeof(struct ushc_cbw),
cbw_callback, ushc);
ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->data_urb == NULL) {
ret = -ENOMEM;
goto err;
}
ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
if (ushc->csw_urb == NULL) {
ret = -ENOMEM;
goto err;
}
	ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL);
if (ushc->csw == NULL) {
ret = -ENOMEM;
goto err;
}
usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
ushc->csw, sizeof(struct ushc_csw),
csw_callback, ushc);
ret = mmc_add_host(ushc->mmc);
if (ret)
goto err;
ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL);
if (ret < 0) {
mmc_remove_host(ushc->mmc);
goto err;
}
return 0;
err:
ushc_clean_up(ushc);
return ret;
}
static void ushc_disconnect(struct usb_interface *intf)
{
struct ushc_data *ushc = usb_get_intfdata(intf);
spin_lock_irq(&ushc->lock);
set_bit(DISCONNECTED, &ushc->flags);
spin_unlock_irq(&ushc->lock);
usb_kill_urb(ushc->int_urb);
usb_kill_urb(ushc->cbw_urb);
usb_kill_urb(ushc->data_urb);
usb_kill_urb(ushc->csw_urb);
mmc_remove_host(ushc->mmc);
ushc_clean_up(ushc);
}
static struct usb_device_id ushc_id_table[] = {
/* CSR USB SD Host Controller */
{ USB_DEVICE(0x0a12, 0x5d10) },
{ },
};
MODULE_DEVICE_TABLE(usb, ushc_id_table);
static struct usb_driver ushc_driver = {
.name = "ushc",
.id_table = ushc_id_table,
.probe = ushc_probe,
.disconnect = ushc_disconnect,
};
module_usb_driver(ushc_driver);
MODULE_DESCRIPTION("USB SD Host Controller driver");
MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Luquidtester/DirtyKernel-3x-ION | arch/x86/kernel/hw_breakpoint.c | 10284 | 12505 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2007 Alan Stern
* Copyright (C) 2009 IBM Corporation
* Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
*
* Authors: Alan Stern <stern@rowland.harvard.edu>
* K.Prasad <prasad@linux.vnet.ibm.com>
* Frederic Weisbecker <fweisbec@gmail.com>
*/
/*
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
* using the CPU's debug registers.
*/
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
EXPORT_PER_CPU_SYMBOL(cpu_dr7);
/* Per cpu debug address registers values */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for each cpus
*/
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
static inline unsigned long
__encode_dr7(int drnum, unsigned int len, unsigned int type)
{
unsigned long bp_info;
bp_info = (len | type) & 0xf;
bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));
return bp_info;
}
/*
* Encode the length, type, Exact, and Enable bits for a particular breakpoint
* as stored in debug register 7.
*/
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
}
/*
* Decode the length and type bits for a particular breakpoint as
* stored in debug register 7. Return the "enabled" status.
*/
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);
*len = (bp_info & 0xc) | 0x40;
*type = (bp_info & 0x3) | 0x80;
return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
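/*
 * Illustrative sketch (not part of the original file): encode_dr7() and
 * decode_dr7() round-trip the per-register length/type fields. The function
 * name and the chosen breakpoint slot/length/type are examples only.
 */
static void __maybe_unused dr7_roundtrip_example(void)
{
	unsigned long dr7;
	unsigned int len, type;

	dr7 = encode_dr7(0, X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE);
	decode_dr7(dr7, 0, &len, &type);
	/* here len == X86_BREAKPOINT_LEN_4 and type == X86_BREAKPOINT_WRITE */
}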
/*
* Install a perf counter breakpoint.
*
* We seek a free debug address register and use it for this
* breakpoint. Eventually we enable it in the debug control register.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned long *dr7;
int i;
for (i = 0; i < HBP_NUM; i++) {
struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
if (!*slot) {
*slot = bp;
break;
}
}
if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
return -EBUSY;
set_debugreg(info->address, i);
__this_cpu_write(cpu_debugreg[i], info->address);
dr7 = &__get_cpu_var(cpu_dr7);
*dr7 |= encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
return 0;
}
/*
* Uninstall the breakpoint contained in the given counter.
*
* First we search the debug address register it uses and then we disable
* it.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned long *dr7;
int i;
for (i = 0; i < HBP_NUM; i++) {
struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
if (*slot == bp) {
*slot = NULL;
break;
}
}
if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
return;
dr7 = &__get_cpu_var(cpu_dr7);
*dr7 &= ~__encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
}
static int get_hbp_len(u8 hbp_len)
{
unsigned int len_in_bytes = 0;
switch (hbp_len) {
case X86_BREAKPOINT_LEN_1:
len_in_bytes = 1;
break;
case X86_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
case X86_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
#endif
}
return len_in_bytes;
}
/*
* Check for virtual address in kernel space.
*/
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
unsigned int len;
unsigned long va;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
va = info->address;
len = get_hbp_len(info->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
/* Type */
switch (x86_type) {
case X86_BREAKPOINT_EXECUTE:
if (x86_len != X86_BREAKPOINT_LEN_X)
return -EINVAL;
*gen_type = HW_BREAKPOINT_X;
*gen_len = sizeof(long);
return 0;
case X86_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
case X86_BREAKPOINT_RW:
*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
break;
default:
return -EINVAL;
}
/* Len */
switch (x86_len) {
case X86_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case X86_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
case X86_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case X86_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
static int arch_build_bp_info(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
info->address = bp->attr.bp_addr;
/* Type */
switch (bp->attr.bp_type) {
case HW_BREAKPOINT_W:
info->type = X86_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
info->type = X86_BREAKPOINT_RW;
break;
case HW_BREAKPOINT_X:
info->type = X86_BREAKPOINT_EXECUTE;
/*
* x86 inst breakpoints need to have a specific undefined len.
 * But we still need to check that userspace is not trying to set up
* an unsupported length, to get a range breakpoint for example.
*/
if (bp->attr.bp_len == sizeof(long)) {
info->len = X86_BREAKPOINT_LEN_X;
return 0;
}
default:
return -EINVAL;
}
/* Len */
switch (bp->attr.bp_len) {
case HW_BREAKPOINT_LEN_1:
info->len = X86_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
info->len = X86_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
info->len = X86_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case HW_BREAKPOINT_LEN_8:
info->len = X86_BREAKPOINT_LEN_8;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
/*
* Validate the arch-specific HW Breakpoint register settings
*/
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
ret = arch_build_bp_info(bp);
if (ret)
return ret;
ret = -EINVAL;
switch (info->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
break;
case X86_BREAKPOINT_LEN_2:
align = 1;
break;
case X86_BREAKPOINT_LEN_4:
align = 3;
break;
#ifdef CONFIG_X86_64
case X86_BREAKPOINT_LEN_8:
align = 7;
break;
#endif
default:
return ret;
}
/*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
if (info->address & align)
return -EINVAL;
return 0;
}
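/*
 * A minimal sketch of the alignment rule enforced above, in user-space
 * form: the mask is simply length - 1 for the power-of-two lengths the
 * switch accepts (1, 2, 4 and, on 64-bit, 8).  Guarded out: illustration
 * only, not part of this driver.
 */
#if 0
#include <stdbool.h>

static bool demo_bp_aligned(unsigned long addr, unsigned int len)
{
	unsigned int align = len - 1;	/* 1 -> 0, 2 -> 1, 4 -> 3, 8 -> 7 */

	return (addr & align) == 0;	/* e.g. 0x1002 fails for len 4, 0x1004 passes */
}
#endif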
/*
* Dump the debug register contents to the user.
 * We can't dump our per cpu values because they
 * may contain cpu-wide breakpoints, something that
* doesn't belong to the current task.
*
* TODO: include non-ptrace user breakpoints (perf)
*/
void aout_dump_debugregs(struct user *dump)
{
int i;
int dr7 = 0;
struct perf_event *bp;
struct arch_hw_breakpoint *info;
struct thread_struct *thread = &current->thread;
for (i = 0; i < HBP_NUM; i++) {
bp = thread->ptrace_bps[i];
if (bp && !bp->attr.disabled) {
dump->u_debugreg[i] = bp->attr.bp_addr;
info = counter_arch_bp(bp);
dr7 |= encode_dr7(i, info->len, info->type);
} else {
dump->u_debugreg[i] = 0;
}
}
dump->u_debugreg[4] = 0;
dump->u_debugreg[5] = 0;
dump->u_debugreg[6] = current->thread.debugreg6;
dump->u_debugreg[7] = dr7;
}
EXPORT_SYMBOL_GPL(aout_dump_debugregs);
/*
* Release the user breakpoints used by ptrace
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < HBP_NUM; i++) {
unregister_hw_breakpoint(t->ptrace_bps[i]);
t->ptrace_bps[i] = NULL;
}
}
void hw_breakpoint_restore(void)
{
set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
set_debugreg(current->thread.debugreg6, 6);
set_debugreg(__this_cpu_read(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
/*
* Handle debug exception notifications.
*
* Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
*
* NOTIFY_DONE returned if one of the following conditions is true.
* i) When the causative address is from user-space and the exception
* is a valid one, i.e. not triggered as a result of lazy debug register
* switching
* ii) When there are more bits than trap<n> set in DR6 register (such
* as BD, BS or BT) indicating that more than one debug condition is
* met and requires some more action in do_debug().
*
* NOTIFY_STOP returned for all other cases
*
*/
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
int i, cpu, rc = NOTIFY_STOP;
struct perf_event *bp;
unsigned long dr7, dr6;
unsigned long *dr6_p;
/* The DR6 value is pointed to by args->err */
dr6_p = (unsigned long *)ERR_PTR(args->err);
dr6 = *dr6_p;
/* If it's a single step, TRAP bits are random */
if (dr6 & DR_STEP)
return NOTIFY_DONE;
/* Do an early return if no trap bits are set in DR6 */
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
get_debugreg(dr7, 7);
/* Disable breakpoints during exception handling */
set_debugreg(0UL, 7);
/*
* Assert that local interrupts are disabled
* Reset the DRn bits in the virtualized register value.
* The ptrace trigger routine will add in whatever is needed.
*/
current->thread.debugreg6 &= ~DR_TRAP_BITS;
cpu = get_cpu();
/* Handle all the breakpoints that were triggered */
for (i = 0; i < HBP_NUM; ++i) {
if (likely(!(dr6 & (DR_TRAP0 << i))))
continue;
/*
* The counter may be concurrently released but that can only
* occur from a call_rcu() path. We can then safely fetch
* the breakpoint, use its callback, touch its counter
* while we are in an rcu_read_lock() path.
*/
rcu_read_lock();
bp = per_cpu(bp_per_reg[i], cpu);
/*
* Reset the 'i'th TRAP bit in dr6 to denote completion of
* exception handling
*/
(*dr6_p) &= ~(DR_TRAP0 << i);
/*
* bp can be NULL due to lazy debug register switching
* or due to concurrent perf counter removing.
*/
if (!bp) {
rcu_read_unlock();
break;
}
perf_bp_event(bp, args->regs);
/*
* Set up resume flag to avoid breakpoint recursion when
* returning back to origin.
*/
if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
args->regs->flags |= X86_EFLAGS_RF;
rcu_read_unlock();
}
/*
* Further processing in do_debug() is needed for a) user-space
* breakpoints (to generate signals) and b) when the system has
* taken exception due to multiple causes
*/
if ((current->thread.debugreg6 & DR_TRAP_BITS) ||
(dr6 & (~DR_TRAP_BITS)))
rc = NOTIFY_DONE;
set_debugreg(dr7, 7);
put_cpu();
return rc;
}
/*
* Handle debug exception notifications.
*/
int __kprobes hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
{
if (val != DIE_DEBUG)
return NOTIFY_DONE;
return hw_breakpoint_handler(data);
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
| gpl-2.0 |
SudaMod-devices/boeffla-kernel-cm-bacon | drivers/infiniband/hw/cxgb3/cxio_hal.c | 11052 | 38223 | /*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <asm/delay.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"
static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
struct cxio_rdev *rdev;
list_for_each_entry(rdev, &rdev_list, entry)
if (!strcmp(rdev->dev_name, dev_name))
return rdev;
return NULL;
}
static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
struct cxio_rdev *rdev;
list_for_each_entry(rdev, &rdev_list, entry)
if (rdev->t3cdev_p == tdev)
return rdev;
return NULL;
}
int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit)
{
int ret;
struct t3_cqe *cqe;
u32 rptr;
struct rdma_cq_op setup;
setup.id = cq->cqid;
setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
setup.op = op;
ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
return ret;
/*
* If the rearm returned an index other than our current index,
* then there might be CQE's in flight (being DMA'd). We must wait
* here for them to complete or the consumer can miss a notification.
*/
if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
int i=0;
rptr = cq->rptr;
/*
* Keep the generation correct by bumping rptr until it
* matches the index returned by the rearm - 1.
*/
while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
rptr++;
/*
* Now rptr is the index for the (last) cqe that was
* in-flight at the time the HW rearmed the CQ. We
* spin until that CQE is valid.
*/
cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
udelay(1);
if (i++ > 1000000) {
printk(KERN_ERR "%s: stalled rnic\n",
rdev_p->dev_name);
return -EIO;
}
}
return 1;
}
return 0;
}
static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
struct rdma_cq_setup setup;
setup.id = cqid;
setup.base_addr = 0; /* NULL address */
setup.size = 0; /* disable the CQ */
setup.credits = 0;
setup.credit_thres = 0;
setup.ovfl_mode = 0;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
u64 sge_cmd;
struct t3_modify_qp_wr *wqe;
struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof(*wqe));
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
T3_SOPEOP);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
sge_cmd = qpid << 8 | 3;
wqe->sge_cmd = cpu_to_be64(sge_cmd);
skb->priority = CPL_PRIORITY_CONTROL;
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
size += 1; /* one extra page for storing cq-in-err state */
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
if (kernel) {
cq->sw_queue = kzalloc(size, GFP_KERNEL);
if (!cq->sw_queue)
return -ENOMEM;
}
cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
&(cq->dma_addr), GFP_KERNEL);
if (!cq->queue) {
kfree(cq->sw_queue);
return -ENOMEM;
}
dma_unmap_addr_set(cq, mapping, cq->dma_addr);
memset(cq->queue, 0, size);
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
setup.size = 1UL << cq->size_log2;
setup.credits = 65535;
setup.credit_thres = 1;
if (rdev_p->t3cdev_p->type != T3A)
setup.ovfl_mode = 0;
else
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
#ifdef notyet
int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
struct rdma_cq_setup setup;
setup.id = cq->cqid;
setup.base_addr = (u64) (cq->dma_addr);
setup.size = 1UL << cq->size_log2;
setup.credits = setup.size;
setup.credit_thres = setup.size; /* TBD: overflow recovery */
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
#endif
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
struct cxio_qpid_list *entry;
u32 qpid;
int i;
mutex_lock(&uctx->lock);
if (!list_empty(&uctx->qpids)) {
entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
entry);
list_del(&entry->entry);
qpid = entry->qpid;
kfree(entry);
} else {
qpid = cxio_hal_get_qpid(rdev_p->rscp);
if (!qpid)
goto out;
for (i = qpid+1; i & rdev_p->qpmask; i++) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
break;
entry->qpid = i;
list_add_tail(&entry->entry, &uctx->qpids);
}
}
out:
mutex_unlock(&uctx->lock);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}
static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
struct cxio_ucontext *uctx)
{
struct cxio_qpid_list *entry;
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
PDBG("%s qpid 0x%x\n", __func__, qpid);
entry->qpid = qpid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
mutex_unlock(&uctx->lock);
}
void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
struct list_head *pos, *nxt;
struct cxio_qpid_list *entry;
mutex_lock(&uctx->lock);
list_for_each_safe(pos, nxt, &uctx->qpids) {
entry = list_entry(pos, struct cxio_qpid_list, entry);
list_del_init(&entry->entry);
if (!(entry->qpid & rdev_p->qpmask))
cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
kfree(entry);
}
mutex_unlock(&uctx->lock);
}
void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
INIT_LIST_HEAD(&uctx->qpids);
mutex_init(&uctx->lock);
}
int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
struct t3_wq *wq, struct cxio_ucontext *uctx)
{
int depth = 1UL << wq->size_log2;
int rqsize = 1UL << wq->rq_size_log2;
wq->qpid = get_qpid(rdev_p, uctx);
if (!wq->qpid)
return -ENOMEM;
wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
if (!wq->rq)
goto err1;
wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
if (!wq->rq_addr)
goto err2;
wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
if (!wq->sq)
goto err3;
wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
depth * sizeof(union t3_wr),
&(wq->dma_addr), GFP_KERNEL);
if (!wq->queue)
goto err4;
memset(wq->queue, 0, depth * sizeof(union t3_wr));
dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
(wq->qpid << rdev_p->qpshift);
wq->rdev = rdev_p;
PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
return 0;
err4:
kfree(wq->sq);
err3:
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
kfree(wq->rq);
err1:
put_qpid(rdev_p, wq->qpid, uctx);
return -ENOMEM;
}
int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
int err;
err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
kfree(cq->sw_queue);
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
* sizeof(struct t3_cqe), cq->queue,
dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
}
int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
struct cxio_ucontext *uctx)
{
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (wq->size_log2))
* sizeof(union t3_wr), wq->queue,
dma_unmap_addr(wq, mapping));
kfree(wq->sq);
cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
kfree(wq->rq);
put_qpid(rdev_p, wq->qpid, uctx);
return 0;
}
static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
struct t3_cqe cqe;
PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
wq, cq, cq->sw_rptr, cq->sw_wptr);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
V_CQE_OPCODE(T3_SEND) |
V_CQE_TYPE(0) |
V_CQE_SWCQE(1) |
V_CQE_QPID(wq->qpid) |
V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
cq->size_log2)));
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cq->sw_wptr++;
}
int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
u32 ptr;
int flushed = 0;
PDBG("%s wq %p cq %p\n", __func__, wq, cq);
/* flush RQ */
PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
wq->rq_rptr, wq->rq_wptr, count);
ptr = wq->rq_rptr + count;
while (ptr++ != wq->rq_wptr) {
insert_recv_cqe(wq, cq);
flushed++;
}
return flushed;
}
static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
struct t3_swsq *sqp)
{
struct t3_cqe cqe;
PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
wq, cq, cq->sw_rptr, cq->sw_wptr);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
V_CQE_OPCODE(sqp->opcode) |
V_CQE_TYPE(1) |
V_CQE_SWCQE(1) |
V_CQE_QPID(wq->qpid) |
V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
cq->size_log2)));
cqe.u.scqe.wrid_hi = sqp->sq_wptr;
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cq->sw_wptr++;
}
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
__u32 ptr;
int flushed = 0;
struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
ptr = wq->sq_rptr + count;
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (ptr != wq->sq_wptr) {
sqp->signaled = 0;
insert_sq_cqe(wq, cq, sqp);
ptr++;
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
flushed++;
}
return flushed;
}
/*
* Move all CQEs from the HWCQ into the SWCQ.
*/
void cxio_flush_hw_cq(struct t3_cq *cq)
{
struct t3_cqe *cqe, *swcqe;
PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
cqe = cxio_next_hw_cqe(cq);
while (cqe) {
PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
__func__, cq->rptr, cq->sw_wptr);
swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
*swcqe = *cqe;
swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
cq->sw_wptr++;
cq->rptr++;
cqe = cxio_next_hw_cqe(cq);
}
}
static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
if (CQE_OPCODE(*cqe) == T3_TERMINATE)
return 0;
if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
return 0;
if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
return 0;
if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
return 0;
return 1;
}
void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
struct t3_cqe *cqe;
u32 ptr;
*count = 0;
ptr = cq->sw_rptr;
while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
if ((SQ_TYPE(*cqe) ||
((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
(CQE_QPID(*cqe) == wq->qpid))
(*count)++;
ptr++;
}
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
struct t3_cqe *cqe;
u32 ptr;
*count = 0;
PDBG("%s count zero %d\n", __func__, *count);
ptr = cq->sw_rptr;
while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
(CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
(*count)++;
ptr++;
}
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
struct rdma_cq_setup setup;
setup.id = 0;
setup.base_addr = 0; /* NULL address */
setup.size = 1; /* enable the CQ */
setup.credits = 0;
/* force SGE to redirect to RspQ and interrupt */
setup.credit_thres = 0;
setup.ovfl_mode = 1;
return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
int err;
u64 sge_cmd, ctx0, ctx1;
u64 base_addr;
struct t3_modify_qp_wr *wqe;
struct sk_buff *skb;
skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
err = cxio_hal_init_ctrl_cq(rdev_p);
if (err) {
PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
goto err;
}
rdev_p->ctrl_qp.workq = dma_alloc_coherent(
&(rdev_p->rnic_info.pdev->dev),
(1 << T3_CTRL_QP_SIZE_LOG2) *
sizeof(union t3_wr),
&(rdev_p->ctrl_qp.dma_addr),
GFP_KERNEL);
if (!rdev_p->ctrl_qp.workq) {
PDBG("%s dma_alloc_coherent failed\n", __func__);
err = -ENOMEM;
goto err;
}
dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
rdev_p->ctrl_qp.dma_addr);
rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
memset(rdev_p->ctrl_qp.workq, 0,
(1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));
mutex_init(&rdev_p->ctrl_qp.lock);
init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
/* update HW Ctrl QP context */
base_addr = rdev_p->ctrl_qp.dma_addr;
base_addr >>= 12;
ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
V_EC_BASE_LO((u32) base_addr & 0xffff));
ctx0 <<= 32;
ctx0 |= V_EC_CREDITS(FW_WR_NUM);
base_addr >>= 16;
ctx1 = (u32) base_addr;
base_addr >>= 32;
ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
V_EC_TYPE(0) | V_EC_GEN(1) |
V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof(*wqe));
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
T3_CTL_QP_TID, 7, T3_SOPEOP);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
wqe->sge_cmd = cpu_to_be64(sge_cmd);
wqe->ctx1 = cpu_to_be64(ctx1);
wqe->ctx0 = cpu_to_be64(ctx0);
PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
(unsigned long long) rdev_p->ctrl_qp.dma_addr,
rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
skb->priority = CPL_PRIORITY_CONTROL;
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
err:
kfree_skb(skb);
return err;
}
static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << T3_CTRL_QP_SIZE_LOG2)
* sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}
/* write len bytes of data into addr (32B aligned address)
* If data is NULL, clear len byte of memory to zero.
* caller acquires the ctrl_qp lock before the call
*/
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
u32 len, void *data)
{
u32 i, nr_wqe, copy_len;
u8 *copy_data;
u8 wr_len, utx_len; /* length in 8 byte flit */
enum t3_wr_flags flag;
__be64 *wqe;
u64 utx_cmd;
addr &= 0x7FFFFFF;
nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
__func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
nr_wqe, data, addr);
utx_len = 3; /* in 32B unit */
for (i = 0; i < nr_wqe; i++) {
if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2)) {
PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
"wait for more space i %d\n", __func__,
rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
!Q_FULL(rdev_p->ctrl_qp.rptr,
rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2))) {
PDBG("%s ctrl_qp workq interrupted\n",
__func__);
return -ERESTARTSYS;
}
PDBG("%s ctrl_qp wakeup, continue posting work request "
"i %d\n", __func__, i);
}
wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
(1 << T3_CTRL_QP_SIZE_LOG2)));
flag = 0;
if (i == (nr_wqe - 1)) {
/* last WQE */
flag = T3_COMPLETION_FLAG;
if (len % 32)
utx_len = len / 32 + 1;
else
utx_len = len / 32;
}
/*
* Force a CQE to return the credit to the workq in case
* we posted more than half the max QP size of WRs
*/
if ((i != 0) &&
(i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
flag = T3_COMPLETION_FLAG;
PDBG("%s force completion at i %d\n", __func__, i);
}
/* build the utx mem command */
wqe += (sizeof(struct t3_bypass_wr) >> 3);
utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
utx_cmd <<= 32;
utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
*wqe = cpu_to_be64(utx_cmd);
wqe++;
copy_data = (u8 *) data + i * 96;
copy_len = len > 96 ? 96 : len;
/* clear memory content if data is NULL */
if (data)
memcpy(wqe, copy_data, copy_len);
else
memset(wqe, 0, copy_len);
if (copy_len % 32)
memset(((u8 *) wqe) + copy_len, 0,
32 - (copy_len % 32));
wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
(utx_len << 2);
wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
(1 << T3_CTRL_QP_SIZE_LOG2)));
/* wptr in the WRID[31:0] */
((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
/*
* This must be the last write with a memory barrier
* for the genbit
*/
build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
Q_GENBIT(rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
wr_len, T3_SOPEOP);
if (flag == T3_COMPLETION_FLAG)
ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
len -= 96;
rdev_p->ctrl_qp.wptr++;
}
return 0;
}
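/*
 * A minimal sketch of the chunking math used above, assuming only what
 * the code itself states: at most 96 bytes of payload per work request
 * and a UTX length expressed in 32-byte units, with the tail padded to
 * a 32-byte boundary.  Guarded out: illustration only, not part of this
 * driver.
 */
#if 0
static unsigned int demo_nr_wqe(unsigned int len)
{
	return len % 96 ? len / 96 + 1 : len / 96;	/* WQEs needed for len bytes */
}

static unsigned int demo_last_utx_len(unsigned int remaining)
{
	return remaining % 32 ? remaining / 32 + 1 : remaining / 32;	/* in 32B units */
}
#endif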
/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
* OUT: stag index
* TBD: shared memory region support
*/
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum tpt_mem_type type, enum tpt_mem_perm perm,
u32 zbva, u64 to, u32 len, u8 page_size,
u32 pbl_size, u32 pbl_addr)
{
int err;
struct tpt_entry tpt;
u32 stag_idx;
u32 wptr;
if (cxio_fatal_error(rdev_p))
return -EIO;
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
stag_idx = cxio_hal_get_stag(rdev_p->rscp);
if (!stag_idx)
return -ENOMEM;
*stag = (stag_idx << 8) | ((*stag) & 0xFF);
}
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
__func__, stag_state, type, pdid, stag_idx);
mutex_lock(&rdev_p->ctrl_qp.lock);
/* write TPT entry */
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
else {
tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
V_TPT_STAG_STATE(stag_state) |
V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
BUG_ON(page_size >= 28);
tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
V_TPT_PAGE_SIZE(page_size));
tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
tpt.len = cpu_to_be32(len);
tpt.va_hi = cpu_to_be32((u32) (to >> 32));
tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
tpt.rsvd_bind_cnt_or_pstag = 0;
tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
}
err = cxio_hal_ctrl_qp_write_mem(rdev_p,
stag_idx +
(rdev_p->rnic_info.tpt_base >> 5),
sizeof(tpt), &tpt);
/* release the stag index to free pool */
if (reset_tpt_entry)
cxio_hal_put_stag(rdev_p->rscp, stag_idx);
wptr = rdev_p->ctrl_qp.wptr;
mutex_unlock(&rdev_p->ctrl_qp.lock);
if (!err)
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
SEQ32_GE(rdev_p->ctrl_qp.rptr,
wptr)))
return -ERESTARTSYS;
return err;
}
int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
u32 pbl_addr, u32 pbl_size)
{
u32 wptr;
int err;
PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
__func__, pbl_addr, rdev_p->rnic_info.pbl_base,
pbl_size);
mutex_lock(&rdev_p->ctrl_qp.lock);
err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
pbl);
wptr = rdev_p->ctrl_qp.wptr;
mutex_unlock(&rdev_p->ctrl_qp.lock);
if (err)
return err;
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
SEQ32_GE(rdev_p->ctrl_qp.rptr,
wptr)))
return -ERESTARTSYS;
return 0;
}
int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
u8 page_size, u32 pbl_size, u32 pbl_addr)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
zbva, to, len, page_size, pbl_size, pbl_addr);
}
int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
u8 page_size, u32 pbl_size, u32 pbl_addr)
{
return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
zbva, to, len, page_size, pbl_size, pbl_addr);
}
int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
u32 pbl_addr)
{
return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
pbl_size, pbl_addr);
}
int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
0, 0);
}
int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
0, 0);
}
int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
}
int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
struct t3_rdma_init_wr *wqe;
struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
PDBG("%s rdev_p %p\n", __func__, rdev_p);
wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
wqe->wrid.id1 = 0;
wqe->qpid = cpu_to_be32(attr->qpid);
wqe->pdid = cpu_to_be32(attr->pdid);
wqe->scqid = cpu_to_be32(attr->scqid);
wqe->rcqid = cpu_to_be32(attr->rcqid);
wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
wqe->rq_size = cpu_to_be32(attr->rq_size);
wqe->mpaattrs = attr->mpaattrs;
wqe->qpcaps = attr->qpcaps;
wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
wqe->rqe_count = cpu_to_be16(attr->rqe_count);
wqe->flags_rtr_type = cpu_to_be16(attr->flags |
V_RTR_TYPE(attr->rtr_type) |
V_CHAN(attr->chan));
wqe->ord = cpu_to_be32(attr->ord);
wqe->ird = cpu_to_be32(attr->ird);
wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
wqe->irs = cpu_to_be32(attr->irs);
skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
cxio_ev_cb = ev_cb;
}
void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
cxio_ev_cb = NULL;
}
static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
{
static int cnt;
struct cxio_rdev *rdev_p = NULL;
struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
" se %0x notify %0x cqbranch %0x creditth %0x\n",
cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
RSPQ_CREDIT_THRESH(rsp_msg));
PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
"len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
if (!rdev_p) {
PDBG("%s called by t3cdev %p with null ulp\n", __func__,
t3cdev_p);
return 0;
}
if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
dev_kfree_skb_irq(skb);
} else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
dev_kfree_skb_irq(skb);
else if (cxio_ev_cb)
(*cxio_ev_cb) (rdev_p, skb);
else
dev_kfree_skb_irq(skb);
cnt++;
return 0;
}
/* Caller takes care of locking if needed */
int cxio_rdev_open(struct cxio_rdev *rdev_p)
{
struct net_device *netdev_p = NULL;
int err = 0;
if (strlen(rdev_p->dev_name)) {
if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
return -EBUSY;
}
netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
if (!netdev_p) {
return -EINVAL;
}
dev_put(netdev_p);
} else if (rdev_p->t3cdev_p) {
if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
return -EBUSY;
}
netdev_p = rdev_p->t3cdev_p->lldev;
strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
T3_MAX_DEV_NAME_LEN);
} else {
PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
return -EINVAL;
}
list_add_tail(&rdev_p->entry, &rdev_list);
PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
if (!rdev_p->t3cdev_p)
rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
rdev_p->t3cdev_p->ulp = (void *) rdev_p;
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
&(rdev_p->fw_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
printk(KERN_ERR MOD "fatal firmware version mismatch: "
"need version %u but adapter has version %u\n",
CXIO_FW_MAJ,
G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
err = -EINVAL;
goto err1;
}
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
&(rdev_p->rnic_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
&(rdev_p->port_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
/*
* qpshift is the number of bits to shift the qpid left in order
* to get the correct address of the doorbell for that qp.
*/
cxio_init_ucontext(rdev_p, &rdev_p->uctx);
rdev_p->qpshift = PAGE_SHIFT -
ilog2(65536 >>
ilog2(rdev_p->rnic_info.udbell_len >>
PAGE_SHIFT));
rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
"pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
__func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
rdev_p->rnic_info.pbl_base,
rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
rdev_p->rnic_info.rqt_top);
PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
"qpnr %d qpmask 0x%x\n",
rdev_p->rnic_info.udbell_len,
rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
err = cxio_hal_init_ctrl_qp(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
__func__, err);
goto err1;
}
err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
T3_MAX_NUM_PD);
if (err) {
printk(KERN_ERR "%s error %d initializing hal resources.\n",
__func__, err);
goto err2;
}
err = cxio_hal_pblpool_create(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
__func__, err);
goto err3;
}
err = cxio_hal_rqtpool_create(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
__func__, err);
goto err4;
}
return 0;
err4:
cxio_hal_pblpool_destroy(rdev_p);
err3:
cxio_hal_destroy_resource(rdev_p->rscp);
err2:
cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
rdev_p->t3cdev_p->ulp = NULL;
list_del(&rdev_p->entry);
return err;
}
void cxio_rdev_close(struct cxio_rdev *rdev_p)
{
if (rdev_p) {
cxio_hal_pblpool_destroy(rdev_p);
cxio_hal_rqtpool_destroy(rdev_p);
list_del(&rdev_p->entry);
cxio_hal_destroy_ctrl_qp(rdev_p);
cxio_hal_destroy_resource(rdev_p->rscp);
rdev_p->t3cdev_p->ulp = NULL;
}
}
int __init cxio_hal_init(void)
{
if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
return -ENOMEM;
t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
return 0;
}
void __exit cxio_hal_exit(void)
{
struct cxio_rdev *rdev, *tmp;
t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
cxio_rdev_close(rdev);
cxio_hal_destroy_rhdl_resource();
}
static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
{
struct t3_swsq *sqp;
__u32 ptr = wq->sq_rptr;
int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (count--)
if (!sqp->signaled) {
ptr++;
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
} else if (sqp->complete) {
/*
* Insert this completed cqe into the swcq.
*/
PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
__func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
= sqp->cqe;
cq->sw_wptr++;
sqp->signaled = 0;
break;
} else
break;
}
static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
struct t3_cqe *read_cqe)
{
read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
read_cqe->len = wq->oldest_read->read_len;
read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
V_CQE_OPCODE(T3_READ_REQ) |
V_CQE_TYPE(1));
}
/*
* Return a ptr to the next read wr in the SWSQ or NULL.
*/
static void advance_oldest_read(struct t3_wq *wq)
{
u32 rptr = wq->oldest_read - wq->sq + 1;
u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
if (wq->oldest_read->opcode == T3_READ_REQ)
return;
rptr++;
}
wq->oldest_read = NULL;
}
/*
* cxio_poll_cq
*
* Caller must:
* check the validity of the first CQE,
 * supply the wq associated with the qpid.
*
* credit: cq credit to return to sge.
* cqe_flushed: 1 iff the CQE is flushed.
* cqe: copy of the polled CQE.
*
* return value:
* 0 CQE returned,
* -1 CQE skipped, try again.
*/
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
int ret = 0;
struct t3_cqe *hw_cqe, read_cqe;
*cqe_flushed = 0;
*credit = 0;
hw_cqe = cxio_next_cqe(cq);
PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
" opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
__func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
CQE_WRID_LOW(*hw_cqe));
/*
* skip cqe's not affiliated with a QP.
*/
if (wq == NULL) {
ret = -1;
goto skip_cqe;
}
/*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
* 3) read_len not reflected from the wr.
* 4) cq_type is RQ_TYPE not SQ_TYPE.
*/
if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
/*
* If this is an unsolicited read response, then the read
* was generated by the kernel driver as part of peer-2-peer
* connection setup. So ignore the completion.
*/
if (!wq->oldest_read) {
if (CQE_STATUS(*hw_cqe))
wq->error = 1;
ret = -1;
goto skip_cqe;
}
/*
* Don't write to the HWCQ, so create a new read req CQE
* in local memory.
*/
create_read_req_cqe(wq, hw_cqe, &read_cqe);
hw_cqe = &read_cqe;
advance_oldest_read(wq);
}
/*
* T3A: Discard TERMINATE CQEs.
*/
if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
ret = -1;
wq->error = 1;
goto skip_cqe;
}
if (CQE_STATUS(*hw_cqe) || wq->error) {
*cqe_flushed = wq->error;
wq->error = 1;
/*
* T3A inserts errors into the CQE. We cannot return
* these as work completions.
*/
/* incoming write failures */
if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
&& RQ_TYPE(*hw_cqe)) {
ret = -1;
goto skip_cqe;
}
/* incoming read request failures */
if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
ret = -1;
goto skip_cqe;
}
/* incoming SEND with no receive posted failures */
if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
ret = -1;
goto skip_cqe;
}
BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
goto proc_cqe;
}
/*
* RECV completion.
*/
if (RQ_TYPE(*hw_cqe)) {
/*
* HW only validates 4 bits of MSN. So we must validate that
 * the MSN in the SEND is the next expected MSN. If it's not,
* then we complete this with TPT_ERR_MSN and mark the wq in
* error.
*/
if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
wq->error = 1;
ret = -1;
goto skip_cqe;
}
if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
wq->error = 1;
hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
goto proc_cqe;
}
goto proc_cqe;
}
/*
* If we get here its a send completion.
*
* Handle out of order completion. These get stuffed
* in the SW SQ. Then the SW SQ is walked to move any
* now in-order completions into the SW CQ. This handles
* 2 cases:
* 1) reaping unsignaled WRs when the first subsequent
* signaled WR is completed.
* 2) out of order read completions.
*/
if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
struct t3_swsq *sqp;
PDBG("%s out of order completion going in swsq at idx %ld\n",
__func__,
Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
sqp = wq->sq +
Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
sqp->cqe = *hw_cqe;
sqp->complete = 1;
ret = -1;
goto flush_wq;
}
proc_cqe:
*cqe = *hw_cqe;
/*
* Reap the associated WR(s) that are freed up with this
* completion.
*/
if (SQ_TYPE(*hw_cqe)) {
wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
PDBG("%s completing sq idx %ld\n", __func__,
Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
*cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
wq->sq_rptr++;
} else {
PDBG("%s completing rq idx %ld\n", __func__,
Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
*cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
cxio_hal_pblpool_free(wq->rdev,
wq->rq[Q_PTR2IDX(wq->rq_rptr,
wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
wq->rq_rptr++;
}
flush_wq:
/*
* Flush any completed cqes that are now in-order.
*/
flush_completed_wrs(wq, cq);
skip_cqe:
if (SW_CQE(*hw_cqe)) {
PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
__func__, cq, cq->cqid, cq->sw_rptr);
++cq->sw_rptr;
} else {
PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
__func__, cq, cq->cqid, cq->rptr);
++cq->rptr;
/*
* T3A: compute credits.
*/
if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
|| ((cq->rptr - cq->wptr) >= 128)) {
*credit = cq->rptr - cq->wptr;
cq->wptr = cq->rptr;
}
}
return ret;
}
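/*
 * A minimal caller sketch for cxio_poll_cq() under the contract spelled
 * out above: validate the head CQE with cxio_next_cqe(), poll once,
 * return any accumulated credit to the SGE, and retry on -1.  The helper
 * name is hypothetical and the sketch is guarded out; it is illustration,
 * not part of this driver.
 */
#if 0
static int demo_poll_one_cqe(struct t3_wq *wq, struct t3_cq *cq,
			     struct t3_cqe *cqe, u64 *cookie)
{
	u8 flushed;
	u32 credit;
	int ret;

	for (;;) {
		if (!cxio_next_cqe(cq))		/* nothing valid to poll */
			return -EAGAIN;
		ret = cxio_poll_cq(wq, cq, cqe, &flushed, cookie, &credit);
		if (credit)			/* return accumulated credits to the SGE */
			cxio_hal_cq_op(wq->rdev, cq, CQ_CREDIT_UPDATE, credit);
		if (ret == 0)			/* CQE copied out */
			return 0;
		/* ret == -1: CQE skipped, try the next one */
	}
}
#endif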
| gpl-2.0 |