repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
emwno/android_kernel_U8500 | drivers/s390/cio/qdio_debug.c | 2643 | 8109 | /*
* drivers/s390/cio/qdio_debug.c
*
* Copyright IBM Corp. 2008,2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
/* s390 debug-feature areas shared by the whole qdio driver */
debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;

/* root directory of the qdio debugfs hierarchy ("qdio/") */
static struct dentry *debugfs_root;
/* size of per-queue file names like "output_12": 9 chars + NUL fits in 10 */
#define QDIO_DEBUGFS_NAME_LEN	10
/*
 * qdio_allocate_dbf - log the initialization parameters and create the
 * per-device s390 debug-feature trace area for @irq_ptr.
 *
 * Records the relevant fields of @init_data in the global setup trace,
 * then registers a device-local debug area named "qdio_<busid>".
 * NOTE(review): the debug_register()/debug_register_view() return values
 * are not checked here — a failed registration leaves debug_area NULL
 * (or an error pointer); presumably the DBF macros tolerate that — verify.
 */
void qdio_allocate_dbf(struct qdio_initialize *init_data,
		       struct qdio_irq *irq_ptr)
{
	char text[20];

	/* dump the setup parameters into the global setup trace area */
	DBF_EVENT("qfmt:%1d", init_data->q_format);
	DBF_HEX(init_data->adapter_name, 8);
	DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
	DBF_HEX(&init_data->qib_param_field, sizeof(void *));
	DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
	DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
	DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
		  init_data->no_output_qs);
	DBF_HEX(&init_data->input_handler, sizeof(void *));
	DBF_HEX(&init_data->output_handler, sizeof(void *));
	DBF_HEX(&init_data->int_parm, sizeof(long));
	DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
	DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
	DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);

	/* allocate trace view for the interface */
	snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
	irq_ptr->debug_area = debug_register(text, 2, 1, 16);
	debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
	/* default to warnings and above; level can be raised via debugfs */
	debug_set_level(irq_ptr->debug_area, DBF_WARN);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
}
/*
 * qstat_show - seq_file show callback for a per-queue debugfs file.
 *
 * Dumps the queue's bookkeeping counters, one character per SBAL slot
 * describing its SLSB state, and (if enabled) the SBAL fill-level
 * histogram.  m->private is the struct qdio_q set up by
 * setup_debugfs_entry().
 */
static int qstat_show(struct seq_file *m, void *v)
{
	unsigned char state;
	struct qdio_q *q = m->private;
	int i;

	if (!q)
		return 0;

	seq_printf(m, "DSCI: %d nr_used: %d\n",
		   *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
	seq_printf(m, "ftc: %d last_move: %d\n",
		   q->first_to_check, q->last_move);
	if (q->is_input_q) {
		/* input-only state: polling/ack tracking and IRQ avoidance */
		seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
			   q->u.in.polling, q->u.in.ack_start,
			   q->u.in.ack_count);
		seq_printf(m, "IRQs disabled: %u\n",
			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
			   &q->u.in.queue_irq_state));
	}

	/*
	 * One character per buffer:
	 *   N not initialized, + primed, A acked, x error,
	 *   - empty, . halted, ? unknown state value
	 */
	seq_printf(m, "SBAL states:\n");
	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		debug_get_buf_state(q, i, &state);
		switch (state) {
		case SLSB_P_INPUT_NOT_INIT:
		case SLSB_P_OUTPUT_NOT_INIT:
			seq_printf(m, "N");
			break;
		case SLSB_P_INPUT_PRIMED:
		case SLSB_CU_OUTPUT_PRIMED:
			seq_printf(m, "+");
			break;
		case SLSB_P_INPUT_ACK:
			seq_printf(m, "A");
			break;
		case SLSB_P_INPUT_ERROR:
		case SLSB_P_OUTPUT_ERROR:
			seq_printf(m, "x");
			break;
		case SLSB_CU_INPUT_EMPTY:
		case SLSB_P_OUTPUT_EMPTY:
			seq_printf(m, "-");
			break;
		case SLSB_P_INPUT_HALTED:
		case SLSB_P_OUTPUT_HALTED:
			seq_printf(m, ".");
			break;
		default:
			seq_printf(m, "?");
		}
		/* line break between the first and second row of 64 states */
		if (i == 63)
			seq_printf(m, "\n");
	}
	seq_printf(m, "\n");
	/* NOTE(review): this column ruler is printed below the second row */
	seq_printf(m, "|64     |72     |80     |88     |96     |104    |112   | 127|\n");

	seq_printf(m, "\nSBAL statistics:");
	if (!q->irq_ptr->perf_stat_enabled) {
		seq_printf(m, " disabled\n");
		return 0;
	}

	/* histogram of how many SBALs were handled per processing run */
	seq_printf(m, "\n1          2..        4..        8..        "
		   "16..       32..       64..       127\n");
	for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
		seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
	seq_printf(m, "\nError      NOP        Total\n%-10u %-10u %-10u\n\n",
		   q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
		   q->q_stats.nr_sbal_total);
	return 0;
}
/*
 * Open callback for a per-queue state file: hand the queue pointer that
 * debugfs stashed in i_private over to the single_open()/qstat_show pair.
 */
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
	void *queue = filp->f_path.dentry->d_inode->i_private;

	return single_open(filp, qstat_show, queue);
}
/* file operations for the read-only per-queue state files */
static const struct file_operations debugfs_fops = {
	.owner	 = THIS_MODULE,
	.open	 = qstat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
/*
 * Display labels for the performance counters.  qperf_show() walks
 * irq_ptr->perf_stat as a flat array of unsigned int, so the order here
 * must match the member order of the perf_stat structure -- assumes the
 * layouts agree; verify against the struct definition in qdio.h.
 */
static char *qperf_names[] = {
	"Assumed adapter interrupts",
	"QDIO interrupts",
	"Requested PCIs",
	"Inbound tasklet runs",
	"Inbound tasklet resched",
	"Inbound tasklet resched2",
	"Outbound tasklet runs",
	"SIGA read",
	"SIGA write",
	"SIGA sync",
	"Inbound calls",
	"Inbound handler",
	"Inbound stop_polling",
	"Inbound queue full",
	"Outbound calls",
	"Outbound handler",
	"Outbound queue full",
	"Outbound fast_requeue",
	"Outbound target_full",
	"QEBSM eqbs",
	"QEBSM eqbs partial",
	"QEBSM sqbs",
	"QEBSM sqbs partial",
	"Discarded interrupts"
};
/*
 * seq_file show callback for the "statistics" file: print every counter
 * of irq_ptr->perf_stat next to its label from qperf_names[].
 */
static int qperf_show(struct seq_file *m, void *v)
{
	struct qdio_irq *irq_ptr = m->private;
	unsigned int *counters;
	int idx;

	if (!irq_ptr)
		return 0;

	if (!irq_ptr->perf_stat_enabled) {
		seq_printf(m, "disabled\n");
		return 0;
	}

	/* treat the stat struct as a flat array matching qperf_names[] */
	counters = (unsigned int *)&irq_ptr->perf_stat;
	for (idx = 0; idx < ARRAY_SIZE(qperf_names); idx++)
		seq_printf(m, "%26s:\t%u\n", qperf_names[idx], counters[idx]);
	return 0;
}
/*
 * qperf_seq_write - write handler for the "statistics" file.
 *
 * Accepts a decimal number from user space: "0" disables statistics and
 * clears all per-device and per-queue counters, "1" enables collection.
 * Any other value is silently accepted and ignored (kept for backward
 * compatibility).  Returns @count on success or a negative errno.
 *
 * Fixes: copy_from_user() was passed &buf (type char (*)[8]) instead of
 * the array itself; same address, but the misleading type is corrected.
 */
static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
			       size_t count, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct qdio_irq *irq_ptr = seq->private;
	struct qdio_q *q;
	unsigned long val;
	char buf[8];
	int ret, i;

	if (!irq_ptr)
		return 0;
	/* leave room for the terminating NUL */
	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;
	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
		/* disable and reset every counter */
		irq_ptr->perf_stat_enabled = 0;
		memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
		for_each_input_queue(irq_ptr, q, i)
			memset(&q->q_stats, 0, sizeof(q->q_stats));
		for_each_output_queue(irq_ptr, q, i)
			memset(&q->q_stats, 0, sizeof(q->q_stats));
		break;
	case 1:
		irq_ptr->perf_stat_enabled = 1;
		break;
	default:
		/* unknown values are ignored, matching prior behavior */
		break;
	}
	return count;
}
/*
 * Open callback for the "statistics" file: forward the qdio_irq pointer
 * stored in the inode's i_private to qperf_show via single_open().
 */
static int qperf_seq_open(struct inode *inode, struct file *filp)
{
	void *irq_ptr = filp->f_path.dentry->d_inode->i_private;

	return single_open(filp, qperf_show, irq_ptr);
}
static struct file_operations debugfs_perf_fops = {
.owner = THIS_MODULE,
.open = qperf_seq_open,
.read = seq_read,
.write = qperf_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
/*
 * setup_debugfs_entry - create the per-queue debugfs file for @q.
 *
 * The file is named "input_<nr>" or "output_<nr>" (fits within
 * QDIO_DEBUGFS_NAME_LEN for up to two-digit queue numbers) and is placed
 * under the device's debugfs directory.  On error the dentry is cleared
 * so the later debugfs_remove() is a harmless no-op.
 */
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
	char name[QDIO_DEBUGFS_NAME_LEN];

	snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
		 q->is_input_q ? "input" : "output",
		 q->nr);
	q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
				q->irq_ptr->debugfs_dev, q, &debugfs_fops);
	if (IS_ERR(q->debugfs_q))
		q->debugfs_q = NULL;
}
/*
 * qdio_setup_debug_entries - build the debugfs tree for one qdio device.
 *
 * Creates "qdio/<busid>/" with a "statistics" control file and one state
 * file per input and output queue.  Failures only disable the debug
 * interface (pointers are NULLed); they never fail device setup.
 */
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
	struct qdio_q *q;
	int i;

	irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
						  debugfs_root);
	if (IS_ERR(irq_ptr->debugfs_dev))
		irq_ptr->debugfs_dev = NULL;

	irq_ptr->debugfs_perf = debugfs_create_file("statistics",
				S_IFREG | S_IRUGO | S_IWUSR,
				irq_ptr->debugfs_dev, irq_ptr,
				&debugfs_perf_fops);
	if (IS_ERR(irq_ptr->debugfs_perf))
		irq_ptr->debugfs_perf = NULL;

	for_each_input_queue(irq_ptr, q, i)
		setup_debugfs_entry(q, cdev);
	for_each_output_queue(irq_ptr, q, i)
		setup_debugfs_entry(q, cdev);
}
/*
 * qdio_shutdown_debug_entries - tear down the device's debugfs tree.
 *
 * Children are removed before their parent directory; debugfs_remove()
 * ignores NULL dentries, so entries that failed to create are fine.
 */
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		debugfs_remove(q->debugfs_q);
	for_each_output_queue(irq_ptr, q, i)
		debugfs_remove(q->debugfs_q);
	debugfs_remove(irq_ptr->debugfs_perf);
	debugfs_remove(irq_ptr->debugfs_dev);
}
/*
 * qdio_debug_init - module init: create the debugfs root and the two
 * global s390 debug-feature areas (setup and error traces).
 *
 * NOTE(review): registration failures are not checked; the driver
 * presumably tolerates NULL debug areas in the DBF macros — verify.
 * Always returns 0.
 */
int __init qdio_debug_init(void)
{
	debugfs_root = debugfs_create_dir("qdio", NULL);

	qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
	debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qdio_dbf_setup, DBF_INFO);
	DBF_EVENT("dbf created\n");

	qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
	debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
	debug_set_level(qdio_dbf_error, DBF_INFO);
	DBF_ERROR("dbf created\n");
	return 0;
}
/*
 * qdio_debug_exit - module exit: remove the debugfs root (recursively
 * NULL-safe) and unregister the global debug areas if they exist.
 */
void qdio_debug_exit(void)
{
	debugfs_remove(debugfs_root);
	if (qdio_dbf_setup)
		debug_unregister(qdio_dbf_setup);
	if (qdio_dbf_error)
		debug_unregister(qdio_dbf_error);
}
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_c1skt | drivers/infiniband/hw/mthca/mthca_mr.c | 2643 | 24574 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
/*
 * Handle for a range of MTT (memory translation table) segments owned by
 * one memory region: 2^order segments starting at first_seg, allocated
 * from the given buddy allocator.
 */
struct mthca_mtt {
	struct mthca_buddy *buddy;
	int                 order;	/* log2 of the number of segments */
	u32                 first_seg;	/* index of the first segment */
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
	__be32 flags;
	__be32 page_size;	/* log2(page size) - 12, hardware encoding */
	__be32 key;
	__be32 pd;
	__be64 start;		/* region start virtual address (iova) */
	__be64 length;
	__be32 lkey;
	__be32 window_count;
	__be32 window_count_limit;
	__be64 mtt_seg;		/* physical offset of the MTT segment */
	__be32 mtt_sz;		/* Arbel only */
	u32    reserved[2];
} __attribute__((packed));

/* MPT flag bits (flags field above) */
#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 << 9)
#define MTHCA_MPT_FLAG_REGION        (1 << 8)

#define MTHCA_MTT_FLAG_PRESENT       1

/* ownership byte written at the start of an MPT entry */
#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

/* key increment per remap on Sinai HCAs (memory key optimization) */
#define SINAI_FMR_KEY_INC 0x1000000
/*
* Buddy allocator for MTT segments (currently not very efficient
* since it doesn't keep a free list and just searches linearly
* through the bitmaps)
*/
/*
 * mthca_buddy_alloc - allocate 2^order contiguous MTT segments.
 *
 * Scans orders from @order upward for a free block, then splits larger
 * blocks down, marking each split-off buddy free.  Returns the first
 * segment index, or (u32)-1 on exhaustion.  Runs under buddy->lock.
 */
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	/* find the smallest order >= the request that has a free block */
	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* split down to the requested order, freeing each buddy half */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	/* convert block index at @order into an absolute segment index */
	seg <<= order;

	return seg;
}
/*
 * mthca_buddy_free - return 2^order segments starting at @seg.
 *
 * Coalesces with the buddy block at each order while the buddy is also
 * free, then marks the (possibly merged) block free.
 */
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
	/* absolute segment index -> block index at @order */
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		/* buddy is free too: merge and try the next order up */
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
/*
 * mthca_buddy_init - initialize a buddy allocator covering 2^max_order
 * segments: one bitmap per order plus a free-block counter per order.
 * Initially the single top-order block is free.  Returns 0 or -ENOMEM.
 *
 * Fixes: num_free was allocated with sizeof (int *) per element instead
 * of the element size, over-allocating on 64-bit; both allocations now
 * use the sizeof *ptr idiom so they track the array types.
 */
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof *buddy->bits,
			      GFP_KERNEL);
	buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		/* bitmap for order i covers 2^(max_order - i) blocks */
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i],
			    1 << (buddy->max_order - i));
	}

	/* the whole range starts as one free block of the top order */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}
/*
 * Release everything mthca_buddy_init() allocated: each per-order
 * bitmap, then the bitmap pointer array and the free counters.
 */
static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
	int order;

	for (order = 0; order <= buddy->max_order; ++order)
		kfree(buddy->bits[order]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}
/*
 * mthca_alloc_mtt_range - allocate 2^order MTT segments and, on mem-free
 * (Arbel) HCAs, pin the backing ICM for that range.
 *
 * Returns the first segment index or (u32)-1 on failure.  Note the
 * sentinel: seg is u32, so the == -1 comparison relies on the usual
 * implicit conversion of -1 to 0xffffffff.
 */
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
				 struct mthca_buddy *buddy)
{
	u32 seg = mthca_buddy_alloc(buddy, order);

	if (seg == -1)
		return -1;

	if (mthca_is_memfree(dev))
		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
					  seg + (1 << order) - 1)) {
			/* ICM mapping failed: give the segments back */
			mthca_buddy_free(buddy, seg, order);
			seg = -1;
		}

	return seg;
}
/*
 * __mthca_alloc_mtt - allocate an MTT handle big enough for @size
 * translation entries, taking segments from @buddy.
 *
 * The order is the smallest power of two of segments whose capacity
 * (mtt_seg_size / 8 entries per segment) covers @size.  Returns the
 * handle or an ERR_PTR on failure.
 */
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
					   struct mthca_buddy *buddy)
{
	struct mthca_mtt *mtt;
	int i;

	if (size <= 0)
		return ERR_PTR(-EINVAL);

	mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->buddy = buddy;
	mtt->order = 0;
	/* round entry count up to a power-of-two number of segments */
	for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
		++mtt->order;

	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
	if (mtt->first_seg == -1) {
		kfree(mtt);
		return ERR_PTR(-ENOMEM);
	}

	return mtt;
}
/* Public MTT allocation: always draws from the main MTT buddy. */
struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
	return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

/*
 * mthca_free_mtt - release an MTT handle (NULL-safe): return the
 * segments to their buddy, drop the ICM range reference, free the
 * handle.  NOTE(review): mthca_table_put_range() is called even on
 * Tavor; presumably it is a no-op when there is no ICM table — verify.
 */
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
	if (!mtt)
		return;

	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

	mthca_table_put_range(dev, dev->mr_table.mtt_table,
			      mtt->first_seg,
			      mtt->first_seg + (1 << mtt->order) - 1);

	kfree(mtt);
}
/*
 * __mthca_write_mtt - program MTT entries through the WRITE_MTT firmware
 * command, in mailbox-sized batches.
 *
 * Mailbox layout: slot 0 is the target MTT address, slot 1 is reserved,
 * slots 2.. hold the entries (each tagged PRESENT).  Returns 0 or a
 * negative errno; a nonzero firmware status maps to -EINVAL.
 */
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
			     int start_index, u64 *buffer_list, int list_len)
{
	struct mthca_mailbox *mailbox;
	__be64 *mtt_entry;
	int err = 0;
	u8 status;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mtt_entry = mailbox->buf;

	while (list_len > 0) {
		/* absolute address of the first entry to write */
		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
					   mtt->first_seg * dev->limits.mtt_seg_size +
					   start_index * 8);
		mtt_entry[1] = 0;
		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
			mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
						       MTHCA_MTT_FLAG_PRESENT);

		/*
		 * If we have an odd number of entries to write, add
		 * one more dummy entry for firmware efficiency.
		 */
		if (i & 1)
			mtt_entry[i + 2] = 0;

		err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
		if (err) {
			mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
			goto out;
		}
		if (status) {
			mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
				   status);
			err = -EINVAL;
			goto out;
		}

		/* advance to the next batch */
		list_len    -= i;
		start_index += i;
		buffer_list += i;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
/*
 * Maximum number of MTT entries mthca_write_mtt() may program per call.
 * Without the direct FMR path, writes go through the WRITE_MTT mailbox,
 * which loses two u64 slots to the index and reserved fields.  With it,
 * Arbel is limited to one page of MTTs while Tavor is effectively
 * unlimited.
 */
int mthca_write_mtt_size(struct mthca_dev *dev)
{
	int fmr_direct = (dev->mthca_flags & MTHCA_FLAG_FMR) &&
			 dev->mr_table.fmr_mtt_buddy == &dev->mr_table.mtt_buddy;

	if (!fmr_direct)
		/*
		 * Be friendly to WRITE_MTT command and leave two empty
		 * slots for the index and reserved fields of the mailbox.
		 */
		return PAGE_SIZE / sizeof (u64) - 2;

	/* For Arbel, all MTTs must fit in the same page. */
	if (mthca_is_memfree(dev))
		return PAGE_SIZE / sizeof (u64);

	return 0x7ffffff;
}
/*
 * mthca_tavor_write_mtt_seg - program MTT entries by writing the
 * memory-mapped MTT table directly (Tavor FMR fast path, no firmware
 * command).  Each entry is byte-swapped and tagged PRESENT.
 */
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	u64 __iomem *mtts;
	int i;

	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
		start_index * sizeof (u64);
	for (i = 0; i < list_len; ++i)
		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
				  mtts + i);
}
/*
 * mthca_arbel_write_mtt_seg - program MTT entries in the ICM (Arbel FMR
 * fast path).  The CPU writes the entries in host memory, bracketed by
 * dma_sync calls so the device observes them.  The write must stay
 * within one page and start on a segment boundary (BUG_ON-enforced).
 */
static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;
	int s = start_index * sizeof (u64);

	/* For Arbel, all MTTs must fit in the same page. */
	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
	/* Require full segments */
	BUG_ON(s % dev->limits.mtt_seg_size);

	mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
				s / dev->limits.mtt_seg_size, &dma_handle);

	BUG_ON(!mtts);

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				list_len * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < list_len; ++i)
		mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   list_len * sizeof (u64), DMA_TO_DEVICE);
}
/*
 * mthca_write_mtt - program @list_len translation entries for @mtt,
 * starting at @start_index.
 *
 * When the direct FMR mapping is unavailable, everything goes through
 * the WRITE_MTT firmware command; otherwise the entries are written in
 * chunks via the memory-mapped (Tavor) or ICM (Arbel) fast path.
 */
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
		    int start_index, u64 *buffer_list, int list_len)
{
	int chunk_max = mthca_write_mtt_size(dev);

	if (!(dev->mthca_flags & MTHCA_FLAG_FMR) ||
	    dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy)
		return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

	while (list_len > 0) {
		int chunk = min(chunk_max, list_len);

		if (mthca_is_memfree(dev))
			mthca_arbel_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);
		else
			mthca_tavor_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);

		list_len    -= chunk;
		start_index += chunk;
		buffer_list += chunk;
	}

	return 0;
}
/*
 * Conversions between MPT table indices and the memory keys handed to
 * consumers.  Tavor uses the index directly; Arbel rotates the byte
 * order so that consecutive remaps of an FMR produce distinct keys.
 */
static inline u32 tavor_hw_index_to_key(u32 ind)
{
	return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
	return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
	/* rotate right by 24 bits: index in the high byte of the key */
	return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
	/* inverse rotation of arbel_hw_index_to_key() */
	return (key << 24) | (key >> 8);
}

/* dispatch on HCA generation */
static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
	if (mthca_is_memfree(dev))
		return arbel_hw_index_to_key(ind);
	else
		return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
	if (mthca_is_memfree(dev))
		return arbel_key_to_hw_index(key);
	else
		return tavor_key_to_hw_index(key);
}

/*
 * On Sinai HCAs with the memory-key throughput optimization, fold the
 * key into the reduced keyspace (bit 23 carries what was bit 3 of the
 * upper byte) -- assumes the Sinai key-remap scheme; verify against the
 * Sinai PRM / MTHCA_FLAG_SINAI_OPT setup code.
 */
static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		return ((key << 20) & 0x800000) | (key & 0x7fffff);
	else
		return key;
}
/*
 * mthca_mr_alloc - allocate and program a memory region.
 *
 * Allocates an MPT index, pins its ICM on mem-free HCAs, fills in an MPT
 * entry (physical if mr->mtt is NULL, otherwise pointing at the caller's
 * MTT), and hands it to firmware with SW2HW_MPT.  mr->mtt must be set
 * (or NULL) by the caller before the call.  Returns 0 or negative errno;
 * on failure all acquired resources are unwound via the goto chain.
 */
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
	struct mthca_mailbox *mailbox;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int i;
	int err;
	u8 status;

	WARN_ON(buffer_size_shift >= 32);

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		/* pin the ICM page backing this MPT entry */
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_table;
	}
	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);
	if (!mr->mtt)
		/* no translation: region maps physical memory directly */
		mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	mpt_entry->start     = cpu_to_be64(iova);
	mpt_entry->length    = cpu_to_be64(total_size);

	/* zero everything from lkey to the end of the entry */
	memset(&mpt_entry->lkey, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

	if (mr->mtt)
		mpt_entry->mtt_seg =
			cpu_to_be64(dev->mr_table.mtt_base +
				    mr->mtt->first_seg * dev->limits.mtt_seg_size);

	if (0) {
		/* debug dump of the MPT entry; compiled out by default */
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox;
	} else if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox;
	}

	mthca_free_mailbox(dev, mailbox);
	return err;

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
/*
 * mthca_mr_alloc_notrans - allocate a region with no translation: it
 * covers the full address space physically (start 0, length ~0).
 */
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
			   u32 access, struct mthca_mr *mr)
{
	mr->mtt = NULL;
	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

/*
 * mthca_mr_alloc_phys - allocate a region backed by an explicit list of
 * buffer addresses: allocate an MTT, write the entries, then create the
 * MPT.  The MTT is freed on any failure.
 */
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
			u64 *buffer_list, int buffer_size_shift,
			int list_len, u64 iova, u64 total_size,
			u32 access, struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err) {
		mthca_free_mtt(dev, mr->mtt);
		return err;
	}

	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		mthca_free_mtt(dev, mr->mtt);

	return err;
}
/* Free mr or fmr: drop the MPT's ICM reference and release its index. */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
	mthca_table_put(dev, dev->mr_table.mpt_table,
			key_to_hw_index(dev, lkey));

	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

/*
 * mthca_free_mr - destroy a regular memory region: take the MPT back
 * from hardware (HW2SW_MPT; failures are only logged since there is no
 * recovery), then free the MPT slot and the MTT.
 */
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
	int err;
	u8 status;

	err = mthca_HW2SW_MPT(dev, NULL,
			      key_to_hw_index(dev, mr->ibmr.lkey) &
			      (dev->limits.num_mpts - 1),
			      &status);
	if (err)
		mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
			   status);

	mthca_free_region(dev, mr->ibmr.lkey);
	mthca_free_mtt(dev, mr->mtt);
}
/*
 * mthca_fmr_alloc - allocate a fast memory region.
 *
 * Like mthca_mr_alloc(), but additionally caches direct pointers to the
 * MPT entry and MTT entries (MMIO pointers on Tavor, ICM virtual
 * addresses plus a DMA handle on Arbel) so later map/unmap operations
 * can bypass firmware commands.  The MPT is programmed with zero
 * start/length; mapping fills those in.  Returns 0 or negative errno,
 * unwinding all resources on failure.
 */
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
		    u32 access, struct mthca_fmr *mr)
{
	struct mthca_mpt_entry *mpt_entry;
	struct mthca_mailbox *mailbox;
	u64 mtt_seg;
	u32 key, idx;
	u8 status;
	int list_len = mr->attr.max_pages;
	int err = -ENOMEM;
	int i;

	/* hardware encodes page_size as shift - 12 in a u32 */
	if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
		return -EINVAL;

	/* For Arbel, all MTTs must fit in the same page. */
	if (mthca_is_memfree(dev) &&
	    mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
		return -EINVAL;

	mr->maps = 0;

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);
	idx = key & (dev->limits.num_mpts - 1);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;

		/* cache the ICM address of the MPT entry for fast remap */
		mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
		BUG_ON(!mr->mem.arbel.mpt);
	} else
		/* Tavor: MPT entry lives in device memory, use MMIO */
		mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
			sizeof *(mr->mem.tavor.mpt) * idx;

	mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_out_table;
	}

	mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;

	if (mthca_is_memfree(dev)) {
		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
						      mr->mtt->first_seg,
						      &mr->mem.arbel.dma_handle);
		BUG_ON(!mr->mem.arbel.mtts);
	} else
		mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_free_mtt;
	}

	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);

	mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	/* start/length stay zero until the FMR is mapped */
	memset(&mpt_entry->start, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
	mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

	if (0) {
		/* debug dump of the MPT entry; compiled out by default */
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox_free;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox_free;
	}

	mthca_free_mailbox(dev, mailbox);
	return 0;

err_out_mailbox_free:
	mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
/*
 * mthca_free_fmr - destroy an FMR.  Refuses (-EBUSY) while mappings are
 * outstanding; unlike mthca_free_mr() no HW2SW_MPT is needed because
 * unmap already returned ownership to software.
 */
int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mthca_free_region(dev, fmr->ibmr.lkey);
	mthca_free_mtt(dev, fmr->mtt);

	return 0;
}

/*
 * mthca_check_fmr - validate a map request: list length within the
 * FMR's capacity, iova page-aligned, and remap budget not exhausted.
 * The per-entry alignment check is compiled out (if (0)) -- and
 * NOTE(review): its mask looks inverted (~page_mask tests the address
 * bits, not the offset bits); harmless while dead, but verify before
 * ever enabling it.
 */
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
				  int list_len, u64 iova)
{
	int i, page_mask;

	if (list_len > fmr->attr.max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->attr.page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < list_len; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->attr.max_maps)
		return -EINVAL;

	return 0;
}
/*
 * mthca_tavor_map_phys_fmr - remap an FMR on Tavor without a firmware
 * command.
 *
 * Sequence: bump the key (so stale rkeys are invalidated), flip the MPT
 * ownership byte to SW, rewrite the MTT entries and the key/start/length
 * fields via MMIO, then flip ownership back to HW.  -- assumes writeb of
 * the status byte orders the surrounding MMIO writes; verify against
 * the Tavor PRM.
 */
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	struct mthca_mpt_entry mpt_entry;
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	/* advance the key by one MPT-table stride per remap */
	key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

	for (i = 0; i < list_len; ++i) {
		__be64 mtt_entry = cpu_to_be64(page_list[i] |
					       MTHCA_MTT_FLAG_PRESENT);
		mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
	}

	mpt_entry.lkey   = cpu_to_be32(key);
	mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
	mpt_entry.start  = cpu_to_be64(iova);

	__raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
	memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
		    offsetof(struct mthca_mpt_entry, window_count) -
		    offsetof(struct mthca_mpt_entry, start));

	writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

	return 0;
}
/*
 * mthca_arbel_map_phys_fmr - remap an FMR on Arbel/Sinai by editing its
 * MPT/MTT entries directly in the ICM.
 *
 * Ordering matters: set MPT status to SW, write the MTTs (with DMA
 * syncs so the device sees them), update key/length/start, then set
 * status back to HW — with wmb() barriers between the stages.
 */
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	/* advance the key so stale rkeys from earlier maps are invalid */
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		key += SINAI_FMR_KEY_INC;
	else
		key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

	wmb();

	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
				list_len * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < list_len; ++i)
		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
						     MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
				   list_len * sizeof(u64), DMA_TO_DEVICE);

	fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
	fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
	fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
	fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

	wmb();

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

	wmb();

	return 0;
}
/*
 * Unmap an FMR: hand the MPT entry back to software ownership so the
 * hardware stops using the mapping.  No-op when nothing is mapped.
 */
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps) {
		fmr->maps = 0;
		/* Tavor: MPT lives in device memory, write via MMIO */
		writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
	}
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps) {
		fmr->maps = 0;
		/* Arbel: MPT lives in ICM (host memory), plain store */
		*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
	}
}
/*
 * mthca_init_mr_table - set up the memory-region machinery at driver
 * load: the MPT index allocator, the main MTT buddy, and (when FMRs are
 * usable) either a dedicated FMR MTT buddy plus ioremapped MPT/MTT
 * windows (Tavor) or shared tables (Arbel).  Finally reserves the
 * firmware-owned MTTs out of the FMR buddy.  Returns 0 or a negative
 * errno, unwinding via the goto chain.
 */
int mthca_init_mr_table(struct mthca_dev *dev)
{
	phys_addr_t addr;
	int mpts, mtts, err, i;

	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
			       dev->limits.num_mpts,
			       ~0, dev->limits.reserved_mrws);
	if (err)
		return err;

	/* FMRs need direct access to device memory; a hidden DDR
	   (Tavor with DDR hidden) rules that out */
	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
		dev->limits.fmr_reserved_mtts = 0;
	else
		dev->mthca_flags |= MTHCA_FLAG_FMR;

	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mthca_dbg(dev, "Memory key throughput optimization activated.\n");

	err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
			       fls(dev->limits.num_mtt_segs - 1));

	if (err)
		goto err_mtt_buddy;

	dev->mr_table.tavor_fmr.mpt_base = NULL;
	dev->mr_table.tavor_fmr.mtt_base = NULL;

	if (dev->limits.fmr_reserved_mtts) {
		i = fls(dev->limits.fmr_reserved_mtts - 1);

		if (i >= 31) {
			mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
			err = -EINVAL;
			goto err_fmr_mpt;
		}
		mpts = mtts = 1 << i;
	} else {
		mtts = dev->limits.num_mtt_segs;
		mpts = dev->limits.num_mpts;
	}

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_FMR)) {
		/* Tavor FMR fast path: map the MPT and MTT tables in
		   device memory (BAR 4) into the kernel */
		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mpt_base);

		dev->mr_table.tavor_fmr.mpt_base =
			ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

		if (!dev->mr_table.tavor_fmr.mpt_base) {
			mthca_warn(dev, "MPT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mpt;
		}

		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mtt_base);

		dev->mr_table.tavor_fmr.mtt_base =
			ioremap(addr, mtts * dev->limits.mtt_seg_size);
		if (!dev->mr_table.tavor_fmr.mtt_base) {
			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mtt;
		}
	}

	if (dev->limits.fmr_reserved_mtts) {
		err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_fmr_mtt_buddy;

		/* Prevent regular MRs from using FMR keys */
		err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_reserve_fmr;

		dev->mr_table.fmr_mtt_buddy =
			&dev->mr_table.tavor_fmr.mtt_buddy;
	} else
		dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

	/* FMR table is always the first, take reserved MTTs out of there */
	if (dev->limits.reserved_mtts) {
		i = fls(dev->limits.reserved_mtts - 1);

		if (mthca_alloc_mtt_range(dev, i,
					  dev->mr_table.fmr_mtt_buddy) == -1) {
			mthca_warn(dev, "MTT table of order %d is too small.\n",
				   dev->mr_table.fmr_mtt_buddy->max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
err_reserve_fmr:
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

	return err;
}
/*
 * mthca_cleanup_mr_table - undo mthca_init_mr_table() in reverse order:
 * buddies, Tavor FMR ioremaps, then the MPT index allocator.
 */
void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
	/* XXX check if any MRs are still allocated? */
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}
| gpl-2.0 |
MattCrystal/shiny-octo-happiness | arch/x86/mm/srat.c | 4691 | 4594 | /*
* ACPI 3.0 based NUMA setup
* Copyright 2004 Andi Kleen, SuSE Labs.
*
* Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
*
* Called from acpi_numa_init while reading the SRAT and SLIT tables.
* Assumes all memory regions belonging to a single proximity domain
* are in one chunk. Holes between them will be included in the node.
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
/* >0 once SRAT parsing has produced usable data, <0 after bad_srat() */
int acpi_numa __initdata;

/* Map an ACPI proximity domain to a NUMA node id (allocating one). */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

/* Mark the SRAT as unusable; all later callbacks become no-ops. */
static __init void bad_srat(void)
{
	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
}

static __init inline int srat_disabled(void)
{
	return acpi_numa < 0;
}
/*
 * Callback for SLIT parsing: copy the locality (distance) matrix into
 * the kernel's NUMA distance table, translating proximity domains to
 * node ids.  Entry [i][j] is the distance from domain i to domain j.
 */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	int i, j;

	for (i = 0; i < slit->locality_count; i++)
		for (j = 0; j < slit->locality_count; j++)
			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
				slit->entry[slit->locality_count * i + j]);
}
/*
 * Callback for Proximity Domain -> x2APIC mapping.
 *
 * Validates the SRAT entry (length, enabled flag, apic id range and
 * validity), maps the proximity domain to a node, and records the
 * apicid->node association.  Malformed entries disable SRAT use via
 * bad_srat(); merely skippable entries just return.
 */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	int pxm, node;
	int apic_id;

	if (srat_disabled())
		return;
	if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain;
	apic_id = pa->apic_id;
	if (!apic->apic_id_valid(apic_id)) {
		printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n",
			 pxm, apic_id);
		return;
	}
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (apic_id >= MAX_LOCAL_APIC) {
		printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
		return;
	}
	set_apicid_to_node(apic_id, node);
	node_set(node, numa_nodes_parsed);
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
	       pxm, apic_id, node);
}
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;
	int apic_id;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	/* skip entries the firmware marked as not enabled */
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		/*
		 * SRAT rev >= 2 widens the proximity domain to 32 bits;
		 * proximity_domain_hi[] carries bits 8..31 as three bytes.
		 * Compose them bytewise: the previous code read the u8[3]
		 * field through an "unsigned int *" cast, which accessed
		 * one byte past the field and dropped the top byte after
		 * the << 8 shift.
		 */
		pxm |= (pa->proximity_domain_hi[0] << 8) |
		       (pa->proximity_domain_hi[1] << 16) |
		       (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	/* UV systems encode the SAPIC EID into the low byte of the apic id */
	if (get_uv_system_type() >= UV_X2APIC)
		apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
	else
		apic_id = pa->apic_id;

	if (apic_id >= MAX_LOCAL_APIC) {
		printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
		return;
	}

	set_apicid_to_node(apic_id, node);
	node_set(node, numa_nodes_parsed);
	/* at least one valid entry seen: SRAT is usable */
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
	       pxm, apic_id, node);
}
/*
 * Hot-pluggable memory affinity entries are only worth recording when
 * memory hotplug support is compiled in.
 */
#ifdef CONFIG_MEMORY_HOTPLUG
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	u64 base, limit;
	int nid, domain;

	if (srat_disabled())
		return;
	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return;
	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
		return;

	base = ma->base_address;
	limit = base + ma->length;
	domain = ma->proximity_domain;
	/* rev <= 1 SRATs only define the low 8 bits of the domain */
	if (acpi_srat_revision <= 1)
		domain &= 0xff;

	nid = setup_node(domain);
	if (nid < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}

	if (numa_add_memblk(nid, base, limit) < 0) {
		bad_srat();
		return;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", nid, domain,
	       base, limit);
}
void __init acpi_numa_arch_fixup(void) {}
/*
 * Drive ACPI NUMA table parsing; fail with -EINVAL when the SRAT was
 * rejected, otherwise propagate acpi_numa_init()'s error or succeed.
 */
int __init x86_acpi_numa_init(void)
{
	int err = acpi_numa_init();

	if (err < 0)
		return err;
	if (srat_disabled())
		return -EINVAL;
	return 0;
}
| gpl-2.0 |
patrickhwood/linux | drivers/usb/renesas_usbhs/pipe.c | 4947 | 14943 | /*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "./common.h"
#include "./pipe.h"
/*
* macros
*/
#define usbhsp_addr_offset(p) ((usbhs_pipe_number(p) - 1) * 2)
#define usbhsp_flags_set(p, f) ((p)->flags |= USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_clr(p, f) ((p)->flags &= ~USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_has(p, f) ((p)->flags & USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_init(p) do {(p)->flags = 0; } while (0)
/*
* for debug
*/
static char *usbhsp_pipe_name[] = {
[USB_ENDPOINT_XFER_CONTROL] = "DCP",
[USB_ENDPOINT_XFER_BULK] = "BULK",
[USB_ENDPOINT_XFER_INT] = "INT",
[USB_ENDPOINT_XFER_ISOC] = "ISO",
};
/* Human-readable name ("DCP"/"BULK"/"INT"/"ISO") for a pipe's transfer type. */
char *usbhs_pipe_name(struct usbhs_pipe *pipe)
{
	int type = usbhs_pipe_type(pipe);

	return usbhsp_pipe_name[type];
}
/*
* DCPCTR/PIPEnCTR functions
*/
/* Read-modify-write the pipe's control register (DCPCTR or PIPEnCTR). */
static void usbhsp_pipectrl_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	if (usbhs_pipe_is_dcp(pipe)) {
		usbhs_bset(priv, DCPCTR, mask, val);
	} else {
		int offset = usbhsp_addr_offset(pipe);

		usbhs_bset(priv, PIPEnCTR + offset, mask, val);
	}
}
/* Read the pipe's control register (DCPCTR or PIPEnCTR). */
static u16 usbhsp_pipectrl_get(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	if (usbhs_pipe_is_dcp(pipe))
		return usbhs_read(priv, DCPCTR);

	return usbhs_read(priv, PIPEnCTR + usbhsp_addr_offset(pipe));
}
/*
* DCP/PIPE functions
*/
/*
 * Update a pipe register that has distinct DCP and non-DCP variants:
 * pick the register by pipe kind, then do a masked write.
 */
static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
				  u16 dcp_reg, u16 pipe_reg,
				  u16 mask, u16 val)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	u16 reg = usbhs_pipe_is_dcp(pipe) ? dcp_reg : pipe_reg;

	usbhs_bset(priv, reg, mask, val);
}
/*
 * DCPCFG/PIPECFG functions
 */
static void usbhsp_pipe_cfg_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
	__usbhsp_pipe_xxx_set(pipe, DCPCFG, PIPECFG, mask, val);
}

/*
 * PIPEBUF (the DCP has no PIPEBUF register, so this is a no-op for it)
 */
static void usbhsp_pipe_buf_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
	if (usbhs_pipe_is_dcp(pipe))
		return;

	__usbhsp_pipe_xxx_set(pipe, 0, PIPEBUF, mask, val);
}

/*
 * DCPMAXP/PIPEMAXP
 */
static void usbhsp_pipe_maxp_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
	__usbhsp_pipe_xxx_set(pipe, DCPMAXP, PIPEMAXP, mask, val);
}
/*
* pipe control functions
*/
/*
 * Route PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI accesses to this pipe by
 * writing its number into PIPESEL.
 */
static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	/*
	 * On pipe, this is necessary before
	 * accesses to below registers.
	 *
	 * PIPESEL	: usbhsp_pipe_select
	 * PIPECFG	: usbhsp_pipe_cfg_xxx
	 * PIPEBUF	: usbhsp_pipe_buf_xxx
	 * PIPEMAXP	: usbhsp_pipe_maxp_xxx
	 * PIPEPERI
	 */

	/*
	 * if pipe is dcp, no pipe is selected.
	 * it is no problem, because dcp have its register
	 */
	usbhs_write(priv, PIPESEL, 0xF & usbhs_pipe_number(pipe));
}
/*
 * Quiesce the pipe (CSSTS == 0 and PID == NAK) so its control registers
 * may safely be modified.  Returns 0 once idle, -EBUSY on timeout.
 */
static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	int timeout = 1024;
	u16 val;

	/*
	 * make sure....
	 *
	 * Modify these bits when CSSTS = 0, PID = NAK, and no pipe number is
	 * specified by the CURPIPE bits.
	 * When changing the setting of this bit after changing
	 * the PID bits for the selected pipe from BUF to NAK,
	 * check that CSSTS = 0 and PBUSY = 0.
	 */

	/*
	 * CURPIPE bit = 0
	 *
	 * see also
	 *	"Operation"
	 *	- "Pipe Control"
	 *	  - "Pipe Control Registers Switching Procedure"
	 */
	usbhs_write(priv, CFIFOSEL, 0);
	usbhs_pipe_disable(pipe);

	/* poll (up to ~10ms total) for CSSTS and PID to clear */
	do {
		val = usbhsp_pipectrl_get(pipe);
		val &= CSSTS | PID_MASK;
		if (!val)
			return 0;

		udelay(10);

	} while (timeout--);

	return -EBUSY;
}
/* 0 when the pipe's FIFO is ready for access (BSTS set), else -EBUSY. */
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
{
	u16 ctrl = usbhsp_pipectrl_get(pipe);

	return (ctrl & BSTS) ? 0 : -EBUSY;
}
/*
* PID ctrl
*/
/*
 * Step a stalled pipe back to NAK using the documented PID transition
 * order (STALL11 -> STALL10 -> NAK); pipes in other states are untouched.
 */
static void __usbhsp_pid_try_nak_if_stall(struct usbhs_pipe *pipe)
{
	u16 pid = usbhsp_pipectrl_get(pipe);

	pid &= PID_MASK;

	/*
	 * see
	 * "Pipe n Control Register" - "PID"
	 */
	switch (pid) {
	case PID_STALL11:
		usbhsp_pipectrl_set(pipe, PID_MASK, PID_STALL10);
		/* fall-through: STALL10 must then be lowered to NAK */
	case PID_STALL10:
		usbhsp_pipectrl_set(pipe, PID_MASK, PID_NAK);
	}
}
/* Set the pipe's PID to NAK and wait for any in-flight transfer (PBUSY). */
void usbhs_pipe_disable(struct usbhs_pipe *pipe)
{
	int timeout = 1024;
	u16 val;

	/* see "Pipe n Control Register" - "PID" */
	__usbhsp_pid_try_nak_if_stall(pipe);

	usbhsp_pipectrl_set(pipe, PID_MASK, PID_NAK);

	/* poll (up to ~10ms total) for the pipe to go idle */
	do {
		val = usbhsp_pipectrl_get(pipe);
		val &= PBUSY;
		if (!val)
			break;

		udelay(10);
	} while (timeout--);
}
/* Set the pipe's PID to BUF (enabled / ready to transfer). */
void usbhs_pipe_enable(struct usbhs_pipe *pipe)
{
	/* see "Pipe n Control Register" - "PID" */
	__usbhsp_pid_try_nak_if_stall(pipe);

	usbhsp_pipectrl_set(pipe, PID_MASK, PID_BUF);
}
/*
 * Stall the pipe, picking the STALL variant the hardware documents for
 * the current PID state (NAK -> STALL10, BUF -> STALL11).
 */
void usbhs_pipe_stall(struct usbhs_pipe *pipe)
{
	u16 pid = usbhsp_pipectrl_get(pipe) & PID_MASK;

	/*
	 * see
	 * "Pipe n Control Register" - "PID"
	 */
	if (pid == PID_NAK)
		usbhsp_pipectrl_set(pipe, PID_MASK, PID_STALL10);
	else if (pid == PID_BUF)
		usbhsp_pipectrl_set(pipe, PID_MASK, PID_STALL11);
}
/* Nonzero when the pipe's PID is in either STALL state. */
int usbhs_pipe_is_stall(struct usbhs_pipe *pipe)
{
	u16 pid = usbhsp_pipectrl_get(pipe) & PID_MASK;

	if (pid == PID_STALL10 || pid == PID_STALL11)
		return 1;

	return 0;
}
/*
* pipe setup
*/
/* only ISO / BULK pipe can use double buffer */
static int usbhsp_possible_double_buffer(struct usbhs_pipe *pipe)
{
	return usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK) ||
	       usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC);
}
/*
 * Compose the PIPECFG value for a non-DCP pipe and record the pipe's
 * direction flags.
 *
 * NOTE(review): the return type is u16, so "return -EINVAL" is silently
 * truncated to 0xFFEA and callers cannot distinguish it from a config
 * value -- confirm callers never pass the DCP here.
 */
static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
				int is_host,
				int dir_in)
{
	u16 type = 0;
	u16 bfre = 0;
	u16 dblb = 0;
	u16 cntmd = 0;
	u16 dir = 0;
	u16 epnum = 0;
	u16 shtnak = 0;
	u16 type_array[] = {
		[USB_ENDPOINT_XFER_BULK] = TYPE_BULK,
		[USB_ENDPOINT_XFER_INT] = TYPE_INT,
		[USB_ENDPOINT_XFER_ISOC] = TYPE_ISO,
	};
	int is_double = usbhsp_possible_double_buffer(pipe);

	if (usbhs_pipe_is_dcp(pipe))
		return -EINVAL;

	/*
	 * PIPECFG
	 *
	 * see
	 *  - "Register Descriptions" - "PIPECFG" register
	 *  - "Features"  - "Pipe configuration"
	 *  - "Operation" - "Pipe Control"
	 */

	/* TYPE */
	type = type_array[usbhs_pipe_type(pipe)];

	/* BFRE */
	if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
		bfre = 0; /* FIXME */

	/* DBLB: double buffering, ISO/BULK only */
	if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
		dblb = (is_double) ? DBLB : 0;

	/* CNTMD */
	if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK))
		cntmd = 0; /* FIXME */

	/* DIR */
	if (dir_in)
		usbhsp_flags_set(pipe, IS_DIR_HOST);

	if (!!is_host ^ !!dir_in)
		dir |= DIR_OUT;

	if (!dir)
		usbhsp_flags_set(pipe, IS_DIR_IN);

	/* SHTNAK: auto-NAK on transfer end, receive-direction BULK only */
	if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_BULK) &&
	    !dir)
		shtnak = SHTNAK;

	/* EPNUM */
	epnum = 0; /* see usbhs_pipe_config_update() */

	return	type	|
		bfre	|
		dblb	|
		cntmd	|
		dir	|
		shtnak	|
		epnum;
}
/*
 * Compose the PIPEBUF value (block count and start block) for a pipe,
 * advancing the simple bump allocator (info->bufnmb_last) for BULK/ISOC.
 */
static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct device *dev = usbhs_priv_to_dev(priv);
	int pipe_num = usbhs_pipe_number(pipe);
	int is_double = usbhsp_possible_double_buffer(pipe);
	u16 buff_size;
	u16 bufnmb;
	u16 bufnmb_cnt;

	/*
	 * PIPEBUF
	 *
	 * see
	 *  - "Register Descriptions" - "PIPEBUF" register
	 *  - "Features"  - "Pipe configuration"
	 *  - "Operation" - "FIFO Buffer Memory"
	 *  - "Operation" - "Pipe Control"
	 *
	 * ex) if pipe6 - pipe9 are USB_ENDPOINT_XFER_INT (SH7724)
	 *
	 * BUFNMB:	PIPE
	 * 0:		pipe0 (DCP 256byte)
	 * 1:		-
	 * 2:		-
	 * 3:		-
	 * 4:		pipe6 (INT 64byte)
	 * 5:		pipe7 (INT 64byte)
	 * 6:		pipe8 (INT 64byte)
	 * 7:		pipe9 (INT 64byte)
	 * 8 - xx:	free (for BULK, ISOC)
	 */

	/*
	 * FIXME
	 *
	 * it doesn't have good buffer allocator
	 *
	 * DCP : 256 byte
	 * BULK: 512 byte
	 * INT :  64 byte
	 * ISOC: 512 byte
	 */
	if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_CONTROL))
		buff_size = 256;
	else if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT))
		buff_size = 64;
	else
		buff_size = 512;

	/* change buff_size to register value: number of 64-byte blocks - 1 */
	bufnmb_cnt = (buff_size / 64) - 1;

	/* BUFNMB has been reserved for INT pipe
	 * see above */
	if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT)) {
		bufnmb = pipe_num - 2;
	} else {
		bufnmb = info->bufnmb_last;
		info->bufnmb_last += bufnmb_cnt + 1;

		/*
		 * double buffer consumes a second region of the same size
		 */
		if (is_double)
			info->bufnmb_last += bufnmb_cnt + 1;
	}

	dev_dbg(dev, "pipe : %d : buff_size 0x%x: bufnmb 0x%x\n",
		pipe_num, buff_size, bufnmb);

	return	(0x1f & bufnmb_cnt) << 10 |
		(0xff & bufnmb) << 0;
}
/*
 * Program device-select, endpoint number and max packet size for a pipe.
 * Intended to be called after usbhs_pipe_malloc()/usbhs_dcp_malloc().
 */
void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
			      u16 epnum, u16 maxp)
{
	/* DEVSEL is a 4-bit field; values above 0xA are invalid addresses */
	if (devsel > 0xA) {
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
		struct device *dev = usbhs_priv_to_dev(priv);

		dev_err(dev, "devsel error %d\n", devsel);

		devsel = 0;
	}

	usbhsp_pipe_barrier(pipe);

	/* cache maxp for usbhs_pipe_get_maxpacket() */
	pipe->maxp = maxp;

	usbhsp_pipe_select(pipe);
	usbhsp_pipe_maxp_set(pipe, 0xFFFF,
			     (devsel << 12) |
			     maxp);

	if (!usbhs_pipe_is_dcp(pipe))
		usbhsp_pipe_cfg_set(pipe, 0x000F, epnum);
}
/*
 * pipe control
 */

/* Max packet size cached by usbhs_pipe_config_update(). */
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe)
{
	/*
	 * see
	 *	usbhs_pipe_config_update()
	 *	usbhs_dcp_malloc()
	 */
	return pipe->maxp;
}

/* Nonzero when the pipe was configured as an IN pipe (usbhsp_setup_pipecfg). */
int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe)
{
	return usbhsp_flags_has(pipe, IS_DIR_IN);
}

/* Nonzero when the host-direction flag was set (usbhsp_setup_pipecfg). */
int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
{
	return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
/*
 * Force the pipe's data toggle:
 *	0  : DATA0
 *	1  : DATA1
 *	-1 (anything else) : leave unchanged
 */
void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
{
	u16 mask = (SQCLR | SQSET);
	u16 val;

	if (sequence == 0)
		val = SQCLR;
	else if (sequence == 1)
		val = SQSET;
	else
		return;

	usbhsp_pipectrl_set(pipe, mask, val);
}
/* Reset the pipe's FIFO and toggle state by pulsing the ACLRM bit. */
void usbhs_pipe_clear(struct usbhs_pipe *pipe)
{
	usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
	usbhsp_pipectrl_set(pipe, ACLRM, 0);
}
/*
 * Find a free hardware pipe of the requested transfer type, claim it
 * (flags reset, IS_USED set) and return it; NULL when none is free.
 */
static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
{
	struct usbhs_pipe *found = NULL;
	struct usbhs_pipe *pos;
	int i;

	usbhs_for_each_pipe_with_dcp(pos, priv, i) {
		if (usbhs_pipe_type_is(pos, type) &&
		    !usbhsp_flags_has(pos, IS_USED)) {
			found = pos;
			break;
		}
	}

	if (!found)
		return NULL;

	/* claim the pipe */
	usbhsp_flags_init(found);
	usbhsp_flags_set(found, IS_USED);

	return found;
}
/*
 * Reset every pipe to a known state and install the DMA map callback.
 * Also primes the FIFO buffer bump allocator (bufnmb_last).
 */
void usbhs_pipe_init(struct usbhs_priv *priv,
		     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map))
{
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct usbhs_pipe *pipe;
	int i;

	/*
	 * FIXME
	 *
	 * driver needs good allocator.
	 *
	 * find first free buffer area (BULK, ISOC)
	 * (DCP, INT area is fixed)
	 *
	 * buffer number 0 - 3 have been reserved for DCP
	 * see
	 *	usbhsp_to_bufnmb
	 */
	info->bufnmb_last = 4;
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		/* each INT pipe occupies one fixed 64-byte block; skip past it */
		if (usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_INT))
			info->bufnmb_last++;

		usbhsp_flags_init(pipe);
		pipe->fifo = NULL;
		pipe->mod_private = NULL;
		INIT_LIST_HEAD(&pipe->list);

		/* pipe force init */
		usbhs_pipe_clear(pipe);
	}

	info->dma_map_ctrl = dma_map_ctrl;
}
/*
 * Allocate a free pipe of the given endpoint type and program its
 * PIPECFG/PIPEBUF registers.  Returns NULL when no pipe is free or the
 * pipe would not quiesce.  epnum/maxp must afterwards be set with
 * usbhs_pipe_config_update().
 */
struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
				     int endpoint_type,
				     int dir_in)
{
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_pipe *pipe;
	int is_host = usbhs_mod_is_host(priv);
	int ret;
	u16 pipecfg, pipebuf;

	pipe = usbhsp_get_pipe(priv, endpoint_type);
	if (!pipe) {
		dev_err(dev, "can't get pipe (%s)\n",
			usbhsp_pipe_name[endpoint_type]);
		return NULL;
	}

	INIT_LIST_HEAD(&pipe->list);

	usbhs_pipe_disable(pipe);

	/* make sure pipe is not busy */
	ret = usbhsp_pipe_barrier(pipe);
	if (ret < 0) {
		dev_err(dev, "pipe setup failed %d\n", usbhs_pipe_number(pipe));
		return NULL;
	}

	pipecfg = usbhsp_setup_pipecfg(pipe, is_host, dir_in);
	pipebuf = usbhsp_setup_pipebuff(pipe);

	usbhsp_pipe_select(pipe);
	usbhsp_pipe_cfg_set(pipe, 0xFFFF, pipecfg);
	usbhsp_pipe_buf_set(pipe, 0xFFFF, pipebuf);

	usbhs_pipe_sequence_data0(pipe);

	dev_dbg(dev, "enable pipe %d : %s (%s)\n",
		usbhs_pipe_number(pipe),
		usbhs_pipe_name(pipe),
		usbhs_pipe_is_dir_in(pipe) ? "in" : "out");

	/*
	 * epnum / maxp are still not set to this pipe.
	 * call usbhs_pipe_config_update() after this function !!
	 */

	return pipe;
}
/* Attach @fifo to @pipe (detaching any previous FIFO); @fifo may be NULL. */
void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo)
{
	struct usbhs_fifo *old = pipe->fifo;

	if (old)
		old->pipe = NULL;

	pipe->fifo = fifo;
	if (fifo)
		fifo->pipe = pipe;
}
/*
* dcp control
*/
/*
 * Reserve the DCP (default control pipe).  epnum/maxp must be set
 * afterwards with usbhs_pipe_config_update().
 */
struct usbhs_pipe *usbhs_dcp_malloc(struct usbhs_priv *priv)
{
	struct usbhs_pipe *dcp;

	dcp = usbhsp_get_pipe(priv, USB_ENDPOINT_XFER_CONTROL);
	if (dcp)
		INIT_LIST_HEAD(&dcp->list);

	return dcp;
}
/*
 * Complete the status stage of a control transfer on the DCP; in
 * peripheral (function-only) mode the CCPL bit must additionally be set.
 */
void usbhs_dcp_control_transfer_done(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	WARN_ON(!usbhs_pipe_is_dcp(pipe));

	usbhs_pipe_enable(pipe);

	if (!usbhs_mod_is_host(priv)) /* funconly */
		usbhsp_pipectrl_set(pipe, CCPL, CCPL);
}
void usbhs_dcp_dir_for_host(struct usbhs_pipe *pipe, int dir_out)
{
usbhsp_pipe_cfg_set(pipe, DIR_OUT,
dir_out ? DIR_OUT : 0);
}
/*
* pipe module function
*/
/*
 * Allocate and initialize the pipe array described by the platform's
 * pipe_type table.  The first entry must be the DCP (control pipe).
 * Returns 0 on success or a negative error code.
 */
int usbhs_pipe_probe(struct usbhs_priv *priv)
{
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
	int pipe_size = usbhs_get_dparam(priv, pipe_size);
	int i;

	/* This driver expects 1st pipe is DCP */
	if (pipe_type[0] != USB_ENDPOINT_XFER_CONTROL) {
		dev_err(dev, "1st PIPE is not DCP\n");
		return -EINVAL;
	}

	/* kcalloc() zero-fills and guards the n * size multiplication */
	info->pipe = kcalloc(pipe_size, sizeof(struct usbhs_pipe), GFP_KERNEL);
	if (!info->pipe) {
		dev_err(dev, "Could not allocate pipe\n");
		return -ENOMEM;
	}

	info->size = pipe_size;

	/*
	 * init pipe
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		pipe->priv = priv;

		usbhs_pipe_type(pipe) =
			pipe_type[i] & USB_ENDPOINT_XFERTYPE_MASK;

		dev_dbg(dev, "pipe %x\t: %s\n",
			i, usbhsp_pipe_name[pipe_type[i]]);
	}

	return 0;
}
void usbhs_pipe_remove(struct usbhs_priv *priv)
{
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
kfree(info->pipe);
}
| gpl-2.0 |
ivanich/senny_kernel-3.4 | drivers/input/touchscreen/ad7879-spi.c | 4947 | 3770 | /*
* AD7879/AD7889 touchscreen (SPI bus)
*
* Copyright (C) 2008-2010 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/input.h> /* BUS_SPI */
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include "ad7879.h"
#define AD7879_DEVID 0x7A /* AD7879/AD7889 */
#define MAX_SPI_FREQ_HZ 5000000
#define AD7879_CMD_MAGIC 0xE000
#define AD7879_CMD_READ (1 << 10)
#define AD7879_CMD(reg) (AD7879_CMD_MAGIC | ((reg) & 0xF))
#define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
#define AD7879_READCMD(reg) (AD7879_CMD(reg) | AD7879_CMD_READ)
/*
* ad7879_read/write are only used for initial setup and for sysfs controls.
* The main traffic is done in ad7879_collect().
*/
/*
 * Perform one SPI transaction: a 16-bit command word followed by @count
 * 16-bit data words (read and/or write).  Only used for initial setup
 * and sysfs controls; the main traffic is done in ad7879_collect().
 *
 * For the common count == 1 case the caller's buffers live on its stack,
 * so the data is bounced through the DMA-safe heap buffer.
 */
static int ad7879_spi_xfer(struct spi_device *spi,
			   u16 cmd, u8 count, u16 *tx_buf, u16 *rx_buf)
{
	struct spi_message msg;
	struct spi_transfer *xfers;
	void *spi_data;
	u16 *command;
	u16 *_rx_buf = NULL;	/* caller's original rx buffer (count == 1 only);
				 * was "_rx_buf = _rx_buf", i.e. an uninitialized
				 * self-init used to silence gcc -- UB */
	u8 idx;
	int ret;

	/* kcalloc() zero-fills and checks the (count + 2) * size product */
	xfers = spi_data = kcalloc(count + 2, sizeof(*xfers), GFP_KERNEL);
	if (!spi_data)
		return -ENOMEM;

	spi_message_init(&msg);

	command = spi_data;
	command[0] = cmd;

	if (count == 1) {
		/* ad7879_spi_{read,write} gave us buf on stack */
		command[1] = *tx_buf;
		tx_buf = &command[1];
		_rx_buf = rx_buf;
		rx_buf = &command[2];
	}

	++xfers;

	/* first transfer carries the command word */
	xfers[0].tx_buf = command;
	xfers[0].len = 2;
	spi_message_add_tail(&xfers[0], &msg);
	++xfers;

	/* then one 16-bit transfer per data word */
	for (idx = 0; idx < count; ++idx) {
		if (rx_buf)
			xfers[idx].rx_buf = &rx_buf[idx];
		if (tx_buf)
			xfers[idx].tx_buf = &tx_buf[idx];
		xfers[idx].len = 2;
		spi_message_add_tail(&xfers[idx], &msg);
	}

	ret = spi_sync(spi, &msg);
	if (count == 1)
		_rx_buf[0] = command[2];	/* copy result back to caller's stack buf */

	kfree(spi_data);

	return ret;
}
/* Burst-read @count consecutive registers starting at @first_reg. */
static int ad7879_spi_multi_read(struct device *dev,
				 u8 first_reg, u8 count, u16 *buf)
{
	struct spi_device *spi = to_spi_device(dev);
	u16 cmd = AD7879_READCMD(first_reg);

	return ad7879_spi_xfer(spi, cmd, count, NULL, buf);
}
/* Read one register; returns its value, or a negative error code. */
static int ad7879_spi_read(struct device *dev, u8 reg)
{
	struct spi_device *spi = to_spi_device(dev);
	u16 val, dummy;
	int err;

	err = ad7879_spi_xfer(spi, AD7879_READCMD(reg), 1, &dummy, &val);

	return err ? err : val;
}
/* Write @val to one register; returns 0 or a negative error code. */
static int ad7879_spi_write(struct device *dev, u8 reg, u16 val)
{
	struct spi_device *spi = to_spi_device(dev);
	u16 discard;

	return ad7879_spi_xfer(spi, AD7879_WRITECMD(reg), 1, &val, &discard);
}
/* Bus accessors handed to the bus-agnostic ad7879 core driver. */
static const struct ad7879_bus_ops ad7879_spi_bus_ops = {
	.bustype	= BUS_SPI,
	.read		= ad7879_spi_read,
	.multi_read	= ad7879_spi_multi_read,
	.write		= ad7879_spi_write,
};
/*
 * Validate the SPI configuration (clock limit, 16-bit words) and hand
 * off to the bus-agnostic ad7879 core probe.
 */
static int __devinit ad7879_spi_probe(struct spi_device *spi)
{
	struct ad7879 *ts;
	int err;

	/* don't exceed max specified SPI CLK frequency */
	if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) {
		dev_err(&spi->dev, "SPI CLK %d Hz?\n", spi->max_speed_hz);
		return -EINVAL;
	}

	spi->bits_per_word = 16;
	err = spi_setup(spi);
	if (err) {
		dev_dbg(&spi->dev, "spi master doesn't support 16 bits/word\n");
		return err;
	}

	ts = ad7879_probe(&spi->dev, AD7879_DEVID, spi->irq, &ad7879_spi_bus_ops);
	if (IS_ERR(ts))
		return PTR_ERR(ts);

	spi_set_drvdata(spi, ts);

	return 0;
}
/* Undo ad7879_spi_probe(): tear down the core driver and clear drvdata. */
static int __devexit ad7879_spi_remove(struct spi_device *spi)
{
	ad7879_remove(spi_get_drvdata(spi));
	spi_set_drvdata(spi, NULL);

	return 0;
}
/* SPI driver glue; registration/unregistration via module_spi_driver(). */
static struct spi_driver ad7879_spi_driver = {
	.driver = {
		.name	= "ad7879",
		.owner	= THIS_MODULE,
		.pm	= &ad7879_pm_ops,
	},
	.probe		= ad7879_spi_probe,
	.remove		= __devexit_p(ad7879_spi_remove),
};

module_spi_driver(ad7879_spi_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("AD7879(-1) touchscreen SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:ad7879");
| gpl-2.0 |
NamelessRom/android_kernel_xiaomi_armani | drivers/net/phy/smsc.c | 4947 | 5943 | /*
* drivers/net/phy/smsc.c
*
* Driver for SMSC PHYs
*
* Author: Herbert Valerio Riedel
*
* Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/smscphy.h>
static int smsc_phy_config_intr(struct phy_device *phydev)
{
int rc = phy_write (phydev, MII_LAN83C185_IM,
((PHY_INTERRUPT_ENABLED == phydev->interrupts)
? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
: 0));
return rc < 0 ? rc : 0;
}
/* Reading the interrupt source register clears any pending interrupt. */
static int smsc_phy_ack_interrupt(struct phy_device *phydev)
{
	int rc = phy_read(phydev, MII_LAN83C185_ISF);

	if (rc < 0)
		return rc;

	return 0;
}
/*
 * Enable energy-detect power-down mode and clear any stale interrupt so
 * the PHY starts from a known state.
 */
static int smsc_phy_config_init(struct phy_device *phydev)
{
	int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
	if (rc < 0)
		return rc;

	/* Enable energy detect mode for this SMSC Transceivers */
	rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
		       rc | MII_LAN83C185_EDPWRDOWN);
	if (rc < 0)
		return rc;

	return smsc_phy_ack_interrupt (phydev);
}
/* LAN911x internal PHY: no energy-detect setup, just clear stale interrupts. */
static int lan911x_config_init(struct phy_device *phydev)
{
	return smsc_phy_ack_interrupt(phydev);
}
/*
 * One phy_driver per supported SMSC PHY model.  All models share the
 * same generic aneg/status helpers and interrupt handlers; only the
 * LAN911x internal PHY skips the energy-detect setup in config_init.
 */
static struct phy_driver lan83c185_driver = {
	.phy_id		= 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
	.phy_id_mask	= 0xfffffff0,
	.name		= "SMSC LAN83C185",

	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
				| SUPPORTED_Asym_Pause),
	.flags		= PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,

	/* basic functions */
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.config_init	= smsc_phy_config_init,

	/* IRQ related */
	.ack_interrupt	= smsc_phy_ack_interrupt,
	.config_intr	= smsc_phy_config_intr,

	.suspend	= genphy_suspend,
	.resume		= genphy_resume,

	.driver		= { .owner = THIS_MODULE, }
};

static struct phy_driver lan8187_driver = {
	.phy_id		= 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
	.phy_id_mask	= 0xfffffff0,
	.name		= "SMSC LAN8187",

	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
				| SUPPORTED_Asym_Pause),
	.flags		= PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,

	/* basic functions */
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.config_init	= smsc_phy_config_init,

	/* IRQ related */
	.ack_interrupt	= smsc_phy_ack_interrupt,
	.config_intr	= smsc_phy_config_intr,

	.suspend	= genphy_suspend,
	.resume		= genphy_resume,

	.driver		= { .owner = THIS_MODULE, }
};

static struct phy_driver lan8700_driver = {
	.phy_id		= 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
	.phy_id_mask	= 0xfffffff0,
	.name		= "SMSC LAN8700",

	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
				| SUPPORTED_Asym_Pause),
	.flags		= PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,

	/* basic functions */
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.config_init	= smsc_phy_config_init,

	/* IRQ related */
	.ack_interrupt	= smsc_phy_ack_interrupt,
	.config_intr	= smsc_phy_config_intr,

	.suspend	= genphy_suspend,
	.resume		= genphy_resume,

	.driver		= { .owner = THIS_MODULE, }
};

static struct phy_driver lan911x_int_driver = {
	.phy_id		= 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
	.phy_id_mask	= 0xfffffff0,
	.name		= "SMSC LAN911x Internal PHY",

	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
				| SUPPORTED_Asym_Pause),
	.flags		= PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,

	/* basic functions */
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.config_init	= lan911x_config_init,

	/* IRQ related */
	.ack_interrupt	= smsc_phy_ack_interrupt,
	.config_intr	= smsc_phy_config_intr,

	.suspend	= genphy_suspend,
	.resume		= genphy_resume,

	.driver		= { .owner = THIS_MODULE, }
};

static struct phy_driver lan8710_driver = {
	.phy_id		= 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
	.phy_id_mask	= 0xfffffff0,
	.name		= "SMSC LAN8710/LAN8720",

	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
				| SUPPORTED_Asym_Pause),
	.flags		= PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,

	/* basic functions */
	.config_aneg	= genphy_config_aneg,
	.read_status	= genphy_read_status,
	.config_init	= smsc_phy_config_init,

	/* IRQ related */
	.ack_interrupt	= smsc_phy_ack_interrupt,
	.config_intr	= smsc_phy_config_intr,

	.suspend	= genphy_suspend,
	.resume		= genphy_resume,

	.driver		= { .owner = THIS_MODULE, }
};
/*
 * Register all supported PHY drivers.  On failure, unwind the drivers
 * already registered (in reverse order) and return the error.
 */
static int __init smsc_init(void)
{
	int ret;

	ret = phy_driver_register (&lan83c185_driver);
	if (ret)
		goto err1;

	ret = phy_driver_register (&lan8187_driver);
	if (ret)
		goto err2;

	ret = phy_driver_register (&lan8700_driver);
	if (ret)
		goto err3;

	ret = phy_driver_register (&lan911x_int_driver);
	if (ret)
		goto err4;

	ret = phy_driver_register (&lan8710_driver);
	if (ret)
		goto err5;

	return 0;

	/* unwind chain: each label undoes the registrations before it */
err5:
	phy_driver_unregister (&lan911x_int_driver);
err4:
	phy_driver_unregister (&lan8700_driver);
err3:
	phy_driver_unregister (&lan8187_driver);
err2:
	phy_driver_unregister (&lan83c185_driver);
err1:
	return ret;
}
/* Unregister all PHY drivers in the reverse order of smsc_init(). */
static void __exit smsc_exit(void)
{
	phy_driver_unregister (&lan8710_driver);
	phy_driver_unregister (&lan911x_int_driver);
	phy_driver_unregister (&lan8700_driver);
	phy_driver_unregister (&lan8187_driver);
	phy_driver_unregister (&lan83c185_driver);
}
MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");

module_init(smsc_init);
module_exit(smsc_exit);

/* MDIO bus match table: one entry per supported PHY id (mask 0xfffffff0) */
static struct mdio_device_id __maybe_unused smsc_tbl[] = {
	{ 0x0007c0a0, 0xfffffff0 },
	{ 0x0007c0b0, 0xfffffff0 },
	{ 0x0007c0c0, 0xfffffff0 },
	{ 0x0007c0d0, 0xfffffff0 },
	{ 0x0007c0f0, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, smsc_tbl);
| gpl-2.0 |
davidmueller13/davidskernel_lt03lte_tw_5.1 | drivers/staging/line6/pod.c | 4947 | 37089 | /*
* Line6 Linux USB driver - 0.9.1beta
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/slab.h>
#include <linux/wait.h>
#include <sound/control.h>
#include "audio.h"
#include "capture.h"
#include "control.h"
#include "driver.h"
#include "playback.h"
#include "pod.h"
#define POD_SYSEX_CODE 3
#define POD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
/* *INDENT-OFF* */
enum {
POD_SYSEX_CLIP = 0x0f,
POD_SYSEX_SAVE = 0x24,
POD_SYSEX_SYSTEM = 0x56,
POD_SYSEX_SYSTEMREQ = 0x57,
/* POD_SYSEX_UPDATE = 0x6c, */ /* software update! */
POD_SYSEX_STORE = 0x71,
POD_SYSEX_FINISH = 0x72,
POD_SYSEX_DUMPMEM = 0x73,
POD_SYSEX_DUMP = 0x74,
POD_SYSEX_DUMPREQ = 0x75
/* POD_SYSEX_DUMPMEM2 = 0x76 */ /* dumps entire internal memory of PODxt Pro */
};
enum {
POD_monitor_level = 0x04,
POD_routing = 0x05,
POD_tuner_mute = 0x13,
POD_tuner_freq = 0x15,
POD_tuner_note = 0x16,
POD_tuner_pitch = 0x17,
POD_system_invalid = 0x10000
};
/* *INDENT-ON* */
enum {
POD_DUMP_MEMORY = 2
};
enum {
POD_BUSY_READ,
POD_BUSY_WRITE,
POD_CHANNEL_DIRTY,
POD_SAVE_PRESSED,
POD_BUSY_MIDISEND
};
/* 39062.5 Hz sample rate expressed as the rational 78125/2 for ALSA */
static struct snd_ratden pod_ratden = {
	.num_min = 78125,
	.num_max = 78125,
	.num_step = 1,
	.den = 2
};

/* PCM capabilities shared by the playback and capture directions */
static struct line6_pcm_properties pod_pcm_properties = {
	.snd_line6_playback_hw = {
				  .info = (SNDRV_PCM_INFO_MMAP |
					   SNDRV_PCM_INFO_INTERLEAVED |
					   SNDRV_PCM_INFO_BLOCK_TRANSFER |
					   SNDRV_PCM_INFO_MMAP_VALID |
					   SNDRV_PCM_INFO_PAUSE |
#ifdef CONFIG_PM
					   SNDRV_PCM_INFO_RESUME |
#endif
					   SNDRV_PCM_INFO_SYNC_START),
				  .formats = SNDRV_PCM_FMTBIT_S24_3LE,
				  .rates = SNDRV_PCM_RATE_KNOT,
				  .rate_min = 39062,
				  .rate_max = 39063,
				  .channels_min = 2,
				  .channels_max = 2,
				  .buffer_bytes_max = 60000,
				  .period_bytes_min = 64,
				  .period_bytes_max = 8192,
				  .periods_min = 1,
				  .periods_max = 1024},
	.snd_line6_capture_hw = {
				 .info = (SNDRV_PCM_INFO_MMAP |
					  SNDRV_PCM_INFO_INTERLEAVED |
					  SNDRV_PCM_INFO_BLOCK_TRANSFER |
					  SNDRV_PCM_INFO_MMAP_VALID |
#ifdef CONFIG_PM
					  SNDRV_PCM_INFO_RESUME |
#endif
					  SNDRV_PCM_INFO_SYNC_START),
				 .formats = SNDRV_PCM_FMTBIT_S24_3LE,
				 .rates = SNDRV_PCM_RATE_KNOT,
				 .rate_min = 39062,
				 .rate_max = 39063,
				 .channels_min = 2,
				 .channels_max = 2,
				 .buffer_bytes_max = 60000,
				 .period_bytes_min = 64,
				 .period_bytes_max = 8192,
				 .periods_min = 1,
				 .periods_max = 1024},
	.snd_line6_rates = {
			    .nrats = 1,
			    .rats = &pod_ratden},
	.bytes_per_frame = POD_BYTES_PER_FRAME
};

/* SysEx message requesting the current channel (program) dump */
static const char pod_request_channel[] = {
	0xf0, 0x00, 0x01, 0x0c, 0x03, 0x75, 0xf7
};

/* header of the MIDI "universal device inquiry" version reply */
static const char pod_version_header[] = {
	0xf2, 0x7e, 0x7f, 0x06, 0x02
};

/* forward declarations: */
static void pod_startup2(unsigned long data);
static void pod_startup3(struct usb_line6_pod *pod);
static void pod_startup4(struct usb_line6_pod *pod);
/*
	Mark all parameters as dirty and notify waiting processes.
*/
static void pod_mark_batch_all_dirty(struct usb_line6_pod *pod)
{
	int param;

	for (param = 0; param < POD_CONTROL_SIZE; param++)
		set_bit(param, pod->param_dirty);
}
/* Allocate a SysEx buffer pre-filled with the POD message header. */
static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
				    int size)
{
	struct usb_line6 *line6 = &pod->line6;

	return line6_alloc_sysex_buffer(line6, POD_SYSEX_CODE, code, size);
}
/*
	Send channel dump data to the PODxt Pro.
*/
static void pod_dump(struct usb_line6_pod *pod, const unsigned char *data)
{
	int size = 1 + sizeof(pod->prog_data);
	char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_DUMP, size);
	if (!sysex)
		return;
	/* Don't know what this is good for, but PODxt Pro transmits it, so we
	 * also do... */
	sysex[SYSEX_DATA_OFS] = 5;
	memcpy(sysex + SYSEX_DATA_OFS + 1, data, sizeof(pod->prog_data));
	line6_send_sysex_message(&pod->line6, sysex, size);
	/* keep the driver's shadow copy in sync and flag all params dirty */
	memcpy(&pod->prog_data, data, sizeof(pod->prog_data));
	pod_mark_batch_all_dirty(pod);
	kfree(sysex);
}
/*
	Store parameter value in driver memory and mark it as dirty.
*/
static void pod_store_parameter(struct usb_line6_pod *pod, int param, int value)
{
	pod->prog_data.control[param] = value;
	set_bit(param, pod->param_dirty);
	/* whole-program dirty flag, cleared by SAVE / program change */
	pod->dirty = 1;
}
/*
	Handle SAVE button: clear the dirty flag and record the press
	so readers of the "dirty" attribute can be notified.
*/
static void pod_save_button_pressed(struct usb_line6_pod *pod, int type,
				    int index)
{
	pod->dirty = 0;
	set_bit(POD_SAVE_PRESSED, &pod->atomic_flags);
}
/*
	Process a completely received MIDI message from the device.
	Dispatches on the MIDI status byte and, for sysex messages, on the
	sub-command byte (buf[5]); updates the driver's cached program data
	and wakes any waiters blocked on system-parameter reads.
*/
void line6_pod_process_message(struct usb_line6_pod *pod)
{
	const unsigned char *buf = pod->line6.buffer_message;

	/* filter messages by type */
	switch (buf[0] & 0xf0) {
	case LINE6_PARAM_CHANGE:
	case LINE6_PROGRAM_CHANGE:
	case LINE6_SYSEX_BEGIN:
		break;		/* handle these further down */
	default:
		return;		/* ignore all others */
	}

	/* process all remaining messages */
	switch (buf[0]) {
	case LINE6_PARAM_CHANGE | LINE6_CHANNEL_DEVICE:
		/* device-originated change: mirror it in prog_data first */
		pod_store_parameter(pod, buf[1], buf[2]);
		/* intentionally no break here! the host-echo handling
		   below applies to device changes too */
	case LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST:
		if ((buf[1] == POD_amp_model_setup) ||
		    (buf[1] == POD_effect_setup))
			/* these also affect other settings */
			line6_dump_request_async(&pod->dumpreq, &pod->line6, 0,
						 LINE6_DUMP_CURRENT);
		break;

	case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_DEVICE:
	case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST:
		/* channel switch: cached program data is stale, request a
		   fresh dump */
		pod->channel_num = buf[1];
		pod->dirty = 0;
		set_bit(POD_CHANNEL_DIRTY, &pod->atomic_flags);
		line6_dump_request_async(&pod->dumpreq, &pod->line6, 0,
					 LINE6_DUMP_CURRENT);
		break;

	case LINE6_SYSEX_BEGIN | LINE6_CHANNEL_DEVICE:
	case LINE6_SYSEX_BEGIN | LINE6_CHANNEL_UNKNOWN:
		if (memcmp(buf + 1, line6_midi_id, sizeof(line6_midi_id)) == 0) {
			/* Line6 manufacturer sysex: buf[5] selects command */
			switch (buf[5]) {
			case POD_SYSEX_DUMP:
				if (pod->line6.message_length ==
				    sizeof(pod->prog_data) + 7) {
					/* which dump did we request? */
					switch (pod->dumpreq.in_progress) {
					case LINE6_DUMP_CURRENT:
						memcpy(&pod->prog_data,
						       buf + 7,
						       sizeof(pod->prog_data));
						pod_mark_batch_all_dirty(pod);
						break;
					case POD_DUMP_MEMORY:
						memcpy(&pod->prog_data_buf,
						       buf + 7,
						       sizeof
						       (pod->prog_data_buf));
						break;
					default:
						DEBUG_MESSAGES(dev_err
						    (pod->line6.ifcdev,
						     "unknown dump code %02X\n",
						     pod->dumpreq.in_progress));
					}
					line6_dump_finished(&pod->dumpreq);
					/* a completed dump also advances the
					   startup sequence */
					pod_startup3(pod);
				} else
					DEBUG_MESSAGES(dev_err
					    (pod->line6.ifcdev,
					     "wrong size of channel dump message (%d instead of %d)\n",
					     pod->line6.message_length,
					     (int)sizeof(pod->prog_data) + 7));
				break;

			case POD_SYSEX_SYSTEM:{
				/* 16-bit value packed into four 4-bit
				   nibbles, most significant first */
				short value =
				    ((int)buf[7] << 12) | ((int)buf[8] << 8) |
				    ((int)buf[9] << 4) | (int)buf[10];
/* store the value and wake any reader blocked in pod_get_system_param_int() */
#define PROCESS_SYSTEM_PARAM(x) \
	case POD_ ## x: \
		pod->x.value = value; \
		wake_up(&pod->x.wait); \
		break;
				switch (buf[6]) {
					PROCESS_SYSTEM_PARAM(monitor_level);
					PROCESS_SYSTEM_PARAM(routing);
					PROCESS_SYSTEM_PARAM(tuner_mute);
					PROCESS_SYSTEM_PARAM(tuner_freq);
					PROCESS_SYSTEM_PARAM(tuner_note);
					PROCESS_SYSTEM_PARAM(tuner_pitch);
#undef PROCESS_SYSTEM_PARAM
				default:
					DEBUG_MESSAGES(dev_err
					    (pod->line6.ifcdev,
					     "unknown tuner/system response %02X\n",
					     buf[6]));
				}
				break;
			}

			case POD_SYSEX_FINISH:
				/* do we need to respond to this? */
				break;

			case POD_SYSEX_SAVE:
				pod_save_button_pressed(pod, buf[6], buf[7]);
				break;

			case POD_SYSEX_CLIP:
				/* audio clipping: record it and wake any
				   reader of the "clip" sysfs file */
				DEBUG_MESSAGES(dev_err
				    (pod->line6.ifcdev, "audio clipped\n"));
				pod->clipping.value = 1;
				wake_up(&pod->clipping.wait);
				break;

			case POD_SYSEX_STORE:
				DEBUG_MESSAGES(dev_err
				    (pod->line6.ifcdev,
				     "message %02X not yet implemented\n",
				     buf[5]));
				break;

			default:
				DEBUG_MESSAGES(dev_err
				    (pod->line6.ifcdev,
				     "unknown sysex message %02X\n", buf[5]));
			}
		} else
		    if (memcmp
			(buf, pod_version_header,
			 sizeof(pod_version_header)) == 0) {
			/* version response: decimal-coded firmware version
			   and 24-bit device id */
			pod->firmware_version =
			    buf[13] * 100 + buf[14] * 10 + buf[15];
			pod->device_id =
			    ((int)buf[8] << 16) | ((int)buf[9] << 8) | (int)
			    buf[10];
			/* the version reply advances the startup sequence */
			pod_startup4(pod);
		} else
			DEBUG_MESSAGES(dev_err
			    (pod->line6.ifcdev, "unknown sysex header\n"));
		break;

	case LINE6_SYSEX_END:
		break;

	default:
		DEBUG_MESSAGES(dev_err
		    (pod->line6.ifcdev,
		     "POD: unknown message %02X\n", buf[0]));
	}
}
/*
	Detect some cases that require a channel dump after sending a command
	to the device. Important notes:
	*) The actual dump request can not be sent here since we are not
	   allowed to wait for the completion of the first message in this
	   context, and sending the dump request before completion of the
	   previous message leaves the POD in an undefined state. The dump
	   request will be sent when the echoed commands are received.
	*) This method fails if a param change message is "chopped" after
	   the first byte.
*/
void line6_pod_midi_postprocess(struct usb_line6_pod *pod, unsigned char *data,
				int length)
{
	int pos;

	if (!pod->midi_postprocess)
		return;

	for (pos = 0; pos < length; ++pos) {
		unsigned char byte = data[pos];

		/* program change: current dump becomes invalid */
		if (byte == (LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST)) {
			line6_invalidate_current(&pod->dumpreq);
			return;
		}

		if (byte != (LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST))
			continue;
		if (pos >= length - 1)
			continue;

		/* amp model / effect setup changes affect other settings */
		if (data[pos + 1] == POD_amp_model_setup ||
		    data[pos + 1] == POD_effect_setup) {
			line6_invalidate_current(&pod->dumpreq);
			return;
		}
	}
}
/*
	Send channel number (i.e., switch to a different sound).
*/
static void pod_send_channel(struct usb_line6_pod *pod, int value)
{
	line6_invalidate_current(&pod->dumpreq);

	if (line6_send_program(&pod->line6, value) != 0) {
		/* nothing will be echoed back, so close the dump request */
		line6_dump_finished(&pod->dumpreq);
		return;
	}

	pod->channel_num = value;
}
/*
	Transmit PODxt Pro control parameter and mirror it locally on
	success.
*/
void line6_pod_transmit_parameter(struct usb_line6_pod *pod, int param,
				  int value)
{
	int err = line6_transmit_parameter(&pod->line6, param, value);

	if (err == 0)
		pod_store_parameter(pod, param, value);

	/* these two parameters also affect other settings */
	if (param == POD_amp_model_setup || param == POD_effect_setup)
		line6_invalidate_current(&pod->dumpreq);
}
/*
Resolve value to memory location.
*/
static int pod_resolve(const char *buf, short block0, short block1,
unsigned char *location)
{
unsigned long value;
short block;
int ret;
ret = strict_strtoul(buf, 10, &value);
if (ret)
return ret;
block = (value < 0x40) ? block0 : block1;
value &= 0x3f;
location[0] = block >> 7;
location[1] = value | (block & 0x7f);
return 0;
}
/*
	Send command to store channel/effects setup/amp setup to PODxt Pro.
	@buf: decimal program number written via sysfs
	@block0/@block1: memory block bases passed through to pod_resolve()
	Returns @count on success or a negative error code.
*/
static ssize_t pod_send_store_command(struct device *dev, const char *buf,
				      size_t count, short block0, short block1)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	int ret;
	int size = 3 + sizeof(pod->prog_data_buf);
	char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_STORE, size);

	/* fix: returning 0 from a sysfs store makes userspace retry the
	   write forever; report the allocation failure instead */
	if (!sysex)
		return -ENOMEM;

	sysex[SYSEX_DATA_OFS] = 5;	/* see pod_dump() */
	ret = pod_resolve(buf, block0, block1, sysex + SYSEX_DATA_OFS + 1);
	if (ret) {
		kfree(sysex);
		return ret;
	}
	memcpy(sysex + SYSEX_DATA_OFS + 3, &pod->prog_data_buf,
	       sizeof(pod->prog_data_buf));

	line6_send_sysex_message(&pod->line6, sysex, size);
	kfree(sysex);
	/* needs some delay here on AMD64 platform */
	return count;
}
/*
	Send command to retrieve channel/effects setup/amp setup from the
	PODxt Pro into prog_data_buf (arrives later as a POD_SYSEX_DUMP).
	Returns @count on success or a negative error code.
*/
static ssize_t pod_send_retrieve_command(struct device *dev, const char *buf,
					 size_t count, short block0,
					 short block1)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	int ret;
	int size = 4;
	char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_DUMPMEM, size);

	/* fix: returning 0 from a sysfs store makes userspace retry the
	   write forever; report the allocation failure instead */
	if (!sysex)
		return -ENOMEM;

	ret = pod_resolve(buf, block0, block1, sysex + SYSEX_DATA_OFS);
	if (ret) {
		kfree(sysex);
		return ret;
	}
	sysex[SYSEX_DATA_OFS + 2] = 0;
	sysex[SYSEX_DATA_OFS + 3] = 0;

	line6_dump_started(&pod->dumpreq, POD_DUMP_MEMORY);

	if (line6_send_sysex_message(&pod->line6, sysex, size) < size)
		line6_dump_finished(&pod->dumpreq);

	kfree(sysex);
	/* needs some delay here on AMD64 platform */
	return count;
}
/*
Generic get name function.
*/
static ssize_t get_name_generic(struct usb_line6_pod *pod, const char *str,
char *buf)
{
int length = 0;
const char *p1;
char *p2;
char *last_non_space = buf;
int retval = line6_dump_wait_interruptible(&pod->dumpreq);
if (retval < 0)
return retval;
for (p1 = str, p2 = buf; *p1; ++p1, ++p2) {
*p2 = *p1;
if (*p2 != ' ')
last_non_space = p2;
if (++length == POD_NAME_LENGTH)
break;
}
*(last_non_space + 1) = '\n';
return last_non_space - buf + 2;
}
/*
	"read" request on "channel" special file: current channel number.
*/
static ssize_t pod_get_channel(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));

	return sprintf(buf, "%d\n", pod->channel_num);
}
/*
	"write" request on "channel" special file: switch to the given
	channel (decimal number).
*/
static ssize_t pod_set_channel(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));
	unsigned long value;
	int ret = strict_strtoul(buf, 10, &value);

	if (ret)
		return ret;

	pod_send_channel(pod, value);
	return count;
}
/*
	"read" request on "name" special file: name embedded in the current
	program data (trailing spaces trimmed by get_name_generic()).
*/
static ssize_t pod_get_name(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	return get_name_generic(pod, pod->prog_data.header + POD_NAME_OFFSET,
				buf);
}
/*
	"read" request on "name_buf" special file: name embedded in the
	retrieve buffer (prog_data_buf) rather than the active program.
*/
static ssize_t pod_get_name_buf(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	return get_name_generic(pod,
				pod->prog_data_buf.header + POD_NAME_OFFSET,
				buf);
}
/*
	"read" request on "dump" special file: raw copy of the current
	program data (waits for any dump in progress to finish).
*/
static ssize_t pod_get_dump(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));
	int err;

	err = line6_dump_wait_interruptible(&pod->dumpreq);
	if (err < 0)
		return err;

	memcpy(buf, &pod->prog_data, sizeof(pod->prog_data));
	return sizeof(pod->prog_data);
}
/*
	"write" request on "dump" special file: transmit a complete program
	data block to the device.
*/
static ssize_t pod_set_dump(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));

	if (count != sizeof(pod->prog_data)) {
		dev_err(pod->line6.ifcdev,
			"data block must be exactly %d bytes\n",
			(int)sizeof(pod->prog_data));
		return -EINVAL;
	}

	pod_dump(pod, buf);
	return sizeof(pod->prog_data);
}
/*
Identify system parameters related to the tuner.
*/
static bool pod_is_tuner(int code)
{
return
(code == POD_tuner_mute) ||
(code == POD_tuner_freq) ||
(code == POD_tuner_note) || (code == POD_tuner_pitch);
}
/*
	Get system parameter (as integer).
	Sends a SYSTEMREQ sysex for @code, then sleeps until
	line6_pod_process_message() stores the response in @param and wakes
	us, or until interrupted by a signal.
	@sign: non-zero if the 16-bit value should be sign-extended
	Returns 0 on success, -ENODEV for tuner parameters while the tuner
	is off, -ENOMEM on allocation failure, or the (negative) result of
	an interrupted wait.
*/
static int pod_get_system_param_int(struct usb_line6_pod *pod, int *value,
				    int code, struct ValueWait *param, int sign)
{
	char *sysex;
	static const int size = 1;
	int retval = 0;

	/* tuner parameters are only meaningful while the tuner is enabled
	   (bit 0x40 of the POD_tuner control) */
	if (((pod->prog_data.control[POD_tuner] & 0x40) == 0)
	    && pod_is_tuner(code))
		return -ENODEV;

	/* send value request to device: */
	param->value = POD_system_invalid;
	sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_SYSTEMREQ, size);
	if (!sysex)
		return -ENOMEM;
	sysex[SYSEX_DATA_OFS] = code;
	line6_send_sysex_message(&pod->line6, sysex, size);
	kfree(sysex);

	/* wait for device to respond: */
	retval =
	    wait_event_interruptible(param->wait,
				     param->value != POD_system_invalid);
	if (retval < 0)
		return retval;

	/* widen the 16-bit response with or without sign extension */
	*value = sign ? (int)(signed short)param->value : (int)(unsigned short)
	    param->value;

	if (*value == POD_system_invalid)
		*value = 0;	/* don't report uninitialized values */

	return 0;
}
/*
	Get system parameter (as decimal string), for the sysfs getters.
*/
static ssize_t pod_get_system_param_string(struct usb_line6_pod *pod, char *buf,
					   int code, struct ValueWait *param,
					   int sign)
{
	int value = 0;
	int err = pod_get_system_param_int(pod, &value, code, param, sign);

	if (err < 0)
		return err;

	return sprintf(buf, "%d\n", value);
}
/*
	Send system parameter (from integer).
	Returns 0 on success, -EINVAL for tuner parameters while the tuner
	is off, -ENOMEM on allocation failure.
*/
static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
				    int code)
{
	char *sysex;
	static const int size = 5;
	int i;

	/* tuner parameters require the tuner to be enabled (bit 0x40 of
	   the POD_tuner control) */
	if (pod_is_tuner(code) &&
	    ((pod->prog_data.control[POD_tuner] & 0x40) == 0))
		return -EINVAL;

	/* send value to tuner: */
	sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_SYSTEM, size);
	if (!sysex)
		return -ENOMEM;

	sysex[SYSEX_DATA_OFS] = code;
	/* value is transmitted as four 4-bit nibbles, most significant
	   nibble first */
	for (i = 0; i < 4; i++)
		sysex[SYSEX_DATA_OFS + 1 + i] = (value >> (12 - 4 * i)) & 0x0f;

	line6_send_sysex_message(&pod->line6, sysex, size);
	kfree(sysex);
	return 0;
}
/*
	Send system parameter (parsed from a decimal string, masked to the
	parameter's valid bits), for the sysfs setters.
*/
static ssize_t pod_set_system_param_string(struct usb_line6_pod *pod,
					   const char *buf, int count, int code,
					   unsigned short mask)
{
	unsigned short value = simple_strtoul(buf, NULL, 10) & mask;
	int err = pod_set_system_param_int(pod, value, code);

	if (err < 0)
		return err;

	return count;
}
/*
	"read" request on "dump_buf" special file: raw copy of the retrieve
	buffer (waits for any dump in progress to finish).
*/
static ssize_t pod_get_dump_buf(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));
	int err;

	err = line6_dump_wait_interruptible(&pod->dumpreq);
	if (err < 0)
		return err;

	memcpy(buf, &pod->prog_data_buf, sizeof(pod->prog_data_buf));
	return sizeof(pod->prog_data_buf);
}
/*
	"write" request on "dump_buf" special file: replace the retrieve
	buffer contents (not transmitted to the device).
*/
static ssize_t pod_set_dump_buf(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);

	/* consistency fix: this handler operates on prog_data_buf, so
	   validate and copy against that member (the original mixed
	   sizeof(prog_data) with a write into prog_data_buf) */
	if (count != sizeof(pod->prog_data_buf)) {
		dev_err(pod->line6.ifcdev,
			"data block must be exactly %d bytes\n",
			(int)sizeof(pod->prog_data_buf));
		return -EINVAL;
	}

	memcpy(&pod->prog_data_buf, buf, sizeof(pod->prog_data_buf));
	return sizeof(pod->prog_data_buf);
}
/*
	"write" request on "finish" special file: send an (empty)
	POD_SYSEX_FINISH message to the device.
*/
static ssize_t pod_set_finish(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	int size = 0;
	char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_FINISH, size);

	/* fix: returning 0 from a sysfs store makes userspace retry the
	   write forever; report the allocation failure instead */
	if (!sysex)
		return -ENOMEM;

	line6_send_sysex_message(&pod->line6, sysex, size);
	kfree(sysex);
	return count;
}
/*
	"write" request on "store_channel" special file.
	NOTE(review): the block0/block1 values below are device-specific
	memory-block bases (see pod_resolve()) — confirm against the PODxt
	Pro sysex documentation.
*/
static ssize_t pod_set_store_channel(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return pod_send_store_command(dev, buf, count, 0x0000, 0x00c0);
}

/*
	"write" request on "store_effects_setup" special file.
*/
static ssize_t pod_set_store_effects_setup(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	return pod_send_store_command(dev, buf, count, 0x0080, 0x0080);
}

/*
	"write" request on "store_amp_setup" special file.
*/
static ssize_t pod_set_store_amp_setup(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	return pod_send_store_command(dev, buf, count, 0x0040, 0x0100);
}

/*
	"write" request on "retrieve_channel" special file.
*/
static ssize_t pod_set_retrieve_channel(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return pod_send_retrieve_command(dev, buf, count, 0x0000, 0x00c0);
}

/*
	"write" request on "retrieve_effects_setup" special file.
*/
static ssize_t pod_set_retrieve_effects_setup(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	return pod_send_retrieve_command(dev, buf, count, 0x0080, 0x0080);
}

/*
	"write" request on "retrieve_amp_setup" special file.
*/
static ssize_t pod_set_retrieve_amp_setup(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	return pod_send_retrieve_command(dev, buf, count, 0x0040, 0x0100);
}
/*
	"read" request on "dirty" special file: '1' if the cached program
	data differs from what the device has stored, '0' otherwise.
*/
static ssize_t pod_get_dirty(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));
	char flag = pod->dirty ? '1' : '0';

	buf[0] = flag;
	buf[1] = '\n';
	return 2;
}
/*
	"read" request on "midi_postprocess" special file: whether outgoing
	MIDI is scanned for dump-invalidating commands (see
	line6_pod_midi_postprocess()).
*/
static ssize_t pod_get_midi_postprocess(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	return sprintf(buf, "%d\n", pod->midi_postprocess);
}
/*
	"write" request on "midi_postprocess" special file: any non-zero
	value enables postprocessing of outgoing MIDI.
*/
static ssize_t pod_set_midi_postprocess(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));
	unsigned long value;
	int ret = strict_strtoul(buf, 10, &value);

	if (ret)
		return ret;

	pod->midi_postprocess = (value != 0);
	return count;
}
/*
	"read" request on "serial_number" special file (value read from the
	device in pod_startup5()).
*/
static ssize_t pod_get_serial_number(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	return sprintf(buf, "%d\n", pod->serial_number);
}
/*
	"read" request on "firmware_version" special file, formatted as
	"major.minor" from the decimal-coded value (e.g. 302 -> "3.02").
*/
static ssize_t pod_get_firmware_version(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct usb_line6_pod *pod =
	    usb_get_intfdata(to_usb_interface(dev));
	int version = pod->firmware_version;

	return sprintf(buf, "%d.%02d\n", version / 100, version % 100);
}
/*
	"read" request on "device_id" special file (24-bit id decoded from
	the version response in line6_pod_process_message()).
*/
static ssize_t pod_get_device_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	return sprintf(buf, "%d\n", pod->device_id);
}
/*
	"read" request on "clip" special file: block until the device
	reports audio clipping (clipping.value is set by
	line6_pod_process_message()).
	NOTE(review): on success this returns 0 without writing anything to
	@buf, i.e. the read reports EOF; callers use the unblocking itself
	as the signal.
*/
static ssize_t pod_wait_for_clip(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct usb_interface *interface = to_usb_interface(dev);
	struct usb_line6_pod *pod = usb_get_intfdata(interface);
	return wait_event_interruptible(pod->clipping.wait,
					pod->clipping.value != 0);
}
/*
	POD startup procedure.
	This is a sequence of functions with special requirements (e.g., must
	not run immediately after initialization, must not run in interrupt
	context). After the last one has finished, the device is ready to use.
*/

/* stage 1: arm the startup timer (runs from pod_try_init()) */
static void pod_startup1(struct usb_line6_pod *pod)
{
	CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_INIT);

	/* delay startup procedure: */
	line6_start_timer(&pod->startup_timer, POD_STARTUP_DELAY, pod_startup2,
			  (unsigned long)pod);
}
/* stage 2: timer callback; request the current channel dump and re-arm
   the timer until later stages mark startup complete */
static void pod_startup2(unsigned long data)
{
	struct usb_line6_pod *pod = (struct usb_line6_pod *)data;

	/* schedule another startup procedure until startup is complete: */
	if (pod->startup_progress >= POD_STARTUP_LAST)
		return;

	pod->startup_progress = POD_STARTUP_DUMPREQ;
	/* presumably the dump response (via pod_startup3/4/5) advances
	   startup_progress past POD_STARTUP_LAST — confirm against
	   CHECK_STARTUP_PROGRESS in the driver header */
	line6_start_timer(&pod->startup_timer, POD_STARTUP_DELAY, pod_startup2,
			  (unsigned long)pod);

	/* current channel dump: */
	line6_dump_request_async(&pod->dumpreq, &pod->line6, 0,
				 LINE6_DUMP_CURRENT);
}
/* stage 3: called when a channel dump arrives (line6_pod_process_message) */
static void pod_startup3(struct usb_line6_pod *pod)
{
	struct usb_line6 *line6 = &pod->line6;
	CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_VERSIONREQ);

	/* request firmware version: */
	line6_version_request_async(line6);
}
/* stage 4: called when the version response arrives; defer the remaining
   (sleeping) setup work to the global workqueue */
static void pod_startup4(struct usb_line6_pod *pod)
{
	CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_WORKQUEUE);

	/* schedule work for global work queue: */
	schedule_work(&pod->startup_work);
}
/* stage 5: workqueue handler; may sleep. Reads the serial number,
   registers the ALSA card and creates the firmware-dependent sysfs files.
   The card is registered this late because doing it earlier makes the
   PODxt Live display "Invalid Code Error 07" (see pod_try_init()). */
static void pod_startup5(struct work_struct *work)
{
	struct usb_line6_pod *pod =
	    container_of(work, struct usb_line6_pod, startup_work);
	struct usb_line6 *line6 = &pod->line6;

	CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_SETUP);

	/* serial number: */
	line6_read_serial_number(&pod->line6, &pod->serial_number);

	/* ALSA audio interface: */
	line6_register_audio(line6);

	/* device files: */
	line6_pod_create_files(pod->firmware_version,
			       line6->properties->device_bit, line6->ifcdev);
}
/* generate a sysfs "get" handler for the system parameter <code> */
#define POD_GET_SYSTEM_PARAM(code, sign) \
static ssize_t pod_get_ ## code(struct device *dev, \
				struct device_attribute *attr, char *buf) \
{ \
	struct usb_interface *interface = to_usb_interface(dev); \
	struct usb_line6_pod *pod = usb_get_intfdata(interface); \
	return pod_get_system_param_string(pod, buf, POD_ ## code, \
					   &pod->code, sign); \
}

/* generate both the "get" and a "set" handler (writes masked by <mask>) */
#define POD_GET_SET_SYSTEM_PARAM(code, mask, sign) \
POD_GET_SYSTEM_PARAM(code, sign) \
static ssize_t pod_set_ ## code(struct device *dev, \
				struct device_attribute *attr, \
				const char *buf, size_t count) \
{ \
	struct usb_interface *interface = to_usb_interface(dev); \
	struct usb_line6_pod *pod = usb_get_intfdata(interface); \
	return pod_set_system_param_string(pod, buf, count, POD_ ## code, mask); \
}

POD_GET_SET_SYSTEM_PARAM(monitor_level, 0xffff, 0);
POD_GET_SET_SYSTEM_PARAM(routing, 0x0003, 0);
POD_GET_SET_SYSTEM_PARAM(tuner_mute, 0x0001, 0);
POD_GET_SET_SYSTEM_PARAM(tuner_freq, 0xffff, 0);
POD_GET_SYSTEM_PARAM(tuner_note, 1);
POD_GET_SYSTEM_PARAM(tuner_pitch, 1);

/* fix: the original "#undef GET_SET_SYSTEM_PARAM" / "#undef
 * GET_SYSTEM_PARAM" referenced names that were never defined, leaving the
 * real helper macros leaked into the rest of the translation unit */
#undef POD_GET_SET_SYSTEM_PARAM
#undef POD_GET_SYSTEM_PARAM
/* POD special files: sysfs attributes wired to the handlers above.
   S_IRUGO entries are read-only (writes go to line6_nop_write); S_IWUSR
   entries are root-writable. */
static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel,
		   pod_set_channel);
static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf,
		   pod_set_dump_buf);
static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version,
		   line6_nop_write);
static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO,
		   pod_get_midi_postprocess, pod_set_midi_postprocess);
static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level,
		   pod_set_monitor_level);
static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read,
		   pod_set_retrieve_amp_setup);
static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read,
		   pod_set_retrieve_channel);
static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read,
		   pod_set_retrieve_effects_setup);
static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing,
		   pod_set_routing);
static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number,
		   line6_nop_write);
static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read,
		   pod_set_store_amp_setup);
static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read,
		   pod_set_store_channel);
static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read,
		   pod_set_store_effects_setup);
static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq,
		   pod_set_tuner_freq);
static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute,
		   pod_set_tuner_mute);
static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);

#ifdef CONFIG_LINE6_USB_RAW
/* raw sysex access, only when the debug option is enabled */
static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
#endif
/* control info callback: a single integer in [0, 65535] */
static int snd_pod_control_monitor_info(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_info *uinfo)
{
	struct snd_ctl_elem_info *info = uinfo;

	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	info->count = 1;
	info->value.integer.min = 0;
	info->value.integer.max = 65535;
	return 0;
}
/* control get callback: report the cached monitor level (updated by
   line6_pod_process_message() when the device responds) */
static int snd_pod_control_monitor_get(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
	struct usb_line6_pod *pod = (struct usb_line6_pod *)line6pcm->line6;
	ucontrol->value.integer.value[0] = pod->monitor_level.value;
	return 0;
}
/* control put callback: send the new monitor level to the device and
   cache it; returns 1 if the value changed, 0 if unchanged */
static int snd_pod_control_monitor_put(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
	struct usb_line6_pod *pod = (struct usb_line6_pod *)line6pcm->line6;

	if (ucontrol->value.integer.value[0] == pod->monitor_level.value)
		return 0;

	/* NOTE(review): the error code of pod_set_system_param_int() is
	   silently dropped here — the cached value may get out of sync with
	   the device on failure */
	pod->monitor_level.value = ucontrol->value.integer.value[0];
	pod_set_system_param_int(pod, ucontrol->value.integer.value[0],
				 POD_monitor_level);
	return 1;
}
/* control definition: ALSA mixer element for the device's monitor volume */
static struct snd_kcontrol_new pod_control_monitor = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Monitor Playback Volume",
	.index = 0,
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.info = snd_pod_control_monitor_info,
	.get = snd_pod_control_monitor_get,
	.put = snd_pod_control_monitor_put
};
/*
	POD destructor: release everything pod_try_init() may have set up.
	Must be safe to call on a partially initialized device (it is used
	on the line6_pod_init() failure path) and when no device is bound.
*/
static void pod_destruct(struct usb_interface *interface)
{
	struct usb_line6_pod *pod = usb_get_intfdata(interface);

	if (pod == NULL)
		return;
	line6_cleanup_audio(&pod->line6);

	/* stop any pending startup stages before freeing their data */
	del_timer(&pod->startup_timer);
	cancel_work_sync(&pod->startup_work);

	/* free dump request data: */
	line6_dumpreq_destruct(&pod->dumpreq);
}
/*
	Create sysfs entries.
	CHECK_RETURN presumably stores into the local `err` and returns it
	on failure — confirm against the macro definition in the driver
	header; files created before a failure are removed later in
	line6_pod_disconnect().
*/
static int pod_create_files2(struct device *dev)
{
	int err;

	CHECK_RETURN(device_create_file(dev, &dev_attr_channel));
	CHECK_RETURN(device_create_file(dev, &dev_attr_clip));
	CHECK_RETURN(device_create_file(dev, &dev_attr_device_id));
	CHECK_RETURN(device_create_file(dev, &dev_attr_dirty));
	CHECK_RETURN(device_create_file(dev, &dev_attr_dump));
	CHECK_RETURN(device_create_file(dev, &dev_attr_dump_buf));
	CHECK_RETURN(device_create_file(dev, &dev_attr_finish));
	CHECK_RETURN(device_create_file(dev, &dev_attr_firmware_version));
	CHECK_RETURN(device_create_file(dev, &dev_attr_midi_postprocess));
	CHECK_RETURN(device_create_file(dev, &dev_attr_monitor_level));
	CHECK_RETURN(device_create_file(dev, &dev_attr_name));
	CHECK_RETURN(device_create_file(dev, &dev_attr_name_buf));
	CHECK_RETURN(device_create_file(dev, &dev_attr_retrieve_amp_setup));
	CHECK_RETURN(device_create_file(dev, &dev_attr_retrieve_channel));
	CHECK_RETURN(device_create_file(dev, &dev_attr_retrieve_effects_setup));
	CHECK_RETURN(device_create_file(dev, &dev_attr_routing));
	CHECK_RETURN(device_create_file(dev, &dev_attr_serial_number));
	CHECK_RETURN(device_create_file(dev, &dev_attr_store_amp_setup));
	CHECK_RETURN(device_create_file(dev, &dev_attr_store_channel));
	CHECK_RETURN(device_create_file(dev, &dev_attr_store_effects_setup));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_freq));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_mute));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_note));
	CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_pitch));
#ifdef CONFIG_LINE6_USB_RAW
	CHECK_RETURN(device_create_file(dev, &dev_attr_raw));
#endif
	return 0;
}
/*
	Try to init POD device: sysfs files, audio, MIDI and PCM subsystems,
	the monitor mixer control, and (for devices with a control channel)
	the delayed startup sequence.
	Returns 0 on success or a negative error code; the caller
	(line6_pod_init) runs pod_destruct() on failure.
*/
static int pod_try_init(struct usb_interface *interface,
			struct usb_line6_pod *pod)
{
	int err;
	struct usb_line6 *line6;

	/* fix: validate the arguments BEFORE dereferencing pod — the
	   original initialized pod->startup_timer/startup_work (and took
	   &pod->line6) ahead of this NULL check */
	if ((interface == NULL) || (pod == NULL))
		return -ENODEV;

	line6 = &pod->line6;
	init_timer(&pod->startup_timer);
	INIT_WORK(&pod->startup_work, pod_startup5);
	pod->channel_num = 255;

	/* initialize wait queues: */
	init_waitqueue_head(&pod->monitor_level.wait);
	init_waitqueue_head(&pod->routing.wait);
	init_waitqueue_head(&pod->tuner_mute.wait);
	init_waitqueue_head(&pod->tuner_freq.wait);
	init_waitqueue_head(&pod->tuner_note.wait);
	init_waitqueue_head(&pod->tuner_pitch.wait);
	init_waitqueue_head(&pod->clipping.wait);

	memset(pod->param_dirty, 0xff, sizeof(pod->param_dirty));

	/* initialize USB buffers: */
	err = line6_dumpreq_init(&pod->dumpreq, pod_request_channel,
				 sizeof(pod_request_channel));
	if (err < 0) {
		dev_err(&interface->dev, "Out of memory\n");
		return -ENOMEM;
	}

	/* create sysfs entries: */
	err = pod_create_files2(&interface->dev);
	if (err < 0)
		return err;

	/* initialize audio system: */
	err = line6_init_audio(line6);
	if (err < 0)
		return err;

	/* initialize MIDI subsystem: */
	err = line6_init_midi(line6);
	if (err < 0)
		return err;

	/* initialize PCM subsystem: */
	err = line6_init_pcm(line6, &pod_pcm_properties);
	if (err < 0)
		return err;

	/* register monitor control: */
	err = snd_ctl_add(line6->card,
			  snd_ctl_new1(&pod_control_monitor, line6->line6pcm));
	if (err < 0)
		return err;

	/*
	   When the sound card is registered at this point, the PODxt Live
	   displays "Invalid Code Error 07", so we do it later in the event
	   handler.
	 */
	if (pod->line6.properties->capabilities & LINE6_BIT_CONTROL) {
		pod->monitor_level.value = POD_system_invalid;

		/* initiate startup procedure: */
		pod_startup1(pod);
	}

	return 0;
}
/*
	Init POD device; on failure, tear down whatever pod_try_init()
	managed to set up.
*/
int line6_pod_init(struct usb_interface *interface, struct usb_line6_pod *pod)
{
	int err;

	err = pod_try_init(interface, pod);
	if (err < 0)
		pod_destruct(interface);

	return err;
}
/*
	POD device disconnected: stop PCM streaming, remove all sysfs
	entries, then free the driver data via pod_destruct().
*/
void line6_pod_disconnect(struct usb_interface *interface)
{
	struct usb_line6_pod *pod;

	if (interface == NULL)
		return;
	pod = usb_get_intfdata(interface);

	if (pod != NULL) {
		struct snd_line6_pcm *line6pcm = pod->line6.line6pcm;
		struct device *dev = &interface->dev;

		if (line6pcm != NULL)
			line6_pcm_disconnect(line6pcm);

		/* NOTE(review): dev = &interface->dev is an address of a
		   struct member and can never be NULL; this check is
		   redundant */
		if (dev != NULL) {
			/* remove sysfs entries: */
			line6_pod_remove_files(pod->firmware_version,
					       pod->line6.
					       properties->device_bit, dev);

			device_remove_file(dev, &dev_attr_channel);
			device_remove_file(dev, &dev_attr_clip);
			device_remove_file(dev, &dev_attr_device_id);
			device_remove_file(dev, &dev_attr_dirty);
			device_remove_file(dev, &dev_attr_dump);
			device_remove_file(dev, &dev_attr_dump_buf);
			device_remove_file(dev, &dev_attr_finish);
			device_remove_file(dev, &dev_attr_firmware_version);
			device_remove_file(dev, &dev_attr_midi_postprocess);
			device_remove_file(dev, &dev_attr_monitor_level);
			device_remove_file(dev, &dev_attr_name);
			device_remove_file(dev, &dev_attr_name_buf);
			device_remove_file(dev, &dev_attr_retrieve_amp_setup);
			device_remove_file(dev, &dev_attr_retrieve_channel);
			device_remove_file(dev,
					   &dev_attr_retrieve_effects_setup);
			device_remove_file(dev, &dev_attr_routing);
			device_remove_file(dev, &dev_attr_serial_number);
			device_remove_file(dev, &dev_attr_store_amp_setup);
			device_remove_file(dev, &dev_attr_store_channel);
			device_remove_file(dev, &dev_attr_store_effects_setup);
			device_remove_file(dev, &dev_attr_tuner_freq);
			device_remove_file(dev, &dev_attr_tuner_mute);
			device_remove_file(dev, &dev_attr_tuner_note);
			device_remove_file(dev, &dev_attr_tuner_pitch);
#ifdef CONFIG_LINE6_USB_RAW
			device_remove_file(dev, &dev_attr_raw);
#endif
		}
	}

	pod_destruct(interface);
}
/*
ad1816a.c - lowlevel code for Analog Devices AD1816A chip.
Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <sound/core.h>
#include <sound/tlv.h>
#include <sound/ad1816a.h>
#include <asm/io.h>
#include <asm/dma.h>
/* Poll the chip-status register until AD1816A_READY is set; give up after
   ~10 ms (1000 polls at 10 us each) and return -EBUSY. */
static inline int snd_ad1816a_busy_wait(struct snd_ad1816a *chip)
{
	int attempts;

	for (attempts = 1000; attempts > 0; attempts--) {
		if (inb(AD1816A_REG(AD1816A_CHIP_STATUS)) & AD1816A_READY)
			return 0;
		udelay(10);
	}

	snd_printk(KERN_WARNING "chip busy.\n");
	return -EBUSY;
}
/* Read a direct register, waiting for the chip to become ready first.
   NOTE(review): a busy-wait timeout is ignored and the read is performed
   anyway. */
static inline unsigned char snd_ad1816a_in(struct snd_ad1816a *chip, unsigned char reg)
{
	snd_ad1816a_busy_wait(chip);
	return inb(AD1816A_REG(reg));
}
/* Write a direct register, waiting for the chip to become ready first. */
static inline void snd_ad1816a_out(struct snd_ad1816a *chip, unsigned char reg,
				   unsigned char value)
{
	snd_ad1816a_busy_wait(chip);
	outb(value, AD1816A_REG(reg));
}
/* Read-modify-write a direct register: replace only the bits in @mask. */
static inline void snd_ad1816a_out_mask(struct snd_ad1816a *chip, unsigned char reg,
					unsigned char mask, unsigned char value)
{
	unsigned char old = snd_ad1816a_in(chip, reg);

	snd_ad1816a_out(chip, reg, (old & ~mask) | (value & mask));
}
/*
 * Read a 16-bit indirect register: select it via the index port, then
 * fetch the low and high data bytes.
 *
 * Fix: the two port reads were operands of a single '|' expression,
 * whose evaluation order is unspecified in C — fragile for indirect
 * hardware register access.  Sequence them explicitly, low byte first.
 */
static unsigned short snd_ad1816a_read(struct snd_ad1816a *chip, unsigned char reg)
{
	unsigned short lo, hi;

	snd_ad1816a_out(chip, AD1816A_INDIR_ADDR, reg & 0x3f);
	lo = snd_ad1816a_in(chip, AD1816A_INDIR_DATA_LOW);
	hi = snd_ad1816a_in(chip, AD1816A_INDIR_DATA_HIGH);
	return lo | (hi << 8);
}
/* Write a 16-bit indirect register: select it via the index port,
 * then store the value as two byte accesses, low byte first. */
static void snd_ad1816a_write(struct snd_ad1816a *chip, unsigned char reg,
			      unsigned short value)
{
	snd_ad1816a_out(chip, AD1816A_INDIR_ADDR, reg & 0x3f);
	snd_ad1816a_out(chip, AD1816A_INDIR_DATA_LOW,
			(unsigned char)(value & 0xff));
	snd_ad1816a_out(chip, AD1816A_INDIR_DATA_HIGH,
			(unsigned char)((value >> 8) & 0xff));
}
/* Read-modify-write an indirect 16-bit register: only the bits
 * selected by 'mask' are replaced with the matching bits of 'value'. */
static void snd_ad1816a_write_mask(struct snd_ad1816a *chip, unsigned char reg,
				   unsigned short mask, unsigned short value)
{
	unsigned short cur = snd_ad1816a_read(chip, reg);

	snd_ad1816a_write(chip, reg, (cur & ~mask) | (value & mask));
}
/*
 * Translate an ALSA PCM format + channel count into the chip's format
 * bits.  Unknown formats fall back to 8-bit linear; stereo sets the
 * AD1816A_FMT_STEREO flag.
 */
static unsigned char snd_ad1816a_get_format(struct snd_ad1816a *chip,
					    unsigned int format, int channels)
{
	unsigned char fmt;

	if (format == SNDRV_PCM_FORMAT_MU_LAW)
		fmt = AD1816A_FMT_ULAW_8;
	else if (format == SNDRV_PCM_FORMAT_A_LAW)
		fmt = AD1816A_FMT_ALAW_8;
	else if (format == SNDRV_PCM_FORMAT_S16_LE)
		fmt = AD1816A_FMT_LINEAR_16_LIT;
	else if (format == SNDRV_PCM_FORMAT_S16_BE)
		fmt = AD1816A_FMT_LINEAR_16_BIG;
	else
		fmt = AD1816A_FMT_LINEAR_8;

	if (channels > 1)
		fmt |= AD1816A_FMT_STEREO;
	return fmt;
}
/*
 * Claim one of the chip's operating modes (playback, capture or timer).
 *
 * Returns -EAGAIN if the requested mode is already in use.  On success
 * the pending IRQ status for that mode is cleared and its interrupt
 * source is enabled before the mode bit is recorded in chip->mode.
 */
static int snd_ad1816a_open(struct snd_ad1816a *chip, unsigned int mode)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);

	/* Each mode may only be held by one user at a time. */
	if (chip->mode & mode) {
		spin_unlock_irqrestore(&chip->lock, flags);
		return -EAGAIN;
	}

	/* Ack any stale pending IRQ, then enable the interrupt source. */
	switch ((mode &= AD1816A_MODE_OPEN)) {
	case AD1816A_MODE_PLAYBACK:
		snd_ad1816a_out_mask(chip, AD1816A_INTERRUPT_STATUS,
				     AD1816A_PLAYBACK_IRQ_PENDING, 0x00);
		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
				       AD1816A_PLAYBACK_IRQ_ENABLE, 0xffff);
		break;
	case AD1816A_MODE_CAPTURE:
		snd_ad1816a_out_mask(chip, AD1816A_INTERRUPT_STATUS,
				     AD1816A_CAPTURE_IRQ_PENDING, 0x00);
		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
				       AD1816A_CAPTURE_IRQ_ENABLE, 0xffff);
		break;
	case AD1816A_MODE_TIMER:
		snd_ad1816a_out_mask(chip, AD1816A_INTERRUPT_STATUS,
				     AD1816A_TIMER_IRQ_PENDING, 0x00);
		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
				       AD1816A_TIMER_IRQ_ENABLE, 0xffff);
	}
	chip->mode |= mode;

	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/*
 * Release a previously claimed operating mode: ack its pending IRQ,
 * disable its interrupt source and clear the mode bit.  When no open
 * mode remains, chip->mode is reset entirely.
 */
static void snd_ad1816a_close(struct snd_ad1816a *chip, unsigned int mode)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);

	switch ((mode &= AD1816A_MODE_OPEN)) {
	case AD1816A_MODE_PLAYBACK:
		snd_ad1816a_out_mask(chip, AD1816A_INTERRUPT_STATUS,
				     AD1816A_PLAYBACK_IRQ_PENDING, 0x00);
		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
				       AD1816A_PLAYBACK_IRQ_ENABLE, 0x0000);
		break;
	case AD1816A_MODE_CAPTURE:
		snd_ad1816a_out_mask(chip, AD1816A_INTERRUPT_STATUS,
				     AD1816A_CAPTURE_IRQ_PENDING, 0x00);
		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
				       AD1816A_CAPTURE_IRQ_ENABLE, 0x0000);
		break;
	case AD1816A_MODE_TIMER:
		snd_ad1816a_out_mask(chip, AD1816A_INTERRUPT_STATUS,
				     AD1816A_TIMER_IRQ_PENDING, 0x00);
		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
				       AD1816A_TIMER_IRQ_ENABLE, 0x0000);
	}

	/* Collapse to 0 once the last open mode is gone. */
	if (!((chip->mode &= ~mode) & AD1816A_MODE_OPEN))
		chip->mode = 0;

	spin_unlock_irqrestore(&chip->lock, flags);
}
/*
 * Common start/stop helper for the playback and capture engines.
 *
 * @what:      enable-bit mask of the engine (informational only; see
 *             comment below on why it cannot select the register)
 * @channel:   SNDRV_PCM_STREAM_* constant of the triggered stream
 * @cmd:       SNDRV_PCM_TRIGGER_* command
 * @iscapture: non-zero to address the capture config register
 *
 * Returns 0 on success, -EINVAL for unsupported trigger commands.
 */
static int snd_ad1816a_trigger(struct snd_ad1816a *chip, unsigned char what,
			       int channel, int cmd, int iscapture)
{
	int error = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_STOP:
		spin_lock(&chip->lock);
		cmd = (cmd == SNDRV_PCM_TRIGGER_START) ? 0xff: 0x00;
		/* if (what & AD1816A_PLAYBACK_ENABLE) */
		/* That is not valid, because playback and capture enable
		 * are the same bit pattern, just to different addresses
		 */
		if (! iscapture)
			snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG,
					     AD1816A_PLAYBACK_ENABLE, cmd);
		else
			snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG,
					     AD1816A_CAPTURE_ENABLE, cmd);
		spin_unlock(&chip->lock);
		break;
	default:
		/* Fix: log the rejected command 'cmd' — previously the
		 * always-valid enable mask 'what' was printed instead. */
		snd_printk(KERN_WARNING "invalid trigger mode 0x%x.\n", cmd);
		error = -EINVAL;
	}

	return error;
}
/* PCM trigger callback for playback: delegate to the common helper. */
static int snd_ad1816a_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	return snd_ad1816a_trigger(snd_pcm_substream_chip(substream),
				   AD1816A_PLAYBACK_ENABLE,
				   SNDRV_PCM_STREAM_PLAYBACK, cmd, 0);
}
/* PCM trigger callback for capture: delegate to the common helper. */
static int snd_ad1816a_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
	return snd_ad1816a_trigger(snd_pcm_substream_chip(substream),
				   AD1816A_CAPTURE_ENABLE,
				   SNDRV_PCM_STREAM_CAPTURE, cmd, 1);
}
/* PCM hw_params callback: allocate the DMA buffer for the substream. */
static int snd_ad1816a_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/* PCM hw_free callback: release the DMA buffer again. */
static int snd_ad1816a_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}
/*
 * PCM prepare callback for playback: stop the engine, program the ISA
 * DMA channel (auto-init cyclic mode), set sample rate and format, and
 * load the period size into the base count register.
 */
static int snd_ad1816a_playback_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);
	unsigned long flags;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size, rate;

	spin_lock_irqsave(&chip->lock, flags);

	chip->p_dma_size = size = snd_pcm_lib_buffer_bytes(substream);
	/* Make sure the engine is stopped and in DMA (not PIO) mode. */
	snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG,
			     AD1816A_PLAYBACK_ENABLE | AD1816A_PLAYBACK_PIO, 0x00);

	snd_dma_program(chip->dma1, runtime->dma_addr, size,
			DMA_MODE_WRITE | DMA_AUTOINIT);

	rate = runtime->rate;
	/* Rescale the rate when a non-standard master clock is fitted;
	 * assumes clock_freq is relative to a 33000-unit reference —
	 * TODO confirm the units against the board-level setup code. */
	if (chip->clock_freq)
		rate = (rate * 33000) / chip->clock_freq;
	snd_ad1816a_write(chip, AD1816A_PLAYBACK_SAMPLE_RATE, rate);
	snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG,
			     AD1816A_FMT_ALL | AD1816A_FMT_STEREO,
			     snd_ad1816a_get_format(chip, runtime->format,
						    runtime->channels));

	/* Base count is in 32-bit units, minus one per chip convention. */
	snd_ad1816a_write(chip, AD1816A_PLAYBACK_BASE_COUNT,
			  snd_pcm_lib_period_bytes(substream) / 4 - 1);

	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/*
 * PCM prepare callback for capture: mirror image of the playback
 * prepare, using the capture config/rate/count registers and dma2.
 */
static int snd_ad1816a_capture_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);
	unsigned long flags;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int size, rate;

	spin_lock_irqsave(&chip->lock, flags);

	chip->c_dma_size = size = snd_pcm_lib_buffer_bytes(substream);
	/* Stop the engine and select DMA (not PIO) transfers. */
	snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG,
			     AD1816A_CAPTURE_ENABLE | AD1816A_CAPTURE_PIO, 0x00);

	snd_dma_program(chip->dma2, runtime->dma_addr, size,
			DMA_MODE_READ | DMA_AUTOINIT);

	rate = runtime->rate;
	/* Same clock rescaling as the playback path. */
	if (chip->clock_freq)
		rate = (rate * 33000) / chip->clock_freq;
	snd_ad1816a_write(chip, AD1816A_CAPTURE_SAMPLE_RATE, rate);
	snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG,
			     AD1816A_FMT_ALL | AD1816A_FMT_STEREO,
			     snd_ad1816a_get_format(chip, runtime->format,
						    runtime->channels));

	/* Base count is in 32-bit units, minus one per chip convention. */
	snd_ad1816a_write(chip, AD1816A_CAPTURE_BASE_COUNT,
			  snd_pcm_lib_period_bytes(substream) / 4 - 1);

	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/* PCM pointer callback: current playback position in frames, taken
 * from the ISA DMA residue; 0 while playback mode is not open. */
static snd_pcm_uframes_t snd_ad1816a_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);

	if (chip->mode & AD1816A_MODE_PLAYBACK)
		return bytes_to_frames(substream->runtime,
				       snd_dma_pointer(chip->dma1,
						       chip->p_dma_size));
	return 0;
}
/* PCM pointer callback: current capture position in frames, taken
 * from the ISA DMA residue; 0 while capture mode is not open. */
static snd_pcm_uframes_t snd_ad1816a_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);

	if (chip->mode & AD1816A_MODE_CAPTURE)
		return bytes_to_frames(substream->runtime,
				       snd_dma_pointer(chip->dma2,
						       chip->c_dma_size));
	return 0;
}
/*
 * IRQ handler: read the pending sources, dispatch period-elapsed /
 * timer events outside the lock, then acknowledge all interrupts with
 * a single status write.
 */
static irqreturn_t snd_ad1816a_interrupt(int irq, void *dev_id)
{
	struct snd_ad1816a *chip = dev_id;
	unsigned char status;

	spin_lock(&chip->lock);
	status = snd_ad1816a_in(chip, AD1816A_INTERRUPT_STATUS);
	spin_unlock(&chip->lock);

	/* Dispatch with the lock released: the elapsed/timer callbacks
	 * may re-enter chip code that takes the lock itself. */
	if ((status & AD1816A_PLAYBACK_IRQ_PENDING) && chip->playback_substream)
		snd_pcm_period_elapsed(chip->playback_substream);

	if ((status & AD1816A_CAPTURE_IRQ_PENDING) && chip->capture_substream)
		snd_pcm_period_elapsed(chip->capture_substream);

	if ((status & AD1816A_TIMER_IRQ_PENDING) && chip->timer)
		snd_timer_interrupt(chip->timer, chip->timer->sticks);

	spin_lock(&chip->lock);
	/* Writing 0 acknowledges all pending interrupt sources. */
	snd_ad1816a_out(chip, AD1816A_INTERRUPT_STATUS, 0x00);
	spin_unlock(&chip->lock);
	return IRQ_HANDLED;
}
/* Hardware capabilities advertised for the playback substream; the
 * buffer/period maxima are clamped later per DMA channel width. */
static struct snd_pcm_hardware snd_ad1816a_playback = {
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		(SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW |
				 SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
				 SNDRV_PCM_FMTBIT_S16_BE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		55200,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};
/* Hardware capabilities advertised for the capture substream; same
 * limits as playback, clamped later per DMA channel width. */
static struct snd_pcm_hardware snd_ad1816a_capture = {
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		(SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW |
				 SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
				 SNDRV_PCM_FMTBIT_S16_BE),
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		55200,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};
/* snd_timer close callback: release the chip's timer mode. */
static int snd_ad1816a_timer_close(struct snd_timer *timer)
{
	struct snd_ad1816a *chip = snd_timer_chip(timer);
	snd_ad1816a_close(chip, AD1816A_MODE_TIMER);
	return 0;
}
/* snd_timer open callback: reserve the chip's timer mode.
 * NOTE(review): the snd_ad1816a_open() return value (-EAGAIN when the
 * mode is busy) is ignored here — confirm this is intentional. */
static int snd_ad1816a_timer_open(struct snd_timer *timer)
{
	struct snd_ad1816a *chip = snd_timer_chip(timer);
	snd_ad1816a_open(chip, AD1816A_MODE_TIMER);
	return 0;
}
/* snd_timer resolution callback: fixed 10000 per tick (presumably
 * nanoseconds, i.e. 10 us, per ALSA timer convention — matches the
 * .resolution field in snd_ad1816a_timer_table). */
static unsigned long snd_ad1816a_timer_resolution(struct snd_timer *timer)
{
	if (snd_BUG_ON(!timer))
		return 0;

	return 10000;
}
/*
 * snd_timer start callback: load the tick count and enable the chip
 * timer, but only if it is not already running (enable bit clear).
 */
static int snd_ad1816a_timer_start(struct snd_timer *timer)
{
	unsigned short bits;
	unsigned long flags;
	struct snd_ad1816a *chip = snd_timer_chip(timer);
	spin_lock_irqsave(&chip->lock, flags);
	bits = snd_ad1816a_read(chip, AD1816A_INTERRUPT_ENABLE);

	if (!(bits & AD1816A_TIMER_ENABLE)) {
		/* Program the 16-bit countdown before enabling. */
		snd_ad1816a_write(chip, AD1816A_TIMER_BASE_COUNT,
			timer->sticks & 0xffff);

		snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
			AD1816A_TIMER_ENABLE, 0xffff);
	}
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/* snd_timer stop callback: clear the enable bit to halt the timer. */
static int snd_ad1816a_timer_stop(struct snd_timer *timer)
{
	struct snd_ad1816a *chip = snd_timer_chip(timer);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	snd_ad1816a_write_mask(chip, AD1816A_INTERRUPT_ENABLE,
			       AD1816A_TIMER_ENABLE, 0x0000);
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/* Hardware description for the ALSA timer: auto-reloading, 16-bit
 * tick range, fixed resolution (see snd_ad1816a_timer_resolution). */
static struct snd_timer_hardware snd_ad1816a_timer_table = {
	.flags =	SNDRV_TIMER_HW_AUTO,
	.resolution =	10000,
	.ticks =	65535,
	.open =		snd_ad1816a_timer_open,
	.close =	snd_ad1816a_timer_close,
	.c_resolution =	snd_ad1816a_timer_resolution,
	.start =	snd_ad1816a_timer_start,
	.stop =		snd_ad1816a_timer_stop,
};
/*
 * PCM open callback for playback: claim playback mode, publish the
 * hardware limits (clamped to the DMA channel's reach) and remember
 * the substream for the interrupt handler.
 */
static int snd_ad1816a_playback_open(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = snd_ad1816a_open(chip, AD1816A_MODE_PLAYBACK);

	if (err < 0)
		return err;
	runtime->hw = snd_ad1816a_playback;
	snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.buffer_bytes_max);
	snd_pcm_limit_isa_dma_size(chip->dma1, &runtime->hw.period_bytes_max);
	chip->playback_substream = substream;
	return 0;
}
/*
 * PCM open callback for capture: claim capture mode, publish the
 * hardware limits (clamped to the DMA channel's reach) and remember
 * the substream for the interrupt handler.
 */
static int snd_ad1816a_capture_open(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = snd_ad1816a_open(chip, AD1816A_MODE_CAPTURE);

	if (err < 0)
		return err;
	runtime->hw = snd_ad1816a_capture;
	snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.buffer_bytes_max);
	snd_pcm_limit_isa_dma_size(chip->dma2, &runtime->hw.period_bytes_max);
	chip->capture_substream = substream;
	return 0;
}
/* PCM close callback for playback: forget the substream, then release
 * the playback mode (disables the playback IRQ). */
static int snd_ad1816a_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);

	chip->playback_substream = NULL;
	snd_ad1816a_close(chip, AD1816A_MODE_PLAYBACK);
	return 0;
}
/* PCM close callback for capture: forget the substream, then release
 * the capture mode (disables the capture IRQ). */
static int snd_ad1816a_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_ad1816a *chip = snd_pcm_substream_chip(substream);

	chip->capture_substream = NULL;
	snd_ad1816a_close(chip, AD1816A_MODE_CAPTURE);
	return 0;
}
/*
 * Put the chip into a known quiescent state: ack all interrupts, stop
 * both engines, disable all interrupt sources, enable WSS operation
 * and power the chip up.
 */
static void __devinit snd_ad1816a_init(struct snd_ad1816a *chip)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);

	/* Acknowledge any pending interrupts. */
	snd_ad1816a_out(chip, AD1816A_INTERRUPT_STATUS, 0x00);
	/* Stop both engines and force DMA (not PIO) mode. */
	snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG,
			     AD1816A_PLAYBACK_ENABLE | AD1816A_PLAYBACK_PIO, 0x00);
	snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG,
			     AD1816A_CAPTURE_ENABLE | AD1816A_CAPTURE_PIO, 0x00);
	/* Mask every interrupt source until a stream/timer is opened. */
	snd_ad1816a_write(chip, AD1816A_INTERRUPT_ENABLE, 0x0000);
	snd_ad1816a_write_mask(chip, AD1816A_CHIP_CONFIG,
			       AD1816A_CAPTURE_NOT_EQUAL | AD1816A_WSS_ENABLE, 0xffff);
	snd_ad1816a_write(chip, AD1816A_DSP_CONFIG, 0x0000);
	/* Clear all powerdown bits: chip fully powered. */
	snd_ad1816a_write(chip, AD1816A_POWERDOWN_CTRL, 0x0000);

	spin_unlock_irqrestore(&chip->lock, flags);
}
/*
 * Identify the chip variant from its version-ID register and record
 * both the raw version and the mapped hardware type.
 */
static int __devinit snd_ad1816a_probe(struct snd_ad1816a *chip)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	chip->version = snd_ad1816a_read(chip, AD1816A_VERSION_ID);

	switch (chip->version) {
	case 0:
		chip->hardware = AD1816A_HW_AD1815;
		break;
	case 1:
		chip->hardware = AD1816A_HW_AD18MAX10;
		break;
	case 3:
		chip->hardware = AD1816A_HW_AD1816A;
		break;
	default:
		chip->hardware = AD1816A_HW_AUTO;
		break;
	}

	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/*
 * Release every resource owned by the chip instance and free it.
 * Safe on a partially constructed chip: irq/dma fields stay -1 until
 * the matching resource has actually been acquired.
 */
static int snd_ad1816a_free(struct snd_ad1816a *chip)
{
	release_and_free_resource(chip->res_port);
	if (chip->irq >= 0)
		free_irq(chip->irq, (void *) chip);
	if (chip->dma1 >= 0) {
		snd_dma_disable(chip->dma1);
		free_dma(chip->dma1);
	}
	if (chip->dma2 >= 0) {
		snd_dma_disable(chip->dma2);
		free_dma(chip->dma2);
	}
	kfree(chip);
	return 0;
}
/* snd_device dev_free callback: tear down the chip instance. */
static int snd_ad1816a_dev_free(struct snd_device *device)
{
	return snd_ad1816a_free(device->device_data);
}
/* Human-readable name for the detected hardware variant. */
static const char __devinit *snd_ad1816a_chip_id(struct snd_ad1816a *chip)
{
	if (chip->hardware == AD1816A_HW_AD1816A)
		return "AD1816A";
	if (chip->hardware == AD1816A_HW_AD1815)
		return "AD1815";
	if (chip->hardware == AD1816A_HW_AD18MAX10)
		return "AD18max10";

	snd_printk(KERN_WARNING "Unknown chip version %d:%d.\n",
		   chip->version, chip->hardware);
	return "AD1816A - unknown";
}
/*
 * Allocate and initialise an AD1816A chip instance.
 *
 * Grabs the I/O region, IRQ and both DMA channels; on any failure the
 * partially initialised chip is torn down via snd_ad1816a_free() (the
 * irq/dma fields start at -1 so only acquired resources are freed).
 * On success *rchip points at the instance, which has been probed,
 * reset and registered as an ALSA low-level device.
 */
int __devinit snd_ad1816a_create(struct snd_card *card,
				 unsigned long port, int irq, int dma1, int dma2,
				 struct snd_ad1816a **rchip)
{
	static struct snd_device_ops ops = {
		.dev_free =	snd_ad1816a_dev_free,
	};
	int error;
	struct snd_ad1816a *chip;

	*rchip = NULL;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;
	/* Mark resources as not-yet-acquired for snd_ad1816a_free(). */
	chip->irq = -1;
	chip->dma1 = -1;
	chip->dma2 = -1;

	if ((chip->res_port = request_region(port, 16, "AD1816A")) == NULL) {
		snd_printk(KERN_ERR "ad1816a: can't grab port 0x%lx\n", port);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	if (request_irq(irq, snd_ad1816a_interrupt, 0, "AD1816A", (void *) chip)) {
		snd_printk(KERN_ERR "ad1816a: can't grab IRQ %d\n", irq);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	chip->irq = irq;
	if (request_dma(dma1, "AD1816A - 1")) {
		snd_printk(KERN_ERR "ad1816a: can't grab DMA1 %d\n", dma1);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	chip->dma1 = dma1;
	if (request_dma(dma2, "AD1816A - 2")) {
		snd_printk(KERN_ERR "ad1816a: can't grab DMA2 %d\n", dma2);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	chip->dma2 = dma2;

	chip->card = card;
	chip->port = port;
	spin_lock_init(&chip->lock);

	/* Detect the chip variant, then bring it to a quiescent state. */
	if ((error = snd_ad1816a_probe(chip))) {
		snd_ad1816a_free(chip);
		return error;
	}

	snd_ad1816a_init(chip);

	/* Register device */
	if ((error = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_ad1816a_free(chip);
		return error;
	}

	*rchip = chip;
	return 0;
}
/* PCM operation table for the playback direction. */
static struct snd_pcm_ops snd_ad1816a_playback_ops = {
	.open =		snd_ad1816a_playback_open,
	.close =	snd_ad1816a_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_ad1816a_hw_params,
	.hw_free =	snd_ad1816a_hw_free,
	.prepare =	snd_ad1816a_playback_prepare,
	.trigger =	snd_ad1816a_playback_trigger,
	.pointer =	snd_ad1816a_playback_pointer,
};
/* PCM operation table for the capture direction. */
static struct snd_pcm_ops snd_ad1816a_capture_ops = {
	.open =		snd_ad1816a_capture_open,
	.close =	snd_ad1816a_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_ad1816a_hw_params,
	.hw_free =	snd_ad1816a_hw_free,
	.prepare =	snd_ad1816a_capture_prepare,
	.trigger =	snd_ad1816a_capture_trigger,
	.pointer =	snd_ad1816a_capture_pointer,
};
/*
 * Create the PCM device (one playback + one capture substream).
 *
 * Duplex is flagged as "joint" only when both directions share a DMA
 * channel.  Buffers are preallocated from ISA DMA-able memory; 16-bit
 * DMA channels (>3) permit the larger 128 KiB maximum.
 */
int __devinit snd_ad1816a_pcm(struct snd_ad1816a *chip, int device, struct snd_pcm **rpcm)
{
	int error;
	struct snd_pcm *pcm;

	if ((error = snd_pcm_new(chip->card, "AD1816A", device, 1, 1, &pcm)))
		return error;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ad1816a_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ad1816a_capture_ops);

	pcm->private_data = chip;
	pcm->info_flags = (chip->dma1 == chip->dma2 ) ? SNDRV_PCM_INFO_JOINT_DUPLEX : 0;

	strcpy(pcm->name, snd_ad1816a_chip_id(chip));
	/* NOTE(review): the chip was already reset in snd_ad1816a_create();
	 * this second init looks redundant but is kept as-is. */
	snd_ad1816a_init(chip);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_isa_data(),
					      64*1024, chip->dma1 > 3 || chip->dma2 > 3 ? 128*1024 : 64*1024);

	chip->pcm = pcm;
	if (rpcm)
		*rpcm = pcm;
	return 0;
}
/*
 * Create the ALSA timer device backed by the chip's hardware timer
 * and hook it up to the chip instance.
 */
int __devinit snd_ad1816a_timer(struct snd_ad1816a *chip, int device, struct snd_timer **rtimer)
{
	struct snd_timer *timer;
	int err;
	struct snd_timer_id tid = {
		.dev_class = SNDRV_TIMER_CLASS_CARD,
		.dev_sclass = SNDRV_TIMER_SCLASS_NONE,
		.card = chip->card->number,
		.device = device,
		.subdevice = 0,
	};

	err = snd_timer_new(chip->card, "AD1816A", &tid, &timer);
	if (err < 0)
		return err;
	strcpy(timer->name, snd_ad1816a_chip_id(chip));
	timer->private_data = chip;
	chip->timer = timer;
	timer->hw = snd_ad1816a_timer_table;
	if (rtimer)
		*rtimer = timer;
	return 0;
}
/*
*
*/
/*
 * Control info callback for the capture source selector: two channels
 * (left/right), seven selectable sources each.
 *
 * Fix: the name table was declared 'static char *texts[8]' with only
 * seven initializers, leaving a stray NULL slot and mutable string
 * pointers; size it exactly and make it fully const.
 */
static int snd_ad1816a_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[7] = {
		"Line", "Mix", "CD", "Synth", "Video",
		"Mic", "Phone",
	};

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 2;
	uinfo->value.enumerated.items = 7;
	/* Clamp the requested item before indexing the name table. */
	if (uinfo->value.enumerated.item > 6)
		uinfo->value.enumerated.item = 6;
	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
	return 0;
}
/* Control get callback: read the ADC source-select register and split
 * it into the left (bits 14:12) and right (bits 6:4) selections. */
static int snd_ad1816a_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ad1816a *chip = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	unsigned short sel;

	spin_lock_irqsave(&chip->lock, flags);
	sel = snd_ad1816a_read(chip, AD1816A_ADC_SOURCE_SEL);
	spin_unlock_irqrestore(&chip->lock, flags);

	ucontrol->value.enumerated.item[0] = (sel >> 12) & 7;
	ucontrol->value.enumerated.item[1] = (sel >> 4) & 7;
	return 0;
}
/* Control put callback: validate both selections (0..6), pack them
 * into the source-select register and report whether it changed. */
static int snd_ad1816a_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ad1816a *chip = snd_kcontrol_chip(kcontrol);
	unsigned int left = ucontrol->value.enumerated.item[0];
	unsigned int right = ucontrol->value.enumerated.item[1];
	unsigned long flags;
	unsigned short sel;
	int changed;

	if (left > 6 || right > 6)
		return -EINVAL;
	sel = (left << 12) | (right << 4);

	spin_lock_irqsave(&chip->lock, flags);
	changed = snd_ad1816a_read(chip, AD1816A_ADC_SOURCE_SEL) != sel;
	snd_ad1816a_write(chip, AD1816A_ADC_SOURCE_SEL, sel);
	spin_unlock_irqrestore(&chip->lock, flags);
	return changed;
}
/* Build a single (mono) mixer control; private_value packs
 * reg | shift<<8 | mask<<16 | invert<<24.  The _TLV variant also
 * attaches dB-range metadata for user space. */
#define AD1816A_SINGLE_TLV(xname, reg, shift, mask, invert, xtlv) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
  .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
  .name = xname, .info = snd_ad1816a_info_single, \
  .get = snd_ad1816a_get_single, .put = snd_ad1816a_put_single, \
  .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24), \
  .tlv = { .p = (xtlv) } }

/* Same as AD1816A_SINGLE_TLV but without dB-range metadata. */
#define AD1816A_SINGLE(xname, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_ad1816a_info_single, \
  .get = snd_ad1816a_get_single, .put = snd_ad1816a_put_single, \
  .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) }
/* Control info callback for single controls: boolean when the mask is
 * a single bit, integer range 0..mask otherwise. */
static int snd_ad1816a_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	int max = (kcontrol->private_value >> 16) & 0xff;

	uinfo->type = (max == 1) ? SNDRV_CTL_ELEM_TYPE_BOOLEAN
				 : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = max;
	return 0;
}
/* Control get callback for single controls: extract the field from
 * the register and invert it when the control is an attenuator. */
static int snd_ad1816a_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ad1816a *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xff;
	int shift = (kcontrol->private_value >> 8) & 0xff;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	int invert = (kcontrol->private_value >> 24) & 0xff;
	unsigned long flags;
	long val;

	spin_lock_irqsave(&chip->lock, flags);
	val = (snd_ad1816a_read(chip, reg) >> shift) & mask;
	spin_unlock_irqrestore(&chip->lock, flags);

	if (invert)
		val = mask - val;
	ucontrol->value.integer.value[0] = val;
	return 0;
}
/* Control put callback for single controls: fold the new field value
 * into the register and report whether the register changed. */
static int snd_ad1816a_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ad1816a *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xff;
	int shift = (kcontrol->private_value >> 8) & 0xff;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	int invert = (kcontrol->private_value >> 24) & 0xff;
	unsigned short prev, next;
	unsigned long flags;
	int changed;

	next = ucontrol->value.integer.value[0] & mask;
	if (invert)
		next = mask - next;
	next <<= shift;

	spin_lock_irqsave(&chip->lock, flags);
	prev = snd_ad1816a_read(chip, reg);
	next |= prev & ~(mask << shift);
	changed = (next != prev);
	snd_ad1816a_write(chip, reg, next);
	spin_unlock_irqrestore(&chip->lock, flags);
	return changed;
}
/* Build a double (stereo) mixer control; private_value packs
 * reg | shift_left<<8 | shift_right<<12 | mask<<16 | invert<<24.
 * The _TLV variant also attaches dB-range metadata for user space. */
#define AD1816A_DOUBLE_TLV(xname, reg, shift_left, shift_right, mask, invert, xtlv) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
  .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
  .name = xname, .info = snd_ad1816a_info_double, \
  .get = snd_ad1816a_get_double, .put = snd_ad1816a_put_double, \
  .private_value = reg | (shift_left << 8) | (shift_right << 12) | (mask << 16) | (invert << 24), \
  .tlv = { .p = (xtlv) } }

/* Same as AD1816A_DOUBLE_TLV but without dB-range metadata. */
#define AD1816A_DOUBLE(xname, reg, shift_left, shift_right, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_ad1816a_info_double, \
  .get = snd_ad1816a_get_double, .put = snd_ad1816a_put_double, \
  .private_value = reg | (shift_left << 8) | (shift_right << 12) | (mask << 16) | (invert << 24) }
/* Control info callback for double controls: boolean when the mask is
 * a single bit, integer range 0..mask otherwise; two channels. */
static int snd_ad1816a_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	int max = (kcontrol->private_value >> 16) & 0xff;

	uinfo->type = (max == 1) ? SNDRV_CTL_ELEM_TYPE_BOOLEAN
				 : SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = max;
	return 0;
}
/* Control get callback for double controls: extract both channel
 * fields from one register read, inverting attenuator values. */
static int snd_ad1816a_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ad1816a *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xff;
	int shift_left = (kcontrol->private_value >> 8) & 0x0f;
	int shift_right = (kcontrol->private_value >> 12) & 0x0f;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	int invert = (kcontrol->private_value >> 24) & 0xff;
	unsigned long flags;
	unsigned short reg_val;
	long left, right;

	spin_lock_irqsave(&chip->lock, flags);
	reg_val = snd_ad1816a_read(chip, reg);
	spin_unlock_irqrestore(&chip->lock, flags);

	left = (reg_val >> shift_left) & mask;
	right = (reg_val >> shift_right) & mask;
	if (invert) {
		left = mask - left;
		right = mask - right;
	}
	ucontrol->value.integer.value[0] = left;
	ucontrol->value.integer.value[1] = right;
	return 0;
}
/* Control put callback for double controls: fold both channel fields
 * into the register and report whether the register changed. */
static int snd_ad1816a_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ad1816a *chip = snd_kcontrol_chip(kcontrol);
	int reg = kcontrol->private_value & 0xff;
	int shift_left = (kcontrol->private_value >> 8) & 0x0f;
	int shift_right = (kcontrol->private_value >> 12) & 0x0f;
	int mask = (kcontrol->private_value >> 16) & 0xff;
	int invert = (kcontrol->private_value >> 24) & 0xff;
	unsigned short prev, next, left, right;
	unsigned long flags;
	int changed;

	left = ucontrol->value.integer.value[0] & mask;
	right = ucontrol->value.integer.value[1] & mask;
	if (invert) {
		left = mask - left;
		right = mask - right;
	}
	next = (left << shift_left) | (right << shift_right);

	spin_lock_irqsave(&chip->lock, flags);
	prev = snd_ad1816a_read(chip, reg);
	next |= prev & ~((mask << shift_left) | (mask << shift_right));
	changed = (next != prev);
	snd_ad1816a_write(chip, reg, next);
	spin_unlock_irqrestore(&chip->lock, flags);
	return changed;
}
/* dB ranges for the attenuator widths used below: values are in
 * 1/100 dB (min, step); e.g. db_scale_4bit spans -45 dB in 3 dB steps. */
static const DECLARE_TLV_DB_SCALE(db_scale_4bit, -4500, 300, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit, -4650, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_6bit, -9450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit_12db_max, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_rec_gain, 0, 150, 0);
/* Mixer control table.  Most volume fields are hardware attenuators,
 * hence the trailing invert flag (1); TLV entries publish dB ranges. */
static struct snd_kcontrol_new snd_ad1816a_controls[] __devinitdata = {
AD1816A_DOUBLE("Master Playback Switch", AD1816A_MASTER_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("Master Playback Volume", AD1816A_MASTER_ATT, 8, 0, 31, 1,
		   db_scale_5bit),
AD1816A_DOUBLE("PCM Playback Switch", AD1816A_VOICE_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("PCM Playback Volume", AD1816A_VOICE_ATT, 8, 0, 63, 1,
		   db_scale_6bit),
AD1816A_DOUBLE("Line Playback Switch", AD1816A_LINE_GAIN_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("Line Playback Volume", AD1816A_LINE_GAIN_ATT, 8, 0, 31, 1,
		   db_scale_5bit_12db_max),
AD1816A_DOUBLE("CD Playback Switch", AD1816A_CD_GAIN_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("CD Playback Volume", AD1816A_CD_GAIN_ATT, 8, 0, 31, 1,
		   db_scale_5bit_12db_max),
AD1816A_DOUBLE("Synth Playback Switch", AD1816A_SYNTH_GAIN_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("Synth Playback Volume", AD1816A_SYNTH_GAIN_ATT, 8, 0, 31, 1,
		   db_scale_5bit_12db_max),
AD1816A_DOUBLE("FM Playback Switch", AD1816A_FM_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("FM Playback Volume", AD1816A_FM_ATT, 8, 0, 63, 1,
		   db_scale_6bit),
AD1816A_SINGLE("Mic Playback Switch", AD1816A_MIC_GAIN_ATT, 15, 1, 1),
AD1816A_SINGLE_TLV("Mic Playback Volume", AD1816A_MIC_GAIN_ATT, 8, 31, 1,
		   db_scale_5bit_12db_max),
AD1816A_SINGLE("Mic Boost", AD1816A_MIC_GAIN_ATT, 14, 1, 0),
AD1816A_DOUBLE("Video Playback Switch", AD1816A_VID_GAIN_ATT, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("Video Playback Volume", AD1816A_VID_GAIN_ATT, 8, 0, 31, 1,
		   db_scale_5bit_12db_max),
AD1816A_SINGLE("Phone Capture Switch", AD1816A_PHONE_IN_GAIN_ATT, 15, 1, 1),
AD1816A_SINGLE_TLV("Phone Capture Volume", AD1816A_PHONE_IN_GAIN_ATT, 0, 15, 1,
		   db_scale_4bit),
AD1816A_SINGLE("Phone Playback Switch", AD1816A_PHONE_OUT_ATT, 7, 1, 1),
AD1816A_SINGLE_TLV("Phone Playback Volume", AD1816A_PHONE_OUT_ATT, 0, 31, 1,
		   db_scale_5bit),
{
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Capture Source",
	.info = snd_ad1816a_info_mux,
	.get = snd_ad1816a_get_mux,
	.put = snd_ad1816a_put_mux,
},
AD1816A_DOUBLE("Capture Switch", AD1816A_ADC_PGA, 15, 7, 1, 1),
AD1816A_DOUBLE_TLV("Capture Volume", AD1816A_ADC_PGA, 8, 0, 15, 0,
		   db_scale_rec_gain),
AD1816A_SINGLE("3D Control - Switch", AD1816A_3D_PHAT_CTRL, 15, 1, 1),
AD1816A_SINGLE("3D Control - Level", AD1816A_3D_PHAT_CTRL, 0, 15, 0),
};
/*
 * Register the complete mixer control set on the chip's card and set
 * the card's mixer name from the detected chip variant.
 */
int __devinit snd_ad1816a_mixer(struct snd_ad1816a *chip)
{
	struct snd_card *card;
	unsigned int i;

	if (snd_BUG_ON(!chip || !chip->card))
		return -EINVAL;

	card = chip->card;
	strcpy(card->mixername, snd_ad1816a_chip_id(chip));

	for (i = 0; i < ARRAY_SIZE(snd_ad1816a_controls); i++) {
		int err = snd_ctl_add(card,
				      snd_ctl_new1(&snd_ad1816a_controls[i],
						   chip));
		if (err < 0)
			return err;
	}
	return 0;
}
| gpl-2.0 |
zzicewind/linux | drivers/net/wireless/b43/leds.c | 9555 | 9271 | /*
Broadcom B43 wireless driver
LED control
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43.h"
#include "leds.h"
#include "rfkill.h"
/* Light a LED by toggling its GPIO line; active-low LEDs light up
 * when the line is cleared. */
static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
			    bool activelow)
{
	u16 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
	u16 bit = 1 << led_index;

	if (activelow)
		ctl &= ~bit;
	else
		ctl |= bit;
	b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
}
/* Extinguish a LED by toggling its GPIO line; active-low LEDs go dark
 * when the line is set. */
static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index,
			     bool activelow)
{
	u16 ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
	u16 bit = 1 << led_index;

	if (activelow)
		ctl |= bit;
	else
		ctl &= ~bit;
	b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
}
/*
 * Reconcile a LED's hardware state with its requested brightness:
 * a LED is only lit when the radio is enabled and the requested
 * brightness is not LED_OFF.  No-op for unregistered LEDs.
 */
static void b43_led_update(struct b43_wldev *dev,
			   struct b43_led *led)
{
	bool radio_enabled;
	bool turn_on;

	if (!led->wl)
		return;

	radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable);

	/* The led->state read is racy, but we don't care. In case we raced
	 * with the brightness_set handler, we will be called again soon
	 * to fixup our state. */
	if (radio_enabled)
		turn_on = atomic_read(&led->state) != LED_OFF;
	else
		turn_on = false;
	/* Skip the GPIO access when nothing changed. */
	if (turn_on == led->hw_state)
		return;
	led->hw_state = turn_on;

	if (turn_on)
		b43_led_turn_on(dev, led->index, led->activelow);
	else
		b43_led_turn_off(dev, led->index, led->activelow);
}
/*
 * Deferred worker that pushes the software LED state to the hardware.
 * Runs under wl->mutex; bails out when the device is gone or not yet
 * started.
 */
static void b43_leds_work(struct work_struct *work)
{
	struct b43_leds *leds = container_of(work, struct b43_leds, work);
	struct b43_wl *wl = container_of(leds, struct b43_wl, leds);
	struct b43_wldev *dev;

	mutex_lock(&wl->mutex);
	dev = wl->current_dev;
	if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED))
		goto out_unlock;

	b43_led_update(dev, &wl->leds.led_tx);
	b43_led_update(dev, &wl->leds.led_rx);
	b43_led_update(dev, &wl->leds.led_radio);
	b43_led_update(dev, &wl->leds.led_assoc);

out_unlock:
	mutex_unlock(&wl->mutex);
}
/* Callback from the LED subsystem: record the requested brightness
 * and defer the hardware update to the workqueue (this may be called
 * from atomic context). */
static void b43_led_brightness_set(struct led_classdev *led_dev,
				   enum led_brightness brightness)
{
	struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
	struct b43_wl *wl = led->wl;

	if (unlikely(wl->leds.stop))
		return;
	atomic_set(&led->state, brightness);
	ieee80211_queue_work(wl->hw, &wl->leds.work);
}
/*
 * Register one LED with the LED class subsystem.
 *
 * Returns -EEXIST if this b43_led is already registered (led->wl set),
 * -EINVAL without a default trigger, or the led_classdev_register()
 * error.  On failure led->wl is reset so the slot can be reused.
 */
static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
			    const char *name, const char *default_trigger,
			    u8 led_index, bool activelow)
{
	int err;

	if (led->wl)
		return -EEXIST;
	if (!default_trigger)
		return -EINVAL;
	led->wl = dev->wl;
	led->index = led_index;
	led->activelow = activelow;
	strncpy(led->name, name, sizeof(led->name));
	/* Fix: strncpy() does not NUL-terminate when the source fills
	 * the buffer; terminate explicitly so led->name is always a
	 * valid C string. */
	led->name[sizeof(led->name) - 1] = '\0';
	atomic_set(&led->state, 0);

	led->led_dev.name = led->name;
	led->led_dev.default_trigger = default_trigger;
	led->led_dev.brightness_set = b43_led_brightness_set;

	err = led_classdev_register(dev->dev->dev, &led->led_dev);
	if (err) {
		b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
		led->wl = NULL;
		return err;
	}

	return 0;
}
/* Unregister a LED from the LED class; a NULL wl marks a LED that was
 * never (or is no longer) registered. */
static void b43_unregister_led(struct b43_led *led)
{
	if (led->wl) {
		led_classdev_unregister(&led->led_dev);
		led->wl = NULL;
	}
}
/*
 * Map one SPROM-described LED behaviour onto the generic mac80211 LED
 * triggers (tx/rx, radio, assoc) and register the matching LED class
 * devices.  Static behaviours (off/on/inactive) get no trigger.
 */
static void b43_map_led(struct b43_wldev *dev,
			u8 led_index,
			enum b43_led_behaviour behaviour,
			bool activelow)
{
	struct ieee80211_hw *hw = dev->wl->hw;
	char name[B43_LED_MAX_NAME_LEN + 1];

	/* Map the b43 specific LED behaviour value to the
	 * generic LED triggers. */
	switch (behaviour) {
	case B43_LED_INACTIVE:
	case B43_LED_OFF:
	case B43_LED_ON:
		break;
	case B43_LED_ACTIVITY:
	case B43_LED_TRANSFER:
	case B43_LED_APTRANSFER:
		/* Activity behaviours drive two LEDs: TX and RX. */
		snprintf(name, sizeof(name),
			 "b43-%s::tx", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_tx, name,
				 ieee80211_get_tx_led_name(hw),
				 led_index, activelow);
		snprintf(name, sizeof(name),
			 "b43-%s::rx", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_rx, name,
				 ieee80211_get_rx_led_name(hw),
				 led_index, activelow);
		break;
	case B43_LED_RADIO_ALL:
	case B43_LED_RADIO_A:
	case B43_LED_RADIO_B:
	case B43_LED_MODE_BG:
		snprintf(name, sizeof(name),
			 "b43-%s::radio", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_radio, name,
				 ieee80211_get_radio_led_name(hw),
				 led_index, activelow);
		break;
	case B43_LED_WEIRD:
	case B43_LED_ASSOC:
		snprintf(name, sizeof(name),
			 "b43-%s::assoc", wiphy_name(hw->wiphy));
		b43_register_led(dev, &dev->wl->leds.led_assoc, name,
				 ieee80211_get_assoc_led_name(hw),
				 led_index, activelow);
		break;
	default:
		b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n",
			behaviour);
		break;
	}
}
/* Look up the behaviour and polarity of LED 'led_index' from the SPROM
 * GPIO bytes.  When the SPROM has no information for this LED (0xFF),
 * fall back to hardcoded per-index defaults (with vendor quirks). */
static void b43_led_get_sprominfo(struct b43_wldev *dev,
				  unsigned int led_index,
				  enum b43_led_behaviour *behaviour,
				  bool *activelow)
{
	u8 sprom[4];

	/* Fix: validate led_index BEFORE using it as an index.  The
	 * original code read sprom[led_index] first, which is an
	 * out-of-bounds stack read for led_index > 3 (its default case
	 * only ran after the bad access). */
	if (led_index >= ARRAY_SIZE(sprom)) {
		*behaviour = B43_LED_OFF;
		*activelow = false;
		B43_WARN_ON(1);
		return;
	}

	sprom[0] = dev->dev->bus_sprom->gpio0;
	sprom[1] = dev->dev->bus_sprom->gpio1;
	sprom[2] = dev->dev->bus_sprom->gpio2;
	sprom[3] = dev->dev->bus_sprom->gpio3;

	if (sprom[led_index] == 0xFF) {
		/* There is no LED information in the SPROM
		 * for this LED. Hardcode it here. */
		*activelow = false;
		switch (led_index) {
		case 0:
			*behaviour = B43_LED_ACTIVITY;
			*activelow = true;
			if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
				*behaviour = B43_LED_RADIO_ALL;
			break;
		case 1:
			*behaviour = B43_LED_RADIO_B;
			if (dev->dev->board_vendor == PCI_VENDOR_ID_ASUSTEK)
				*behaviour = B43_LED_ASSOC;
			break;
		case 2:
			*behaviour = B43_LED_RADIO_A;
			break;
		case 3:
			*behaviour = B43_LED_OFF;
			break;
		}
	} else {
		*behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
		*activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
	}
}
void b43_leds_init(struct b43_wldev *dev)
{
struct b43_led *led;
unsigned int i;
enum b43_led_behaviour behaviour;
bool activelow;
/* Sync the RF-kill LED state (if we have one) with radio and switch states. */
led = &dev->wl->leds.led_radio;
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
led->hw_state = true;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
}
/* Initialize TX/RX/ASSOC leds */
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
/* Initialize other LED states. */
for (i = 0; i < B43_MAX_NR_LEDS; i++) {
b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
switch (behaviour) {
case B43_LED_OFF:
b43_led_turn_off(dev, i, activelow);
break;
case B43_LED_ON:
b43_led_turn_on(dev, i, activelow);
break;
default:
/* Leave others as-is. */
break;
}
}
dev->wl->leds.stop = 0;
}
void b43_leds_exit(struct b43_wldev *dev)
{
struct b43_leds *leds = &dev->wl->leds;
b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow);
b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow);
b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow);
b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow);
}
/* Stop LED processing: block further brightness updates, then wait
 * for any in-flight LED work item to finish. */
void b43_leds_stop(struct b43_wldev *dev)
{
	dev->wl->leds.stop = 1;
	cancel_work_sync(&dev->wl->leds.work);
}
/* Register all LEDs with the LED subsystem: set up the deferred-work
 * handler, then look each LED's behaviour up in the SPROM and map it
 * to the matching mac80211 trigger. */
void b43_leds_register(struct b43_wldev *dev)
{
	enum b43_led_behaviour behaviour;
	bool activelow;
	unsigned int idx;

	INIT_WORK(&dev->wl->leds.work, b43_leds_work);

	for (idx = 0; idx < B43_MAX_NR_LEDS; idx++) {
		b43_led_get_sprominfo(dev, idx, &behaviour, &activelow);
		b43_map_led(dev, idx, behaviour, activelow);
	}
}
/* Unregister every LED from the LED class layer; LEDs that were never
 * registered are skipped by b43_unregister_led(). */
void b43_leds_unregister(struct b43_wl *wl)
{
	struct b43_leds *leds = &wl->leds;
	struct b43_led *all[] = {
		&leds->led_tx, &leds->led_rx,
		&leds->led_assoc, &leds->led_radio,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(all); i++)
		b43_unregister_led(all[i]);
}
| gpl-2.0 |
jwpi/glibc | resource/tst-getrlimit.c | 84 | 2467 | #include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
/* Table of rlimit resources to probe.  Each entry gives the resource's
   printable name, its RLIMIT_* constant, and whether getrlimit support
   is mandatory ("required") or an optional extension.  */
static struct
{
  const char *name;
  int resource;
  bool required;
} tests[] =
  {
    /* The following 7 limits are part of POSIX and must exist.  */
    { "RLIMIT_CORE", RLIMIT_CORE, true },
    { "RLIMIT_CPU", RLIMIT_CPU, true },
    { "RLIMIT_DATA", RLIMIT_DATA, true },
    { "RLIMIT_FSIZE", RLIMIT_FSIZE, true },
    { "RLIMIT_NOFILE", RLIMIT_NOFILE, true },
    { "RLIMIT_STACK", RLIMIT_STACK, true },
    { "RLIMIT_AS", RLIMIT_AS, true },
    /* The following are traditional Unix limits which are also
       expected (by us).  */
    { "RLIMIT_RSS", RLIMIT_RSS, true },
    { "RLIMIT_NPROC", RLIMIT_NPROC, true },
    /* The following are extensions; only probed when the platform
       defines the corresponding RLIMIT_* constant.  */
#ifdef RLIMIT_MEMLOCK
    { "RLIMIT_MEMLOCK", RLIMIT_MEMLOCK, false },
#endif
#ifdef RLIMIT_LOCKS
    { "RLIMIT_LOCKS", RLIMIT_LOCKS, false },
#endif
#ifdef RLIMIT_SIGPENDING
    { "RLIMIT_SIGPENDING", RLIMIT_SIGPENDING, false },
#endif
#ifdef RLIMIT_MSGQUEUE
    { "RLIMIT_MSGQUEUE", RLIMIT_MSGQUEUE, false },
#endif
#ifdef RLIMIT_NICE
    { "RLIMIT_NICE", RLIMIT_NICE, false },
#endif
#ifdef RLIMIT_RTPRIO
    { "RLIMIT_RTPRIO", RLIMIT_RTPRIO, false },
#endif
  };
#define ntests (sizeof (tests) / sizeof (tests[0]))
/* Probe every limit in TESTS with both getrlimit and getrlimit64.
   Required limits must be available; optional ones may legitimately
   fail with EINVAL.  Returns 0 when all checks pass, 1 otherwise.  */
static int
do_test (void)
{
  int status = 0;

  /* size_t index avoids the signed/unsigned comparison against the
     sizeof-derived ntests.  */
  for (size_t i = 0; i < ntests; ++i)
    {
      bool this_ok = true;

      struct rlimit r;
      int res = getrlimit (tests[i].resource, &r);
      if (res == -1)
	{
	  if (errno == EINVAL)
	    {
	      /* EINVAL is only acceptable for optional limits.  */
	      if (tests[i].required)
		{
		  /* Message fixed: said "expectedly" where it means
		     "unexpectedly" (a required limit failed).  */
		  printf ("limit %s unexpectedly not available for getrlimit\n",
			  tests[i].name);
		  status = 1;
		  this_ok = false;
		}
	    }
	  else
	    {
	      printf ("getrlimit for %s returned unexpected error: %m\n",
		      tests[i].name);
	      status = 1;
	      this_ok = false;
	    }
	}

      struct rlimit64 r64;
      res = getrlimit64 (tests[i].resource, &r64);
      if (res == -1)
	{
	  if (errno == EINVAL)
	    {
	      if (tests[i].required)
		{
		  printf ("limit %s unexpectedly not available for getrlimit64"
			  "\n", tests[i].name);
		  status = 1;
		  this_ok = false;
		}
	    }
	  else
	    {
	      printf ("getrlimit64 for %s returned unexpected error: %m\n",
		      tests[i].name);
	      status = 1;
	      this_ok = false;
	    }
	}

      if (this_ok)
	printf ("limit %s OK\n", tests[i].name);
    }

  return status;
}
#define TEST_FUNCTION do_test ()
#include "../test-skeleton.c"
| gpl-2.0 |
bedalus/moggy | drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 84 | 57503 | /* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "MSM-CPP %s:%d " fmt, __func__, __LINE__
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/ion.h>
#include <linux/proc_fs.h>
#include <linux/msm_ion.h>
#include <linux/iommu.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <mach/clk.h>
#include <mach/iommu_domains.h>
#include <mach/iommu.h>
#include <mach/vreg.h>
#include <media/msm_isp.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/msmb_camera.h>
#include <media/msmb_generic_buf_mgr.h>
#include <media/msmb_pproc.h>
#include "msm_cpp.h"
#include "msm_isp_util.h"
#include "msm_camera_io_util.h"
#include <linux/debugfs.h>
#define MSM_CPP_DRV_NAME "msm_cpp"
#define MSM_CPP_MAX_BUFF_QUEUE 16
#define CONFIG_MSM_CPP_DBG 0
#define CPP_CMD_TIMEOUT_MS 300
#define MSM_MICRO_IFACE_CLK_IDX 7
/* Context attached to the CPP frame watchdog: the device and the frame
 * currently being processed by the hardware. */
struct msm_cpp_timer_data_t {
	struct cpp_device *cpp_dev;
	struct msm_cpp_frame_info_t *processed_frame;
};

/* Watchdog used to detect a CPP frame that never completes.
 * 'used' is non-zero while a frame is in flight (cleared by the
 * tasklet on FRAME_ACK/NACK, see msm_cpp_do_tasklet). */
struct msm_cpp_timer_t {
	atomic_t used;
	struct msm_cpp_timer_data_t data;
	struct timer_list cpp_timer;
};

/* Single global watchdog instance shared by the driver. */
struct msm_cpp_timer_t cpp_timer;
/* dump the frame command before writing to the hardware */
#define MSM_CPP_DUMP_FRM_CMD 0
static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info);
#if CONFIG_MSM_CPP_DBG
#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
#else
#define CPP_DBG(fmt, args...) pr_debug(fmt, ##args)
#endif
#define ERR_USER_COPY(to) pr_err("copy %s user\n", \
((to) ? "to" : "from"))
#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
/* Atomically pop the oldest msm_queue_cmd off 'queue' under its
 * spinlock.  'member' is the name of the list_head field inside
 * struct msm_queue_cmd.  Evaluates to the command pointer, or 0 when
 * the queue is empty. */
#define msm_dequeue(queue, member) ({ \
	unsigned long flags; \
	struct msm_device_queue *__q = (queue); \
	struct msm_queue_cmd *qcmd = 0; \
	spin_lock_irqsave(&__q->lock, flags); \
	if (!list_empty(&__q->list)) { \
		__q->len--; \
		qcmd = list_first_entry(&__q->list, \
			struct msm_queue_cmd, member); \
		list_del_init(&qcmd->member); \
	} \
	spin_unlock_irqrestore(&__q->lock, flags); \
	qcmd; \
})
/* Initialize an msm_device_queue: empty list, zeroed counters, and a
 * wait queue for consumers. */
static void msm_queue_init(struct msm_device_queue *queue, const char *name)
{
	CPP_DBG("E\n");
	spin_lock_init(&queue->lock);
	INIT_LIST_HEAD(&queue->list);
	init_waitqueue_head(&queue->wait);
	queue->name = name;
	queue->len = 0;
	queue->max = 0;
}
/* Append 'entry' to 'queue' under the queue spinlock and wake any
 * waiter.  Also logs whenever the queue length reaches a new
 * high-water mark. */
static void msm_enqueue(struct msm_device_queue *queue,
	struct list_head *entry)
{
	unsigned long flags;
	spin_lock_irqsave(&queue->lock, flags);
	queue->len++;
	if (queue->len > queue->max) {
		queue->max = queue->len;
		pr_info("queue %s new max is %d\n", queue->name, queue->max);
	}
	list_add_tail(entry, &queue->list);
	/* Wake-up is issued while still holding the lock; the waiter
	 * re-acquires it before touching the list. */
	wake_up(&queue->wait);
	CPP_DBG("woke up %s\n", queue->name);
	spin_unlock_irqrestore(&queue->lock, flags);
}
/* Clock table for msm_cam_clk_enable(); a rate of -1 leaves the clock
 * at its current/default rate.  The position of "micro_iface_clk"
 * (index 7) must stay in sync with MSM_MICRO_IFACE_CLK_IDX. */
static struct msm_cam_clk_info cpp_clk_info[] = {
	{"camss_top_ahb_clk", -1},
	{"vfe_clk_src", 266670000},
	{"camss_vfe_vfe_clk", -1},
	{"iface_clk", -1},
	{"cpp_core_clk", 266670000},
	{"cpp_iface_clk", -1},
	{"cpp_bus_clk", -1},
	{"micro_iface_clk", -1},
};
/* Drain 'queue' under its spinlock, kfree()ing every queued
 * msm_queue_cmd.  'member' is the list_head field name inside
 * struct msm_queue_cmd.  Safe to invoke with a NULL queue. */
#define msm_cpp_empty_list(queue, member) { \
	unsigned long flags; \
	struct msm_queue_cmd *qcmd = NULL; \
	if (queue) { \
		spin_lock_irqsave(&queue->lock, flags); \
		while (!list_empty(&queue->list)) { \
			queue->len--; \
			qcmd = list_first_entry(&queue->list, \
				struct msm_queue_cmd, member); \
			list_del_init(&qcmd->member); \
			kfree(qcmd); \
		} \
		spin_unlock_irqrestore(&queue->lock, flags); \
	} \
}
static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev);
static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin);
static void cpp_timer_callback(unsigned long data);
uint8_t induce_error;
static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
/* Push one 32-bit word into the CPP microcontroller's RX FIFO. */
static void msm_cpp_write(u32 data, void __iomem *cpp_base)
{
	writel_relaxed((data), cpp_base + MSM_CPP_MICRO_FIFO_RX_DATA);
}
/* Pop one word from the CPP microcontroller's TX FIFO.
 * Polls the data-ready bit (bit 1 of the TX status register) up to
 * 10 retries; returns 0xDEADBEEF as an in-band "no data" sentinel
 * when the FIFO never becomes ready. */
static uint32_t msm_cpp_read(void __iomem *cpp_base)
{
	uint32_t tmp, retry = 0;
	do {
		tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_STAT);
	} while (((tmp & 0x2) == 0x0) && (retry++ < 10)) ;
	/* retry stays < 10 only when the ready bit was seen in time
	 * (short-circuit: retry++ is skipped once the bit is set). */
	if (retry < 10) {
		tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_DATA);
		CPP_DBG("Read data: 0%x\n", tmp);
	} else {
		CPP_DBG("Read failed\n");
		tmp = 0xDEADBEEF;
	}
	return tmp;
}
/* Find the in-use buffer queue matching (session_id, stream_id).
 * Returns NULL (after logging an error) when no such queue exists. */
static struct msm_cpp_buff_queue_info_t *msm_cpp_get_buff_queue_entry(
	struct cpp_device *cpp_dev, uint32_t session_id, uint32_t stream_id)
{
	uint32_t i;

	for (i = 0; i < cpp_dev->num_buffq; i++) {
		struct msm_cpp_buff_queue_info_t *q = &cpp_dev->buff_queue[i];

		if (q->used == 1 &&
		    q->session_id == session_id &&
		    q->stream_id == stream_id)
			return q;
	}
	pr_err("error buffer queue entry for sess:%d strm:%d not found\n",
		session_id, stream_id);
	return NULL;
}
/* Look up the mapped address of buffer 'buff_index' on the native or
 * vb2 list of 'buff_queue_info'.  On a hit, also reports the buffer's
 * ion fd through *fd.  Returns 0 when the buffer is not mapped yet. */
static unsigned long msm_cpp_get_phy_addr(struct cpp_device *cpp_dev,
	struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
	uint8_t native_buff, int *fd)
{
	struct msm_cpp_buffer_map_list_t *buf, *next;
	struct list_head *head;

	head = native_buff ? &buff_queue_info->native_buff_head :
			     &buff_queue_info->vb2_buff_head;

	list_for_each_entry_safe(buf, next, head, entry) {
		if (buf->map_info.buff_info.index == buff_index) {
			*fd = buf->map_info.buff_info.fd;
			return buf->map_info.phy_addr;
		}
	}
	return 0;
}
/* Import a buffer's ion fd, map it into the CPP IOMMU domain and add
 * it to the queue's buffer list.  Returns the mapped address, or 0 on
 * any failure.
 *
 * Fix: the original returned -EINVAL on the duplicate-index and
 * allocation-failure paths, but the return type is unsigned long and
 * callers (msm_cpp_fetch_buffer_info) test for == 0, so
 * (unsigned long)-EINVAL was misread as a valid mapped address.  All
 * error paths now return 0, matching the ion-failure paths below. */
static unsigned long msm_cpp_queue_buffer_info(struct cpp_device *cpp_dev,
	struct msm_cpp_buff_queue_info_t *buff_queue,
	struct msm_cpp_buffer_info_t *buffer_info)
{
	struct list_head *buff_head;
	struct msm_cpp_buffer_map_list_t *buff, *save;
	int rc = 0;

	if (buffer_info->native_buff)
		buff_head = &buff_queue->native_buff_head;
	else
		buff_head = &buff_queue->vb2_buff_head;

	/* Reject a buffer index that is already mapped on this list. */
	list_for_each_entry_safe(buff, save, buff_head, entry) {
		if (buff->map_info.buff_info.index == buffer_info->index) {
			pr_err("error buffer index already queued\n");
			return 0;
		}
	}

	buff = kzalloc(
		sizeof(struct msm_cpp_buffer_map_list_t), GFP_KERNEL);
	if (!buff) {
		pr_err("error allocating memory\n");
		return 0;
	}

	buff->map_info.buff_info = *buffer_info;
	buff->map_info.ion_handle = ion_import_dma_buf(cpp_dev->client,
		buffer_info->fd);
	if (IS_ERR_OR_NULL(buff->map_info.ion_handle)) {
		pr_err("ION import failed\n");
		goto QUEUE_BUFF_ERROR1;
	}
	rc = ion_map_iommu(cpp_dev->client, buff->map_info.ion_handle,
		cpp_dev->domain_num, 0, SZ_4K, 0,
		(unsigned long *)&buff->map_info.phy_addr,
		&buff->map_info.len, 0, 0);
	if (rc < 0) {
		pr_err("ION mmap failed\n");
		goto QUEUE_BUFF_ERROR2;
	}

	INIT_LIST_HEAD(&buff->entry);
	list_add_tail(&buff->entry, buff_head);

	return buff->map_info.phy_addr;

QUEUE_BUFF_ERROR2:
	ion_free(cpp_dev->client, buff->map_info.ion_handle);
QUEUE_BUFF_ERROR1:
	buff->map_info.ion_handle = NULL;
	kzfree(buff);
	return 0;
}
/* Undo msm_cpp_queue_buffer_info(): unmap the buffer from the IOMMU,
 * release its ion handle, unlink it from its buffer list and free the
 * tracking node.  Order matters: unmap before freeing the handle. */
static void msm_cpp_dequeue_buffer_info(struct cpp_device *cpp_dev,
	struct msm_cpp_buffer_map_list_t *buff)
{
	ion_unmap_iommu(cpp_dev->client, buff->map_info.ion_handle,
		cpp_dev->domain_num, 0);
	ion_free(cpp_dev->client, buff->map_info.ion_handle);
	buff->map_info.ion_handle = NULL;
	list_del_init(&buff->entry);
	kzfree(buff);
	return;
}
/* Resolve a buffer's mapped address for (session_id, stream_id).
 * Native buffers that are not yet mapped are mapped lazily here.
 * Returns 0 when the queue or buffer cannot be found/mapped. */
static unsigned long msm_cpp_fetch_buffer_info(struct cpp_device *cpp_dev,
	struct msm_cpp_buffer_info_t *buffer_info, uint32_t session_id,
	uint32_t stream_id, int *fd)
{
	struct msm_cpp_buff_queue_info_t *queue;
	unsigned long phy_addr;

	queue = msm_cpp_get_buff_queue_entry(cpp_dev, session_id, stream_id);
	if (!queue) {
		pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
			session_id, stream_id);
		return 0;
	}

	phy_addr = msm_cpp_get_phy_addr(cpp_dev, queue, buffer_info->index,
		buffer_info->native_buff, fd);
	if (phy_addr == 0 && buffer_info->native_buff) {
		/* First use of a native buffer: map it now. */
		phy_addr = msm_cpp_queue_buffer_info(cpp_dev, queue,
			buffer_info);
		*fd = buffer_info->fd;
	}
	return phy_addr;
}
/* Map every buffer described in 'stream_buff_info' onto the queue
 * identified by its packed identity (session in the high 16 bits,
 * stream in the low 16).  Returns -EINVAL if the queue is unknown. */
static int32_t msm_cpp_enqueue_buff_info_list(struct cpp_device *cpp_dev,
	struct msm_cpp_stream_buff_info_t *stream_buff_info)
{
	struct msm_cpp_buff_queue_info_t *queue;
	uint32_t session = (stream_buff_info->identity >> 16) & 0xFFFF;
	uint32_t stream = stream_buff_info->identity & 0xFFFF;
	uint32_t i;

	queue = msm_cpp_get_buff_queue_entry(cpp_dev, session, stream);
	if (!queue) {
		pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
			session, stream);
		return -EINVAL;
	}
	for (i = 0; i < stream_buff_info->num_buffs; i++)
		msm_cpp_queue_buffer_info(cpp_dev, queue,
			&stream_buff_info->buffer_info[i]);
	return 0;
}
static int32_t msm_cpp_dequeue_buff_info_list(struct cpp_device *cpp_dev,
struct msm_cpp_buff_queue_info_t *buff_queue_info)
{
struct msm_cpp_buffer_map_list_t *buff, *save;
struct list_head *buff_head;
buff_head = &buff_queue_info->native_buff_head;
list_for_each_entry_safe(buff, save, buff_head, entry) {
msm_cpp_dequeue_buffer_info(cpp_dev, buff);
}
buff_head = &buff_queue_info->vb2_buff_head;
list_for_each_entry_safe(buff, save, buff_head, entry) {
msm_cpp_dequeue_buffer_info(cpp_dev, buff);
}
return 0;
}
/* Claim the first free slot in the buffer-queue table for the given
 * (session_id, stream_id) pair.  Returns -EINVAL when all slots are
 * in use. */
static int32_t msm_cpp_add_buff_queue_entry(struct cpp_device *cpp_dev,
	uint16_t session_id, uint16_t stream_id)
{
	uint32_t i;

	for (i = 0; i < cpp_dev->num_buffq; i++) {
		struct msm_cpp_buff_queue_info_t *q = &cpp_dev->buff_queue[i];

		if (q->used != 0)
			continue;
		q->used = 1;
		q->session_id = session_id;
		q->stream_id = stream_id;
		INIT_LIST_HEAD(&q->vb2_buff_head);
		INIT_LIST_HEAD(&q->native_buff_head);
		return 0;
	}
	pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
		session_id, stream_id);
	return -EINVAL;
}
/* Return the buffer-queue slot for (session_id, stream_id) to the
 * free pool, resetting its identity and buffer lists.
 * Returns -EINVAL if no matching slot exists. */
static int32_t msm_cpp_free_buff_queue_entry(struct cpp_device *cpp_dev,
	uint32_t session_id, uint32_t stream_id)
{
	struct msm_cpp_buff_queue_info_t *q;

	q = msm_cpp_get_buff_queue_entry(cpp_dev, session_id, stream_id);
	if (q == NULL) {
		pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
			session_id, stream_id);
		return -EINVAL;
	}

	q->used = 0;
	q->session_id = 0;
	q->stream_id = 0;
	INIT_LIST_HEAD(&q->vb2_buff_head);
	INIT_LIST_HEAD(&q->native_buff_head);
	return 0;
}
/* Allocate the buffer-queue table with 'num_buffq' zeroed slots.
 * Fails with -EINVAL if a table is already installed, -ENOMEM on
 * allocation failure.
 *
 * Improvements over the original: the "already installed" check now
 * runs BEFORE the allocation (the old code allocated first and threw
 * the memory away on that path), and kcalloc() replaces the open-coded
 * kzalloc(size * n) so the element-count multiplication is
 * overflow-checked. */
static int32_t msm_cpp_create_buff_queue(struct cpp_device *cpp_dev,
	uint32_t num_buffq)
{
	struct msm_cpp_buff_queue_info_t *buff_queue;

	if (cpp_dev->buff_queue) {
		pr_err("Buff queue not empty\n");
		return -EINVAL;
	}

	buff_queue = kcalloc(num_buffq,
		sizeof(struct msm_cpp_buff_queue_info_t),
		GFP_KERNEL);
	if (!buff_queue) {
		pr_err("Buff queue allocation failure\n");
		return -ENOMEM;
	}

	cpp_dev->buff_queue = buff_queue;
	cpp_dev->num_buffq = num_buffq;
	return 0;
}
static void msm_cpp_delete_buff_queue(struct cpp_device *cpp_dev)
{
uint32_t i;
for (i = 0; i < cpp_dev->num_buffq; i++) {
if (cpp_dev->buff_queue[i].used == 1) {
pr_err("Queue not free sessionid: %d, streamid: %d\n",
cpp_dev->buff_queue[i].session_id,
cpp_dev->buff_queue[i].stream_id);
msm_cpp_dequeue_buff_info_list
(cpp_dev, &cpp_dev->buff_queue[i]);
msm_cpp_free_buff_queue_entry(cpp_dev,
cpp_dev->buff_queue[i].session_id,
cpp_dev->buff_queue[i].stream_id);
}
}
kzfree(cpp_dev->buff_queue);
cpp_dev->buff_queue = NULL;
cpp_dev->num_buffq = 0;
return;
}
/* Sleep-poll the micro TX FIFO until it yields 'val'.
 * Sleeps 1-2ms between reads and gives up after MSM_CPP_POLL_RETRIES
 * attempts (logging the expected value).  0xDEADBEEF (FIFO-empty
 * sentinel from msm_cpp_read) is silently retried. */
static void msm_cpp_poll(void __iomem *cpp_base, u32 val)
{
	uint32_t tmp, retry = 0;
	do {
		usleep_range(1000, 2000);
		tmp = msm_cpp_read(cpp_base);
		if (tmp != 0xDEADBEEF)
			CPP_DBG("poll: 0%x\n", tmp);
	} while ((tmp != val) && (retry++ < MSM_CPP_POLL_RETRIES));
	if (retry < MSM_CPP_POLL_RETRIES)
		CPP_DBG("Poll finished\n");
	else
		pr_err("Poll failed: expect: 0x%x\n", val);
}
/* kref release callback: destroys the ion client once the last
 * reference taken in cpp_init_mem() (or by buffer users) is dropped
 * via kref_put(). */
void cpp_release_ion_client(struct kref *ref)
{
	struct cpp_device *cpp_dev = container_of(ref,
		struct cpp_device, refcount);
	pr_err("Calling ion_client_destroy\n");
	ion_client_destroy(cpp_dev->client);
}
/* Set up the ion client used to map CPP buffers.
 * Returns -ENODEV when the IOMMU domain was not discovered at probe
 * time, 0 otherwise.
 *
 * Fix: the domain check now runs BEFORE the ion client is created and
 * the krefs are taken; the original created the client and took two
 * references first, leaking them on the -ENODEV path (the client was
 * never destroyed because the refcount could not reach zero). */
static int cpp_init_mem(struct cpp_device *cpp_dev)
{
	CPP_DBG("E\n");
	if (!cpp_dev->domain) {
		pr_err("domain / iommu context not found\n");
		return -ENODEV;
	}

	kref_init(&cpp_dev->refcount);
	kref_get(&cpp_dev->refcount);
	cpp_dev->client = msm_ion_client_create(-1, "cpp");
	/* NOTE(review): msm_ion_client_create() failure is not checked
	 * here (matches the original behavior) — confirm whether callers
	 * tolerate a NULL/ERR client before tightening this. */
	CPP_DBG("X\n");
	return 0;
}
/* Drop the reference taken in cpp_init_mem(); the ion client is
 * destroyed when the refcount hits zero (see cpp_release_ion_client). */
static void cpp_deinit_mem(struct cpp_device *cpp_dev)
{
	CPP_DBG("E\n");
	kref_put(&cpp_dev->refcount, cpp_release_ion_client);
	CPP_DBG("X\n");
}
/* CPP hard-IRQ handler.
 * On a TX-FIFO interrupt (status bit 3) it drains the microcontroller
 * TX FIFO into a slot of the tasklet command ring and schedules the
 * tasklet.  On a fatal-error interrupt (bits in 0x7C0) it dumps the
 * micro debug registers.  The raw status is acknowledged via
 * MSM_CPP_MICRO_IRQGEN_CLR in every case. */
static irqreturn_t msm_cpp_irq(int irq_num, void *data)
{
	unsigned long flags;
	uint32_t tx_level;
	uint32_t irq_status;
	uint32_t i;
	uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
	struct cpp_device *cpp_dev = data;
	struct msm_cpp_tasklet_queue_cmd *queue_cmd;
	irq_status = msm_camera_io_r(cpp_dev->base + MSM_CPP_MICRO_IRQGEN_STAT);
	CPP_DBG("status: 0x%x\n", irq_status);
	if (irq_status & 0x8) {
		/* FIFO fill level lives above bit 2 of the TX status reg. */
		tx_level = msm_camera_io_r(cpp_dev->base +
			MSM_CPP_MICRO_FIFO_TX_STAT) >> 2;
		for (i = 0; i < tx_level; i++) {
			tx_fifo[i] = msm_camera_io_r(cpp_dev->base +
				MSM_CPP_MICRO_FIFO_TX_DATA);
		}
		spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
		queue_cmd = &cpp_dev->tasklet_queue_cmd[cpp_dev->taskletq_idx];
		if (queue_cmd->cmd_used) {
			/* Ring overflow: recycle the oldest slot in place
			 * without bumping irq_cnt (its event is dropped). */
			pr_err("%s: cpp tasklet queue overflow\n", __func__);
			list_del(&queue_cmd->list);
		} else {
			atomic_add(1, &cpp_dev->irq_cnt);
		}
		queue_cmd->irq_status = irq_status;
		queue_cmd->tx_level = tx_level;
		memset(&queue_cmd->tx_fifo[0], 0, sizeof(queue_cmd->tx_fifo));
		for (i = 0; i < tx_level; i++)
			queue_cmd->tx_fifo[i] = tx_fifo[i];
		queue_cmd->cmd_used = 1;
		cpp_dev->taskletq_idx =
			(cpp_dev->taskletq_idx + 1) % MSM_CPP_TASKLETQ_SIZE;
		list_add_tail(&queue_cmd->list, &cpp_dev->tasklet_q);
		spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
		tasklet_schedule(&cpp_dev->cpp_tasklet);
	} else if (irq_status & 0x7C0) {
		/* Fatal micro error: dump the debug register window. */
		pr_err("%s: fatal error: 0x%x\n", __func__, irq_status);
		pr_err("%s: DEBUG_SP: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x40));
		pr_err("%s: DEBUG_T: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x44));
		pr_err("%s: DEBUG_N: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x48));
		pr_err("%s: DEBUG_R: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4C));
		pr_err("%s: DEBUG_OPPC: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x50));
		pr_err("%s: DEBUG_MO: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x54));
		pr_err("%s: DEBUG_TIMER0: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x60));
		pr_err("%s: DEBUG_TIMER1: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x64));
		pr_err("%s: DEBUG_GPI: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x70));
		pr_err("%s: DEBUG_GPO: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x74));
		pr_err("%s: DEBUG_T0: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x80));
		pr_err("%s: DEBUG_R0: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x84));
		pr_err("%s: DEBUG_T1: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
		pr_err("%s: DEBUG_R1: 0x%x\n", __func__,
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
	}
	msm_camera_io_w(irq_status, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR);
	return IRQ_HANDLED;
}
/* Tasklet: post-processes FIFO snapshots queued by msm_cpp_irq().
 * Walks each snapshot for MSM_CPP_MSG_ID_CMD framed messages; on a
 * frame ACK or NACK it stops the frame watchdog and signals frame
 * completion via msm_cpp_notify_frame_done(). */
void msm_cpp_do_tasklet(unsigned long data)
{
	unsigned long flags;
	uint32_t irq_status;
	uint32_t tx_level;
	uint32_t msg_id, cmd_len;
	uint32_t i;
	uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
	struct cpp_device *cpp_dev = (struct cpp_device *) data;
	struct msm_cpp_tasklet_queue_cmd *queue_cmd;
	struct msm_cpp_timer_t *timer = NULL;
	while (atomic_read(&cpp_dev->irq_cnt)) {
		spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
		queue_cmd = list_first_entry(&cpp_dev->tasklet_q,
			struct msm_cpp_tasklet_queue_cmd, list);
		if (!queue_cmd) {
			/* NOTE(review): list_first_entry() never yields
			 * NULL, so this guard looks unreachable — confirm
			 * before relying on it. */
			atomic_set(&cpp_dev->irq_cnt, 0);
			spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
			return;
		}
		atomic_sub(1, &cpp_dev->irq_cnt);
		list_del(&queue_cmd->list);
		queue_cmd->cmd_used = 0;
		irq_status = queue_cmd->irq_status;
		tx_level = queue_cmd->tx_level;
		/* Copy the FIFO snapshot out before dropping the lock. */
		for (i = 0; i < tx_level; i++)
			tx_fifo[i] = queue_cmd->tx_fifo[i];
		spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
		for (i = 0; i < tx_level; i++) {
			if (tx_fifo[i] == MSM_CPP_MSG_ID_CMD) {
				/* Message layout: [CMD][len][msg_id]... */
				cmd_len = tx_fifo[i+1];
				msg_id = tx_fifo[i+2];
				if (msg_id == MSM_CPP_MSG_ID_FRAME_ACK) {
					CPP_DBG("Frame done!!\n");
					/* delete CPP timer */
					CPP_DBG("delete timer.\n");
					timer = &cpp_timer;
					atomic_set(&timer->used, 0);
					del_timer(&timer->cpp_timer);
					timer->data.processed_frame = NULL;
					msm_cpp_notify_frame_done(cpp_dev);
				} else if (msg_id ==
					MSM_CPP_MSG_ID_FRAME_NACK) {
					pr_err("NACK error from hw!!\n");
					CPP_DBG("delete timer.\n");
					timer = &cpp_timer;
					atomic_set(&timer->used, 0);
					del_timer(&timer->cpp_timer);
					timer->data.processed_frame = NULL;
					msm_cpp_notify_frame_done(cpp_dev);
				}
				/* Skip past this command's payload. */
				i += cmd_len + 2;
			}
		}
	}
}
/* Power up and map the CPP block: bandwidth vote, vdd regulator,
 * reset pulse on micro_iface_clk, full clock enable, register window
 * mappings, IRQ + buffer-manager hookup (non-boot case), and firmware
 * reload when an image was already loaded once.  Error paths unwind in
 * reverse order via the labels at the bottom. */
static int cpp_init_hardware(struct cpp_device *cpp_dev)
{
	int rc = 0;
	rc = msm_isp_init_bandwidth_mgr(ISP_CPP);
	if (rc < 0) {
		pr_err("%s: Bandwidth registration Failed!\n", __func__);
		goto bus_scale_register_failed;
	}
	msm_isp_update_bandwidth(ISP_CPP, 981345600, 1066680000);
	if (cpp_dev->fs_cpp == NULL) {
		cpp_dev->fs_cpp =
			regulator_get(&cpp_dev->pdev->dev, "vdd");
		if (IS_ERR(cpp_dev->fs_cpp)) {
			pr_err("Regulator cpp vdd get failed %ld\n",
				PTR_ERR(cpp_dev->fs_cpp));
			cpp_dev->fs_cpp = NULL;
			rc = -EINVAL;
			goto fs_failed;
		}
		rc = regulator_enable(cpp_dev->fs_cpp);
		if (rc != 0) {
			pr_err("Regulator cpp vdd enable failed\n");
			regulator_put(cpp_dev->fs_cpp);
			cpp_dev->fs_cpp = NULL;
			goto fs_failed;
		}
	}
	/* Pulse a reset (assert, wait, deassert) on the micro interface
	 * clock before enabling the full clock set below. */
	cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX] =
		clk_get(&cpp_dev->pdev->dev,
		cpp_clk_info[MSM_MICRO_IFACE_CLK_IDX].clk_name);
	if (IS_ERR(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX])) {
		pr_err("%s get failed\n",
			cpp_clk_info[MSM_MICRO_IFACE_CLK_IDX].clk_name);
		rc = PTR_ERR(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
		goto remap_failed;
	}
	rc = clk_reset(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX],
		CLK_RESET_ASSERT);
	if (rc) {
		pr_err("%s:micro_iface_clk assert failed\n", __func__);
		clk_put(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
		goto remap_failed;
	}
	usleep_range(10000, 12000);
	rc = clk_reset(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX],
		CLK_RESET_DEASSERT);
	if (rc) {
		pr_err("%s:micro_iface_clk assert failed\n", __func__);
		clk_put(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
		goto remap_failed;
	}
	usleep_range(1000, 1200);
	clk_put(cpp_dev->cpp_clk[MSM_MICRO_IFACE_CLK_IDX]);
	rc = msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
		cpp_dev->cpp_clk, ARRAY_SIZE(cpp_clk_info), 1);
	if (rc < 0) {
		pr_err("clk enable failed\n");
		goto clk_failed;
	}
	/* Map the CPP, VBIF and CPP-HW register windows. */
	cpp_dev->base = ioremap(cpp_dev->mem->start,
		resource_size(cpp_dev->mem));
	if (!cpp_dev->base) {
		rc = -ENOMEM;
		pr_err("ioremap failed\n");
		goto remap_failed;
	}
	cpp_dev->vbif_base = ioremap(cpp_dev->vbif_mem->start,
		resource_size(cpp_dev->vbif_mem));
	if (!cpp_dev->vbif_base) {
		rc = -ENOMEM;
		pr_err("ioremap failed\n");
		goto vbif_remap_failed;
	}
	cpp_dev->cpp_hw_base = ioremap(cpp_dev->cpp_hw_mem->start,
		resource_size(cpp_dev->cpp_hw_mem));
	if (!cpp_dev->cpp_hw_base) {
		rc = -ENOMEM;
		pr_err("ioremap failed\n");
		goto cpp_hw_remap_failed;
	}
	/* During CPP_STATE_BOOT the IRQ and buffer manager are set up
	 * elsewhere; only hook them up for a normal open. */
	if (cpp_dev->state != CPP_STATE_BOOT) {
		rc = request_irq(cpp_dev->irq->start, msm_cpp_irq,
			IRQF_TRIGGER_RISING, "cpp", cpp_dev);
		if (rc < 0) {
			pr_err("irq request fail\n");
			goto req_irq_fail;
		}
		cpp_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
		rc = msm_cpp_buffer_ops(cpp_dev,
			VIDIOC_MSM_BUF_MNGR_INIT, NULL);
		if (rc < 0) {
			pr_err("buf mngr init failed\n");
			free_irq(cpp_dev->irq->start, cpp_dev);
			goto req_irq_fail;
		}
	}
	cpp_dev->hw_info.cpp_hw_version =
		msm_camera_io_r(cpp_dev->cpp_hw_base);
	pr_debug("CPP HW Version: 0x%x\n", cpp_dev->hw_info.cpp_hw_version);
	cpp_dev->hw_info.cpp_hw_caps =
		msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4);
	pr_debug("CPP HW Caps: 0x%x\n", cpp_dev->hw_info.cpp_hw_caps);
	msm_camera_io_w(0x1, cpp_dev->vbif_base + 0x4);
	cpp_dev->taskletq_idx = 0;
	atomic_set(&cpp_dev->irq_cnt, 0);
	msm_cpp_create_buff_queue(cpp_dev, MSM_CPP_MAX_BUFF_QUEUE);
	if (cpp_dev->is_firmware_loaded == 1) {
		/* Reload the firmware with the IRQ masked; the loader
		 * polls the FIFO directly instead of using interrupts. */
		disable_irq(cpp_dev->irq->start);
		cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
		enable_irq(cpp_dev->irq->start);
		msm_camera_io_w_mb(0x7C8, cpp_dev->base +
			MSM_CPP_MICRO_IRQGEN_MASK);
		msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
			MSM_CPP_MICRO_IRQGEN_CLR);
	}
	return rc;
req_irq_fail:
	iounmap(cpp_dev->cpp_hw_base);
cpp_hw_remap_failed:
	iounmap(cpp_dev->vbif_base);
vbif_remap_failed:
	iounmap(cpp_dev->base);
remap_failed:
	msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
		cpp_dev->cpp_clk, ARRAY_SIZE(cpp_clk_info), 0);
clk_failed:
	regulator_disable(cpp_dev->fs_cpp);
	regulator_put(cpp_dev->fs_cpp);
fs_failed:
	msm_isp_update_bandwidth(ISP_CPP, 0, 0);
	msm_isp_deinit_bandwidth_mgr(ISP_CPP);
bus_scale_register_failed:
	return rc;
}
/* Inverse of cpp_init_hardware(): release the IRQ/tasklet and buffer
 * manager (non-boot case), delete buffer queues, unmap the register
 * windows, stop the clocks, drop the vdd regulator and remove the
 * bandwidth vote. */
static void cpp_release_hardware(struct cpp_device *cpp_dev)
{
	int32_t rc;
	if (cpp_dev->state != CPP_STATE_BOOT) {
		rc = msm_cpp_buffer_ops(cpp_dev,
			VIDIOC_MSM_BUF_MNGR_DEINIT, NULL);
		if (rc < 0)
			pr_err("error in buf mngr deinit rc=%d\n", rc);
		free_irq(cpp_dev->irq->start, cpp_dev);
		tasklet_kill(&cpp_dev->cpp_tasklet);
		atomic_set(&cpp_dev->irq_cnt, 0);
	}
	msm_cpp_delete_buff_queue(cpp_dev);
	iounmap(cpp_dev->base);
	iounmap(cpp_dev->vbif_base);
	iounmap(cpp_dev->cpp_hw_base);
	msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
		cpp_dev->cpp_clk, ARRAY_SIZE(cpp_clk_info), 0);
	regulator_disable(cpp_dev->fs_cpp);
	regulator_put(cpp_dev->fs_cpp);
	cpp_dev->fs_cpp = NULL;
	msm_isp_update_bandwidth(ISP_CPP, 0, 0);
	msm_isp_deinit_bandwidth_mgr(ISP_CPP);
}
/* Boot the CPP microcontroller and optionally download firmware.
 * Drives the micro through its RX/TX FIFOs: enable micro clock and
 * boot-start, then (when a firmware name is given) stream the image
 * words via a FW_LOAD command, then command a jump to the start
 * address and read back the bootloader and firmware versions. */
static void cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
{
	uint32_t i;
	uint32_t *ptr_bin = NULL;
	int32_t rc = -EFAULT;
	const struct firmware *fw = NULL;
	struct device *dev = &cpp_dev->pdev->dev;
	msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
	msm_camera_io_w(0x1, cpp_dev->base +
		MSM_CPP_MICRO_BOOT_START);
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
	if (fw_name_bin) {
		pr_debug("%s: FW file: %s\n", __func__, fw_name_bin);
		rc = request_firmware(&fw, fw_name_bin, dev);
		if (rc) {
			dev_err(dev,
				"Fail to loc blob %s from dev %p, Error: %d\n",
				fw_name_bin, dev, rc);
		}
		/* On request_firmware() failure fw stays NULL and only
		 * the bootstrap sequence below runs (no image words). */
		if (NULL != fw)
			ptr_bin = (uint32_t *)fw->data;
		msm_camera_io_w(0x1, cpp_dev->base +
			MSM_CPP_MICRO_BOOT_START);
		msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
		msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
			MSM_CPP_MICRO_IRQGEN_CLR);
		/*Start firmware loading*/
		msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
		if (fw)
			msm_cpp_write(fw->size, cpp_dev->base);
		else
			msm_cpp_write(MSM_CPP_END_ADDRESS, cpp_dev->base);
		msm_cpp_write(MSM_CPP_START_ADDRESS, cpp_dev->base);
		if (ptr_bin) {
			/* Stream the image into the RX FIFO one 32-bit
			 * word at a time. */
			for (i = 0; i < fw->size/4; i++) {
				msm_cpp_write(*ptr_bin, cpp_dev->base);
				ptr_bin++;
			}
		}
		if (fw)
			release_firmware(fw);
		msm_camera_io_w_mb(0x00, cpp_dev->cpp_hw_base + 0xC);
		msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_OK);
		msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
	}
	/*Trigger MC to jump to start address*/
	msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
	msm_cpp_write(MSM_CPP_JUMP_ADDRESS, cpp_dev->base);
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
	msm_cpp_poll(cpp_dev->base, 0x1);
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
	/*Get Bootloader Version*/
	msm_cpp_write(MSM_CPP_CMD_GET_BOOTLOADER_VER, cpp_dev->base);
	pr_info("MC Bootloader Version: 0x%x\n",
		msm_cpp_read(cpp_dev->base));
	/*Get Firmware Version*/
	msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
	msm_cpp_write(MSM_CPP_MSG_ID_CMD, cpp_dev->base);
	msm_cpp_write(0x1, cpp_dev->base);
	msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
	msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
	msm_cpp_poll(cpp_dev->base, 0x2);
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
	pr_info("CPP FW Version: 0x%x\n", msm_cpp_read(cpp_dev->base));
	msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
	/*Disable MC clock*/
	/*msm_camera_io_w(0x0, cpp_dev->base +
		MSM_CPP_MICRO_CLKEN_CTL);*/
}
/* v4l2 subdev open: claim a subscriber slot under the device mutex
 * and, on the first open, power up the hardware, attach the IOMMU
 * context and set up the ion client.
 * Returns -ENODEV when all MAX_ACTIVE_CPP_INSTANCE slots are taken,
 * or the error from cpp_init_hardware(). */
static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	int rc;
	uint32_t i;
	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
	CPP_DBG("E\n");
	mutex_lock(&cpp_dev->mutex);
	if (cpp_dev->cpp_open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
		pr_err("No free CPP instance\n");
		mutex_unlock(&cpp_dev->mutex);
		return -ENODEV;
	}
	/* Claim the first inactive subscriber slot. */
	for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
		if (cpp_dev->cpp_subscribe_list[i].active == 0) {
			cpp_dev->cpp_subscribe_list[i].active = 1;
			cpp_dev->cpp_subscribe_list[i].vfh = &fh->vfh;
			break;
		}
	}
	if (i == MAX_ACTIVE_CPP_INSTANCE) {
		pr_err("No free instance\n");
		mutex_unlock(&cpp_dev->mutex);
		return -ENODEV;
	}
	CPP_DBG("open %d %p\n", i, &fh->vfh);
	cpp_dev->cpp_open_cnt++;
	if (cpp_dev->cpp_open_cnt == 1) {
		/* First opener powers the block up. */
		rc = cpp_init_hardware(cpp_dev);
		if (rc < 0) {
			/* Roll back the slot claimed above. */
			cpp_dev->cpp_open_cnt--;
			cpp_dev->cpp_subscribe_list[i].active = 0;
			cpp_dev->cpp_subscribe_list[i].vfh = NULL;
			mutex_unlock(&cpp_dev->mutex);
			return rc;
		}
		iommu_attach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
		cpp_init_mem(cpp_dev);
		cpp_dev->state = CPP_STATE_IDLE;
	}
	mutex_unlock(&cpp_dev->mutex);
	return 0;
}
/*
 * cpp_close_node - v4l2 subdev close hook for the CPP node.
 *
 * Releases this handle's subscribe slot; on last close, dumps the
 * micro-controller debug registers, powers the hardware down, detaches
 * the IOMMU and drains the processing/event queues.
 *
 * NOTE(review): MSM_SD_SHUTDOWN calls this with fh == NULL; that is safe
 * here only because fh is never dereferenced in this function.
 */
static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	uint32_t i;
	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
	struct msm_device_queue *processing_q = NULL;
	struct msm_device_queue *eventData_q = NULL;
	if (!cpp_dev) {
		pr_err("failed: cpp_dev %p\n", cpp_dev);
		return -EINVAL;
	}
	mutex_lock(&cpp_dev->mutex);
	processing_q = &cpp_dev->processing_q;
	eventData_q = &cpp_dev->eventData_q;
	/* Nothing open: treat a stray close as a no-op. */
	if (cpp_dev->cpp_open_cnt == 0) {
		mutex_unlock(&cpp_dev->mutex);
		return 0;
	}
	/* Release the first active slot (slots are not tied to a handle). */
	for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
		if (cpp_dev->cpp_subscribe_list[i].active == 1) {
			cpp_dev->cpp_subscribe_list[i].active = 0;
			cpp_dev->cpp_subscribe_list[i].vfh = NULL;
			break;
		}
	}
	if (i == MAX_ACTIVE_CPP_INSTANCE) {
		pr_err("Invalid close\n");
		mutex_unlock(&cpp_dev->mutex);
		return -ENODEV;
	}
	cpp_dev->cpp_open_cnt--;
	if (cpp_dev->cpp_open_cnt == 0) {
		/* Dump micro-controller debug registers before power-down. */
		pr_debug("irq_status: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4));
		pr_debug("DEBUG_SP: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x40));
		pr_debug("DEBUG_T: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x44));
		pr_debug("DEBUG_N: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x48));
		pr_debug("DEBUG_R: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4C));
		pr_debug("DEBUG_OPPC: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x50));
		pr_debug("DEBUG_MO: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x54));
		pr_debug("DEBUG_TIMER0: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x60));
		pr_debug("DEBUG_TIMER1: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x64));
		pr_debug("DEBUG_GPI: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x70));
		pr_debug("DEBUG_GPO: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x74));
		pr_debug("DEBUG_T0: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x80));
		pr_debug("DEBUG_R0: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x84));
		pr_debug("DEBUG_T1: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
		pr_debug("DEBUG_R1: 0x%x\n",
			msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
		/* Gate the micro-controller clock, then tear everything down. */
		msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
		cpp_deinit_mem(cpp_dev);
		iommu_detach_device(cpp_dev->domain, cpp_dev->iommu_ctx);
		cpp_release_hardware(cpp_dev);
		msm_cpp_empty_list(processing_q, list_frame);
		msm_cpp_empty_list(eventData_q, list_eventdata);
		cpp_dev->state = CPP_STATE_OFF;
	}
	mutex_unlock(&cpp_dev->mutex);
	return 0;
}
/* Subdev lifecycle hooks: invoked by the v4l2 core on device node
 * open/close. */
static const struct v4l2_subdev_internal_ops msm_cpp_internal_ops = {
	.open = cpp_open_node,
	.close = cpp_close_node,
};
/*
 * msm_cpp_buffer_ops - forward one buffer-manager operation to the
 * buffer-manager subdev.
 * @cpp_dev:       CPP device whose buf_mgr_subdev receives the ioctl.
 * @buff_mgr_ops:  buffer-manager ioctl code
 *                 (e.g. VIDIOC_MSM_BUF_MNGR_GET_BUF / _PUT_BUF / _BUF_DONE).
 * @buff_mgr_info: in/out payload for the operation.
 *
 * Returns the subdev ioctl result; negative errno on failure.
 */
static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
	uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info)
{
	int rc;

	/* The former "-EINVAL" initializer was dead code: rc is always
	 * overwritten by the subdev call below. */
	rc = v4l2_subdev_call(cpp_dev->buf_mgr_subdev, core, ioctl,
		buff_mgr_ops, buff_mgr_info);
	if (rc < 0)
		pr_debug("%s: line %d rc = %d\n", __func__, __LINE__, rc);
	return rc;
}
/*
 * msm_cpp_notify_frame_done - complete the frame at the head of the
 * processing queue.
 *
 * Moves the finished frame from processing_q to eventData_q, returns the
 * output buffer(s) to the buffer manager (unless the frame requested a
 * processed-divert), and queues a V4L2_EVENT_CPP_FRAME_DONE event so
 * user space can pick up the payload via VIDIOC_MSM_CPP_GET_EVENTPAYLOAD.
 *
 * NOTE(review): GFP_ATOMIC suggests this may run in atomic context
 * (tasklet path) — confirm against the caller before changing allocation.
 *
 * Returns 0 on success, -ENOMEM if the event node cannot be allocated,
 * or -EINVAL if a buffer could not be returned.
 */
static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
{
	struct v4l2_event v4l2_evt;
	struct msm_queue_cmd *frame_qcmd = NULL;
	struct msm_queue_cmd *event_qcmd = NULL;
	struct msm_cpp_frame_info_t *processed_frame = NULL;
	struct msm_device_queue *queue = &cpp_dev->processing_q;
	struct msm_buf_mngr_info buff_mgr_info;
	int rc = 0;
	frame_qcmd = msm_dequeue(queue, list_frame);
	if (frame_qcmd) {
		processed_frame = frame_qcmd->command;
		do_gettimeofday(&(processed_frame->out_time));
		kfree(frame_qcmd);
		event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
		if (!event_qcmd) {
			pr_err("Insufficient memory. return");
			return -ENOMEM;
		}
		atomic_set(&event_qcmd->on_heap, 1);
		event_qcmd->command = processed_frame;
		CPP_DBG("fid %d\n", processed_frame->frame_id);
		msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
		/* Primary output: hand the buffer back unless user space
		 * asked for a processed divert. */
		if (!processed_frame->output_buffer_info[0].processed_divert) {
			memset(&buff_mgr_info, 0 ,
				sizeof(struct msm_buf_mngr_info));
			/* identity packs session (hi 16) and stream (lo 16). */
			buff_mgr_info.session_id =
				((processed_frame->identity >> 16) & 0xFFFF);
			buff_mgr_info.stream_id =
				(processed_frame->identity & 0xFFFF);
			buff_mgr_info.frame_id = processed_frame->frame_id;
			buff_mgr_info.timestamp = processed_frame->timestamp;
			buff_mgr_info.index =
				processed_frame->output_buffer_info[0].index;
			rc = msm_cpp_buffer_ops(cpp_dev,
				VIDIOC_MSM_BUF_MNGR_BUF_DONE,
				&buff_mgr_info);
			if (rc < 0) {
				pr_err("error putting buffer\n");
				rc = -EINVAL;
			}
		}
		/* Duplicate output, if the frame produced one. */
		if (processed_frame->duplicate_output &&
			!processed_frame->
			output_buffer_info[1].processed_divert) {
			memset(&buff_mgr_info, 0 ,
				sizeof(struct msm_buf_mngr_info));
			buff_mgr_info.session_id =
			((processed_frame->duplicate_identity >> 16) & 0xFFFF);
			buff_mgr_info.stream_id =
				(processed_frame->duplicate_identity & 0xFFFF);
			buff_mgr_info.frame_id = processed_frame->frame_id;
			buff_mgr_info.timestamp = processed_frame->timestamp;
			buff_mgr_info.index =
				processed_frame->output_buffer_info[1].index;
			rc = msm_cpp_buffer_ops(cpp_dev,
				VIDIOC_MSM_BUF_MNGR_BUF_DONE,
				&buff_mgr_info);
			if (rc < 0) {
				pr_err("error putting buffer\n");
				rc = -EINVAL;
			}
		}
		/* Tell user space the frame payload is ready. */
		v4l2_evt.id = processed_frame->inst_id;
		v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
		v4l2_event_queue(cpp_dev->msm_sd.sd.devnode, &v4l2_evt);
	}
	return rc;
}
#if MSM_CPP_DUMP_FRM_CMD
/*
 * msm_cpp_dump_frame_cmd - log every word of a CPP frame command message.
 * Compiled in only when MSM_CPP_DUMP_FRM_CMD is set; otherwise the stub
 * below keeps call sites unconditional.
 */
static int msm_cpp_dump_frame_cmd(uint32_t *cmd, int32_t len)
{
	int i;
	pr_err("%s: -------- cpp frame cmd msg start --------", __func__);
	for (i = 0; i < len; i++)
		pr_err("%s: msg[%03d] = 0x%08x", __func__, i, cmd[i]);
	pr_err("%s: --------- cpp frame cmd msg end ---------", __func__);
	return 0;
}
#else
/* No-op stub when frame-command dumping is compiled out. */
static int msm_cpp_dump_frame_cmd(uint32_t *cmd, int32_t len)
{
	return 0;
}
#endif
/*
 * msm_cpp_do_timeout_work - workqueue handler run when the per-frame
 * watchdog timer (cpp_timer) fires.
 *
 * Assumes the micro-controller hung: reloads the firmware with the IRQ
 * masked, re-arms the watchdog, and resubmits the frame recorded in
 * cpp_timer.data.processed_frame to the hardware.
 *
 * NOTE(review): cpp_timer.used is checked twice because the real IRQ may
 * race in while firmware is reloading; if it did, the resubmit is skipped.
 */
static void msm_cpp_do_timeout_work(struct work_struct *work)
{
	int ret;
	uint32_t i = 0;
	struct msm_cpp_frame_info_t *this_frame = NULL;
	pr_err("cpp_timer_callback called. (jiffies=%lu)\n",
		jiffies);
	if (!work) {
		pr_err("Invalid work:%p\n", work);
		return;
	}
	if (!atomic_read(&cpp_timer.used)) {
		pr_err("Delayed trigger, IRQ serviced\n");
		return;
	}
	/* Keep the CPP IRQ quiet while the firmware is rewritten. */
	disable_irq(cpp_timer.data.cpp_dev->irq->start);
	pr_err("Reloading firmware\n");
	cpp_load_fw(cpp_timer.data.cpp_dev, NULL);
	pr_err("Firmware loading done\n");
	enable_irq(cpp_timer.data.cpp_dev->irq->start);
	msm_camera_io_w_mb(0x8, cpp_timer.data.cpp_dev->base +
		MSM_CPP_MICRO_IRQGEN_MASK);
	msm_camera_io_w_mb(0xFFFF,
		cpp_timer.data.cpp_dev->base +
		MSM_CPP_MICRO_IRQGEN_CLR);
	/* Re-check: the IRQ may have been serviced during the reload. */
	if (!atomic_read(&cpp_timer.used)) {
		pr_err("Delayed trigger, IRQ serviced\n");
		return;
	}
	this_frame = cpp_timer.data.processed_frame;
	pr_err("ReInstalling cpp_timer\n");
	setup_timer(&cpp_timer.cpp_timer, cpp_timer_callback,
		(unsigned long)&cpp_timer);
	pr_err("Starting timer to fire in %d ms. (jiffies=%lu)\n",
		CPP_CMD_TIMEOUT_MS, jiffies);
	ret = mod_timer(&cpp_timer.cpp_timer,
		jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
	if (ret)
		pr_err("error in mod_timer\n");
	pr_err("Rescheduling for identity=0x%x, frame_id=%03d\n",
		this_frame->identity, this_frame->frame_id);
	/* Resubmit the timed-out frame word by word. */
	msm_cpp_write(0x6, cpp_timer.data.cpp_dev->base);
	msm_cpp_dump_frame_cmd(this_frame->cpp_cmd_msg,
		this_frame->msg_len);
	for (i = 0; i < this_frame->msg_len; i++)
		msm_cpp_write(this_frame->cpp_cmd_msg[i],
			cpp_timer.data.cpp_dev->base);
	return;
}
/*
 * cpp_timer_callback - timer-softirq entry for the frame watchdog.
 * Defers the heavy recovery work (firmware reload, frame resubmit) to the
 * timer workqueue, since it cannot run in timer context.
 */
void cpp_timer_callback(unsigned long data)
{
	struct msm_cpp_work_t *work =
		cpp_timer.data.cpp_dev->work;
	queue_work(cpp_timer.data.cpp_dev->timer_wq,
		(struct work_struct *)work);
}
/*
 * msm_cpp_send_frame_to_hardware - push one queued frame to the CPP
 * micro-controller.
 *
 * Enqueues the frame on processing_q, arms the per-frame watchdog timer,
 * then streams the command message into the hardware FIFO. The
 * induce_error debugfs knob deliberately corrupts one word to exercise
 * the timeout/recovery path.
 *
 * Returns 0 on success or -EAGAIN when the processing queue is full
 * (the frame is dropped).
 */
static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
	struct msm_queue_cmd *frame_qcmd)
{
	uint32_t i;
	int32_t rc = -EAGAIN;
	int ret;
	struct msm_cpp_frame_info_t *process_frame;
	if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
		process_frame = frame_qcmd->command;
		msm_enqueue(&cpp_dev->processing_q,
			&frame_qcmd->list_frame);
		/* Record the frame for the watchdog before arming it. */
		cpp_timer.data.processed_frame = process_frame;
		atomic_set(&cpp_timer.used, 1);
		/* install timer for cpp timeout */
		CPP_DBG("Installing cpp_timer\n");
		setup_timer(&cpp_timer.cpp_timer,
			cpp_timer_callback, (unsigned long)&cpp_timer);
		CPP_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
			CPP_CMD_TIMEOUT_MS, jiffies);
		ret = mod_timer(&cpp_timer.cpp_timer,
			jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
		if (ret)
			pr_err("error in mod_timer\n");
		/* 0x6 is the command header expected by the firmware. */
		msm_cpp_write(0x6, cpp_dev->base);
		msm_cpp_dump_frame_cmd(process_frame->cpp_cmd_msg,
			process_frame->msg_len);
		for (i = 0; i < process_frame->msg_len; i++) {
			if ((induce_error) && (i == 1)) {
				/* Debug hook: corrupt one word on purpose. */
				pr_err("Induce error\n");
				msm_cpp_write(process_frame->cpp_cmd_msg[i]-1,
					cpp_dev->base);
				induce_error--;
			} else
				msm_cpp_write(process_frame->cpp_cmd_msg[i],
					cpp_dev->base);
		}
		do_gettimeofday(&(process_frame->in_time));
		rc = 0;
	}
	if (rc < 0)
		pr_err("process queue full. drop frame\n");
	return rc;
}
/*
 * msm_cpp_flush_frames - handler for VIDIOC_MSM_CPP_FLUSH_QUEUE.
 * Currently a stub: flushing is not implemented and the ioctl always
 * reports success.
 */
static int msm_cpp_flush_frames(struct cpp_device *cpp_dev)
{
	return 0;
}
/*
 * msm_cpp_cfg - queue one user-space frame for CPP processing
 * (VIDIOC_MSM_CPP_CFG).
 *
 * Copies the frame descriptor and its command message from user space,
 * resolves the input, output and optional duplicate-output buffers via
 * the buffer manager, patches the resolved physical addresses into the
 * per-stripe words of the command message, and submits the frame to the
 * hardware. The resulting status is copied back to
 * u_frame_info->status on both success and failure.
 *
 * Returns 0 on success or a negative errno; partially acquired
 * resources are released through the ERROR4..ERROR1 unwind labels.
 */
static int msm_cpp_cfg(struct cpp_device *cpp_dev,
	struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
{
	int rc = 0;
	struct msm_queue_cmd *frame_qcmd = NULL;
	struct msm_cpp_frame_info_t *new_frame =
		kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
	uint32_t *cpp_frame_msg;
	unsigned long in_phyaddr, out_phyaddr0, out_phyaddr1;
	uint16_t num_stripes = 0;
	struct msm_buf_mngr_info buff_mgr_info, dup_buff_mgr_info;
	struct msm_cpp_frame_info_t *u_frame_info =
		(struct msm_cpp_frame_info_t *)ioctl_ptr->ioctl_ptr;
	int32_t status = 0;
	uint8_t fw_version_1_2_x = 0;
	int in_fd;
	int i = 0;
	if (!new_frame) {
		pr_err("Insufficient memory. return\n");
		return -ENOMEM;
	}
	/* Pull the frame descriptor in before trusting any field of it. */
	rc = (copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
		sizeof(struct msm_cpp_frame_info_t)) ? -EFAULT : 0);
	if (rc) {
		ERR_COPY_FROM_USER();
		rc = -EINVAL;
		goto ERROR1;
	}
	/* Bound the user-supplied message length before allocating. */
	if ((new_frame->msg_len == 0) ||
		(new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
		pr_err("%s:%d: Invalid frame len:%d\n", __func__,
			__LINE__, new_frame->msg_len);
		rc = -EINVAL;
		goto ERROR1;
	}
	cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
		GFP_KERNEL);
	if (!cpp_frame_msg) {
		pr_err("Insufficient memory. return");
		rc = -ENOMEM;
		goto ERROR1;
	}
	rc = (copy_from_user(cpp_frame_msg,
		(void __user *)new_frame->cpp_cmd_msg,
		sizeof(uint32_t)*new_frame->msg_len) ? -EFAULT : 0);
	if (rc) {
		ERR_COPY_FROM_USER();
		rc = -EINVAL;
		goto ERROR2;
	}
	/* Replace the user pointer with the kernel copy from here on. */
	new_frame->cpp_cmd_msg = cpp_frame_msg;
	/* identity fields pack session (hi 16 bits) / stream (lo 16). */
	in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
		&new_frame->input_buffer_info,
		((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
		(new_frame->input_buffer_info.identity & 0xFFFF), &in_fd);
	if (!in_phyaddr) {
		pr_err("error gettting input physical address\n");
		rc = -EINVAL;
		goto ERROR2;
	}
	/* Acquire the primary output buffer from the buffer manager. */
	memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
	buff_mgr_info.session_id = ((new_frame->identity >> 16) & 0xFFFF);
	buff_mgr_info.stream_id = (new_frame->identity & 0xFFFF);
	rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
		&buff_mgr_info);
	if (rc < 0) {
		rc = -EAGAIN;
		pr_debug("error getting buffer rc:%d\n", rc);
		goto ERROR2;
	}
	new_frame->output_buffer_info[0].index = buff_mgr_info.index;
	out_phyaddr0 = msm_cpp_fetch_buffer_info(cpp_dev,
		&new_frame->output_buffer_info[0],
		((new_frame->identity >> 16) & 0xFFFF),
		(new_frame->identity & 0xFFFF),
		&new_frame->output_buffer_info[0].fd);
	if (!out_phyaddr0) {
		pr_err("error gettting output physical address\n");
		rc = -EINVAL;
		goto ERROR3;
	}
	out_phyaddr1 = out_phyaddr0;
	/* get buffer for duplicate output */
	if (new_frame->duplicate_output) {
		CPP_DBG("duplication enabled, dup_id=0x%x",
			new_frame->duplicate_identity);
		memset(&new_frame->output_buffer_info[1], 0,
			sizeof(struct msm_cpp_buffer_info_t));
		memset(&dup_buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
		dup_buff_mgr_info.session_id =
			((new_frame->duplicate_identity >> 16) & 0xFFFF);
		dup_buff_mgr_info.stream_id =
			(new_frame->duplicate_identity & 0xFFFF);
		rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
			&dup_buff_mgr_info);
		if (rc < 0) {
			rc = -EAGAIN;
			pr_debug("error getting buffer rc:%d\n", rc);
			goto ERROR3;
		}
		new_frame->output_buffer_info[1].index =
			dup_buff_mgr_info.index;
		out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
			&new_frame->output_buffer_info[1],
			((new_frame->duplicate_identity >> 16) & 0xFFFF),
			(new_frame->duplicate_identity & 0xFFFF),
			&new_frame->output_buffer_info[1].fd);
		if (!out_phyaddr1) {
			pr_err("error gettting output physical address\n");
			rc = -EINVAL;
			/* Return the duplicate buffer here; ERROR3 only
			 * returns the primary one. */
			msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
				&dup_buff_mgr_info);
			goto ERROR3;
		}
		/* set duplicate enable bit */
		cpp_frame_msg[5] |= 0x1;
	}
	/* Word 12 packs three 10-bit stripe counts; sum them. */
	num_stripes = ((cpp_frame_msg[12] >> 20) & 0x3FF) +
		((cpp_frame_msg[12] >> 10) & 0x3FF) +
		(cpp_frame_msg[12] & 0x3FF);
	/* Older firmware layouts shift the per-stripe fields by 2 words. */
	fw_version_1_2_x = 0;
	if ((cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_1_0) ||
		(cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_1_1_1) ||
		(cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_2_0_0))
		fw_version_1_2_x = 2;
	/* Patch resolved physical addresses into each 27-word stripe. */
	for (i = 0; i < num_stripes; i++) {
		cpp_frame_msg[(133 + fw_version_1_2_x) + i * 27] +=
			(uint32_t) in_phyaddr;
		cpp_frame_msg[(139 + fw_version_1_2_x) + i * 27] +=
			(uint32_t) out_phyaddr0;
		cpp_frame_msg[(140 + fw_version_1_2_x) + i * 27] +=
			(uint32_t) out_phyaddr1;
		cpp_frame_msg[(141 + fw_version_1_2_x) + i * 27] +=
			(uint32_t) out_phyaddr0;
		cpp_frame_msg[(142 + fw_version_1_2_x) + i * 27] +=
			(uint32_t) out_phyaddr1;
	}
	frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
	if (!frame_qcmd) {
		pr_err("Insufficient memory. return\n");
		rc = -ENOMEM;
		goto ERROR3;
	}
	atomic_set(&frame_qcmd->on_heap, 1);
	frame_qcmd->command = new_frame;
	rc = msm_cpp_send_frame_to_hardware(cpp_dev, frame_qcmd);
	if (rc < 0) {
		pr_err("error cannot send frame to hardware\n");
		rc = -EINVAL;
		goto ERROR4;
	}
	/* Report per-frame status back to the caller's frame struct. */
	ioctl_ptr->trans_code = rc;
	status = rc;
	rc = (copy_to_user((void __user *)u_frame_info->status, &status,
		sizeof(int32_t)) ? -EFAULT : 0);
	if (rc) {
		ERR_COPY_FROM_USER();
		rc = -EINVAL;
		goto ERROR4;
	}
	return rc;
ERROR4:
	kfree(frame_qcmd);
ERROR3:
	/* Give the primary output buffer back to the buffer manager. */
	msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
		&buff_mgr_info);
ERROR2:
	kfree(cpp_frame_msg);
ERROR1:
	kfree(new_frame);
	/* Best-effort: still report the error status to user space. */
	ioctl_ptr->trans_code = rc;
	status = rc;
	if (copy_to_user((void __user *)u_frame_info->status, &status,
		sizeof(int32_t)))
		pr_err("error cannot copy error\n");
	return rc;
}
/*
 * msm_cpp_subdev_ioctl - core ioctl dispatcher for the CPP subdev.
 *
 * All commands run under cpp_dev->mutex; every exit path must drop it.
 *
 * Fix: the VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO length check used to
 * return -EINVAL while still holding cpp_dev->mutex, deadlocking every
 * subsequent ioctl/open/close on this device. It now unlocks first.
 *
 * Returns 0 or the per-command result; negative errno on failure.
 */
long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
	unsigned int cmd, void *arg)
{
	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
	struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
	int rc = 0;
	if (ioctl_ptr == NULL) {
		pr_err("ioctl_ptr is null\n");
		return -EINVAL;
	}
	if (cpp_dev == NULL) {
		pr_err("cpp_dev is null\n");
		return -EINVAL;
	}
	mutex_lock(&cpp_dev->mutex);
	CPP_DBG("E cmd: %d\n", cmd);
	switch (cmd) {
	case VIDIOC_MSM_CPP_GET_HW_INFO: {
		/* Export the probed hardware capabilities to user space. */
		if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
			&cpp_dev->hw_info,
			sizeof(struct cpp_hw_info))) {
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		break;
	}
	case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
		/* Copy in the firmware file name and load it once. */
		if (cpp_dev->is_firmware_loaded == 0) {
			if (cpp_dev->fw_name_bin != NULL) {
				kfree(cpp_dev->fw_name_bin);
				cpp_dev->fw_name_bin = NULL;
			}
			if ((ioctl_ptr->len == 0) ||
				(ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
				pr_err("ioctl_ptr->len is 0\n");
				mutex_unlock(&cpp_dev->mutex);
				return -EINVAL;
			}
			cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
				GFP_KERNEL);
			if (!cpp_dev->fw_name_bin) {
				pr_err("%s:%d: malloc error\n", __func__,
					__LINE__);
				mutex_unlock(&cpp_dev->mutex);
				return -EINVAL;
			}
			if (ioctl_ptr->ioctl_ptr == NULL) {
				pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
				mutex_unlock(&cpp_dev->mutex);
				return -EINVAL;
			}
			rc = (copy_from_user(cpp_dev->fw_name_bin,
				(void __user *)ioctl_ptr->ioctl_ptr,
				ioctl_ptr->len) ? -EFAULT : 0);
			if (rc) {
				ERR_COPY_FROM_USER();
				kfree(cpp_dev->fw_name_bin);
				cpp_dev->fw_name_bin = NULL;
				mutex_unlock(&cpp_dev->mutex);
				return -EINVAL;
			}
			*(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
			/* Load with the CPP IRQ quiesced. */
			disable_irq(cpp_dev->irq->start);
			cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
			enable_irq(cpp_dev->irq->start);
			cpp_dev->is_firmware_loaded = 1;
		}
		break;
	}
	case VIDIOC_MSM_CPP_CFG:
		rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
		break;
	case VIDIOC_MSM_CPP_FLUSH_QUEUE:
		rc = msm_cpp_flush_frames(cpp_dev);
		break;
	case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO: {
		/* Validate, copy in and register a stream's buffer list. */
		struct msm_cpp_stream_buff_info_t *u_stream_buff_info;
		struct msm_cpp_stream_buff_info_t k_stream_buff_info;
		if (sizeof(struct msm_cpp_stream_buff_info_t) !=
			ioctl_ptr->len) {
			pr_err("%s:%d: invalid length\n", __func__, __LINE__);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
		if (!u_stream_buff_info) {
			pr_err("%s:%d: malloc error\n", __func__, __LINE__);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		rc = (copy_from_user(u_stream_buff_info,
			(void __user *)ioctl_ptr->ioctl_ptr,
			ioctl_ptr->len) ? -EFAULT : 0);
		if (rc) {
			ERR_COPY_FROM_USER();
			kfree(u_stream_buff_info);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		if (u_stream_buff_info->num_buffs == 0) {
			pr_err("%s:%d: Invalid number of buffers\n", __func__,
				__LINE__);
			kfree(u_stream_buff_info);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
		k_stream_buff_info.identity = u_stream_buff_info->identity;
		/* Bound num_buffs before using it to size the allocation. */
		if (k_stream_buff_info.num_buffs > MSM_CAMERA_MAX_STREAM_BUF) {
			pr_err("%s:%d: unexpected large num buff requested\n",
				__func__, __LINE__);
			kfree(u_stream_buff_info);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		k_stream_buff_info.buffer_info =
			kzalloc(k_stream_buff_info.num_buffs *
			sizeof(struct msm_cpp_buffer_info_t), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(k_stream_buff_info.buffer_info)) {
			pr_err("%s:%d: malloc error\n", __func__, __LINE__);
			kfree(u_stream_buff_info);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		rc = (copy_from_user(k_stream_buff_info.buffer_info,
			(void __user *)u_stream_buff_info->buffer_info,
			k_stream_buff_info.num_buffs *
			sizeof(struct msm_cpp_buffer_info_t)) ?
			-EFAULT : 0);
		if (rc) {
			ERR_COPY_FROM_USER();
			kfree(k_stream_buff_info.buffer_info);
			kfree(u_stream_buff_info);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		rc = msm_cpp_add_buff_queue_entry(cpp_dev,
			((k_stream_buff_info.identity >> 16) & 0xFFFF),
			(k_stream_buff_info.identity & 0xFFFF));
		if (!rc)
			rc = msm_cpp_enqueue_buff_info_list(cpp_dev,
				&k_stream_buff_info);
		kfree(k_stream_buff_info.buffer_info);
		kfree(u_stream_buff_info);
		break;
	}
	case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO: {
		uint32_t identity;
		struct msm_cpp_buff_queue_info_t *buff_queue_info;
		if ((ioctl_ptr->len == 0) ||
			(ioctl_ptr->len > sizeof(uint32_t))) {
			/* BUGFIX: this early return previously left
			 * cpp_dev->mutex held, wedging the device. */
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		rc = (copy_from_user(&identity,
			(void __user *)ioctl_ptr->ioctl_ptr,
			ioctl_ptr->len) ? -EFAULT : 0);
		if (rc) {
			ERR_COPY_FROM_USER();
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
			((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
		if (buff_queue_info == NULL) {
			pr_err("error finding buffer queue entry for identity:%d\n",
				identity);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		msm_cpp_dequeue_buff_info_list(cpp_dev, buff_queue_info);
		rc = msm_cpp_free_buff_queue_entry(cpp_dev,
			buff_queue_info->session_id,
			buff_queue_info->stream_id);
		break;
	}
	case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
		/* Hand the oldest completed frame payload to user space. */
		struct msm_device_queue *queue = &cpp_dev->eventData_q;
		struct msm_queue_cmd *event_qcmd;
		struct msm_cpp_frame_info_t *process_frame;
		event_qcmd = msm_dequeue(queue, list_eventdata);
		if (event_qcmd) {
			process_frame = event_qcmd->command;
			CPP_DBG("fid %d\n", process_frame->frame_id);
			if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
				process_frame,
				sizeof(struct msm_cpp_frame_info_t))) {
				mutex_unlock(&cpp_dev->mutex);
				return -EINVAL;
			}
			kfree(process_frame->cpp_cmd_msg);
			kfree(process_frame);
			kfree(event_qcmd);
		} else {
			pr_err("Empty command list\n");
			mutex_unlock(&cpp_dev->mutex);
			return -EFAULT;
		}
		break;
	}
	case MSM_SD_SHUTDOWN: {
		/* cpp_close_node takes the mutex itself, so drop it first. */
		mutex_unlock(&cpp_dev->mutex);
		while (cpp_dev->cpp_open_cnt != 0)
			cpp_close_node(sd, NULL);
		mutex_lock(&cpp_dev->mutex);
		rc = 0;
		break;
	}
	case VIDIOC_MSM_CPP_QUEUE_BUF: {
		struct msm_pproc_queue_buf_info queue_buf_info;
		rc = (copy_from_user(&queue_buf_info,
			(void __user *)ioctl_ptr->ioctl_ptr,
			sizeof(struct msm_pproc_queue_buf_info)) ?
			-EFAULT : 0);
		if (rc) {
			ERR_COPY_FROM_USER();
			break;
		}
		/* Dirty buffers go back to the pool; clean ones complete. */
		if (queue_buf_info.is_buf_dirty) {
			rc = msm_cpp_buffer_ops(cpp_dev,
				VIDIOC_MSM_BUF_MNGR_PUT_BUF,
				&queue_buf_info.buff_mgr_info);
		} else {
			rc = msm_cpp_buffer_ops(cpp_dev,
				VIDIOC_MSM_BUF_MNGR_BUF_DONE,
				&queue_buf_info.buff_mgr_info);
		}
		if (rc < 0) {
			pr_err("error in buf done\n");
			rc = -EINVAL;
		}
		break;
	}
	default:
		/* Unknown commands fall through with rc == 0, matching
		 * the historical behaviour of this dispatcher. */
		break;
	}
	mutex_unlock(&cpp_dev->mutex);
	CPP_DBG("X\n");
	return rc;
}
/*
 * msm_cpp_subscribe_event - subscribe a file handle to CPP v4l2 events,
 * with a per-handle queue depth of MAX_CPP_V4l2_EVENTS.
 */
int msm_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	CPP_DBG("Called\n");
	return v4l2_event_subscribe(fh, sub, MAX_CPP_V4l2_EVENTS);
}
/* msm_cpp_unsubscribe_event - drop a handle's CPP event subscription. */
int msm_cpp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
	struct v4l2_event_subscription *sub)
{
	CPP_DBG("Called\n");
	return v4l2_event_unsubscribe(fh, sub);
}
/* Core subdev ops: ioctl dispatch plus event (un)subscription. */
static struct v4l2_subdev_core_ops msm_cpp_subdev_core_ops = {
	.ioctl = msm_cpp_subdev_ioctl,
	.subscribe_event = msm_cpp_subscribe_event,
	.unsubscribe_event = msm_cpp_unsubscribe_event,
};
static const struct v4l2_subdev_ops msm_cpp_subdev_ops = {
	.core = &msm_cpp_subdev_core_ops,
};
/* Populated at probe time from v4l2_subdev_fops with a custom ioctl. */
static struct v4l2_file_operations msm_cpp_v4l2_subdev_fops;
/*
 * msm_cpp_subdev_do_ioctl - file-ops ioctl body (invoked through
 * video_usercopy, so arg is already a kernel copy).
 *
 * Handles event dequeue/subscribe directly, answers
 * VIDIOC_MSM_CPP_GET_INST_INFO from the subscribe table, and forwards
 * everything else to the subdev core ioctl.
 */
static long msm_cpp_subdev_do_ioctl(
	struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	switch (cmd) {
	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;
		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
	case VIDIOC_MSM_CPP_GET_INST_INFO: {
		uint32_t i;
		struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
		struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
		struct msm_cpp_frame_info_t inst_info;
		memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info_t));
		/* Map this file handle back to its instance slot index. */
		for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
			if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
				inst_info.inst_id = i;
				break;
			}
		}
		if (copy_to_user(
				(void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
				sizeof(struct msm_cpp_frame_info_t))) {
			return -EINVAL;
		}
	}
	break;
	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}
	return 0;
}
/* Thin wrapper: let video_usercopy copy the ioctl payload in/out and
 * call msm_cpp_subdev_do_ioctl with a kernel-space arg. */
static long msm_cpp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	return video_usercopy(file, cmd, arg, msm_cpp_subdev_do_ioctl);
}
/*
 * cpp_register_domain - register the CPP IOMMU address-space layout:
 * a single partition covering [128K, 2G), leaving the first 128K
 * unmapped so a NULL-ish firmware pointer faults.
 * Returns the domain number from msm_register_domain (negative on error).
 */
static int cpp_register_domain(void)
{
	struct msm_iova_partition cpp_fw_partition = {
		.start = SZ_128K,
		.size = SZ_2G - SZ_128K,
	};
	struct msm_iova_layout cpp_fw_layout = {
		.partitions = &cpp_fw_partition,
		.npartitions = 1,
		.client_name = "camera_cpp",
		.domain_flags = 0,
	};
	return msm_register_domain(&cpp_fw_layout);
}
/*
 * cpp_probe - platform probe: map resources, register the subdev, do a
 * one-shot hardware sanity init, and set up queues/tasklet/watchdog
 * workqueue. The device is left powered off (CPP_STATE_OFF); real
 * power-up happens on first open.
 *
 * NOTE(review): several error paths look leaky — the "goto ERROR3" after
 * the cpp_dev->work allocation failure does not unregister the subdev,
 * clean up the media entity, kill the tasklet, or destroy timer_wq; and
 * create_workqueue()'s return value is never checked. Worth a follow-up
 * fix; not changed here.
 */
static int __devinit cpp_probe(struct platform_device *pdev)
{
	struct cpp_device *cpp_dev;
	int rc = 0;
	cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
	if (!cpp_dev) {
		pr_err("no enough memory\n");
		return -ENOMEM;
	}
	cpp_dev->cpp_clk = kzalloc(sizeof(struct clk *) *
		ARRAY_SIZE(cpp_clk_info), GFP_KERNEL);
	if (!cpp_dev->cpp_clk) {
		pr_err("no enough memory\n");
		rc = -ENOMEM;
		goto ERROR1;
	}
	/* Register as a v4l2 subdev with its own device node + events. */
	v4l2_subdev_init(&cpp_dev->msm_sd.sd, &msm_cpp_subdev_ops);
	cpp_dev->msm_sd.sd.internal_ops = &msm_cpp_internal_ops;
	snprintf(cpp_dev->msm_sd.sd.name, ARRAY_SIZE(cpp_dev->msm_sd.sd.name),
		"cpp");
	cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	v4l2_set_subdevdata(&cpp_dev->msm_sd.sd, cpp_dev);
	platform_set_drvdata(pdev, &cpp_dev->msm_sd.sd);
	mutex_init(&cpp_dev->mutex);
	spin_lock_init(&cpp_dev->tasklet_lock);
	/* DT boots derive the instance id from the cell-index property. */
	if (pdev->dev.of_node)
		of_property_read_u32((&pdev->dev)->of_node,
			"cell-index", &pdev->id);
	cpp_dev->pdev = pdev;
	/* Three register windows: core, VBIF and the hw debug block. */
	cpp_dev->mem = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "cpp");
	if (!cpp_dev->mem) {
		pr_err("no mem resource?\n");
		rc = -ENODEV;
		goto ERROR2;
	}
	cpp_dev->vbif_mem = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "cpp_vbif");
	if (!cpp_dev->vbif_mem) {
		pr_err("no mem resource?\n");
		rc = -ENODEV;
		goto ERROR2;
	}
	cpp_dev->cpp_hw_mem = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "cpp_hw");
	if (!cpp_dev->cpp_hw_mem) {
		pr_err("no mem resource?\n");
		rc = -ENODEV;
		goto ERROR2;
	}
	cpp_dev->irq = platform_get_resource_byname(pdev,
		IORESOURCE_IRQ, "cpp");
	if (!cpp_dev->irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto ERROR2;
	}
	cpp_dev->io = request_mem_region(cpp_dev->mem->start,
		resource_size(cpp_dev->mem), pdev->name);
	if (!cpp_dev->io) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto ERROR2;
	}
	/* IOMMU domain + context for firmware/buffer mappings. */
	cpp_dev->domain_num = cpp_register_domain();
	if (cpp_dev->domain_num < 0) {
		pr_err("%s: could not register domain\n", __func__);
		rc = -ENODEV;
		goto ERROR3;
	}
	cpp_dev->domain =
		msm_get_iommu_domain(cpp_dev->domain_num);
	if (!cpp_dev->domain) {
		pr_err("%s: cannot find domain\n", __func__);
		rc = -ENODEV;
		goto ERROR3;
	}
	cpp_dev->iommu_ctx = msm_iommu_get_ctx("cpp");
	if (IS_ERR(cpp_dev->iommu_ctx)) {
		pr_err("%s: cannot get iommu_ctx\n", __func__);
		rc = -EPROBE_DEFER;
		goto ERROR3;
	}
	media_entity_init(&cpp_dev->msm_sd.sd.entity, 0, NULL, 0);
	cpp_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	cpp_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CPP;
	cpp_dev->msm_sd.sd.entity.name = pdev->name;
	cpp_dev->msm_sd.close_seq = MSM_SD_CLOSE_3RD_CATEGORY;
	msm_sd_register(&cpp_dev->msm_sd);
	/* Clone the default subdev fops, overriding only unlocked_ioctl. */
	msm_cpp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
	msm_cpp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
	msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
	msm_cpp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
	msm_cpp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;
	cpp_dev->msm_sd.sd.devnode->fops = &msm_cpp_v4l2_subdev_fops;
	cpp_dev->msm_sd.sd.entity.revision = cpp_dev->msm_sd.sd.devnode->num;
	/* Briefly power up to mask/clear the micro IRQ, then power down. */
	cpp_dev->state = CPP_STATE_BOOT;
	rc = cpp_init_hardware(cpp_dev);
	if (rc < 0)
		goto CPP_PROBE_INIT_ERROR;
	msm_camera_io_w(0x0, cpp_dev->base +
		MSM_CPP_MICRO_IRQGEN_MASK);
	msm_camera_io_w(0xFFFF, cpp_dev->base +
		MSM_CPP_MICRO_IRQGEN_CLR);
	msm_camera_io_w(0x80000000, cpp_dev->base + 0xF0);
	cpp_release_hardware(cpp_dev);
	cpp_dev->state = CPP_STATE_OFF;
	msm_cpp_enable_debugfs(cpp_dev);
	msm_queue_init(&cpp_dev->eventData_q, "eventdata");
	msm_queue_init(&cpp_dev->processing_q, "frame");
	INIT_LIST_HEAD(&cpp_dev->tasklet_q);
	tasklet_init(&cpp_dev->cpp_tasklet, msm_cpp_do_tasklet,
		(unsigned long)cpp_dev);
	/* Workqueue + work item for the frame-timeout watchdog. */
	cpp_dev->timer_wq = create_workqueue("msm_cpp_workqueue");
	cpp_dev->work = kmalloc(sizeof(struct msm_cpp_work_t),
		GFP_KERNEL);
	if (!cpp_dev->work) {
		pr_err("cpp_dev->work is NULL\n");
		rc = -ENOMEM;
		goto ERROR3;
	}
	INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
	cpp_dev->cpp_open_cnt = 0;
	cpp_dev->is_firmware_loaded = 0;
	cpp_timer.data.cpp_dev = cpp_dev;
	atomic_set(&cpp_timer.used, 0);
	cpp_dev->fw_name_bin = NULL;
	return rc;
CPP_PROBE_INIT_ERROR:
	media_entity_cleanup(&cpp_dev->msm_sd.sd.entity);
	msm_sd_unregister(&cpp_dev->msm_sd);
ERROR3:
	release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
ERROR2:
	kfree(cpp_dev->cpp_clk);
ERROR1:
	kfree(cpp_dev);
	return rc;
}
/* Device-tree compatible strings this driver binds to. */
static const struct of_device_id msm_cpp_dt_match[] = {
	{.compatible = "qcom,cpp"},
	{}
};
/*
 * cpp_device_remove - platform remove: unregister the subdev, release
 * the three register-window regions and free driver allocations.
 *
 * NOTE(review): cpp_dev->fw_name_bin is not freed here, and the debugfs
 * directory created in msm_cpp_enable_debugfs() is never removed —
 * both look like small teardown leaks to confirm.
 */
static int cpp_device_remove(struct platform_device *dev)
{
	struct v4l2_subdev *sd = platform_get_drvdata(dev);
	struct cpp_device *cpp_dev;
	if (!sd) {
		pr_err("%s: Subdevice is NULL\n", __func__);
		return 0;
	}
	cpp_dev = (struct cpp_device *)v4l2_get_subdevdata(sd);
	if (!cpp_dev) {
		pr_err("%s: cpp device is NULL\n", __func__);
		return 0;
	}
	msm_sd_unregister(&cpp_dev->msm_sd);
	release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
	release_mem_region(cpp_dev->vbif_mem->start,
		resource_size(cpp_dev->vbif_mem));
	release_mem_region(cpp_dev->cpp_hw_mem->start,
		resource_size(cpp_dev->cpp_hw_mem));
	mutex_destroy(&cpp_dev->mutex);
	kfree(cpp_dev->work);
	destroy_workqueue(cpp_dev->timer_wq);
	kfree(cpp_dev->cpp_clk);
	kfree(cpp_dev);
	return 0;
}
/* Platform driver glue; matched via msm_cpp_dt_match on DT boots. */
static struct platform_driver cpp_driver = {
	.probe = cpp_probe,
	.remove = cpp_device_remove,
	.driver = {
		.name = MSM_CPP_DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_cpp_dt_match,
	},
};
/* Module init: register the CPP platform driver. */
static int __init msm_cpp_init_module(void)
{
	return platform_driver_register(&cpp_driver);
}
/* Module exit: unregister the CPP platform driver. */
static void __exit msm_cpp_exit_module(void)
{
	platform_driver_unregister(&cpp_driver);
}
/*
 * msm_cpp_debugfs_error_s - debugfs "error" write handler: arms the
 * induce_error counter so the next frame submissions corrupt one
 * command word each, exercising the timeout/recovery path.
 */
static int msm_cpp_debugfs_error_s(void *data, u64 val)
{
	pr_err("setting error inducement");
	induce_error = val;
	return 0;
}
/* Write-only attribute (no getter) backing the "error" debugfs file. */
DEFINE_SIMPLE_ATTRIBUTE(cpp_debugfs_error, NULL,
	msm_cpp_debugfs_error_s, "%llu\n");
/*
 * msm_cpp_enable_debugfs - create /sys/kernel/debug/msm_cpp/error for
 * fault injection. Returns 0 on success, -ENOMEM on debugfs failure.
 * NOTE(review): on file-creation failure the directory dentry is not
 * removed, and nothing tears this down on remove — minor leak.
 */
static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev)
{
	struct dentry *debugfs_base;
	debugfs_base = debugfs_create_dir("msm_cpp", NULL);
	if (!debugfs_base)
		return -ENOMEM;
	if (!debugfs_create_file("error", S_IRUGO | S_IWUSR, debugfs_base,
		(void *)cpp_dev, &cpp_debugfs_error))
		return -ENOMEM;
	return 0;
}
/* Module registration and metadata. */
module_init(msm_cpp_init_module);
module_exit(msm_cpp_exit_module);
MODULE_DESCRIPTION("MSM CPP driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Jazz-823/semc-kernel-msm7x30 | arch/x86/mm/pgtable_32.c | 596 | 3269 | #include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
/*
 * Install (or clear) the kernel page-table entry mapping @vaddr.
 * A zero @pteval clears the existing entry; a non-zero one installs it.
 * The intermediate levels (pgd/pud/pmd) must already be populated —
 * hitting a missing level is a kernel bug.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd = swapper_pg_dir + pgd_index(vaddr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd)) {
		BUG();
		return;
	}

	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}

	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_val(pteval))
		pte_clear(&init_mm, vaddr, pte);
	else
		set_pte_at(&init_mm, vaddr, pte, pteval);

	/* Flushing just this address suffices; PGE mappings are covered. */
	__flush_tlb_one(vaddr);
}
/*
* Associate a large virtual page frame with a given physical page frame
* and protection flags for that frame. pfn is for the base of the page,
* vaddr is what the page gets mapped to - both must be properly aligned.
* The pmd must already be instantiated. Assumes PAE mode.
*/
/*
 * Map a large (PMD-sized) page: point the PMD covering @vaddr at the
 * physical page starting at @pfn with protection @flags.
 * Both @vaddr and @pfn must be PMD-aligned and the PMD level must
 * already exist; misuse is reported but tolerated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}

	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/* One-address flush is enough (PGE mappings included). */
	__flush_tlb_one(vaddr);
}
/* Default top of the fixmap area.
 * NOTE(review): presumably lowered at boot through reserve_top_address()
 * (see parse_reservetop() below) — confirm against its definition. */
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the
* vmalloc area - the default is 128m.
*/
static int __init parse_vmalloc(char *arg)
{
	unsigned long long requested;

	if (arg == NULL)
		return -EINVAL;

	requested = memparse(arg, &arg);
	/* Grow the reserve by VMALLOC_OFFSET to cover the vm area guard hole */
	__VMALLOC_RESERVE = requested + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
/*
* reservetop=size reserves a hole at the top of the kernel address space which
* a hypervisor can load into later. Needed for dynamically loaded hypervisors,
* so relocating the fixmap can be done before paging initialization.
*/
static int __init parse_reservetop(char *arg)
{
	unsigned long hole;

	if (arg == NULL)
		return -EINVAL;

	/* Parse the requested hole size and hand it to the fixmap code. */
	hole = memparse(arg, &arg);
	reserve_top_address(hole);
	return 0;
}
early_param("reservetop", parse_reservetop);
| gpl-2.0 |
liquidware/liquidware_beagleboard_linux | net/rds/send.c | 852 | 28337 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include "rds.h"
#include "rdma.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
* time to time and briefly release the CPU. Otherwise the softlock watchdog
* will kick our shin.
* Also, it seems fairer to not let one busy connection stall all the
* others.
*
* send_batch_count is the number of times we'll loop in send_xmit. Setting
* it to 0 will restore the old behavior (where we looped until we had
* drained the queue).
*/
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
/*
* Reset the send state. Caller must hold c_send_lock when calling here.
*/
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	/* Drop the connection's reference on a message caught mid-transmit.
	 * The transport's mapping is flushed independently; telling the
	 * message it is unmapped is safe because the connection is down. */
	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}
	/* Rewind all per-connection transmit cursors to the start state. */
	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_map_queued = 0;
	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	/* Requeue everything on the retransmit list ahead of new sends. */
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
/*
* We're making the concious trade-off here to only send one message
* down the connection at a time.
* Pro:
* - tx queueing is a simple fifo list
* - reassembly is optional and easily done by transports per conn
* - no per flow rx lookup at all, straight to the socket
* - less per-frag memory and wire overhead
* Con:
* - queued acks can be delayed behind large messages
* Depends:
* - small message latency is higher behind queued large messages
* - large message latency isn't starved by intervening small sends
*/
/*
 * Push queued messages down one connection until the transport stops
 * making forward progress or the batch quota is exhausted.
 *
 * Returns 0 when the queue was drained, -ENOMEM when another thread
 * already holds c_send_lock, -EAGAIN when there is still work pending
 * (caller should reschedule the send worker), or a transport error.
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);
	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The sem holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the sem.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}
	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);
	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doens't make forward progress.
	 *
	 * NOTE(review): the pre-decrement means at most send_batch_count - 1
	 * passes through this loop — confirm that off-by-one is intended.
	 */
	while (--send_quota) {
		/*
		 * See if need to send a congestion map update if we're
		 * between sending messages. The send_sem protects our sole
		 * use of c_map_offset and _bytes.
		 * Note this is used only by transports that define a special
		 * xmit_cong_map function. For all others, we create allocate
		 * a cong_map message and treat it just like any other send.
		 */
		if (conn->c_map_bytes) {
			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
						conn->c_map_offset);
			if (ret <= 0)
				break;
			conn->c_map_offset += ret;
			conn->c_map_bytes -= ret;
			if (conn->c_map_bytes)
				continue;
		}
		/* If we're done sending the current message, clear the
		 * offset and S/G temporaries.
		 */
		rm = conn->c_xmit_rm;
		if (rm != NULL &&
		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
		    conn->c_xmit_sg == rm->m_nents) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			/* Release the reference to the previous message. */
			rds_message_put(rm);
			rm = NULL;
		}
		/* If we're asked to send a cong map update, do so.
		 */
		if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map != NULL) {
				conn->c_map_offset = 0;
				conn->c_map_bytes = sizeof(struct rds_header) +
					RDS_CONG_MAP_BYTES;
				continue;
			}
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			conn->c_xmit_rm = rm;
		}
		/*
		 * Grab the next message from the send queue, if there is one.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connction.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (rm == NULL) {
			unsigned int len;
			spin_lock_irqsave(&conn->c_lock, flags);
			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);
				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}
			spin_unlock_irqrestore(&conn->c_lock, flags);
			if (rm == NULL) {
				was_empty = 1;
				break;
			}
			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We cold possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->m_rdma_op &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}
			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}
			conn->c_xmit_rm = rm;
		}
		/*
		 * Try and send an rdma message.  Let's see if we can
		 * keep this simple and require that the transport either
		 * send the whole rdma or none of it.
		 */
		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}
		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
		    conn->c_xmit_sg < rm->m_nents) {
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;
			/* The transport reports total bytes moved; consume
			 * header bytes first, then walk the scatterlist. */
			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}
			sg = &rm->m_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->m_nents);
				}
			}
		}
	}
	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock.  If we check
	 * for queued messages after dropping the sem then either we'll
	 * see the queued message or the queuer will get the sem.  If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty.  It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);
	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
		/* We exhausted the send quota, but there's work left to
		 * do. Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}
	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
/* Return a message's payload bytes to the socket's send-buffer budget.
 * Caller must hold rs->rs_lock. */
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(len > rs->rs_snd_bytes);
	rs->rs_snd_bytes -= len;

	if (!rs->rs_snd_bytes)
		rds_stats_inc(s_send_queue_empty);
}
/* Has this message been acked up to sequence number @ack?  A transport
 * may supply its own predicate, which takes precedence over the plain
 * sequence-number comparison. */
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked != NULL)
		return is_acked(rm, ack);

	return ack >= be64_to_cpu(rm->m_inc.i_hdr.h_sequence);
}
/*
* Returns true if there are no messages on the send and retransmit queues
* which have a sequence number greater than or equal to the given sequence
* number.
*/
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;
	spin_lock(&conn->c_lock);
	/* Only the first entry of each queue is examined — the break is
	 * unconditional by design.  NOTE(review): this presumably relies on
	 * both queues being ordered by ascending sequence number; confirm. */
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}
	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}
	spin_unlock(&conn->c_lock);
	return ret;
}
/*
* This is pretty similar to what happens below in the ACK
* handling code - except that we call here as soon as we get
* the IB send completion on the RDMA op and the accompanying
* message.
*/
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_rdma_op *ro;
	struct rds_notifier *notifier;
	/* m_rs_lock pins rm->m_rs while we decide whether to notify. */
	spin_lock(&rm->m_rs_lock);
	ro = rm->m_rdma_op;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro && ro->r_notify && ro->r_notifier) {
		notifier = ro->r_notifier;
		rs = rm->m_rs;
		/* Hold the socket so it survives until we wake it below. */
		sock_hold(rds_rs_to_sk(rs));
		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);
		/* Clear r_notifier so the notification fires only once. */
		ro->r_notifier = NULL;
	}
	spin_unlock(&rm->m_rs_lock);
	/* Wake and release outside the message lock. */
	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
* This is the same as rds_rdma_send_complete except we
* don't do any locking - we have all the ingredients (message,
* socket, socket lock) and can just move the notifier.
*/
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rds_rdma_op *ro = rm->m_rdma_op;

	/* Nothing to do unless a notifier is armed on this RDMA op. */
	if (!ro || !ro->r_notify || !ro->r_notifier)
		return;

	ro->r_notifier->n_status = status;
	list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
	/* One-shot: disarm so the notification cannot fire twice. */
	ro->r_notifier = NULL;

	/* No need to wake the app - caller does this */
}
/*
* This is called from the IB send completion when we detect
* a RDMA operation that failed with remote access error.
* So speed is not an issue here.
*/
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rds_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;
	spin_lock_irqsave(&conn->c_lock, flags);
	/* Search retransmit queue first, then the send queue; the caller
	 * receives an extra reference on the match (must put it). */
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}
	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}
out:
	spin_unlock_irqrestore(&conn->c_lock, flags);
	/* NULL when no queued message owns this RDMA op. */
	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
* This removes messages from the socket's list if they're on it. The list
* argument must be private to the caller, we must be able to modify it
* without locks. The messages must have a reference held for their
* position on the list. This function will drop that reference after
* removing the messages from the 'messages' list regardless of if it found
* the messages on the socket list or not.
*/
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;
	while (!list_empty(messages)) {
		int was_on_sock = 0;
		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);
		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;
		/* Cache the socket across iterations: hold it on first use,
		 * wake/put the previous one when the socket changes. */
		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);
		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rds_rdma_op *ro = rm->m_rdma_op;
			struct rds_notifier *notifier;
			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);
			/* Queue the RDMA notifier; keep the first non-zero
			 * status so an error is not overwritten by success. */
			if (ro && ro->r_notifier && (status || ro->r_notify)) {
				notifier = ro->r_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->m_rdma_op->r_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);
unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		/* Drop the list's reference; drop the socket's reference too
		 * if we were the ones to take the message off the socket. */
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}
	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
/*
* Transports call here when they've determined that the receiver queued
* messages up to, and including, the given sequence number. Messages are
* moved to the retrans queue when rds_send_xmit picks them off the send
* queue. This means that in the TCP case, the message may not have been
* assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
* checks the RDS_MSG_HAS_ACK_SEQ bit.
*
* XXX It's not clear to me how this is safely serialized with socket
* destruction. Maybe it should bail if it sees SOCK_DEAD.
*/
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);
	spin_lock_irqsave(&conn->c_lock, flags);
	/* Collect acked messages off the retransmit queue; stop at the
	 * first unacked one (the walk stops, not skips, on failure). */
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;
		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}
	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();
	spin_unlock_irqrestore(&conn->c_lock, flags);
	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
/*
 * Drop every pending message on @rs destined for @dest (or all messages
 * when @dest is NULL), cancelling their RDMA notifiers and unlinking them
 * from both the socket and connection queues.
 */
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags, flags2;
	LIST_HEAD(list);
	int wake = 0;
	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);
	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;
		wake = 1;
		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}
	/* order flag updates with the rs lock */
	if (wake)
		smp_mb__after_clear_bit();
	spin_unlock_irqrestore(&rs->rs_lock, flags);
	conn = NULL;
	/* now remove the messages from the conn list as needed */
	list_for_each_entry(rm, &list, m_sock_item) {
		/* We do this here rather than in the loop above, so that
		 * we don't have to nest m_rs_lock under rs->rs_lock */
		spin_lock_irqsave(&rm->m_rs_lock, flags2);
		/* If this is a RDMA operation, notify the app. */
		spin_lock(&rs->rs_lock);
		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);
		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the conn.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 */
		if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
			continue;
		/* Lazily switch c_lock as messages may span connections. */
		if (conn != rm->m_inc.i_conn) {
			if (conn)
				spin_unlock_irqrestore(&conn->c_lock, flags);
			conn = rm->m_inc.i_conn;
			spin_lock_irqsave(&conn->c_lock, flags);
		}
		if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			list_del_init(&rm->m_conn_item);
			rds_message_put(rm);
		}
	}
	if (conn)
		spin_unlock_irqrestore(&conn->c_lock, flags);
	if (wake)
		rds_wake_sk_sleep(rs);
	/* Wait for in-flight use of each message, then drop the socket's
	 * reference taken when it was queued. */
	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
/*
* we only want this to fire once so we use the callers 'queued'. It's
* possible that another thread can race with us and remove the
* message from the flow with RDS_CANCEL_SENT_TO.
*/
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;
	/* Idempotent: once *queued is set a repeat call is a no-op. */
	if (*queued)
		goto out;
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);
	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;
		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		/* One reference for the socket's queue... */
		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;
		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		/* ...and one for the connection's queue. */
		rds_message_addref(rm);
		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);
		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
		*queued = 1;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	/* Nonzero iff the message is (now or previously) queued. */
	return *queued;
}
/* Walk the sendmsg control messages and apply the RDS-level ones.
 * Sets *allocated_mr when an RDMA_MAP cmsg created an MR on the fly,
 * so the caller can tear it down on failure.  Returns 0 or -errno. */
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->m_rdma_cookie and rm->m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;
		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;
		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (ret == 0)
				*allocated_mr = 1;
			break;
		default:
			return -EINVAL;
		}

		if (ret != 0)
			return ret;
	}

	return 0;
}
/*
 * sendmsg() entry point for RDS sockets: validate flags and destination,
 * copy the payload from user space, resolve/create the outgoing
 * connection, apply cmsgs, then queue the message (blocking if the send
 * buffer is full and the socket allows it).  Returns payload_len on
 * success or -errno.
 */
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	/* Mirror Linux UDP mirror of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}
	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
	if (IS_ERR(rm)) {
		ret = PTR_ERR(rm);
		rm = NULL;
		goto out;
	}
	rm->m_daddr = daddr;
	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}
	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;
	/* Reject RDMA requests on transports without RDMA support. */
	if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
	    conn->c_trans->xmit_rdma == NULL) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				rm->m_rdma_op, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	/* Queue the message, sleeping for send-buffer space as needed. */
	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}
		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;
		/* timeo <= 0: interrupted (-ERESTARTSYS et al.) or timed out. */
		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);
	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);
	rds_message_put(rm);
	return payload_len;
out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
	if (rm)
		rds_message_put(rm);
	return ret;
}
/*
* Reply to a ping packet.
*/
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;
	/* Zero-length message; GFP_ATOMIC because we may be in softirq. */
	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (rm == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	rm->m_daddr = conn->c_faddr;
	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;
	/* Queue directly on the connection: a pong has no owning socket,
	 * so it bypasses rds_send_queue_rm() and the sndbuf accounting. */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;
	/* Source port 0 marks this as a control (pong) message. */
	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);
	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);
	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	rds_message_put(rm);
	return 0;
out:
	if (rm)
		rds_message_put(rm);
	return ret;
}
| gpl-2.0 |
vfalico/hydra | drivers/misc/mic/host/mic_virtio.c | 852 | 21674 | /*
* Intel MIC Platform Software Stack (MPSS)
*
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Intel MIC Host driver.
*
*/
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/dmaengine.h>
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
#include "mic_virtio.h"
/*
* Size of the internal buffer used during DMA's as an intermediate buffer
* for copy to/from user.
*/
#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
/* Synchronously copy @len bytes from @src to @dst using the device's DMA
 * channel.  Returns 0 on success, -EBUSY when no channel is available,
 * -ENOMEM when no descriptor could be prepared, or a submit/wait error. */
static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	struct dma_chan *mic_ch = mdev->dma_ch;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int err = 0;

	if (!mic_ch) {
		err = -EBUSY;
		goto error;
	}

	tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
						    DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	}

	cookie = tx->tx_submit(tx);
	err = dma_submit_error(cookie);
	if (err)
		goto error;

	/* Block until the engine has completed this transfer. */
	err = dma_sync_wait(mic_ch, cookie);
error:
	if (err)
		dev_err(mdev->sdev->parent, "%s %d err %d\n",
			__func__, __LINE__, err);
	return err;
}
/*
* Initiates the copies across the PCIe bus from card memory to a user
* space buffer. When transfers are done using DMA, source/destination
* addresses and transfer length must follow the alignment requirements of
* the MIC DMA engine.
*/
static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
				   size_t len, u64 daddr, size_t dlen,
				   int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	/* NOTE(review): dbuf is computed and advanced below but never
	 * dereferenced on this path — looks like dead code; confirm. */
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
	size_t dma_offset;
	size_t partlen;
	int err;
	/* Round the card address down to the DMA alignment boundary and
	 * remember the offset so user copies skip the padding bytes. */
	dma_offset = daddr - round_down(daddr, dma_alignment);
	daddr -= dma_offset;
	len += dma_offset;
	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
		/* DMA card memory into the intermediate buffer... */
		err = mic_sync_dma(mdev, mvr->buf_da, daddr,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;
		/* ...then copy the payload (minus alignment padding) out. */
		if (copy_to_user(ubuf, mvr->buf + dma_offset,
				 partlen - dma_offset)) {
			err = -EFAULT;
			goto err;
		}
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->in_bytes_dma += partlen;
		mvdev->in_bytes += partlen;
		len -= partlen;
		/* Only the first chunk carries the alignment offset. */
		dma_offset = 0;
	}
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
/*
* Initiates copies across the PCIe bus from a user space buffer to card
* memory. When transfers are done using DMA, source/destination addresses
* and transfer length must follow the alignment requirements of the MIC
* DMA engine.
*/
static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
				     size_t len, u64 daddr, size_t dlen,
				     int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
	size_t partlen;
	int err;
	/* Fall back to a plain memcpy when the destination or the padded
	 * length cannot satisfy the DMA engine's alignment constraints. */
	if (daddr & (dma_alignment - 1)) {
		mvdev->tx_dst_unaligned += len;
		goto memcpy;
	} else if (ALIGN(len, dma_alignment) > dlen) {
		mvdev->tx_len_unaligned += len;
		goto memcpy;
	}
	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
		/* Stage user data in the intermediate buffer, then DMA it. */
		if (copy_from_user(mvr->buf, ubuf, partlen)) {
			err = -EFAULT;
			goto err;
		}
		err = mic_sync_dma(mdev, daddr, mvr->buf_da,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->out_bytes_dma += partlen;
		mvdev->out_bytes += partlen;
		len -= partlen;
	}
	/* When the DMA loop completes, len is 0 and the fallthrough copy
	 * below is a harmless zero-byte operation. */
memcpy:
	/*
	 * We are copying to IO below and should ideally use something
	 * like copy_from_user_toio(..) if it existed.
	 */
	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
		err = -EFAULT;
		goto err;
	}
	mvdev->out_bytes += len;
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
#define MIC_VRINGH_READ true
/* The function to call to notify the card about added buffers */
static void mic_notify(struct vringh *vrh)
{
	struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
	struct mic_vdev *mvdev = mvrh->mvdev;
	s8 db = mvdev->dc->h2c_vdev_db;

	/* A doorbell index of -1 means there is nothing to ring. */
	if (db == -1)
		return;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);
}
/* Total bytes consumed in a VRINGH KIOV: the lengths of all fully
 * processed entries plus the partially consumed bytes of the current
 * entry (tracked in iov->consumed). */
static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
{
	u32 nbytes = iov->consumed;
	int idx = 0;

	while (idx < iov->i) {
		nbytes += iov->iov[idx].iov_len;
		idx++;
	}
	return nbytes;
}
/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 *
 * @read selects direction: true copies card memory to @ubuf (read
 * descriptors), false copies @ubuf to card memory (write descriptors).
 * On return *@out_len holds the bytes actually transferred; the return
 * value is 0 or the first error from the copy helpers.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
	void __user *ubuf, size_t len, bool read, int vr_idx,
	size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;
	while (len && iov->i < iov->used) {
		/* Transfer at most what the current KIOV entry can hold. */
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		else
			ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		/* Record partial progress inside the current entry. */
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			/* The entry is fully consumed: restore its original
			 * len/base (undoing the adjustments above) before
			 * moving on, so later bookkeeping such as
			 * mic_vringh_iov_consumed() sees original sizes. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;
			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 *
 * Walks the user-supplied iovec array in @copy: for each user iovec,
 * first drains read descriptors (card -> user) then write descriptors
 * (user -> card). @copy->out_len reports the total bytes transferred
 * regardless of the return value. Returns <= 0 from
 * vringh_getdesc_kern() when no descriptor is available, 0 on success,
 * or the first error encountered.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;
	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
				head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			/* NOTE(review): copy_from_user() failure is mapped to
			 * -EINVAL here rather than the conventional -EFAULT —
			 * confirm whether callers rely on this. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
			copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
			copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;
		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		/* Mark the descriptor chain as returned to the ring. */
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
/* Validate that the vring index requested by user space exists on this
 * device. Returns 0 when valid, -EINVAL otherwise. */
static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
		struct mic_copy_desc *copy)
{
	if (copy->vr_idx < mvdev->dd->num_vq)
		return 0;

	dev_err(mic_dev(mvdev), "%s %d err %d\n",
		__func__, __LINE__, -EINVAL);
	return -EINVAL;
}
/*
 * Copy a specified number of virtio descriptors in a chain.
 *
 * Fix: the original computed &mvdev->mvr[copy->vr_idx] before
 * mic_verify_copy_args() had validated the user-controlled vr_idx,
 * forming an out-of-bounds pointer for a bogus index. Defer indexing
 * the mvr[] array until after validation succeeds.
 */
int mic_virtio_copy_desc(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int err;
	struct mic_vringh *mvr;

	err = mic_verify_copy_args(mvdev, copy);
	if (err)
		return err;
	/* vr_idx is now known to be in range. */
	mvr = &mvdev->mvr[copy->vr_idx];
	mutex_lock(&mvr->vr_mutex);
	if (!mic_vdevup(mvdev)) {
		err = -ENODEV;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto err;
	}
	err = _mic_virtio_copy(mvdev, copy);
	if (err) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
	}
err:
	mutex_unlock(&mvr->vr_mutex);
	return err;
}
/*
 * Second stage of device init: once the guest publishes the used-ring
 * addresses through the device page, point each host-side vringh at
 * the used ring inside the card's aperture.
 */
static void mic_virtio_init_post(struct mic_vdev *mvdev)
{
	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
	int i;
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		if (!le64_to_cpu(vqconfig[i].used_address)) {
			dev_warn(mic_dev(mvdev), "used_address zero??\n");
			continue;
		}
		/* Translate the card-relative address into the host aperture. */
		mvdev->mvr[i].vrh.vring.used =
			(void __force *)mvdev->mdev->aper.va +
			le64_to_cpu(vqconfig[i].used_address);
	}
	/* Acknowledge that the used-address update has been consumed. */
	mvdev->dc->used_address_updated = 0;
	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
		__func__, mvdev->virtio_id);
}
/*
 * Reset a virtio device: quiesce every ring under its mutex, clear the
 * device status and all ring indices, then ack the reset to the guest
 * via host_ack. All ring mutexes are held across the whole reset so
 * no copy can observe a half-reset ring.
 */
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;
	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	mvdev->dc->host_ack = 1;
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;
		/* Rewind the ring to its initial, empty state. */
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}
void mic_virtio_reset_devices(struct mic_device *mdev)
{
struct list_head *pos, *tmp;
struct mic_vdev *mvdev;
dev_dbg(mdev->sdev->parent, "%s\n", __func__);
list_for_each_safe(pos, tmp, &mdev->vdev_list) {
mvdev = list_entry(pos, struct mic_vdev, list);
mic_virtio_device_reset(mvdev);
mvdev->poll_wake = 1;
wake_up(&mvdev->waitq);
}
}
/*
 * Bottom half for the virtio doorbell interrupt: perform any pending
 * post-init or reset work signalled through the device control page,
 * then wake pollers.
 */
void mic_bh_handler(struct work_struct *work)
{
	struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
			virtio_bh_work);
	if (mvdev->dc->used_address_updated)
		mic_virtio_init_post(mvdev);
	if (mvdev->dc->vdev_reset)
		mic_virtio_device_reset(mvdev);
	/* Let poll()ers know something changed. */
	mvdev->poll_wake = 1;
	wake_up(&mvdev->waitq);
}
/*
 * Hard IRQ handler for the per-device virtio doorbell: apply the
 * hardware interrupt workarounds, then defer the real processing to
 * mic_bh_handler() in process context.
 */
static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
{
	struct mic_vdev *mvdev = data;
	struct mic_device *mdev = mvdev->mdev;
	mdev->ops->intr_workarounds(mdev);
	schedule_work(&mvdev->virtio_bh_work);
	return IRQ_HANDLED;
}
/*
 * Push a new virtio config space (from user space) to the card and
 * wait for the guest to acknowledge the change.
 *
 * NOTE(review): @wake is an on-stack waitqueue that nobody ever wakes,
 * so each wait_event_timeout() is effectively a 100ms polling sleep on
 * dc->guest_ack (up to ~10s total). Also, on a successful ack the
 * function returns the positive remaining-jiffies value from
 * wait_event_timeout(), and on timeout it returns 0 — confirm callers
 * expect these return semantics.
 */
int mic_virtio_config_change(struct mic_vdev *mvdev,
	void __user *argp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;
	mutex_lock(&mvdev->mdev->mic_mutex);
	/* Quiesce all rings while the config space is rewritten. */
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
	/* Bail out if the guest has no config doorbell or no device type. */
	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}
	if (copy_from_user(mic_vq_configspace(mvdev->dd),
			argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	/* Clear the handshake fields for the next exchange. */
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}
/*
 * Copy a new device descriptor supplied by user space into the first
 * free slot of the device page.
 *
 * Security fix: the descriptor header is fetched from user memory
 * twice — once (into @dd) to size/validate the allocation and once
 * (into @dd_config) for the full copy. A racing user thread could
 * change the header between the two fetches (classic double-fetch
 * bug), invalidating the earlier size checks. Re-validate the second
 * fetch against the first before trusting any derived sizes.
 *
 * On success *@type receives the device type (deferred — the in-page
 * type stays 0 until init completes) and *@devpage points at the
 * descriptor inside the device page. Returns 0 or a negative errno.
 */
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
			void __user *argp,
			__u8 *type,
			struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;
	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}
	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
	    dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/* Ensure desc has not changed between the two reads from user space */
	if (memcmp(&dd, dd_config, sizeof(dd))) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}
	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		/* type 0 = never used, type -1 = previously removed */
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));
	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}
/* Locate the device-control area that immediately follows a device
 * page descriptor, reset all handshake state in it, and cache the
 * pointer on @mvdev. */
static void mic_init_device_ctrl(struct mic_vdev *mvdev,
		struct mic_device_desc *devpage)
{
	/* The control structure sits right after the aligned descriptor. */
	struct mic_device_ctrl *dc =
		(void *)devpage + mic_aligned_desc_size(devpage);

	/* No doorbells have been assigned yet in either direction. */
	dc->c2h_vdev_db = -1;
	dc->h2c_vdev_db = -1;
	/* Clear all host/guest handshake flags. */
	dc->config_change = 0;
	dc->guest_ack = 0;
	dc->vdev_reset = 0;
	dc->host_ack = 0;
	dc->used_address_updated = 0;
	mvdev->dc = dc;
}
/*
 * Create a new virtio device on the host: copy the descriptor from
 * user space into the device page, allocate/map a vring and a DMA
 * bounce buffer per queue, wire up the vringh callbacks and the
 * doorbell IRQ, then publish the device type so the card can see it.
 *
 * Returns 0 on success; on failure unwinds any vrings allocated so
 * far and returns a negative errno.
 *
 * NOTE(review): the __get_free_pages()/mic_map_single() results for
 * mvr->buf and mvr->buf_da are not checked for failure, and the err:
 * path does not free mvr->buf for already-initialized queues — confirm
 * against the upstream driver.
 */
int mic_virtio_add_device(struct mic_vdev *mvdev,
			void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;
	dma_addr_t vr_addr;
	mutex_lock(&mdev->mic_mutex);
	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}
	mic_init_device_ctrl(mvdev, dd);
	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
	/* Allocate and publish one vring per queue. */
	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;
		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		/* Ring plus the trailing _mic_vring_info, page aligned. */
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
		/* Make the ring visible to the card over PCIe. */
		vr_addr = mic_map_single(mdev, vr->va, vr_size);
		if (mic_map_error(vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vr_addr);
		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		/* USHRT_MAX marks "no descriptor chain in flight". */
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
		/* Per-ring bounce buffer used for DMA staging. */
		mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
			get_order(MIC_INT_DMA_BUF_SIZE));
		mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
					MIC_INT_DMA_BUF_SIZE);
	}
	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		 mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
					mic_virtio_intr_handler,
					NULL, irqname, mvdev,
					mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}
	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;
	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);
	/* Kick the card so it rescans the device page. */
	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	/* Unwind only the rings fully set up before the failure (j < i). */
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
				mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}
/*
 * Hot-remove a virtio device: ask the guest to release it (best
 * effort, polled like mic_virtio_config_change()), then free the IRQ,
 * per-ring DMA buffers and vrings, unlink the device from the list and
 * mark its device page slot as removed (type -1).
 */
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;
	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	/* No config doorbell yet -> guest can't be asked; just tear down. */
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	/* Poll up to ~10s for guest_ack; nobody ever wakes `wake`. */
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d retry %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	/* Make sure the bottom half is not still using this device. */
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		/* Release the DMA bounce buffer, KIOVs and the vring itself. */
		mic_unmap_single(mvdev->mdev, mvr->buf_da,
				MIC_INT_DMA_BUF_SIZE);
		free_pages((unsigned long)mvr->buf,
			get_order(MIC_INT_DMA_BUF_SIZE));
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
				mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}
| gpl-2.0 |
TeamDS/htc-kernel-doubleshot_26 | drivers/hwmon/via-cputemp.c | 852 | 8135 | /*
* via-cputemp.c - Driver for VIA CPU core temperature monitoring
* Copyright (C) 2009 VIA Technologies, Inc.
*
* based on existing coretemp.c, which is
*
* Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <asm/msr.h>
#include <asm/processor.h>
#define DRVNAME "via_cputemp"
enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
/*
* Functions declaration
*/
/* Per-CPU driver state attached to each platform device. */
struct via_cputemp_data {
	struct device *hwmon_dev;	/* registered hwmon class device */
	const char *name;		/* driver name shown via sysfs */
	u32 id;				/* CPU number this sensor reads */
	u32 msr;			/* model-specific temperature MSR */
};
/*
* Sysfs stuff
*/
/* sysfs callback shared by the "name" and "temp1_label" attributes;
 * attr->index distinguishes which one is being read. */
static ssize_t show_name(struct device *dev, struct device_attribute
			 *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct via_cputemp_data *data = dev_get_drvdata(dev);

	if (attr->index == SHOW_NAME)
		return sprintf(buf, "%s\n", data->name);

	/* SHOW_LABEL: identify which core this sensor belongs to. */
	return sprintf(buf, "Core %d\n", data->id);
}
/* Read the temperature MSR on the target CPU and report the value in
 * millidegrees Celsius via sysfs. */
static ssize_t show_temp(struct device *dev,
			 struct device_attribute *devattr, char *buf)
{
	struct via_cputemp_data *data = dev_get_drvdata(dev);
	u32 eax, edx;

	if (rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx))
		return -EAGAIN;

	/* Bits 23:0 of EAX hold degrees Celsius; scale to millidegrees. */
	return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
}
/* temp1_input: current core temperature in millidegrees Celsius */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
			  SHOW_TEMP);
/* temp1_label: "Core N" string identifying the CPU */
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
/* name: driver name, required by the hwmon class */
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
static struct attribute *via_cputemp_attributes[] = {
	&sensor_dev_attr_name.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
static const struct attribute_group via_cputemp_group = {
	.attrs = via_cputemp_attributes,
};
/*
 * Bind one per-CPU platform device: pick the temperature MSR for the
 * CPU model, verify it is readable, then expose it through hwmon/sysfs.
 */
static int __devinit via_cputemp_probe(struct platform_device *pdev)
{
	struct via_cputemp_data *data;
	struct cpuinfo_x86 *c = &cpu_data(pdev->id);
	int err;
	u32 eax, edx;
	data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Out of memory\n");
		goto exit;
	}
	/* pdev->id is the CPU number this sensor device represents. */
	data->id = pdev->id;
	data->name = "via_cputemp";
	switch (c->x86_model) {
	case 0xA:
		/* C7 A */
	case 0xD:
		/* C7 D */
		data->msr = 0x1169;
		break;
	case 0xF:
		/* Nano */
		data->msr = 0x1423;
		break;
	default:
		err = -ENODEV;
		goto exit_free;
	}
	/* test if we can access the TEMPERATURE MSR */
	err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
	if (err) {
		dev_err(&pdev->dev,
			"Unable to access TEMPERATURE MSR, giving up\n");
		goto exit_free;
	}
	platform_set_drvdata(pdev, data);
	err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group);
	if (err)
		goto exit_free;
	data->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		dev_err(&pdev->dev, "Class registration failed (%d)\n",
			err);
		goto exit_remove;
	}
	return 0;
exit_remove:
	sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
exit_free:
	platform_set_drvdata(pdev, NULL);
	kfree(data);
exit:
	return err;
}
/* Tear down the hwmon interface created by via_cputemp_probe(). */
static int __devexit via_cputemp_remove(struct platform_device *pdev)
{
	struct via_cputemp_data *priv = platform_get_drvdata(pdev);

	/* Unregister the hwmon class device before removing its attributes. */
	hwmon_device_unregister(priv->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
	platform_set_drvdata(pdev, NULL);
	kfree(priv);
	return 0;
}
static struct platform_driver via_cputemp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRVNAME,
	},
	.probe = via_cputemp_probe,
	.remove = __devexit_p(via_cputemp_remove),
};
/* Bookkeeping entry for each per-CPU platform device we created. */
struct pdev_entry {
	struct list_head list;
	struct platform_device *pdev;
	unsigned int cpu;
};
/* List of per-CPU sensor devices, guarded by pdev_list_mutex. */
static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);
/*
 * Create and register a platform device for @cpu and remember it on
 * pdev_list so it can be torn down on hot-unplug or module exit.
 */
static int __cpuinit via_cputemp_device_add(unsigned int cpu)
{
	int err;
	struct platform_device *pdev;
	struct pdev_entry *pdev_entry;
	pdev = platform_device_alloc(DRVNAME, cpu);
	if (!pdev) {
		err = -ENOMEM;
		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
		goto exit;
	}
	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
	if (!pdev_entry) {
		err = -ENOMEM;
		goto exit_device_put;
	}
	/* This triggers via_cputemp_probe(), which may legitimately fail
	 * (unsupported model, unreadable MSR). */
	err = platform_device_add(pdev);
	if (err) {
		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
		       err);
		goto exit_device_free;
	}
	pdev_entry->pdev = pdev;
	pdev_entry->cpu = cpu;
	mutex_lock(&pdev_list_mutex);
	list_add_tail(&pdev_entry->list, &pdev_list);
	mutex_unlock(&pdev_list_mutex);
	return 0;
exit_device_free:
	kfree(pdev_entry);
exit_device_put:
	platform_device_put(pdev);
exit:
	return err;
}
#ifdef CONFIG_HOTPLUG_CPU
/* Unregister and free the platform device created for @cpu, if any. */
static void via_cputemp_device_remove(unsigned int cpu)
{
	struct pdev_entry *entry, *next;

	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(entry, next, &pdev_list, list) {
		if (entry->cpu != cpu)
			continue;
		platform_device_unregister(entry->pdev);
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&pdev_list_mutex);
}
/* CPU hotplug callback: mirror CPU online/offline transitions onto
 * the per-CPU sensor devices. */
static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	if (action == CPU_ONLINE || action == CPU_DOWN_FAILED)
		via_cputemp_device_add(cpu);
	else if (action == CPU_DOWN_PREPARE)
		via_cputemp_device_remove(cpu);

	return NOTIFY_OK;
}
/* Hotplug notifier wiring via_cputemp_cpu_callback() into the CPU
 * notifier chain. */
static struct notifier_block via_cputemp_cpu_notifier __refdata = {
	.notifier_call = via_cputemp_cpu_callback,
};
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Module init: verify we are on a VIA/Centaur CPU, register the
 * platform driver, create one sensor device per supported online CPU
 * and hook CPU hotplug notifications.
 *
 * NOTE(review): CPUs can come online between the for_each_online_cpu()
 * scan and register_hotcpu_notifier(); comparable drivers bracket this
 * with get_online_cpus()/put_online_cpus() — confirm whether that is
 * needed here.
 */
static int __init via_cputemp_init(void)
{
	int i, err;
	struct pdev_entry *p, *n;
	if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
		printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
		err = -ENODEV;
		goto exit;
	}
	err = platform_driver_register(&via_cputemp_driver);
	if (err)
		goto exit;
	for_each_online_cpu(i) {
		struct cpuinfo_x86 *c = &cpu_data(i);
		/* Only family 6, models 0x0a..0x0f (C7/Nano) are supported. */
		if (c->x86 != 6)
			continue;
		if (c->x86_model < 0x0a)
			continue;
		if (c->x86_model > 0x0f) {
			printk(KERN_WARNING DRVNAME ": Unknown CPU "
				"model 0x%x\n", c->x86_model);
			continue;
		}
		err = via_cputemp_device_add(i);
		if (err)
			goto exit_devices_unreg;
	}
	/* No supported CPU found: unwind rather than idle as a no-op. */
	if (list_empty(&pdev_list)) {
		err = -ENODEV;
		goto exit_driver_unreg;
	}
#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&via_cputemp_cpu_notifier);
#endif
	return 0;
exit_devices_unreg:
	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(p, n, &pdev_list, list) {
		platform_device_unregister(p->pdev);
		list_del(&p->list);
		kfree(p);
	}
	mutex_unlock(&pdev_list_mutex);
exit_driver_unreg:
	platform_driver_unregister(&via_cputemp_driver);
exit:
	return err;
}
/* Module exit: stop hotplug notifications, destroy every per-CPU
 * sensor device, then unregister the platform driver. */
static void __exit via_cputemp_exit(void)
{
	struct pdev_entry *entry, *next;

#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
#endif
	mutex_lock(&pdev_list_mutex);
	list_for_each_entry_safe(entry, next, &pdev_list, list) {
		platform_device_unregister(entry->pdev);
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&pdev_list_mutex);
	platform_driver_unregister(&via_cputemp_driver);
}
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
MODULE_DESCRIPTION("VIA CPU temperature monitor");
MODULE_LICENSE("GPL");
module_init(via_cputemp_init)
module_exit(via_cputemp_exit)
| gpl-2.0 |
friedrich420/AEL_NOTE4_N910FXXU1ANK4 | drivers/ata/ata_piix.c | 1108 | 51233 | /*
* ata_piix.c - Intel PATA/SATA controllers
*
* Maintained by: Tejun Heo <tj@kernel.org>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
*
* Copyright 2003-2005 Red Hat Inc
* Copyright 2003-2005 Jeff Garzik
*
*
* Copyright header from piix.c:
*
* Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat Inc
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware documentation available at http://developer.intel.com/
*
* Documentation
* Publicly available from Intel web site. Errata documentation
* is also publicly available. As an aide to anyone hacking on this
* driver the list of errata that are relevant is below, going back to
* PIIX4. Older device documentation is now a bit tricky to find.
*
* The chipsets all follow very much the same design. The original Triton
* series chipsets do _not_ support independent device timings, but this
* is fixed in Triton II. With the odd mobile exception the chips then
* change little except in gaining more modes until SATA arrives. This
* driver supports only the chips with independent timing (that is those
* with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
* for the early chip drivers.
*
* Errata of note:
*
* Unfixable
* PIIX4 errata #9 - Only on ultra obscure hw
* ICH3 errata #13 - Not observed to affect real hw
* by Intel
*
* Things we must deal with
* PIIX4 errata #10 - BM IDE hang with non UDMA
* (must stop/start dma to recover)
* 440MX errata #15 - As PIIX4 errata #10
* PIIX4 errata #15 - Must not read control registers
* during a PIO transfer
* 440MX errata #13 - As PIIX4 errata #15
* ICH2 errata #21 - DMA mode 0 doesn't work right
* ICH0/1 errata #55 - As ICH2 errata #21
* ICH2 spec c #9 - Extra operations needed to handle
* drive hotswap [NOT YET SUPPORTED]
* ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
* and must be dword aligned
* ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
* ICH7 errata #16 - MWDMA1 timings are incorrect
*
* Should have been BIOS fixed:
* 450NX: errata #19 - DMA hangs on old 450NX
* 450NX: errata #20 - DMA hangs on old 450NX
* 450NX: errata #25 - Corruption with DMA on old 450NX
* ICH3 errata #15 - IDE deadlock under high load
* (BIOS must set dev 31 fn 0 bit 23)
* ICH3 errata #18 - Don't use native mode
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "2.13"
/* Register offsets, feature flags and port-map constants for the PIIX
 * family of controllers. */
enum {
	PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
	ICH5_PMR = 0x90, /* port mapping register */
	ICH5_PCS = 0x92, /* port control and status */
	PIIX_SIDPR_BAR = 5,
	PIIX_SIDPR_LEN = 16,
	PIIX_SIDPR_IDX = 0,
	PIIX_SIDPR_DATA = 4,
	PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
	PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */
	PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
	PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
	PIIX_FLAG_PIO16 = (1 << 30), /*support 16bit PIO only*/
	/* 80-conductor cable detect bits in PIIX_IOCFG */
	PIIX_80C_PRI = (1 << 5) | (1 << 4),
	PIIX_80C_SEC = (1 << 7) | (1 << 6),
	/* constants for mapping table */
	P0 = 0, /* port 0 */
	P1 = 1, /* port 1 */
	P2 = 2, /* port 2 */
	P3 = 3, /* port 3 */
	IDE = -1, /* IDE */
	NA = -2, /* not available */
	RV = -3, /* reserved */
	PIIX_AHCI_DEVICE = 6,
	/* host->flags bits */
	PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
};
/* Indices into the driver's per-controller port-info tables. */
enum piix_controller_ids {
	/* controller IDs */
	piix_pata_mwdma, /* PIIX3 MWDMA only */
	piix_pata_33, /* PIIX4 at 33Mhz */
	ich_pata_33, /* ICH up to UDMA 33 only */
	ich_pata_66, /* ICH up to 66 Mhz */
	ich_pata_100, /* ICH up to UDMA 100 */
	ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1*/
	ich5_sata,
	ich6_sata,
	ich6m_sata,
	ich8_sata,
	ich8_2port_sata,
	ich8m_apple_sata, /* locks up on second port enable */
	tolapai_sata,
	piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
	ich8_sata_snb,
	ich8_2port_sata_snb,
	ich8_2port_sata_byt,
};
/* SATA port-map lookup table: which map applies for a given value of
 * the port mapping register. */
struct piix_map_db {
	const u32 mask;
	const u16 port_enable;
	const int map[][4];
};
struct piix_host_priv {
	const int *map;		/* active port map (from piix_map_db) */
	u32 saved_iocfg;	/* PIIX_IOCFG saved for suspend/restore */
	void __iomem *sidpr;	/* mapped SATA idx/data pair registers */
};
/* Nonzero while module init is in progress — presumably cleared when
 * init completes; its use is not visible in this chunk (TODO confirm). */
static unsigned int in_module_init = 1;
/* PCI IDs handled by this driver; driver_data selects the
 * enum piix_controller_ids entry for each device. */
static const struct pci_device_id piix_pci_tbl[] = {
	/* Intel PIIX3 for the 430HX etc */
	{ 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
	/* VMware ICH4 */
	{ 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw },
	/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
	/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel PIIX4 */
	{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel PIIX4 */
	{ 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel PIIX */
	{ 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel ICH (i810, i815, i840) UDMA 66*/
	{ 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
	/* Intel ICH0 : UDMA 33*/
	{ 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
	/* Intel ICH2M */
	{ 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
	{ 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH3M */
	{ 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH3 (E7500/1) UDMA 100 */
	{ 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH4-L */
	{ 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
	{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH5 */
	{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* C-ICH (i810E2) */
	{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* ESB (855GME/875P + 6300ESB) UDMA 100 */
	{ 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* ICH6 (and 6) (i915) UDMA 100 */
	{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* ICH7/7-R (i945, i975) UDMA 100*/
	{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
	{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
	/* ICH8 Mobile PATA Controller */
	{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },

	/* SATA ports */

	/* 82801EB (ICH5) */
	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 82801EB (ICH5) */
	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 6300ESB (ICH5 variant with broken PCS present bits) */
	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 6300ESB pretending RAID */
	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 82801FB/FW (ICH6/ICH6W) */
	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 82801FR/FRW (ICH6R/ICH6RW) */
	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
	 * Attach iff the controller is in IDE mode. */
	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
	/* Enterprise Southbridge 2 (631xESB/632xESB) */
	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* SATA Controller 1 IDE (ICH8) */
	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller 2 IDE (ICH8) */
	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* Mobile SATA Controller IDE (ICH8M), Apple */
	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
	{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
	{ 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
	/* Mobile SATA Controller IDE (ICH8M) */
	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH9) */
	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH9) */
	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9) */
	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9M) */
	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9M) */
	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9M) */
	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (Tolapai) */
	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (CPT) */
	{ 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (CPT) */
	{ 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (CPT) */
	{ 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (CPT) */
	{ 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PBG) */
	{ 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (PBG) */
	{ 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Panther Point) */
	{ 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Panther Point) */
	{ 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Panther Point) */
	{ 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Panther Point) */
	{ 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Lynx Point) */
	{ 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Lynx Point) */
	{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Lynx Point) */
	{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
	/* SATA Controller IDE (Lynx Point) */
	{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Lynx Point-LP) */
	{ 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Lynx Point-LP) */
	{ 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Lynx Point-LP) */
	{ 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Lynx Point-LP) */
	{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (DH89xxCC) */
	{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Avoton) */
	{ 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Avoton) */
	{ 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Avoton) */
	{ 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Avoton) */
	{ 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (Wellsburg) */
	{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Wellsburg) */
	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
	/* SATA Controller IDE (Wellsburg) */
	{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
	/* SATA Controller IDE (Wellsburg) */
	{ 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (BayTrail) */
	{ 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
	{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
	/* SATA Controller IDE (Coleto Creek) */
	{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },

	{ }	/* terminate list */
};
/* ICH5: 3-bit MAP value, two SATA ports sharing with legacy IDE */
static const struct piix_map_db ich5_map_db = {
	.mask = 0x7,
	.port_enable = 0x3,
	.map = {
		/* PM   PS   SM   SS       MAP  */
		{  P0,  NA,  P1,  NA }, /* 000b */
		{  P1,  NA,  P0,  NA }, /* 001b */
		{  RV,  RV,  RV,  RV },
		{  RV,  RV,  RV,  RV },
		{  P0,  P1, IDE, IDE }, /* 100b */
		{  P1,  P0, IDE, IDE }, /* 101b */
		{ IDE, IDE,  P0,  P1 }, /* 110b */
		{ IDE, IDE,  P1,  P0 }, /* 111b */
	},
};
/* ICH6/ICH7 desktop: 2-bit MAP value, up to four SATA ports */
static const struct piix_map_db ich6_map_db = {
	.mask = 0x3,
	.port_enable = 0xf,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  P2,  P1,  P3 }, /* 00b */
		{ IDE, IDE,  P1,  P3 }, /* 01b */
		{  P0,  P2, IDE, IDE }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
/* ICH6M/ICH7M mobile: only ports 0 and 2 are implemented */
static const struct piix_map_db ich6m_map_db = {
	.mask = 0x3,
	.port_enable = 0x5,

	/* Map 01b isn't specified in the doc but some notebooks use
	 * it anyway.  MAP 01b have been spotted on both ICH6M and
	 * ICH7M.
	 */
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  P2,  NA,  NA }, /* 00b */
		{ IDE, IDE,  P1,  P3 }, /* 01b */
		{  P0,  P2, IDE, IDE }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
/* ICH8+: mapping is fixed per mode (AHCI vs IDE) */
static const struct piix_map_db ich8_map_db = {
	.mask = 0x3,
	.port_enable = 0xf,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  P2,  P1,  P3 }, /* 00b (hardwired when in AHCI) */
		{  RV,  RV,  RV,  RV },
		{  P0,  P2, IDE, IDE }, /* 10b (IDE mode) */
		{  RV,  RV,  RV,  RV },
	},
};
/* ICH8 second controller: two SATA ports, no IDE sharing */
static const struct piix_map_db ich8_2port_map_db = {
	.mask = 0x3,
	.port_enable = 0x3,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  NA,  P1,  NA }, /* 00b */
		{  RV,  RV,  RV,  RV }, /* 01b */
		{  RV,  RV,  RV,  RV }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
/* Apple ICH8M: only enable port 0 — enabling the second port locks up
 * the controller (see ich8m_apple_sata comment in the enum above). */
static const struct piix_map_db ich8m_apple_map_db = {
	.mask = 0x3,
	.port_enable = 0x1,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  NA,  NA,  NA }, /* 00b */
		{  RV,  RV,  RV,  RV },
		{  P0,  P2, IDE, IDE }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
/* Tolapai: two SATA ports, only MAP 00b is valid */
static const struct piix_map_db tolapai_map_db = {
	.mask = 0x3,
	.port_enable = 0x3,
	.map = {
		/* PM   PS   SM   SS       MAP */
		{  P0,  NA,  P1,  NA }, /* 00b */
		{  RV,  RV,  RV,  RV }, /* 01b */
		{  RV,  RV,  RV,  RV }, /* 10b */
		{  RV,  RV,  RV,  RV },
	},
};
/* Controller-id -> map_db lookup; PATA entries intentionally absent (NULL) */
static const struct piix_map_db *piix_map_db_table[] = {
	[ich5_sata]		= &ich5_map_db,
	[ich6_sata]		= &ich6_map_db,
	[ich6m_sata]		= &ich6m_map_db,
	[ich8_sata]		= &ich8_map_db,
	[ich8_2port_sata]	= &ich8_2port_map_db,
	[ich8m_apple_sata]	= &ich8m_apple_map_db,
	[tolapai_sata]		= &tolapai_map_db,
	[ich8_sata_snb]		= &ich8_map_db,
	[ich8_2port_sata_snb]	= &ich8_2port_map_db,
	[ich8_2port_sata_byt]	= &ich8_2port_map_db,
};
/* Per-channel "IDE decode enable" bits in PCI config space, used by
 * piix_pata_prereset() to skip channels the BIOS left disabled. */
static struct pci_bits piix_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
};
/* Module metadata and PCI hotplug device table export */
MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/* Identifies a laptop by device + subsystem IDs for cable-type quirks. */
struct ich_laptop {
	u16 device;	/* PCI device ID of the ICH PATA function */
	u16 subvendor;	/* PCI subsystem vendor ID */
	u16 subdevice;	/* PCI subsystem device ID */
};
/*
 *	List of laptops that use short cables rather than 80 wire
 */

static const struct ich_laptop ich_laptop[] = {
	/* devid, subvendor, subdev */
	{ 0x27DF, 0x0005, 0x0280 },	/* ICH7 on Acer 5602WLMi */
	{ 0x27DF, 0x1025, 0x0102 },	/* ICH7 on Acer 5602aWLMi */
	{ 0x27DF, 0x1025, 0x0110 },	/* ICH7 on Acer 3682WLMi */
	{ 0x27DF, 0x1028, 0x02b0 },	/* ICH7 on unknown Dell */
	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
	{ 0x27DF, 0x103C, 0x361a },	/* ICH7 on unknown HP */
	{ 0x27DF, 0x1071, 0xD221 },	/* ICH7 on Hercules EC-900 */
	{ 0x27DF, 0x152D, 0x0778 },	/* ICH7 on unknown Intel */
	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
	{ 0x27df, 0x104d, 0x900e },	/* ICH7 on Sony TZ-90 */
	/* end marker */
	{ 0, }
};
/*
 * piix_port_start - per-port init hook
 *
 * Controllers limited to 16-bit PIO (PIIX_FLAG_PIO16) skip the 32-bit
 * PIO enablement; everyone else gets 32-bit PIO with runtime switching.
 * Delegates the rest to the generic BMDMA port start.
 */
static int piix_port_start(struct ata_port *ap)
{
	if (ap->flags & PIIX_FLAG_PIO16)
		return ata_bmdma_port_start(ap);

	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return ata_bmdma_port_start(ap);
}
/**
 *	ich_pata_cable_detect - Probe host controller cable detect info
 *	@ap: Port for which cable detect info is desired
 *
 *	Read 80c cable indicator from ATA PCI device's PCI config
 *	register.  This register is normally set by firmware (BIOS).
 *	Known short-cable laptops are special-cased first.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static int ich_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct piix_host_priv *hpriv = ap->host->private_data;
	const struct ich_laptop *entry;
	u8 wire80_mask;

	/* Laptops known to use short 40-wire cables (e.g. Acer 5602WLMi) */
	for (entry = ich_laptop; entry->device; entry++) {
		if (entry->device == pdev->device &&
		    entry->subvendor == pdev->subsystem_vendor &&
		    entry->subdevice == pdev->subsystem_device)
			return ATA_CBL_PATA40_SHORT;
	}

	/* Fall back to the BIOS-programmed cable detect bits in IOCFG */
	wire80_mask = ap->port_no ? PIIX_80C_SEC : PIIX_80C_PRI;
	if (!(hpriv->saved_iocfg & wire80_mask))
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}
/**
 *	piix_pata_prereset - prereset for PATA host controller
 *	@link: Target link
 *	@deadline: deadline jiffies for the operation
 *
 *	Bail out with -ENOENT when the BIOS left this channel's IDE
 *	decode disabled; otherwise run the generic SFF prereset.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */
static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);

	if (!pci_test_config_bits(pdev, &piix_enable_bits[link->ap->port_no]))
		return -ENOENT;
	return ata_sff_prereset(link, deadline);
}
/* Serializes read-modify-write cycles on shared PCI config timing regs
 * (both channels touch the same IDETIM/SIDETIM/UDMA registers). */
static DEFINE_SPINLOCK(piix_lock);
/*
 * piix_set_timings - program PIO timing registers for one device
 * @ap: port the device is on (selects IDETIM register 0x40 or 0x42)
 * @adev: device whose timings are being set
 * @pio: PIO mode number (0-4) to program
 *
 * Performs a locked read-modify-write of the IDETIM (and, for slave
 * devices, SIDETIM) PCI config registers, and clears the device's UDMA
 * enable bit (re-set later by set_dmamode if UDMA is chosen).
 */
static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
			     u8 pio)
{
	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
	unsigned long flags;
	unsigned int is_slave	= (adev->devno != 0);
	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
	unsigned int slave_port	= 0x44;
	u16 master_data;
	u8 slave_data;
	u8 udma_enable;
	int control = 0;

	/*
	 *	See Intel Document 298600-004 for the timing programing rules
	 *	for ICH controllers.
	 */

	/* ISP (IORDY sample point) / RTC (recovery time) nibbles per mode */
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	if (pio >= 2)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))
		control |= 2;	/* IE enable */
	/* Intel specifies that the PPE functionality is for disk only */
	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE enable */
	/*
	 * If the drive MWDMA is faster than it can do PIO then
	 * we must force PIO into PIO0
	 */
	if (adev->pio_mode < XFER_PIO_0 + pio)
		/* Enable DMA timing only */
		control |= 8;	/* PIO cycles in PIO0 */

	spin_lock_irqsave(&piix_lock, flags);

	/* PIO configuration clears DTE unconditionally.  It will be
	 * programmed in set_dmamode which is guaranteed to be called
	 * after set_piomode if any DMA mode is available.
	 */
	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		/* clear TIME1|IE1|PPE1|DTE1 */
		master_data &= 0xff0f;
		/* enable PPE1, IE1 and TIME1 as needed */
		master_data |= (control << 4);
		pci_read_config_byte(dev, slave_port, &slave_data);
		/* keep the other channel's nibble, replace ours */
		slave_data &= (ap->port_no ? 0x0f : 0xf0);
		/* Load the timing nibble for this slave */
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
						<< (ap->port_no ? 4 : 0);
	} else {
		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
		master_data &= 0xccf0;
		/* Enable PPE, IE and TIME as appropriate */
		master_data |= control;
		/* load ISP and RCT */
		master_data |=
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}

	/* Enable SITRE (separate slave timing register) */
	master_data |= 0x4000;
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);

	/* Ensure the UDMA bit is off - it will be turned back on if
	   UDMA is selected */

	if (ap->udma_mask) {
		pci_read_config_byte(dev, 0x48, &udma_enable);
		udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
		pci_write_config_byte(dev, 0x48, udma_enable);
	}

	spin_unlock_irqrestore(&piix_lock, flags);
}
/**
 *	piix_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Drive in question
 *
 *	Set PIO mode for device, in host controller PCI config space.
 *	Thin wrapper converting adev->pio_mode to a 0-based PIO number.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
 *	do_pata_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Drive in question
 *	@isich: set if the chip is an ICH device
 *
 *	Set UDMA mode for device, in host controller PCI config space.
 *	For UDMA modes this programs the UDMACTL enable bit, the UDMATIM
 *	divider and (on ICH) the clock select in IDE_CONFIG; MWDMA modes
 *	are realized by reprogramming the PIO timings instead.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
{
	struct pci_dev *dev	= to_pci_dev(ap->host->dev);
	unsigned long flags;
	u8 speed		= adev->dma_mode;
	int devid		= adev->devno + 2 * ap->port_no;
	u8 udma_enable		= 0;

	if (speed >= XFER_UDMA_0) {
		unsigned int udma = speed - XFER_UDMA_0;
		u16 udma_timing;
		u16 ideconf;
		int u_clock, u_speed;

		spin_lock_irqsave(&piix_lock, flags);

		pci_read_config_byte(dev, 0x48, &udma_enable);

		/*
		 * UDMA is handled by a combination of clock switching and
		 * selection of dividers
		 *
		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
		 *	       except UDMA0 which is 00
		 */
		u_speed = min(2 - (udma & 1), udma);
		if (udma == 5)
			u_clock = 0x1000;	/* 100Mhz */
		else if (udma > 2)
			u_clock = 1;		/* 66Mhz */
		else
			u_clock = 0;		/* 33Mhz */

		udma_enable |= (1 << devid);

		/* Load the CT/RP selection */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(3 << (4 * devid));
		udma_timing |= u_speed << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);

		if (isich) {
			/* Select a 33/66/100Mhz clock */
			pci_read_config_word(dev, 0x54, &ideconf);
			ideconf &= ~(0x1001 << devid);
			ideconf |= u_clock << devid;
			/* For ICH or later we should set bit 10 for better
			   performance (WR_PingPong_En) */
			pci_write_config_word(dev, 0x54, ideconf);
		}

		pci_write_config_byte(dev, 0x48, udma_enable);

		spin_unlock_irqrestore(&piix_lock, flags);
	} else {
		/* MWDMA is driven by the PIO timings. */
		unsigned int mwdma = speed - XFER_MW_DMA_0;
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;

		/* XFER_PIO_0 is never used currently */
		piix_set_timings(ap, adev, pio);
	}
}
/**
 *	piix_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Drive whose DMA mode is being programmed
 *
 *	Set MW/UDMA mode for device, in host controller PCI config space.
 *	Non-ICH (PIIX) variant: isich = 0, so no IDE_CONFIG clock select.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	do_pata_set_dmamode(ap, adev, 0);
}
/**
 *	ich_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Drive whose DMA mode is being programmed
 *
 *	Set MW/UDMA mode for device, in host controller PCI config space.
 *	ICH variant: isich = 1 enables the IDE_CONFIG clock selection.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	do_pata_set_dmamode(ap, adev, 1);
}
/*
 * Serial ATA Index/Data Pair Superset Registers access
 *
 * Beginning from ICH8, there's a sane way to access SCRs using index
 * and data register pair located at BAR5 which means that we have
 * separate SCRs for master and slave.  This is handled using libata
 * slave_link facility.
 */

/* SCR number -> SIDPR index-register register-select value */
static const int piix_sidx_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 2,
	[SCR_CONTROL]	= 1,
};
/* Write the SIDPR index register to select (port, pmp, SCR) for the
 * next data-register access; the subsequent read/write must follow
 * without an intervening selection. */
static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
{
	struct ata_port *ap = link->ap;
	struct piix_host_priv *hpriv = ap->host->private_data;

	iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
		  hpriv->sidpr + PIIX_SIDPR_IDX);
}
/* libata scr_read hook: fetch SCR @reg for @link via the SIDPR
 * index/data pair.  Returns 0 on success, -EINVAL for unknown regs. */
static int piix_sidpr_scr_read(struct ata_link *link,
			       unsigned int reg, u32 *val)
{
	struct piix_host_priv *hpriv = link->ap->host->private_data;

	if (reg >= ARRAY_SIZE(piix_sidx_map))
		return -EINVAL;

	piix_sidpr_sel(link, reg);
	*val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
	return 0;
}
/* libata scr_write hook: store @val into SCR @reg for @link via the
 * SIDPR index/data pair.  Returns 0 on success, -EINVAL for unknown regs. */
static int piix_sidpr_scr_write(struct ata_link *link,
				unsigned int reg, u32 val)
{
	struct piix_host_priv *hpriv = link->ap->host->private_data;

	if (reg >= ARRAY_SIZE(piix_sidx_map))
		return -EINVAL;

	piix_sidpr_sel(link, reg);
	iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
	return 0;
}
/* libata set_lpm hook: delegate to the generic SCR-based LPM helper;
 * spm_wakeup is passed as false. */
static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			      unsigned hints)
{
	return sata_link_scr_lpm(link, policy, false);
}
/*
 * piix_irq_check - did this port raise the pending interrupt?
 *
 * Reports whether the BMDMA status register has its interrupt bit set.
 * Ports without a mapped BMDMA area can never be the source.
 */
static bool piix_irq_check(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;

	return false;
}
#ifdef CONFIG_PM
/*
 * piix_broken_suspend - detect machines with broken ACPI suspend
 *
 * Returns 1 when the running system matches a known-broken machine
 * (DMI system table, OEM strings, or the TECRA M4's bogus-DMI
 * fingerprint), 0 otherwise.  Used to keep the controller awake across
 * suspend on those boxes.
 */
static int piix_broken_suspend(void)
{
	static const struct dmi_system_id sysids[] = {
		{
			.ident = "TECRA M3",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
			},
		},
		{
			.ident = "TECRA M3",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
			},
		},
		{
			.ident = "TECRA M4",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
			},
		},
		{
			.ident = "TECRA M4",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
			},
		},
		{
			.ident = "TECRA M5",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
			},
		},
		{
			.ident = "TECRA M6",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
			},
		},
		{
			.ident = "TECRA M7",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
			},
		},
		{
			.ident = "TECRA A8",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
			},
		},
		{
			.ident = "Satellite R20",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
			},
		},
		{
			.ident = "Satellite R25",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
			},
		},
		{
			.ident = "Satellite U200",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
			},
		},
		{
			.ident = "Satellite U200",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
			},
		},
		{
			.ident = "Satellite Pro U200",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
			},
		},
		{
			.ident = "Satellite U205",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
			},
		},
		{
			.ident = "SATELLITE U205",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
			},
		},
		{
			.ident = "Satellite Pro A120",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
			},
		},
		{
			.ident = "Portege M500",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
				DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
			},
		},
		{
			.ident = "VGN-BX297XP",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
			},
		},

		{ }	/* terminate list */
	};
	/* OEM strings that also identify broken machines */
	static const char *oemstrs[] = {
		"Tecra M3,",
	};
	int i;

	if (dmi_check_system(sysids))
		return 1;

	for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
		if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
			return 1;

	/* TECRA M4 sometimes forgets its identify and reports bogus
	 * DMI information.  As the bogus information is a bit
	 * generic, match as many entries as possible.  This manual
	 * matching is necessary because dmi_system_id.matches is
	 * limited to four entries.
	 */
	if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
	    dmi_match(DMI_PRODUCT_NAME, "000000") &&
	    dmi_match(DMI_PRODUCT_VERSION, "000000") &&
	    dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
	    dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
	    dmi_match(DMI_BOARD_NAME, "Portable PC") &&
	    dmi_match(DMI_BOARD_VERSION, "Version A0"))
		return 1;

	return 0;
}
/*
 * piix_pci_device_suspend - PCI suspend hook with broken-BIOS workaround
 *
 * Suspends the ata_host, then either performs the normal PCI suspend or
 * — on machines flagged by piix_broken_suspend() — saves state while
 * leaving the controller awake, marking the host so resume knows.
 */
static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	/* Some braindamaged ACPI suspend implementations expect the
	 * controller to be awake on entry; otherwise, it burns cpu
	 * cycles and power trying to do something to the sleeping
	 * beauty.
	 */
	if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
		pci_save_state(pdev);

		/* mark its power state as "unknown", since we don't
		 * know if e.g. the BIOS will change its device state
		 * when we suspend.
		 */
		if (pdev->current_state == PCI_D0)
			pdev->current_state = PCI_UNKNOWN;

		/* tell resume that it's waking up from broken suspend */
		spin_lock_irqsave(&host->lock, flags);
		host->flags |= PIIX_HOST_BROKEN_SUSPEND;
		spin_unlock_irqrestore(&host->lock, flags);
	} else
		ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
/*
 * piix_pci_device_resume - PCI resume hook, counterpart of the suspend
 * workaround
 *
 * If the host was suspended via the broken-suspend path, restore PCI
 * state manually and re-enable without touching the enable count;
 * otherwise do the normal PCI resume.  Finally resumes the ata_host.
 */
static int piix_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int rc;

	if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
		spin_lock_irqsave(&host->lock, flags);
		host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
		spin_unlock_irqrestore(&host->lock, flags);

		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);

		/* PCI device wasn't disabled during suspend.  Use
		 * pci_reenable_device() to avoid affecting the enable
		 * count.
		 */
		rc = pci_reenable_device(pdev);
		if (rc)
			dev_err(&pdev->dev,
				"failed to enable device after resume (%d)\n",
				rc);
	} else
		rc = ata_pci_device_do_resume(pdev);

	if (rc == 0)
		ata_host_resume(host);

	return rc;
}
#endif
/*
 * piix_vmw_bmdma_status - BMDMA status with VMware quirk
 *
 * VMware's emulated PIIX4 raises spurious DMA_ERR; mask it out of the
 * generic BMDMA status before returning.
 */
static u8 piix_vmw_bmdma_status(struct ata_port *ap)
{
	u8 status = ata_bmdma_status(ap);

	status &= ~ATA_DMA_ERR;
	return status;
}
/* Default SCSI host template: stock BMDMA settings */
static struct scsi_host_template piix_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
/* SATA ports: 32-bit BMDMA base with INTx-check and port-start hooks */
static struct ata_port_operations piix_sata_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.sff_irq_check		= piix_irq_check,
	.port_start		= piix_port_start,
};
/* PIIX PATA: adds timing programming and channel-enable prereset */
static struct ata_port_operations piix_pata_ops = {
	.inherits		= &piix_sata_ops,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= piix_set_piomode,
	.set_dmamode		= piix_set_dmamode,
	.prereset		= piix_pata_prereset,
};
/* VMware PIIX4: PATA ops with the spurious-DMA_ERR status filter */
static struct ata_port_operations piix_vmw_ops = {
	.inherits		= &piix_pata_ops,
	.bmdma_status		= piix_vmw_bmdma_status,
};
/* ICH PATA: IOCFG/laptop-aware cable detect + ICH clock selection */
static struct ata_port_operations ich_pata_ops = {
	.inherits		= &piix_pata_ops,
	.cable_detect		= ich_pata_cable_detect,
	.set_dmamode		= ich_set_dmamode,
};
/* Expose the LPM policy sysfs attribute on SIDPR-capable hosts */
static struct device_attribute *piix_sidpr_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	NULL
};
/* SCSI host template for SIDPR-capable controllers (adds LPM attr) */
static struct scsi_host_template piix_sidpr_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.shost_attrs		= piix_sidpr_shost_attrs,
};
/* SIDPR-capable SATA: SCR access enables hardreset and LPM control */
static struct ata_port_operations piix_sidpr_sata_ops = {
	.inherits		= &piix_sata_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= piix_sidpr_scr_read,
	.scr_write		= piix_sidpr_scr_write,
	.set_lpm		= piix_sidpr_set_lpm,
};
/* Per-controller capabilities, indexed by enum piix_controller_ids */
static struct ata_port_info piix_port_info[] = {
	[piix_pata_mwdma] =	/* PIIX3 MWDMA only */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.port_ops	= &piix_pata_ops,
	},

	[piix_pata_33] =	/* PIIX4 at 33MHz */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &piix_pata_ops,
	},

	[ich_pata_33] =		/* ICH0 - ICH at 33Mhz*/
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok  */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_66] =		/* ICH controllers up to 66MHz */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_100] =
	{
		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_100_nomwdma1] =
	{
		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2_ONLY,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ich_pata_ops,
	},

	[ich5_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich6_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich6m_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_sata] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[tolapai_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8m_apple_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[piix_pata_vmw] =
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &piix_vmw_ops,
	},

	/*
	 * some Sandybridge chipsets have broken 32 mode up to now,
	 * see https://bugzilla.kernel.org/show_bug.cgi?id=40592
	 */
	[ich8_sata_snb] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata_snb] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
					| PIIX_FLAG_PIO16,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata_byt] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},
};
#define AHCI_PCI_BAR		5
#define AHCI_GLOBAL_CTL		0x04
#define AHCI_ENABLE		(1 << 31)

/*
 * piix_disable_ahci - switch the controller out of AHCI mode
 *
 * Clears the AHCI enable bit in the global control register mapped at
 * BAR5 so the device behaves as plain IDE.  Returns 0 on success (or
 * if BAR5 is absent), -ENOMEM if mapping fails, -EIO if the bit
 * refuses to clear.
 */
static int piix_disable_ahci(struct pci_dev *pdev)
{
	void __iomem *mmio;
	u32 tmp;
	int rc = 0;

	/* BUG: pci_enable_device has not yet been called.  This
	 * works because this device is usually set up by BIOS.
	 */

	if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
	    !pci_resource_len(pdev, AHCI_PCI_BAR))
		return 0;

	/* map only the first 64 bytes — enough for the global regs */
	mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
	if (!mmio)
		return -ENOMEM;

	tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
	if (tmp & AHCI_ENABLE) {
		tmp &= ~AHCI_ENABLE;
		iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);

		/* read back to verify the write stuck */
		tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
		if (tmp & AHCI_ENABLE)
			rc = -EIO;
	}

	pci_iounmap(pdev, mmio);
	return rc;
}
/**
 *	piix_check_450nx_errata	-	Check for problem 450NX setup
 *	@ata_dev: the PCI device to check
 *
 *	Check for the present of 450NX errata #19 and errata #25.  If
 *	they are found return an error code so we can turn off DMA.
 *	Returns 0 when no affected 82454NX bridge is present, 1 or 2
 *	(truthy) when IDE DMA must be disabled.
 */

static int piix_check_450nx_errata(struct pci_dev *ata_dev)
{
	struct pci_dev *pdev = NULL;
	u16 cfg;
	int no_piix_dma = 0;

	/* pci_get_device iterates all 82454NX PXBs, dropping each ref */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
		/* Look for 450NX PXB.  Check for problem configurations
		   A PCI quirk checks bit 6 already */
		pci_read_config_word(pdev, 0x41, &cfg);
		/* Only on the original revision: IDE DMA can hang */
		if (pdev->revision == 0x00)
			no_piix_dma = 1;
		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
		else if (cfg & (1<<14) && pdev->revision < 5)
			no_piix_dma = 2;
	}
	if (no_piix_dma)
		dev_warn(&ata_dev->dev,
			 "450NX errata present, disabling IDE DMA%s\n",
			 no_piix_dma == 2 ? " - a BIOS update may resolve this"
			 : "");

	return no_piix_dma;
}
/*
 * piix_init_pcs - force-enable SATA ports via the PCS register
 *
 * Ensures the map_db's port_enable bits are set in ICH5_PCS; if a
 * write is needed, waits 150ms for the PHYs to come up.
 */
static void piix_init_pcs(struct ata_host *host,
			  const struct piix_map_db *map_db)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u16 pcs, new_pcs;

	pci_read_config_word(pdev, ICH5_PCS, &pcs);

	new_pcs = pcs | map_db->port_enable;

	if (new_pcs != pcs) {
		DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
		pci_write_config_word(pdev, ICH5_PCS, new_pcs);
		msleep(150);
	}
}
/*
 * piix_init_sata_map - decode the controller's port multiplexing
 * @pdev: controller
 * @pinfo: two-entry port info array, adjusted in place
 * @map_db: per-chip map database
 *
 * Reads the Port Mapping Register and looks up the matching 4-entry
 * map describing each lane: a SATA port number, IDE, not available
 * (NA) or reserved (RV).  IDE lane pairs rewrite the corresponding
 * pinfo slot to PATA; odd SATA lanes add ATA_FLAG_SLAVE_POSS.
 * Returns the chosen map.
 */
static const int *piix_init_sata_map(struct pci_dev *pdev,
				     struct ata_port_info *pinfo,
				     const struct piix_map_db *map_db)
{
	const int *map;
	int i, invalid_map = 0;
	u8 map_value;

	pci_read_config_byte(pdev, ICH5_PMR, &map_value);

	map = map_db->map[map_value & map_db->mask];

	dev_info(&pdev->dev, "MAP [");
	for (i = 0; i < 4; i++) {
		switch (map[i]) {
		case RV:
			/* reserved encoding: remember and report below */
			invalid_map = 1;
			pr_cont(" XX");
			break;

		case NA:
			pr_cont(" --");
			break;

		case IDE:
			/* IDE always occupies a pair of lanes starting on
			 * an even index; skip the second lane */
			WARN_ON((i & 1) || map[i + 1] != IDE);
			pinfo[i / 2] = piix_port_info[ich_pata_100];
			i++;
			pr_cont(" IDE IDE");
			break;

		default:
			pr_cont(" P%d", map[i]);
			if (i & 1)
				pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
			break;
		}
	}
	pr_cont(" ]\n");

	if (invalid_map)
		dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);

	return map;
}
/*
 * piix_no_sidpr - blacklist check for SIDPR access
 * @host: host being initialized
 *
 * Samsung DB-P70 exposes only three ATA ports, but the unconnected
 * first port reports link online while ignoring SRST, which makes
 * detection excessively slow.  The board lacks usable DMI strings, so
 * it is recognized by subsystem vendor/device, and since it is unclear
 * whether those IDs are unique to this board we merely disable SIDPR
 * access instead of disabling the port outright.
 *
 * Reported as bnc#441240:
 * https://bugzilla.novell.com/show_bug.cgi?id=441420
 *
 * Returns true when SIDPR must not be used on this machine.
 */
static bool piix_no_sidpr(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	bool is_db_p70;

	is_db_p70 = pdev->vendor == PCI_VENDOR_ID_INTEL &&
		    pdev->device == 0x2920 &&
		    pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
		    pdev->subsystem_device == 0xb049;
	if (!is_db_p70)
		return false;

	dev_warn(host->dev,
		 "Samsung DB-P70 detected, disabling SIDPR\n");
	return true;
}
/*
 * piix_init_sidpr - probe and enable SATA Index/Data Pair registers
 * @host: target host
 *
 * Probes SIDPR (which gives SCR register access), verifies it really
 * works by test-writing SCR_CONTROL, and on success switches port ops
 * to the SIDPR-aware variant and initializes slave links.  All
 * "not available" outcomes return 0 so the host simply runs without
 * SIDPR; only a slave-link setup failure is a hard error.
 */
static int piix_init_sidpr(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct piix_host_priv *hpriv = host->private_data;
	struct ata_link *link0 = &host->ports[0]->link;
	u32 scontrol;
	int i, rc;

	/* check for availability */
	for (i = 0; i < 4; i++)
		if (hpriv->map[i] == IDE)
			return 0;

	/* is it blacklisted? */
	if (piix_no_sidpr(host))
		return 0;

	if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
		return 0;

	/* the SIDPR BAR must exist and have the expected size */
	if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
	    pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
		return 0;

	if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
		return 0;

	hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];

	/* SCR access via SIDPR doesn't work on some configurations.
	 * Give it a test drive by inhibiting power save modes which
	 * we'll do anyway.
	 */
	piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);

	/* if IPM is already 3, SCR access is probably working. Don't
	 * un-inhibit power save modes as BIOS might have inhibited
	 * them for a reason.
	 */
	if ((scontrol & 0xf00) != 0x300) {
		scontrol |= 0x300;
		piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
		piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);

		/* write did not stick: SIDPR present but unusable */
		if ((scontrol & 0xf00) != 0x300) {
			dev_info(host->dev,
				 "SCR access via SIDPR is available but doesn't work\n");
			return 0;
		}
	}

	/* okay, SCRs available, set ops and ask libata for slave_link */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		ap->ops = &piix_sidpr_sata_ops;

		if (ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/*
 * piix_iocfg_bit18_quirk - clear IOCFG bit 18 on affected machines
 * @host: host being initialized
 *
 * The datasheet documents IOCFG bit 18 as a no-op, but the Clevo
 * M570U firmware sets it when the cdrom was not the boot device,
 * which disables the channel.  On DMI-matched systems with the bit
 * set, rewrite IOCFG with the bit cleared.
 */
static void piix_iocfg_bit18_quirk(struct ata_host *host)
{
	static const struct dmi_system_id sysids[] = {
		{
			/* Clevo M570U sets IOCFG bit 18 if the cdrom
			 * isn't used to boot the system which
			 * disables the channel.
			 */
			.ident = "M570U",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
				DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
			},
		},

		{ }	/* terminate list */
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct piix_host_priv *hpriv = host->private_data;

	if (!dmi_check_system(sysids))
		return;
	if (!(hpriv->saved_iocfg & (1 << 18)))
		return;

	dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n");
	pci_write_config_dword(pdev, PIIX_IOCFG,
			       hpriv->saved_iocfg & ~(1 << 18));
}
/*
 * piix_broken_system_poweroff - detect machines that must not spin
 * disks down on poweroff/hibernate
 * @pdev: controller being probed
 *
 * Matches the system against a DMI blacklist; each entry records the
 * PCI slot of the on-board controller in driver_data so that only
 * that controller is quirked.  Returns true when the quirk applies.
 */
static bool piix_broken_system_poweroff(struct pci_dev *pdev)
{
	static const struct dmi_system_id broken_systems[] = {
		{
			.ident = "HP Compaq 2510p",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x1FUL,
		},
		{
			.ident = "HP Compaq nc6000",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
			},
			/* PCI slot number of the controller */
			.driver_data = (void *)0x1FUL,
		},

		{ }	/* terminate list */
	};
	const struct dmi_system_id *hit = dmi_first_match(broken_systems);
	unsigned long slot;

	if (!hit)
		return false;

	/* apply the quirk only to the on-board controller */
	slot = (unsigned long)hit->driver_data;
	return slot == PCI_SLOT(pdev->devfn);
}
/* Module parameter: when set (the default), a Hyper-V guest lets the
 * paravirtual storage driver own the disks instead of ata_piix; see
 * piix_ignore_devices_quirk(). */
static int prefer_ms_hyperv = 1;
module_param(prefer_ms_hyperv, int, 0);
MODULE_PARM_DESC(prefer_ms_hyperv,
		 "Prefer Hyper-V paravirtualization drivers instead of ATA, "
		 "0 - Use ATA drivers, "
		 "1 (Default) - Use the paravirtualization drivers.");
/*
 * piix_ignore_devices_quirk - skip ATA disks when Hyper-V owns them
 * @host: host being initialized
 *
 * Compiled in only when the Hyper-V storage driver can be present.
 * On a genuine Hyper-V guest (but not MS Virtual PC, whose DMI
 * strings are nearly identical) set ATA_HOST_IGNORE_ATA so disks are
 * handled by the paravirtual path; CD/DVD devices are only exposed on
 * the emulated controller and still probe here.
 */
static void piix_ignore_devices_quirk(struct ata_host *host)
{
#if IS_ENABLED(CONFIG_HYPERV_STORAGE)
	static const struct dmi_system_id ignore_hyperv[] = {
		{
			/* On Hyper-V hypervisors the disks are exposed on
			 * both the emulated SATA controller and on the
			 * paravirtualised drivers. The CD/DVD devices
			 * are only exposed on the emulated controller.
			 * Request we ignore ATA devices on this host.
			 */
			.ident = "Hyper-V Virtual Machine",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "Microsoft Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
			},
		},

		{ }	/* terminate list */
	};
	static const struct dmi_system_id allow_virtual_pc[] = {
		{
			/* In MS Virtual PC guests the DMI ident is nearly
			 * identical to a Hyper-V guest. One difference is the
			 * product version which is used here to identify
			 * a Virtual PC guest. This entry allows ata_piix to
			 * drive the emulated hardware.
			 */
			.ident = "MS Virtual PC 2007",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "Microsoft Corporation"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
				DMI_MATCH(DMI_PRODUCT_VERSION, "VS2005R2"),
			},
		},

		{ }	/* terminate list */
	};
	const struct dmi_system_id *ignore = dmi_first_match(ignore_hyperv);
	const struct dmi_system_id *allow = dmi_first_match(allow_virtual_pc);

	if (ignore && !allow && prefer_ms_hyperv) {
		host->flags |= ATA_HOST_IGNORE_ATA;
		dev_info(host->dev, "%s detected, ATA device ignore set\n",
			 ignore->ident);
	}
#endif
}
/**
 * piix_init_one - Register PIIX ATA PCI device with kernel services
 * @pdev: PCI device to register
 * @ent: Entry in piix_pci_tbl matching with @pdev
 *
 * Called from kernel PCI layer. We probe for combined mode (sigh),
 * and then hand over control to libata, for it to do the rest.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, or -ERRNO value.
 */
static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info port_info[2];
	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
	struct scsi_host_template *sht = &piix_sht;
	unsigned long port_flags;
	struct ata_host *host;
	struct piix_host_priv *hpriv;
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* no hotplugging support for later devices (FIXME) */
	if (!in_module_init && ent->driver_data >= ich5_sata)
		return -ENODEV;

	if (piix_broken_system_poweroff(pdev)) {
		piix_port_info[ent->driver_data].flags |=
				ATA_FLAG_NO_POWEROFF_SPINDOWN |
				ATA_FLAG_NO_HIBERNATE_SPINDOWN;
		dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
			 "on poweroff and hibernation\n");
	}

	/* both ports start from the same chip-specific template */
	port_info[0] = piix_port_info[ent->driver_data];
	port_info[1] = piix_port_info[ent->driver_data];

	port_flags = port_info[0].flags;

	/* enable device and prepare host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	/* Save IOCFG, this will be used for cable detection, quirk
	 * detection and restoration on detach. This is necessary
	 * because some ACPI implementations mess up cable related
	 * bits on _STM. Reported on kernel bz#11879.
	 */
	pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);

	/* ICH6R may be driven by either ata_piix or ahci driver
	 * regardless of BIOS configuration. Make sure AHCI mode is
	 * off.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
		rc = piix_disable_ahci(pdev);
		if (rc)
			return rc;
	}

	/* SATA map init can change port_info, do it before prepping host */
	if (port_flags & ATA_FLAG_SATA)
		hpriv->map = piix_init_sata_map(pdev, port_info,
					piix_map_db_table[ent->driver_data]);

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	host->private_data = hpriv;

	/* initialize controller */
	if (port_flags & ATA_FLAG_SATA) {
		piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
		rc = piix_init_sidpr(host);
		if (rc)
			return rc;
		/* SIDPR-capable hosts use the SCR-aware scsi template */
		if (host->ports[0]->ops == &piix_sidpr_sata_ops)
			sht = &piix_sidpr_sht;
	}

	/* apply IOCFG bit18 quirk */
	piix_iocfg_bit18_quirk(host);

	/* On ICH5, some BIOSen disable the interrupt using the
	 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
	 * On ICH6, this bit has the same effect, but only when
	 * MSI is disabled (and it is disabled, as we don't use
	 * message-signalled interrupts currently).
	 */
	if (port_flags & PIIX_FLAG_CHECKINTR)
		pci_intx(pdev, 1);

	if (piix_check_450nx_errata(pdev)) {
		/* This writes into the master table but it does not
		   really matter for this errata as we will apply it to
		   all the PIIX devices on the board */
		host->ports[0]->mwdma_mask = 0;
		host->ports[0]->udma_mask = 0;
		host->ports[1]->mwdma_mask = 0;
		host->ports[1]->udma_mask = 0;
	}
	host->flags |= ATA_HOST_PARALLEL_SCAN;

	/* Allow hosts to specify device types to ignore when scanning. */
	piix_ignore_devices_quirk(host);

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}
/*
 * piix_remove_one - detach callback
 * @pdev: controller being removed
 *
 * Restore the IOCFG value saved at probe time (probe may have cleared
 * bit 18, and ACPI may mess with cable bits), then let libata tear
 * down the host.
 */
static void piix_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct piix_host_priv *hpriv = host->private_data;

	pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);

	ata_pci_remove_one(pdev);
}
/* PCI driver glue; suspend/resume hooks only exist with CONFIG_PM */
static struct pci_driver piix_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= piix_pci_tbl,
	.probe			= piix_init_one,
	.remove			= piix_remove_one,
#ifdef CONFIG_PM
	.suspend		= piix_pci_device_suspend,
	.resume			= piix_pci_device_resume,
#endif
};
/*
 * piix_init - module load: register the PCI driver
 *
 * Once registration succeeds, drop the in_module_init flag so that
 * piix_init_one() refuses later (hotplug) probes of ich5_sata and
 * newer devices.  Returns 0 or the registration error.
 */
static int __init piix_init(void)
{
	int err;

	DPRINTK("pci_register_driver\n");
	err = pci_register_driver(&piix_pci_driver);
	if (err != 0)
		return err;

	in_module_init = 0;

	DPRINTK("done\n");
	return 0;
}
/* Module unload: unregister the driver; libata detaches the hosts. */
static void __exit piix_exit(void)
{
	pci_unregister_driver(&piix_pci_driver);
}

module_init(piix_init);
module_exit(piix_exit);
| gpl-2.0 |
pavel-odintsov/openvz_rhel6_kernel_mirror | drivers/pci/hotplug/shpchp_pci.c | 1364 | 4023 | /*
* Standard Hot Plug Controller Driver
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
* Copyright (C) 2003-2004 Intel Corporation
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "../pci.h"
#include "shpchp.h"
/*
 * shpchp_configure_device - enumerate and set up a hot-added slot
 * @p_slot: slot that signalled the insertion
 *
 * Scans function 0 of the slot, then walks all eight possible
 * functions: display adapters are refused, bridges get a free bus
 * number and a secondary-side scan, and each surviving device is
 * configured.  Finally resources are assigned and the new devices are
 * added to the parent bus.  Returns 0 on success, -EINVAL if a device
 * already occupies the slot, -ENODEV if nothing was found.
 */
int __ref shpchp_configure_device(struct slot *p_slot)
{
	struct pci_dev *dev;
	struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
	int num, fn;
	struct controller *ctrl = p_slot->ctrl;

	/* refuse to hot-add into an already occupied slot */
	dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
	if (dev) {
		ctrl_err(ctrl, "Device %s already exists "
			 "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
			 pci_domain_nr(parent), p_slot->bus, p_slot->device);
		pci_dev_put(dev);
		return -EINVAL;
	}

	num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
	if (num == 0) {
		ctrl_err(ctrl, "No new device found\n");
		return -ENODEV;
	}

	for (fn = 0; fn < 8; fn++) {
		dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
		if (!dev)
			continue;
		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
			ctrl_err(ctrl, "Cannot hot-add display device %s\n",
				 pci_name(dev));
			pci_dev_put(dev);
			continue;
		}
		if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
		    (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
			/* Find an unused bus number for the new bridge */
			struct pci_bus *child;
			unsigned char busnr, start = parent->secondary;
			unsigned char end = parent->subordinate;

			for (busnr = start; busnr <= end; busnr++) {
				if (!pci_find_bus(pci_domain_nr(parent),
						  busnr))
					break;
			}
			if (busnr > end) {
				ctrl_err(ctrl,
					 "No free bus for hot-added bridge\n");
				pci_dev_put(dev);
				continue;
			}
			child = pci_add_new_bus(parent, dev, busnr);
			if (!child) {
				ctrl_err(ctrl, "Cannot add new bus for %s\n",
					 pci_name(dev));
				pci_dev_put(dev);
				continue;
			}
			/* scan behind the bridge and size its windows */
			child->subordinate = pci_do_scan_bus(child);
			pci_bus_size_bridges(child);
		}
		pci_configure_slot(dev);
		pci_dev_put(dev);
	}

	pci_bus_assign_resources(parent);
	pci_bus_add_devices(parent);
	pci_enable_bridges(parent);
	return 0;
}
/*
 * shpchp_unconfigure_device - remove all functions of a slot
 * @p_slot: slot being powered down
 *
 * Walks the eight possible functions of the slot and removes each
 * one, refusing display devices and bridges that carry the VGA
 * bridge-control bit.  Always returns 0.
 */
int shpchp_unconfigure_device(struct slot *p_slot)
{
	struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
	struct controller *ctrl = p_slot->ctrl;
	int fn;

	ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
		 __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);

	for (fn = 0; fn < 8; fn++) {
		struct pci_dev *pdev = pci_get_slot(parent,
					PCI_DEVFN(p_slot->device, fn));
		u8 bctl = 0;

		if (!pdev)
			continue;

		if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
			ctrl_err(ctrl, "Cannot remove display device %s\n",
				 pci_name(pdev));
			pci_dev_put(pdev);
			continue;
		}

		if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
			pci_read_config_byte(pdev, PCI_BRIDGE_CONTROL, &bctl);
			if (bctl & PCI_BRIDGE_CTL_VGA) {
				ctrl_err(ctrl,
					 "Cannot remove display device %s\n",
					 pci_name(pdev));
				pci_dev_put(pdev);
				continue;
			}
		}

		pci_remove_bus_device(pdev);
		pci_dev_put(pdev);
	}
	return 0;
}
| gpl-2.0 |
VorkTeam/vorkKernel-DESIRE | arch/mips/dec/ioasic-irq.c | 1620 | 2290 | /*
* DEC I/O ASIC interrupts.
*
* Copyright (c) 2002, 2003 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
static int ioasic_irq_base;

/* Enable an ASIC line: set its bit in the system interrupt mask. */
static inline void unmask_ioasic_irq(unsigned int irq)
{
	ioasic_write(IO_REG_SIMR,
		     ioasic_read(IO_REG_SIMR) |
		     (1 << (irq - ioasic_irq_base)));
}
/* Disable an ASIC line: clear its bit in the system interrupt mask. */
static inline void mask_ioasic_irq(unsigned int irq)
{
	ioasic_write(IO_REG_SIMR,
		     ioasic_read(IO_REG_SIMR) &
		     ~(1 << (irq - ioasic_irq_base)));
}
/* Drop a pending line in SIR by writing 0 to its bit only. */
static inline void clear_ioasic_irq(unsigned int irq)
{
	u32 mask = ~(1 << (irq - ioasic_irq_base));

	ioasic_write(IO_REG_SIR, mask);
}
/* Acknowledge: mask the line and flush the write to the ASIC. */
static inline void ack_ioasic_irq(unsigned int irq)
{
	mask_ioasic_irq(irq);
	fast_iob();
}
/* End of interrupt: re-enable the line unless it has been disabled
 * or is still being handled. */
static inline void end_ioasic_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
		unmask_ioasic_irq(irq);
}
/* irq_chip for the ordinary (non-DMA) I/O ASIC lines. */
static struct irq_chip ioasic_irq_type = {
	.name = "IO-ASIC",
	.ack = ack_ioasic_irq,
	.mask = mask_ioasic_irq,
	.mask_ack = ack_ioasic_irq,
	.unmask = unmask_ioasic_irq,
};
/* DMA lines share the plain-line mask/ack behaviour... */
#define unmask_ioasic_dma_irq unmask_ioasic_irq
#define mask_ioasic_dma_irq mask_ioasic_irq
#define ack_ioasic_dma_irq ack_ioasic_irq

/* ...but additionally need their SIR status bit cleared and flushed
 * before the common end-of-interrupt handling. */
static inline void end_ioasic_dma_irq(unsigned int irq)
{
	clear_ioasic_irq(irq);
	fast_iob();
	end_ioasic_irq(irq);
}
/* irq_chip for the DMA lines; only .end differs from the plain one. */
static struct irq_chip ioasic_dma_irq_type = {
	.name = "IO-ASIC-DMA",
	.ack = ack_ioasic_dma_irq,
	.mask = mask_ioasic_dma_irq,
	.mask_ack = ack_ioasic_dma_irq,
	.unmask = unmask_ioasic_dma_irq,
	.end = end_ioasic_dma_irq,
};
/*
 * init_ioasic_irqs - register the I/O ASIC interrupt lines
 * @base: first Linux irq number assigned to the ASIC
 *
 * Lines below base + IO_INR_DMA get the ordinary level-triggered
 * chip; the remainder up to base + IO_IRQ_LINES are DMA lines using
 * the chip with the extra end-of-interrupt step.
 */
void __init init_ioasic_irqs(int base)
{
	int i;

	/* Mask interrupts. */
	ioasic_write(IO_REG_SIMR, 0);
	fast_iob();

	for (i = base; i < base + IO_INR_DMA; i++)
		set_irq_chip_and_handler(i, &ioasic_irq_type,
					 handle_level_irq);
	for (; i < base + IO_IRQ_LINES; i++)
		set_irq_chip(i, &ioasic_dma_irq_type);

	ioasic_irq_base = base;
}
| gpl-2.0 |
carlocaione/geniatech-kernel | drivers/gpu/drm/radeon/r520.c | 2644 | 8482 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
/* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */
/*
 * r520_mc_wait_for_idle - poll the memory controller until it idles
 * @rdev: radeon device
 *
 * Polls MC_STATUS once per microsecond up to rdev->usec_timeout.
 * Returns 0 when the idle bit comes up, -1 on timeout.
 */
static int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
	uint32_t status;
	unsigned n;

	for (n = 0; n < rdev->usec_timeout; n++) {
		status = RREG32_MC(R520_MC_STATUS);
		if (status & R520_MC_STATUS_IDLE)
			return 0;
		DRM_UDELAY(1);
	}
	return -1;
}
/*
 * r520_gpu_init - basic engine setup for r520-family chips
 * @rdev: radeon device
 *
 * Disables VGA rendering, applies the RV530 FIFO-size workaround,
 * initializes the pipes and programs the pipe-select PLL register
 * from the effective pipe configuration, then waits for MC idle.
 */
static void r520_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	rv515_vga_render_disable(rdev);
	/*
	 * DST_PIPE_CONFIG		0x170C
	 * GB_TILE_CONFIG		0x4018
	 * GB_FIFO_SIZE			0x4024
	 * GB_PIPE_SELECT		0x402C
	 * GB_PIPE_SELECT2		0x4124
	 *	Z_PIPE_SHIFT		0
	 *	Z_PIPE_MASK		0x000000003
	 * GB_FIFO_SIZE2		0x4128
	 *	SC_SFIFO_SIZE_SHIFT	0
	 *	SC_SFIFO_SIZE_MASK	0x000000003
	 *	SC_MFIFO_SIZE_SHIFT	2
	 *	SC_MFIFO_SIZE_MASK	0x00000000C
	 *	FG_SFIFO_SIZE_SHIFT	4
	 *	FG_SFIFO_SIZE_MASK	0x000000030
	 *	ZB_MFIFO_SIZE_SHIFT	6
	 *	ZB_MFIFO_SIZE_MASK	0x0000000C0
	 * GA_ENHANCE			0x4274
	 * SU_REG_DEST			0x42C8
	 */
	/* workaround for RV530 */
	if (rdev->family == CHIP_RV530) {
		WREG32(0x4128, 0xFF);
	}
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	pipe_select_current = (tmp >> 2) & 3;
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r520_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
/*
 * r520_vram_get_type - derive VRAM bus width from MC_CNTL0
 * @rdev: radeon device
 *
 * The channel-count field selects 32/64/128/256 bits; a set
 * R520_MC_CHANNEL_SIZE bit doubles the result.  VRAM is always
 * treated as DDR.
 */
static void r520_vram_get_type(struct radeon_device *rdev)
{
	uint32_t mc_cntl0, chans;

	rdev->mc.vram_is_ddr = true;
	mc_cntl0 = RREG32_MC(R520_MC_CNTL0);
	chans = (mc_cntl0 & R520_MEM_NUM_CHANNELS_MASK) >>
		R520_MEM_NUM_CHANNELS_SHIFT;
	switch (chans) {
	case 0:
		rdev->mc.vram_width = 32;
		break;
	case 1:
		rdev->mc.vram_width = 64;
		break;
	case 3:
		rdev->mc.vram_width = 256;
		break;
	case 2:
	default:
		rdev->mc.vram_width = 128;
		break;
	}
	if (mc_cntl0 & R520_MC_CHANNEL_SIZE)
		rdev->mc.vram_width *= 2;
}
/*
 * r520_mc_init - size and place the memory apertures
 * @rdev: radeon device
 *
 * Determines VRAM width/type, computes the VRAM sizes and places the
 * VRAM (and, on non-AGP boards, GTT) apertures, then refreshes the
 * bandwidth bookkeeping.
 */
void r520_mc_init(struct radeon_device *rdev)
{

	r520_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	radeon_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	/* AGP boards get their GTT placement from the AGP aperture */
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
/*
 * r520_mc_program - program the memory controller aperture registers
 * @rdev: radeon device
 *
 * Stops all MC clients, waits for idle, writes the VRAM/HDP and AGP
 * aperture registers, then resumes the clients.
 */
void r520_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (r520_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* Write VRAM size in case we are limiting it */
	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);

	/* Program MC, should be a 32bits limited address space */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		  S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32_MC(R_000005_MC_AGP_LOCATION,
			  S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			  S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32_MC(R_000007_AGP_BASE_2,
			  S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
	} else {
		/* no AGP: park the AGP aperture out of the way */
		WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
		WREG32_MC(R_000006_AGP_BASE, 0);
		WREG32_MC(R_000007_AGP_BASE_2, 0);
	}

	rv515_mc_resume(rdev, &save);
}
/*
 * r520_startup - bring the GPU up (shared by init and resume)
 * @rdev: radeon device
 *
 * Programs the MC, restarts clocks, configures the engine, enables
 * GART (PCIE only), writeback, IRQs, the CP ring and the IB pool.
 * Returns 0 or the first failing step's error code.
 */
static int r520_startup(struct radeon_device *rdev)
{
	int r;

	r520_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	r520_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = r100_ib_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
		return r;
	}
	return 0;
}
/*
 * r520_resume - power-management resume entry point
 * @rdev: radeon device
 *
 * Disables GART, resets and re-posts the chip via ATOM, restarts
 * clocks and surfaces, then runs the common startup path.
 */
int r520_resume(struct radeon_device *rdev)
{
	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	return r520_startup(rdev);
}
/*
 * r520_init - one-time driver initialization for r520-family chips
 * @rdev: radeon device
 *
 * Performs BIOS/ATOM discovery, reset, posting, clock/AGP/MC setup,
 * fence/irq/memory-manager/GART initialization and finally the common
 * startup.  If acceleration fails to start, the accel subsystems are
 * torn down and the device continues unaccelerated (returns 0).
 */
int r520_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS*/
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	if (!radeon_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			/* non-fatal: fall back to non-AGP operation */
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	r520_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rv370_pcie_gart_init(rdev);
	if (r)
		return r;
	rv515_set_safe_registers(rdev);
	rdev->accel_working = true;
	r = r520_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv370_pcie_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
| gpl-2.0 |
flar2/m8-Sense-4.4.4 | drivers/w1/slaves/w1_therm.c | 4948 | 6715 | /*
* w1_therm.c
*
* Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the therms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/delay.h>
#include "../w1.h"
#include "../w1_int.h"
#include "../w1_family.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family.");
/* Allow the strong pullup to be disabled, but default to enabled.
 * If it was disabled a parasite powered device might not get the require
 * current to do a temperature conversion. If it is enabled parasite powered
 * devices have a better chance of getting the current required.
 */
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);

/* Scratchpad contents known to come from misbehaving sensors; a read
 * matching an entry is retried (see w1_therm_read()).  The all-zero
 * entry also rejects blank reads. */
static u8 bad_roms[][9] = {
				{0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
				{}
			};
static ssize_t w1_therm_read(struct device *device,
	struct device_attribute *attr, char *buf);

/* sysfs attribute "w1_slave": read-only scratchpad/temperature dump */
static struct device_attribute w1_therm_attr =
	__ATTR(w1_slave, S_IRUGO, w1_therm_read, NULL);

/* Create the w1_slave attribute when a therm-family slave binds. */
static int w1_therm_add_slave(struct w1_slave *sl)
{
	return device_create_file(&sl->dev, &w1_therm_attr);
}

/* Drop the attribute when the slave goes away. */
static void w1_therm_remove_slave(struct w1_slave *sl)
{
	device_remove_file(&sl->dev, &w1_therm_attr);
}

/* Family operations shared by all supported sensor types. */
static struct w1_family_ops w1_therm_fops = {
	.add_slave	= w1_therm_add_slave,
	.remove_slave	= w1_therm_remove_slave,
};
/* One w1_family per supported chip; all share the same fops. */
static struct w1_family w1_therm_family_DS18S20 = {
	.fid = W1_THERM_DS18S20,
	.fops = &w1_therm_fops,
};

static struct w1_family w1_therm_family_DS18B20 = {
	.fid = W1_THERM_DS18B20,
	.fops = &w1_therm_fops,
};

static struct w1_family w1_therm_family_DS1822 = {
	.fid = W1_THERM_DS1822,
	.fops = &w1_therm_fops,
};

static struct w1_family w1_therm_family_DS28EA00 = {
	.fid = W1_THERM_DS28EA00,
	.fops = &w1_therm_fops,
};

/* Binds a family to the routine that turns its scratchpad bytes into
 * millidegrees Celsius. */
struct w1_therm_family_converter
{
	u8			broken;		/* set when registration failed */
	u16			reserved;
	struct w1_family	*f;
	int			(*convert)(u8 rom[9]);
};
/* The return value is millidegrees Centigrade. */
static inline int w1_DS18B20_convert_temp(u8 rom[9]);
static inline int w1_DS18S20_convert_temp(u8 rom[9]);

/* Conversion dispatch table; DS1822/DS18B20/DS28EA00 share a format. */
static struct w1_therm_family_converter w1_therm_families[] = {
	{
		.f		= &w1_therm_family_DS18S20,
		.convert	= w1_DS18S20_convert_temp
	},
	{
		.f		= &w1_therm_family_DS1822,
		.convert	= w1_DS18B20_convert_temp
	},
	{
		.f		= &w1_therm_family_DS18B20,
		.convert	= w1_DS18B20_convert_temp
	},
	{
		.f		= &w1_therm_family_DS28EA00,
		.convert	= w1_DS18B20_convert_temp
	},
};
/* DS18B20-format conversion: scratchpad bytes 0-1 are a little-endian
 * signed reading in 1/16 degC units; returns millidegrees C. */
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
	s16 raw = le16_to_cpup((__le16 *)rom);

	return raw * 1000 / 16;
}
/* DS18S20-format conversion: half-degree base reading in rom[0..1],
 * refined with the count registers in rom[6..7] (extended-resolution
 * formula per the DS18S20 datasheet).  Returns millidegrees C, or 0
 * when rom[7] is zero to avoid a division by zero. */
static inline int w1_DS18S20_convert_temp(u8 rom[9])
{
	int t, h;

	if (!rom[7])
		return 0;

	if (rom[1] == 0)
		t = ((s32)rom[0] >> 1)*1000;	/* positive: drop the 0.5C bit */
	else
		t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);	/* negative value */

	/* extended resolution: t - 0.25C + (rom[7] - rom[6]) / rom[7] */
	t -= 250;
	h = 1000*((s32)rom[7] - (s32)rom[6]);
	h /= (s32)rom[7];
	t += h;

	return t;
}
/* Dispatch to the converter registered for this family id; unknown
 * families yield 0 millidegrees. */
static inline int w1_convert_temp(u8 rom[9], u8 fid)
{
	struct w1_therm_family_converter *conv;
	int i;

	for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
		conv = &w1_therm_families[i];
		if (conv->f->fid == fid)
			return conv->convert(rom);
	}

	return 0;
}
/* Return 1 when the scratchpad matches a known-bogus pattern from
 * bad_roms[], 0 otherwise. */
static int w1_therm_check_rom(u8 rom[9])
{
	size_t i;

	for (i = 0; i < sizeof(bad_roms) / sizeof(bad_roms[0]); ++i)
		if (memcmp(bad_roms[i], rom, 9) == 0)
			return 1;

	return 0;
}
/*
 * w1_therm_read - sysfs read: run a conversion and dump the result
 *
 * Triggers a temperature conversion (using the strong pullup for
 * parasite-powered devices when enabled), reads the scratchpad and
 * retries up to 10 times on CRC failure or a known-bad pattern.
 * Output: two hex dumps of the scratchpad, the CRC verdict, and
 * "t=<millidegrees>".  Returns bytes written, or -EINTR when
 * interrupted while waiting for the bus mutex or conversion delay.
 */
static ssize_t w1_therm_read(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct w1_slave *sl = dev_to_w1_slave(device);
	struct w1_master *dev = sl->master;
	u8 rom[9], crc, verdict, external_power;
	int i, max_trying = 10;
	ssize_t c = PAGE_SIZE;

	i = mutex_lock_interruptible(&dev->mutex);
	if (i != 0)
		return i;

	memset(rom, 0, sizeof(rom));

	verdict = 0;
	crc = 0;

	while (max_trying--) {
		if (!w1_reset_select_slave(sl)) {
			int count = 0;
			unsigned int tm = 750;	/* worst-case conversion time, ms */
			unsigned long sleep_rem;

			/* ask the slave how it is powered */
			w1_write_8(dev, W1_READ_PSUPPLY);
			external_power = w1_read_8(dev);

			if (w1_reset_select_slave(sl))
				continue;

			/* 750ms strong pullup (or delay) after the convert */
			if (!external_power && w1_strong_pullup)
				w1_next_pullup(dev, tm);

			w1_write_8(dev, W1_CONVERT_TEMP);

			if (external_power) {
				/* externally powered: safe to release the
				 * bus while the conversion runs */
				mutex_unlock(&dev->mutex);

				sleep_rem = msleep_interruptible(tm);
				if (sleep_rem != 0)
					return -EINTR;

				i = mutex_lock_interruptible(&dev->mutex);
				if (i != 0)
					return i;
			} else if (!w1_strong_pullup) {
				/* parasite power without pullup: hold the
				 * bus and just wait out the conversion */
				sleep_rem = msleep_interruptible(tm);
				if (sleep_rem != 0) {
					mutex_unlock(&dev->mutex);
					return -EINTR;
				}
			}

			if (!w1_reset_select_slave(sl)) {

				w1_write_8(dev, W1_READ_SCRATCHPAD);
				if ((count = w1_read_block(dev, rom, 9)) != 9) {
					dev_warn(device, "w1_read_block() "
						 "returned %u instead of 9.\n",
						 count);
				}

				/* byte 8 is the CRC over bytes 0-7 */
				crc = w1_calc_crc8(rom, 8);

				if (rom[8] == crc)
					verdict = 1;
			}
		}

		/* retry on known-bad scratchpad contents */
		if (!w1_therm_check_rom(rom))
			break;
	}

	for (i = 0; i < 9; ++i)
		c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", rom[i]);
	c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
		      crc, (verdict) ? "YES" : "NO");
	if (verdict)
		memcpy(sl->rom, rom, sizeof(sl->rom));
	else
		dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n");

	for (i = 0; i < 9; ++i)
		c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);

	c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
		      w1_convert_temp(rom, sl->family->fid));
	mutex_unlock(&dev->mutex);

	return PAGE_SIZE - c;
}
/*
 * w1_therm_init - register every supported family
 *
 * Registration failures are not fatal: the family is flagged broken
 * so that w1_therm_fini() skips its unregistration.  Always returns 0.
 */
static int __init w1_therm_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
		int err = w1_register_family(w1_therm_families[i].f);

		if (err)
			w1_therm_families[i].broken = 1;
	}

	return 0;
}
/* Unregister only the families that registered successfully. */
static void __exit w1_therm_fini(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
		if (w1_therm_families[i].broken)
			continue;
		w1_unregister_family(w1_therm_families[i].f);
	}
}

module_init(w1_therm_init);
module_exit(w1_therm_fini);
| gpl-2.0 |
coldnew/linux | arch/um/kernel/exitcode.c | 4948 | 1734 | /*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/uaccess.h>
/*
* If read and write race, the read will still atomically read a valid
* value.
*/
int uml_exitcode = 0;
/* seq_file show routine: print the current UML exit code as one line. */
static int exitcode_proc_show(struct seq_file *m, void *v)
{
	/*
	 * Snapshot uml_exitcode into a local so that the formatting code
	 * does not need to access the global atomically.
	 */
	int code = uml_exitcode;

	seq_printf(m, "%d\n", code);
	return 0;
}
/* Open handler: hook the single-record seq_file show routine. */
static int exitcode_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, exitcode_proc_show, NULL);
}
/*
 * Write handler for /proc/exitcode: parse a decimal/hex integer from
 * userspace and store it in uml_exitcode.
 *
 * Returns count on success, -EFAULT on a bad user pointer, -EINVAL if
 * the input is not a number optionally followed by whitespace.
 */
static ssize_t exitcode_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	char *end, buf[sizeof("nnnnn\0")];
	size_t size;
	int tmp;

	/*
	 * The user buffer need not be NUL-terminated; the old code copied
	 * up to sizeof(buf) bytes and let simple_strtol() read past the
	 * end of buf.  Reserve one byte and terminate explicitly.
	 */
	size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, buffer, size))
		return -EFAULT;
	buf[size] = '\0';

	tmp = simple_strtol(buf, &end, 0);
	if ((*end != '\0') && !isspace(*end))
		return -EINVAL;

	uml_exitcode = tmp;
	return count;
}
/* /proc/exitcode file operations: seq_file based read, custom write. */
static const struct file_operations exitcode_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= exitcode_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= exitcode_proc_write,
};
/*
 * Create /proc/exitcode.  Registration failure is only logged; the
 * initcall deliberately reports success either way so boot continues.
 */
static int make_proc_exitcode(void)
{
	if (proc_create("exitcode", 0600, NULL, &exitcode_proc_fops) == NULL)
		printk(KERN_WARNING "make_proc_exitcode : Failed to register "
		       "/proc/exitcode\n");
	return 0;
}
__initcall(make_proc_exitcode);
| gpl-2.0 |
AmperificSuperKANG/kernel_hammerhead | tools/perf/util/cpumap.c | 4948 | 3803 | #include "util.h"
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <stdio.h>
/*
 * Build a map of all online CPUs (0 .. N-1) as reported by sysconf().
 * Returns NULL if the CPU count cannot be determined or on OOM.
 */
static struct cpu_map *cpu_map__default_new(void)
{
	struct cpu_map *map;
	int ncpus;

	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (ncpus < 0)
		return NULL;

	map = malloc(sizeof(*map) + ncpus * sizeof(int));
	if (map != NULL) {
		int idx;

		for (idx = 0; idx < ncpus; ++idx)
			map->map[idx] = idx;
		map->nr = ncpus;
	}
	return map;
}
/* Allocate a cpu_map sized exactly for nr_cpus and copy the ids in. */
static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload = nr_cpus * sizeof(int);
	struct cpu_map *map = malloc(sizeof(*map) + payload);

	if (map != NULL) {
		map->nr = nr_cpus;
		memcpy(map->map, tmp_cpus, payload);
	}
	return map;
}
/*
 * Build a cpu_map from /sys/devices/system/cpu/online, which lists the
 * online CPUs as comma-separated ids and ranges (e.g. "0-3,5,7-8\n").
 * Falls back to cpu_map__default_new() when the file cannot be opened
 * or nothing was parsed.
 */
static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct cpu_map *cpus = NULL;
	FILE *onlnf;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	sep = 0;
	prev = -1;	/* >= 0 means we are finishing a "prev-cpu" range */
	for (;;) {
		/* read one id plus the separator that follows it */
		n = fscanf(onlnf, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			/* expand the interior of a range (prev, cpu) */
			int new_max = nr_cpus + cpu - prev - 1;
			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}
			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			/* grow the scratch array before appending */
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}
		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;	/* '-' starts a range */
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;		/* end of the list */
	}
	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	fclose(onlnf);
	return cpus;
}
/*
 * Parse a user-supplied CPU list of the form "0,2,4-7" into a cpu_map.
 * A NULL list means "all online CPUs"; duplicates and malformed input
 * yield NULL.  An empty-but-valid parse falls back to the default map.
 */
struct cpu_map *cpu_map__new(const char *cpu_list)
{
	struct cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/* the list must begin with a number */
	if (!isdigit(*cpu_list))
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		/* only ',' '-' or end-of-string may follow a number */
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;
		if (*p == '-') {
			/* "a-b": parse the upper bound of the range */
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);
			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;
			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;	/* single id */
		}
		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;
			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;	/* skip the ',' */
		cpu_list = p;
	}
	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
invalid:
	/* success also passes through here to release the scratch array */
	free(tmp_cpus);
out:
	return cpus;
}
/* Print "N cpus: a, b, c\n" to fp; returns the number of bytes written. */
size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
	size_t printed;
	int idx;

	printed = fprintf(fp, "%d cpu%s: ", map->nr, map->nr > 1 ? "s" : "");
	for (idx = 0; idx < map->nr; ++idx)
		printed += fprintf(fp, "%s%d", idx ? ", " : "", map->map[idx]);
	printed += fprintf(fp, "\n");
	return printed;
}
/* Build a one-entry map whose single id is -1 ("any CPU" placeholder). */
struct cpu_map *cpu_map__dummy_new(void)
{
	struct cpu_map *map = malloc(sizeof(*map) + sizeof(int));

	if (map != NULL) {
		map->nr = 1;
		map->map[0] = -1;
	}
	return map;
}
/*
 * Free a map returned by one of the cpu_map__*new() constructors.
 * NULL is accepted (free(NULL) is a no-op).
 */
void cpu_map__delete(struct cpu_map *map)
{
	free(map);
}
| gpl-2.0 |
CaptainThrowback/kernel_htc_e8 | net/atm/mpc.c | 5460 | 39140 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
/* We are an ethernet device */
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h> /* for ip_fast_csum() */
#include <net/arp.h>
#include <net/dst.h>
#include <linux/proc_fs.h>
/* And atm device */
#include <linux/atmdev.h>
#include <linux/atmlec.h>
#include <linux/atmmpc.h>
/* Modular too */
#include <linux/module.h>
#include "lec.h"
#include "mpc.h"
#include "resources.h"
/*
* mpc.c: Implementation of MPOA client kernel part
*/
#if 0
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
} while (0)
#define dprintk_cont(format, args...) \
do { if (0) printk(KERN_CONT format, ##args); } while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
} while (0)
#define ddprintk_cont(format, args...) \
do { if (0) printk(KERN_CONT format, ##args); } while (0)
#endif
/* mpc_daemon -> kernel */
static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
int action);
static void MPOA_cache_impos_rcvd(struct k_message *msg,
struct mpoa_client *mpc);
static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
struct mpoa_client *mpc);
static void set_mps_mac_addr_rcvd(struct k_message *mesg,
struct mpoa_client *mpc);
static const uint8_t *copy_macs(struct mpoa_client *mpc,
const uint8_t *router_mac,
const uint8_t *tlvs, uint8_t mps_macs,
uint8_t device_type);
static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry);
static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc);
static void mpoad_close(struct atm_vcc *vcc);
static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
struct net_device *dev);
static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
unsigned long event, void *dev);
static void mpc_timer_refresh(void);
static void mpc_cache_check(unsigned long checking_time);
/* LLC/SNAP header matched on MPOA control PDUs (OUI 00-00-5e, PID 0x0003). */
static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
	0xaa, 0xaa, 0x03,
	{0x00, 0x00, 0x5e},
	{0x00, 0x03} /* For MPOA control PDUs */
};
/* LLC/SNAP header for untagged IP data PDUs (EtherType 0x0800). */
static struct llc_snap_hdr llc_snap_mpoa_data = {
	0xaa, 0xaa, 0x03,
	{0x00, 0x00, 0x00},
	{0x08, 0x00} /* This is for IP PDUs only */
};
/* LLC/SNAP header for tagged data PDUs (EtherType 0x884c). */
static struct llc_snap_hdr llc_snap_mpoa_data_tagged = {
	0xaa, 0xaa, 0x03,
	{0x00, 0x00, 0x00},
	{0x88, 0x4c} /* This is for tagged data PDUs */
};
/* Netdevice notifier used to track lec interfaces appearing/vanishing. */
static struct notifier_block mpoa_notifier = {
	mpoa_event_listener,
	NULL,
	0
};
/* Global singly-linked list of MPOA clients, one per lec interface. */
struct mpoa_client *mpcs = NULL; /* FIXME */
/* Per-destination QoS overrides, managed by atm_mpoa_{add,delete}_qos(). */
static struct atm_mpoa_qos *qos_head = NULL;
/* Cache maintenance timer; callback installed elsewhere in this file. */
static DEFINE_TIMER(mpc_timer, NULL, 0, 0);
static struct mpoa_client *find_mpc_by_itfnum(int itf)
{
struct mpoa_client *mpc;
mpc = mpcs; /* our global linked list */
while (mpc != NULL) {
if (mpc->dev_num == itf)
return mpc;
mpc = mpc->next;
}
return NULL; /* not found */
}
static struct mpoa_client *find_mpc_by_vcc(struct atm_vcc *vcc)
{
struct mpoa_client *mpc;
mpc = mpcs; /* our global linked list */
while (mpc != NULL) {
if (mpc->mpoad_vcc == vcc)
return mpc;
mpc = mpc->next;
}
return NULL; /* not found */
}
static struct mpoa_client *find_mpc_by_lec(struct net_device *dev)
{
struct mpoa_client *mpc;
mpc = mpcs; /* our global linked list */
while (mpc != NULL) {
if (mpc->dev == dev)
return mpc;
mpc = mpc->next;
}
return NULL; /* not found */
}
/*
* Functions for managing QoS list
*/
/*
* Overwrites the old entry or makes a new one.
*/
/*
 * Record a QoS override for dst_ip.  An existing entry is overwritten
 * in place; otherwise a new one is pushed at the list head.  Returns
 * the entry, or NULL on allocation failure.
 */
struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
{
	struct atm_mpoa_qos *entry = atm_mpoa_search_qos(dst_ip);

	if (entry != NULL) {
		/* already present: just replace the parameters */
		entry->qos = *qos;
		return entry;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (entry == NULL) {
		pr_info("mpoa: out of memory\n");
		return NULL;
	}

	entry->ipaddr = dst_ip;
	entry->qos = *qos;
	entry->next = qos_head;
	qos_head = entry;

	return entry;
}
/* Find the QoS entry for dst_ip, or NULL if none is recorded. */
struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
{
	struct atm_mpoa_qos *walk;

	for (walk = qos_head; walk != NULL; walk = walk->next) {
		if (walk->ipaddr == dst_ip)
			break;
	}
	return walk;
}
/*
* Returns 0 for failure
*/
/*
 * Unlink and free a QoS entry.  Returns 1 on success, 0 when entry is
 * NULL or not on the list.
 */
int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
{
	struct atm_mpoa_qos **link;

	if (entry == NULL)
		return 0;

	/* walk the link pointers so the head needs no special case */
	for (link = &qos_head; *link != NULL; link = &(*link)->next) {
		if (*link == entry) {
			*link = entry->next;
			kfree(entry);
			return 1;
		}
	}
	return 0;
}
/* this is buggered - we need locking for qos_head */
/* Dump all shortcut QoS entries to a seq_file (used by the proc code). */
/* this is buggered - we need locking for qos_head */
void atm_mpoa_disp_qos(struct seq_file *m)
{
	struct atm_mpoa_qos *qos;

	qos = qos_head;
	seq_printf(m, "QoS entries for shortcuts:\n");
	seq_printf(m, "IP address\n  TX:max_pcr pcr     min_pcr max_cdv max_sdu\n  RX:max_pcr pcr     min_pcr max_cdv max_sdu\n");

	/* NOTE(review): walks qos_head without a lock, per the FIXME above */
	while (qos != NULL) {
		seq_printf(m, "%pI4\n     %-7d %-7d %-7d %-7d %-7d\n     %-7d %-7d %-7d %-7d %-7d\n",
			   &qos->ipaddr,
			   qos->qos.txtp.max_pcr,
			   qos->qos.txtp.pcr,
			   qos->qos.txtp.min_pcr,
			   qos->qos.txtp.max_cdv,
			   qos->qos.txtp.max_sdu,
			   qos->qos.rxtp.max_pcr,
			   qos->qos.rxtp.pcr,
			   qos->qos.rxtp.min_pcr,
			   qos->qos.rxtp.max_cdv,
			   qos->qos.rxtp.max_sdu);
		qos = qos->next;
	}
}
/*
 * Resolve "lec<itf>" to its net_device.  On success the device's
 * refcount is held (dev_get_by_name); the caller must dev_put() it.
 */
static struct net_device *find_lec_by_itfnum(int itf)
{
	char name[IFNAMSIZ];

	sprintf(name, "lec%d", itf);
	return dev_get_by_name(&init_net, name);
}
/*
 * Allocate a fresh MPOA client, initialise its locks, caches and
 * default parameters, and push it onto the global mpcs list.
 * Returns NULL on allocation failure.
 */
static struct mpoa_client *alloc_mpc(void)
{
	struct mpoa_client *mpc = kzalloc(sizeof(*mpc), GFP_KERNEL);

	if (mpc == NULL)
		return NULL;

	rwlock_init(&mpc->ingress_lock);
	rwlock_init(&mpc->egress_lock);
	mpc->next = mpcs;
	atm_mpoa_init_cache(mpc);

	/* protocol defaults from the MPOA spec */
	mpc->parameters.mpc_p1 = MPC_P1;
	mpc->parameters.mpc_p2 = MPC_P2;
	memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
	mpc->parameters.mpc_p4 = MPC_P4;
	mpc->parameters.mpc_p5 = MPC_P5;
	mpc->parameters.mpc_p6 = MPC_P6;

	mpcs = mpc;
	return mpc;
}
/*
*
* start_mpc() puts the MPC on line. All the packets destined
* to the lec underneath us are now being monitored and
* shortcuts will be established.
*
*/
static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
{
	dprintk("(%s)\n", mpc->dev->name);
	if (!dev->netdev_ops)
		pr_info("(%s) not starting\n", dev->name);
	else {
		/* Save the lec's ops and install a private copy whose
		 * ndo_start_xmit is our shortcut-aware mpc_send_packet(). */
		mpc->old_ops = dev->netdev_ops;
		mpc->new_ops = *mpc->old_ops;
		mpc->new_ops.ndo_start_xmit = mpc_send_packet;
		dev->netdev_ops = &mpc->new_ops;
	}
}
/* Undo start_mpc(): restore the lec device's original netdev_ops. */
static void stop_mpc(struct mpoa_client *mpc)
{
	struct net_device *dev = mpc->dev;
	dprintk("(%s)", mpc->dev->name);
	/* Lets not nullify lec device's dev->hard_start_xmit */
	if (dev->netdev_ops != &mpc->new_ops) {
		/* idempotent: ops were never ours (or already restored) */
		dprintk_cont(" mpc already stopped, not fatal\n");
		return;
	}
	dprintk_cont("\n");
	dev->netdev_ops = mpc->old_ops;
	mpc->old_ops = NULL;
	/* close_shortcuts(mpc);    ???   FIXME */
}
static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
/* Human-readable name for an MPOA Device Type TLV value (debug only). */
static const char *mpoa_device_type_string(char type)
{
	const char *desc;

	switch (type) {
	case NON_MPOA:
		desc = "non-MPOA device";
		break;
	case MPS:
		desc = "MPS";
		break;
	case MPC:
		desc = "MPC";
		break;
	case MPS_AND_MPC:
		desc = "both MPS and MPC";
		break;
	default:
		desc = "unspecified (non-MPOA) device";
		break;
	}
	return desc;
}
/*
* lec device calls this via its netdev_priv(dev)->lane2_ops
* ->associate_indicator() when it sees a TLV in LE_ARP packet.
* We fill in the pointer above when we see a LANE2 lec initializing
* See LANE2 spec 3.1.5
*
* Quite a big and ugly function but when you look at it
* all it does is to try to locate and parse MPOA Device
* Type TLV.
* We give our lec a pointer to this function and when the
* lec sees a TLV it uses the pointer to call this function.
*
*/
/*
 * Parse the TLVs from an LE_ARP reply looking for an MPOA Device Type
 * TLV; when an MPS (or MPS+MPC) is found, record its MAC address(es)
 * and tell mpcd its control ATM address.  See the comment block above
 * for the LANE2 context.
 */
static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
			    const u8 *tlvs, u32 sizeoftlvs)
{
	uint32_t type;
	uint8_t length, mpoa_device_type, number_of_mps_macs;
	const uint8_t *end_of_tlvs;
	struct mpoa_client *mpc;
	mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
	dprintk("(%s) received TLV(s), ", dev->name);
	dprintk("total length of all TLVs %d\n", sizeoftlvs);
	mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
	if (mpc == NULL) {
		pr_info("(%s) no mpc\n", dev->name);
		return;
	}
	end_of_tlvs = tlvs + sizeoftlvs;
	/* each TLV is a 4-byte type, 1-byte length, then the value */
	while (end_of_tlvs - tlvs >= 5) {
		type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
			(tlvs[2] << 8) | tlvs[3]);
		length = tlvs[4];
		tlvs += 5;
		dprintk("    type 0x%x length %02x\n", type, length);
		if (tlvs + length > end_of_tlvs) {
			pr_info("TLV value extends past its buffer, aborting parse\n");
			return;
		}
		if (type == 0) {
			pr_info("mpoa: (%s) TLV type was 0, returning\n",
				dev->name);
			return;
		}
		if (type != TLV_MPOA_DEVICE_TYPE) {
			tlvs += length;
			continue;  /* skip other TLVs */
		}
		mpoa_device_type = *tlvs++;
		number_of_mps_macs = *tlvs++;
		dprintk("(%s) MPOA device type '%s', ",
			dev->name, mpoa_device_type_string(mpoa_device_type));
		/* sanity-check the TLV length against its declared contents */
		if (mpoa_device_type == MPS_AND_MPC &&
		    length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
			pr_info("(%s) short MPOA Device Type TLV\n",
				dev->name);
			continue;
		}
		if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
		    length < 22 + number_of_mps_macs*ETH_ALEN) {
			pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
			continue;
		}
		if (mpoa_device_type != MPS &&
		    mpoa_device_type != MPS_AND_MPC) {
			dprintk("ignoring non-MPS device ");
			if (mpoa_device_type == MPC)
				tlvs += 20;	/* skip the MPC control address */
			continue;  /* we are only interested in MPSs */
		}
		if (number_of_mps_macs == 0 &&
		    mpoa_device_type == MPS_AND_MPC) {
			pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
			continue;  /* someone should read the spec */
		}
		dprintk_cont("this MPS has %d MAC addresses\n",
			     number_of_mps_macs);
		/*
		 * ok, now we can go and tell our daemon
		 * the control address of MPS
		 */
		send_set_mps_ctrl_addr(tlvs, mpc);
		tlvs = copy_macs(mpc, mac_addr, tlvs,
				 number_of_mps_macs, mpoa_device_type);
		if (tlvs == NULL)
			return;
	}
	if (end_of_tlvs - tlvs != 0)
		pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
			dev->name, end_of_tlvs - tlvs);
}
/*
* Store at least advertizing router's MAC address
* plus the possible MAC address(es) to mpc->mps_macs.
* For a freshly allocated MPOA client mpc->mps_macs == 0.
*/
/*
 * Store at least advertizing router's MAC address
 * plus the possible MAC address(es) to mpc->mps_macs.
 * For a freshly allocated MPOA client mpc->mps_macs == 0.
 *
 * Returns the advanced tlvs pointer, or NULL on allocation failure.
 */
static const uint8_t *copy_macs(struct mpoa_client *mpc,
				const uint8_t *router_mac,
				const uint8_t *tlvs, uint8_t mps_macs,
				uint8_t device_type)
{
	int num_macs;
	num_macs = (mps_macs > 1) ? mps_macs : 1;
	if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
		if (mpc->number_of_mps_macs != 0)
			kfree(mpc->mps_macs);
		mpc->number_of_mps_macs = 0;
		mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
		if (mpc->mps_macs == NULL) {
			pr_info("(%s) out of mem\n", mpc->dev->name);
			return NULL;
		}
	}
	/* default to the router's MAC; overwritten below if TLV lists any */
	memcpy(mpc->mps_macs, router_mac, ETH_ALEN);
	/* skip the 20-byte control address(es) preceding the MAC list */
	tlvs += 20; if (device_type == MPS_AND_MPC) tlvs += 20;
	if (mps_macs > 0)
		memcpy(mpc->mps_macs, tlvs, mps_macs*ETH_ALEN);
	tlvs += mps_macs*ETH_ALEN;
	mpc->number_of_mps_macs = num_macs;
	return tlvs;
}
/*
 * Try to forward an outgoing IP packet over an MPOA shortcut VCC.
 * Returns 0 when the packet was consumed via the shortcut, 1 when the
 * caller should fall back to the normal LANE path.
 */
static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
{
	in_cache_entry *entry;
	struct iphdr *iph;
	char *buff;
	__be32 ipaddr = 0;
	static struct {
		struct llc_snap_hdr hdr;
		__be32 tag;
	} tagged_llc_snap_hdr = {
		{0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}},
		0
	};
	buff = skb->data + mpc->dev->hard_header_len;
	iph = (struct iphdr *)buff;
	ipaddr = iph->daddr;
	ddprintk("(%s) ipaddr 0x%x\n",
		 mpc->dev->name, ipaddr);
	entry = mpc->in_ops->get(ipaddr, mpc);
	if (entry == NULL) {
		/* first packet to this destination: seed the cache and
		 * let LANE carry the packet */
		entry = mpc->in_ops->add_entry(ipaddr, mpc);
		if (entry != NULL)
			mpc->in_ops->put(entry);
		return 1;
	}
	/* threshold not exceeded or VCC not ready */
	if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
		ddprintk("(%s) cache_hit: returns != OPEN\n",
			 mpc->dev->name);
		mpc->in_ops->put(entry);
		return 1;
	}
	ddprintk("(%s) using shortcut\n",
		 mpc->dev->name);
	/* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
	if (iph->ttl <= 1) {
		ddprintk("(%s) IP ttl = %u, using LANE\n",
			 mpc->dev->name, iph->ttl);
		mpc->in_ops->put(entry);
		return 1;
	}
	iph->ttl--;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	/* swap the Ethernet header for the appropriate LLC/SNAP header */
	if (entry->ctrl_info.tag != 0) {
		ddprintk("(%s) adding tag 0x%x\n",
			 mpc->dev->name, entry->ctrl_info.tag);
		tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
		skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
		skb_push(skb, sizeof(tagged_llc_snap_hdr));
		/* add LLC/SNAP header */
		skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
					sizeof(tagged_llc_snap_hdr));
	} else {
		skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
		skb_push(skb, sizeof(struct llc_snap_hdr));
		/* add LLC/SNAP header + tag */
		skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
					sizeof(struct llc_snap_hdr));
	}
	atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
	entry->shortcut->send(entry->shortcut, skb);
	entry->packets_fwded++;
	mpc->in_ops->put(entry);
	return 0;
}
/*
* Probably needs some error checks and locking, not sure...
*/
/*
 * Replacement ndo_start_xmit installed on the lec device by start_mpc():
 * IP packets whose destination MAC belongs to a known MPS are tried over
 * an MPOA shortcut; everything else goes through the lec's original xmit.
 * Probably needs some error checks and locking, not sure...
 */
static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct mpoa_client *mpc;
	struct ethhdr *eth;
	int i = 0;

	mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
	if (mpc == NULL) {
		pr_info("(%s) no MPC found\n", dev->name);
		/*
		 * Without an mpc we cannot reach the saved lec ops; the
		 * old code jumped to non_ip here and dereferenced the
		 * NULL mpc.  Drop the packet instead of oopsing.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		goto non_ip; /* Multi-Protocol Over ATM :-) */

	/* Weed out funny packets (e.g., AF_PACKET or raw). */
	if (skb->len < ETH_HLEN + sizeof(struct iphdr))
		goto non_ip;
	skb_set_network_header(skb, ETH_HLEN);
	if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5)
		goto non_ip;

	/* is the destination MAC one of our MPS addresses? */
	while (i < mpc->number_of_mps_macs) {
		if (!compare_ether_addr(eth->h_dest,
					(mpc->mps_macs + i*ETH_ALEN)))
			if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
				return NETDEV_TX_OK;
		i++;
	}

non_ip:
	return mpc->old_ops->ndo_start_xmit(skb, dev);
}
/*
 * ioctl handler: attach a shortcut SVC (opened by mpcd) to an ingress
 * cache entry, or accept it as an egress SVC.  Returns 0 on success,
 * -EFAULT on a bad user pointer, -EINVAL on bad itf/state.
 */
static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
{
	int bytes_left;
	struct mpoa_client *mpc;
	struct atmmpc_ioc ioc_data;
	in_cache_entry *in_entry;
	__be32  ipaddr;
	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
	if (bytes_left != 0) {
		pr_info("mpoa:Short read (missed %d bytes) from userland\n",
			bytes_left);
		return -EFAULT;
	}
	ipaddr = ioc_data.ipaddr;
	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
		return -EINVAL;
	mpc = find_mpc_by_itfnum(ioc_data.dev_num);
	if (mpc == NULL)
		return -EINVAL;
	if (ioc_data.type == MPC_SOCKET_INGRESS) {
		/* ingress: the cache entry must already be RESOLVED */
		in_entry = mpc->in_ops->get(ipaddr, mpc);
		if (in_entry == NULL ||
		    in_entry->entry_state < INGRESS_RESOLVED) {
			pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
				mpc->dev->name);
			if (in_entry != NULL)
				mpc->in_ops->put(in_entry);
			return -EINVAL;
		}
		pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
			mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
		in_entry->shortcut = vcc;
		mpc->in_ops->put(in_entry);
	} else {
		pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
	}
	/* incoming data on this VCC now goes through mpc_push() */
	vcc->proto_data = mpc->dev;
	vcc->push = mpc_push;
	return 0;
}
/*
*
*/
/*
 * A shortcut VCC closed: detach it from any ingress and/or egress
 * cache entry that references it.
 */
static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
{
	struct mpoa_client *mpc;
	in_cache_entry *in_entry;
	eg_cache_entry *eg_entry;
	mpc = find_mpc_by_lec(dev);
	if (mpc == NULL) {
		pr_info("(%s) close for unknown MPC\n", dev->name);
		return;
	}
	dprintk("(%s)\n", dev->name);
	in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
	if (in_entry) {
		dprintk("(%s) ingress SVC closed ip = %pI4\n",
			mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
		in_entry->shortcut = NULL;
		mpc->in_ops->put(in_entry);
	}
	eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
	if (eg_entry) {
		dprintk("(%s) egress SVC closed\n", mpc->dev->name);
		eg_entry->shortcut = NULL;
		mpc->eg_ops->put(eg_entry);
	}
	if (in_entry == NULL && eg_entry == NULL)
		dprintk("(%s) unused vcc closed\n", dev->name);
}
/*
 * push handler for shortcut VCCs: control PDUs are queued for mpcd,
 * tagged data PDUs are matched to an egress cache entry, rebuilt with
 * the stored MAC header and handed to the network stack.  A NULL skb
 * signals VCC close.
 */
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = (struct net_device *)vcc->proto_data;
	struct sk_buff *new_skb;
	eg_cache_entry *eg;
	struct mpoa_client *mpc;
	__be32 tag;
	char *tmp;
	ddprintk("(%s)\n", dev->name);
	if (skb == NULL) {
		dprintk("(%s) null skb, closing VCC\n", dev->name);
		mpc_vcc_close(vcc, dev);
		return;
	}
	skb->dev = dev;
	if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
		   sizeof(struct llc_snap_hdr)) == 0) {
		struct sock *sk = sk_atm(vcc);
		dprintk("(%s) control packet arrived\n", dev->name);
		/* Pass control packets to daemon */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return;
	}
	/* data coming over the shortcut */
	atm_return(vcc, skb->truesize);
	mpc = find_mpc_by_lec(dev);
	if (mpc == NULL) {
		pr_info("(%s) unknown MPC\n", dev->name);
		return;
	}
	/* classify the PDU by its LLC/SNAP header */
	if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
		   sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
		ddprintk("(%s) tagged data packet arrived\n", dev->name);
	} else if (memcmp(skb->data, &llc_snap_mpoa_data,
			  sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
		pr_info("(%s) Unsupported non-tagged data packet arrived.  Purging\n",
			dev->name);
		dev_kfree_skb_any(skb);
		return;
	} else {
		pr_info("(%s) garbage arrived, purging\n", dev->name);
		dev_kfree_skb_any(skb);
		return;
	}
	/* the 32-bit tag after the header identifies the egress entry */
	tmp = skb->data + sizeof(struct llc_snap_hdr);
	tag = *(__be32 *)tmp;
	eg = mpc->eg_ops->get_by_tag(tag, mpc);
	if (eg == NULL) {
		pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
			dev->name, tag);
		purge_egress_shortcut(vcc, NULL);
		dev_kfree_skb_any(skb);
		return;
	}
	/*
	 * See if ingress MPC is using shortcut we opened as a return channel.
	 * This means we have a bi-directional vcc opened by us.
	 */
	if (eg->shortcut == NULL) {
		eg->shortcut = vcc;
		pr_info("(%s) egress SVC in use\n", dev->name);
	}
	skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
					/* get rid of LLC/SNAP header */
	new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
					/* LLC/SNAP is shorter than MAC header :( */
	dev_kfree_skb_any(skb);
	if (new_skb == NULL) {
		mpc->eg_ops->put(eg);
		return;
	}
	/* re-attach the data-link header saved in the egress entry */
	skb_push(new_skb, eg->ctrl_info.DH_length);     /* add MAC header */
	skb_copy_to_linear_data(new_skb, eg->ctrl_info.DLL_header,
				eg->ctrl_info.DH_length);
	new_skb->protocol = eth_type_trans(new_skb, dev);
	skb_reset_network_header(new_skb);
	eg->latest_ip_addr = ip_hdr(new_skb)->saddr;
	eg->packets_rcvd++;
	mpc->eg_ops->put(eg);
	memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
	netif_rx(new_skb);
}
/* Ops for the pseudo ATM device the mpcd control VCC is attached to. */
static struct atmdev_ops mpc_ops = { /* only send is required */
	.close	= mpoad_close,
	.send	= msg_from_mpoad
};
static struct atm_dev mpc_dev = {
	.ops	= &mpc_ops,
	.type	= "mpc",
	.number	= 42,	/* fixed device number for the pseudo device */
	.lock	= __SPIN_LOCK_UNLOCKED(mpc_dev.lock)
	/* members not explicitly initialised will be 0 */
};
/*
 * Attach the mpcd daemon's control VCC to the MPOA client for lec
 * interface <arg>, allocating the client on first use.  Returns arg on
 * success, -EADDRINUSE if a daemon is already attached, -ENOMEM on OOM.
 */
static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
{
	struct mpoa_client *mpc;
	struct lec_priv *priv;
	int err;
	if (mpcs == NULL) {
		/* first daemon ever: start the cache timer and the
		 * netdevice notifier */
		init_timer(&mpc_timer);
		mpc_timer_refresh();
		/* This lets us now how our LECs are doing */
		err = register_netdevice_notifier(&mpoa_notifier);
		if (err < 0) {
			del_timer(&mpc_timer);
			return err;
		}
	}
	mpc = find_mpc_by_itfnum(arg);
	if (mpc == NULL) {
		dprintk("allocating new mpc for itf %d\n", arg);
		mpc = alloc_mpc();
		if (mpc == NULL)
			return -ENOMEM;
		mpc->dev_num = arg;
		mpc->dev = find_lec_by_itfnum(arg);
					/* NULL if there was no lec */
	}
	if (mpc->mpoad_vcc) {
		pr_info("mpoad is already present for itf %d\n", arg);
		return -EADDRINUSE;
	}
	if (mpc->dev) { /* check if the lec is LANE2 capable */
		priv = netdev_priv(mpc->dev);
		if (priv->lane_version < 2) {
			dev_put(mpc->dev);
			mpc->dev = NULL;
		} else
			priv->lane2_ops->associate_indicator = lane2_assoc_ind;
	}
	mpc->mpoad_vcc = vcc;
	vcc->dev = &mpc_dev;
	vcc_insert_socket(sk_atm(vcc));
	set_bit(ATM_VF_META, &vcc->flags);
	set_bit(ATM_VF_READY, &vcc->flags);
	if (mpc->dev) {
		char empty[ATM_ESA_LEN];
		memset(empty, 0, ATM_ESA_LEN);
		start_mpc(mpc, mpc->dev);
		/* set address if mpcd e.g. gets killed and restarted.
		 * If we do not do it now we have to wait for the next LE_ARP
		 */
		if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
			send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
	}
	__module_get(THIS_MODULE);
	return arg;
}
/* Remember the MPS control ATM address and forward it to mpcd. */
static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
{
	struct k_message mesg;
	memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
	mesg.type = SET_MPS_CTRL_ADDR;
	memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
	msg_to_mpoad(&mesg, mpc);
}
/*
 * atmdev close op: the mpcd control VCC went away.  Detach the daemon,
 * restore the lec device, flush the caches and drain queued messages.
 */
static void mpoad_close(struct atm_vcc *vcc)
{
	struct mpoa_client *mpc;
	struct sk_buff *skb;
	mpc = find_mpc_by_vcc(vcc);
	if (mpc == NULL) {
		pr_info("did not find MPC\n");
		return;
	}
	if (!mpc->mpoad_vcc) {
		pr_info("close for non-present mpoad\n");
		return;
	}
	mpc->mpoad_vcc = NULL;
	if (mpc->dev) {
		struct lec_priv *priv = netdev_priv(mpc->dev);
		priv->lane2_ops->associate_indicator = NULL;
		stop_mpc(mpc);
		dev_put(mpc->dev);
	}
	mpc->in_ops->destroy_cache(mpc);
	mpc->eg_ops->destroy_cache(mpc);
	/* drop any messages the daemon never consumed */
	while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
		atm_return(vcc, skb->truesize);
		kfree_skb(skb);
	}
	pr_info("(%s) going down\n",
		(mpc->dev) ? mpc->dev->name : "<unknown>");
	module_put(THIS_MODULE);
}
/*
*
*/
/*
 * atmdev send op: dispatch a k_message received from the mpcd daemon
 * to the matching handler.  Always consumes the skb and returns 0.
 */
static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
	struct k_message *mesg = (struct k_message *)skb->data;
	/* undo the tx accounting done when the daemon queued the skb */
	atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	if (mpc == NULL) {
		pr_info("no mpc found\n");
		return 0;
	}
	dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
	switch (mesg->type) {
	case MPOA_RES_REPLY_RCVD:
		dprintk_cont("mpoa_res_reply_rcvd\n");
		MPOA_res_reply_rcvd(mesg, mpc);
		break;
	case MPOA_TRIGGER_RCVD:
		dprintk_cont("mpoa_trigger_rcvd\n");
		MPOA_trigger_rcvd(mesg, mpc);
		break;
	case INGRESS_PURGE_RCVD:
		dprintk_cont("nhrp_purge_rcvd\n");
		ingress_purge_rcvd(mesg, mpc);
		break;
	case EGRESS_PURGE_RCVD:
		dprintk_cont("egress_purge_reply_rcvd\n");
		egress_purge_rcvd(mesg, mpc);
		break;
	case MPS_DEATH:
		dprintk_cont("mps_death\n");
		mps_death(mesg, mpc);
		break;
	case CACHE_IMPOS_RCVD:
		dprintk_cont("cache_impos_rcvd\n");
		MPOA_cache_impos_rcvd(mesg, mpc);
		break;
	case SET_MPC_CTRL_ADDR:
		dprintk_cont("set_mpc_ctrl_addr\n");
		set_mpc_ctrl_addr_rcvd(mesg, mpc);
		break;
	case SET_MPS_MAC_ADDR:
		dprintk_cont("set_mps_mac_addr\n");
		set_mps_mac_addr_rcvd(mesg, mpc);
		break;
	case CLEAN_UP_AND_EXIT:
		dprintk_cont("clean_up_and_exit\n");
		clean_up(mesg, mpc, DIE);
		break;
	case RELOAD:
		dprintk_cont("reload\n");
		clean_up(mesg, mpc, RELOAD);
		break;
	case SET_MPC_PARAMS:
		dprintk_cont("set_mpc_params\n");
		mpc->parameters = mesg->content.params;
		break;
	default:
		dprintk_cont("unknown message %d\n", mesg->type);
		break;
	}
	kfree_skb(skb);
	return 0;
}
/* Remember that this function may not do things that sleep */
/*
 * Queue a k_message on the daemon's control VCC socket and wake it.
 * Returns 0 on success, -ENXIO when no daemon is attached, -ENOMEM on
 * allocation failure.
 */
/* Remember that this function may not do things that sleep */
int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
{
	struct sk_buff *skb;
	struct sock *sk;
	if (mpc == NULL || !mpc->mpoad_vcc) {
		pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
		return -ENXIO;
	}
	/* GFP_ATOMIC: may be called from non-sleeping context */
	skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;
	skb_put(skb, sizeof(struct k_message));
	skb_copy_to_linear_data(skb, mesg, sizeof(*mesg));
	atm_force_charge(mpc->mpoad_vcc, skb->truesize);
	sk = sk_atm(mpc->mpoad_vcc);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);
	return 0;
}
/*
 * Netdevice notifier callback: keep the MPOA clients in sync with the
 * lifecycle of their lec interfaces (register/unregister/up/down).
 */
static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
			       unsigned long event, void *dev_ptr)
{
	struct net_device *dev;
	struct mpoa_client *mpc;
	struct lec_priv *priv;
	dev = dev_ptr;
	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
	if (dev->name == NULL || strncmp(dev->name, "lec", 3))
		return NOTIFY_DONE; /* we are only interested in lec:s */
	switch (event) {
	case NETDEV_REGISTER:       /* a new lec device was allocated */
		priv = netdev_priv(dev);
		if (priv->lane_version < 2)
			break;	/* MPOA requires LANE2 */
		priv->lane2_ops->associate_indicator = lane2_assoc_ind;
		mpc = find_mpc_by_itfnum(priv->itfnum);
		if (mpc == NULL) {
			dprintk("allocating new mpc for %s\n", dev->name);
			mpc = alloc_mpc();
			if (mpc == NULL) {
				pr_info("no new mpc");
				break;
			}
		}
		mpc->dev_num = priv->itfnum;
		mpc->dev = dev;
		dev_hold(dev);	/* released on NETDEV_UNREGISTER */
		dprintk("(%s) was initialized\n", dev->name);
		break;
	case NETDEV_UNREGISTER:
		/* the lec device was deallocated */
		mpc = find_mpc_by_lec(dev);
		if (mpc == NULL)
			break;
		dprintk("device (%s) was deallocated\n", dev->name);
		stop_mpc(mpc);
		dev_put(mpc->dev);
		mpc->dev = NULL;
		break;
	case NETDEV_UP:
		/* the dev was ifconfig'ed up */
		mpc = find_mpc_by_lec(dev);
		if (mpc == NULL)
			break;
		if (mpc->mpoad_vcc != NULL)
			start_mpc(mpc, dev);
		break;
	case NETDEV_DOWN:
		/* the dev was ifconfig'ed down */
		/* this means that the flow of packets from the
		 * upper layer stops
		 */
		mpc = find_mpc_by_lec(dev);
		if (mpc == NULL)
			break;
		if (mpc->mpoad_vcc != NULL)
			stop_mpc(mpc);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
/*
* Functions which are called after a message is received from mpcd.
* Msg is reused on purpose.
*/
/*
 * mpcd reported an MPOA trigger for dst_ip: make sure an ingress cache
 * entry exists and, unless one is already resolving/resolved, ask the
 * daemon to send a resolution request.  Msg is reused on purpose.
 */
static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
	__be32 dst_ip = msg->content.in_info.in_dst_ip;
	in_cache_entry *entry;

	entry = mpc->in_ops->get(dst_ip, mpc);
	if (entry == NULL) {
		entry = mpc->in_ops->add_entry(dst_ip, mpc);
		if (entry == NULL) {
			/* add_entry allocates and can fail; the old code
			 * dereferenced the NULL pointer here */
			pr_info("(%s) unable to add ingress entry\n",
				(mpc->dev) ? mpc->dev->name : "<unknown>");
			return;
		}
		entry->entry_state = INGRESS_RESOLVING;
		msg->type = SND_MPOA_RES_RQST;
		msg->content.in_info = entry->ctrl_info;
		msg_to_mpoad(msg, mpc);
		do_gettimeofday(&(entry->reply_wait));
		mpc->in_ops->put(entry);
		return;
	}

	if (entry->entry_state == INGRESS_INVALID) {
		entry->entry_state = INGRESS_RESOLVING;
		msg->type = SND_MPOA_RES_RQST;
		msg->content.in_info = entry->ctrl_info;
		msg_to_mpoad(msg, mpc);
		do_gettimeofday(&(entry->reply_wait));
		mpc->in_ops->put(entry);
		return;
	}

	pr_info("(%s) entry already in resolving state\n",
		(mpc->dev) ? mpc->dev->name : "<unknown>");
	mpc->in_ops->put(entry);
}
/*
* Things get complicated because we have to check if there's an egress
* shortcut with suitable traffic parameters we could use.
*/
/*
 * Things get complicated because we have to check if there's an egress
 * shortcut with suitable traffic parameters we could use.
 */
static void check_qos_and_open_shortcut(struct k_message *msg,
					struct mpoa_client *client,
					in_cache_entry *entry)
{
	__be32 dst_ip = msg->content.in_info.in_dst_ip;
	struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
	eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
	if (eg_entry && eg_entry->shortcut) {
		/* reuse the egress VCC only if its traffic class is
		 * compatible with what was requested (and any override) */
		if (eg_entry->shortcut->qos.txtp.traffic_class &
		    msg->qos.txtp.traffic_class &
		    (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
			if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
				entry->shortcut = eg_entry->shortcut;
			else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
				entry->shortcut = eg_entry->shortcut;
		}
		if (entry->shortcut) {
			dprintk("(%s) using egress SVC to reach %pI4\n",
				client->dev->name, &dst_ip);
			client->eg_ops->put(eg_entry);
			return;
		}
	}
	if (eg_entry != NULL)
		client->eg_ops->put(eg_entry);
	/* No luck in the egress cache we must open an ingress SVC */
	msg->type = OPEN_INGRESS_SVC;
	if (qos &&
	    (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
		msg->qos = qos->qos;
		pr_info("(%s) trying to get a CBR shortcut\n",
			client->dev->name);
	} else
		memset(&msg->qos, 0, sizeof(struct atm_qos));
	msg_to_mpoad(msg, client);
}
/*
 * The daemon answered our resolution request.  Store the returned
 * control info in the ingress cache entry and, unless a shortcut is
 * already open, try to reuse an egress SVC or ask for an ingress one.
 */
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
	__be32 dst_ip = msg->content.in_info.in_dst_ip;
	in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);

	dprintk("(%s) ip %pI4\n",
		mpc->dev->name, &dst_ip);
	ddprintk("(%s) entry = %p",
		 mpc->dev->name, entry);
	if (entry == NULL) {
		pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
			mpc->dev->name);
		return;
	}
	ddprintk_cont(" entry_state = %d ", entry->entry_state);

	if (entry->entry_state == INGRESS_RESOLVED) {
		/* duplicate reply: nothing more to do */
		pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
		mpc->in_ops->put(entry);
		return;
	}

	entry->ctrl_info = msg->content.in_info;
	do_gettimeofday(&(entry->tv));
	do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
	entry->refresh_time = 0;
	ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);

	if (entry->entry_state == INGRESS_RESOLVING &&
	    entry->shortcut != NULL) {
		entry->entry_state = INGRESS_RESOLVED;
		mpc->in_ops->put(entry);
		return; /* Shortcut already open... */
	}

	if (entry->shortcut != NULL) {
		/* shortcut set although we were not RESOLVING: bug trap */
		pr_info("(%s) entry->shortcut != NULL, impossible!\n",
			mpc->dev->name);
		mpc->in_ops->put(entry);
		return;
	}

	/* may set entry->shortcut and/or send OPEN_INGRESS_SVC to mpcd */
	check_qos_and_open_shortcut(msg, mpc, entry);
	entry->entry_state = INGRESS_RESOLVED;
	mpc->in_ops->put(entry);

	return;
}
/*
 * Ingress purge from the daemon: remove every ingress cache entry that
 * matches dst_ip under the given mask.
 */
static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
	__be32 dst_ip = msg->content.in_info.in_dst_ip;
	__be32 mask = msg->ip_mask;
	in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);

	if (entry == NULL) {
		pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
			mpc->dev->name, &dst_ip);
		return;
	}

	/* several entries may match under the mask: remove them all */
	while (entry != NULL) {
		dprintk("(%s) removing an ingress entry, ip = %pI4\n",
			mpc->dev->name, &dst_ip);
		write_lock_bh(&mpc->ingress_lock);
		mpc->in_ops->remove_entry(entry, mpc);
		write_unlock_bh(&mpc->ingress_lock);
		mpc->in_ops->put(entry);
		entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
	}
}
/*
 * Egress purge from the daemon: remove the egress cache entry with the
 * given cache id, if one exists.
 */
static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
	__be32 cache_id = msg->content.eg_info.cache_id;
	eg_cache_entry *victim = mpc->eg_ops->get_by_cache_id(cache_id, mpc);

	if (victim == NULL) {
		dprintk("(%s) purge for a non-existing entry\n",
			mpc->dev->name);
		return;
	}

	write_lock_irq(&mpc->egress_lock);
	mpc->eg_ops->remove_entry(victim, mpc);
	write_unlock_irq(&mpc->egress_lock);

	/* drop the reference taken by get_by_cache_id() */
	mpc->eg_ops->put(victim);
}
/*
 * Queue a DATA_PLANE_PURGE message for @entry on @vcc's receive queue so
 * that user space (mpcd) sends a data-plane purge over the shortcut.
 * Best effort: failures are only logged.
 */
static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
{
	struct sock *sk;
	struct k_message *purge_msg;
	struct sk_buff *skb;

	dprintk("entering\n");
	if (vcc == NULL) {
		pr_info("vcc == NULL\n");
		return;
	}

	skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
	if (skb == NULL) {
		/* purge is simply not sent in this case */
		pr_info("out of memory\n");
		return;
	}

	/* build a zeroed k_message directly in the skb payload */
	skb_put(skb, sizeof(struct k_message));
	memset(skb->data, 0, sizeof(struct k_message));
	purge_msg = (struct k_message *)skb->data;
	purge_msg->type = DATA_PLANE_PURGE;
	if (entry != NULL)
		purge_msg->content.eg_info = entry->ctrl_info;

	/* account the buffer against the vcc, then wake the reader */
	atm_force_charge(vcc, skb->truesize);

	sk = sk_atm(vcc);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);
	dprintk("exiting\n");
}
/*
 * Our MPS died. Tell our daemon to send NHRP data plane purge to each
 * of the egress shortcuts we have.
 */
static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
{
	eg_cache_entry *entry;

	dprintk("(%s)\n", mpc->dev->name);

	/* the message must come from the MPS we are bound to */
	if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
		pr_info("(%s) wrong MPS\n", mpc->dev->name);
		return;
	}

	/* FIXME: This knows too much of the cache structure */
	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		purge_egress_shortcut(entry->shortcut, entry);
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	/* without the MPS both caches are stale: drop them entirely */
	mpc->in_ops->destroy_cache(mpc);
	mpc->eg_ops->destroy_cache(mpc);
}
/*
 * MPOA cache imposition arrived from the MPS: a non-zero holding time
 * creates or updates an egress cache entry, a zero holding time removes
 * it.
 *
 * Fix: the original handled (entry == NULL && holding_time) but let
 * (entry == NULL && !holding_time) fall through into remove_entry(NULL),
 * a NULL dereference; it also called put() on a possibly-NULL result of
 * add_entry().
 */
static void MPOA_cache_impos_rcvd(struct k_message *msg,
				  struct mpoa_client *mpc)
{
	uint16_t holding_time;
	eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);

	holding_time = msg->content.eg_info.holding_time;
	dprintk("(%s) entry = %p, holding_time = %u\n",
		mpc->dev->name, entry, holding_time);

	if (entry == NULL) {
		/* unknown entry: only a non-zero holding time creates one;
		 * an imposition with zero holding time is a no-op here */
		if (holding_time) {
			entry = mpc->eg_ops->add_entry(msg, mpc);
			if (entry != NULL)
				mpc->eg_ops->put(entry);
		}
		return;
	}

	if (holding_time) {
		/* NOTE(review): the update path returns without put() —
		 * verify eg_ops->update drops the get_by_cache_id ref */
		mpc->eg_ops->update(entry, holding_time);
		return;
	}

	/* holding time of zero: tear the entry down */
	write_lock_irq(&mpc->egress_lock);
	mpc->eg_ops->remove_entry(entry, mpc);
	write_unlock_irq(&mpc->egress_lock);
	mpc->eg_ops->put(entry);
}
/*
 * mpcd told us our control ATM address.  Record it, advertise it to the
 * LANE layer via an "MPOA device type" TLV and fire a targetless LE_ARP
 * so peers learn about us.
 */
static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
				   struct mpoa_client *mpc)
{
	struct lec_priv *priv;
	int i, retval;
	/* TLV layout: 4-byte type, 1-byte length, device type,
	 * MPS MAC count, then the ATM control address */
	uint8_t tlv[4 + 1 + 1 + 1 + ATM_ESA_LEN];

	tlv[0] = 00; tlv[1] = 0xa0; tlv[2] = 0x3e; tlv[3] = 0x2a; /* type */
	tlv[4] = 1 + 1 + ATM_ESA_LEN; /* length */
	tlv[5] = 0x02; /* MPOA client */
	tlv[6] = 0x00; /* number of MPS MAC addresses */
	memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
	memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);

	dprintk("(%s) setting MPC ctrl ATM address to",
		mpc->dev ? mpc->dev->name : "<unknown>");
	for (i = 7; i < sizeof(tlv); i++)
		dprintk_cont(" %02x", tlv[i]);
	dprintk_cont("\n");

	if (mpc->dev) {
		priv = netdev_priv(mpc->dev);
		/* NOTE(review): failure is tested with == 0, so
		 * associate_req() apparently returns non-zero on success —
		 * confirm against the lane2_ops implementation */
		retval = priv->lane2_ops->associate_req(mpc->dev,
							mpc->dev->dev_addr,
							tlv, sizeof(tlv));
		if (retval == 0)
			pr_info("(%s) MPOA device type TLV association failed\n",
				mpc->dev->name);
		retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
		if (retval < 0)
			pr_info("(%s) targetless LE_ARP request failed\n",
				mpc->dev->name);
	}
}
/*
 * Record the (single) MPS MAC address the daemon handed us, replacing
 * any previously stored list.
 */
static void set_mps_mac_addr_rcvd(struct k_message *msg,
				  struct mpoa_client *client)
{
	/* drop the old list first */
	if (client->number_of_mps_macs)
		kfree(client->mps_macs);
	client->number_of_mps_macs = 0;

	client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
	if (client->mps_macs != NULL)
		client->number_of_mps_macs = 1;
	else
		pr_info("out of memory\n");
}
/*
 * purge egress cache and tell daemon to 'action' (DIE, RELOAD)
 */
static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
{
	eg_cache_entry *entry;

	/* first ask the daemon to purge every egress entry... */
	msg->type = SND_EGRESS_PURGE;

	/* FIXME: This knows too much of the cache structure */
	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		msg->content.eg_info = entry->ctrl_info;
		dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
		msg_to_mpoad(msg, mpc);
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	/* ...then deliver the actual action (DIE or RELOAD) */
	msg->type = action;
	msg_to_mpoad(msg, mpc);
}
/*
 * Re-arm the global cache-maintenance timer MPC_P2 seconds from now.
 * The expiry jiffies value is also stashed in ->data so that
 * mpc_cache_check() receives the nominal time of its invocation.
 */
static void mpc_timer_refresh(void)
{
	mpc_timer.expires = jiffies + (MPC_P2 * HZ);
	mpc_timer.data = mpc_timer.expires;
	mpc_timer.function = mpc_cache_check;
	add_timer(&mpc_timer);
}
/*
 * Periodic timer callback: walk all MPOA clients, expire cache entries,
 * and at the slower per-client mpc_p4/mpc_p5 intervals re-check pending
 * resolutions and refresh entries.  @checking_time is the jiffies value
 * the timer was armed for (see mpc_timer_refresh()).
 */
static void mpc_cache_check(unsigned long checking_time)
{
	struct mpoa_client *mpc = mpcs;
	/* shared across all clients and all invocations */
	static unsigned long previous_resolving_check_time;
	static unsigned long previous_refresh_time;

	while (mpc != NULL) {
		mpc->in_ops->clear_count(mpc);
		mpc->eg_ops->clear_expired(mpc);
		if (checking_time - previous_resolving_check_time >
		    mpc->parameters.mpc_p4 * HZ) {
			mpc->in_ops->check_resolving(mpc);
			previous_resolving_check_time = checking_time;
		}
		if (checking_time - previous_refresh_time >
		    mpc->parameters.mpc_p5 * HZ) {
			mpc->in_ops->refresh(mpc);
			previous_refresh_time = checking_time;
		}
		mpc = mpc->next;
	}
	/* re-arm for the next MPC_P2 interval */
	mpc_timer_refresh();
}
/*
 * ioctl entry point on ATM sockets: attach this vcc either as the
 * control channel to mpcd (ATMMPC_CTRL) or as a data shortcut
 * (ATMMPC_DATA).  Requires CAP_NET_ADMIN.
 */
static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct atm_vcc *vcc = ATM_SD(sock);
	int err;

	if (cmd != ATMMPC_CTRL && cmd != ATMMPC_DATA)
		return -ENOIOCTLCMD;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cmd == ATMMPC_CTRL) {
		err = atm_mpoa_mpoad_attach(vcc, (int)arg);
		if (err >= 0)
			sock->state = SS_CONNECTED;
	} else {
		err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
	}

	return err;
}
/* hook into the ATM socket ioctl path for ATMMPC_CTRL/ATMMPC_DATA */
static struct atm_ioctl atm_ioctl_ops = {
	.owner = THIS_MODULE,
	.ioctl = atm_mpoa_ioctl,
};
/*
 * Module init: register the ATM ioctl hook and the /proc/mpoa interface.
 * A /proc failure is only logged; the module still loads.
 */
static __init int atm_mpoa_init(void)
{
	register_atm_ioctl(&atm_ioctl_ops);

	if (mpc_proc_init() != 0)
		pr_info("failed to initialize /proc/mpoa\n");

	pr_info("mpc.c: initialized\n");

	return 0;
}
/*
 * Module exit: tear down /proc, the timer, the netdev notifier and the
 * ioctl hook, then free every client (caches and MPS MAC list) and the
 * stored QoS specs.
 */
static void __exit atm_mpoa_cleanup(void)
{
	struct mpoa_client *mpc, *tmp;
	struct atm_mpoa_qos *qos, *nextqos;
	struct lec_priv *priv;

	mpc_proc_clean();

	del_timer(&mpc_timer);
	unregister_netdevice_notifier(&mpoa_notifier);
	deregister_atm_ioctl(&atm_ioctl_ops);

	/* detach the list head first, then free clients one by one */
	mpc = mpcs;
	mpcs = NULL;
	while (mpc != NULL) {
		tmp = mpc->next;
		if (mpc->dev != NULL) {
			stop_mpc(mpc);
			priv = netdev_priv(mpc->dev);
			if (priv->lane2_ops != NULL)
				priv->lane2_ops->associate_indicator = NULL;
		}
		ddprintk("about to clear caches\n");
		mpc->in_ops->destroy_cache(mpc);
		mpc->eg_ops->destroy_cache(mpc);
		ddprintk("caches cleared\n");
		kfree(mpc->mps_macs);
		/* zeroed before freeing — presumably to expose stale users */
		memset(mpc, 0, sizeof(struct mpoa_client));
		ddprintk("about to kfree %p\n", mpc);
		kfree(mpc);
		ddprintk("next mpc is at %p\n", tmp);
		mpc = tmp;
	}

	qos = qos_head;
	qos_head = NULL;
	while (qos != NULL) {
		nextqos = qos->next;
		dprintk("freeing qos entry %p\n", qos);
		kfree(qos);
		qos = nextqos;
	}
}
module_init(atm_mpoa_init);
module_exit(atm_mpoa_cleanup);
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
/*
lru_cache.c
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h> /* for memset */
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/lru_cache.h>
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
"Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
MODULE_LICENSE("GPL");
/* this is developers aid only.
* it catches concurrent access (lack of locking on the users part) */
#define PARANOIA_ENTRY() do {		\
	BUG_ON(!lc);			\
	BUG_ON(!lc->nr_elements);	\
	BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
} while (0)

/* counterpart of PARANOIA_ENTRY(): drop the pseudo-lock bit and return
 * x (if any) from the enclosing function */
#define RETURN(x...)     do { \
	clear_bit(__LC_PARANOIA, &lc->flags); \
	smp_mb__after_clear_bit(); return x ; } while (0)

/* BUG() if e is not one of the elements tracked by lc */
#define PARANOIA_LC_ELEMENT(lc, e) do {	\
	struct lru_cache *lc_ = (lc);	\
	struct lc_element *e_ = (e);	\
	unsigned i = e_->lc_index;	\
	BUG_ON(i >= lc_->nr_elements);	\
	BUG_ON(lc_->lc_element[i] != e_); } while (0)
/**
 * lc_create - prepares to track objects in an active set
 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
 * @cache: kmem_cache the tracked objects are allocated from
 * @e_count: number of elements allowed to be active simultaneously
 * @e_size: size of the tracked objects
 * @e_off: offset to the &struct lc_element member in a tracked object
 *
 * Returns a pointer to a newly initialized struct lru_cache on success,
 * or NULL on (allocation) failure.
 *
 * Fix: the partial-allocation cleanup loop was `for (i--; i; i--)`,
 * which stops at index 1 and leaks element[0]; it now counts all the
 * way down to 0.
 */
struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
		unsigned e_count, size_t e_size, size_t e_off)
{
	struct hlist_head *slot = NULL;
	struct lc_element **element = NULL;
	struct lru_cache *lc;
	struct lc_element *e;
	unsigned cache_obj_size = kmem_cache_size(cache);
	unsigned i;

	WARN_ON(cache_obj_size < e_size);
	if (cache_obj_size < e_size)
		return NULL;

	/* e_count too big; would probably fail the allocation below anyways.
	 * for typical use cases, e_count should be few thousand at most. */
	if (e_count > LC_MAX_ACTIVE)
		return NULL;

	slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
	if (!slot)
		goto out_fail;
	element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
	if (!element)
		goto out_fail;
	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc)
		goto out_fail;

	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);

	lc->name = name;
	lc->element_size = e_size;
	lc->element_off = e_off;
	lc->nr_elements = e_count;
	lc->new_number = LC_FREE;
	lc->lc_cache = cache;
	lc->lc_element = element;
	lc->lc_slot = slot;

	/* preallocate all objects */
	for (i = 0; i < e_count; i++) {
		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			break;
		memset(p, 0, lc->element_size);
		e = p + e_off;
		e->lc_index = i;
		e->lc_number = LC_FREE;
		list_add(&e->list, &lc->free);
		element[i] = e;
	}
	if (i == e_count)
		return lc;

	/* else: could not allocate all elements, give up.
	 * free all i objects allocated so far, including element[0]. */
	while (i) {
		void *p = element[--i];
		kmem_cache_free(cache, p - e_off);
	}
	kfree(lc);
out_fail:
	kfree(element);
	kfree(slot);
	return NULL;
}
/* return the object embedding element @i back to its kmem_cache;
 * warns (but copes) if the slot is empty */
void lc_free_by_index(struct lru_cache *lc, unsigned i)
{
	void *obj = lc->lc_element[i];

	WARN_ON(!obj);
	if (!obj)
		return;
	/* step back from the embedded lc_element to the embedding object */
	kmem_cache_free(lc->lc_cache, obj - lc->element_off);
}
/**
 * lc_destroy - frees memory allocated by lc_create()
 * @lc: the lru cache to destroy
 *
 * Safe to call with a NULL @lc.
 */
void lc_destroy(struct lru_cache *lc)
{
	unsigned i;

	if (!lc)
		return;
	/* return every preallocated object, then the bookkeeping arrays */
	for (i = 0; i < lc->nr_elements; i++)
		lc_free_by_index(lc, i);
	kfree(lc->lc_element);
	kfree(lc->lc_slot);
	kfree(lc);
}
/**
 * lc_reset - does a full reset for @lc and the hash table slots.
 * @lc: the lru cache to operate on
 *
 * It is roughly the equivalent of re-allocating a fresh lru_cache object,
 * basically a short cut to lc_destroy(lc); lc = lc_create(...);
 */
void lc_reset(struct lru_cache *lc)
{
	unsigned i;

	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);
	/* clear all statistics and state */
	lc->used = 0;
	lc->hits = 0;
	lc->misses = 0;
	lc->starving = 0;
	lc->dirty = 0;
	lc->changed = 0;
	lc->flags = 0;
	lc->changing_element = NULL;
	lc->new_number = LC_FREE;
	/* empty every hash bucket */
	memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
	for (i = 0; i < lc->nr_elements; i++) {
		struct lc_element *e = lc->lc_element[i];
		void *p = e;
		/* zero the whole embedding object... */
		p -= lc->element_off;
		memset(p, 0, lc->element_size);
		/* re-init it */
		e->lc_index = i;
		e->lc_number = LC_FREE;
		list_add(&e->list, &lc->free);
	}
}
/**
 * lc_seq_printf_stats - print stats about @lc into @seq
 * @seq: the seq_file to print into
 * @lc: the lru cache to print statistics of
 *
 * Returns whatever seq_printf() returns on this kernel.
 */
size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
{
	/* NOTE:
	 * total calls to lc_get are
	 * (starving + hits + misses)
	 * misses include "dirty" count (update from an other thread in
	 * progress) and "changed", when this in fact lead to an successful
	 * update of the cache.
	 */
	return seq_printf(seq, "\t%s: used:%u/%u "
		"hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
		lc->name, lc->used, lc->nr_elements,
		lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
}
/* map an element number onto its hash bucket */
static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
{
	unsigned int bucket = enr % lc->nr_elements;

	return &lc->lc_slot[bucket];
}
/**
 * lc_find - find element by label, if present in the hash table
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns the pointer to an element, if the element with the requested
 * "label" or element number is present in the hash table,
 * or NULL if not found. Does not change the refcnt.
 */
struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
{
	struct hlist_node *n;
	struct lc_element *e;

	BUG_ON(!lc);
	BUG_ON(!lc->nr_elements);
	/* walk the collision chain of the bucket enr hashes to */
	hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
		if (e->lc_number == enr)
			return e;
	}
	return NULL;
}
/* returned element will be "recycled" immediately: it is taken off the
 * lru list and its hash chain, or NULL if nothing is evictable */
static struct lc_element *lc_evict(struct lru_cache *lc)
{
	struct lc_element *e;

	if (list_empty(&lc->lru))
		return NULL;

	/* the lru tail is the least recently used element */
	e = list_entry(lc->lru.prev, struct lc_element, list);
	PARANOIA_LC_ELEMENT(lc, e);

	list_del(&e->list);
	hlist_del(&e->colision);
	return e;
}
/**
 * lc_del - removes an element from the cache
 * @lc: The lru_cache object
 * @e: The element to remove
 *
 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
 * sets @e->enr to %LC_FREE.
 */
void lc_del(struct lru_cache *lc, struct lc_element *e)
{
	/* PARANOIA_ENTRY/RETURN bracket the function against concurrent use */
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt);
	e->lc_number = LC_FREE;
	hlist_del_init(&e->colision);
	list_move(&e->list, &lc->free);
	RETURN();
}
/* hand out a recyclable element: prefer the free list, fall back to
 * evicting the lru tail (may return NULL) */
static struct lc_element *lc_get_unused_element(struct lru_cache *lc)
{
	struct list_head *first;

	if (list_empty(&lc->free))
		return lc_evict(lc);

	first = lc->free.next;
	list_del(first);
	return list_entry(first, struct lc_element, list);
}
/* non-zero if lc_get() could hand out an element right now: either the
 * free list is non-empty or something can be evicted from the lru */
static int lc_unused_element_available(struct lru_cache *lc)
{
	return !list_empty(&lc->free) || !list_empty(&lc->lru);
}
/**
 * lc_get - get element by label, maybe change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * In case the requested number is not present, it needs to be added to the
 * cache. Therefore it is possible that an other element becomes evicted from
 * the cache. In either case, the user is notified so he is able to e.g. keep
 * a persistent log of the cache changes, and therefore the objects in use.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *     and a changing transaction is still pending (@lc was marked %LC_DIRTY).
 *     Or no unused or free element could be recycled (@lc will be marked as
 *     %LC_STARVING, blocking further lc_get() operations).
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away
 *
 *  pointer to an UNUSED element with some different element number,
 *          where that different number may also be %LC_FREE.
 *
 *          In this case, the cache is marked %LC_DIRTY (blocking further changes),
 *          and the returned element pointer is removed from the lru list and
 *          hash collision chains.  The user now should do whatever housekeeping
 *          is necessary.
 *          Then he must call lc_changed(lc,element_pointer), to finish
 *          the change.
 *
 * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
 *       any cache set change.
 */
struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e;

	PARANOIA_ENTRY();
	if (lc->flags & LC_STARVING) {
		++lc->starving;
		RETURN(NULL);
	}

	e = lc_find(lc, enr);
	if (e) {
		/* cache hit: take a reference and make it non-evictable */
		++lc->hits;
		if (e->refcnt++ == 0)
			lc->used++;
		list_move(&e->list, &lc->in_use); /* Not evictable... */
		RETURN(e);
	}

	++lc->misses;

	/* In case there is nothing available and we can not kick out
	 * the LRU element, we have to wait ...
	 */
	if (!lc_unused_element_available(lc)) {
		__set_bit(__LC_STARVING, &lc->flags);
		RETURN(NULL);
	}

	/* it was not present in the active set.
	 * we are going to recycle an unused (or even "free") element.
	 * user may need to commit a transaction to record that change.
	 * we serialize on flags & TF_DIRTY */
	if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
		++lc->dirty;
		RETURN(NULL);
	}

	/* cannot fail: lc_unused_element_available() said so above */
	e = lc_get_unused_element(lc);
	BUG_ON(!e);

	clear_bit(__LC_STARVING, &lc->flags);
	BUG_ON(++e->refcnt != 1);
	lc->used++;

	/* record the pending change; lc_changed() will complete it */
	lc->changing_element = e;
	lc->new_number = enr;

	RETURN(e);
}
/* similar to lc_get,
 * but only gets a new reference on an existing element.
 * you either get the requested element, or NULL.
 * will be consolidated into one function.
 */
struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e = NULL;

	PARANOIA_ENTRY();
	if (lc->flags & LC_STARVING) {
		++lc->starving;
	} else {
		e = lc_find(lc, enr);
		if (e != NULL) {
			/* hit: reference it and keep it off the lru */
			++lc->hits;
			if (e->refcnt++ == 0)
				lc->used++;
			list_move(&e->list, &lc->in_use); /* Not evictable... */
		}
	}
	RETURN(e);
}
/**
 * lc_changed - tell @lc that the change has been recorded
 * @lc: the lru cache to operate on
 * @e: the element pending label change
 */
void lc_changed(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	/* only the element handed out by the pending lc_get() may finish */
	BUG_ON(e != lc->changing_element);
	PARANOIA_LC_ELEMENT(lc, e);
	++lc->changed;
	/* commit the new label, then re-link into list and hash */
	e->lc_number = lc->new_number;
	list_add(&e->list, &lc->in_use);
	hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
	lc->changing_element = NULL;
	lc->new_number = LC_FREE;
	/* clear LC_DIRTY last, with a barrier, so other paths observe the
	 * completed change before they may start a new one */
	clear_bit(__LC_DIRTY, &lc->flags);
	smp_mb__after_clear_bit();
	RETURN();
}
/**
 * lc_put - give up refcnt of @e
 * @lc: the lru cache to operate on
 * @e: the element to put
 *
 * If refcnt reaches zero, the element is moved to the lru list,
 * and a %LC_STARVING (if set) is cleared.
 * Returns the new (post-decrement) refcnt.
 */
unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt == 0);
	/* the element of a pending change must not be released */
	BUG_ON(e == lc->changing_element);
	if (--e->refcnt == 0) {
		/* move it to the front of LRU. */
		list_move(&e->list, &lc->lru);
		lc->used--;
		/* an element became evictable: un-starve lc_get() callers */
		clear_bit(__LC_STARVING, &lc->flags);
		smp_mb__after_clear_bit();
	}
	RETURN(e->refcnt);
}
/**
* lc_element_by_index
* @lc: the lru cache to operate on
* @i: the index of the element to return
*/
struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
{
BUG_ON(i >= lc->nr_elements);
BUG_ON(lc->lc_element[i] == NULL);
BUG_ON(lc->lc_element[i]->lc_index != i);
return lc->lc_element[i];
}
/**
 * lc_index_of
 * @lc: the lru cache to operate on
 * @e: the element to query for its index position in lc->element
 *
 * BUGs if @e is not tracked by @lc.
 */
unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_LC_ELEMENT(lc, e);
	return e->lc_index;
}
/**
 * lc_set - associate index with label
 * @lc: the lru cache to operate on
 * @enr: the label to set
 * @index: the element index to associate label with.
 *
 * Used to initialize the active set to some previously recorded state.
 * Out-of-range indices are silently ignored.
 */
void lc_set(struct lru_cache *lc, unsigned int enr, int index)
{
	struct lc_element *e;
	struct list_head *target;

	if (index < 0 || index >= lc->nr_elements)
		return;

	e = lc_element_by_index(lc, index);
	e->lc_number = enr;

	/* re-hash under the new label */
	hlist_del_init(&e->colision);
	hlist_add_head(&e->colision, lc_hash_slot(lc, enr));

	/* referenced elements live on in_use, idle ones on the lru */
	target = e->refcnt ? &lc->in_use : &lc->lru;
	list_move(&e->list, target);
}
/**
 * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
 * @lc: the lru cache to operate on
 * @seq: the &struct seq_file pointer to seq_printf into
 * @utext: user supplied "heading" or other info
 * @detail: optional function pointer the user may provide to dump further
 * details of the object the lc_element is embedded in; may be NULL.
 *
 * Fix: @detail is documented as user-supplied but was called
 * unconditionally, crashing on a NULL pointer; it is now skipped when
 * not provided.
 */
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
	     void (*detail) (struct seq_file *, struct lc_element *))
{
	unsigned int nr_elements = lc->nr_elements;
	struct lc_element *e;
	int i;

	seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
	for (i = 0; i < nr_elements; i++) {
		e = lc_element_by_index(lc, i);
		if (e->lc_number == LC_FREE) {
			seq_printf(seq, "\t%2d: FREE\n", i);
		} else {
			seq_printf(seq, "\t%2d: %4u %4u ", i,
				   e->lc_number, e->refcnt);
			if (detail)
				detail(seq, e);
		}
	}
}
EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_changed);
EXPORT_SYMBOL(lc_element_by_index);
EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Jump label s390 support
*
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <linux/jump_label.h>
#include <asm/ipl.h>
#ifdef HAVE_JUMP_LABEL
/* one 6-byte s390 branch instruction: 16-bit opcode plus a signed
 * 32-bit offset (brcl format, see __jump_label_transform()) */
struct insn {
	u16 opcode;
	s32 offset;
} __packed;

/* arguments marshalled through stop_machine() to the patch callback */
struct insn_args {
	struct jump_entry *entry;
	enum jump_label_type type;
};
/*
 * Patch one jump site in place: enabling writes "brcl 15,offset"
 * (branch always), disabling writes "brcl 0,0" (branch never, i.e. a
 * 6-byte nop).
 */
static void __jump_label_transform(struct jump_entry *entry,
				   enum jump_label_type type)
{
	struct insn insn;
	int rc;

	if (type == JUMP_LABEL_ENABLE) {
		/* brcl 15,offset */
		insn.opcode = 0xc0f4;
		/* relative branch offsets are counted in halfwords */
		insn.offset = (entry->target - entry->code) >> 1;
	} else {
		/* brcl 0,0 */
		insn.opcode = 0xc004;
		insn.offset = 0;
	}

	rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
	WARN_ON_ONCE(rc < 0);
}
/* stop_machine() callback: unpack the args and patch the jump site */
static int __sm_arch_jump_label_transform(void *data)
{
	struct insn_args *args = data;

	__jump_label_transform(args->entry, args->type);
	return 0;
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
struct insn_args args;
args.entry = entry;
args.type = type;
stop_machine(__sm_arch_jump_label_transform, &args, NULL);
}
/*
 * Variant without the stop_machine() serialization: the generic
 * jump-label code calls this only where patching directly is safe.
 */
void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	__jump_label_transform(entry, type);
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
*
* mwavedd.c -- mwave device driver
*
*
* Written By: Mike Sullivan IBM Corporation
*
* Copyright (C) 1999 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* 10/23/2000 - Alpha Release
* First release to the public
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/serial_8250.h>
#include "smapi.h"
#include "mwavedd.h"
#include "3780i.h"
#include "tp3780i.h"
MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver");
MODULE_AUTHOR("Mike Sullivan and Paul Schroeder");
MODULE_LICENSE("GPL");
/*
* These parameters support the setting of MWave resources. Note that no
* checks are made against other devices (ie. superio) for conflicts.
* We'll depend on users using the tpctl utility to do that for now
*/
static DEFINE_MUTEX(mwave_mutex);
int mwave_debug = 0;
int mwave_3780i_irq = 0;
int mwave_3780i_io = 0;
int mwave_uart_irq = 0;
int mwave_uart_io = 0;
module_param(mwave_debug, int, 0);
module_param(mwave_3780i_irq, int, 0);
module_param(mwave_3780i_io, int, 0);
module_param(mwave_uart_irq, int, 0);
module_param(mwave_uart_io, int, 0);
static int mwave_open(struct inode *inode, struct file *file);
static int mwave_close(struct inode *inode, struct file *file);
static long mwave_ioctl(struct file *filp, unsigned int iocmd,
unsigned long ioarg);
MWAVE_DEVICE_DATA mwave_s_mdd;
/*
 * Open handler: trace-only, always succeeds; no per-open state is kept.
 */
static int mwave_open(struct inode *inode, struct file *file)
{
	unsigned int retval = 0;

	PRINTK_3(TRACE_MWAVE,
		"mwavedd::mwave_open, entry inode %p file %p\n",
		inode, file);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_open, exit return retval %x\n", retval);

	return retval;
}
/*
 * Release handler: trace-only, always succeeds; nothing to tear down.
 */
static int mwave_close(struct inode *inode, struct file *file)
{
	unsigned int retval = 0;

	PRINTK_3(TRACE_MWAVE,
		"mwavedd::mwave_close, entry inode %p file %p\n",
		inode, file);
	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
		retval);

	return retval;
}
static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned long ioarg)
{
unsigned int retval = 0;
pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
void __user *arg = (void __user *)ioarg;
PRINTK_4(TRACE_MWAVE,
"mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
file, iocmd, (int) ioarg);
switch (iocmd) {
case IOCTL_MW_RESET:
PRINTK_1(TRACE_MWAVE,
"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
" calling tp3780I_ResetDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_ResetDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
" retval %x from tp3780I_ResetDSP\n",
retval);
break;
case IOCTL_MW_RUN:
PRINTK_1(TRACE_MWAVE,
"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
" calling tp3780I_StartDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_StartDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
" retval %x from tp3780I_StartDSP\n",
retval);
break;
case IOCTL_MW_DSP_ABILITIES: {
MW_ABILITIES rAbilities;
PRINTK_1(TRACE_MWAVE,
"mwavedd::mwave_ioctl,"
" IOCTL_MW_DSP_ABILITIES calling"
" tp3780I_QueryAbilities\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
&rAbilities);
mutex_unlock(&mwave_mutex);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
" retval %x from tp3780I_QueryAbilities\n",
retval);
if (retval == 0) {
if( copy_to_user(arg, &rAbilities,
sizeof(MW_ABILITIES)) )
return -EFAULT;
}
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
" exit retval %x\n",
retval);
}
break;
case IOCTL_MW_READ_DATA:
case IOCTL_MW_READCLEAR_DATA: {
MW_READWRITE rReadData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rReadData, arg,
sizeof(MW_READWRITE)) )
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
PRINTK_4(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
" size %lx, ioarg %lx pusBuffer %p\n",
rReadData.ulDataLength, ioarg, pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd,
pusBuffer,
rReadData.ulDataLength,
rReadData.usDspAddress);
mutex_unlock(&mwave_mutex);
}
break;
case IOCTL_MW_READ_INST: {
MW_READWRITE rReadData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rReadData, arg,
sizeof(MW_READWRITE)) )
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
PRINTK_4(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
" size %lx, ioarg %lx pusBuffer %p\n",
rReadData.ulDataLength / 2, ioarg,
pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
rReadData.ulDataLength / 2,
rReadData.usDspAddress);
mutex_unlock(&mwave_mutex);
}
break;
case IOCTL_MW_WRITE_DATA: {
MW_READWRITE rWriteData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rWriteData, arg,
sizeof(MW_READWRITE)) )
return -EFAULT;
pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
PRINTK_4(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
" size %lx, ioarg %lx pusBuffer %p\n",
rWriteData.ulDataLength, ioarg,
pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
rWriteData.ulDataLength,
rWriteData.usDspAddress);
mutex_unlock(&mwave_mutex);
}
break;
case IOCTL_MW_WRITE_INST: {
MW_READWRITE rWriteData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rWriteData, arg,
sizeof(MW_READWRITE)) )
return -EFAULT;
pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
PRINTK_4(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
" size %lx, ioarg %lx pusBuffer %p\n",
rWriteData.ulDataLength, ioarg,
pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
iocmd, pusBuffer,
rWriteData.ulDataLength,
rWriteData.usDspAddress);
mutex_unlock(&mwave_mutex);
}
break;
case IOCTL_MW_REGISTER_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::mwave_ioctl:"
" IOCTL_MW_REGISTER_IPC:"
" Error: Invalid ipcnum %x\n",
ipcnum);
return -EINVAL;
}
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
" ipcnum %x entry usIntCount %x\n",
ipcnum,
pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
pDrvData->IPCs[ipcnum].bIsHere = FALSE;
pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
mutex_unlock(&mwave_mutex);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
" ipcnum %x exit\n",
ipcnum);
}
break;
case IOCTL_MW_GET_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::mwave_ioctl:"
" IOCTL_MW_GET_IPC: Error:"
" Invalid ipcnum %x\n", ipcnum);
return -EINVAL;
}
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
" ipcnum %x, usIntCount %x\n",
ipcnum,
pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
DECLARE_WAITQUEUE(wait, current);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl, thread for"
" ipc %x going to sleep\n",
ipcnum);
add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
pDrvData->IPCs[ipcnum].bIsHere = TRUE;
set_current_state(TASK_INTERRUPTIBLE);
/* check whether an event was signalled by */
/* the interrupt handler while we were gone */
if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */
pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl"
" IOCTL_MW_GET_IPC ipcnum %x"
" handling first int\n",
ipcnum);
} else { /* either 1st int has not yet occurred, or we have already handled the first int */
schedule();
if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
pDrvData->IPCs[ipcnum].usIntCount = 2;
}
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl"
" IOCTL_MW_GET_IPC ipcnum %x"
" woke up and returning to"
" application\n",
ipcnum);
}
pDrvData->IPCs[ipcnum].bIsHere = FALSE;
remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
set_current_state(TASK_RUNNING);
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
" returning thread for ipc %x"
" processing\n",
ipcnum);
}
mutex_unlock(&mwave_mutex);
}
break;
case IOCTL_MW_UNREGISTER_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
PRINTK_2(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
" ipcnum %x\n",
ipcnum);
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::mwave_ioctl:"
" IOCTL_MW_UNREGISTER_IPC:"
" Error: Invalid ipcnum %x\n",
ipcnum);
return -EINVAL;
}
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) {
wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
}
}
mutex_unlock(&mwave_mutex);
}
break;
default:
return -ENOTTY;
break;
} /* switch */
PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
return retval;
}
/* read() is not a supported operation on /dev/mwave; the attempt is
 * traced and then rejected.  All real work goes through mwave_ioctl(). */
static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
                          loff_t * ppos)
{
	ssize_t status = -EINVAL;	/* reads always fail */

	PRINTK_5(TRACE_MWAVE,
		"mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
		file, buf, count, ppos);

	return status;
}
/* write() is not a supported operation on /dev/mwave; the attempt is
 * traced and then rejected.  All real work goes through mwave_ioctl(). */
static ssize_t mwave_write(struct file *file, const char __user *buf,
                           size_t count, loff_t * ppos)
{
	ssize_t status = -EINVAL;	/* writes always fail */

	PRINTK_5(TRACE_MWAVE,
		"mwavedd::mwave_write entry file %p, buf %p,"
		" count %zx ppos %p\n",
		file, buf, count, ppos);

	return status;
}
static int register_serial_portandirq(unsigned int port, int irq)
{
struct uart_port uart;
switch ( port ) {
case 0x3f8:
case 0x2f8:
case 0x3e8:
case 0x2e8:
/* OK */
break;
default:
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::register_serial_portandirq:"
" Error: Illegal port %x\n", port );
return -1;
} /* switch */
/* port is okay */
switch ( irq ) {
case 3:
case 4:
case 5:
case 7:
/* OK */
break;
default:
PRINTK_ERROR(KERN_ERR_MWAVE
"mwavedd::register_serial_portandirq:"
" Error: Illegal irq %x\n", irq );
return -1;
} /* switch */
/* irq is okay */
memset(&uart, 0, sizeof(struct uart_port));
uart.uartclk = 1843200;
uart.iobase = port;
uart.irq = irq;
uart.iotype = UPIO_PORT;
uart.flags = UPF_SHARE_IRQ;
return serial8250_register_port(&uart);
}
/* File operations for /dev/mwave.  All control is via unlocked_ioctl;
 * read()/write() are stubs that always fail with -EINVAL, and
 * open()/release() keep no per-open state. */
static const struct file_operations mwave_fops = {
	.owner		= THIS_MODULE,
	.read		= mwave_read,
	.write		= mwave_write,
	.unlocked_ioctl	= mwave_ioctl,
	.open		= mwave_open,
	.release	= mwave_close,
	.llseek		= default_llseek,
};
static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
#if 0 /* totally b0rked */
/*
* sysfs support <paulsch@us.ibm.com>
*/
struct device mwave_device;
/* Prevent code redundancy, create a macro for mwave_show_* functions. */
#define mwave_show_function(attr_name, format_string, field) \
static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
DSP_3780I_CONFIG_SETTINGS *pSettings = \
&mwave_s_mdd.rBDData.rDspSettings; \
return sprintf(buf, format_string, pSettings->field); \
}
/* All of our attributes are read attributes. */
#define mwave_dev_rd_attr(attr_name, format_string, field) \
mwave_show_function(attr_name, format_string, field) \
static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
static struct device_attribute * const mwave_dev_attrs[] = {
&dev_attr_3780i_dma,
&dev_attr_3780i_irq,
&dev_attr_3780i_io,
&dev_attr_uart_irq,
&dev_attr_uart_io,
};
#endif
/*
* mwave_init is called on module load
*
* mwave_exit is called on module unload
* mwave_exit is also used to clean up after an aborted mwave_init
*/
/*
 * mwave_exit - tear down whatever mwave_init() managed to set up
 *
 * Also called directly from mwave_init()'s error path, so every step
 * is guarded by the corresponding "got this far" flag (or sLine) in
 * the driver data.  Teardown runs in the reverse order of the
 * initialization stages in mwave_init().
 */
static void mwave_exit(void)
{
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");

#if 0
	for (i = 0; i < pDrvData->nr_registered_attrs; i++)
		device_remove_file(&mwave_device, mwave_dev_attrs[i]);
	pDrvData->nr_registered_attrs = 0;

	if (pDrvData->device_registered) {
		device_unregister(&mwave_device);
		pDrvData->device_registered = FALSE;
	}
#endif

	/* sLine >= 0 means the UART was handed to the 8250 driver */
	if ( pDrvData->sLine >= 0 ) {
		serial8250_unregister_port(pDrvData->sLine);
	}
	if (pDrvData->bMwaveDevRegistered) {
		misc_deregister(&mwave_misc_dev);
	}
	if (pDrvData->bDSPEnabled) {
		tp3780I_DisableDSP(&pDrvData->rBDData);
	}
	if (pDrvData->bResourcesClaimed) {
		tp3780I_ReleaseResources(&pDrvData->rBDData);
	}
	if (pDrvData->bBDInitialized) {
		tp3780I_Cleanup(&pDrvData->rBDData);
	}

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
}
module_exit(mwave_exit);
/*
 * mwave_init - bring up the 3780i DSP board and register device nodes
 *
 * Initialization is staged; each completed stage sets a flag (or
 * pDrvData->sLine) in the driver data so that mwave_exit(), which is
 * reused as the error-path cleanup, only undoes what was actually
 * set up.
 *
 * Returns 0 on success, -EIO if any stage fails.
 */
static int __init mwave_init(void)
{
	int i;
	int retval = 0;
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");

	/* Zero all driver state up front; the flags below are then set
	 * explicitly for readability. */
	memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));

	pDrvData->bBDInitialized = FALSE;
	pDrvData->bResourcesClaimed = FALSE;
	pDrvData->bDSPEnabled = FALSE;
	pDrvData->bDSPReset = FALSE;
	pDrvData->bMwaveDevRegistered = FALSE;
	pDrvData->sLine = -1;	/* no 8250 line registered yet */

	for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
		pDrvData->IPCs[i].bIsEnabled = FALSE;
		pDrvData->IPCs[i].bIsHere = FALSE;
		pDrvData->IPCs[i].usIntCount = 0;	/* no ints received yet */
		init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
	}

	/* Stage 1: board/bus data */
	retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd::mwave_init: Error:"
				" Failed to initialize board data\n");
		goto cleanup_error;
	}
	pDrvData->bBDInitialized = TRUE;

	/* Stage 2: work out I/O, irq and DMA resources */
	retval = tp3780I_CalcResources(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_CalcResources"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to calculate resources\n");
		goto cleanup_error;
	}

	/* Stage 3: claim those resources */
	retval = tp3780I_ClaimResources(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_ClaimResources"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to claim resources\n");
		goto cleanup_error;
	}
	pDrvData->bResourcesClaimed = TRUE;

	/* Stage 4: power up / enable the DSP */
	retval = tp3780I_EnableDSP(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_EnableDSP"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to enable DSP\n");
		goto cleanup_error;
	}
	pDrvData->bDSPEnabled = TRUE;

	/* Stage 5: expose /dev/mwave */
	if (misc_register(&mwave_misc_dev) < 0) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to register misc device\n");
		goto cleanup_error;
	}
	pDrvData->bMwaveDevRegistered = TRUE;

	/* Stage 6: hand the on-board UART to the 8250 driver */
	pDrvData->sLine = register_serial_portandirq(
		pDrvData->rBDData.rDspSettings.usUartBaseIO,
		pDrvData->rBDData.rDspSettings.usUartIrq
	);
	if (pDrvData->sLine < 0) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to register serial driver\n");
		goto cleanup_error;
	}
	/* uart is registered */

#if 0
	/* sysfs */
	memset(&mwave_device, 0, sizeof (struct device));
	dev_set_name(&mwave_device, "mwave");

	if (device_register(&mwave_device))
		goto cleanup_error;
	pDrvData->device_registered = TRUE;
	for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
		if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd:mwave_init: Error:"
					" Failed to create sysfs file %s\n",
					mwave_dev_attrs[i]->attr.name);
			goto cleanup_error;
		}
		pDrvData->nr_registered_attrs++;
	}
#endif

	/* SUCCESS! */
	return 0;

cleanup_error:
	PRINTK_ERROR(KERN_ERR_MWAVE
			"mwavedd::mwave_init: Error:"
			" Failed to initialize\n");
	mwave_exit(); /* clean up */

	return -EIO;
}
module_init(mwave_init);
| gpl-2.0 |
flar2/m7-Sense-4.4.3 | drivers/char/mwave/mwavedd.c | 8276 | 19183 | /*
*
* mwavedd.c -- mwave device driver
*
*
* Written By: Mike Sullivan IBM Corporation
*
* Copyright (C) 1999 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
* LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement, including but not limited to
* the risks and costs of program errors, damage to or loss of data,
* programs or equipment, and unavailability or interruption of operations.
*
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* 10/23/2000 - Alpha Release
* First release to the public
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/serial_8250.h>
#include "smapi.h"
#include "mwavedd.h"
#include "3780i.h"
#include "tp3780i.h"
MODULE_DESCRIPTION("3780i Advanced Communications Processor (Mwave) driver");
MODULE_AUTHOR("Mike Sullivan and Paul Schroeder");
MODULE_LICENSE("GPL");
/*
* These parameters support the setting of MWave resources. Note that no
* checks are made against other devices (ie. superio) for conflicts.
* We'll depend on users using the tpctl utility to do that for now
*/
static DEFINE_MUTEX(mwave_mutex);
int mwave_debug = 0;
int mwave_3780i_irq = 0;
int mwave_3780i_io = 0;
int mwave_uart_irq = 0;
int mwave_uart_io = 0;
module_param(mwave_debug, int, 0);
module_param(mwave_3780i_irq, int, 0);
module_param(mwave_3780i_io, int, 0);
module_param(mwave_uart_irq, int, 0);
module_param(mwave_uart_io, int, 0);
static int mwave_open(struct inode *inode, struct file *file);
static int mwave_close(struct inode *inode, struct file *file);
static long mwave_ioctl(struct file *filp, unsigned int iocmd,
unsigned long ioarg);
MWAVE_DEVICE_DATA mwave_s_mdd;
/* open() keeps no per-open state; trace the call and always succeed. */
static int mwave_open(struct inode *inode, struct file *file)
{
	PRINTK_3(TRACE_MWAVE,
		"mwavedd::mwave_open, entry inode %p file %p\n",
		inode, file);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_open, exit return retval %x\n", 0);
	return 0;
}
/* release() has nothing to clean up; trace the call and always succeed. */
static int mwave_close(struct inode *inode, struct file *file)
{
	PRINTK_3(TRACE_MWAVE,
		"mwavedd::mwave_close, entry inode %p file %p\n",
		inode, file);
	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
		0);
	return 0;
}
/*
 * mwave_ioctl - dispatch control requests for /dev/mwave
 * @file:  open file handle (used only in trace output)
 * @iocmd: IOCTL_MW_* command code
 * @ioarg: command argument: a user-space pointer for the reset/run,
 *         data-transfer and abilities commands, or a plain IPC number
 *         for the *_IPC commands
 *
 * All hardware access is serialized by mwave_mutex.  Returns the
 * tp3780I_* result for hardware commands, -EFAULT when a user-space
 * copy fails, -EINVAL for an out-of-range IPC number, and -ENOTTY for
 * an unknown command.
 */
static long mwave_ioctl(struct file *file, unsigned int iocmd,
							unsigned long ioarg)
{
	unsigned int retval = 0;
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
	void __user *arg = (void __user *)ioarg;

	PRINTK_4(TRACE_MWAVE,
		"mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
		file, iocmd, (int) ioarg);

	switch (iocmd) {

	case IOCTL_MW_RESET:
		PRINTK_1(TRACE_MWAVE,
			"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
			" calling tp3780I_ResetDSP\n");
		mutex_lock(&mwave_mutex);
		retval = tp3780I_ResetDSP(&pDrvData->rBDData);
		mutex_unlock(&mwave_mutex);
		PRINTK_2(TRACE_MWAVE,
			"mwavedd::mwave_ioctl, IOCTL_MW_RESET"
			" retval %x from tp3780I_ResetDSP\n",
			retval);
		break;

	case IOCTL_MW_RUN:
		PRINTK_1(TRACE_MWAVE,
			"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
			" calling tp3780I_StartDSP\n");
		mutex_lock(&mwave_mutex);
		retval = tp3780I_StartDSP(&pDrvData->rBDData);
		mutex_unlock(&mwave_mutex);
		PRINTK_2(TRACE_MWAVE,
			"mwavedd::mwave_ioctl, IOCTL_MW_RUN"
			" retval %x from tp3780I_StartDSP\n",
			retval);
		break;

	case IOCTL_MW_DSP_ABILITIES: {
		MW_ABILITIES rAbilities;

		/*
		 * Zero the structure before the query so that any field
		 * (or padding bytes) the query does not fill cannot leak
		 * kernel stack contents to user space through the
		 * copy_to_user() below.
		 */
		memset(&rAbilities, 0, sizeof(rAbilities));

		PRINTK_1(TRACE_MWAVE,
			"mwavedd::mwave_ioctl,"
			" IOCTL_MW_DSP_ABILITIES calling"
			" tp3780I_QueryAbilities\n");
		mutex_lock(&mwave_mutex);
		retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
				&rAbilities);
		mutex_unlock(&mwave_mutex);
		PRINTK_2(TRACE_MWAVE,
			"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
			" retval %x from tp3780I_QueryAbilities\n",
			retval);
		if (retval == 0) {
			if( copy_to_user(arg, &rAbilities,
						sizeof(MW_ABILITIES)) )
				return -EFAULT;
		}
		PRINTK_2(TRACE_MWAVE,
			"mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
			" exit retval %x\n",
			retval);
	}
	break;

	case IOCTL_MW_READ_DATA:
	case IOCTL_MW_READCLEAR_DATA: {
		MW_READWRITE rReadData;
		unsigned short __user *pusBuffer = NULL;

		/* The user hands us an MW_READWRITE descriptor; pBuf inside
		 * it is itself a user-space buffer pointer. */
		if( copy_from_user(&rReadData, arg,
					sizeof(MW_READWRITE)) )
			return -EFAULT;
		pusBuffer = (unsigned short __user *) (rReadData.pBuf);

		PRINTK_4(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
			" size %lx, ioarg %lx pusBuffer %p\n",
			rReadData.ulDataLength, ioarg, pusBuffer);
		mutex_lock(&mwave_mutex);
		retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
				iocmd,
				pusBuffer,
				rReadData.ulDataLength,
				rReadData.usDspAddress);
		mutex_unlock(&mwave_mutex);
	}
	break;

	case IOCTL_MW_READ_INST: {
		MW_READWRITE rReadData;
		unsigned short __user *pusBuffer = NULL;

		if( copy_from_user(&rReadData, arg,
					sizeof(MW_READWRITE)) )
			return -EFAULT;
		pusBuffer = (unsigned short __user *) (rReadData.pBuf);

		PRINTK_4(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
			" size %lx, ioarg %lx pusBuffer %p\n",
			rReadData.ulDataLength / 2, ioarg,
			pusBuffer);
		mutex_lock(&mwave_mutex);
		/*
		 * NOTE(review): unlike IOCTL_MW_WRITE_INST below (which
		 * uses the IStore helper), this instruction-store read
		 * goes through the *D*Store helper while passing
		 * IOCTL_MW_READ_INST as the opcode.  Looks like it should
		 * be tp3780I_ReadWriteDspIStore -- confirm against the
		 * opcode dispatch in tp3780i.c before changing.
		 */
		retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
				iocmd, pusBuffer,
				rReadData.ulDataLength / 2,
				rReadData.usDspAddress);
		mutex_unlock(&mwave_mutex);
	}
	break;

	case IOCTL_MW_WRITE_DATA: {
		MW_READWRITE rWriteData;
		unsigned short __user *pusBuffer = NULL;

		if( copy_from_user(&rWriteData, arg,
					sizeof(MW_READWRITE)) )
			return -EFAULT;
		pusBuffer = (unsigned short __user *) (rWriteData.pBuf);

		PRINTK_4(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
			" size %lx, ioarg %lx pusBuffer %p\n",
			rWriteData.ulDataLength, ioarg,
			pusBuffer);
		mutex_lock(&mwave_mutex);
		retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
				iocmd, pusBuffer,
				rWriteData.ulDataLength,
				rWriteData.usDspAddress);
		mutex_unlock(&mwave_mutex);
	}
	break;

	case IOCTL_MW_WRITE_INST: {
		MW_READWRITE rWriteData;
		unsigned short __user *pusBuffer = NULL;

		if( copy_from_user(&rWriteData, arg,
					sizeof(MW_READWRITE)) )
			return -EFAULT;
		pusBuffer = (unsigned short __user *)(rWriteData.pBuf);

		PRINTK_4(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
			" size %lx, ioarg %lx pusBuffer %p\n",
			rWriteData.ulDataLength, ioarg,
			pusBuffer);
		mutex_lock(&mwave_mutex);
		retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
				iocmd, pusBuffer,
				rWriteData.ulDataLength,
				rWriteData.usDspAddress);
		mutex_unlock(&mwave_mutex);
	}
	break;

	case IOCTL_MW_REGISTER_IPC: {
		unsigned int ipcnum = (unsigned int) ioarg;

		/* ipcnum comes straight from user space; bound-check it
		 * before it is used as an array index. */
		if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::mwave_ioctl:"
					" IOCTL_MW_REGISTER_IPC:"
					" Error: Invalid ipcnum %x\n",
					ipcnum);
			return -EINVAL;
		}
		PRINTK_3(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
			" ipcnum %x entry usIntCount %x\n",
			ipcnum,
			pDrvData->IPCs[ipcnum].usIntCount);

		mutex_lock(&mwave_mutex);
		pDrvData->IPCs[ipcnum].bIsHere = FALSE;
		pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
		mutex_unlock(&mwave_mutex);

		PRINTK_2(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
			" ipcnum %x exit\n",
			ipcnum);
	}
	break;

	case IOCTL_MW_GET_IPC: {
		unsigned int ipcnum = (unsigned int) ioarg;

		if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::mwave_ioctl:"
					" IOCTL_MW_GET_IPC: Error:"
					" Invalid ipcnum %x\n", ipcnum);
			return -EINVAL;
		}
		PRINTK_3(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
			" ipcnum %x, usIntCount %x\n",
			ipcnum,
			pDrvData->IPCs[ipcnum].usIntCount);

		/*
		 * NOTE(review): mwave_mutex stays held across schedule()
		 * below, so another thread issuing IOCTL_MW_UNREGISTER_IPC
		 * blocks on the mutex until this sleeper is woken by the
		 * interrupt handler or a signal -- verify this is the
		 * intended semantics before restructuring.
		 */
		mutex_lock(&mwave_mutex);
		if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
			DECLARE_WAITQUEUE(wait, current);

			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl, thread for"
				" ipc %x going to sleep\n",
				ipcnum);
			add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
			pDrvData->IPCs[ipcnum].bIsHere = TRUE;
			set_current_state(TASK_INTERRUPTIBLE);
			/* check whether an event was signalled by */
			/* the interrupt handler while we were gone */
			if (pDrvData->IPCs[ipcnum].usIntCount == 1) {	/* first int has occurred (race condition) */
				pDrvData->IPCs[ipcnum].usIntCount = 2;	/* first int has been handled */
				PRINTK_2(TRACE_MWAVE,
					"mwavedd::mwave_ioctl"
					" IOCTL_MW_GET_IPC ipcnum %x"
					" handling first int\n",
					ipcnum);
			} else {	/* either 1st int has not yet occurred, or we have already handled the first int */
				schedule();
				if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
					pDrvData->IPCs[ipcnum].usIntCount = 2;
				}
				PRINTK_2(TRACE_MWAVE,
					"mwavedd::mwave_ioctl"
					" IOCTL_MW_GET_IPC ipcnum %x"
					" woke up and returning to"
					" application\n",
					ipcnum);
			}
			pDrvData->IPCs[ipcnum].bIsHere = FALSE;
			remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
			set_current_state(TASK_RUNNING);
			PRINTK_2(TRACE_MWAVE,
				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
				" returning thread for ipc %x"
				" processing\n",
				ipcnum);
		}
		mutex_unlock(&mwave_mutex);
	}
	break;

	case IOCTL_MW_UNREGISTER_IPC: {
		unsigned int ipcnum = (unsigned int) ioarg;

		PRINTK_2(TRACE_MWAVE,
			"mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
			" ipcnum %x\n",
			ipcnum);
		if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::mwave_ioctl:"
					" IOCTL_MW_UNREGISTER_IPC:"
					" Error: Invalid ipcnum %x\n",
					ipcnum);
			return -EINVAL;
		}
		mutex_lock(&mwave_mutex);
		if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
			pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
			/* kick any thread parked in IOCTL_MW_GET_IPC */
			if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) {
				wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue);
			}
		}
		mutex_unlock(&mwave_mutex);
	}
	break;

	default:
		return -ENOTTY;
	} /* switch */

	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);

	return retval;
}
/* read() is not supported on /dev/mwave; trace the attempt, fail with
 * -EINVAL.  All real work goes through mwave_ioctl(). */
static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
                          loff_t * ppos)
{
	PRINTK_5(TRACE_MWAVE,
		"mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
		file, buf, count, ppos);
	return -EINVAL;
}
/* write() is not supported on /dev/mwave; trace the attempt, fail with
 * -EINVAL.  All real work goes through mwave_ioctl(). */
static ssize_t mwave_write(struct file *file, const char __user *buf,
                           size_t count, loff_t * ppos)
{
	PRINTK_5(TRACE_MWAVE,
		"mwavedd::mwave_write entry file %p, buf %p,"
		" count %zx ppos %p\n",
		file, buf, count, ppos);
	return -EINVAL;
}
/*
 * register_serial_portandirq - hand the mwave UART over to the 8250 core
 * @port: UART I/O base; must be one of the four legacy COM bases
 * @irq:  UART irq; must be 3, 4, 5 or 7
 *
 * Returns the 8250 line number from serial8250_register_port(), or -1
 * when the port or irq is not one of the accepted legacy values.
 */
static int register_serial_portandirq(unsigned int port, int irq)
{
	struct uart_port uart;

	switch ( port ) {
		case 0x3f8:
		case 0x2f8:
		case 0x3e8:
		case 0x2e8:
			/* OK */
			break;
		default:
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::register_serial_portandirq:"
					" Error: Illegal port %x\n", port );
			return -1;
	} /* switch */
	/* port is okay */

	switch ( irq ) {
		case 3:
		case 4:
		case 5:
		case 7:
			/* OK */
			break;
		default:
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd::register_serial_portandirq:"
					" Error: Illegal irq %x\n", irq );
			return -1;
	} /* switch */
	/* irq is okay */

	/* zero-fill, then set only the fields the 8250 core needs */
	memset(&uart, 0, sizeof(struct uart_port));

	uart.uartclk =  1843200;
	uart.iobase = port;
	uart.irq = irq;
	uart.iotype = UPIO_PORT;
	uart.flags =  UPF_SHARE_IRQ;
	return serial8250_register_port(&uart);
}
/* File operations for /dev/mwave.  All control is via unlocked_ioctl;
 * read()/write() are stubs that always fail with -EINVAL, and
 * open()/release() keep no per-open state. */
static const struct file_operations mwave_fops = {
	.owner		= THIS_MODULE,
	.read		= mwave_read,
	.write		= mwave_write,
	.unlocked_ioctl	= mwave_ioctl,
	.open		= mwave_open,
	.release	= mwave_close,
	.llseek		= default_llseek,
};
/* /dev/mwave misc device: fixed minor MWAVE_MINOR, operations above */
static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
#if 0 /* totally b0rked */
/*
* sysfs support <paulsch@us.ibm.com>
*/
struct device mwave_device;
/* Prevent code redundancy, create a macro for mwave_show_* functions. */
#define mwave_show_function(attr_name, format_string, field) \
static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
DSP_3780I_CONFIG_SETTINGS *pSettings = \
&mwave_s_mdd.rBDData.rDspSettings; \
return sprintf(buf, format_string, pSettings->field); \
}
/* All of our attributes are read attributes. */
#define mwave_dev_rd_attr(attr_name, format_string, field) \
mwave_show_function(attr_name, format_string, field) \
static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
static struct device_attribute * const mwave_dev_attrs[] = {
&dev_attr_3780i_dma,
&dev_attr_3780i_irq,
&dev_attr_3780i_io,
&dev_attr_uart_irq,
&dev_attr_uart_io,
};
#endif
/*
* mwave_init is called on module load
*
* mwave_exit is called on module unload
* mwave_exit is also used to clean up after an aborted mwave_init
*/
/*
 * mwave_exit - tear down whatever mwave_init() managed to set up
 *
 * Also called from mwave_init()'s error path, so every step is guarded
 * by the corresponding "got this far" flag (or sLine).  Teardown runs
 * in the reverse order of the initialization stages.
 */
static void mwave_exit(void)
{
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");

#if 0
	for (i = 0; i < pDrvData->nr_registered_attrs; i++)
		device_remove_file(&mwave_device, mwave_dev_attrs[i]);
	pDrvData->nr_registered_attrs = 0;

	if (pDrvData->device_registered) {
		device_unregister(&mwave_device);
		pDrvData->device_registered = FALSE;
	}
#endif

	/* sLine >= 0 means the UART was handed to the 8250 driver */
	if ( pDrvData->sLine >= 0 ) {
		serial8250_unregister_port(pDrvData->sLine);
	}
	if (pDrvData->bMwaveDevRegistered) {
		misc_deregister(&mwave_misc_dev);
	}
	if (pDrvData->bDSPEnabled) {
		tp3780I_DisableDSP(&pDrvData->rBDData);
	}
	if (pDrvData->bResourcesClaimed) {
		tp3780I_ReleaseResources(&pDrvData->rBDData);
	}
	if (pDrvData->bBDInitialized) {
		tp3780I_Cleanup(&pDrvData->rBDData);
	}

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
}
module_exit(mwave_exit);
/*
 * mwave_init - bring up the 3780i DSP board and register device nodes
 *
 * Initialization is staged; each completed stage sets a flag (or
 * pDrvData->sLine) so that mwave_exit(), reused as the error-path
 * cleanup, only undoes what was actually set up.
 *
 * Returns 0 on success, -EIO if any stage fails.
 */
static int __init mwave_init(void)
{
	int i;
	int retval = 0;
	pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;

	PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");

	/* Zero all driver state up front; flags below are set
	 * explicitly for readability. */
	memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));

	pDrvData->bBDInitialized = FALSE;
	pDrvData->bResourcesClaimed = FALSE;
	pDrvData->bDSPEnabled = FALSE;
	pDrvData->bDSPReset = FALSE;
	pDrvData->bMwaveDevRegistered = FALSE;
	pDrvData->sLine = -1;	/* no 8250 line registered yet */

	for (i = 0; i < ARRAY_SIZE(pDrvData->IPCs); i++) {
		pDrvData->IPCs[i].bIsEnabled = FALSE;
		pDrvData->IPCs[i].bIsHere = FALSE;
		pDrvData->IPCs[i].usIntCount = 0;	/* no ints received yet */
		init_waitqueue_head(&pDrvData->IPCs[i].ipc_wait_queue);
	}

	/* Stage 1: board/bus data */
	retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd::mwave_init: Error:"
				" Failed to initialize board data\n");
		goto cleanup_error;
	}
	pDrvData->bBDInitialized = TRUE;

	/* Stage 2: work out I/O, irq and DMA resources */
	retval = tp3780I_CalcResources(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_CalcResources"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to calculate resources\n");
		goto cleanup_error;
	}

	/* Stage 3: claim those resources */
	retval = tp3780I_ClaimResources(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_ClaimResources"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to claim resources\n");
		goto cleanup_error;
	}
	pDrvData->bResourcesClaimed = TRUE;

	/* Stage 4: power up / enable the DSP */
	retval = tp3780I_EnableDSP(&pDrvData->rBDData);
	PRINTK_2(TRACE_MWAVE,
		"mwavedd::mwave_init, return from tp3780I_EnableDSP"
		" retval %x\n",
		retval);
	if (retval) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to enable DSP\n");
		goto cleanup_error;
	}
	pDrvData->bDSPEnabled = TRUE;

	/* Stage 5: expose /dev/mwave */
	if (misc_register(&mwave_misc_dev) < 0) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to register misc device\n");
		goto cleanup_error;
	}
	pDrvData->bMwaveDevRegistered = TRUE;

	/* Stage 6: hand the on-board UART to the 8250 driver */
	pDrvData->sLine = register_serial_portandirq(
		pDrvData->rBDData.rDspSettings.usUartBaseIO,
		pDrvData->rBDData.rDspSettings.usUartIrq
	);
	if (pDrvData->sLine < 0) {
		PRINTK_ERROR(KERN_ERR_MWAVE
				"mwavedd:mwave_init: Error:"
				" Failed to register serial driver\n");
		goto cleanup_error;
	}
	/* uart is registered */

#if 0
	/* sysfs */
	memset(&mwave_device, 0, sizeof (struct device));
	dev_set_name(&mwave_device, "mwave");

	if (device_register(&mwave_device))
		goto cleanup_error;
	pDrvData->device_registered = TRUE;
	for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
		if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
			PRINTK_ERROR(KERN_ERR_MWAVE
					"mwavedd:mwave_init: Error:"
					" Failed to create sysfs file %s\n",
					mwave_dev_attrs[i]->attr.name);
			goto cleanup_error;
		}
		pDrvData->nr_registered_attrs++;
	}
#endif

	/* SUCCESS! */
	return 0;

cleanup_error:
	PRINTK_ERROR(KERN_ERR_MWAVE
			"mwavedd::mwave_init: Error:"
			" Failed to initialize\n");
	mwave_exit(); /* clean up */

	return -EIO;
}
module_init(mwave_init);
| gpl-2.0 |
soderstrom-rikard/adi-linux | drivers/zorro/names.c | 12628 | 2555 | /*
* Zorro Device Name Tables
*
* Copyright (C) 1999--2000 Geert Uytterhoeven
*
* Based on the PCI version:
*
* Copyright 1992--1999 Drew Eckhardt, Frederic Potter,
* David Mosberger-Tang, Martin Mares
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/zorro.h>
#ifdef CONFIG_ZORRO_NAMES
/* One known product of a manufacturer.  'seen' counts how many instances
 * zorro_name_device() has named so far, so duplicates get a "(#n)" suffix. */
struct zorro_prod_info {
	__u16 prod;
	unsigned short seen;
	const char *name;
};

/* A manufacturer together with its table of 'nr' known products. */
struct zorro_manuf_info {
	__u16 manuf;
	unsigned short nr;
	const char *name;
	struct zorro_prod_info *prods;
};
/*
* This is ridiculous, but we want the strings in
* the .init section so that they don't take up
* real memory.. Parse the same file multiple times
* to get all the info.
*/
#define MANUF( manuf, name ) static char __manufstr_##manuf[] __initdata = name;
#define ENDMANUF()
#define PRODUCT( manuf, prod, name ) static char __prodstr_##manuf##prod[] __initdata = name;
#include "devlist.h"
#define MANUF( manuf, name ) static struct zorro_prod_info __prods_##manuf[] __initdata = {
#define ENDMANUF() };
#define PRODUCT( manuf, prod, name ) { 0x##prod, 0, __prodstr_##manuf##prod },
#include "devlist.h"
static struct zorro_manuf_info __initdata zorro_manuf_list[] = {
#define MANUF( manuf, name ) { 0x##manuf, sizeof(__prods_##manuf) / sizeof(struct zorro_prod_info), __manufstr_##manuf, __prods_##manuf },
#define ENDMANUF()
#define PRODUCT( manuf, prod, name )
#include "devlist.h"
};
#define MANUFS (sizeof(zorro_manuf_list)/sizeof(struct zorro_manuf_info))
/*
 * zorro_name_device - fill in dev->name from the manufacturer/product tables
 * @dev: the Zorro device to name
 *
 * Produces one of three forms, depending on how much of dev->id is
 * recognized:
 *   "Zorro device %08x"           - manufacturer unknown
 *   "Zorro device %08x (<manuf>)" - manufacturer known, product unknown
 *   "<manuf> <prod>[ (#n)]"       - full match; repeats get a "(#n)" suffix
 */
void __init zorro_name_device(struct zorro_dev *dev)
{
	char *name = dev->name;
	const struct zorro_manuf_info *manuf = NULL;
	unsigned int idx;

	/* Look up the manufacturer part of the device ID. */
	for (idx = 0; idx < MANUFS; idx++) {
		if (zorro_manuf_list[idx].manuf == ZORRO_MANUF(dev->id)) {
			manuf = &zorro_manuf_list[idx];
			break;
		}
	}
	if (!manuf) {
		/* Couldn't find either the manufacturer nor the product */
		sprintf(name, "Zorro device %08x", dev->id);
		return;
	}

	/* Manufacturer found -- now search its product table. */
	for (idx = 0; idx < manuf->nr; idx++) {
		struct zorro_prod_info *prod = &manuf->prods[idx];

		if (prod->prod ==
		    ((ZORRO_PROD(dev->id)<<8) | ZORRO_EPC(dev->id))) {
			/* Full match */
			char *n = name + sprintf(name, "%s %s",
						 manuf->name, prod->name);
			int nr = prod->seen + 1;

			prod->seen = nr;
			if (nr > 1)
				sprintf(n, " (#%d)", nr);
			return;
		}
	}

	/* Ok, found the manufacturer, but unknown product */
	sprintf(name, "Zorro device %08x (%s)", dev->id, manuf->name);
}
#else
/* Stub used when CONFIG_ZORRO_NAMES is disabled: dev->name is left as-is. */
void __init zorro_name_device(struct zorro_dev *dev)
{
}
#endif
| gpl-2.0 |
showp1984/bricked-flo | arch/x86/math-emu/poly_atan.c | 14420 | 6375 | /*---------------------------------------------------------------------------+
| poly_atan.c |
| |
| Compute the arctan of a FPU_REG, using a polynomial approximation. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@suburbia.net |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "status_w.h"
#include "control_w.h"
#include "poly.h"
/*
 * Fixed-point coefficient tables for the atan polynomial.  The argument
 * is reduced to a small range first (see poly_atan() below); atan(x) is
 * then evaluated as an odd series whose terms are split into positive
 * and negative groups -- 64-bit significands, presumably scaled to match
 * mul64_Xsig/polynomial_Xsig (coefficients not re-derived here).
 */
#define	HIPOWERon	6	/* odd poly, negative terms */
static const unsigned long long oddnegterms[HIPOWERon] = {
	0x0000000000000000LL,	/* Dummy (not for - 1.0) */
	0x015328437f756467LL,
	0x0005dda27b73dec6LL,
	0x0000226bf2bfb91aLL,
	0x000000ccc439c5f7LL,
	0x0000000355438407LL
};

#define	HIPOWERop	6	/* odd poly, positive terms */
static const unsigned long long oddplterms[HIPOWERop] = {
/*	0xaaaaaaaaaaaaaaabLL,  transferred to fixedpterm[] */
	0x0db55a71875c9ac2LL,
	0x0029fce2d67880b0LL,
	0x0000dfd3908b4596LL,
	0x00000550fd61dab4LL,
	0x0000001c9422b3f9LL,
	0x000000003e3301e1LL
};

/* Denominator constant for the rational part of the approximation. */
static const unsigned long long denomterm = 0xebd9b842c5c53a0eLL;

/* 2/3 in extended fixed point (the 0xaaaa.. term moved out of oddplterms). */
static const Xsig fixedpterm = MK_XSIG(0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa);

/* Significand of pi, used for all quadrant/sign fix-ups. */
static const Xsig pi_signif = MK_XSIG(0xc90fdaa2, 0x2168c234, 0xc4c6628b);
/*--- poly_atan() -----------------------------------------------------------+
 | Compute an fpatan-style result from st0 and st1, storing it into st1.    |
 | The ratio of the two significands is reduced to a small argument, a     |
 | fixed-point odd polynomial is evaluated, and pi-based fix-ups restore   |
 | the correct octant/sign.                                                |
 +---------------------------------------------------------------------------*/
void poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
	       FPU_REG *st1_ptr, u_char st1_tag)
{
	u_char transformed, inverted, sign1, sign2;
	int exponent;
	long int dummy_exp;
	Xsig accumulator, Numer, Denom, accumulatore, argSignif, argSq, argSqSq;
	u_char tag;

	sign1 = getsign(st0_ptr);
	sign2 = getsign(st1_ptr);
	if (st0_tag == TAG_Valid) {
		exponent = exponent(st0_ptr);
	} else {
		/* This gives non-compatible stack contents... */
		FPU_to_exp16(st0_ptr, st0_ptr);
		exponent = exponent16(st0_ptr);
	}
	if (st1_tag == TAG_Valid) {
		/* exponent now holds exp(st0) - exp(st1). */
		exponent -= exponent(st1_ptr);
	} else {
		/* This gives non-compatible stack contents... */
		FPU_to_exp16(st1_ptr, st1_ptr);
		exponent -= exponent16(st1_ptr);
	}

	/*
	 * Divide the smaller magnitude by the larger so the working
	 * argument is <= 1; remember (in "inverted") which way round.
	 */
	if ((exponent < 0) || ((exponent == 0) &&
			       ((st0_ptr->sigh < st1_ptr->sigh) ||
				((st0_ptr->sigh == st1_ptr->sigh) &&
				 (st0_ptr->sigl < st1_ptr->sigl))))) {
		inverted = 1;
		Numer.lsw = Denom.lsw = 0;
		XSIG_LL(Numer) = significand(st0_ptr);
		XSIG_LL(Denom) = significand(st1_ptr);
	} else {
		inverted = 0;
		exponent = -exponent;
		Numer.lsw = Denom.lsw = 0;
		XSIG_LL(Numer) = significand(st1_ptr);
		XSIG_LL(Denom) = significand(st0_ptr);
	}
	div_Xsig(&Numer, &Denom, &argSignif);
	exponent += norm_Xsig(&argSignif);

	if ((exponent >= -1)
	    || ((exponent == -2) && (argSignif.msw > 0xd413ccd0))) {
		/* The argument is greater than sqrt(2)-1 (=0.414213562...) */
		/* Convert the argument by an identity for atan */
		transformed = 1;

		if (exponent >= 0) {
#ifdef PARANOID
			/* Only arg == 1.0 may reach here with exponent >= 0. */
			if (!((exponent == 0) &&
			      (argSignif.lsw == 0) && (argSignif.midw == 0) &&
			      (argSignif.msw == 0x80000000))) {
				EXCEPTION(EX_INTERNAL | 0x104);	/* There must be a logic error */
				return;
			}
#endif /* PARANOID */
			argSignif.msw = 0;	/* Make the transformed arg -> 0.0 */
		} else {
			/*
			 * Replace arg by (1-arg)/(1+arg) -- presumably the
			 * atan(x) = pi/4 - atan((1-x)/(1+x)) identity; the
			 * pi/4 part is added back in the "transformed"
			 * fix-up below.
			 */
			Numer.lsw = Denom.lsw = argSignif.lsw;
			XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);

			if (exponent < -1)
				shr_Xsig(&Numer, -1 - exponent);
			negate_Xsig(&Numer);

			shr_Xsig(&Denom, -exponent);
			Denom.msw |= 0x80000000;

			div_Xsig(&Numer, &Denom, &argSignif);

			exponent = -1 + norm_Xsig(&argSignif);
		}
	} else {
		transformed = 0;
	}

	/* argSq = arg^2, argSqSq = arg^4 (fixed-point squares). */
	argSq.lsw = argSignif.lsw;
	argSq.midw = argSignif.midw;
	argSq.msw = argSignif.msw;
	mul_Xsig_Xsig(&argSq, &argSq);

	argSqSq.lsw = argSq.lsw;
	argSqSq.midw = argSq.midw;
	argSqSq.msw = argSq.msw;
	mul_Xsig_Xsig(&argSqSq, &argSqSq);

	accumulatore.lsw = argSq.lsw;
	XSIG_LL(accumulatore) = XSIG_LL(argSq);

	shr_Xsig(&argSq, 2 * (-1 - exponent - 1));
	shr_Xsig(&argSqSq, 4 * (-1 - exponent - 1));

	/* Now have argSq etc with binary point at the left
	   .1xxxxxxxx */

	/* Do the basic fixed point polynomial evaluation */
	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
			oddplterms, HIPOWERop - 1);
	mul64_Xsig(&accumulator, &XSIG_LL(argSq));
	negate_Xsig(&accumulator);
	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms,
			HIPOWERon - 1);
	negate_Xsig(&accumulator);
	add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);

	/* Build and apply the rational denominator. */
	mul64_Xsig(&accumulatore, &denomterm);
	shr_Xsig(&accumulatore, 1 + 2 * (-1 - exponent));
	accumulatore.msw |= 0x80000000;

	div_Xsig(&accumulator, &accumulatore, &accumulator);

	/* Combine: result ~ arg - arg^3 * poly/denom (scaled by shr 3). */
	mul_Xsig_Xsig(&accumulator, &argSignif);
	mul_Xsig_Xsig(&accumulator, &argSq);

	shr_Xsig(&accumulator, 3);
	negate_Xsig(&accumulator);
	add_Xsig_Xsig(&accumulator, &argSignif);

	if (transformed) {
		/* compute pi/4 - accumulator */
		shr_Xsig(&accumulator, -1 - exponent);
		negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &pi_signif);
		exponent = -1;
	}

	if (inverted) {
		/* compute pi/2 - accumulator */
		shr_Xsig(&accumulator, -exponent);
		negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &pi_signif);
		exponent = 0;
	}

	if (sign1) {
		/* compute pi - accumulator */
		shr_Xsig(&accumulator, 1 - exponent);
		negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &pi_signif);
		exponent = 1;
	}

	/* Round, store into st1 with the sign of the original st1. */
	exponent += round_Xsig(&accumulator);

	significand(st1_ptr) = XSIG_LL(accumulator);
	setexponent16(st1_ptr, exponent);

	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
	FPU_settagi(1, tag);

	set_precision_flag_up();	/* We do not really know if up or down,
					   use this as the default. */
}
| gpl-2.0 |
Sudokamikaze/XKernel-grouper | arch/arm/mach-omap2/smartreflex.c | 341 | 28581 | /*
* OMAP SmartReflex Voltage Control
*
* Author: Thara Gopinath <thara@ti.com>
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Thara Gopinath <thara@ti.com>
*
* Copyright (C) 2008 Nokia Corporation
* Kalle Jokiniemi
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Lesly A M <x0080970@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <plat/common.h>
#include "pm.h"
#include "smartreflex.h"
#define SMARTREFLEX_NAME_LEN	16
#define NVALUE_NAME_LEN		40
#define SR_DISABLE_TIMEOUT	200	/* loop bound for omap_test_timeout() */

/* Per-instance SmartReflex state, one per AVS-capable voltage domain. */
struct omap_sr {
	int				srid;		/* instance id (pdev->id) */
	int				ip_type;	/* SR_TYPE_V1 or SR_TYPE_V2 */
	int				nvalue_count;	/* entries in nvalue_table */
	bool				autocomp_active; /* class enable() succeeded */
	u32				clk_length;	/* SRCLKLENGTH field value */
	u32				err_weight;	/* ERRCONFIG tuning fields... */
	u32				err_minlimit;	/* ...errminlimit is set per-OPP
							   in sr_enable() */
	u32				err_maxlimit;
	u32				accum_data;
	u32				senn_avgweight;	/* minmaxavg sensor weights */
	u32				senp_avgweight;
	u32				senp_mod;	/* sensor enable fields for SRCONFIG */
	u32				senn_mod;
	unsigned int			irq;		/* 0 if no IRQ resource */
	void __iomem			*base;		/* ioremapped register base */
	struct platform_device		*pdev;
	struct list_head		node;		/* link in sr_list */
	struct omap_sr_nvalue_table	*nvalue_table;	/* efuse offset -> ntarget */
	struct voltagedomain		*voltdm;	/* owning voltage domain */
	struct dentry			*dbg_dir;	/* debugfs "smartreflex" dir */
};

/* sr_list contains all the instances of smartreflex module */
static LIST_HEAD(sr_list);

/* Registered class driver (class 2/3 policy) and PMIC hooks, if any. */
static struct omap_sr_class_data *sr_class;
static struct omap_sr_pmic_data *sr_pmic_data;
/* Write @value to the SR register at @offset. */
static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
{
	__raw_writel(value, sr->base + offset);
}
/*
 * Read-modify-write the SR register at @offset: clear @mask, set @value.
 *
 * Smartreflex error config register is special as it contains
 * certain status bits which if written a 1 into means a clear
 * of those bits. So in order to make sure no accidental write of
 * 1 happens to those status bits, do a clear of them in the read
 * value. This mean this API doesn't rewrite values in these bits
 * if they are currently set, but does allow the caller to write
 * those bits.
 */
static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
				 u32 value)
{
	u32 val;
	u32 errconfig_offs = 0, errconfig_mask = 0;

	if (sr->ip_type == SR_TYPE_V1) {
		errconfig_offs = ERRCONFIG_V1;
		errconfig_mask = ERRCONFIG_STATUS_V1_MASK;
	} else if (sr->ip_type == SR_TYPE_V2) {
		errconfig_offs = ERRCONFIG_V2;
		errconfig_mask = ERRCONFIG_VPBOUNDINTST_V2;
	}

	val = __raw_readl(sr->base + offset) & ~mask;
	if (offset == errconfig_offs)
		val &= ~errconfig_mask;
	__raw_writel(val | value, sr->base + offset);
}
/* Read the SR register at @offset. */
static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
{
	u32 val = __raw_readl(sr->base + offset);

	return val;
}
static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
{
struct omap_sr *sr_info;
if (!voltdm) {
pr_err("%s: Null voltage domain passed!\n", __func__);
return ERR_PTR(-EINVAL);
}
list_for_each_entry(sr_info, &sr_list, node) {
if (voltdm == sr_info->voltdm)
return sr_info;
}
return ERR_PTR(-ENODATA);
}
/*
 * SR interrupt handler: read the pending status, acknowledge it by
 * writing the value back, then forward it to the class driver.
 */
static irqreturn_t sr_interrupt(int irq, void *data)
{
	struct omap_sr *sr_info = data;
	u32 status = 0;

	switch (sr_info->ip_type) {
	case SR_TYPE_V1:
		/* Read the status bits and clear them by writing back. */
		status = sr_read_reg(sr_info, ERRCONFIG_V1);
		sr_write_reg(sr_info, ERRCONFIG_V1, status);
		break;
	case SR_TYPE_V2:
		/* Read the status bits and clear them by writing back. */
		status = sr_read_reg(sr_info, IRQSTATUS);
		sr_write_reg(sr_info, IRQSTATUS, status);
		break;
	}

	if (sr_class->notify)
		sr_class->notify(sr_info->voltdm, status);

	return IRQ_HANDLED;
}
/*
 * Derive sr->clk_length from the system clock rate.  On an unknown
 * rate the field is left unchanged and an error is logged.
 */
static void sr_set_clk_length(struct omap_sr *sr)
{
	const char *clk_name = cpu_is_omap34xx() ? "sys_ck" : "sys_clkin_ck";
	struct clk *clk;
	u32 rate;

	clk = clk_get(NULL, clk_name);
	if (IS_ERR(clk)) {
		dev_err(&sr->pdev->dev, "%s: unable to get sys clk\n",
			__func__);
		return;
	}

	rate = clk_get_rate(clk);
	clk_put(clk);

	switch (rate) {
	case 12000000:
		sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
		break;
	case 13000000:
		sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
		break;
	case 19200000:
		sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
		break;
	case 26000000:
		sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
		break;
	case 38400000:
		sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Invalid sysclk value: %d\n",
			__func__, rate);
		break;
	}
}
/*
 * Populate the tuning fields of @sr with the OMAP3430 defaults.
 *
 * For time being these values are defined in smartreflex.h
 * and populated during init. May be they can be moved to board
 * file or pmic specific data structure. In that case these structure
 * fields will have to be populated using the pdata or pmic structure.
 */
static void sr_set_regfields(struct omap_sr *sr)
{
	if (!cpu_is_omap34xx() && !cpu_is_omap44xx())
		return;

	sr->err_weight = OMAP3430_SR_ERRWEIGHT;
	sr->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
	sr->accum_data = OMAP3430_SR_ACCUMDATA;

	/* The MPU domain uses the SR1 sensor weights, all others SR2. */
	if (strcmp(sr->voltdm->name, "mpu") == 0) {
		sr->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
		sr->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
	} else {
		sr->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT;
		sr->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT;
	}
}
/* Hand the domain to the class driver; mark active on success. */
static void sr_start_vddautocomp(struct omap_sr *sr)
{
	if (!sr_class || !sr_class->enable || !sr_class->configure) {
		dev_warn(&sr->pdev->dev,
			"%s: smartreflex class driver not registered\n",
			__func__);
		return;
	}

	if (sr_class->enable(sr->voltdm) == 0)
		sr->autocomp_active = true;
}
/* Disable autocompensation (resetting the VP voltage) if it is active. */
static void sr_stop_vddautocomp(struct omap_sr *sr)
{
	if (!sr_class || !sr_class->disable) {
		dev_warn(&sr->pdev->dev,
			"%s: smartreflex class driver not registered\n",
			__func__);
		return;
	}

	if (!sr->autocomp_active)
		return;

	sr_class->disable(sr->voltdm, 1);
	sr->autocomp_active = false;
}
/*
* This function handles the intializations which have to be done
* only when both sr device and class driver regiter has
* completed. This will be attempted to be called from both sr class
* driver register and sr device intializtion API's. Only one call
* will ultimately succeed.
*
* Currently this function registers interrrupt handler for a particular SR
* if smartreflex class driver is already registered and has
* requested for interrupts and the SR interrupt line in present.
*/
static int sr_late_init(struct omap_sr *sr_info)
{
char *name;
struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
struct resource *mem;
int ret = 0;
if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
if (name == NULL) {
ret = -ENOMEM;
goto error;
}
ret = request_irq(sr_info->irq, sr_interrupt,
0, name, (void *)sr_info);
if (ret)
goto error;
disable_irq(sr_info->irq);
}
if (pdata && pdata->enable_on_init)
sr_start_vddautocomp(sr_info);
return ret;
error:
iounmap(sr_info->base);
mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
list_del(&sr_info->node);
dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
"interrupt handler. Smartreflex will"
"not function as desired\n", __func__);
kfree(name);
kfree(sr_info);
return ret;
}
/*
 * sr_v1_disable() - disable an SR_TYPE_V1 module and quiesce its interrupts.
 *
 * Sequence: arm the MCUDisableAcknowledge interrupt, clear
 * SRCONFIG.SRENABLE, mask the remaining V1 interrupts (writing 1 to the
 * *INTST bits clears pending status), poll for the disable acknowledge,
 * then mask and clear that interrupt too.
 */
static void sr_v1_disable(struct omap_sr *sr)
{
	int timeout = 0;

	/* Enable MCUDisableAcknowledge interrupt */
	sr_modify_reg(sr, ERRCONFIG_V1,
			ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);

	/* SRCONFIG - disable SR */
	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);

	/* Disable all other SR interrupts and clear the status */
	sr_modify_reg(sr, ERRCONFIG_V1,
			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
			ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
			(ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
			ERRCONFIG_MCUBOUNDINTST |
			ERRCONFIG_VPBOUNDINTST_V1));

	/*
	 * Wait for SR to be disabled.
	 * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
	 */
	omap_test_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
			ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
			timeout);

	if (timeout >= SR_DISABLE_TIMEOUT)
		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
			__func__);

	/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
	sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
			ERRCONFIG_MCUDISACKINTST);
}
/*
 * sr_v2_disable() - disable an SR_TYPE_V2 module and quiesce its interrupts.
 *
 * Same pattern as sr_v1_disable(), but V2 splits interrupt control over
 * the IRQENABLE_SET/IRQENABLE_CLR/IRQSTATUS registers.
 */
static void sr_v2_disable(struct omap_sr *sr)
{
	int timeout = 0;

	/* Enable MCUDisableAcknowledge interrupt */
	sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);

	/* SRCONFIG - disable SR */
	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);

	/* Disable all other SR interrupts and clear the status */
	sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
			ERRCONFIG_VPBOUNDINTST_V2);
	sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
			IRQENABLE_MCUVALIDINT |
			IRQENABLE_MCUBOUNDSINT));
	sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
			IRQSTATUS_MCVALIDINT |
			IRQSTATUS_MCBOUNDSINT));

	/*
	 * Wait for SR to be disabled.
	 * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
	 */
	omap_test_timeout((sr_read_reg(sr, IRQSTATUS) &
			IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
			timeout);

	if (timeout >= SR_DISABLE_TIMEOUT)
		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
			__func__);

	/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
	sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
	sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
}
/*
 * Look up the ntarget value for @efuse_offs in the instance's nvalue
 * table.  Returns 0 when the table is missing or has no matching entry.
 */
static u32 sr_retrieve_nvalue(struct omap_sr *sr, u32 efuse_offs)
{
	struct omap_sr_nvalue_table *row;
	int idx;

	if (!sr->nvalue_table) {
		dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
			__func__);
		return 0;
	}

	for (idx = 0; idx < sr->nvalue_count; idx++) {
		row = &sr->nvalue_table[idx];
		if (row->efuse_offs == efuse_offs)
			return row->nvalue;
	}

	return 0;
}
/* Public Functions */
/**
* sr_configure_errgen() - Configures the smrtreflex to perform AVS using the
* error generator module.
* @voltdm: VDD pointer to which the SR module to be configured belongs to.
*
* This API is to be called from the smartreflex class driver to
* configure the error generator module inside the smartreflex module.
* SR settings if using the ERROR module inside Smartreflex.
* SR CLASS 3 by default uses only the ERROR module where as
* SR CLASS 2 can choose between ERROR module and MINMAXAVG
* module. Returns 0 on success and error value in case of failure.
*/
int sr_configure_errgen(struct voltagedomain *voltdm)
{
u32 sr_config, sr_errconfig, errconfig_offs, vpboundint_en;
u32 vpboundint_st, senp_en = 0, senn_en = 0;
u8 senp_shift, senn_shift;
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
pr_warning("%s: omap_sr struct for sr_%s not found\n",
__func__, voltdm->name);
return -EINVAL;
}
if (!sr->clk_length)
sr_set_clk_length(sr);
senp_en = sr->senp_mod;
senn_en = sr->senn_mod;
sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
if (sr->ip_type == SR_TYPE_V1) {
sr_config |= SRCONFIG_DELAYCTRL;
senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
errconfig_offs = ERRCONFIG_V1;
vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
} else if (sr->ip_type == SR_TYPE_V2) {
senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
errconfig_offs = ERRCONFIG_V2;
vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
} else {
dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
"module without specifying the ip\n", __func__);
return -EINVAL;
}
sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
sr_write_reg(sr, SRCONFIG, sr_config);
sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
(sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
(sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
sr_errconfig);
/* Enabling the interrupts if the ERROR module is used */
sr_modify_reg(sr, errconfig_offs,
vpboundint_en, (vpboundint_en | vpboundint_st));
return 0;
}
/**
 * sr_configure_minmax() - Configures the smrtreflex to perform AVS using the
 *			 minmaxavg module.
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the smartreflex class driver to
 * configure the minmaxavg module inside the smartreflex module.
 * SR settings if using the ERROR module inside Smartreflex.
 * SR CLASS 3 by default uses only the ERROR module where as
 * SR CLASS 2 can choose between ERROR module and MINMAXAVG
 * module. Returns 0 on success and error value in case of failure.
 */
int sr_configure_minmax(struct voltagedomain *voltdm)
{
	u32 sr_config, sr_avgwt;
	u32 senp_en = 0, senn_en = 0;
	u8 senp_shift, senn_shift;
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for sr_%s not found\n",
			__func__, voltdm->name);
		return -EINVAL;
	}

	if (!sr->clk_length)
		sr_set_clk_length(sr);

	senp_en = sr->senp_mod;
	senn_en = sr->senn_mod;

	sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
		SRCONFIG_SENENABLE |
		(sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);

	/* Register layout differs between the V1 and V2 IP revisions. */
	if (sr->ip_type == SR_TYPE_V1) {
		sr_config |= SRCONFIG_DELAYCTRL;
		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
	} else if (sr->ip_type == SR_TYPE_V2) {
		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
	} else {
		/* Fix: adjacent literals previously concatenated to
		 * "smartreflexmodule" -- a space was missing. */
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			" module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
	sr_write_reg(sr, SRCONFIG, sr_config);
	sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
		(sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
	sr_write_reg(sr, AVGWEIGHT, sr_avgwt);

	/*
	 * Enabling the interrupts if MINMAXAVG module is used.
	 * TODO: check if all the interrupts are mandatory
	 */
	if (sr->ip_type == SR_TYPE_V1) {
		sr_modify_reg(sr, ERRCONFIG_V1,
			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
			ERRCONFIG_MCUBOUNDINTEN),
			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
			 ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
			 ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
	} else if (sr->ip_type == SR_TYPE_V2) {
		sr_write_reg(sr, IRQSTATUS,
			IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
			IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
		sr_write_reg(sr, IRQENABLE_SET,
			IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
			IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
	}

	return 0;
}
/**
 * sr_enable() - Enables the smartreflex module.
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 * @volt:	The voltage at which the Voltage domain associated with
 *		the smartreflex module is operating at.
 *		This is required only to program the correct Ntarget value.
 *
 * This API is to be called from the smartreflex class driver to
 * enable a smartreflex module. Returns 0 on success. Returns error
 * value if the voltage passed is wrong or if ntarget value is wrong.
 */
int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
{
	u32 nvalue_reciprocal;
	struct omap_volt_data *volt_data;
	struct omap_sr *sr = _sr_lookup(voltdm);
	int ret;

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for sr_%s not found\n",
			__func__, voltdm->name);
		return -EINVAL;
	}

	volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);

	if (IS_ERR(volt_data)) {
		dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table"
			"for nominal voltage %ld\n", __func__, volt);
		return -ENODATA;
	}

	nvalue_reciprocal = sr_retrieve_nvalue(sr, volt_data->sr_efuse_offs);

	if (!nvalue_reciprocal) {
		dev_warn(&sr->pdev->dev, "%s: NVALUE = 0 at voltage %ld\n",
			__func__, volt);
		return -ENODATA;
	}

	/* errminlimit is opp dependent and hence linked to voltage */
	sr->err_minlimit = volt_data->sr_errminlimit;

	pm_runtime_get_sync(&sr->pdev->dev);

	/* Check if SR is already enabled. If yes do nothing */
	if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
		return 0;

	/* Configure SR */
	ret = sr_class->configure(voltdm);
	if (ret)
		/* NOTE(review): returns with the runtime-PM reference taken
		 * above still held -- verify this balances with the put in
		 * sr_disable(), or a reference leaks on configure failure. */
		return ret;

	sr_write_reg(sr, NVALUERECIPROCAL, nvalue_reciprocal);

	/* SRCONFIG - enable SR */
	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
	return 0;
}
/**
 * sr_disable() - Disables the smartreflex module.
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the smartreflex class driver to
 * disable a smartreflex module.
 */
void sr_disable(struct voltagedomain *voltdm)
{
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for sr_%s not found\n",
			__func__, voltdm->name);
		return;
	}

	/* Check if SR clocks are already disabled. If yes do nothing */
	if (pm_runtime_suspended(&sr->pdev->dev))
		return;

	/*
	 * Disable SR if only it is indeed enabled. Else just
	 * disable the clocks.
	 */
	if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
		switch (sr->ip_type) {
		case SR_TYPE_V1:
			sr_v1_disable(sr);
			break;
		case SR_TYPE_V2:
			sr_v2_disable(sr);
			break;
		}
	}

	pm_runtime_put_sync_suspend(&sr->pdev->dev);
}
/**
 * sr_register_class() - API to register a smartreflex class parameters.
 * @class_data:	The structure containing various sr class specific data.
 *
 * This API is to be called by the smartreflex class driver to register itself
 * with the smartreflex driver during init. Returns 0 on success else the
 * error value.
 */
int sr_register_class(struct omap_sr_class_data *class_data)
{
	struct omap_sr *sr_info, *tmp;

	if (!class_data) {
		/* Fix: message previously read "%s:," with a stray comma. */
		pr_warning("%s: Smartreflex class data passed is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (sr_class) {
		pr_warning("%s: Smartreflex class driver already registered\n",
			__func__);
		return -EBUSY;
	}

	sr_class = class_data;

	/*
	 * Call into late init to do intializations that require
	 * both sr driver and sr class driver to be initiallized.
	 *
	 * Fix: sr_late_init() deletes the entry from sr_list and frees it
	 * on failure, so the plain list_for_each_entry() used before would
	 * dereference freed memory -- a _safe iteration is required.
	 */
	list_for_each_entry_safe(sr_info, tmp, &sr_list, node)
		sr_late_init(sr_info);

	return 0;
}
/**
 * omap_sr_enable() -  API to enable SR clocks and to call into the
 *			registered smartreflex class enable API.
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the kernel in order to enable
 * a particular smartreflex module. This API will do the initial
 * configurations to turn on the smartreflex module and in turn call
 * into the registered smartreflex class enable API.
 */
void omap_sr_enable(struct voltagedomain *voltdm)
{
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for sr_%s not found\n",
			__func__, voltdm->name);
		return;
	}

	if (!sr->autocomp_active)
		return;

	if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
		/* Fix: adjacent literals previously concatenated to
		 * "notregistered" -- a space was missing. */
		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
			" registered\n", __func__);
		return;
	}

	sr_class->enable(voltdm);
}
/**
 * omap_sr_disable() - API to disable SR without resetting the voltage
 *			processor voltage
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the kernel in order to disable
 * a particular smartreflex module. This API will in turn call
 * into the registered smartreflex class disable API. This API will tell
 * the smartreflex class disable not to reset the VP voltage after
 * disabling smartreflex.
 */
void omap_sr_disable(struct voltagedomain *voltdm)
{
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for sr_%s not found\n",
			__func__, voltdm->name);
		return;
	}

	if (!sr->autocomp_active)
		return;

	if (!sr_class || !(sr_class->disable)) {
		/* Fix: adjacent literals previously concatenated to
		 * "notregistered" -- a space was missing. */
		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
			" registered\n", __func__);
		return;
	}

	sr_class->disable(voltdm, 0);
}
/**
 * omap_sr_disable_reset_volt() - API to disable SR and reset the
 *				voltage processor voltage
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the kernel in order to disable
 * a particular smartreflex module. This API will in turn call
 * into the registered smartreflex class disable API. This API will tell
 * the smartreflex class disable to reset the VP voltage after
 * disabling smartreflex.
 */
void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
{
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for sr_%s not found\n",
			__func__, voltdm->name);
		return;
	}

	if (!sr->autocomp_active)
		return;

	if (!sr_class || !(sr_class->disable)) {
		/* Fix: adjacent literals previously concatenated to
		 * "notregistered" -- a space was missing. */
		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
			" registered\n", __func__);
		return;
	}

	sr_class->disable(voltdm, 1);
}
/**
 * omap_sr_register_pmic() - API to register pmic specific info.
 * @pmic_data:	The structure containing pmic specific data.
 *
 * This API is to be called from the PMIC specific code to register with
 * smartreflex driver pmic specific info. Currently the only info required
 * is the smartreflex init on the PMIC side.
 */
void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data)
{
	if (!pmic_data) {
		/* Fix: adjacent literals previously concatenated to
		 * "structurewith" -- a space was missing. */
		pr_warning("%s: Trying to register NULL PMIC data structure"
			" with smartreflex\n", __func__);
		return;
	}

	sr_pmic_data = pmic_data;
}
/* PM debugfs entries to enable/disable smartreflex. */

/* debugfs read hook: report whether autocompensation is active. */
static int omap_sr_autocomp_show(void *data, u64 *val)
{
	struct omap_sr *sr_info = data;

	if (sr_info == NULL) {
		pr_warning("%s: omap_sr struct not found\n", __func__);
		return -EINVAL;
	}

	*val = sr_info->autocomp_active;

	return 0;
}
/* debugfs write hook: 1 starts autocompensation, 0 stops it. */
static int omap_sr_autocomp_store(void *data, u64 val)
{
	struct omap_sr *sr_info = data;

	if (sr_info == NULL) {
		pr_warning("%s: omap_sr struct not found\n", __func__);
		return -EINVAL;
	}

	/* Sanity check: only 0 and 1 are meaningful. */
	if (val > 1) {
		pr_warning("%s: Invalid argument %lld\n", __func__, val);
		return -EINVAL;
	}

	/* control enable/disable only if there is a delta in value */
	if (sr_info->autocomp_active == (bool)val)
		return 0;

	if (val)
		sr_start_vddautocomp(sr_info);
	else
		sr_stop_vddautocomp(sr_info);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
			omap_sr_autocomp_store, "%llu\n");
/*
 * Probe one SR instance: map its registers, populate the omap_sr
 * structure, run late init if a class driver is already present, and
 * create the per-domain debugfs entries.  Marked __init because the
 * driver is registered with platform_driver_probe().
 */
static int __init omap_sr_probe(struct platform_device *pdev)
{
	struct omap_sr *sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
	struct omap_sr_data *pdata = pdev->dev.platform_data;
	struct resource *mem, *irq;
	struct dentry *vdd_dbg_dir, *nvalue_dir;
	struct omap_volt_data *volt_data;
	int i, ret = 0;

	if (!sr_info) {
		dev_err(&pdev->dev, "%s: unable to allocate sr_info\n",
			__func__);
		return -ENOMEM;
	}

	if (!pdata) {
		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
		ret = -EINVAL;
		goto err_free_devinfo;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
		ret = -ENODEV;
		goto err_free_devinfo;
	}

	mem = request_mem_region(mem->start, resource_size(mem),
					dev_name(&pdev->dev));
	if (!mem) {
		dev_err(&pdev->dev, "%s: no mem region\n", __func__);
		ret = -EBUSY;
		goto err_free_devinfo;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_irq_safe(&pdev->dev);

	sr_info->pdev = pdev;
	sr_info->srid = pdev->id;
	sr_info->voltdm = pdata->voltdm;
	sr_info->nvalue_table = pdata->nvalue_table;
	sr_info->nvalue_count = pdata->nvalue_count;
	sr_info->senn_mod = pdata->senn_mod;
	sr_info->senp_mod = pdata->senp_mod;
	sr_info->autocomp_active = false;
	sr_info->ip_type = pdata->ip_type;
	sr_info->base = ioremap(mem->start, resource_size(mem));
	if (!sr_info->base) {
		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
		ret = -ENOMEM;
		goto err_release_region;
	}

	if (irq)
		sr_info->irq = irq->start;

	sr_set_clk_length(sr_info);
	sr_set_regfields(sr_info);

	list_add(&sr_info->node, &sr_list);

	/*
	 * Call into late init to do intializations that require
	 * both sr driver and sr class driver to be initiallized.
	 * On failure sr_late_init() already unmaps, releases the mem
	 * region and frees sr_info, so just propagate the error here.
	 */
	if (sr_class) {
		ret = sr_late_init(sr_info);
		if (ret) {
			pr_warning("%s: Error in SR late init\n", __func__);
			return ret;
		}
	}

	dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);

	/*
	 * If the voltage domain debugfs directory is not created, do
	 * not try to create rest of the debugfs entries.
	 */
	vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
	if (!vdd_dbg_dir) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
	if (IS_ERR(sr_info->dbg_dir)) {
		dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
			__func__);
		ret = PTR_ERR(sr_info->dbg_dir);
		goto err_iounmap;
	}

	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
			sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops);
	(void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
			&sr_info->err_weight);
	(void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
			&sr_info->err_maxlimit);
	(void) debugfs_create_x32("errminlimit", S_IRUGO, sr_info->dbg_dir,
			&sr_info->err_minlimit);

	nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
	if (IS_ERR(nvalue_dir)) {
		/* Fix: message literals previously concatenated to
		 * "directoryfor n-values" -- a space was missing. */
		dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
			" for n-values\n", __func__);
		ret = PTR_ERR(nvalue_dir);
		goto err_debugfs;
	}

	omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
	if (!volt_data) {
		/* Fix: message literals previously concatenated to
		 * "debugfsentries" -- a space was missing. */
		dev_warn(&pdev->dev, "%s: No Voltage table for the"
			" corresponding vdd vdd_%s. Cannot create debugfs"
			" entries for n-values\n",
			__func__, sr_info->voltdm->name);
		ret = -ENODATA;
		goto err_debugfs;
	}

	/* One writable x32 entry per known nominal voltage. */
	for (i = 0; i < sr_info->nvalue_count; i++) {
		char name[NVALUE_NAME_LEN + 1];

		snprintf(name, sizeof(name), "volt_%d",
			 volt_data[i].volt_nominal);
		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
				&(sr_info->nvalue_table[i].nvalue));
	}

	return ret;

err_debugfs:
	debugfs_remove_recursive(sr_info->dbg_dir);
err_iounmap:
	list_del(&sr_info->node);
	iounmap(sr_info->base);
err_release_region:
	release_mem_region(mem->start, resource_size(mem));
err_free_devinfo:
	kfree(sr_info);

	return ret;
}
/*
 * Tear down one SR instance: stop autocompensation, remove debugfs
 * entries, unlink from sr_list and release all resources.
 */
static int __devexit omap_sr_remove(struct platform_device *pdev)
{
	struct omap_sr_data *pdata = pdev->dev.platform_data;
	struct omap_sr *sr;
	struct resource *mem;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	sr = _sr_lookup(pdata->voltdm);
	if (IS_ERR(sr)) {
		dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
			__func__);
		return -EINVAL;
	}

	if (sr->autocomp_active)
		sr_stop_vddautocomp(sr);
	if (sr->dbg_dir)
		debugfs_remove_recursive(sr->dbg_dir);

	list_del(&sr->node);
	iounmap(sr->base);
	kfree(sr);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mem->start, resource_size(mem));

	return 0;
}
/*
 * .probe is supplied to platform_driver_probe() in sr_init() so the
 * __init probe routine can be discarded after boot.
 *
 * Fix: omap_sr_remove is marked __devexit (discarded when hotplug
 * support is compiled out), so it must be referenced through
 * __devexit_p() to avoid a dangling function pointer.
 */
static struct platform_driver smartreflex_driver = {
	.remove         = __devexit_p(omap_sr_remove),
	.driver		= {
		.name	= "smartreflex",
	},
};
/**
 * sr_init() - module init, registered as a late initcall
 *
 * Calls the PMIC-specific init hook if one was registered, then binds
 * the platform driver.  Returns the platform_driver_probe() result
 * (0 on success).
 */
static int __init sr_init(void)
{
	int ret;

	/*
	 * sr_init is a late init. If by then a pmic specific API is not
	 * registered either there is no need for anything to be done on
	 * the PMIC side or somebody has forgotten to register a PMIC
	 * handler. Warn for the second condition.
	 */
	if (sr_pmic_data && sr_pmic_data->sr_pmic_init)
		sr_pmic_data->sr_pmic_init();
	else
		pr_warn("%s: No PMIC hook to init smartreflex\n", __func__);

	/* pr_warn() is the non-deprecated spelling of pr_warning(). */
	ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe);
	if (ret)
		pr_err("%s: platform driver register failed for SR\n",
			__func__);

	return ret;
}
/* Module exit: unregister the driver registered by sr_init(). */
static void __exit sr_exit(void)
{
	platform_driver_unregister(&smartreflex_driver);
}

/* Late initcall: see the comment in sr_init() for why. */
late_initcall(sr_init);
module_exit(sr_exit);

MODULE_DESCRIPTION("OMAP Smartreflex Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
| gpl-2.0 |
Rlasalle15/android_kernel_moto_shamu | net/rfkill/rfkill-gpio.c | 2133 | 6213 | /*
* Copyright (c) 2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rfkill.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/rfkill-gpio.h>
/* Cached state of the optional power clock; starts UNSPECIFIED so the
 * first enable/disable transition always takes effect. */
enum rfkill_gpio_clk_state {
	UNSPECIFIED = 0,
	PWR_ENABLED,
	PWR_DISABLED
};

/* Record the clock state from the rfkill "blocked" flag
 * (!blocked => clock enabled), plus query helpers for both directions. */
#define PWR_CLK_SET(_RF, _EN) \
	((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED))
#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED)
#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED)

/* Per-device state.  reset_name/shutdown_name are the labels passed to
 * gpio_request(); they are allocated in probe and kfree()d in remove. */
struct rfkill_gpio_data {
	struct rfkill_gpio_platform_data	*pdata;
	struct rfkill				*rfkill_dev;
	char					*reset_name;
	char					*shutdown_name;
	enum rfkill_gpio_clk_state		pwr_clk_enabled;
	struct clk				*pwr_clk;
};
/*
 * rfkill .set_block callback.  The ordering is deliberate: when
 * blocking, the GPIOs are dropped before the clock is gated; when
 * unblocking, the clock is ungated before the GPIOs are raised so the
 * radio comes out of reset with a running clock.  The cached clock
 * state avoids unbalanced clk_enable()/clk_disable() calls.
 * Always returns 0.
 */
static int rfkill_gpio_set_power(void *data, bool blocked)
{
	struct rfkill_gpio_data *rfkill = data;

	if (blocked) {
		if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
			gpio_direction_output(rfkill->pdata->shutdown_gpio, 0);
		if (gpio_is_valid(rfkill->pdata->reset_gpio))
			gpio_direction_output(rfkill->pdata->reset_gpio, 0);
		if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
			clk_disable(rfkill->pwr_clk);
	} else {
		if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill))
			clk_enable(rfkill->pwr_clk);
		if (gpio_is_valid(rfkill->pdata->reset_gpio))
			gpio_direction_output(rfkill->pdata->reset_gpio, 1);
		if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
			gpio_direction_output(rfkill->pdata->shutdown_gpio, 1);
	}

	/* Remember the new clock state only when a clock exists. */
	if (rfkill->pwr_clk)
		PWR_CLK_SET(rfkill, blocked);

	return 0;
}

static const struct rfkill_ops rfkill_gpio_ops = {
	.set_block = rfkill_gpio_set_power,
};
/**
 * rfkill_gpio_probe() - set up one GPIO-controlled rfkill switch
 * @pdev: platform device carrying struct rfkill_gpio_platform_data
 *
 * Validates platform data, allocates per-device state, requests the
 * optional clock and GPIOs, and registers the rfkill device.  All
 * acquired resources are released in reverse order on failure via the
 * goto ladder at the bottom.
 *
 * Fix vs. original: the snprintf() size arguments were one byte short
 * (len + 6 and len + 9), silently truncating the last character of the
 * "_reset"/"_shutdown" suffixes — snprintf() reserves one byte of the
 * given size for the terminating NUL.  The buffers are len + 7 and
 * len + 10 bytes, so pass the full buffer size.
 */
static int rfkill_gpio_probe(struct platform_device *pdev)
{
	struct rfkill_gpio_data *rfkill;
	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
	int ret = 0;
	int len = 0;

	if (!pdata) {
		pr_warn("%s: No platform data specified\n", __func__);
		return -EINVAL;
	}

	/* make sure at-least one of the GPIO is defined and that
	 * a name is specified for this instance */
	if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
		!gpio_is_valid(pdata->shutdown_gpio))) {
		pr_warn("%s: invalid platform data\n", __func__);
		return -EINVAL;
	}

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return -ENOMEM;

	if (pdata->gpio_runtime_setup) {
		ret = pdata->gpio_runtime_setup(pdev);
		if (ret) {
			pr_warn("%s: can't set up gpio\n", __func__);
			goto fail_alloc;
		}
	}

	rfkill->pdata = pdata;

	/* "<name>_reset" needs len + 6 chars + NUL, "<name>_shutdown"
	 * needs len + 9 chars + NUL. */
	len = strlen(pdata->name);
	rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
	if (!rfkill->reset_name) {
		ret = -ENOMEM;
		goto fail_alloc;
	}

	rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
	if (!rfkill->shutdown_name) {
		ret = -ENOMEM;
		goto fail_reset_name;
	}

	snprintf(rfkill->reset_name, len + 7, "%s_reset", pdata->name);
	snprintf(rfkill->shutdown_name, len + 10, "%s_shutdown", pdata->name);

	if (pdata->power_clk_name) {
		rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
		if (IS_ERR(rfkill->pwr_clk)) {
			pr_warn("%s: can't find pwr_clk.\n", __func__);
			ret = PTR_ERR(rfkill->pwr_clk);
			goto fail_shutdown_name;
		}
	}

	if (gpio_is_valid(pdata->reset_gpio)) {
		ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
		if (ret) {
			pr_warn("%s: failed to get reset gpio.\n", __func__);
			goto fail_clock;
		}
	}

	if (gpio_is_valid(pdata->shutdown_gpio)) {
		ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
		if (ret) {
			pr_warn("%s: failed to get shutdown gpio.\n", __func__);
			goto fail_reset;
		}
	}

	rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
					  &rfkill_gpio_ops, rfkill);
	if (!rfkill->rfkill_dev) {
		ret = -ENOMEM;
		goto fail_shutdown;
	}

	ret = rfkill_register(rfkill->rfkill_dev);
	if (ret < 0)
		goto fail_rfkill;

	platform_set_drvdata(pdev, rfkill);

	dev_info(&pdev->dev, "%s device registered.\n", pdata->name);

	return 0;

fail_rfkill:
	rfkill_destroy(rfkill->rfkill_dev);
fail_shutdown:
	if (gpio_is_valid(pdata->shutdown_gpio))
		gpio_free(pdata->shutdown_gpio);
fail_reset:
	if (gpio_is_valid(pdata->reset_gpio))
		gpio_free(pdata->reset_gpio);
fail_clock:
	if (rfkill->pwr_clk)
		clk_put(rfkill->pwr_clk);
fail_shutdown_name:
	kfree(rfkill->shutdown_name);
fail_reset_name:
	kfree(rfkill->reset_name);
fail_alloc:
	kfree(rfkill);

	return ret;
}
/*
 * Tear down in reverse order of probe: runtime hook, rfkill device,
 * GPIOs, clock (disabling it first if our cache says it is running),
 * then the name buffers and the state struct.  Always returns 0.
 */
static int rfkill_gpio_remove(struct platform_device *pdev)
{
	struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
	struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;

	if (pdata->gpio_runtime_close)
		pdata->gpio_runtime_close(pdev);
	rfkill_unregister(rfkill->rfkill_dev);
	rfkill_destroy(rfkill->rfkill_dev);
	if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
		gpio_free(rfkill->pdata->shutdown_gpio);
	if (gpio_is_valid(rfkill->pdata->reset_gpio))
		gpio_free(rfkill->pdata->reset_gpio);
	/* Balance a clk_enable() left over from an unblocked state. */
	if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
		clk_disable(rfkill->pwr_clk);
	if (rfkill->pwr_clk)
		clk_put(rfkill->pwr_clk);
	kfree(rfkill->shutdown_name);
	kfree(rfkill->reset_name);
	kfree(rfkill);

	return 0;
}
/* Platform driver glue; module_platform_driver() generates the
 * module init/exit that registers and unregisters it. */
static struct platform_driver rfkill_gpio_driver = {
	.probe = rfkill_gpio_probe,
	.remove = rfkill_gpio_remove,
	.driver = {
		.name = "rfkill_gpio",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(rfkill_gpio_driver);

MODULE_DESCRIPTION("gpio rfkill");
MODULE_AUTHOR("NVIDIA");
MODULE_LICENSE("GPL");
| gpl-2.0 |
puppies/fl2440 | linux-3.10.33/sound/soc/codecs/wm8955.c | 2133 | 28803 | /*
* wm8955.c -- WM8955 ALSA SoC Audio driver
*
* Copyright 2009 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <sound/wm8955.h>
#include "wm8955.h"
/* Regulator supplies required by the WM8955. */
#define WM8955_NUM_SUPPLIES 4
static const char *wm8955_supply_names[WM8955_NUM_SUPPLIES] = {
	"DCVDD",
	"DBVDD",
	"HPVDD",
	"AVDD",
};

/* codec private data */
struct wm8955_priv {
	struct regmap *regmap;		/* register map / cache backend */
	unsigned int mclk_rate;		/* MCLK in Hz, after optional /2 */
	int deemph;			/* deemphasis enable from ALSA control */
	int fs;				/* current sample rate in Hz, 0 when idle */
	struct regulator_bulk_data supplies[WM8955_NUM_SUPPLIES];
};
/* Hardware reset defaults for the cached registers; registers not
 * listed are volatile or unused. */
static const struct reg_default wm8955_reg_defaults[] = {
	{ 2, 0x0079 },     /* R2 - LOUT1 volume */
	{ 3, 0x0079 },     /* R3 - ROUT1 volume */
	{ 5, 0x0008 },     /* R5 - DAC Control */
	{ 7, 0x000A },     /* R7 - Audio Interface */
	{ 8, 0x0000 },     /* R8 - Sample Rate */
	{ 10, 0x00FF },    /* R10 - Left DAC volume */
	{ 11, 0x00FF },    /* R11 - Right DAC volume */
	{ 12, 0x000F },    /* R12 - Bass control */
	{ 13, 0x000F },    /* R13 - Treble control */
	{ 23, 0x00C1 },    /* R23 - Additional control (1) */
	{ 24, 0x0000 },    /* R24 - Additional control (2) */
	{ 25, 0x0000 },    /* R25 - Power Management (1) */
	{ 26, 0x0000 },    /* R26 - Power Management (2) */
	{ 27, 0x0000 },    /* R27 - Additional Control (3) */
	{ 34, 0x0050 },    /* R34 - Left out Mix (1) */
	{ 35, 0x0050 },    /* R35 - Left out Mix (2) */
	{ 36, 0x0050 },    /* R36 - Right out Mix (1) */
	{ 37, 0x0050 },    /* R37 - Right Out Mix (2) */
	{ 38, 0x0050 },    /* R38 - Mono out Mix (1) */
	{ 39, 0x0050 },    /* R39 - Mono out Mix (2) */
	{ 40, 0x0079 },    /* R40 - LOUT2 volume */
	{ 41, 0x0079 },    /* R41 - ROUT2 volume */
	{ 42, 0x0079 },    /* R42 - MONOOUT volume */
	{ 43, 0x0000 },    /* R43 - Clocking / PLL */
	{ 44, 0x0103 },    /* R44 - PLL Control 1 */
	{ 45, 0x0024 },    /* R45 - PLL Control 2 */
	{ 46, 0x01BA },    /* R46 - PLL Control 3 */
	{ 59, 0x0000 },    /* R59 - PLL Control 4 */
};
/* regmap writeable_reg callback: whitelist of registers that accept
 * writes; any address not listed (reserved space) is rejected. */
static bool wm8955_writeable(struct device *dev, unsigned int reg)
{
	return reg == WM8955_LOUT1_VOLUME ||
	       reg == WM8955_ROUT1_VOLUME ||
	       reg == WM8955_DAC_CONTROL ||
	       reg == WM8955_AUDIO_INTERFACE ||
	       reg == WM8955_SAMPLE_RATE ||
	       reg == WM8955_LEFT_DAC_VOLUME ||
	       reg == WM8955_RIGHT_DAC_VOLUME ||
	       reg == WM8955_BASS_CONTROL ||
	       reg == WM8955_TREBLE_CONTROL ||
	       reg == WM8955_RESET ||
	       reg == WM8955_ADDITIONAL_CONTROL_1 ||
	       reg == WM8955_ADDITIONAL_CONTROL_2 ||
	       reg == WM8955_POWER_MANAGEMENT_1 ||
	       reg == WM8955_POWER_MANAGEMENT_2 ||
	       reg == WM8955_ADDITIONAL_CONTROL_3 ||
	       reg == WM8955_LEFT_OUT_MIX_1 ||
	       reg == WM8955_LEFT_OUT_MIX_2 ||
	       reg == WM8955_RIGHT_OUT_MIX_1 ||
	       reg == WM8955_RIGHT_OUT_MIX_2 ||
	       reg == WM8955_MONO_OUT_MIX_1 ||
	       reg == WM8955_MONO_OUT_MIX_2 ||
	       reg == WM8955_LOUT2_VOLUME ||
	       reg == WM8955_ROUT2_VOLUME ||
	       reg == WM8955_MONOOUT_VOLUME ||
	       reg == WM8955_CLOCKING_PLL ||
	       reg == WM8955_PLL_CONTROL_1 ||
	       reg == WM8955_PLL_CONTROL_2 ||
	       reg == WM8955_PLL_CONTROL_3 ||
	       reg == WM8955_PLL_CONTROL_4;
}
/* regmap volatile_reg callback: only the reset register must bypass
 * the cache, everything else reads back a stable value. */
static bool wm8955_volatile(struct device *dev, unsigned int reg)
{
	return reg == WM8955_RESET;
}
/* Writing any value to the reset register restores hardware defaults. */
static int wm8955_reset(struct snd_soc_codec *codec)
{
	return snd_soc_write(codec, WM8955_RESET, 0);
}

/* PLL configuration: integer divider N, 22-bit fraction K and the
 * optional extra divide-by-2 on the PLL output. */
struct pll_factors {
	int n;
	int k;
	int outdiv;
};

/* The size in bits of the FLL divide multiplied by 10
 * to allow rounding later */
#define FIXED_FLL_SIZE ((1 << 22) * 10)
/*
 * Compute PLL dividers producing Fout (the desired SYSCLK) from Fref
 * (MCLK).  Fills @pll with the integer part N, the 22-bit fraction K
 * and the output divider selector; always returns 0.
 *
 * NOTE(review): Fref/Fout are ints but printed with %u in the dev_dbg
 * below — harmless for the positive rates used here, but -Wformat
 * would flag it; confirm before enabling format warnings as errors.
 */
static int wm8995_pll_factors(struct device *dev,
			      int Fref, int Fout, struct pll_factors *pll)
{
	u64 Kpart;
	unsigned int K, Ndiv, Nmod, target;

	dev_dbg(dev, "Fref=%u Fout=%u\n", Fref, Fout);

	/* The oscilator should run at should be 90-100MHz, and
	 * there's a divide by 4 plus an optional divide by 2 in the
	 * output path to generate the system clock.  The clock table
	 * is sortd so we should always generate a suitable target. */
	target = Fout * 4;
	if (target < 90000000) {
		pll->outdiv = 1;
		target *= 2;
	} else {
		pll->outdiv = 0;
	}

	WARN_ON(target < 90000000 || target > 100000000);

	dev_dbg(dev, "Fvco=%dHz\n", target);

	/* Now, calculate N.K */
	Ndiv = target / Fref;

	pll->n = Ndiv;
	Nmod = target % Fref;
	dev_dbg(dev, "Nmod=%d\n", Nmod);

	/* Calculate fractional part - scale up so we can round. */
	Kpart = FIXED_FLL_SIZE * (long long)Nmod;

	do_div(Kpart, Fref);

	K = Kpart & 0xFFFFFFFF;

	/* Round the fraction to the nearest value of K/10. */
	if ((K % 10) >= 5)
		K += 5;

	/* Move down to proper range now rounding is done */
	pll->k = K / 10;

	dev_dbg(dev, "N=%x K=%x OUTDIV=%x\n", pll->n, pll->k, pll->outdiv);

	return 0;
}
/* Lookup table specifying SRATE (table 25 in datasheet); some of the
 * output frequencies have been rounded to the standard frequencies
 * they are intended to match where the error is slight.
 *
 * Fix vs. original: the first three 44.1kHz-family rows (8018, 11025
 * and 22050 Hz) carried an mclk of 12289600, which is not a valid
 * audio master clock.  They belong to the 11.2896MHz (256 x 44.1kHz)
 * family, matching the 44100/88200 rows below them and the datasheet
 * table. */
static struct {
	int mclk;
	int fs;
	int usb;
	int sr;
} clock_cfgs[] = {
	{ 18432000,  8000, 0,  3, },
	{ 18432000, 12000, 0,  9, },
	{ 18432000, 16000, 0, 11, },
	{ 18432000, 24000, 0, 29, },
	{ 18432000, 32000, 0, 13, },
	{ 18432000, 48000, 0,  1, },
	{ 18432000, 96000, 0, 15, },

	{ 16934400,  8018, 0, 19, },
	{ 16934400, 11025, 0, 25, },
	{ 16934400, 22050, 0, 27, },
	{ 16934400, 44100, 0, 17, },
	{ 16934400, 88200, 0, 31, },

	{ 12000000,  8000, 1,  2, },
	{ 12000000, 11025, 1, 25, },
	{ 12000000, 12000, 1,  8, },
	{ 12000000, 16000, 1, 10, },
	{ 12000000, 22050, 1, 27, },
	{ 12000000, 24000, 1, 28, },
	{ 12000000, 32000, 1, 12, },
	{ 12000000, 44100, 1, 17, },
	{ 12000000, 48000, 1,  0, },
	{ 12000000, 88200, 1, 31, },
	{ 12000000, 96000, 1, 14, },

	{ 12288000,  8000, 0,  2, },
	{ 12288000, 12000, 0,  8, },
	{ 12288000, 16000, 0, 10, },
	{ 12288000, 24000, 0, 28, },
	{ 12288000, 32000, 0, 12, },
	{ 12288000, 48000, 0,  0, },
	{ 12288000, 96000, 0, 14, },

	{ 11289600,  8018, 0, 18, },
	{ 11289600, 11025, 0, 24, },
	{ 11289600, 22050, 0, 26, },
	{ 11289600, 44100, 0, 16, },
	{ 11289600, 88200, 0, 31, },
};
/*
 * Program SRATE and, when MCLK cannot directly supply the current
 * sample rate, configure and start the PLL to synthesize a suitable
 * master clock.  Returns 0 on success, -EINVAL for an unsupported
 * rate or an impossible PLL configuration.
 */
static int wm8955_configure_clocking(struct snd_soc_codec *codec)
{
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
	int i, ret, val;
	int clocking = 0;
	int srate = 0;
	int sr = -1;
	struct pll_factors pll;

	/* If we're not running a sample rate currently just pick one */
	if (wm8955->fs == 0)
		wm8955->fs = 8000;

	/* Can we generate an exact output? */
	for (i = 0; i < ARRAY_SIZE(clock_cfgs); i++) {
		if (wm8955->fs != clock_cfgs[i].fs)
			continue;
		sr = i;
		/* sr remembers the last matching rate even when the
		 * MCLK doesn't match; the loop only breaks on an exact
		 * MCLK match. */
		if (wm8955->mclk_rate == clock_cfgs[i].mclk)
			break;
	}

	/* We should never get here with an unsupported sample rate */
	if (sr == -1) {
		dev_err(codec->dev, "Sample rate %dHz unsupported\n",
			wm8955->fs);
		WARN_ON(sr == -1);
		return -EINVAL;
	}

	if (i == ARRAY_SIZE(clock_cfgs)) {
		/* If we can't generate the right clock from MCLK then
		 * we should configure the PLL to supply us with an
		 * appropriate clock.
		 */
		clocking |= WM8955_MCLKSEL;

		/* Use the last divider configuration we saw for the
		 * sample rate. */
		ret = wm8995_pll_factors(codec->dev, wm8955->mclk_rate,
					 clock_cfgs[sr].mclk, &pll);
		if (ret != 0) {
			dev_err(codec->dev,
				"Unable to generate %dHz from %dHz MCLK\n",
				wm8955->fs, wm8955->mclk_rate);
			return -EINVAL;
		}

		/* N and the top bits of K share PLL Control 1; the rest
		 * of K spans Control 2 and 3. */
		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_1,
				    WM8955_N_MASK | WM8955_K_21_18_MASK,
				    (pll.n << WM8955_N_SHIFT) |
				    pll.k >> 18);
		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
				    WM8955_K_17_9_MASK,
				    (pll.k >> 9) & WM8955_K_17_9_MASK);
		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
				    WM8955_K_8_0_MASK,
				    pll.k & WM8955_K_8_0_MASK);
		/* Fractional mode only when there is a fraction. */
		if (pll.k)
			snd_soc_update_bits(codec, WM8955_PLL_CONTROL_4,
					    WM8955_KEN, WM8955_KEN);
		else
			snd_soc_update_bits(codec, WM8955_PLL_CONTROL_4,
					    WM8955_KEN, 0);

		if (pll.outdiv)
			val = WM8955_PLL_RB | WM8955_PLLOUTDIV2;
		else
			val = WM8955_PLL_RB;

		/* Now start the PLL running */
		snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
				    WM8955_PLL_RB | WM8955_PLLOUTDIV2, val);
		snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
				    WM8955_PLLEN, WM8955_PLLEN);
	}

	srate = clock_cfgs[sr].usb | (clock_cfgs[sr].sr << WM8955_SR_SHIFT);

	snd_soc_update_bits(codec, WM8955_SAMPLE_RATE,
			    WM8955_USB | WM8955_SR_MASK, srate);
	snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
			    WM8955_MCLKSEL, clocking);

	return 0;
}
/*
 * DAPM event handler for the SYSCLK supply widget.  Clocks are always
 * stopped first to avoid misclocking during reconfiguration, then
 * reprogrammed on power-up.  Returns 0 or a negative error code.
 */
static int wm8955_sysclk(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	int ret = 0;

	/* Always disable the clocks - if we're doing reconfiguration this
	 * avoids misclocking.
	 */
	snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
			    WM8955_DIGENB, 0);
	snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
			    WM8955_PLL_RB | WM8955_PLLEN, 0);

	switch (event) {
	case SND_SOC_DAPM_POST_PMD:
		/* Nothing more to do: clocks already stopped above. */
		break;
	case SND_SOC_DAPM_PRE_PMU:
		ret = wm8955_configure_clocking(codec);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int deemph_settings[] = { 0, 32000, 44100, 48000 };
/*
 * Apply the cached deemphasis setting.  Deemphasis is only supported
 * at a few sample rates, so when enabled pick the supported rate
 * closest to the current one (ties resolve to the earlier table
 * entry).  Returns the snd_soc_update_bits() result.
 */
static int wm8955_set_deemph(struct snd_soc_codec *codec)
{
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
	int val = 0;

	if (wm8955->deemph) {
		int sel = 1;
		int i;

		for (i = sel + 1; i < ARRAY_SIZE(deemph_settings); i++)
			if (abs(deemph_settings[i] - wm8955->fs) <
			    abs(deemph_settings[sel] - wm8955->fs))
				sel = i;
		val = sel << WM8955_DEEMPH_SHIFT;
	}

	dev_dbg(codec->dev, "Set deemphasis %d\n", val);

	return snd_soc_update_bits(codec, WM8955_DAC_CONTROL,
				   WM8955_DEEMPH_MASK, val);
}
/* ALSA control get: report the cached deemphasis enable flag. */
static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct wm8955_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.enumerated.item[0] = priv->deemph;
	return 0;
}
/* ALSA control put: cache the new deemphasis flag and program it.
 * Rejects anything other than 0/1 for this boolean control. */
static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct wm8955_priv *priv = snd_soc_codec_get_drvdata(codec);
	int deemph = ucontrol->value.enumerated.item[0];

	if (deemph > 1)
		return -EINVAL;

	priv->deemph = deemph;
	return wm8955_set_deemph(codec);
}
/* Enum texts, dB scales and the mixer/control tables exported to
 * userspace via ALSA. */
static const char *bass_mode_text[] = {
	"Linear", "Adaptive",
};

static const struct soc_enum bass_mode =
	SOC_ENUM_SINGLE(WM8955_BASS_CONTROL, 7, 2, bass_mode_text);

static const char *bass_cutoff_text[] = {
	"Low", "High"
};

static const struct soc_enum bass_cutoff =
	SOC_ENUM_SINGLE(WM8955_BASS_CONTROL, 6, 2, bass_cutoff_text);

static const char *treble_cutoff_text[] = {
	"High", "Low"
};

static const struct soc_enum treble_cutoff =
	SOC_ENUM_SINGLE(WM8955_TREBLE_CONTROL, 6, 2, treble_cutoff_text);

/* dB ranges from the datasheet: min (x0.01dB), step, mute-at-min flag. */
static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(atten_tlv, -600, 600, 0);
static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(mono_tlv, -2100, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(treble_tlv, -1200, 150, 1);

static const struct snd_kcontrol_new wm8955_snd_controls[] = {
SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8955_LEFT_DAC_VOLUME,
		 WM8955_RIGHT_DAC_VOLUME, 0, 255, 0, digital_tlv),
SOC_SINGLE_TLV("Playback Attenuation Volume", WM8955_DAC_CONTROL, 7, 1, 1,
	       atten_tlv),
SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
		    wm8955_get_deemph, wm8955_put_deemph),

SOC_ENUM("Bass Mode", bass_mode),
SOC_ENUM("Bass Cutoff", bass_cutoff),
SOC_SINGLE("Bass Volume", WM8955_BASS_CONTROL, 0, 15, 1),

SOC_ENUM("Treble Cutoff", treble_cutoff),
SOC_SINGLE_TLV("Treble Volume", WM8955_TREBLE_CONTROL, 0, 14, 1, treble_tlv),

SOC_SINGLE_TLV("Left Bypass Volume", WM8955_LEFT_OUT_MIX_1, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Left Mono Volume", WM8955_LEFT_OUT_MIX_2, 4, 7, 1,
	       bypass_tlv),

SOC_SINGLE_TLV("Right Mono Volume", WM8955_RIGHT_OUT_MIX_1, 4, 7, 1,
	       bypass_tlv),
SOC_SINGLE_TLV("Right Bypass Volume", WM8955_RIGHT_OUT_MIX_2, 4, 7, 1,
	       bypass_tlv),

/* Not a stereo pair so they line up with the DAPM switches */
SOC_SINGLE_TLV("Mono Left Bypass Volume", WM8955_MONO_OUT_MIX_1, 4, 7, 1,
	       mono_tlv),
SOC_SINGLE_TLV("Mono Right Bypass Volume", WM8955_MONO_OUT_MIX_2, 4, 7, 1,
	       mono_tlv),

SOC_DOUBLE_R_TLV("Headphone Volume", WM8955_LOUT1_VOLUME,
		 WM8955_ROUT1_VOLUME, 0, 127, 0, out_tlv),
SOC_DOUBLE_R("Headphone ZC Switch", WM8955_LOUT1_VOLUME,
	     WM8955_ROUT1_VOLUME, 7, 1, 0),

SOC_DOUBLE_R_TLV("Speaker Volume", WM8955_LOUT2_VOLUME,
		 WM8955_ROUT2_VOLUME, 0, 127, 0, out_tlv),
SOC_DOUBLE_R("Speaker ZC Switch", WM8955_LOUT2_VOLUME,
	     WM8955_ROUT2_VOLUME, 7, 1, 0),

SOC_SINGLE_TLV("Mono Volume", WM8955_MONOOUT_VOLUME, 0, 127, 0, out_tlv),
SOC_SINGLE("Mono ZC Switch", WM8955_MONOOUT_VOLUME, 7, 1, 0),
};

/* DAPM mixer input switches for the left, right and mono mixers. */
static const struct snd_kcontrol_new lmixer[] = {
SOC_DAPM_SINGLE("Playback Switch", WM8955_LEFT_OUT_MIX_1, 8, 1, 0),
SOC_DAPM_SINGLE("Bypass Switch", WM8955_LEFT_OUT_MIX_1, 7, 1, 0),
SOC_DAPM_SINGLE("Right Playback Switch", WM8955_LEFT_OUT_MIX_2, 8, 1, 0),
SOC_DAPM_SINGLE("Mono Switch", WM8955_LEFT_OUT_MIX_2, 7, 1, 0),
};

static const struct snd_kcontrol_new rmixer[] = {
SOC_DAPM_SINGLE("Left Playback Switch", WM8955_RIGHT_OUT_MIX_1, 8, 1, 0),
SOC_DAPM_SINGLE("Mono Switch", WM8955_RIGHT_OUT_MIX_1, 7, 1, 0),
SOC_DAPM_SINGLE("Playback Switch", WM8955_RIGHT_OUT_MIX_2, 8, 1, 0),
SOC_DAPM_SINGLE("Bypass Switch", WM8955_RIGHT_OUT_MIX_2, 7, 1, 0),
};

static const struct snd_kcontrol_new mmixer[] = {
SOC_DAPM_SINGLE("Left Playback Switch", WM8955_MONO_OUT_MIX_1, 8, 1, 0),
SOC_DAPM_SINGLE("Left Bypass Switch", WM8955_MONO_OUT_MIX_1, 7, 1, 0),
SOC_DAPM_SINGLE("Right Playback Switch", WM8955_MONO_OUT_MIX_2, 8, 1, 0),
SOC_DAPM_SINGLE("Right Bypass Switch", WM8955_MONO_OUT_MIX_2, 7, 1, 0),
};
/* DAPM widgets: inputs, supplies, DACs, output PGAs, the three mixers
 * and the physical output pins. */
static const struct snd_soc_dapm_widget wm8955_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("MONOIN-"),
SND_SOC_DAPM_INPUT("MONOIN+"),
SND_SOC_DAPM_INPUT("LINEINR"),
SND_SOC_DAPM_INPUT("LINEINL"),

SND_SOC_DAPM_PGA("Mono Input", SND_SOC_NOPM, 0, 0, NULL, 0),

/* DIGENB is active-low (invert flag set); events reconfigure clocks. */
SND_SOC_DAPM_SUPPLY("SYSCLK", WM8955_POWER_MANAGEMENT_1, 0, 1, wm8955_sysclk,
		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("TSDEN", WM8955_ADDITIONAL_CONTROL_1, 8, 0, NULL, 0),

SND_SOC_DAPM_DAC("DACL", "Playback", WM8955_POWER_MANAGEMENT_2, 8, 0),
SND_SOC_DAPM_DAC("DACR", "Playback", WM8955_POWER_MANAGEMENT_2, 7, 0),

SND_SOC_DAPM_PGA("LOUT1 PGA", WM8955_POWER_MANAGEMENT_2, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("ROUT1 PGA", WM8955_POWER_MANAGEMENT_2, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("LOUT2 PGA", WM8955_POWER_MANAGEMENT_2, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA("ROUT2 PGA", WM8955_POWER_MANAGEMENT_2, 3, 0, NULL, 0),
SND_SOC_DAPM_PGA("MOUT PGA", WM8955_POWER_MANAGEMENT_2, 2, 0, NULL, 0),
SND_SOC_DAPM_PGA("OUT3 PGA", WM8955_POWER_MANAGEMENT_2, 1, 0, NULL, 0),

/* The names are chosen to make the control names nice */
SND_SOC_DAPM_MIXER("Left", SND_SOC_NOPM, 0, 0,
		   lmixer, ARRAY_SIZE(lmixer)),
SND_SOC_DAPM_MIXER("Right", SND_SOC_NOPM, 0, 0,
		   rmixer, ARRAY_SIZE(rmixer)),
SND_SOC_DAPM_MIXER("Mono", SND_SOC_NOPM, 0, 0,
		   mmixer, ARRAY_SIZE(mmixer)),

SND_SOC_DAPM_OUTPUT("LOUT1"),
SND_SOC_DAPM_OUTPUT("ROUT1"),
SND_SOC_DAPM_OUTPUT("LOUT2"),
SND_SOC_DAPM_OUTPUT("ROUT2"),
SND_SOC_DAPM_OUTPUT("MONOOUT"),
SND_SOC_DAPM_OUTPUT("OUT3"),
};

/* Audio paths connecting the widgets above. */
static const struct snd_soc_dapm_route wm8955_dapm_routes[] = {
	{ "DACL", NULL, "SYSCLK" },
	{ "DACR", NULL, "SYSCLK" },

	{ "Mono Input", NULL, "MONOIN-" },
	{ "Mono Input", NULL, "MONOIN+" },

	{ "Left", "Playback Switch", "DACL" },
	{ "Left", "Right Playback Switch", "DACR" },
	{ "Left", "Bypass Switch", "LINEINL" },
	{ "Left", "Mono Switch", "Mono Input" },

	{ "Right", "Playback Switch", "DACR" },
	{ "Right", "Left Playback Switch", "DACL" },
	{ "Right", "Bypass Switch", "LINEINR" },
	{ "Right", "Mono Switch", "Mono Input" },

	{ "Mono", "Left Playback Switch", "DACL" },
	{ "Mono", "Right Playback Switch", "DACR" },
	{ "Mono", "Left Bypass Switch", "LINEINL" },
	{ "Mono", "Right Bypass Switch", "LINEINR" },

	{ "LOUT1 PGA", NULL, "Left" },
	{ "LOUT1", NULL, "TSDEN" },
	{ "LOUT1", NULL, "LOUT1 PGA" },

	{ "ROUT1 PGA", NULL, "Right" },
	{ "ROUT1", NULL, "TSDEN" },
	{ "ROUT1", NULL, "ROUT1 PGA" },

	{ "LOUT2 PGA", NULL, "Left" },
	{ "LOUT2", NULL, "TSDEN" },
	{ "LOUT2", NULL, "LOUT2 PGA" },

	{ "ROUT2 PGA", NULL, "Right" },
	{ "ROUT2", NULL, "TSDEN" },
	{ "ROUT2", NULL, "ROUT2 PGA" },

	{ "MOUT PGA", NULL, "Mono" },
	{ "MONOOUT", NULL, "MOUT PGA" },

	/* OUT3 not currently implemented */
	{ "OUT3", NULL, "OUT3 PGA" },
};
/*
 * DAI hw_params: program the word length, remember the sample rate for
 * the clocking/deemphasis code, and reconfigure the clock tree
 * immediately if the chip is already clocked.  Returns 0 or a negative
 * error code.
 */
static int wm8955_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
	int ret;
	int wl;

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		wl = 0;
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		wl = 0x4;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		wl = 0x8;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		wl = 0xc;
		break;
	default:
		return -EINVAL;
	}
	snd_soc_update_bits(codec, WM8955_AUDIO_INTERFACE,
			    WM8955_WL_MASK, wl);

	wm8955->fs = params_rate(params);
	/* Deemphasis choice depends on the sample rate just stored. */
	wm8955_set_deemph(codec);

	/* If the chip is clocked then disable the clocks and force a
	 * reconfiguration, otherwise DAPM will power up the
	 * clocks for us later. */
	ret = snd_soc_read(codec, WM8955_POWER_MANAGEMENT_1);
	if (ret < 0)
		return ret;
	if (ret & WM8955_DIGENB) {
		snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
				    WM8955_DIGENB, 0);
		snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
				    WM8955_PLL_RB | WM8955_PLLEN, 0);

		wm8955_configure_clocking(codec);
	}

	return 0;
}
/*
 * DAI set_sysclk: record the MCLK rate, enabling the on-chip MCLK/2
 * divider for rates above 15MHz (the cached mclk_rate is the rate
 * after division).  Only WM8955_CLK_MCLK is accepted.
 */
static int wm8955_set_sysclk(struct snd_soc_dai *dai, int clk_id,
			     unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8955_priv *priv = snd_soc_codec_get_drvdata(codec);
	int div;

	switch (clk_id) {
	case WM8955_CLK_MCLK:
		if (freq > 15000000) {
			/* Note: freq itself is halved here, so the
			 * dev_dbg below reports the divided rate. */
			priv->mclk_rate = freq /= 2;
			div = WM8955_MCLKDIV2;
		} else {
			priv->mclk_rate = freq;
			div = 0;
		}

		snd_soc_update_bits(codec, WM8955_SAMPLE_RATE,
				    WM8955_MCLKDIV2, div);
		break;

	default:
		return -EINVAL;
	}

	dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);

	return 0;
}
/*
 * DAI set_fmt: translate the master/slave, format and clock-inversion
 * flags into the audio interface register.  Returns 0 or -EINVAL for
 * unsupported combinations (e.g. frame inversion in DSP modes).
 */
static int wm8955_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct snd_soc_codec *codec = dai->codec;
	u16 aif = 0;

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		aif |= WM8955_MS;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_B:
		aif |= WM8955_LRP;
		/* fall through: DSP_B is DSP_A plus inverted LRP */
	case SND_SOC_DAIFMT_DSP_A:
		aif |= 0x3;
		break;
	case SND_SOC_DAIFMT_I2S:
		aif |= 0x2;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		aif |= 0x1;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		/* frame inversion not valid for DSP modes */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif |= WM8955_BCLKINV;
			break;
		default:
			return -EINVAL;
		}
		break;

	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_RIGHT_J:
	case SND_SOC_DAIFMT_LEFT_J:
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_IF:
			aif |= WM8955_BCLKINV | WM8955_LRP;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			aif |= WM8955_BCLKINV;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			aif |= WM8955_LRP;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, WM8955_AUDIO_INTERFACE,
			    WM8955_MS | WM8955_FORMAT_MASK | WM8955_BCLKINV |
			    WM8955_LRP, aif);

	return 0;
}
/* DAI digital_mute: toggle the DAC soft-mute bit.  Always returns 0. */
static int wm8955_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
	struct snd_soc_codec *codec = codec_dai->codec;

	snd_soc_update_bits(codec, WM8955_DAC_CONTROL, WM8955_DACMU,
			    mute ? WM8955_DACMU : 0);
	return 0;
}
/*
 * Bias-level management.  The OFF->STANDBY path enables the supplies,
 * syncs the register cache, ramps VMID (the 500ms sleep is required
 * for the reference to settle) and raises VROI before dropping to the
 * low-power VMID divider.  The sequence ordering is part of the
 * hardware's pop-suppression scheme — do not reorder.
 */
static int wm8955_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
	int ret;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;

	case SND_SOC_BIAS_PREPARE:
		/* VMID resistance 2*50k */
		snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
				    WM8955_VMIDSEL_MASK,
				    0x1 << WM8955_VMIDSEL_SHIFT);

		/* Default bias current */
		snd_soc_update_bits(codec, WM8955_ADDITIONAL_CONTROL_1,
				    WM8955_VSEL_MASK,
				    0x2 << WM8955_VSEL_SHIFT);
		break;

	case SND_SOC_BIAS_STANDBY:
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
						    wm8955->supplies);
			if (ret != 0) {
				dev_err(codec->dev,
					"Failed to enable supplies: %d\n",
					ret);
				return ret;
			}

			/* Replay cached register writes lost while off. */
			regcache_sync(wm8955->regmap);

			/* Enable VREF and VMID */
			/* NOTE(review): value shifts 0x3 by VREF_SHIFT while
			 * the mask covers VMIDSEL — looks like it may have
			 * been meant as VMIDSEL_SHIFT; confirm against the
			 * datasheet before changing. */
			snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
					    WM8955_VREF |
					    WM8955_VMIDSEL_MASK,
					    WM8955_VREF |
					    0x3 << WM8955_VREF_SHIFT);

			/* Let VMID ramp */
			msleep(500);

			/* High resistance VROI to maintain outputs */
			snd_soc_update_bits(codec,
					    WM8955_ADDITIONAL_CONTROL_3,
					    WM8955_VROI, WM8955_VROI);
		}

		/* Maintain VMID with 2*250k */
		snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
				    WM8955_VMIDSEL_MASK,
				    0x2 << WM8955_VMIDSEL_SHIFT);

		/* Minimum bias current */
		snd_soc_update_bits(codec, WM8955_ADDITIONAL_CONTROL_1,
				    WM8955_VSEL_MASK, 0);
		break;

	case SND_SOC_BIAS_OFF:
		/* Low resistance VROI to help discharge */
		snd_soc_update_bits(codec,
				    WM8955_ADDITIONAL_CONTROL_3,
				    WM8955_VROI, 0);

		/* Turn off VMID and VREF */
		snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
				    WM8955_VREF |
				    WM8955_VMIDSEL_MASK, 0);

		regulator_bulk_disable(ARRAY_SIZE(wm8955->supplies),
				       wm8955->supplies);
		break;
	}
	codec->dapm.bias_level = level;
	return 0;
}
/* Rates/formats advertised to the ASoC core; must stay consistent with
 * clock_cfgs and the wl switch in wm8955_hw_params(). */
#define WM8955_RATES SNDRV_PCM_RATE_8000_96000

#define WM8955_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)

static const struct snd_soc_dai_ops wm8955_dai_ops = {
	.set_sysclk = wm8955_set_sysclk,
	.set_fmt = wm8955_set_fmt,
	.hw_params = wm8955_hw_params,
	.digital_mute = wm8955_digital_mute,
};

/* Playback-only DAI: the WM8955 has no ADC path. */
static struct snd_soc_dai_driver wm8955_dai = {
	.name = "wm8955-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = WM8955_RATES,
		.formats = WM8955_FORMATS,
	},
	.ops = &wm8955_dai_ops,
};
#ifdef CONFIG_PM
/* Suspend: drop to BIAS_OFF and mark the cache dirty so resume's
 * regcache_sync() (via set_bias_level) rewrites every register. */
static int wm8955_suspend(struct snd_soc_codec *codec)
{
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);

	wm8955_set_bias_level(codec, SND_SOC_BIAS_OFF);

	regcache_mark_dirty(wm8955->regmap);

	return 0;
}

/* Resume: re-enter STANDBY, which powers supplies and syncs the cache. */
static int wm8955_resume(struct snd_soc_codec *codec)
{
	wm8955_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	return 0;
}
#else
#define wm8955_suspend NULL
#define wm8955_resume NULL
#endif
/*
 * Codec probe: hook up regmap-backed I/O, acquire and enable the four
 * supplies, reset the chip, apply non-default register policy (volume
 * update latching, zero-cross, adaptive bass boost) and platform-data
 * options, then settle at STANDBY.  On failure the supplies are
 * released via the goto ladder.
 */
static int wm8955_probe(struct snd_soc_codec *codec)
{
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
	struct wm8955_pdata *pdata = dev_get_platdata(codec->dev);
	int ret, i;

	codec->control_data = wm8955->regmap;
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(wm8955->supplies); i++)
		wm8955->supplies[i].supply = wm8955_supply_names[i];

	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8955->supplies),
				 wm8955->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
				    wm8955->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		goto err_get;
	}

	ret = wm8955_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
		goto err_enable;
	}

	/* Change some default settings - latch VU and enable ZC */
	snd_soc_update_bits(codec, WM8955_LEFT_DAC_VOLUME,
			    WM8955_LDVU, WM8955_LDVU);
	snd_soc_update_bits(codec, WM8955_RIGHT_DAC_VOLUME,
			    WM8955_RDVU, WM8955_RDVU);
	snd_soc_update_bits(codec, WM8955_LOUT1_VOLUME,
			    WM8955_LO1VU | WM8955_LO1ZC,
			    WM8955_LO1VU | WM8955_LO1ZC);
	snd_soc_update_bits(codec, WM8955_ROUT1_VOLUME,
			    WM8955_RO1VU | WM8955_RO1ZC,
			    WM8955_RO1VU | WM8955_RO1ZC);
	snd_soc_update_bits(codec, WM8955_LOUT2_VOLUME,
			    WM8955_LO2VU | WM8955_LO2ZC,
			    WM8955_LO2VU | WM8955_LO2ZC);
	snd_soc_update_bits(codec, WM8955_ROUT2_VOLUME,
			    WM8955_RO2VU | WM8955_RO2ZC,
			    WM8955_RO2VU | WM8955_RO2ZC);
	snd_soc_update_bits(codec, WM8955_MONOOUT_VOLUME,
			    WM8955_MOZC, WM8955_MOZC);

	/* Also enable adaptive bass boost by default */
	snd_soc_update_bits(codec, WM8955_BASS_CONTROL, WM8955_BB, WM8955_BB);

	/* Set platform data values */
	if (pdata) {
		if (pdata->out2_speaker)
			snd_soc_update_bits(codec, WM8955_ADDITIONAL_CONTROL_2,
					    WM8955_ROUT2INV, WM8955_ROUT2INV);
		if (pdata->monoin_diff)
			snd_soc_update_bits(codec, WM8955_MONO_OUT_MIX_1,
					    WM8955_DMEN, WM8955_DMEN);
	}

	wm8955_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	/* Bias level configuration will have done an extra enable */
	regulator_bulk_disable(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);

	return 0;

err_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);
err_get:
	regulator_bulk_free(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);

	return ret;
}
/* Codec remove: power down fully and release the supplies obtained in
 * wm8955_probe().  Always returns 0. */
static int wm8955_remove(struct snd_soc_codec *codec)
{
	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);

	wm8955_set_bias_level(codec, SND_SOC_BIAS_OFF);
	regulator_bulk_free(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);
	return 0;
}
/* ASoC codec driver description: lifecycle callbacks plus the static
 * control, DAPM widget and DAPM route tables defined earlier in this file. */
static struct snd_soc_codec_driver soc_codec_dev_wm8955 = {
	.probe = wm8955_probe,
	.remove = wm8955_remove,
	.suspend = wm8955_suspend,
	.resume = wm8955_resume,
	.set_bias_level = wm8955_set_bias_level,

	.controls = wm8955_snd_controls,
	.num_controls = ARRAY_SIZE(wm8955_snd_controls),
	.dapm_widgets = wm8955_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm8955_dapm_widgets),
	.dapm_routes = wm8955_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(wm8955_dapm_routes),
};
/* Register map layout: the WM8955 control interface uses 7-bit register
 * addresses carrying 9-bit values.  Defaults and volatile/writeable
 * predicates come from the tables earlier in this file. */
static const struct regmap_config wm8955_regmap = {
	.reg_bits = 7,
	.val_bits = 9,

	.max_register = WM8955_MAX_REGISTER,
	.volatile_reg = wm8955_volatile,
	.writeable_reg = wm8955_writeable,

	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wm8955_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm8955_reg_defaults),
};
/*
 * I2C probe: allocate per-device state, set up the register map and
 * register the codec with the ASoC core.  All allocations are
 * device-managed, so the error paths need no explicit cleanup.
 */
static int wm8955_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8955_priv *wm8955;
	int ret;

	wm8955 = devm_kzalloc(&i2c->dev, sizeof(*wm8955), GFP_KERNEL);
	if (!wm8955)
		return -ENOMEM;

	wm8955->regmap = devm_regmap_init_i2c(i2c, &wm8955_regmap);
	if (IS_ERR(wm8955->regmap)) {
		ret = PTR_ERR(wm8955->regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	i2c_set_clientdata(i2c, wm8955);

	return snd_soc_register_codec(&i2c->dev,
				      &soc_codec_dev_wm8955, &wm8955_dai, 1);
}
/* I2C remove: unregister the codec; devm-managed state (private data,
 * regmap) allocated in probe is released automatically by the core. */
static int wm8955_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}
/* I2C device IDs this driver binds to */
static const struct i2c_device_id wm8955_i2c_id[] = {
	{ "wm8955", 0 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, wm8955_i2c_id);
/* I2C driver glue; registered via module_i2c_driver() below. */
static struct i2c_driver wm8955_i2c_driver = {
	.driver = {
		.name = "wm8955",
		.owner = THIS_MODULE,
	},
	.probe = wm8955_i2c_probe,
	.remove = wm8955_i2c_remove,
	.id_table = wm8955_i2c_id,
};
module_i2c_driver(wm8955_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8955 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
damienyong/Kernel-3.0.8 | kernel/net/irda/irlan/irlan_provider.c | 2389 | 11183 | /*********************************************************************
*
* Filename: irlan_provider.c
* Version: 0.9
* Description: IrDA LAN Access Protocol Implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
* Modified at: Sat Oct 30 12:52:10 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
* Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>
#include <net/irda/irlmp.h>
#include <net/irda/irias_object.h>
#include <net/irda/iriap.h>
#include <net/irda/timer.h>
#include <net/irda/irlan_common.h>
#include <net/irda/irlan_eth.h>
#include <net/irda/irlan_event.h>
#include <net/irda/irlan_provider.h>
#include <net/irda/irlan_filter.h>
#include <net/irda/irlan_client.h>
static void irlan_provider_connect_indication(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size,
__u8 max_header_size,
struct sk_buff *skb);
/*
* Function irlan_provider_control_data_indication (handle, skb)
*
* This function gets the data that is received on the control channel
*
*/
/*
 * TTP data-indication callback for the provider control channel: decode
 * the command code in the first frame byte and dispatch the matching
 * event to the provider state machine.  Always returns 0.
 */
static int irlan_provider_data_indication(void *instance, void *sap,
					  struct sk_buff *skb)
{
	struct irlan_cb *self;
	__u8 code;	/* IrLAN command opcode, first byte of the frame */

	IRDA_DEBUG(4, "%s()\n", __func__ );

	self = (struct irlan_cb *) instance;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);

	IRDA_ASSERT(skb != NULL, return -1;);

	code = skb->data[0];
	switch(code) {
	case CMD_GET_PROVIDER_INFO:
		IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n");
		irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb);
		break;

	case CMD_GET_MEDIA_CHAR:
		IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n");
		irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb);
		break;
	case CMD_OPEN_DATA_CHANNEL:
		IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n");
		irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb);
		break;
	case CMD_FILTER_OPERATION:
		IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n");
		irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb);
		break;
	case CMD_RECONNECT_DATA_CHAN:
		/* Known command, but no handler implemented */
		IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ );
		IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
		break;
	case CMD_CLOSE_DATA_CHAN:
		/* Known command, but no handler implemented */
		IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n");
		IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
		break;
	default:
		/* Unknown opcodes are logged and ignored */
		IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
		break;
	}
	return 0;
}
/*
* Function irlan_provider_connect_indication (handle, skb, priv)
*
* Got connection from peer IrLAN client
*
*/
/*
 * TTP connect-indication callback: a peer IrLAN client is connecting to
 * our control TSAP.  Record the negotiated SDU/header sizes and feed a
 * CONNECT_INDICATION event to the provider state machine.
 */
static void irlan_provider_connect_indication(void *instance, void *sap,
					      struct qos_info *qos,
					      __u32 max_sdu_size,
					      __u8 max_header_size,
					      struct sk_buff *skb)
{
	struct irlan_cb *self;
	struct tsap_cb *tsap;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	self = (struct irlan_cb *) instance;
	tsap = (struct tsap_cb *) sap;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	/* The connection must arrive on our control TSAP while idle */
	IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;);
	IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;);

	self->provider.max_sdu_size = max_sdu_size;
	self->provider.max_header_size = max_header_size;

	irlan_do_provider_event(self, IRLAN_CONNECT_INDICATION, NULL);

	/*
	 * If we are in peer mode, the client may not have got the discovery
	 * indication it needs to make progress. If the client is still in
	 * IDLE state, we must kick it.
	 */
	if ((self->provider.access_type == ACCESS_PEER) &&
	    (self->client.state == IRLAN_IDLE))
	{
		irlan_client_wakeup(self, self->saddr, self->daddr);
	}
}
/*
* Function irlan_provider_connect_response (handle)
*
* Accept incoming connection
*
*/
/*
 * Accept an incoming control-channel connection unconditionally,
 * advertising IRLAN_MTU as our maximum SDU size.
 */
void irlan_provider_connect_response(struct irlan_cb *self,
				     struct tsap_cb *tsap)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	/* Just accept */
	irttp_connect_response(tsap, IRLAN_MTU, NULL);
}
/*
 * TTP disconnect-indication callback for the control channel: validate
 * the callback context and push an LMP_DISCONNECT event into the
 * provider state machine.
 */
static void irlan_provider_disconnect_indication(void *instance, void *sap,
						 LM_REASON reason,
						 struct sk_buff *userdata)
{
	struct irlan_cb *self;
	struct tsap_cb *tsap;

	IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);

	self = (struct irlan_cb *) instance;
	tsap = (struct tsap_cb *) sap;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
	IRDA_ASSERT(tsap != NULL, return;);
	IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);

	/* Disconnect must come from our own control TSAP */
	IRDA_ASSERT(tsap == self->provider.tsap_ctrl, return;);

	irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
}
/*
* Function irlan_parse_open_data_cmd (self, skb)
*
*
*
*/
/*
 * Parse an OPEN_DATA_CHANNEL command frame and open the data TSAP.
 * Note: the data TSAP is opened regardless of the parse result; only
 * the parse status is propagated to the caller.
 */
int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
{
	int ret;

	ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb);

	/* Open data channel */
	irlan_open_data_tsap(self);

	return ret;
}
/*
* Function parse_command (skb)
*
* Extract all parameters from received buffer, then feed them to
* check_params for parsing
*
*/
/*
 * Function parse_command (skb)
 *
 *    Extract all parameters from the received buffer, then feed each
 *    name/value pair to irlan_check_command_param() for validation.
 *
 *    Returns RSP_SUCCESS, the last extraction error, or a negative
 *    -RSP_* code on bad input or allocation failure.
 */
int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
				 struct sk_buff *skb)
{
	__u8 *frame;
	__u8 *ptr;
	int count;
	__u16 val_len;
	int i;
	char *name;
	char *value;
	int ret = RSP_SUCCESS;

	/*
	 * Check skb before any dereference (skb->len below).  IRDA_ASSERT
	 * may be compiled out, so it cannot be relied on for NULL
	 * protection; previously skb->len was read before the NULL check.
	 */
	if (!skb)
		return -RSP_PROTOCOL_ERROR;

	IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len);

	IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);

	frame = skb->data;

	/* 255 = max parameter-name length, 1016 = max value length (IrLAN) */
	name = kmalloc(255, GFP_ATOMIC);
	if (!name)
		return -RSP_INSUFFICIENT_RESOURCES;
	value = kmalloc(1016, GFP_ATOMIC);
	if (!value) {
		kfree(name);
		return -RSP_INSUFFICIENT_RESOURCES;
	}

	/* How many parameters? */
	count = frame[1];

	IRDA_DEBUG(4, "Got %d parameters\n", count);

	ptr = frame+2;

	/* For all parameters */
	for (i=0; i<count;i++) {
		ret = irlan_extract_param(ptr, name, value, &val_len);
		if (ret < 0) {
			IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ );
			break;
		}
		ptr += ret;	/* advance past the consumed parameter */
		ret = RSP_SUCCESS;
		irlan_check_command_param(self, name, value);
	}
	/* Cleanup */
	kfree(name);
	kfree(value);

	return ret;
}
/*
* Function irlan_provider_send_reply (self, info)
*
* Send reply to query to peer IrLAN layer
*
*/
/*
 * Function irlan_provider_send_reply (self, info)
 *
 *    Build and send the reply frame for a previously received command
 *    to the peer IrLAN client over the control channel.
 *
 *    NOTE(review): ret_code is currently unused; the first reply byte
 *    is always 0x00 ("Success") — confirm whether error replies are
 *    ever required here.
 */
void irlan_provider_send_reply(struct irlan_cb *self, int command,
			       int ret_code)
{
	struct sk_buff *skb;

	IRDA_DEBUG(4, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);

	/*
	 * Size the skb for the worst case reply (CMD_GET_MEDIA_CHAR).
	 * Fixed the "BORADCAST" typo here; same string length, so the
	 * computed size is unchanged.
	 */
	skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
			/* Bigger param length comes from CMD_GET_MEDIA_CHAR */
			IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
			IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
			IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
			IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
			GFP_ATOMIC);
	if (!skb)
		return;

	/* Reserve space for TTP, LMP, and LAP header */
	skb_reserve(skb, self->provider.max_header_size);
	skb_put(skb, 2);	/* status byte + parameter count */

	switch (command) {
	case CMD_GET_PROVIDER_INFO:
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x02; /* 2 parameters */
		switch (self->media) {
		case MEDIA_802_3:
			irlan_insert_string_param(skb, "MEDIA", "802.3");
			break;
		case MEDIA_802_5:
			irlan_insert_string_param(skb, "MEDIA", "802.5");
			break;
		default:
			IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ );
			break;
		}
		irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
		break;

	case CMD_GET_MEDIA_CHAR:
		skb->data[0] = 0x00; /* Success */
		skb->data[1] = 0x05; /* 5 parameters */
		irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
		irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
		irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");

		switch (self->provider.access_type) {
		case ACCESS_DIRECT:
			irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
			break;
		case ACCESS_PEER:
			irlan_insert_string_param(skb, "ACCESS_TYPE", "PEER");
			break;
		case ACCESS_HOSTED:
			irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED");
			break;
		default:
			IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ );
			break;
		}
		irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee);
		break;

	case CMD_OPEN_DATA_CHANNEL:
		skb->data[0] = 0x00; /* Success */
		if (self->provider.send_arb_val) {
			skb->data[1] = 0x03; /* 3 parameters */
			irlan_insert_short_param(skb, "CON_ARB",
						 self->provider.send_arb_val);
		} else {
			skb->data[1] = 0x02; /* 2 parameters */
		}
		irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data);
		irlan_insert_string_param(skb, "RECONNECT_KEY", "LINUX RULES!");
		break;
	case CMD_FILTER_OPERATION:
		/* Reply contents are filled in by the filter code */
		irlan_filter_request(self, skb);
		break;
	default:
		IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
		break;
	}

	irttp_data_request(self->provider.tsap_ctrl, skb);
}
/*
* Function irlan_provider_register(void)
*
* Register provider support so we can accept incoming connections.
*
*/
/*
 * Open the provider control TSAP and register it with LM-IAS so that
 * peer clients can discover and connect to us.  Returns 0 on success,
 * -1 if the TSAP is already open or cannot be created.
 */
int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
{
	struct tsap_cb *tsap;
	notify_t notify;

	IRDA_DEBUG(4, "%s()\n", __func__ );

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);

	/* Check if already open */
	if (self->provider.tsap_ctrl)
		return -1;

	/*
	 *  First register well known control TSAP
	 */
	irda_notify_init(&notify);
	notify.data_indication       = irlan_provider_data_indication;
	notify.connect_indication    = irlan_provider_connect_indication;
	notify.disconnect_indication = irlan_provider_disconnect_indication;
	notify.instance = self;
	strlcpy(notify.name, "IrLAN ctrl (p)", sizeof(notify.name));

	tsap = irttp_open_tsap(LSAP_ANY, 1, &notify);
	if (!tsap) {
		IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
		return -1;
	}
	self->provider.tsap_ctrl = tsap;

	/* Register with LM-IAS */
	irlan_ias_register(self, tsap->stsap_sel);

	return 0;
}
| gpl-2.0 |
drod2169/Linux-3.8.x | drivers/media/pci/ivtv/ivtv-streams.c | 2901 | 30606 | /*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* License: GPL
* Author: Kevin Thayer <nufan_wfk at yahoo dot com>
*
* This file will hold API related functions, both internal (firmware api)
* and external (v4l2, etc)
*
* -----
* MPG600/MPG160 support by T.Adachi <tadachi@tadachi-net.com>
* and Takeru KOMORIYA<komoriya@paken.org>
*
* AVerMedia M179 GPIO info by Chris Pinkham <cpinkham@bc2va.org>
* using information provided by Jiun-Kuei Jung @ AVerMedia.
*/
#include "ivtv-driver.h"
#include "ivtv-fileops.h"
#include "ivtv-queue.h"
#include "ivtv-mailbox.h"
#include "ivtv-ioctl.h"
#include "ivtv-irq.h"
#include "ivtv-yuv.h"
#include "ivtv-cards.h"
#include "ivtv-streams.h"
#include "ivtv-firmware.h"
#include <media/v4l2-event.h>
/* File operations for encoder stream device nodes (read-side) */
static const struct v4l2_file_operations ivtv_v4l2_enc_fops = {
	.owner = THIS_MODULE,
	.read = ivtv_v4l2_read,
	.write = ivtv_v4l2_write,
	.open = ivtv_v4l2_open,
	.unlocked_ioctl = video_ioctl2,
	.release = ivtv_v4l2_close,
	.poll = ivtv_v4l2_enc_poll,
};
/* File operations for decoder stream device nodes (uses the decoder poll) */
static const struct v4l2_file_operations ivtv_v4l2_dec_fops = {
	.owner = THIS_MODULE,
	.read = ivtv_v4l2_read,
	.write = ivtv_v4l2_write,
	.open = ivtv_v4l2_open,
	.unlocked_ioctl = video_ioctl2,
	.release = ivtv_v4l2_close,
	.poll = ivtv_v4l2_dec_poll,
};
/* File operations for the radio device node: no read/write, ioctl only */
static const struct v4l2_file_operations ivtv_v4l2_radio_fops = {
	.owner = THIS_MODULE,
	.open = ivtv_v4l2_open,
	.unlocked_ioctl = video_ioctl2,
	.release = ivtv_v4l2_close,
	.poll = ivtv_v4l2_enc_poll,
};
#define IVTV_V4L2_DEC_MPG_OFFSET 16 /* offset from 0 to register decoder mpg v4l2 minors on */
#define IVTV_V4L2_ENC_PCM_OFFSET 24 /* offset from 0 to register pcm v4l2 minors on */
#define IVTV_V4L2_ENC_YUV_OFFSET 32 /* offset from 0 to register yuv v4l2 minors on */
#define IVTV_V4L2_DEC_YUV_OFFSET 48 /* offset from 0 to register decoder yuv v4l2 minors on */
#define IVTV_V4L2_DEC_VBI_OFFSET 8 /* offset from 0 to register decoder vbi input v4l2 minors on */
#define IVTV_V4L2_DEC_VOUT_OFFSET 16 /* offset from 0 to register vbi output v4l2 minors on */
/*
 * Static per-stream-type properties, indexed by IVTV_*_STREAM_TYPE_*.
 * Each entry supplies the name, V4L2 device type, minor-number offset,
 * DMA direction (or PIO), advertised V4L2 capabilities and fops used
 * when the corresponding device node is created.
 */
static struct {
	const char *name;
	int vfl_type;
	int num_offset;
	int dma, pio;
	u32 v4l2_caps;
	const struct v4l2_file_operations *fops;
} ivtv_stream_info[] = {
	{	/* IVTV_ENC_STREAM_TYPE_MPG */
		"encoder MPG",
		VFL_TYPE_GRABBER, 0,
		PCI_DMA_FROMDEVICE, 0,
		V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
			V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_enc_fops
	},
	{	/* IVTV_ENC_STREAM_TYPE_YUV */
		"encoder YUV",
		VFL_TYPE_GRABBER, IVTV_V4L2_ENC_YUV_OFFSET,
		PCI_DMA_FROMDEVICE, 0,
		V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
			V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_enc_fops
	},
	{	/* IVTV_ENC_STREAM_TYPE_VBI */
		"encoder VBI",
		VFL_TYPE_VBI, 0,
		PCI_DMA_FROMDEVICE, 0,
		V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_TUNER |
			V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_enc_fops
	},
	{	/* IVTV_ENC_STREAM_TYPE_PCM */
		"encoder PCM",
		VFL_TYPE_GRABBER, IVTV_V4L2_ENC_PCM_OFFSET,
		PCI_DMA_FROMDEVICE, 0,
		V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_enc_fops
	},
	{	/* IVTV_ENC_STREAM_TYPE_RAD */
		"encoder radio",
		VFL_TYPE_RADIO, 0,
		PCI_DMA_NONE, 1,
		V4L2_CAP_RADIO | V4L2_CAP_TUNER,
		&ivtv_v4l2_radio_fops
	},
	{	/* IVTV_DEC_STREAM_TYPE_MPG */
		"decoder MPG",
		VFL_TYPE_GRABBER, IVTV_V4L2_DEC_MPG_OFFSET,
		PCI_DMA_TODEVICE, 0,
		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_dec_fops
	},
	{	/* IVTV_DEC_STREAM_TYPE_VBI */
		"decoder VBI",
		VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET,
		PCI_DMA_NONE, 1,
		V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_READWRITE,
		&ivtv_v4l2_enc_fops
	},
	{	/* IVTV_DEC_STREAM_TYPE_VOUT */
		"decoder VOUT",
		VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET,
		PCI_DMA_NONE, 1,
		V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_dec_fops
	},
	{	/* IVTV_DEC_STREAM_TYPE_YUV */
		"decoder YUV",
		VFL_TYPE_GRABBER, IVTV_V4L2_DEC_YUV_OFFSET,
		PCI_DMA_TODEVICE, 0,
		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
		&ivtv_v4l2_dec_fops
	}
};
/*
 * Reset the bookkeeping for stream 'type' from the static stream-info
 * table, preserving the already-allocated video_device pointer across
 * the wipe.
 */
static void ivtv_stream_init(struct ivtv *itv, int type)
{
	struct ivtv_stream *s = &itv->streams[type];
	struct video_device *saved_vdev = s->vdev;

	/* Wipe the whole stream struct, then restore the vdev pointer. */
	memset(s, 0, sizeof(*s));
	s->vdev = saved_vdev;

	s->itv = itv;
	s->type = type;
	s->name = ivtv_stream_info[type].name;
	s->caps = ivtv_stream_info[type].v4l2_caps;

	/* PIO streams never use DMA, whatever the table entry says. */
	s->dma = ivtv_stream_info[type].pio ? PCI_DMA_NONE
					    : ivtv_stream_info[type].dma;

	s->buf_size = itv->stream_buf_size[type];
	if (s->buf_size)
		s->buffers = (itv->options.kilobytes[type] * 1024 +
			      s->buf_size - 1) / s->buf_size;

	spin_lock_init(&s->qlock);
	init_waitqueue_head(&s->waitq);
	s->sg_handle = IVTV_DMA_UNMAPPED;

	ivtv_queue_init(&s->q_free);
	ivtv_queue_init(&s->q_full);
	ivtv_queue_init(&s->q_dma);
	ivtv_queue_init(&s->q_predma);
	ivtv_queue_init(&s->q_io);
}
/*
 * Prepare (but do not register) the video_device for stream 'type'.
 * Returns 0 on success or when the stream is intentionally not created
 * (unsupported or configured with 0 buffers); -ENOMEM on allocation
 * failure.  On the "not created" paths s->vdev stays NULL, which marks
 * the stream as unused everywhere else.
 */
static int ivtv_prep_dev(struct ivtv *itv, int type)
{
	struct ivtv_stream *s = &itv->streams[type];
	int num_offset = ivtv_stream_info[type].num_offset;
	int num = itv->instance + ivtv_first_minor + num_offset;

	/* These four fields are always initialized. If vdev == NULL, then
	   this stream is not in use. In that case no other fields but these
	   four can be used. */
	s->vdev = NULL;
	s->itv = itv;
	s->type = type;
	s->name = ivtv_stream_info[type].name;

	/* Check whether the radio is supported */
	if (type == IVTV_ENC_STREAM_TYPE_RAD && !(itv->v4l2_cap & V4L2_CAP_RADIO))
		return 0;
	/* Decoder streams need video output capability */
	if (type >= IVTV_DEC_STREAM_TYPE_MPG && !(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
		return 0;

	/* User explicitly selected 0 buffers for these streams, so don't
	   create them. */
	if (ivtv_stream_info[type].dma != PCI_DMA_NONE &&
	    itv->options.kilobytes[type] == 0) {
		IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
		return 0;
	}

	ivtv_stream_init(itv, type);

	/* allocate and initialize the v4l2 video device structure */
	s->vdev = video_device_alloc();
	if (s->vdev == NULL) {
		IVTV_ERR("Couldn't allocate v4l2 video_device for %s\n", s->name);
		return -ENOMEM;
	}

	snprintf(s->vdev->name, sizeof(s->vdev->name), "%s %s",
			itv->v4l2_dev.name, s->name);

	s->vdev->num = num;
	s->vdev->v4l2_dev = &itv->v4l2_dev;
	/* Output streams transmit data towards the hardware */
	if (ivtv_stream_info[type].v4l2_caps &
			(V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_SLICED_VBI_OUTPUT))
		s->vdev->vfl_dir = VFL_DIR_TX;
	s->vdev->fops = ivtv_stream_info[type].fops;
	s->vdev->ctrl_handler = itv->v4l2_dev.ctrl_handler;
	s->vdev->release = video_device_release;
	s->vdev->tvnorms = V4L2_STD_ALL;
	s->vdev->lock = &itv->serialize_lock;
	/* The decoder VBI node has no inputs/tuner of its own, so mask
	   out the ioctls that would act on them. */
	if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		v4l2_disable_ioctl(s->vdev, VIDIOC_S_AUDIO);
		v4l2_disable_ioctl(s->vdev, VIDIOC_G_AUDIO);
		v4l2_disable_ioctl(s->vdev, VIDIOC_ENUMAUDIO);
		v4l2_disable_ioctl(s->vdev, VIDIOC_ENUMINPUT);
		v4l2_disable_ioctl(s->vdev, VIDIOC_S_INPUT);
		v4l2_disable_ioctl(s->vdev, VIDIOC_G_INPUT);
		v4l2_disable_ioctl(s->vdev, VIDIOC_S_FREQUENCY);
		v4l2_disable_ioctl(s->vdev, VIDIOC_G_FREQUENCY);
		v4l2_disable_ioctl(s->vdev, VIDIOC_S_TUNER);
		v4l2_disable_ioctl(s->vdev, VIDIOC_G_TUNER);
		v4l2_disable_ioctl(s->vdev, VIDIOC_S_STD);
	}
	set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
	ivtv_set_funcs(s->vdev);
	return 0;
}
/* Initialize v4l2 variables and prepare v4l2 devices */
int ivtv_streams_setup(struct ivtv *itv)
{
int type;
/* Setup V4L2 Devices */
for (type = 0; type < IVTV_MAX_STREAMS; type++) {
/* Prepare device */
if (ivtv_prep_dev(itv, type))
break;
if (itv->streams[type].vdev == NULL)
continue;
/* Allocate Stream */
if (ivtv_stream_alloc(&itv->streams[type]))
break;
}
if (type == IVTV_MAX_STREAMS)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
ivtv_streams_cleanup(itv, 0);
return -ENOMEM;
}
/*
 * Register the prepared video_device of stream 'type' with the V4L2
 * core and log the resulting node name.  Returns 0 on success or when
 * the stream is unused; -ENOMEM when registration fails (the vdev is
 * released and s->vdev cleared in that case).
 */
static int ivtv_reg_dev(struct ivtv *itv, int type)
{
	struct ivtv_stream *s = &itv->streams[type];
	int vfl_type = ivtv_stream_info[type].vfl_type;
	const char *name;
	int num;

	if (s->vdev == NULL)
		return 0;

	num = s->vdev->num;
	/* card number + user defined offset + device offset */
	if (type != IVTV_ENC_STREAM_TYPE_MPG) {
		struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];

		if (s_mpg->vdev)
			num = s_mpg->vdev->num + ivtv_stream_info[type].num_offset;
	}
	video_set_drvdata(s->vdev, s);

	/* Register device. First try the desired minor, then any free one. */
	if (video_register_device_no_warn(s->vdev, vfl_type, num)) {
		IVTV_ERR("Couldn't register v4l2 device for %s (device node number %d)\n",
				s->name, num);
		video_device_release(s->vdev);
		s->vdev = NULL;
		return -ENOMEM;
	}
	name = video_device_node_name(s->vdev);

	switch (vfl_type) {
	case VFL_TYPE_GRABBER:
		IVTV_INFO("Registered device %s for %s (%d kB)\n",
			name, s->name, itv->options.kilobytes[type]);
		break;
	case VFL_TYPE_RADIO:
		IVTV_INFO("Registered device %s for %s\n",
			name, s->name);
		break;
	case VFL_TYPE_VBI:
		if (itv->options.kilobytes[type])
			IVTV_INFO("Registered device %s for %s (%d kB)\n",
				name, s->name, itv->options.kilobytes[type]);
		else
			IVTV_INFO("Registered device %s for %s\n",
				name, s->name);
		break;
	}
	return 0;
}
/* Register v4l2 devices */
int ivtv_streams_register(struct ivtv *itv)
{
int type;
int err = 0;
/* Register V4L2 devices */
for (type = 0; type < IVTV_MAX_STREAMS; type++)
err |= ivtv_reg_dev(itv, type);
if (err == 0)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
ivtv_streams_cleanup(itv, 1);
return -ENOMEM;
}
/* Unregister v4l2 devices */
void ivtv_streams_cleanup(struct ivtv *itv, int unregister)
{
int type;
/* Teardown all streams */
for (type = 0; type < IVTV_MAX_STREAMS; type++) {
struct video_device *vdev = itv->streams[type].vdev;
itv->streams[type].vdev = NULL;
if (vdev == NULL)
continue;
ivtv_stream_free(&itv->streams[type]);
/* Unregister or release device */
if (unregister)
video_unregister_device(vdev);
else
video_device_release(vdev);
}
}
/*
 * Configure the encoder firmware and video digitizer for VBI capture
 * (raw or sliced, chosen by ivtv_raw_vbi()).  Order matters: the VBI
 * unit is reset, the subdev format is set, then the firmware VBI
 * config is written and the individual VBI lines are selected.
 */
static void ivtv_vbi_setup(struct ivtv *itv)
{
	int raw = ivtv_raw_vbi(itv);
	u32 data[CX2341X_MBOX_MAX_DATA];
	int lines;
	int i;

	/* Reset VBI */
	ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, 0xffff , 0, 0, 0, 0);

	/* setup VBI registers */
	if (raw)
		v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &itv->vbi.in.fmt.vbi);
	else
		v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, &itv->vbi.in.fmt.sliced);

	/* determine number of lines and total number of VBI bytes.
	   A raw line takes 1443 bytes: 2 * 720 + 4 byte frame header - 1
	   The '- 1' byte is probably an unused U or V byte. Or something...
	   A sliced line takes 51 bytes: 4 byte frame header, 4 byte internal
	   header, 42 data bytes + checksum (to be confirmed) */
	if (raw) {
		lines = itv->vbi.count * 2;
	} else {
		lines = itv->is_60hz ? 24 : 38;
		if (itv->is_60hz && (itv->hw_flags & IVTV_HW_CX25840))
			lines += 2;
	}

	itv->vbi.enc_size = lines * (raw ? itv->vbi.raw_size : itv->vbi.sliced_size);

	/* Note: sliced vs raw flag doesn't seem to have any effect
	   TODO: check mode (0x02) value with older ivtv versions. */
	data[0] = raw | 0x02 | (0xbd << 8);

	/* Every X number of frames a VBI interrupt arrives (frames as in 25 or 30 fps) */
	data[1] = 1;
	/* The VBI frames are stored in a ringbuffer with this size (with a VBI frame as unit) */
	data[2] = raw ? 4 : 4 * (itv->vbi.raw_size / itv->vbi.enc_size);

	/* The start/stop codes determine which VBI lines end up in the raw VBI data area.
	   The codes are from table 24 in the saa7115 datasheet. Each raw/sliced/video line
	   is framed with codes FF0000XX where XX is the SAV/EAV (Start/End of Active Video)
	   code. These values for raw VBI are obtained from a driver disassembly. The sliced
	   start/stop codes was deduced from this, but they do not appear in the driver.
	   Other code pairs that I found are: 0x250E6249/0x13545454 and 0x25256262/0x38137F54.
	   However, I have no idea what these values are for. */
	if (itv->hw_flags & IVTV_HW_CX25840) {
		/* Setup VBI for the cx25840 digitizer */
		if (raw) {
			data[3] = 0x20602060;
			data[4] = 0x30703070;
		} else {
			data[3] = 0xB0F0B0F0;
			data[4] = 0xA0E0A0E0;
		}
		/* Lines per frame */
		data[5] = lines;
		/* bytes per line */
		data[6] = (raw ? itv->vbi.raw_size : itv->vbi.sliced_size);
	} else {
		/* Setup VBI for the saa7115 digitizer */
		if (raw) {
			data[3] = 0x25256262;
			data[4] = 0x387F7F7F;
		} else {
			data[3] = 0xABABECEC;
			data[4] = 0xB6F1F1F1;
		}
		/* Lines per frame */
		data[5] = lines;
		/* bytes per line */
		data[6] = itv->vbi.enc_size / lines;
	}

	IVTV_DEBUG_INFO(
		"Setup VBI API header 0x%08x pkts %d buffs %d ln %d sz %d\n",
			data[0], data[1], data[2], data[5], data[6]);

	ivtv_api(itv, CX2341X_ENC_SET_VBI_CONFIG, 7, data);

	/* returns the VBI encoder memory area. */
	itv->vbi.enc_start = data[2];
	itv->vbi.fpi = data[0];
	if (!itv->vbi.fpi)
		itv->vbi.fpi = 1;

	IVTV_DEBUG_INFO("Setup VBI start 0x%08x frames %d fpi %d\n",
		itv->vbi.enc_start, data[1], itv->vbi.fpi);

	/* select VBI lines.
	   Note that the sliced argument seems to have no effect. */
	for (i = 2; i <= 24; i++) {
		int valid;

		if (itv->is_60hz) {
			valid = i >= 10 && i < 22;
		} else {
			valid = i >= 6 && i < 24;
		}
		ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, i - 1,
				valid, 0 , 0, 0);
		ivtv_vapi(itv, CX2341X_ENC_SET_VBI_LINE, 5, (i - 1) | 0x80000000,
				valid, 0, 0, 0);
	}

	/* Remaining VBI questions:
	   - Is it possible to select particular VBI lines only for inclusion in the MPEG
	   stream? Currently you can only get the first X lines.
	   - Is mixed raw and sliced VBI possible?
	   - What's the meaning of the raw/sliced flag?
	   - What's the meaning of params 2, 3 & 4 of the Select VBI command? */
}
/*
 * Start capturing on encoder stream 's': choose the firmware capture
 * type/subtype for the stream, perform one-time encoder/digitizer
 * initialization on the first active capture, then issue the firmware
 * START_CAPTURE command and unmask the relevant interrupts.
 * Returns 0 on success, -EINVAL on bad stream or firmware failure.
 */
int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv *itv = s->itv;
	int captype = 0, subtype = 0;
	int enable_passthrough = 0;

	if (s->vdev == NULL)
		return -EINVAL;

	IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);

	/* Map the stream type to the firmware capture type/subtype and do
	   per-type bookkeeping. */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		captype = 0;
		subtype = 3;

		/* Stop Passthrough */
		if (itv->output_mode == OUT_PASSTHROUGH) {
			ivtv_passthrough_mode(itv, 0);
			enable_passthrough = 1;	/* re-enable after start */
		}
		itv->mpg_data_received = itv->vbi_data_inserted = 0;
		itv->dualwatch_jiffies = jiffies;
		itv->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(itv->cxhdl.audio_mode);
		itv->search_pack_header = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		if (itv->output_mode == OUT_PASSTHROUGH) {
			captype = 2;
			subtype = 11;	/* video+audio+decoder */
			break;
		}
		captype = 1;
		subtype = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		captype = 1;
		subtype = 2;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		captype = 1;
		subtype = 4;

		itv->vbi.frame = 0;
		itv->vbi.inserted_frame = 0;
		memset(itv->vbi.sliced_mpeg_size,
			0, sizeof(itv->vbi.sliced_mpeg_size));
		break;
	default:
		return -EINVAL;
	}
	s->subtype = subtype;
	s->buffers_stolen = 0;

	/* Clear Streamoff flags in case left from last capture */
	clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);

	/* One-time encoder setup, only when no capture is running yet. */
	if (atomic_read(&itv->capturing) == 0) {
		int digitizer;

		/* Always use frame based mode. Experiments have demonstrated that byte
		   stream based mode results in dropped frames and corruption. Not often,
		   but occasionally. Many thanks go to Leonard Orb who spent a lot of
		   effort and time trying to trace the cause of the drop outs. */
		/* 1 frame per DMA */
		/*ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 128, 0); */
		ivtv_vapi(itv, CX2341X_ENC_SET_DMA_BLOCK_SIZE, 2, 1, 1);

		/* Stuff from Windows, we don't know what it is */
		ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1, 0);
		/* According to the docs, this should be correct. However, this is
		   untested. I don't dare enable this without having tested it.
		   Only very few old cards actually have this hardware combination.
		ivtv_vapi(itv, CX2341X_ENC_SET_VERT_CROP_LINE, 1,
			((itv->hw_flags & IVTV_HW_SAA7114) && itv->is_60hz) ? 10001 : 0);
		*/
		ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 3, !itv->has_cx23415);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 8, 0);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 2, 4, 1);
		ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);

		/* assign placeholder */
		ivtv_vapi(itv, CX2341X_ENC_SET_PLACEHOLDER, 12,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);

		/* Digitizer identification value depends on the fitted chip */
		if (itv->card->hw_all & (IVTV_HW_SAA7115 | IVTV_HW_SAA717X))
		    digitizer = 0xF1;
		else if (itv->card->hw_all & IVTV_HW_SAA7114)
		    digitizer = 0xEF;
		else /* cx25840 */
		    digitizer = 0x140;

		ivtv_vapi(itv, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, digitizer, digitizer);

		/* Setup VBI */
		if (itv->v4l2_cap & V4L2_CAP_VBI_CAPTURE) {
			ivtv_vbi_setup(itv);
		}

		/* assign program index info. Mask 7: select I/P/B, Num_req: 400 max */
		ivtv_vapi_result(itv, data, CX2341X_ENC_SET_PGM_INDEX_INFO, 2, 7, 400);
		itv->pgm_info_offset = data[0];
		itv->pgm_info_num = data[1];
		itv->pgm_info_write_idx = 0;
		itv->pgm_info_read_idx = 0;

		IVTV_DEBUG_INFO("PGM Index at 0x%08x with %d elements\n",
				itv->pgm_info_offset, itv->pgm_info_num);

		/* Setup API for Stream */
		cx2341x_handler_setup(&itv->cxhdl);

		/* mute if capturing radio */
		if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags))
			ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
				1 | (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
	}

	/* Vsync Setup */
	if (itv->has_cx23415 && !test_and_set_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
		/* event notification (on) */
		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_ENC_VIM_RST, -1);
		ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
	}

	if (atomic_read(&itv->capturing) == 0) {
		/* Clear all Pending Interrupts */
		ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);

		clear_bit(IVTV_F_I_EOS, &itv->i_flags);

		cx2341x_handler_set_busy(&itv->cxhdl, 1);

		/* Initialize Digitizer for Capture */
		/* Avoid tinny audio problem - ensure audio clocks are going */
		v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
		/* Avoid unpredictable PCI bus hang - disable video clocks */
		v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
		ivtv_msleep_timeout(300, 0);
		ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
		v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
	}

	/* begin_capture */
	if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype))
	{
		IVTV_DEBUG_WARN( "Error starting capture!\n");
		return -EINVAL;
	}

	/* Start Passthrough */
	if (enable_passthrough) {
		ivtv_passthrough_mode(itv, 1);
	}

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		ivtv_clear_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
	else
		ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);

	/* you're live! sit back and await interrupts :) */
	atomic_inc(&itv->capturing);
	return 0;
}
EXPORT_SYMBOL(ivtv_start_v4l2_encode_stream);
/*
 * Configure the firmware decoder for stream 's' before starting it:
 * audio mode, buffering, VBI re-insertion and the decoder data source
 * (MPEG from host, YUV from encoder, or YUV from host).  Ends with a
 * firmware health check, since this sequence is a known failure point.
 */
static int ivtv_setup_v4l2_decode_stream(struct ivtv_stream *s)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv *itv = s->itv;
	int datatype;
	u16 width;
	u16 height;

	if (s->vdev == NULL)
		return -EINVAL;

	IVTV_DEBUG_INFO("Setting some initial decoder settings\n");

	width = itv->cxhdl.width;
	height = itv->cxhdl.height;

	/* set audio mode to left/stereo  for dual/stereo mode. */
	ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);

	/* set number of internal decoder buffers */
	ivtv_vapi(itv, CX2341X_DEC_SET_DISPLAY_BUFFERS, 1, 0);

	/* prebuffering */
	ivtv_vapi(itv, CX2341X_DEC_SET_PREBUFFERING, 1, 1);

	/* extract from user packets */
	ivtv_vapi_result(itv, data, CX2341X_DEC_EXTRACT_VBI, 1, 1);
	itv->vbi.dec_start = data[0];

	IVTV_DEBUG_INFO("Decoder VBI RE-Insert start 0x%08x size 0x%08x\n",
		itv->vbi.dec_start, data[1]);

	/* set decoder source settings */
	/* Data type: 0 = mpeg from host,
	   1 = yuv from encoder,
	   2 = yuv_from_host */
	switch (s->type) {
	case IVTV_DEC_STREAM_TYPE_YUV:
		if (itv->output_mode == OUT_PASSTHROUGH) {
			datatype = 1;
		} else {
			/* Fake size to avoid switching video standard */
			datatype = 2;
			width = 720;
			height = itv->is_out_50hz ? 576 : 480;
		}
		IVTV_DEBUG_INFO("Setup DEC YUV Stream data[0] = %d\n", datatype);
		break;
	case IVTV_DEC_STREAM_TYPE_MPG:
	default:
		datatype = 0;
		break;
	}
	if (ivtv_vapi(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, datatype,
			width, height, itv->cxhdl.audio_properties)) {
		IVTV_DEBUG_WARN("Couldn't initialize decoder source\n");
	}

	/* Decoder sometimes dies here, so wait a moment */
	ivtv_msleep_timeout(10, 0);

	/* Known failure point for firmware, so check */
	return ivtv_firmware_check(itv, "ivtv_setup_v4l2_decode_stream");
}
/*
 * ivtv_start_v4l2_decode_stream - start decoder playback on stream @s
 * @s: decoder stream
 * @gop_offset: frame offset within the first GOP to start playback at
 *
 * Idempotent: returns 0 immediately if the stream is already streaming.
 * Returns a negative errno if decoder setup fails.
 */
int ivtv_start_v4l2_decode_stream(struct ivtv_stream *s, int gop_offset)
{
struct ivtv *itv = s->itv;
int rc;
if (s->vdev == NULL)
return -EINVAL;
/* test_and_set also claims the streaming flag; bail if already set. */
if (test_and_set_bit(IVTV_F_S_STREAMING, &s->s_flags))
return 0; /* already started */
IVTV_DEBUG_INFO("Starting decode stream %s (gop_offset %d)\n", s->name, gop_offset);
rc = ivtv_setup_v4l2_decode_stream(s);
if (rc < 0) {
/* Undo the flag claimed above so a retry is possible. */
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
return rc;
}
/* set dma size to 65536 bytes */
ivtv_vapi(itv, CX2341X_DEC_SET_DMA_BLOCK_SIZE, 1, 65536);
/* Clear Streamoff */
clear_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
/* Zero out decoder counters in both DMA mailboxes */
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[0]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[1]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[2]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA_END].data[3]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[0]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[1]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[2]);
writel(0, &itv->dec_mbox.mbox[IVTV_MBOX_DMA].data[3]);
/* turn on notification of dual/stereo mode change */
ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 1, IVTV_IRQ_DEC_AUD_MODE_CHG, -1);
/* start playback */
ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, gop_offset, 0);
/* Let things settle before we actually start */
ivtv_msleep_timeout(10, 0);
/* Clear the following Interrupt mask bits for decoding */
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
IVTV_DEBUG_IRQ("IRQ Mask is now: 0x%08x\n", itv->irqmask);
/* you're live! sit back and await interrupts :) */
atomic_inc(&itv->decoding);
return 0;
}
/* Stop every stream that is still capturing, highest index first. */
void ivtv_stop_all_captures(struct ivtv *itv)
{
	int idx = IVTV_MAX_STREAMS;

	while (idx-- > 0) {
		struct ivtv_stream *stream = &itv->streams[idx];

		/* Streams without a registered video device are unused. */
		if (stream->vdev == NULL)
			continue;
		/* Only stop streams that are actually streaming. */
		if (test_bit(IVTV_F_S_STREAMING, &stream->s_flags))
			ivtv_stop_v4l2_encode_stream(stream, 0);
	}
}
/*
 * ivtv_stop_v4l2_encode_stream - stop an encoder capture stream
 * @s: encoder stream to stop
 * @gop_end: non-zero to stop an MPG stream at the end of the current GOP
 *           (waits up to 2 s for the firmware's EOS interrupt)
 *
 * Caller must be entitled to stop the capture; capture must be active.
 * When the last capture stops, capture interrupts are masked again and
 * the implicit raw passthrough capture is also stopped.
 */
int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
{
struct ivtv *itv = s->itv;
DECLARE_WAITQUEUE(wait, current);
int cap_type;
int stopmode;
if (s->vdev == NULL)
return -EINVAL;
/* This function assumes that you are allowed to stop the capture
and that we are actually capturing */
IVTV_DEBUG_INFO("Stop Capture\n");
if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
return 0;
if (atomic_read(&itv->capturing) == 0)
return 0;
/* Firmware capture type: 0 = MPEG, 1 = raw (YUV/PCM/VBI). */
switch (s->type) {
case IVTV_ENC_STREAM_TYPE_YUV:
cap_type = 1;
break;
case IVTV_ENC_STREAM_TYPE_PCM:
cap_type = 1;
break;
case IVTV_ENC_STREAM_TYPE_VBI:
cap_type = 1;
break;
case IVTV_ENC_STREAM_TYPE_MPG:
default:
cap_type = 0;
break;
}
/* Stop Capture Mode */
if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
stopmode = 0;
} else {
stopmode = 1;
}
/* end_capture */
/* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);
if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
/* only run these if we're shutting down the last cap */
unsigned long duration;
unsigned long then = jiffies;
add_wait_queue(&itv->eos_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
/* wait 2s for EOS interrupt */
while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
time_before(jiffies,
then + msecs_to_jiffies(2000))) {
schedule_timeout(msecs_to_jiffies(10));
}
/* To convert jiffies to ms, we must multiply by 1000
* and divide by HZ. To avoid runtime division, we
* convert this to multiplication by 1000/HZ.
* Since integer division truncates, we get the best
* accuracy if we do a rounding calculation of the constant.
* Think of the case where HZ is 1024.
*/
duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);
if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
} else {
IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&itv->eos_waitq, &wait);
set_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
}
/* Handle any pending interrupts */
ivtv_msleep_timeout(100, 0);
}
atomic_dec(&itv->capturing);
/* Clear capture and no-read bits */
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
/* Other captures still running: leave global capture state alone. */
if (atomic_read(&itv->capturing) > 0) {
return 0;
}
cx2341x_handler_set_busy(&itv->cxhdl, 0);
/* Set the following Interrupt mask bits for capture */
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
del_timer(&itv->dma_timer);
/* event notification (off) */
if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
/* type: 0 = refresh */
/* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
}
/* Raw-passthrough is implied on start. Make sure it's stopped so
the encoder will re-initialize when next started */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 7);
wake_up(&s->waitq);
return 0;
}
EXPORT_SYMBOL(ivtv_stop_v4l2_encode_stream);
/*
 * ivtv_stop_v4l2_decode_stream - stop decoder playback on stream @s
 * @s: decoder stream (must be YUV or MPG type)
 * @flags: V4L2_DEC_CMD_STOP_* flags controlling how/when to stop
 * @pts: if non-zero, stop once playback reaches this presentation timestamp
 *
 * Unless an immediate stop is requested, polls the firmware transfer info
 * until output stalls (no queued buffers and an unchanged byte counter),
 * then stops playback, masks decode interrupts and signals V4L2_EVENT_EOS.
 */
int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
{
static const struct v4l2_event ev = {
.type = V4L2_EVENT_EOS,
};
struct ivtv *itv = s->itv;
if (s->vdev == NULL)
return -EINVAL;
if (s->type != IVTV_DEC_STREAM_TYPE_YUV && s->type != IVTV_DEC_STREAM_TYPE_MPG)
return -EINVAL;
if (!test_bit(IVTV_F_S_STREAMING, &s->s_flags))
return 0;
IVTV_DEBUG_INFO("Stop Decode at %llu, flags: %x\n", (unsigned long long)pts, flags);
/* Stop Decoder */
if (!(flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) || pts) {
u32 tmp = 0;
/* Wait until the decoder is no longer running */
if (pts) {
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3,
0, (u32)(pts & 0xffffffff), (u32)(pts >> 32));
}
while (1) {
u32 data[CX2341X_MBOX_MAX_DATA];
ivtv_vapi_result(itv, data, CX2341X_DEC_GET_XFER_INFO, 0);
/* Done when no buffers remain queued and the transfer
counter (data[3]) stopped advancing since last poll. */
if (s->q_full.buffers + s->q_dma.buffers == 0) {
if (tmp == data[3])
break;
tmp = data[3];
}
/* Interruptible 100 ms sleep; a signal aborts the wait. */
if (ivtv_msleep_timeout(100, 1))
break;
}
}
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, flags & V4L2_DEC_CMD_STOP_TO_BLACK, 0, 0);
/* turn off notification of dual/stereo mode change */
ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_DEC_AUD_MODE_CHG, -1);
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE);
del_timer(&itv->dma_timer);
clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
ivtv_flush_queues(s);
/* decoder needs time to settle */
ivtv_msleep_timeout(40, 0);
/* decrement decoding */
atomic_dec(&itv->decoding);
set_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags);
wake_up(&itv->event_waitq);
v4l2_event_queue(s->vdev, &ev);
/* wake up wait queues */
wake_up(&s->waitq);
return 0;
}
/*
 * ivtv_passthrough_mode - enable or disable encoder->decoder passthrough
 * @itv: device
 * @enable: non-zero to enter passthrough, zero to leave it
 *
 * Requires both the encoder YUV and decoder YUV streams to exist.
 * Returns 0 on success, -EINVAL if a stream is missing, -EBUSY if the
 * output mode cannot be claimed.
 */
int ivtv_passthrough_mode(struct ivtv *itv, int enable)
{
struct ivtv_stream *yuv_stream = &itv->streams[IVTV_ENC_STREAM_TYPE_YUV];
struct ivtv_stream *dec_stream = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
if (yuv_stream->vdev == NULL || dec_stream->vdev == NULL)
return -EINVAL;
IVTV_DEBUG_INFO("ivtv ioctl: Select passthrough mode\n");
/* Prevent others from starting/stopping streams while we
initiate/terminate passthrough mode */
if (enable) {
/* Already in passthrough: nothing to do. */
if (itv->output_mode == OUT_PASSTHROUGH) {
return 0;
}
if (ivtv_set_output_mode(itv, OUT_PASSTHROUGH) != OUT_PASSTHROUGH)
return -EBUSY;
/* Fully initialize stream, and then unflag init */
set_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
set_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
/* Setup YUV Decoder */
ivtv_setup_v4l2_decode_stream(dec_stream);
/* Start Decoder */
ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1);
atomic_inc(&itv->decoding);
/* Setup capture if not already done */
if (atomic_read(&itv->capturing) == 0) {
cx2341x_handler_setup(&itv->cxhdl);
cx2341x_handler_set_busy(&itv->cxhdl, 1);
}
/* Start Passthrough Mode */
ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, 2, 11);
atomic_inc(&itv->capturing);
return 0;
}
/* Disable path: only meaningful if passthrough is active. */
if (itv->output_mode != OUT_PASSTHROUGH)
return 0;
/* Stop Passthrough Mode */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, 1, 2, 11);
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 3, 1, 0, 0);
atomic_dec(&itv->capturing);
atomic_dec(&itv->decoding);
clear_bit(IVTV_F_S_PASSTHROUGH, &dec_stream->s_flags);
clear_bit(IVTV_F_S_STREAMING, &dec_stream->s_flags);
itv->output_mode = OUT_NONE;
if (atomic_read(&itv->capturing) == 0)
cx2341x_handler_set_busy(&itv->cxhdl, 0);
return 0;
}
| gpl-2.0 |
DirtyUnicorns/android_kernel_oppo_n1 | arch/mips/dec/wbflush.c | 4693 | 2110 | /*
* Setup the right wbflush routine for the different DECstations.
*
* Created with information from:
* DECstation 3100 Desktop Workstation Functional Specification
* DECstation 5000/200 KN02 System Module Functional Specification
* mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/wbflush.h>
#include <asm/barrier.h>
static void wbflush_kn01(void);
static void wbflush_kn210(void);
static void wbflush_mips(void);
void (*__wbflush) (void);
/*
 * wbflush_setup - select the write-back-buffer flush routine
 *
 * Picks the implementation matching the detected DECstation model and
 * stores it in the global __wbflush function pointer. Models not listed
 * explicitly fall back to the I/O-ASIC (uncached read) variant.
 */
void __init wbflush_setup(void)
{
switch (mips_machtype) {
case MACH_DS23100:
case MACH_DS5000_200: /* DS5000 3max */
__wbflush = wbflush_kn01;
break;
case MACH_DS5100: /* DS5100 MIPSMATE */
__wbflush = wbflush_kn210;
break;
case MACH_DS5000_1XX: /* DS5000/100 3min */
case MACH_DS5000_XX: /* Personal DS5000/2x */
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
default:
__wbflush = wbflush_mips;
break;
}
}
/*
* For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
* as part of Coprocessor 0.
*/
/* Spin on the CP0 condition bit (bc0f loops to itself) until the
 * R2020/R3220 write-back buffer signals empty. */
static void wbflush_kn01(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"1:\tbc0f\t1b\n\t"
"nop\n\t"
".set\tpop");
}
/*
* For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
* But CP3 has to enabled first.
*/
/* Enable CP3 by setting bit 31 in the CP0 status register, spin on the
 * CP3 condition bit until the write buffer drains, then restore the
 * original status register. Clobbers $2/$3 (declared below). */
static void wbflush_kn210(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"mfc0\t$2,$12\n\t"
"lui\t$3,0x8000\n\t"
"or\t$3,$2,$3\n\t"
"mtc0\t$3,$12\n\t"
"nop\n"
"1:\tbc3f\t1b\n\t"
"nop\n\t"
"mtc0\t$2,$12\n\t"
"nop\n\t"
".set\tpop"
: : : "$2", "$3");
}
/*
* I/O ASIC systems use a standard writeback buffer that gets flushed
* upon an uncached read.
*/
/* I/O-ASIC systems: an uncached read (__fast_iob) flushes the buffer. */
static void wbflush_mips(void)
{
__fast_iob();
}
#include <linux/module.h>
EXPORT_SYMBOL(__wbflush);
| gpl-2.0 |
TeamGlade-Devices/android_kernel_sony_msm8930 | arch/mips/lantiq/xway/clk-xway.c | 4693 | 5541 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
#include <linux/io.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <lantiq_soc.h>
static unsigned int ltq_ram_clocks[] = {
CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
#define BASIC_FREQUENCY_1 35328000
#define BASIC_FREQUENCY_2 36000000
#define BASIS_REQUENCY_USB 12000000
#define GET_BITS(x, msb, lsb) \
(((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
#define LTQ_CGU_PLL0_CFG 0x0004
#define LTQ_CGU_PLL1_CFG 0x0008
#define LTQ_CGU_PLL2_CFG 0x000C
#define LTQ_CGU_SYS 0x0010
#define LTQ_CGU_UPDATE 0x0014
#define LTQ_CGU_IF_CLK 0x0018
#define LTQ_CGU_OSC_CON 0x001C
#define LTQ_CGU_SMD 0x0020
#define LTQ_CGU_CT1SR 0x0028
#define LTQ_CGU_CT2SR 0x002C
#define LTQ_CGU_PCMCR 0x0030
#define LTQ_CGU_PCI_CR 0x0034
#define LTQ_CGU_PD_PC 0x0038
#define LTQ_CGU_FMR 0x003C
#define CGU_PLL0_PHASE_DIVIDER_ENABLE \
(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
#define CGU_PLL0_BYPASS \
(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
#define CGU_PLL0_CFG_DSMSEL \
(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
#define CGU_PLL0_CFG_FRAC_EN \
(ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
#define CGU_PLL1_SRC \
(ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
#define CGU_PLL2_PHASE_DIVIDER_ENABLE \
(ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
#define CGU_SYS_FPI_SEL (1 << 6)
#define CGU_SYS_DDR_SEL 0x3
#define CGU_PLL0_SRC (1 << 29)
#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
static unsigned int ltq_get_pll0_fdiv(void);
/*
 * get_input_clock - reference frequency feeding PLL @pll
 * @pll: PLL index (0, 1 or 2)
 *
 * Derives the input clock from the CGU configuration registers: either
 * the 12 MHz USB basis clock, one of the two crystal frequencies
 * (35.328 MHz / 36 MHz), or - for PLL2 - the divided PLL0 output.
 * Returns 0 for an unknown PLL index or a reserved PLL2 source select.
 */
static inline unsigned int get_input_clock(int pll)
{
	switch (pll) {
	case 0:
		if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
			return BASIS_REQUENCY_USB;
		else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
			return BASIC_FREQUENCY_1;
		else
			return BASIC_FREQUENCY_2;
	case 1:
		if (CGU_PLL1_SRC)
			return BASIS_REQUENCY_USB;
		else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
			return BASIC_FREQUENCY_1;
		else
			return BASIC_FREQUENCY_2;
	case 2:
		switch (CGU_PLL2_SRC) {
		case 0:
			return ltq_get_pll0_fdiv();
		case 1:
			return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
				BASIC_FREQUENCY_1 :
				BASIC_FREQUENCY_2;
		case 2:
			return BASIS_REQUENCY_USB;
		default:
			/* Reserved source select (3): previously this fell
			 * out of the inner switch straight into the outer
			 * default label. Make the 0 return explicit. */
			return 0;
		}
	default:
		return 0;
	}
}
/*
 * cal_dsm - scale the PLL input clock by the ratio num/den
 *
 * Uses 64-bit intermediate math (do_div) so num * input_clock cannot
 * overflow 32 bits; the truncated quotient is returned as 32 bits.
 */
static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
{
u64 res, clock = get_input_clock(pll);
res = num * clock;
do_div(res, den);
return res;
}
/* MASH delta-sigma: fout = fin * (((N + 1) << 10) + K) / ((M + 1) << 10) */
static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
	unsigned int K)
{
	return cal_dsm(pll, ((N + 1) << 10) + K, (M + 1) << 10);
}
/* SSFF delta-sigma variant 1: 11-bit fraction with a +512 offset. */
static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
	unsigned int K)
{
	return cal_dsm(pll, ((N + 1) << 11) + K + 512, (M + 1) << 11);
}
/* SSFF delta-sigma variant 2: 12-bit fraction; K wraps around the
 * 512 offset (K - 512, or K + 3584 when K < 512). */
static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
	unsigned int K)
{
	unsigned int frac = (K >= 512) ? (K - 512) : (K + 3584);

	return cal_dsm(pll, ((N + 1) << 12) + frac, (M + 1) << 12);
}
/* Dispatch to the delta-sigma modulator formula selected by the CGU:
 * SSFF variant 2 only when both DSMSEL and the phase divider are active,
 * otherwise the MASH formula. */
static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
	unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
{
	if (dsmsel && phase_div_en)
		return ssff_dsm_2(pll, M, N, K);
	return mash_dsm(pll, M, N, K);
}
/*
 * ltq_get_pll0_fosc - PLL0 oscillator output frequency in Hz
 *
 * Bypassed PLL passes its input clock through; otherwise the M/N(/K)
 * divider values from PLL0_CFG feed the selected delta-sigma formula
 * (K only when fractional mode is enabled).
 */
static inline unsigned int ltq_get_pll0_fosc(void)
{
if (CGU_PLL0_BYPASS)
return get_input_clock(0);
else
return !CGU_PLL0_CFG_FRAC_EN
? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
CGU_PLL0_CFG_DSMSEL,
CGU_PLL0_PHASE_DIVIDER_ENABLE)
: dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
CGU_PLL0_PHASE_DIVIDER_ENABLE);
}
/* PLL0 output divided by the PLL2 input divider, rounded to nearest. */
static unsigned int ltq_get_pll0_fdiv(void)
{
	unsigned int divider = CGU_PLL2_CFG_INPUT_DIV + 1;
	unsigned int fosc = ltq_get_pll0_fosc();

	return (fosc + divider / 2) / divider;
}
/*
 * ltq_get_io_region_clock - I/O region (DDR) clock in Hz
 *
 * Divides the PLL0 oscillator frequency by the ratio selected in
 * PLL2_CFG (1/2, 2/5, 1/3 or 1/4), each with rounding to nearest.
 */
unsigned int ltq_get_io_region_clock(void)
{
unsigned int ret = ltq_get_pll0_fosc();
switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
default:
case 0:
return (ret + 1) / 2;
case 1:
return (ret * 2 + 2) / 5;
case 2:
return (ret + 1) / 3;
case 3:
return (ret + 2) / 4;
}
}
EXPORT_SYMBOL(ltq_get_io_region_clock);
/* FPI bus clock in Hz; FPI2 runs at half the I/O-region clock when
 * the FPI_SEL bit in CGU_SYS is set. */
unsigned int ltq_get_fpi_bus_clock(int fpi)
{
	unsigned int clk = ltq_get_io_region_clock();

	if (fpi == 2 && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
		clk /= 2;
	return clk;
}
EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
/*
 * ltq_get_cpu_hz - CPU clock in Hz
 *
 * Bits 2-3 of CGU_SYS select a fixed 333 MHz, the DDR clock, twice the
 * DDR clock, or (default) half the DDR clock.
 */
unsigned int ltq_get_cpu_hz(void)
{
switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
case 0:
return CLOCK_333M;
case 4:
return DDR_HZ;
case 8:
return DDR_HZ << 1;
default:
return DDR_HZ >> 1;
}
}
EXPORT_SYMBOL(ltq_get_cpu_hz);
/* FPI clock in Hz: the DDR clock, halved when bit 6 of CGU_SYS is set. */
unsigned int ltq_get_fpi_hz(void)
{
	unsigned int ddr = DDR_HZ;

	return (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40) ? (ddr >> 1) : ddr;
}
EXPORT_SYMBOL(ltq_get_fpi_hz);
| gpl-2.0 |
AndroPlus-org/android_kernel_sony_msm8974ac | arch/arm/mach-pxa/colibri-pxa270-income.c | 4949 | 5712 | /*
* linux/arch/arm/mach-pxa/income.c
*
* Support for Income s.r.o. SH-Dmaster PXA270 SBC
*
* Copyright (C) 2010
* Marek Vasut <marek.vasut@gmail.com>
* Pavel Revak <palo@bielyvlk.sk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pwm_backlight.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/mmc.h>
#include <mach/ohci.h>
#include <mach/pxa27x.h>
#include <mach/pxa27x-udc.h>
#include <mach/pxafb.h>
#include "devices.h"
#include "generic.h"
#define GPIO114_INCOME_ETH_IRQ (114)
#define GPIO0_INCOME_SD_DETECT (0)
#define GPIO0_INCOME_SD_RO (1)
#define GPIO54_INCOME_LED_A (54)
#define GPIO55_INCOME_LED_B (55)
#define GPIO113_INCOME_TS_IRQ (113)
/******************************************************************************
* SD/MMC card controller
******************************************************************************/
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
/* SD/MMC: 3.2-3.4 V only, no software power control (gpio_power = -1),
 * card-detect and read-only sense on GPIO0/GPIO1. */
static struct pxamci_platform_data income_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.gpio_power = -1,
.gpio_card_detect = GPIO0_INCOME_SD_DETECT,
.gpio_card_ro = GPIO0_INCOME_SD_RO,
.detect_delay_ms = 200,
};
/* Register the PXA MMC controller with the board data above. */
static void __init income_mmc_init(void)
{
pxa_set_mci_info(&income_mci_platform_data);
}
#else
static inline void income_mmc_init(void) {}
#endif
/******************************************************************************
* USB Host
******************************************************************************/
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* USB host: per-port power mode, port 1 only, active-low power/sense. */
static struct pxaohci_platform_data income_ohci_info = {
.port_mode = PMM_PERPORT_MODE,
.flags = ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW,
};
/* Register the PXA OHCI host controller. */
static void __init income_uhc_init(void)
{
pxa_set_ohci_info(&income_ohci_info);
}
#else
static inline void income_uhc_init(void) {}
#endif
/******************************************************************************
* LED
******************************************************************************/
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
/* Two active-low green LEDs on GPIO54/GPIO55, no default trigger. */
struct gpio_led income_gpio_leds[] = {
{
.name = "income:green:leda",
.default_trigger = "none",
.gpio = GPIO54_INCOME_LED_A,
.active_low = 1,
},
{
.name = "income:green:ledb",
.default_trigger = "none",
.gpio = GPIO55_INCOME_LED_B,
.active_low = 1,
}
};
static struct gpio_led_platform_data income_gpio_led_info = {
.leds = income_gpio_leds,
.num_leds = ARRAY_SIZE(income_gpio_leds),
};
static struct platform_device income_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &income_gpio_led_info,
}
};
/* Register the leds-gpio platform device. */
static void __init income_led_init(void)
{
platform_device_register(&income_leds);
}
#else
static inline void income_led_init(void) {}
#endif
/******************************************************************************
* I2C
******************************************************************************/
#if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE)
/* On-board I2C devices: DS1340 RTC at 0x68, LM75 temp sensor at 0x4f. */
static struct i2c_board_info __initdata income_i2c_devs[] = {
{
I2C_BOARD_INFO("ds1340", 0x68),
}, {
I2C_BOARD_INFO("lm75", 0x4f),
},
};
/* Register both PXA I2C controllers and the board devices on bus 0. */
static void __init income_i2c_init(void)
{
pxa_set_i2c_info(NULL);
pxa27x_set_i2c_power_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(income_i2c_devs));
}
#else
static inline void income_i2c_init(void) {}
#endif
/******************************************************************************
* Framebuffer
******************************************************************************/
#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
/* Single 320x240 panel timing; 32 bpp framebuffer on an 18-bit TFT. */
static struct pxafb_mode_info income_lcd_modes[] = {
{
.pixclock = 144700,
.xres = 320,
.yres = 240,
.bpp = 32,
.depth = 18,
.left_margin = 10,
.right_margin = 10,
.upper_margin = 7,
.lower_margin = 8,
.hsync_len = 20,
.vsync_len = 2,
.sync = FB_SYNC_VERT_HIGH_ACT,
},
};
static struct pxafb_mach_info income_lcd_screen = {
.modes = income_lcd_modes,
.num_modes = ARRAY_SIZE(income_lcd_modes),
.lcd_conn = LCD_COLOR_TFT_18BPP | LCD_PCLK_EDGE_FALL,
};
/* Register the PXA framebuffer with the panel description above. */
static void __init income_lcd_init(void)
{
pxa_set_fb_info(NULL, &income_lcd_screen);
}
#else
static inline void income_lcd_init(void) {}
#endif
/******************************************************************************
* Backlight
******************************************************************************/
#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
/* PWM backlight on PWM0, 1 ms period, boots at half brightness. */
static struct platform_pwm_backlight_data income_backlight_data = {
.pwm_id = 0,
.max_brightness = 0x3ff,
.dft_brightness = 0x1ff,
.pwm_period_ns = 1000000,
};
static struct platform_device income_backlight = {
.name = "pwm-backlight",
.dev = {
.parent = &pxa27x_device_pwm0.dev,
.platform_data = &income_backlight_data,
},
};
/* Register the pwm-backlight platform device. */
static void __init income_pwm_init(void)
{
platform_device_register(&income_backlight);
}
#else
static inline void income_pwm_init(void) {}
#endif
/*
 * colibri_pxa270_income_boardinit - board-level init for the Income SBC
 *
 * Registers the three PXA UARTs with default settings, then each
 * optional subsystem (the income_*_init calls are empty stubs when the
 * corresponding driver is not configured).
 */
void __init colibri_pxa270_income_boardinit(void)
{
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
income_mmc_init();
income_uhc_init();
income_led_init();
income_i2c_init();
income_lcd_init();
income_pwm_init();
}
| gpl-2.0 |
TEAM-RAZOR-DEVICES/android_kernel_lge_v500 | drivers/rtc/rtc-tile.c | 8021 | 3496 | /*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Tilera-specific RTC driver.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
/* Platform device pointer. */
static struct platform_device *tile_rtc_platform_device;
/*
* RTC read routine. Gets time info from RTC chip via hypervisor syscall.
*/
/*
 * read_rtc_time - fetch wall-clock time from the hypervisor RTC
 *
 * Copies the HV_RTCTime fields into @tm; day-of-week/day-of-year/DST
 * are not provided by the hypervisor and are zeroed. An invalid time
 * is only warned about, not treated as an error (always returns 0).
 */
static int read_rtc_time(struct device *dev, struct rtc_time *tm)
{
HV_RTCTime hvtm = hv_get_rtc();
tm->tm_sec = hvtm.tm_sec;
tm->tm_min = hvtm.tm_min;
tm->tm_hour = hvtm.tm_hour;
tm->tm_mday = hvtm.tm_mday;
tm->tm_mon = hvtm.tm_mon;
tm->tm_year = hvtm.tm_year;
tm->tm_wday = 0;
tm->tm_yday = 0;
tm->tm_isdst = 0;
if (rtc_valid_tm(tm) < 0)
dev_warn(dev, "Read invalid date/time from RTC\n");
return 0;
}
/*
* RTC write routine. Sends time info to hypervisor via syscall, to be
* written to RTC chip.
*/
/*
 * set_rtc_time - hand wall-clock time to the hypervisor RTC
 *
 * Copies the rtc_time fields into an HV_RTCTime and issues the
 * hypervisor call; the call has no failure path, so always returns 0.
 */
static int set_rtc_time(struct device *dev, struct rtc_time *tm)
{
HV_RTCTime hvtm;
hvtm.tm_sec = tm->tm_sec;
hvtm.tm_min = tm->tm_min;
hvtm.tm_hour = tm->tm_hour;
hvtm.tm_mday = tm->tm_mday;
hvtm.tm_mon = tm->tm_mon;
hvtm.tm_year = tm->tm_year;
hv_set_rtc(hvtm);
return 0;
}
/*
 * RTC class operations: read and set wall-clock time via the
 * hypervisor-backed helpers above; no alarm support.
 */
static const struct rtc_class_ops tile_rtc_ops = {
.read_time = read_rtc_time,
.set_time = set_rtc_time,
};
/*
* Device probe routine.
*/
/*
 * tile_rtc_probe - register the RTC class device
 *
 * Registers "tile" with the RTC core and stashes the handle in the
 * platform device's drvdata for removal. Returns 0 or a negative errno.
 */
static int __devinit tile_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
rtc = rtc_device_register("tile",
&dev->dev, &tile_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
platform_set_drvdata(dev, rtc);
return 0;
}
/*
* Device cleanup routine.
*/
/*
 * tile_rtc_remove - unregister the RTC class device and clear drvdata
 */
static int __devexit tile_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
if (rtc)
rtc_device_unregister(rtc);
platform_set_drvdata(dev, NULL);
return 0;
}
/* Platform driver bound by name to the "rtc-tile" device created in
 * tile_rtc_driver_init(). */
static struct platform_driver tile_rtc_platform_driver = {
.driver = {
.name = "rtc-tile",
.owner = THIS_MODULE,
},
.probe = tile_rtc_probe,
.remove = __devexit_p(tile_rtc_remove),
};
/*
* Driver init routine.
*/
/*
 * tile_rtc_driver_init - module init
 *
 * Registers the platform driver, then creates and adds the matching
 * platform device. Uses goto-based unwind so each failure path releases
 * exactly what was acquired before it.
 */
static int __init tile_rtc_driver_init(void)
{
int err;
err = platform_driver_register(&tile_rtc_platform_driver);
if (err)
return err;
tile_rtc_platform_device = platform_device_alloc("rtc-tile", 0);
if (tile_rtc_platform_device == NULL) {
err = -ENOMEM;
goto exit_driver_unregister;
}
err = platform_device_add(tile_rtc_platform_device);
if (err)
goto exit_device_put;
return 0;
exit_device_put:
platform_device_put(tile_rtc_platform_device);
exit_driver_unregister:
platform_driver_unregister(&tile_rtc_platform_driver);
return err;
}
/*
* Driver cleanup routine.
*/
/*
 * tile_rtc_driver_exit - module exit
 *
 * Unregister the platform device created in tile_rtc_driver_init()
 * before the driver; previously the device was never unregistered,
 * leaking it on every module unload.
 */
static void __exit tile_rtc_driver_exit(void)
{
	platform_device_unregister(tile_rtc_platform_device);
	platform_driver_unregister(&tile_rtc_platform_driver);
}
module_init(tile_rtc_driver_init);
module_exit(tile_rtc_driver_exit);
MODULE_DESCRIPTION("Tilera-specific Real Time Clock Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-tile");
| gpl-2.0 |
ISTweak/android_kernel_sony_blue_hayabusa | drivers/usb/c67x00/c67x00-sched.c | 8277 | 30484 | /*
* c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA.
*/
#include <linux/kthread.h>
#include <linux/slab.h>
#include "c67x00.h"
#include "c67x00-hcd.h"
/*
* These are the stages for a control urb, they are kept
* in both urb->interval and td->privdata.
*/
#define SETUP_STAGE 0
#define DATA_STAGE 1
#define STATUS_STAGE 2
/* -------------------------------------------------------------------------- */
/**
 * struct c67x00_ep_data: Host endpoint data structure
 * @queue: URB-private entries queued on this endpoint
 * @node: linkage in the scheduler's endpoint list
 * @hep: the USB-core host endpoint this data belongs to
 * @dev: USB device owning the endpoint
 * @next_frame: next scheduled frame (interrupt/isochronous only)
 */
struct c67x00_ep_data {
struct list_head queue;
struct list_head node;
struct usb_host_endpoint *hep;
struct usb_device *dev;
u16 next_frame; /* For int/isoc transactions */
};
/**
 * struct c67x00_td - transfer descriptor
 *
 * The hardware part (bytes 0-11) is little-endian; the software part
 * below it is kept in CPU endianness.
 */
struct c67x00_td {
/* HW specific part */
__le16 ly_base_addr; /* Bytes 0-1 */
__le16 port_length; /* Bytes 2-3 */
u8 pid_ep; /* Byte 4 */
u8 dev_addr; /* Byte 5 */
u8 ctrl_reg; /* Byte 6 */
u8 status; /* Byte 7 */
u8 retry_cnt; /* Byte 8 */
#define TT_OFFSET 2
#define TT_CONTROL 0
#define TT_ISOCHRONOUS 1
#define TT_BULK 2
#define TT_INTERRUPT 3
u8 residue; /* Byte 9 */
__le16 next_td_addr; /* Bytes 10-11 */
/* SW part */
struct list_head td_list;
u16 td_addr;
void *data;
struct urb *urb;
unsigned long privdata;
/* These are needed for handling the toggle bits:
* an urb can be dequeued while a td is in progress
* after checking the td, the toggle bit might need to
* be fixed */
struct c67x00_ep_data *ep_data;
unsigned int pipe;
};
/* Per-URB scheduler state, hung off urb->hcpriv. */
struct c67x00_urb_priv {
struct list_head hep_node;
struct urb *urb;
int port;
int cnt; /* packet number for isoc */
int status;
struct c67x00_ep_data *ep_data;
};
#define td_udev(td) ((td)->ep_data->dev)
#define CY_TD_SIZE 12
#define TD_PIDEP_OFFSET 0x04
#define TD_PIDEPMASK_PID 0xF0
#define TD_PIDEPMASK_EP 0x0F
#define TD_PORTLENMASK_DL 0x02FF
#define TD_PORTLENMASK_PN 0xC000
#define TD_STATUS_OFFSET 0x07
#define TD_STATUSMASK_ACK 0x01
#define TD_STATUSMASK_ERR 0x02
#define TD_STATUSMASK_TMOUT 0x04
#define TD_STATUSMASK_SEQ 0x08
#define TD_STATUSMASK_SETUP 0x10
#define TD_STATUSMASK_OVF 0x20
#define TD_STATUSMASK_NAK 0x40
#define TD_STATUSMASK_STALL 0x80
#define TD_ERROR_MASK (TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
TD_STATUSMASK_STALL)
#define TD_RETRYCNT_OFFSET 0x08
#define TD_RETRYCNTMASK_ACT_FLG 0x10
#define TD_RETRYCNTMASK_TX_TYPE 0x0C
#define TD_RETRYCNTMASK_RTY_CNT 0x03
#define TD_RESIDUE_OVERFLOW 0x80
#define TD_PID_IN 0x90
/* Residue: signed 8bits, neg -> OVERFLOW, pos -> UNDERFLOW */
#define td_residue(td) ((__s8)(td->residue))
#define td_ly_base_addr(td) (__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td) (__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td) (__le16_to_cpu((td)->next_td_addr))
#define td_active(td) ((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td) (td_port_length(td) & TD_PORTLENMASK_DL)
#define td_sequence_ok(td) (!td->status || \
(!(td->status & TD_STATUSMASK_SEQ) == \
!(td->ctrl_reg & SEQ_SEL)))
#define td_acked(td) (!td->status || \
(td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td) (td_length(td) - td_residue(td))
/* -------------------------------------------------------------------------- */
#ifdef DEBUG
/**
 * dbg_td - Dump the contents of the TD
 *
 * Logs every hardware field plus a hex dump of the TD's data buffer;
 * compiled to an empty stub when DEBUG is not defined.
 */
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
struct device *dev = c67x00_hcd_dev(c67x00);
dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
dev_dbg(dev, "urb: 0x%p\n", td->urb);
dev_dbg(dev, "endpoint: %4d\n", usb_pipeendpoint(td->pipe));
dev_dbg(dev, "pipeout: %4d\n", usb_pipeout(td->pipe));
dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
dev_dbg(dev, "port_length: 0x%04x\n", td_port_length(td));
dev_dbg(dev, "pid_ep: 0x%02x\n", td->pid_ep);
dev_dbg(dev, "dev_addr: 0x%02x\n", td->dev_addr);
dev_dbg(dev, "ctrl_reg: 0x%02x\n", td->ctrl_reg);
dev_dbg(dev, "status: 0x%02x\n", td->status);
dev_dbg(dev, "retry_cnt: 0x%02x\n", td->retry_cnt);
dev_dbg(dev, "residue: 0x%02x\n", td->residue);
dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
dev_dbg(dev, "data:");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
td->data, td_length(td), 1);
}
#else /* DEBUG */
static inline void
dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }
#endif /* DEBUG */
/* -------------------------------------------------------------------------- */
/* Helper functions */
/* Read the SIE's current frame counter, masked to the valid frame range. */
static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
	u16 frame;

	frame = c67x00_ll_husb_get_frame(c67x00->sie);
	return frame & HOST_FRAME_MASK;
}
/**
 * frame_add - advance a frame number by an offset
 * @a:	base frame number
 * @b:	number of frames to add
 *
 * Frame numbers wrap around at HOST_FRAME_MASK + 1 (software wraparound).
 */
static inline u16 frame_add(u16 a, u16 b)
{
	u16 sum = a + b;

	return sum & HOST_FRAME_MASK;
}
/**
 * frame_after - is frame a strictly after frame b (modulo wraparound)
 *
 * "After" means a can be reached from b by advancing less than half of
 * the frame-number space.
 */
static inline int frame_after(u16 a, u16 b)
{
	u16 distance = (HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK;

	return distance < (HOST_FRAME_MASK / 2);
}
/**
 * frame_after_eq - is frame a after or equal to frame b (modulo wraparound)
 *
 * Same distance test as frame_after(), shifted by one so that equality
 * (distance 0 before the +1) also counts.
 */
static inline int frame_after_eq(u16 a, u16 b)
{
	u16 distance = (HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK;

	return distance < (HOST_FRAME_MASK / 2);
}
/* -------------------------------------------------------------------------- */
/**
 * c67x00_release_urb - remove link from all tds to this urb
 * Disconnects the urb from its tds, so that it can be given back.
 * Also decrements the driver's urb counters and frees the urb private
 * data (urb->hcpriv); the urb itself is NOT given back here.
 * pre: urb->hcpriv != NULL, caller holds c67x00->lock
 */
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp;

	BUG_ON(!urb);

	c67x00->urb_count--;

	/* last iso urb gone: restore the standard per-frame bandwidth */
	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		c67x00->urb_iso_count--;
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_STD;
	}

	/* TODO this might be not so efficient when we've got many urbs!
	 * Alternatives:
	 *   * only clear when needed
	 *   * keep a list of tds with each urbp
	 */
	/* Clear any TD still pointing at this urb so it is not completed
	 * against a urb that has been given back */
	list_for_each_entry(td, &c67x00->td_list, td_list)
		if (urb == td->urb)
			td->urb = NULL;

	urbp = urb->hcpriv;
	urb->hcpriv = NULL;
	list_del(&urbp->hep_node);
	kfree(urbp);	/* urbp is invalid from here on */
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_ep_data_alloc - get (or create) the per-endpoint scheduling data
 * for the endpoint that @urb addresses.
 * Returns the endpoint's c67x00_ep_data, or NULL on allocation failure.
 * pre: caller holds c67x00->lock (uses GFP_ATOMIC)
 */
static struct c67x00_ep_data *
c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct usb_host_endpoint *hep = urb->ep;
	struct c67x00_ep_data *ep_data;
	int type;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);

	/* Check if endpoint already has a c67x00_ep_data struct allocated */
	if (hep->hcpriv) {
		ep_data = hep->hcpriv;
		/* next service frame already passed: reschedule to the
		 * next frame */
		if (frame_after(c67x00->current_frame, ep_data->next_frame))
			ep_data->next_frame =
			    frame_add(c67x00->current_frame, 1);
		return hep->hcpriv;
	}

	/* Allocate and initialize a new c67x00 endpoint data structure */
	ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
	if (!ep_data)
		return NULL;

	INIT_LIST_HEAD(&ep_data->queue);
	INIT_LIST_HEAD(&ep_data->node);
	ep_data->hep = hep;

	/* hold a reference to udev as long as this endpoint lives,
	 * this is needed to possibly fix the data toggle */
	ep_data->dev = usb_get_dev(urb->dev);
	hep->hcpriv = ep_data;

	/* For ISOC and INT endpoints, start ASAP: */
	ep_data->next_frame = frame_add(c67x00->current_frame, 1);

	/* Add the endpoint data to one of the pipe lists; must be added
	   in order of endpoint address */
	type = usb_pipetype(urb->pipe);
	/* NOTE(review): ep_data->node was INIT_LIST_HEAD'ed just above, so
	 * list_empty() is always true here and the sorted-insert branch
	 * below is dead code; the loop would also never append at the tail
	 * when no larger endpoint address is found -- confirm intent. */
	if (list_empty(&ep_data->node)) {
		list_add(&ep_data->node, &c67x00->list[type]);
	} else {
		struct c67x00_ep_data *prev;

		list_for_each_entry(prev, &c67x00->list[type], node) {
			if (prev->hep->desc.bEndpointAddress >
			    hep->desc.bEndpointAddress) {
				list_add(&ep_data->node, prev->node.prev);
				break;
			}
		}
	}

	return ep_data;
}
/*
 * c67x00_ep_data_free - release the per-endpoint scheduling data of @hep
 * Returns -EBUSY if urbs are still queued on the endpoint, 0 otherwise
 * (including when there was nothing to free).
 */
static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
{
	struct c67x00_ep_data *ep_data = hep->hcpriv;

	if (!ep_data)
		return 0;

	/* cannot free while transfers are still queued on this endpoint */
	if (!list_empty(&ep_data->queue))
		return -EBUSY;

	usb_put_dev(ep_data->dev);	/* drop the ref taken at alloc time */
	list_del(&ep_data->queue);
	list_del(&ep_data->node);

	kfree(ep_data);
	hep->hcpriv = NULL;

	return 0;
}
/*
 * c67x00_endpoint_disable - usb_hcd hook; waits until all queued transfers
 * of @ep have completed, then frees its scheduling data.  May sleep.
 */
void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;

	if (!list_empty(&ep->urb_list))
		dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");

	spin_lock_irqsave(&c67x00->lock, flags);

	/* loop waiting for all transfers in the endpoint queue to complete */
	while (c67x00_ep_data_free(ep)) {
		/* Drop the lock so we can sleep waiting for the hardware */
		spin_unlock_irqrestore(&c67x00->lock, flags);

		/* it could happen that we reinitialize this completion, while
		 * somebody was waiting for that completion.  The timeout and
		 * while loop handle such cases, but this might be improved */
		INIT_COMPLETION(c67x00->endpoint_disable);
		c67x00_sched_kick(c67x00);	/* let the scheduler drain it */
		wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);

		spin_lock_irqsave(&c67x00->lock, flags);
	}

	spin_unlock_irqrestore(&c67x00->lock, flags);
}
/* -------------------------------------------------------------------------- */
/* Return the root-hub port number (1-based) that @dev ultimately hangs
 * off.  Walks up the hub chain to the device directly below the root hub.
 * Must not be called for the root hub itself: dev->parent is dereferenced. */
static inline int get_root_port(struct usb_device *dev)
{
	struct usb_device *d;

	for (d = dev; d->parent->parent; d = d->parent)
		;
	return d->portnum;
}
/*
 * c67x00_urb_enqueue - usb_hcd hook to queue a urb for transfer
 * Returns 0 on success; on error the urb is not queued and a negative
 * errno is returned.
 */
int c67x00_urb_enqueue(struct usb_hcd *hcd,
		       struct urb *urb, gfp_t mem_flags)
{
	int ret;
	unsigned long flags;
	struct c67x00_urb_priv *urbp;
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	int port = get_root_port(urb->dev)-1;	/* 0-based port index */

	spin_lock_irqsave(&c67x00->lock, flags);

	/* Make sure host controller is running */
	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto err_not_linked;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err_not_linked;

	/* Allocate and initialize urb private data */
	urbp = kzalloc(sizeof(*urbp), mem_flags);
	if (!urbp) {
		ret = -ENOMEM;
		goto err_urbp;
	}

	INIT_LIST_HEAD(&urbp->hep_node);
	urbp->urb = urb;
	urbp->port = port;

	urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);

	if (!urbp->ep_data) {
		ret = -ENOMEM;
		goto err_epdata;
	}

	/* TODO claim bandwidth with usb_claim_bandwidth?
	 * also release it somewhere! */

	urb->hcpriv = urbp;

	urb->actual_length = 0;	/* Nothing received/transmitted yet */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		/* urb->interval doubles as the control-transfer stage,
		 * advanced by c67x00_handle_successful_td() */
		urb->interval = SETUP_STAGE;
		break;
	case PIPE_INTERRUPT:
		break;
	case PIPE_BULK:
		break;
	case PIPE_ISOCHRONOUS:
		/* first iso urb: reserve a larger per-frame budget */
		if (c67x00->urb_iso_count == 0)
			c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
		c67x00->urb_iso_count++;
		/* Assume always URB_ISO_ASAP, FIXME */
		if (list_empty(&urbp->ep_data->queue))
			urb->start_frame = urbp->ep_data->next_frame;
		else {
			/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(urbp->ep_data->queue.prev,
					      struct c67x00_urb_priv,
					      hep_node)->urb;
			urb->start_frame =
			    frame_add(last_urb->start_frame,
				      last_urb->number_of_packets *
				      last_urb->interval);
		}
		urbp->cnt = 0;	/* iso packet index starts at 0 */
		break;
	}

	/* Add the URB to the endpoint queue */
	list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);

	/* If this is the only URB, kick start the controller */
	if (!c67x00->urb_count++)
		c67x00_ll_hpi_enable_sofeop(c67x00->sie);

	c67x00_sched_kick(c67x00);
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

err_epdata:
	kfree(urbp);
err_urbp:
	usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
	spin_unlock_irqrestore(&c67x00->lock, flags);

	return ret;
}
/*
 * c67x00_urb_dequeue - usb_hcd hook to abort a previously queued urb
 * Returns 0 on success, or the error from usb_hcd_check_unlink_urb().
 */
int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&c67x00->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(hcd, urb);

	/* drop the lock across the completion callback; irqs stay disabled
	 * (flags are restored only after re-acquiring the lock) */
	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&c67x00->lock);

	spin_unlock_irqrestore(&c67x00->lock, flags);

	return 0;

 done:
	spin_unlock_irqrestore(&c67x00->lock, flags);
	return rc;
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_giveback_urb - detach @urb from the scheduler and complete it
 * with @status.
 * pre: c67x00 locked, urb unlocked
 */
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
	struct c67x00_urb_priv *urbp;

	if (!urb)
		return;

	urbp = urb->hcpriv;
	urbp->status = status;

	list_del_init(&urbp->hep_node);

	/* c67x00_release_urb() kfrees urbp, so the completion status must
	 * come from the local 'status' argument below: reading
	 * urbp->status after the release was a use-after-free. */
	c67x00_release_urb(c67x00, urb);
	usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
	/* drop the lock across the completion callback, as the HCD core
	 * requires; irqs remain disabled */
	spin_unlock(&c67x00->lock);
	usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
	spin_lock(&c67x00->lock);
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_claim_frame_bw - reserve frame bandwidth and chip memory for one
 * transaction of @len bytes on behalf of @urb.
 * Returns 0 on success, -EMSGSIZE if the current frame has no room left
 * (bit time, TD space, or data-buffer space).  @periodic transfers are
 * additionally checked against the reserved periodic share of the frame.
 * On success the bit time is added to the frame's running totals.
 */
static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
				 int len, int periodic)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	int bit_time;

	/* According to the C67x00 BIOS user manual, page 3-18,19, the
	 * following calculations provide the full speed bit times for
	 * a transaction.
	 *
	 * FS(in)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(in,iso)	=  90.5 +  9.36*BC + HOST_DELAY
	 * FS(out)	= 112.5 +  9.36*BC + HOST_DELAY
	 * FS(out,iso)	=  78.4 +  9.36*BC + HOST_DELAY
	 * LS(in)	= 802.4 + 75.78*BC + HOST_DELAY
	 * LS(out)	= 802.6 + 74.67*BC + HOST_DELAY
	 *
	 * HOST_DELAY == 106 for the c67200 and c67300.
	 */
	/* make calculations in 1/100 bit times to maintain resolution */
	if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
		/* Low speed pipe */
		if (usb_pipein(urb->pipe))
			bit_time = 80240 + 7578*len;
		else
			bit_time = 80260 + 7467*len;
	} else {
		/* FS pipes */
		if (usb_pipeisoc(urb->pipe))
			bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
		else
			bit_time = 11250;
		bit_time += 936*len;
	}

	/* Scale back down to integer bit times.  Use a host delay of 106.
	 * (this is the only place it is used) */
	bit_time = ((bit_time+50) / 100) + 106;

	/* would the transaction exceed the frame's remaining bit time? */
	if (unlikely(bit_time + c67x00->bandwidth_allocated >=
		     c67x00->max_frame_bw))
		return -EMSGSIZE;

	/* is there still room for another TD in chip memory? */
	if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
		     c67x00->td_base_addr + SIE_TD_SIZE))
		return -EMSGSIZE;

	/* is there still room for the data buffer in chip memory? */
	if (unlikely(c67x00->next_buf_addr + len >=
		     c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
		return -EMSGSIZE;

	if (periodic) {
		if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
			     MAX_PERIODIC_BW(c67x00->max_frame_bw)))
			return -EMSGSIZE;
		c67x00->periodic_bw_allocated += bit_time;
	}

	c67x00->bandwidth_allocated += bit_time;
	return 0;
}
/* -------------------------------------------------------------------------- */
/**
 * c67x00_create_td - allocate a TD for one transaction of @urb and append
 * it to the frame's TD list.
 * @data/@len:	payload buffer and length for this transaction
 * @pid:	USB token PID (USB_PID_IN/OUT/SETUP)
 * @toggle:	data toggle to schedule (sets SEQ_SEL in the control reg)
 * @privdata:	per-transfer cookie (control stage or iso packet index)
 *
 * Claims frame bandwidth first; returns -EMSGSIZE when the frame is full
 * (expected condition, not an error), -ENOMEM on allocation failure, 0 on
 * success.
 * td_addr and buf_addr must be word aligned
 */
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
			    void *data, int len, int pid, int toggle,
			    unsigned long privdata)
{
	struct c67x00_td *td;
	struct c67x00_urb_priv *urbp = urb->hcpriv;
	const __u8 active_flag = 1, retry_cnt = 1;
	__u8 cmd = 0;
	int tt = 0;

	if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
				  || usb_pipeint(urb->pipe)))
		return -EMSGSIZE;	/* Not really an error, but expected */

	td = kzalloc(sizeof(*td), GFP_ATOMIC);
	if (!td)
		return -ENOMEM;

	td->pipe = urb->pipe;
	td->ep_data = urbp->ep_data;

	/* low-speed device behind a full-speed port needs a preamble */
	if ((td_udev(td)->speed == USB_SPEED_LOW) &&
	    !(c67x00->low_speed_ports & (1 << urbp->port)))
		cmd |= PREAMBLE_EN;

	switch (usb_pipetype(td->pipe)) {
	case PIPE_ISOCHRONOUS:
		tt = TT_ISOCHRONOUS;
		cmd |= ISO_EN;
		break;
	case PIPE_CONTROL:
		tt = TT_CONTROL;
		break;
	case PIPE_BULK:
		tt = TT_BULK;
		break;
	case PIPE_INTERRUPT:
		tt = TT_INTERRUPT;
		break;
	}

	if (toggle)
		cmd |= SEQ_SEL;

	cmd |= ARM_EN;

	/* SW part */
	td->td_addr = c67x00->next_td_addr;
	c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;

	/* HW part: fields in the chip's TD layout (little endian) */
	td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
	td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
					(urbp->port << 14) | (len & 0x3FF));
	td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
	    (usb_pipeendpoint(td->pipe) & 0xF);
	td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
	td->ctrl_reg = cmd;
	td->status = 0;
	td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
	td->residue = 0;
	td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);

	/* SW part */
	td->data = data;
	td->urb = urb;
	td->privdata = privdata;

	c67x00->next_buf_addr += (len + 1) & ~0x01;	/* properly align */

	list_add_tail(&td->td_list, &c67x00->td_list);
	return 0;
}
/* Unlink a TD from the frame's TD list and free it */
static inline void c67x00_release_td(struct c67x00_td *td)
{
	list_del_init(&td->td_list);
	kfree(td);
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_add_data_urb - create TDs for the (remaining) data of @urb,
 * splitting it into max-packet-size transactions.
 * Also emits a trailing zero-length packet when URB_ZERO_PACKET requires
 * one.  For control pipes only a single transaction is queued per frame.
 * Returns 0, or the error from c67x00_create_td() (e.g. -EMSGSIZE when
 * the frame is full).
 */
static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int remaining;
	int toggle;
	int pid;
	int ret = 0;
	int maxps;
	int need_empty;

	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			       usb_pipeout(urb->pipe));
	remaining = urb->transfer_buffer_length - urb->actual_length;

	maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

	/* OUT transfer that is a multiple of maxps and requests a ZLP */
	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	while (remaining || need_empty) {
		int len;
		char *td_buf;

		len = (remaining > maxps) ? maxps : remaining;
		if (!len)
			need_empty = 0;	/* this iteration sends the ZLP */

		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
		    remaining;
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
				       DATA_STAGE);
		if (ret)
			return ret;	/* td wasn't created */

		toggle ^= 1;
		remaining -= len;
		if (usb_pipecontrol(urb->pipe))
			break;	/* control data: one packet per frame */
	}

	return 0;
}
/**
 * c67x00_add_ctrl_urb - queue the TD(s) for the current stage of a
 * control transfer; urb->interval holds the stage (SETUP/DATA/STATUS)
 * and is advanced by c67x00_handle_successful_td().
 * return 0 in case more bandwidth is available, else errorcode
 */
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	int ret;
	int pid;

	switch (urb->interval) {
	default:
	case SETUP_STAGE:
		ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
				       8, USB_PID_SETUP, 0, SETUP_STAGE);
		if (ret)
			return ret;
		urb->interval = SETUP_STAGE;
		/* data stage always starts with DATA1 */
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			      usb_pipeout(urb->pipe), 1);
		break;
	case DATA_STAGE:
		if (urb->transfer_buffer_length) {
			ret = c67x00_add_data_urb(c67x00, urb);
			if (ret)
				return ret;
			break;
		}		/* else fallthrough */
	case STATUS_STAGE:
		/* status stage runs in the opposite direction, toggle DATA1 */
		pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
		ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
				       STATUS_STAGE);
		if (ret)
			return ret;
		break;
	}

	return 0;
}
/*
 * c67x00_add_int_urb - queue data TDs for an interrupt urb when its
 * service interval has elapsed.
 * return 0 in case more bandwidth is available, else errorcode
 */
static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	/* not due for service in this frame */
	if (!frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame))
		return 0;

	urbp->ep_data->next_frame =
	    frame_add(urbp->ep_data->next_frame, urb->interval);
	return c67x00_add_data_urb(c67x00, urb);
}
/*
 * c67x00_add_iso_urb - queue the next isochronous packet of @urb when its
 * scheduled frame has been reached.  Always returns 0; per-packet errors
 * are recorded in urb->iso_frame_desc[].status.
 */
static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
	struct c67x00_urb_priv *urbp = urb->hcpriv;

	if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
		char *td_buf;
		int len, pid, ret;

		BUG_ON(urbp->cnt >= urb->number_of_packets);

		td_buf = urb->transfer_buffer +
		    urb->iso_frame_desc[urbp->cnt].offset;
		len = urb->iso_frame_desc[urbp->cnt].length;
		pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

		/* privdata carries the packet index for completion handling */
		ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
				       urbp->cnt);
		if (ret) {
			/* TD creation failed: mark this packet as skipped */
			printk(KERN_DEBUG "create failed: %d\n", ret);
			urb->iso_frame_desc[urbp->cnt].actual_length = 0;
			urb->iso_frame_desc[urbp->cnt].status = ret;
			if (urbp->cnt + 1 == urb->number_of_packets)
				c67x00_giveback_urb(c67x00, urb, 0);
		}

		urbp->ep_data->next_frame =
		    frame_add(urbp->ep_data->next_frame, urb->interval);
		urbp->cnt++;
	}
	return 0;
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_fill_from_list - for every endpoint of pipe @type, schedule the
 * first queued urb via the type-specific @add callback.
 */
static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
				  int (*add)(struct c67x00_hcd *, struct urb *))
{
	struct c67x00_ep_data *ep_data;
	struct urb *urb;

	/* traverse every endpoint on the list */
	list_for_each_entry(ep_data, &c67x00->list[type], node) {
		if (!list_empty(&ep_data->queue)) {
			/* and add the first urb */
			/* isochronous transfer rely on this */
			urb = list_entry(ep_data->queue.next,
					 struct c67x00_urb_priv,
					 hep_node)->urb;
			add(c67x00, urb);
		}
	}
}
/*
 * c67x00_fill_frame - build the TD list for the next frame, giving
 * periodic traffic (iso, then interrupt) priority over control and bulk.
 */
static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *ttd;

	/* Check if we can proceed */
	if (!list_empty(&c67x00->td_list)) {
		dev_warn(c67x00_hcd_dev(c67x00),
			 "TD list not empty! This should not happen!\n");
		/* defensively drop any leftover TDs before rebuilding */
		list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
			dbg_td(c67x00, td, "Unprocessed td");
			c67x00_release_td(td);
		}
	}

	/* Reinitialize variables */
	c67x00->bandwidth_allocated = 0;
	c67x00->periodic_bw_allocated = 0;

	c67x00->next_td_addr = c67x00->td_base_addr;
	c67x00->next_buf_addr = c67x00->buf_base_addr;

	/* Fill the list */
	c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
	c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
	c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
	c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
}
/* -------------------------------------------------------------------------- */
/**
 * c67x00_parse_td - read back a processed TD (and any received data)
 * from C67X00 chip memory into the driver's copy.
 */
static inline void
c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	/* refresh the TD image with the fields the SIE updated */
	c67x00_ll_read_mem_le16(c67x00->sie->dev,
				td->td_addr, td, CY_TD_SIZE);

	/* for IN transfers, also fetch the received payload */
	if (usb_pipein(td->pipe) && td_actual_bytes(td))
		c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					td->data, td_actual_bytes(td));
}
/*
 * c67x00_td_to_error - map a TD's error status bits to a urb errno.
 * Checked in priority order: error flag, stall, timeout; returns 0 when
 * none of them is set.
 */
static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	if (td->status & TD_STATUSMASK_ERR) {
		dbg_td(c67x00, td, "ERROR_FLAG");
		return -EILSEQ;
	}
	if (td->status & TD_STATUSMASK_STALL) {
		/* dbg_td(c67x00, td, "STALL"); */
		return -EPIPE;
	}
	if (td->status & TD_STATUSMASK_TMOUT) {
		dbg_td(c67x00, td, "TIMEOUT");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * c67x00_end_of_data - decide whether this TD terminates the data phase
 * of its urb: an empty or short packet ends it, and so does having no
 * bytes left (unless a zero-length packet is still owed).
 * Returns 1 when the data phase is finished, 0 otherwise.
 */
static inline int c67x00_end_of_data(struct c67x00_td *td)
{
	int maxps, need_empty, remaining;
	struct urb *urb = td->urb;
	int act_bytes;

	act_bytes = td_actual_bytes(td);

	if (unlikely(!act_bytes))
		return 1;	/* This was an empty packet */

	maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));

	if (unlikely(act_bytes < maxps))
		return 1;	/* Smaller than a full packet: short packet */

	remaining = urb->transfer_buffer_length - urb->actual_length;
	need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
	    usb_pipeout(urb->pipe) && !(remaining % maxps);

	if (unlikely(!remaining && !need_empty))
		return 1;	/* full buffer transferred, no ZLP owed */

	return 0;
}
/* -------------------------------------------------------------------------- */
/* Remove all td's from the list which come
 * after last_td and are meant for the same pipe.
 * This is used when a short packet has occurred */
static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
				     struct c67x00_td *last_td)
{
	struct c67x00_td *td, *tmp;
	td = last_td;
	tmp = last_td;
	/* walk forward from last_td; 'tmp' always holds the last node that
	 * survived, so after releasing 'td' we can step back to it and
	 * continue the walk from a still-valid list entry */
	while (td->td_list.next != &c67x00->td_list) {
		td = list_entry(td->td_list.next, struct c67x00_td, td_list);
		if (td->pipe == last_td->pipe) {
			c67x00_release_td(td);
			td = tmp;	/* back up to the surviving node */
		}

		tmp = td;
	}
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_handle_successful_td - account a successfully completed TD,
 * advance control-transfer stages, and give back the urb when it is done.
 * Isochronous TDs are handled by c67x00_handle_isoc() instead.
 */
static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
					struct c67x00_td *td)
{
	struct urb *urb = td->urb;

	if (!urb)
		return;

	urb->actual_length += td_actual_bytes(td);

	switch (usb_pipetype(td->pipe)) {
		/* isochronous tds are handled separately */
	case PIPE_CONTROL:
		/* urb->interval tracks the control-transfer stage */
		switch (td->privdata) {
		case SETUP_STAGE:
			urb->interval =
			    urb->transfer_buffer_length ?
			    DATA_STAGE : STATUS_STAGE;
			/* Don't count setup_packet with normal data: */
			urb->actual_length = 0;
			break;
		case DATA_STAGE:
			if (c67x00_end_of_data(td)) {
				urb->interval = STATUS_STAGE;
				/* drop queued TDs of the now-finished stage */
				c67x00_clear_pipe(c67x00, td);
			}
			break;
		case STATUS_STAGE:
			urb->interval = 0;
			c67x00_giveback_urb(c67x00, urb, 0);
			break;
		}
		break;

	case PIPE_INTERRUPT:
	case PIPE_BULK:
		if (unlikely(c67x00_end_of_data(td))) {
			c67x00_clear_pipe(c67x00, td);
			c67x00_giveback_urb(c67x00, urb, 0);
		}
		break;
	}
}
/*
 * c67x00_handle_isoc - record the outcome of one isochronous packet
 * (length and status in urb->iso_frame_desc[]) and give the urb back
 * after its last packet.  The packet index was stored in td->privdata
 * by c67x00_create_td().
 */
static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	struct urb *urb = td->urb;
	int cnt;

	/* the urb may already have been released/unlinked */
	if (!urb)
		return;

	cnt = td->privdata;	/* iso packet index */

	if (td->status & TD_ERROR_MASK)
		urb->error_count++;

	urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
	urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
	if (cnt + 1 == urb->number_of_packets)	/* Last packet */
		c67x00_giveback_urb(c67x00, urb, 0);
}
/* -------------------------------------------------------------------------- */
/**
 * c67x00_check_td_list - handle tds which have been processed by the c67x00
 * Reads every TD back from the chip, completes/aborts the owning urbs,
 * fixes data toggles, and frees all TDs on the list.
 * pre: current_td == 0 (the SIE has finished the whole list)
 */
static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td, *tmp;
	struct urb *urb;
	int ack_ok;
	int clear_endpoint;

	list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
		/* get the TD */
		c67x00_parse_td(c67x00, td);
		urb = td->urb;	/* urb can be NULL! */
		ack_ok = 0;
		clear_endpoint = 1;

		/* Handle isochronous transfers separately */
		if (usb_pipeisoc(td->pipe)) {
			clear_endpoint = 0;
			c67x00_handle_isoc(c67x00, td);
			goto cont;
		}

		/* When an error occurs, all td's for that pipe go into an
		 * inactive state. This state matches successful transfers so
		 * we must make sure not to service them. */
		if (td->status & TD_ERROR_MASK) {
			c67x00_giveback_urb(c67x00, urb,
					    c67x00_td_to_error(c67x00, td));
			goto cont;
		}

		/* NAKed, toggle mismatch or not acked: retry in a later
		 * frame, and flush the rest of this pipe's TDs below */
		if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
		    !td_acked(td))
			goto cont;

		/* Sequence ok and acked, don't need to fix toggle */
		ack_ok = 1;

		if (unlikely(td->status & TD_STATUSMASK_OVF)) {
			if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
				/* Overflow */
				c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
				goto cont;
			}
		}

		clear_endpoint = 0;
		c67x00_handle_successful_td(c67x00, td);

cont:
		if (clear_endpoint)
			c67x00_clear_pipe(c67x00, td);
		if (ack_ok)
			/* record the next expected toggle for the endpoint */
			usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
				      usb_pipeout(td->pipe),
				      !(td->ctrl_reg & SEQ_SEL));
		/* next in list could have been removed, due to clear_pipe! */
		tmp = list_entry(td->td_list.next, typeof(*td), td_list);
		c67x00_release_td(td);
	}
}
/* -------------------------------------------------------------------------- */
/*
 * c67x00_all_tds_processed - true iff the SIE has no current TD, i.e. it
 * has finished executing the TD list of the previous frame.
 */
static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
{
	/* If all tds are processed, we can check the previous frame (if
	 * there was any) and start our next frame.
	 */
	return !c67x00_ll_husb_get_current_td(c67x00->sie);
}
/**
 * c67x00_send_td - copy one TD (and its outgoing payload, if any) into
 * C67X00 chip memory.
 */
static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
	int len = td_length(td);

	/* OUT/SETUP transactions: stage the payload in chip memory first */
	if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
		c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
					 td->data, len);

	/* then write the TD itself */
	c67x00_ll_write_mem_le16(c67x00->sie->dev,
				 td->td_addr, td, CY_TD_SIZE);
}
/*
 * c67x00_send_frame - download all TDs of the current frame to the chip
 * and arm the SIE by pointing it at the first TD.
 */
static void c67x00_send_frame(struct c67x00_hcd *c67x00)
{
	struct c67x00_td *td;

	if (list_empty(&c67x00->td_list))
		dev_warn(c67x00_hcd_dev(c67x00),
			 "%s: td list should not be empty here!\n",
			 __func__);

	list_for_each_entry(td, &c67x00->td_list, td_list) {
		if (td->td_list.next == &c67x00->td_list)
			td->next_td_addr = 0;	/* Last td in list */

		c67x00_send_td(c67x00, td);
	}

	/* starts execution of the frame's TD chain */
	c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
}
/* -------------------------------------------------------------------------- */
/**
 * c67x00_do_work - Schedulers state machine
 * Runs from the tasklet: reaps the TDs of the finished frame, then (at
 * most once per frame) builds and sends the TD list for the next frame.
 */
static void c67x00_do_work(struct c67x00_hcd *c67x00)
{
	spin_lock(&c67x00->lock);
	/* Make sure all tds are processed */
	if (!c67x00_all_tds_processed(c67x00))
		goto out;

	c67x00_check_td_list(c67x00);

	/* no td's are being processed (current == 0)
	 * and all have been "checked" */
	complete(&c67x00->endpoint_disable);

	if (!list_empty(&c67x00->td_list))
		goto out;

	c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
	if (c67x00->current_frame == c67x00->last_frame)
		goto out;	/* Don't send tds in same frame */
	c67x00->last_frame = c67x00->current_frame;

	/* If no urbs are scheduled, our work is done */
	if (!c67x00->urb_count) {
		c67x00_ll_hpi_disable_sofeop(c67x00->sie);
		goto out;
	}

	c67x00_fill_frame(c67x00);
	if (!list_empty(&c67x00->td_list))
		/* TD's have been added to the frame */
		c67x00_send_frame(c67x00);

 out:
	spin_unlock(&c67x00->lock);
}
/* -------------------------------------------------------------------------- */
/* Tasklet body: run one iteration of the scheduler state machine */
static void c67x00_sched_tasklet(unsigned long __c67x00)
{
	struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
	c67x00_do_work(c67x00);
}

/* Kick the scheduler: schedule the tasklet with high priority */
void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
	tasklet_hi_schedule(&c67x00->tasklet);
}

/* Initialize the scheduler tasklet; always succeeds (returns 0) */
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
	tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
		     (unsigned long)c67x00);
	return 0;
}

/* Stop the scheduler, waiting for a running tasklet instance to finish */
void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
	tasklet_kill(&c67x00->tasklet);
}
| gpl-2.0 |
andip71/boeffla-kernel-oos-opx | drivers/isdn/gigaset/asyncdata.c | 9557 | 17022 | /*
* Common data handling layer for ser_gigaset and usb_gigaset
*
* Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
* Hansjoerg Lipp <hjlipp@web.de>,
* Stefan Eilers.
*
* =====================================================================
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
* =====================================================================
*/
#include "gigaset.h"
#include <linux/crc-ccitt.h>
#include <linux/bitrev.h>
#include <linux/export.h>
/* check if byte must be stuffed/escaped
 * I'm not sure which data should be encoded.
 * Therefore I will go the hard way and encode every value
 * less than 0x20, the flag sequence and the control escape char.
 * (other possible candidates: 0x91/0x93, XON/XOFF with parity set)
 */
static inline int muststuff(unsigned char c)
{
	return c < PPP_TRANS || c == PPP_FLAG || c == PPP_ESCAPE;
}
/* == data input =========================================================== */
/* process a block of received bytes in command mode
 * (mstate != MS_LOCKED && (inputstate & INS_command))
 * Append received bytes to the command response buffer and forward them
 * line by line to the response handler. Exit whenever a mode/state change
 * might have occurred.
 * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
 * removed before passing the line to the response handler.
 * Return value:
 *	number of processed bytes
 */
static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
{
	unsigned char *src = inbuf->data + inbuf->head;
	struct cardstate *cs = inbuf->cs;
	unsigned cbytes = cs->cbytes;	/* bytes collected in respdata */
	unsigned procbytes = 0;
	unsigned char c;

	while (procbytes < numbytes) {
		c = *src++;
		procbytes++;

		switch (c) {
		case '\n':
			if (cbytes == 0 && cs->respdata[0] == '\r') {
				/* collapse LF with preceding CR */
				cs->respdata[0] = 0;
				break;
			}
			/* --v-- fall through --v-- */
		case '\r':
			/* end of message line, pass to response handler */
			if (cbytes >= MAX_RESP_SIZE) {
				dev_warn(cs->dev, "response too large (%d)\n",
					 cbytes);
				cbytes = MAX_RESP_SIZE;
			}
			cs->cbytes = cbytes;
			gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
					   cbytes, cs->respdata);
			gigaset_handle_modem_response(cs);
			cbytes = 0;

			/* store EOL byte for CRLF collapsing */
			cs->respdata[0] = c;

			/* cs->dle may have changed */
			if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
				inbuf->inputstate &= ~INS_command;

			/* return for reevaluating state */
			goto exit;

		case DLE_FLAG:
			if (inbuf->inputstate & INS_DLE_char) {
				/* quoted DLE: clear quote flag */
				inbuf->inputstate &= ~INS_DLE_char;
			} else if (cs->dle ||
				   (inbuf->inputstate & INS_DLE_command)) {
				/* DLE escape, pass up for handling */
				inbuf->inputstate |= INS_DLE_char;
				goto exit;
			}
			/* quoted or not in DLE mode: treat as regular data */
			/* --v-- fall through --v-- */
		default:
			/* append to line buffer if possible */
			if (cbytes < MAX_RESP_SIZE)
				cs->respdata[cbytes] = c;
			cbytes++;	/* counts overflow bytes too */
		}
	}
exit:
	cs->cbytes = cbytes;
	return procbytes;
}
/* process a block of received bytes in lock mode
 * All received bytes are passed unmodified to the tty i/f.
 * Return value:
 *	number of processed bytes (always the whole block)
 */
static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
{
	unsigned char *src = inbuf->data + inbuf->head;

	gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
	/* forward the raw bytes to the character device interface */
	gigaset_if_receive(inbuf->cs, src, numbytes);
	return numbytes;
}
/* process a block of received bytes in HDLC data mode
 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
 * When a frame is complete, check the FCS and pass valid frames to the LL.
 * If DLE is encountered, return immediately to let the caller handle it.
 * Return value:
 *	number of processed bytes
 */
static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
{
	struct cardstate *cs = inbuf->cs;
	struct bc_state *bcs = cs->bcs;
	int inputstate = bcs->inputstate;
	__u16 fcs = bcs->rx_fcs;	/* running CRC over the frame */
	struct sk_buff *skb = bcs->rx_skb;	/* NULL => frame being skipped */
	unsigned char *src = inbuf->data + inbuf->head;
	unsigned procbytes = 0;
	unsigned char c;

	if (inputstate & INS_byte_stuff) {
		/* the previous block ended right after a PPP_ESCAPE:
		 * resume with the escaped byte */
		if (!numbytes)
			return 0;
		inputstate &= ~INS_byte_stuff;
		goto byte_stuff;
	}

	while (procbytes < numbytes) {
		c = *src++;
		procbytes++;

		if (c == DLE_FLAG) {
			if (inputstate & INS_DLE_char) {
				/* quoted DLE: clear quote flag */
				inputstate &= ~INS_DLE_char;
			} else if (cs->dle || (inputstate & INS_DLE_command)) {
				/* DLE escape, pass up for handling */
				inputstate |= INS_DLE_char;
				break;
			}
		}

		if (c == PPP_ESCAPE) {
			/* byte stuffing indicator: pull in next byte */
			if (procbytes >= numbytes) {
				/* end of buffer, save for later processing */
				inputstate |= INS_byte_stuff;
				break;
			}
byte_stuff:
			c = *src++;
			procbytes++;
			if (c == DLE_FLAG) {
				if (inputstate & INS_DLE_char) {
					/* quoted DLE: clear quote flag */
					inputstate &= ~INS_DLE_char;
				} else if (cs->dle ||
					   (inputstate & INS_DLE_command)) {
					/* DLE escape, pass up for handling */
					inputstate |=
						INS_DLE_char | INS_byte_stuff;
					break;
				}
			}
			c ^= PPP_TRANS;	/* undo the stuffing transformation */
#ifdef CONFIG_GIGASET_DEBUG
			if (!muststuff(c))
				gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
#endif
		} else if (c == PPP_FLAG) {
			/* end of frame: process content if any */
			if (inputstate & INS_have_data) {
				gig_dbg(DEBUG_HDLC,
					"7e----------------------------");

				/* check and pass received frame */
				if (!skb) {
					/* skipped frame */
					gigaset_isdn_rcv_err(bcs);
				} else if (skb->len < 2) {
					/* frame too short for FCS */
					dev_warn(cs->dev,
						 "short frame (%d)\n",
						 skb->len);
					gigaset_isdn_rcv_err(bcs);
					dev_kfree_skb_any(skb);
				} else if (fcs != PPP_GOODFCS) {
					/* frame check error */
					dev_err(cs->dev,
						"Checksum failed, %u bytes corrupted!\n",
						skb->len);
					gigaset_isdn_rcv_err(bcs);
					dev_kfree_skb_any(skb);
				} else {
					/* good frame */
					__skb_trim(skb, skb->len - 2);	/* strip FCS */
					gigaset_skb_rcvd(bcs, skb);
				}

				/* prepare reception of next frame */
				inputstate &= ~INS_have_data;
				skb = gigaset_new_rx_skb(bcs);
			} else {
				/* empty frame (7E 7E) */
#ifdef CONFIG_GIGASET_DEBUG
				++bcs->emptycount;
#endif
				if (!skb) {
					/* skipped (?) */
					gigaset_isdn_rcv_err(bcs);
					skb = gigaset_new_rx_skb(bcs);
				}
			}

			fcs = PPP_INITFCS;	/* restart CRC for next frame */
			continue;
#ifdef CONFIG_GIGASET_DEBUG
		} else if (muststuff(c)) {
			/* Should not happen. Possible after ZDLE=1<CR><LF>. */
			gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
#endif
		}

		/* regular data byte, append to skb */
#ifdef CONFIG_GIGASET_DEBUG
		if (!(inputstate & INS_have_data)) {
			gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
				bcs->emptycount);
			bcs->emptycount = 0;
		}
#endif
		inputstate |= INS_have_data;
		if (skb) {
			if (skb->len >= bcs->rx_bufsize) {
				dev_warn(cs->dev, "received packet too long\n");
				dev_kfree_skb_any(skb);
				/* skip remainder of packet */
				bcs->rx_skb = skb = NULL;
			} else {
				*__skb_put(skb, 1) = c;
				fcs = crc_ccitt_byte(fcs, c);
			}
		}
	}

	/* save partial-frame state for the next block */
	bcs->inputstate = inputstate;
	bcs->rx_fcs = fcs;
	return procbytes;
}
/* process a block of received bytes in transparent data mode
 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
 * Invert bytes, undoing byte stuffing and watching for DLE escapes.
 * If DLE is encountered, return immediately to let the caller handle it.
 * Return value:
 *	number of processed bytes
 */
static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
{
	struct cardstate *cs = inbuf->cs;
	struct bc_state *bcs = cs->bcs;
	int inputstate = bcs->inputstate;
	struct sk_buff *skb = bcs->rx_skb;
	unsigned char *src = inbuf->data + inbuf->head;
	unsigned procbytes = 0;
	unsigned char c;

	if (!skb) {
		/* skip this block (no receive buffer available) */
		gigaset_new_rx_skb(bcs);	/* try to allocate for the next one */
		return numbytes;
	}

	/* stop early when the skb is full; the rest of the block is
	 * processed on the next call */
	while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
		c = *src++;
		procbytes++;

		if (c == DLE_FLAG) {
			if (inputstate & INS_DLE_char) {
				/* quoted DLE: clear quote flag */
				inputstate &= ~INS_DLE_char;
			} else if (cs->dle || (inputstate & INS_DLE_command)) {
				/* DLE escape, pass up for handling */
				inputstate |= INS_DLE_char;
				break;
			}
		}

		/* regular data byte: append to current skb, bit-reversed
		 * (transparent mode data arrives LSB first) */
		inputstate |= INS_have_data;
		*__skb_put(skb, 1) = bitrev8(c);
	}

	/* pass data up */
	if (inputstate & INS_have_data) {
		gigaset_skb_rcvd(bcs, skb);
		inputstate &= ~INS_have_data;
		gigaset_new_rx_skb(bcs);	/* fresh skb for the next chunk */
	}

	bcs->inputstate = inputstate;
	return procbytes;
}
/* process DLE escapes
* Called whenever a DLE sequence might be encountered in the input stream.
* Either processes the entire DLE sequence or, if that isn't possible,
* notes the fact that an initial DLE has been received in the INS_DLE_char
* inputstate flag and resumes processing of the sequence on the next call.
*/
static void handle_dle(struct inbuf_t *inbuf)
{
        struct cardstate *cs = inbuf->cs;

        if (cs->mstate == MS_LOCKED)
                return;         /* no DLE processing in lock mode */

        if (!(inbuf->inputstate & INS_DLE_char)) {
                /* no DLE pending */
                if (inbuf->data[inbuf->head] == DLE_FLAG &&
                    (cs->dle || inbuf->inputstate & INS_DLE_command)) {
                        /* start of DLE sequence: discard the DLE byte itself */
                        inbuf->head++;
                        if (inbuf->head == inbuf->tail ||
                            inbuf->head == RBUFSIZE) {
                                /* end of buffer, save for later processing;
                                 * a head at RBUFSIZE is wrapped to 0 by the
                                 * caller, gigaset_m10x_input() */
                                inbuf->inputstate |= INS_DLE_char;
                                return;
                        }
                } else {
                        /* regular data byte */
                        return;
                }
        }

        /* consume pending DLE: the byte at head is the escaped character */
        inbuf->inputstate &= ~INS_DLE_char;

        switch (inbuf->data[inbuf->head]) {
        case 'X':       /* begin of event message */
                if (inbuf->inputstate & INS_command)
                        dev_notice(cs->dev,
                                   "received <DLE>X in command mode\n");
                inbuf->inputstate |= INS_command | INS_DLE_command;
                inbuf->head++;  /* byte consumed */
                break;
        case '.':       /* end of event message */
                if (!(inbuf->inputstate & INS_DLE_command))
                        dev_notice(cs->dev,
                                   "received <DLE>. without <DLE>X\n");
                inbuf->inputstate &= ~INS_DLE_command;
                /* return to data mode if in DLE mode */
                if (cs->dle)
                        inbuf->inputstate &= ~INS_command;
                inbuf->head++;  /* byte consumed */
                break;
        case DLE_FLAG:  /* DLE in data stream */
                /* mark as quoted so the *_loop handlers store it as data */
                inbuf->inputstate |= INS_DLE_char;
                if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
                        dev_notice(cs->dev,
                                   "received <DLE><DLE> not in DLE mode\n");
                break;  /* quoted byte left in buffer */
        default:
                dev_notice(cs->dev, "received <DLE><%02x>\n",
                           inbuf->data[inbuf->head]);
                /* quoted byte left in buffer */
        }
}
/**
* gigaset_m10x_input() - process a block of data received from the device
* @inbuf: received data and device descriptor structure.
*
* Called by hardware module {ser,usb}_gigaset with a block of received
* bytes. Separates the bytes received over the serial data channel into
* user data and command replies (locked/unlocked) according to the
* current state of the interface.
*/
void gigaset_m10x_input(struct inbuf_t *inbuf)
{
        struct cardstate *cs = inbuf->cs;
        unsigned numbytes, procbytes;

        gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);

        while (inbuf->head != inbuf->tail) {
                /* check for DLE escape */
                handle_dle(inbuf);

                /* process a contiguous block of bytes: up to the write
                 * position (tail), or to the end of the ring buffer if
                 * the data wraps around */
                numbytes = (inbuf->head > inbuf->tail ?
                            RBUFSIZE : inbuf->tail) - inbuf->head;
                gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
                /*
                 * numbytes may be 0 if handle_dle() ate the last byte.
                 * This does no harm, *_loop() will just return 0 immediately.
                 */

                /* dispatch on interface state: raw pass-through in lock
                 * mode, command responses while in command mode, otherwise
                 * payload decoding per the layer 2 protocol */
                if (cs->mstate == MS_LOCKED)
                        procbytes = lock_loop(numbytes, inbuf);
                else if (inbuf->inputstate & INS_command)
                        procbytes = cmd_loop(numbytes, inbuf);
                else if (cs->bcs->proto2 == L2_HDLC)
                        procbytes = hdlc_loop(numbytes, inbuf);
                else
                        procbytes = iraw_loop(numbytes, inbuf);

                inbuf->head += procbytes;

                /* check for buffer wraparound */
                if (inbuf->head >= RBUFSIZE)
                        inbuf->head = 0;

                gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
        }
}
EXPORT_SYMBOL_GPL(gigaset_m10x_input);
/* == data output ========================================================== */
/*
* Encode a data packet into an octet stuffed HDLC frame with FCS,
* opening and closing flags, preserving headroom data.
* parameters:
* skb skb containing original packet (freed upon return)
* Return value:
* pointer to newly allocated skb containing the result frame
* and the original link layer header, NULL on error
*/
static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
{
        struct sk_buff *hdlc_skb;
        __u16 fcs;
        unsigned char c;
        unsigned char *cp;
        int len;
        unsigned int stuf_cnt;

        /* first pass: count bytes that will need octet stuffing and
         * compute the CRC-CCITT frame check sequence over the payload */
        stuf_cnt = 0;
        fcs = PPP_INITFCS;
        cp = skb->data;
        len = skb->len;
        while (len--) {
                if (muststuff(*cp))
                        stuf_cnt++;
                fcs = crc_ccitt_byte(fcs, *cp++);
        }
        fcs ^= 0xffff;                  /* complement */

        /* size of new buffer: original size + number of stuffing bytes
         * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
         * + room for link layer header
         */
        hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
        if (!hdlc_skb) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        /* Copy link layer header into new skb */
        skb_reset_mac_header(hdlc_skb);
        skb_reserve(hdlc_skb, skb->mac_len);
        memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
        hdlc_skb->mac_len = skb->mac_len;

        /* Add flag sequence in front of everything.. */
        *(skb_put(hdlc_skb, 1)) = PPP_FLAG;

        /* Perform byte stuffing while copying data.
         * NOTE: consumes the source skb in place (skb->len and skb->data
         * are modified), which is safe only because it is freed below. */
        while (skb->len--) {
                if (muststuff(*skb->data)) {
                        *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
                        *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS;
                } else
                        *(skb_put(hdlc_skb, 1)) = *skb->data++;
        }

        /* Finally add FCS (byte stuffed) and flag sequence */
        c = (fcs & 0x00ff);     /* least significant byte first */
        if (muststuff(c)) {
                *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
                c ^= PPP_TRANS;
        }
        *(skb_put(hdlc_skb, 1)) = c;

        c = ((fcs >> 8) & 0x00ff);
        if (muststuff(c)) {
                *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
                c ^= PPP_TRANS;
        }
        *(skb_put(hdlc_skb, 1)) = c;

        *(skb_put(hdlc_skb, 1)) = PPP_FLAG;

        dev_kfree_skb_any(skb);
        return hdlc_skb;
}
/*
* Encode a data packet into an octet stuffed raw bit inverted frame,
* preserving headroom data.
* parameters:
* skb skb containing original packet (freed upon return)
* Return value:
* pointer to newly allocated skb containing the result frame
* and the original link layer header, NULL on error
*/
static struct sk_buff *iraw_encode(struct sk_buff *skb)
{
        struct sk_buff *iraw_skb;
        unsigned char c;
        unsigned char *cp;
        int len;

        /* size of new buffer (worst case = every byte must be stuffed):
         * 2 * original size + room for link layer header
         */
        iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len);
        if (!iraw_skb) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        /* copy link layer header into new skb */
        skb_reset_mac_header(iraw_skb);
        skb_reserve(iraw_skb, skb->mac_len);
        memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
        iraw_skb->mac_len = skb->mac_len;

        /* copy and stuff data: each byte is bit-reversed, and a DLE byte
         * is escaped by writing it twice */
        cp = skb->data;
        len = skb->len;
        while (len--) {
                c = bitrev8(*cp++);
                if (c == DLE_FLAG)
                        *(skb_put(iraw_skb, 1)) = c;    /* double the DLE */
                *(skb_put(iraw_skb, 1)) = c;
        }

        dev_kfree_skb_any(skb);
        return iraw_skb;
}
/**
* gigaset_m10x_send_skb() - queue an skb for sending
* @bcs: B channel descriptor structure.
* @skb: data to send.
*
* Called by LL to encode and queue an skb for sending, and start
* transmission if necessary.
* Once the payload data has been transmitted completely, gigaset_skb_sent()
* will be called with the skb's link layer header preserved.
*
* Return value:
* number of bytes accepted for sending (skb->len) if ok,
* error code < 0 (eg. -ENOMEM) on error
*/
int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
{
struct cardstate *cs = bcs->cs;
unsigned len = skb->len;
unsigned long flags;
if (bcs->proto2 == L2_HDLC)
skb = HDLC_Encode(skb);
else
skb = iraw_encode(skb);
if (!skb) {
dev_err(cs->dev,
"unable to allocate memory for encoding!\n");
return -ENOMEM;
}
skb_queue_tail(&bcs->squeue, skb);
spin_lock_irqsave(&cs->lock, flags);
if (cs->connected)
tasklet_schedule(&cs->write_tasklet);
spin_unlock_irqrestore(&cs->lock, flags);
return len; /* ok so far */
}
EXPORT_SYMBOL_GPL(gigaset_m10x_send_skb);
| gpl-2.0 |
cuckata23/android_kernel_motorola_msm8226 | arch/arm/plat-iop/setup.c | 9557 | 1124 | /*
* arch/arm/plat-iop/setup.c
*
* Author: Nicolas Pitre <nico@fluxnic.net>
* Copyright (C) 2001 MontaVista Software, Inc.
* Copyright (C) 2004 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/mach/map.h>
#include <asm/hardware/iop3xx.h>
/*
* Standard IO mapping for all IOP3xx based systems. Note that
* the IOP3xx OCCDR must be mapped uncached and unbuffered.
*/
static struct map_desc iop3xx_std_desc[] __initdata = {
        { /* mem mapped registers */
                .virtual        = IOP3XX_PERIPHERAL_VIRT_BASE,
                .pfn            = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
                .length         = IOP3XX_PERIPHERAL_SIZE,
                /* uncached: the OCCDR lives in this window and must be
                 * mapped uncached and unbuffered (see comment above) */
                .type           = MT_UNCACHED,
        }, { /* PCI IO space */
                .virtual        = IOP3XX_PCI_LOWER_IO_VA,
                .pfn            = __phys_to_pfn(IOP3XX_PCI_LOWER_IO_PA),
                .length         = IOP3XX_PCI_IO_WINDOW_SIZE,
                .type           = MT_DEVICE,
        },
};
/* Install the static I/O mappings shared by all IOP3xx based systems. */
void __init iop3xx_map_io(void)
{
        iotable_init(iop3xx_std_desc, ARRAY_SIZE(iop3xx_std_desc));
}
| gpl-2.0 |
wimpknocker/android_kernel_samsung_viennalte | drivers/net/wireless/zd1211rw/zd_rf_uw2453.c | 10581 | 15773 | /* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "zd_rf.h"
#include "zd_usb.h"
#include "zd_chip.h"
/* This RF programming code is based upon the code found in v2.16.0.0 of the
* ZyDAS vendor driver. Unlike other RF's, Ubec publish full technical specs
* for this RF on their website, so we're able to understand more than
* usual as to what is going on. Thumbs up for Ubec for doing that. */
/* The 3-wire serial interface provides access to 8 write-only registers.
* The data format is a 4 bit register address followed by a 20 bit value. */
/* Pack a serial-bus word: 4-bit register address in bits 23:20,
 * 20-bit register value in bits 19:0 */
#define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff))
/* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth
* fractional divide ratio) and 3 (VCO config).
*
* We configure the RF to produce an interrupt when the PLL is locked onto
* the configured frequency. During initialization, we run through a variety
* of different VCO configurations on channel 1 until we detect a PLL lock.
* When this happens, we remember which VCO configuration produced the lock
* and use it later. Actually, we use the configuration *after* the one that
* produced the lock, which seems odd, but it works.
*
* If we do not see a PLL lock on any standard VCO config, we fall back on an
* autocal configuration, which has a fixed (as opposed to per-channel) VCO
* config and different synth values from the standard set (divide ratio
* is still shared with the standard set). */
/* The per-channel synth values for all standard VCO configurations. These get
* written to register 1. */
static const u8 uw2453_std_synth[] = {
        /* indexed by channel - 1; written to RF register 1
         * by uw2453_synth_set_channel() */
        RF_CHANNEL( 1) = 0x47,
        RF_CHANNEL( 2) = 0x47,
        RF_CHANNEL( 3) = 0x67,
        RF_CHANNEL( 4) = 0x67,
        RF_CHANNEL( 5) = 0x67,
        RF_CHANNEL( 6) = 0x67,
        RF_CHANNEL( 7) = 0x57,
        RF_CHANNEL( 8) = 0x57,
        RF_CHANNEL( 9) = 0x57,
        RF_CHANNEL(10) = 0x57,
        RF_CHANNEL(11) = 0x77,
        RF_CHANNEL(12) = 0x77,
        RF_CHANNEL(13) = 0x77,
        RF_CHANNEL(14) = 0x4f,
};
/* This table stores the synthesizer fractional divide ratio for *all* VCO
* configurations (both standard and autocal). These get written to register 2.
*/
static const u16 uw2453_synth_divide[] = {
        /* indexed by channel - 1; written to RF register 2, shared by
         * standard and autocal configurations */
        RF_CHANNEL( 1) = 0x999,
        RF_CHANNEL( 2) = 0x99b,
        RF_CHANNEL( 3) = 0x998,
        RF_CHANNEL( 4) = 0x99a,
        RF_CHANNEL( 5) = 0x999,
        RF_CHANNEL( 6) = 0x99b,
        RF_CHANNEL( 7) = 0x998,
        RF_CHANNEL( 8) = 0x99a,
        RF_CHANNEL( 9) = 0x999,
        RF_CHANNEL(10) = 0x99b,
        RF_CHANNEL(11) = 0x998,
        RF_CHANNEL(12) = 0x99a,
        RF_CHANNEL(13) = 0x999,
        RF_CHANNEL(14) = 0xccc,
};
/* Here is the data for all the standard VCO configurations. We shrink our
* table a little by observing that both channels in a consecutive pair share
* the same value. We also observe that the high 4 bits ([0:3] in the specs)
* are all 'Reserved' and are always set to 0x4 - we chop them off in the data
* below. */
/* map a channel number (1..14) to the index of its channel pair (0..6) */
#define CHAN_TO_PAIRIDX(a) ((a - 1) / 2)
/* designated-initializer helper: index a VCO config table by channel pair */
#define RF_CHANPAIR(a,b) [CHAN_TO_PAIRIDX(a)]
static const u16 uw2453_std_vco_cfg[][7] = {
        { /* table 1 */
                RF_CHANPAIR( 1, 2) = 0x664d,
                RF_CHANPAIR( 3, 4) = 0x604d,
                RF_CHANPAIR( 5, 6) = 0x6675,
                RF_CHANPAIR( 7, 8) = 0x6475,
                RF_CHANPAIR( 9, 10) = 0x6655,
                RF_CHANPAIR(11, 12) = 0x6455,
                RF_CHANPAIR(13, 14) = 0x6665,
        },
        { /* table 2 */
                RF_CHANPAIR( 1, 2) = 0x666d,
                RF_CHANPAIR( 3, 4) = 0x606d,
                RF_CHANPAIR( 5, 6) = 0x664d,
                RF_CHANPAIR( 7, 8) = 0x644d,
                RF_CHANPAIR( 9, 10) = 0x6675,
                RF_CHANPAIR(11, 12) = 0x6475,
                RF_CHANPAIR(13, 14) = 0x6655,
        },
        { /* table 3 */
                RF_CHANPAIR( 1, 2) = 0x665d,
                RF_CHANPAIR( 3, 4) = 0x605d,
                RF_CHANPAIR( 5, 6) = 0x666d,
                RF_CHANPAIR( 7, 8) = 0x646d,
                RF_CHANPAIR( 9, 10) = 0x664d,
                RF_CHANPAIR(11, 12) = 0x644d,
                RF_CHANPAIR(13, 14) = 0x6675,
        },
        { /* table 4 */
                RF_CHANPAIR( 1, 2) = 0x667d,
                RF_CHANPAIR( 3, 4) = 0x607d,
                RF_CHANPAIR( 5, 6) = 0x665d,
                RF_CHANPAIR( 7, 8) = 0x645d,
                RF_CHANPAIR( 9, 10) = 0x666d,
                RF_CHANPAIR(11, 12) = 0x646d,
                RF_CHANPAIR(13, 14) = 0x664d,
        },
        { /* table 5 */
                RF_CHANPAIR( 1, 2) = 0x6643,
                RF_CHANPAIR( 3, 4) = 0x6043,
                RF_CHANPAIR( 5, 6) = 0x667d,
                RF_CHANPAIR( 7, 8) = 0x647d,
                RF_CHANPAIR( 9, 10) = 0x665d,
                RF_CHANPAIR(11, 12) = 0x645d,
                RF_CHANPAIR(13, 14) = 0x666d,
        },
        { /* table 6 */
                RF_CHANPAIR( 1, 2) = 0x6663,
                RF_CHANPAIR( 3, 4) = 0x6063,
                RF_CHANPAIR( 5, 6) = 0x6643,
                RF_CHANPAIR( 7, 8) = 0x6443,
                RF_CHANPAIR( 9, 10) = 0x667d,
                RF_CHANPAIR(11, 12) = 0x647d,
                RF_CHANPAIR(13, 14) = 0x665d,
        },
        { /* table 7 */
                RF_CHANPAIR( 1, 2) = 0x6653,
                RF_CHANPAIR( 3, 4) = 0x6053,
                RF_CHANPAIR( 5, 6) = 0x6663,
                RF_CHANPAIR( 7, 8) = 0x6463,
                RF_CHANPAIR( 9, 10) = 0x6643,
                RF_CHANPAIR(11, 12) = 0x6443,
                RF_CHANPAIR(13, 14) = 0x667d,
        },
        { /* table 8 */
                RF_CHANPAIR( 1, 2) = 0x6673,
                RF_CHANPAIR( 3, 4) = 0x6073,
                RF_CHANPAIR( 5, 6) = 0x6653,
                RF_CHANPAIR( 7, 8) = 0x6453,
                RF_CHANPAIR( 9, 10) = 0x6663,
                RF_CHANPAIR(11, 12) = 0x6463,
                RF_CHANPAIR(13, 14) = 0x6643,
        },
        { /* table 9 */
                RF_CHANPAIR( 1, 2) = 0x664b,
                RF_CHANPAIR( 3, 4) = 0x604b,
                RF_CHANPAIR( 5, 6) = 0x6673,
                RF_CHANPAIR( 7, 8) = 0x6473,
                RF_CHANPAIR( 9, 10) = 0x6653,
                RF_CHANPAIR(11, 12) = 0x6453,
                RF_CHANPAIR(13, 14) = 0x6663,
        },
        { /* table 10 */
                RF_CHANPAIR( 1, 2) = 0x666b,
                RF_CHANPAIR( 3, 4) = 0x606b,
                RF_CHANPAIR( 5, 6) = 0x664b,
                RF_CHANPAIR( 7, 8) = 0x644b,
                RF_CHANPAIR( 9, 10) = 0x6673,
                RF_CHANPAIR(11, 12) = 0x6473,
                RF_CHANPAIR(13, 14) = 0x6653,
        },
        { /* table 11 */
                RF_CHANPAIR( 1, 2) = 0x665b,
                RF_CHANPAIR( 3, 4) = 0x605b,
                RF_CHANPAIR( 5, 6) = 0x666b,
                RF_CHANPAIR( 7, 8) = 0x646b,
                RF_CHANPAIR( 9, 10) = 0x664b,
                RF_CHANPAIR(11, 12) = 0x644b,
                RF_CHANPAIR(13, 14) = 0x6673,
        },
};
/* The per-channel synth values for autocal. These get written to register 1. */
static const u16 uw2453_autocal_synth[] = {
        /* indexed by channel - 1; written to RF register 1 when the
         * autocal configuration is in use */
        RF_CHANNEL( 1) = 0x6847,
        RF_CHANNEL( 2) = 0x6847,
        RF_CHANNEL( 3) = 0x6867,
        RF_CHANNEL( 4) = 0x6867,
        RF_CHANNEL( 5) = 0x6867,
        RF_CHANNEL( 6) = 0x6867,
        RF_CHANNEL( 7) = 0x6857,
        RF_CHANNEL( 8) = 0x6857,
        RF_CHANNEL( 9) = 0x6857,
        RF_CHANNEL(10) = 0x6857,
        RF_CHANNEL(11) = 0x6877,
        RF_CHANNEL(12) = 0x6877,
        RF_CHANNEL(13) = 0x6877,
        RF_CHANNEL(14) = 0x684f,
};
/* The single VCO configuration used in autocal mode (same for all
 * channels); written to register 3 via uw2453_write_vco_cfg() */
static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662;
/* TX gain settings. The array index corresponds to the TX power integration
* values found in the EEPROM. The values get written to register 7. */
/* const: this is a read-only lookup table (indexed by the EEPROM TX power
 * integration value, written to RF register 7) and nothing ever writes to
 * it; the missing const was inconsistent with the sibling tables above */
static const u32 uw2453_txgain[] = {
        [0x00] = 0x0e313,
        [0x01] = 0x0fb13,
        [0x02] = 0x0e093,
        [0x03] = 0x0f893,
        [0x04] = 0x0ea93,
        [0x05] = 0x1f093,
        [0x06] = 0x1f493,
        [0x07] = 0x1f693,
        [0x08] = 0x1f393,
        [0x09] = 0x1f35b,
        [0x0a] = 0x1e6db,
        [0x0b] = 0x1ff3f,
        [0x0c] = 0x1ffff,
        [0x0d] = 0x361d7,
        [0x0e] = 0x37fbf,
        [0x0f] = 0x3ff8b,
        [0x10] = 0x3ff33,
        [0x11] = 0x3fb3f,
        [0x12] = 0x3ffff,
};
/* RF-specific private state, hung off zd_rf->priv */
struct uw2453_priv {
        /* index into synth/VCO config tables where PLL lock was found
         * -1 means autocal */
        int config;
};

/* accessor for the driver-private state */
#define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv)
/* Program the synthesizer (register 1) and the fractional divide ratio
 * (register 2) for the given 1-based channel, using either the autocal
 * or the standard synth table. Returns 0 on success or the error from
 * zd_rfwrite_locked(). */
static int uw2453_synth_set_channel(struct zd_chip *chip, int channel,
        bool autocal)
{
        int r;
        int idx = channel - 1; /* channels are 1-based, tables 0-based */
        u32 val;

        if (autocal)
                val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]);
        else
                val = UW2453_REGWRITE(1, uw2453_std_synth[idx]);

        r = zd_rfwrite_locked(chip, val, RF_RV_BITS);
        if (r)
                return r;

        /* the divide ratio table is shared by both configurations */
        return zd_rfwrite_locked(chip,
                UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS);
}
/* Write a VCO configuration value to RF register 3. */
static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value)
{
        /* vendor driver always sets these upper bits even though the specs say
         * they are reserved */
        u32 val = 0x40000 | value;
        return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS);
}
/* Run the mode initialization sequence on RF register 0:
 * IDLE -> CAL_VCO -> RX/TX, then power down the RSSI circuit. */
static int uw2453_init_mode(struct zd_chip *chip)
{
        static const u32 rv[] = {
                UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */
                UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */
                UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */
                UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */
        };

        return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
}
/* Look up the EEPROM TX power integration value for the channel and
 * program the matching gain into RF register 7. An out-of-range value
 * is logged and ignored (returns 0). */
static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel)
{
        u8 idx = chip->pwr_int_values[channel - 1];

        if (idx < ARRAY_SIZE(uw2453_txgain))
                return zd_rfwrite_locked(chip,
                        UW2453_REGWRITE(7, uw2453_txgain[idx]), RF_RV_BITS);

        dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for "
                  "int value %x on channel %d\n", idx, channel);
        return 0;
}
/* One-time RF initialization.
 * Programs the baseband (ZD_CR*) registers and the RF registers, then
 * probes the standard VCO configurations on channel 1 until the PLL
 * reports lock via the interrupt register; falls back to autocal when
 * none locks. The selected configuration index is stored in the private
 * data for use by uw2453_set_channel(). */
static int uw2453_init_hw(struct zd_rf *rf)
{
        int i, r;
        int found_config = -1;
        u16 intr_status;
        struct zd_chip *chip = zd_rf_to_chip(rf);

        static const struct zd_ioreq16 ioreqs[] = {
                { ZD_CR10, 0x89 }, { ZD_CR15, 0x20 },
                { ZD_CR17, 0x28 }, /* 6112 no change */
                { ZD_CR23, 0x38 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 },
                { ZD_CR27, 0x15 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
                { ZD_CR33, 0x28 }, { ZD_CR34, 0x30 },
                { ZD_CR35, 0x43 }, /* 6112 3e->43 */
                { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
                { ZD_CR46, 0x92 }, /* 6112 96->92 */
                { ZD_CR47, 0x1e },
                { ZD_CR48, 0x04 }, /* 5602 Roger */
                { ZD_CR49, 0xfa }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 },
                { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 },
                { ZD_CR91, 0x00 }, { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d },
                { ZD_CR99, 0x28 }, { ZD_CR100, 0x02 },
                { ZD_CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */
                { ZD_CR102, 0x27 },
                { ZD_CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f
                                     * 6221 1f->1c
                                     */
                { ZD_CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */
                { ZD_CR109, 0x13 },
                { ZD_CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */
                { ZD_CR111, 0x13 }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
                { ZD_CR114, 0x23 }, /* 6221 27->23 */
                { ZD_CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */
                { ZD_CR116, 0x24 }, /* 6220 1c->24 */
                { ZD_CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */
                { ZD_CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */
                { ZD_CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */
                { ZD_CR120, 0x4f },
                { ZD_CR121, 0x1f }, /* 6220 4f->1f */
                { ZD_CR122, 0xf0 }, { ZD_CR123, 0x57 }, { ZD_CR125, 0xad },
                { ZD_CR126, 0x6c }, { ZD_CR127, 0x03 },
                { ZD_CR128, 0x14 }, /* 6302 12->11 */
                { ZD_CR129, 0x12 }, /* 6301 10->0f */
                { ZD_CR130, 0x10 }, { ZD_CR137, 0x50 }, { ZD_CR138, 0xa8 },
                { ZD_CR144, 0xac }, { ZD_CR146, 0x20 }, { ZD_CR252, 0xff },
                { ZD_CR253, 0xff },
        };

        static const u32 rv[] = {
                UW2453_REGWRITE(4, 0x2b),    /* configure receiver gain */
                UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */
                UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */
                UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */

                /* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins,
                 * RSSI circuit powered down, reduced RSSI range */
                UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */

                /* synthesizer configuration for channel 1 */
                UW2453_REGWRITE(1, 0x47),
                UW2453_REGWRITE(2, 0x999),

                /* disable manual VCO band selection */
                UW2453_REGWRITE(3, 0x7602),

                /* enable manual VCO band selection, configure current level */
                UW2453_REGWRITE(3, 0x46063),
        };

        r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
        if (r)
                return r;

        r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
        if (r)
                return r;

        r = uw2453_init_mode(chip);
        if (r)
                return r;

        /* Try all standard VCO configuration settings on channel 1
         * (minus one: the entry *after* the locking one is selected
         * below, so the last table is never chosen directly) */
        for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) {
                /* Configure synthesizer for channel 1 */
                r = uw2453_synth_set_channel(chip, 1, false);
                if (r)
                        return r;

                /* Write VCO config */
                r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]);
                if (r)
                        return r;

                /* ack interrupt event */
                r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG);
                if (r)
                        return r;

                /* check interrupt status */
                r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG);
                if (r)
                        return r;

                if (!(intr_status & 0xf)) {
                        /* interrupt bits cleared: PLL locked */
                        dev_dbg_f(zd_chip_dev(chip),
                                "PLL locked on configuration %d\n", i);
                        found_config = i;
                        break;
                }
        }

        if (found_config == -1) {
                /* autocal */
                dev_dbg_f(zd_chip_dev(chip),
                        "PLL did not lock, using autocal\n");

                r = uw2453_synth_set_channel(chip, 1, true);
                if (r)
                        return r;

                r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG);
                if (r)
                        return r;
        }

        /* To match the vendor driver behaviour, we use the configuration after
         * the one that produced a lock. */
        /* NOTE(review): in the autocal fallback found_config is still -1,
         * so config becomes 0 (standard table 0) rather than -1 (autocal),
         * and uw2453_set_channel() will not use autocal afterwards --
         * verify against the vendor driver whether this is intended. */
        UW2453_PRIV(rf)->config = found_config + 1;

        return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
}
/* Tune the RF to the given 1-based channel: program synthesizer and
 * divide ratio, write the VCO configuration selected at init time
 * (or the autocal config when config == -1), rerun the mode sequence,
 * and set the per-channel TX gain. */
static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
{
        int r;
        u16 vco_cfg;
        int config = UW2453_PRIV(rf)->config;
        bool autocal = (config == -1);
        struct zd_chip *chip = zd_rf_to_chip(rf);

        static const struct zd_ioreq16 ioreqs[] = {
                { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 },
                { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 },
        };

        r = uw2453_synth_set_channel(chip, channel, autocal);
        if (r)
                return r;

        if (autocal)
                vco_cfg = UW2453_AUTOCAL_VCO_CFG;
        else
                vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)];

        r = uw2453_write_vco_cfg(chip, vco_cfg);
        if (r)
                return r;

        r = uw2453_init_mode(chip);
        if (r)
                return r;

        r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
        if (r)
                return r;

        r = uw2453_set_tx_gain_level(chip, channel);
        if (r)
                return r;

        return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
}
/* Power the radio up: put the RF into RXTX mode and program the
 * baseband registers (ZD1211B needs a different ZD_CR251 value). */
static int uw2453_switch_radio_on(struct zd_rf *rf)
{
        struct zd_chip *chip = zd_rf_to_chip(rf);
        struct zd_ioreq16 ioreqs[] = {
                { ZD_CR11,  0x00 }, { ZD_CR251, 0x3f },
        };
        int r;

        /* ZD1211B revision uses a different ZD_CR251 setting */
        if (zd_chip_is_zd1211b(chip))
                ioreqs[1].value = 0x7f;

        /* enter RXTX mode */
        r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94), RF_RV_BITS);
        if (r)
                return r;

        return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
/* Power the radio down: put the RF into IDLE mode and program the
 * corresponding baseband registers. */
static int uw2453_switch_radio_off(struct zd_rf *rf)
{
        int r;
        struct zd_chip *chip = zd_rf_to_chip(rf);
        static const struct zd_ioreq16 ioreqs[] = {
                { ZD_CR11,  0x04 }, { ZD_CR251, 0x2f },
        };

        /* enter IDLE mode */
        /* FIXME: shouldn't we go to SLEEP? sent email to zydas */
        r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90), RF_RV_BITS);
        if (r)
                return r;

        return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
/* Release the private state allocated by zd_rf_init_uw2453(). */
static void uw2453_clear(struct zd_rf *rf)
{
        kfree(rf->priv);
}
int zd_rf_init_uw2453(struct zd_rf *rf)
{
rf->init_hw = uw2453_init_hw;
rf->set_channel = uw2453_set_channel;
rf->switch_radio_on = uw2453_switch_radio_on;
rf->switch_radio_off = uw2453_switch_radio_off;
rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
rf->clear = uw2453_clear;
/* we have our own TX integration code */
rf->update_channel_int = 0;
rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL);
if (rf->priv == NULL)
return -ENOMEM;
return 0;
}
| gpl-2.0 |
sktjdgns1189/android_kernel_pantech_ef56s | drivers/net/wireless/zd1211rw/zd_rf_uw2453.c | 10581 | 15773 | /* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include "zd_rf.h"
#include "zd_usb.h"
#include "zd_chip.h"
/* This RF programming code is based upon the code found in v2.16.0.0 of the
* ZyDAS vendor driver. Unlike other RF's, Ubec publish full technical specs
* for this RF on their website, so we're able to understand more than
* usual as to what is going on. Thumbs up for Ubec for doing that. */
/* The 3-wire serial interface provides access to 8 write-only registers.
* The data format is a 4 bit register address followed by a 20 bit value. */
/* Pack a serial-bus word: 4-bit register address in bits 23:20,
 * 20-bit register value in bits 19:0 */
#define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff))
/* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth
* fractional divide ratio) and 3 (VCO config).
*
* We configure the RF to produce an interrupt when the PLL is locked onto
* the configured frequency. During initialization, we run through a variety
* of different VCO configurations on channel 1 until we detect a PLL lock.
* When this happens, we remember which VCO configuration produced the lock
* and use it later. Actually, we use the configuration *after* the one that
* produced the lock, which seems odd, but it works.
*
* If we do not see a PLL lock on any standard VCO config, we fall back on an
* autocal configuration, which has a fixed (as opposed to per-channel) VCO
* config and different synth values from the standard set (divide ratio
* is still shared with the standard set). */
/* The per-channel synth values for all standard VCO configurations. These get
* written to register 1. */
static const u8 uw2453_std_synth[] = {
        /* indexed by channel - 1; written to RF register 1
         * by uw2453_synth_set_channel() */
        RF_CHANNEL( 1) = 0x47,
        RF_CHANNEL( 2) = 0x47,
        RF_CHANNEL( 3) = 0x67,
        RF_CHANNEL( 4) = 0x67,
        RF_CHANNEL( 5) = 0x67,
        RF_CHANNEL( 6) = 0x67,
        RF_CHANNEL( 7) = 0x57,
        RF_CHANNEL( 8) = 0x57,
        RF_CHANNEL( 9) = 0x57,
        RF_CHANNEL(10) = 0x57,
        RF_CHANNEL(11) = 0x77,
        RF_CHANNEL(12) = 0x77,
        RF_CHANNEL(13) = 0x77,
        RF_CHANNEL(14) = 0x4f,
};
/* This table stores the synthesizer fractional divide ratio for *all* VCO
* configurations (both standard and autocal). These get written to register 2.
*/
static const u16 uw2453_synth_divide[] = {
        /* indexed by channel - 1; written to RF register 2, shared by
         * standard and autocal configurations */
        RF_CHANNEL( 1) = 0x999,
        RF_CHANNEL( 2) = 0x99b,
        RF_CHANNEL( 3) = 0x998,
        RF_CHANNEL( 4) = 0x99a,
        RF_CHANNEL( 5) = 0x999,
        RF_CHANNEL( 6) = 0x99b,
        RF_CHANNEL( 7) = 0x998,
        RF_CHANNEL( 8) = 0x99a,
        RF_CHANNEL( 9) = 0x999,
        RF_CHANNEL(10) = 0x99b,
        RF_CHANNEL(11) = 0x998,
        RF_CHANNEL(12) = 0x99a,
        RF_CHANNEL(13) = 0x999,
        RF_CHANNEL(14) = 0xccc,
};
/* Here is the data for all the standard VCO configurations. We shrink our
* table a little by observing that both channels in a consecutive pair share
* the same value. We also observe that the high 4 bits ([0:3] in the specs)
* are all 'Reserved' and are always set to 0x4 - we chop them off in the data
* below. */
/* map a channel number (1..14) to the index of its channel pair (0..6) */
#define CHAN_TO_PAIRIDX(a) ((a - 1) / 2)
/* designated-initializer helper: index a VCO config table by channel pair */
#define RF_CHANPAIR(a,b) [CHAN_TO_PAIRIDX(a)]
static const u16 uw2453_std_vco_cfg[][7] = {
        { /* table 1 */
                RF_CHANPAIR( 1, 2) = 0x664d,
                RF_CHANPAIR( 3, 4) = 0x604d,
                RF_CHANPAIR( 5, 6) = 0x6675,
                RF_CHANPAIR( 7, 8) = 0x6475,
                RF_CHANPAIR( 9, 10) = 0x6655,
                RF_CHANPAIR(11, 12) = 0x6455,
                RF_CHANPAIR(13, 14) = 0x6665,
        },
        { /* table 2 */
                RF_CHANPAIR( 1, 2) = 0x666d,
                RF_CHANPAIR( 3, 4) = 0x606d,
                RF_CHANPAIR( 5, 6) = 0x664d,
                RF_CHANPAIR( 7, 8) = 0x644d,
                RF_CHANPAIR( 9, 10) = 0x6675,
                RF_CHANPAIR(11, 12) = 0x6475,
                RF_CHANPAIR(13, 14) = 0x6655,
        },
        { /* table 3 */
                RF_CHANPAIR( 1, 2) = 0x665d,
                RF_CHANPAIR( 3, 4) = 0x605d,
                RF_CHANPAIR( 5, 6) = 0x666d,
                RF_CHANPAIR( 7, 8) = 0x646d,
                RF_CHANPAIR( 9, 10) = 0x664d,
                RF_CHANPAIR(11, 12) = 0x644d,
                RF_CHANPAIR(13, 14) = 0x6675,
        },
        { /* table 4 */
                RF_CHANPAIR( 1, 2) = 0x667d,
                RF_CHANPAIR( 3, 4) = 0x607d,
                RF_CHANPAIR( 5, 6) = 0x665d,
                RF_CHANPAIR( 7, 8) = 0x645d,
                RF_CHANPAIR( 9, 10) = 0x666d,
                RF_CHANPAIR(11, 12) = 0x646d,
                RF_CHANPAIR(13, 14) = 0x664d,
        },
        { /* table 5 */
                RF_CHANPAIR( 1, 2) = 0x6643,
                RF_CHANPAIR( 3, 4) = 0x6043,
                RF_CHANPAIR( 5, 6) = 0x667d,
                RF_CHANPAIR( 7, 8) = 0x647d,
                RF_CHANPAIR( 9, 10) = 0x665d,
                RF_CHANPAIR(11, 12) = 0x645d,
                RF_CHANPAIR(13, 14) = 0x666d,
        },
        { /* table 6 */
                RF_CHANPAIR( 1, 2) = 0x6663,
                RF_CHANPAIR( 3, 4) = 0x6063,
                RF_CHANPAIR( 5, 6) = 0x6643,
                RF_CHANPAIR( 7, 8) = 0x6443,
                RF_CHANPAIR( 9, 10) = 0x667d,
                RF_CHANPAIR(11, 12) = 0x647d,
                RF_CHANPAIR(13, 14) = 0x665d,
        },
        { /* table 7 */
                RF_CHANPAIR( 1, 2) = 0x6653,
                RF_CHANPAIR( 3, 4) = 0x6053,
                RF_CHANPAIR( 5, 6) = 0x6663,
                RF_CHANPAIR( 7, 8) = 0x6463,
                RF_CHANPAIR( 9, 10) = 0x6643,
                RF_CHANPAIR(11, 12) = 0x6443,
                RF_CHANPAIR(13, 14) = 0x667d,
        },
        { /* table 8 */
                RF_CHANPAIR( 1, 2) = 0x6673,
                RF_CHANPAIR( 3, 4) = 0x6073,
                RF_CHANPAIR( 5, 6) = 0x6653,
                RF_CHANPAIR( 7, 8) = 0x6453,
                RF_CHANPAIR( 9, 10) = 0x6663,
                RF_CHANPAIR(11, 12) = 0x6463,
                RF_CHANPAIR(13, 14) = 0x6643,
        },
        { /* table 9 */
                RF_CHANPAIR( 1, 2) = 0x664b,
                RF_CHANPAIR( 3, 4) = 0x604b,
                RF_CHANPAIR( 5, 6) = 0x6673,
                RF_CHANPAIR( 7, 8) = 0x6473,
                RF_CHANPAIR( 9, 10) = 0x6653,
                RF_CHANPAIR(11, 12) = 0x6453,
                RF_CHANPAIR(13, 14) = 0x6663,
        },
        { /* table 10 */
                RF_CHANPAIR( 1, 2) = 0x666b,
                RF_CHANPAIR( 3, 4) = 0x606b,
                RF_CHANPAIR( 5, 6) = 0x664b,
                RF_CHANPAIR( 7, 8) = 0x644b,
                RF_CHANPAIR( 9, 10) = 0x6673,
                RF_CHANPAIR(11, 12) = 0x6473,
                RF_CHANPAIR(13, 14) = 0x6653,
        },
        { /* table 11 */
                RF_CHANPAIR( 1, 2) = 0x665b,
                RF_CHANPAIR( 3, 4) = 0x605b,
                RF_CHANPAIR( 5, 6) = 0x666b,
                RF_CHANPAIR( 7, 8) = 0x646b,
                RF_CHANPAIR( 9, 10) = 0x664b,
                RF_CHANPAIR(11, 12) = 0x644b,
                RF_CHANPAIR(13, 14) = 0x6673,
        },
};
/* The per-channel synth values for autocal. These get written to register 1. */
static const u16 uw2453_autocal_synth[] = {
        /* indexed by channel - 1; written to RF register 1 when the
         * autocal configuration is in use */
        RF_CHANNEL( 1) = 0x6847,
        RF_CHANNEL( 2) = 0x6847,
        RF_CHANNEL( 3) = 0x6867,
        RF_CHANNEL( 4) = 0x6867,
        RF_CHANNEL( 5) = 0x6867,
        RF_CHANNEL( 6) = 0x6867,
        RF_CHANNEL( 7) = 0x6857,
        RF_CHANNEL( 8) = 0x6857,
        RF_CHANNEL( 9) = 0x6857,
        RF_CHANNEL(10) = 0x6857,
        RF_CHANNEL(11) = 0x6877,
        RF_CHANNEL(12) = 0x6877,
        RF_CHANNEL(13) = 0x6877,
        RF_CHANNEL(14) = 0x684f,
};
/* The single VCO configuration used in autocal mode (same for all
 * channels); written to register 3 via uw2453_write_vco_cfg() */
static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662;
/* TX gain settings. The array index corresponds to the TX power integration
* values found in the EEPROM. The values get written to register 7. */
/* const: this is a read-only lookup table (indexed by the EEPROM TX power
 * integration value, written to RF register 7) and nothing ever writes to
 * it; the missing const was inconsistent with the sibling tables above */
static const u32 uw2453_txgain[] = {
        [0x00] = 0x0e313,
        [0x01] = 0x0fb13,
        [0x02] = 0x0e093,
        [0x03] = 0x0f893,
        [0x04] = 0x0ea93,
        [0x05] = 0x1f093,
        [0x06] = 0x1f493,
        [0x07] = 0x1f693,
        [0x08] = 0x1f393,
        [0x09] = 0x1f35b,
        [0x0a] = 0x1e6db,
        [0x0b] = 0x1ff3f,
        [0x0c] = 0x1ffff,
        [0x0d] = 0x361d7,
        [0x0e] = 0x37fbf,
        [0x0f] = 0x3ff8b,
        [0x10] = 0x3ff33,
        [0x11] = 0x3fb3f,
        [0x12] = 0x3ffff,
};
/* RF-specific private state, hung off zd_rf->priv */
struct uw2453_priv {
        /* index into synth/VCO config tables where PLL lock was found
         * -1 means autocal */
        int config;
};

/* accessor for the driver-private state */
#define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv)
/* Program the synthesizer (register 1) and the fractional divide ratio
 * (register 2) for the given 1-based channel, using either the autocal
 * or the standard synth table. Returns 0 on success or the error from
 * zd_rfwrite_locked(). */
static int uw2453_synth_set_channel(struct zd_chip *chip, int channel,
        bool autocal)
{
        int r;
        int idx = channel - 1; /* channels are 1-based, tables 0-based */
        u32 val;

        if (autocal)
                val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]);
        else
                val = UW2453_REGWRITE(1, uw2453_std_synth[idx]);

        r = zd_rfwrite_locked(chip, val, RF_RV_BITS);
        if (r)
                return r;

        /* the divide ratio table is shared by both configurations */
        return zd_rfwrite_locked(chip,
                UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS);
}
/* Write a VCO configuration value to RF register 3. */
static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value)
{
        /* vendor driver always sets these upper bits even though the specs say
         * they are reserved */
        u32 val = 0x40000 | value;
        return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS);
}
/* Run the mode initialization sequence on RF register 0:
 * IDLE -> CAL_VCO -> RX/TX, then power down the RSSI circuit. */
static int uw2453_init_mode(struct zd_chip *chip)
{
        static const u32 rv[] = {
                UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */
                UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */
                UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */
                UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */
        };

        return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
}
/* Write the TX gain (register 7) for the channel's EEPROM power
 * integration value. Out-of-range values are logged and skipped
 * (returns 0) rather than treated as an error. */
static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel)
{
	u8 gain_idx = chip->pwr_int_values[channel - 1];

	if (gain_idx >= ARRAY_SIZE(uw2453_txgain)) {
		dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for "
			  "int value %x on channel %d\n", gain_idx, channel);
		return 0;
	}

	return zd_rfwrite_locked(chip,
		UW2453_REGWRITE(7, uw2453_txgain[gain_idx]), RF_RV_BITS);
}
/*
 * Initialize the UW2453 RF: program the ZD control registers and RF
 * registers, then try each standard VCO configuration on channel 1
 * until the PLL locks; if none locks, fall back to autocalibration.
 * The configuration index stored in the private data is the one AFTER
 * the locking entry, matching vendor driver behaviour.
 */
static int uw2453_init_hw(struct zd_rf *rf)
{
	int i, r;
	int found_config = -1;
	u16 intr_status;
	struct zd_chip *chip = zd_rf_to_chip(rf);

	/* Control register values; comments record vendor driver deltas. */
	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR10, 0x89 }, { ZD_CR15, 0x20 },
		{ ZD_CR17, 0x28 }, /* 6112 no change */
		{ ZD_CR23, 0x38 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 },
		{ ZD_CR27, 0x15 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 },
		{ ZD_CR33, 0x28 }, { ZD_CR34, 0x30 },
		{ ZD_CR35, 0x43 }, /* 6112 3e->43 */
		{ ZD_CR41, 0x24 }, { ZD_CR44, 0x32 },
		{ ZD_CR46, 0x92 }, /* 6112 96->92 */
		{ ZD_CR47, 0x1e },
		{ ZD_CR48, 0x04 }, /* 5602 Roger */
		{ ZD_CR49, 0xfa }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 },
		{ ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 },
		{ ZD_CR91, 0x00 }, { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d },
		{ ZD_CR99, 0x28 }, { ZD_CR100, 0x02 },
		{ ZD_CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */
		{ ZD_CR102, 0x27 },
		{ ZD_CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f
				     * 6221 1f->1c
				     */
		{ ZD_CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */
		{ ZD_CR109, 0x13 },
		{ ZD_CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */
		{ ZD_CR111, 0x13 }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
		{ ZD_CR114, 0x23 }, /* 6221 27->23 */
		{ ZD_CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */
		{ ZD_CR116, 0x24 }, /* 6220 1c->24 */
		{ ZD_CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */
		{ ZD_CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */
		{ ZD_CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */
		{ ZD_CR120, 0x4f },
		{ ZD_CR121, 0x1f }, /* 6220 4f->1f */
		{ ZD_CR122, 0xf0 }, { ZD_CR123, 0x57 }, { ZD_CR125, 0xad },
		{ ZD_CR126, 0x6c }, { ZD_CR127, 0x03 },
		{ ZD_CR128, 0x14 }, /* 6302 12->11 */
		{ ZD_CR129, 0x12 }, /* 6301 10->0f */
		{ ZD_CR130, 0x10 }, { ZD_CR137, 0x50 }, { ZD_CR138, 0xa8 },
		{ ZD_CR144, 0xac }, { ZD_CR146, 0x20 }, { ZD_CR252, 0xff },
		{ ZD_CR253, 0xff },
	};

	static const u32 rv[] = {
		UW2453_REGWRITE(4, 0x2b),    /* configure receiver gain */
		UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */
		UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */
		UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */

		/* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins,
		 * RSSI circuit powered down, reduced RSSI range */
		UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */

		/* synthesizer configuration for channel 1 */
		UW2453_REGWRITE(1, 0x47),
		UW2453_REGWRITE(2, 0x999),

		/* disable manual VCO band selection */
		UW2453_REGWRITE(3, 0x7602),

		/* enable manual VCO band selection, configure current level */
		UW2453_REGWRITE(3, 0x46063),
	};

	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
	if (r)
		return r;

	r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
	if (r)
		return r;

	r = uw2453_init_mode(chip);
	if (r)
		return r;

	/* Try all standard VCO configuration settings on channel 1 */
	for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) {
		/* Configure synthesizer for channel 1 */
		r = uw2453_synth_set_channel(chip, 1, false);
		if (r)
			return r;

		/* Write VCO config */
		r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]);
		if (r)
			return r;

		/* ack interrupt event */
		r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG);
		if (r)
			return r;

		/* check interrupt status: a clear low nibble means PLL lock */
		r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG);
		if (r)
			return r;

		if (!(intr_status & 0xf)) {
			dev_dbg_f(zd_chip_dev(chip),
				"PLL locked on configuration %d\n", i);
			found_config = i;
			break;
		}
	}

	if (found_config == -1) {
		/* autocal */
		dev_dbg_f(zd_chip_dev(chip),
			"PLL did not lock, using autocal\n");

		r = uw2453_synth_set_channel(chip, 1, true);
		if (r)
			return r;

		r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG);
		if (r)
			return r;
	}

	/* To match the vendor driver behaviour, we use the configuration after
	 * the one that produced a lock. */
	UW2453_PRIV(rf)->config = found_config + 1;

	return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
}
/*
 * Tune to @channel: program the synthesizer, write the VCO
 * configuration remembered by uw2453_init_hw() (or the autocal one if
 * config == -1), re-run the mode sequence, restore control registers
 * and set the per-channel TX gain.
 */
static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
{
	int r;
	u16 vco_cfg;
	int config = UW2453_PRIV(rf)->config;
	bool autocal = (config == -1);
	struct zd_chip *chip = zd_rf_to_chip(rf);

	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 },
		{ ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 },
	};

	r = uw2453_synth_set_channel(chip, channel, autocal);
	if (r)
		return r;

	if (autocal)
		vco_cfg = UW2453_AUTOCAL_VCO_CFG;
	else
		vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)];

	r = uw2453_write_vco_cfg(chip, vco_cfg);
	if (r)
		return r;

	r = uw2453_init_mode(chip);
	if (r)
		return r;

	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
	if (r)
		return r;

	r = uw2453_set_tx_gain_level(chip, channel);
	if (r)
		return r;

	return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
}
/* Power the radio up: enter RXTX mode and enable the RF front end.
 * The ZD1211B variant needs a different ZD_CR251 value. */
static int uw2453_switch_radio_on(struct zd_rf *rf)
{
	struct zd_chip *chip = zd_rf_to_chip(rf);
	int ret;
	struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR11, 0x00 }, { ZD_CR251, 0x3f },
	};

	/* enter RXTX mode */
	ret = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94),
				RF_RV_BITS);
	if (ret)
		return ret;

	if (zd_chip_is_zd1211b(chip))
		ioreqs[1].value = 0x7f;

	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
/* Power the radio down by returning the RF to IDLE mode and disabling
 * the front end. */
static int uw2453_switch_radio_off(struct zd_rf *rf)
{
	struct zd_chip *chip = zd_rf_to_chip(rf);
	int ret;
	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR11, 0x04 }, { ZD_CR251, 0x2f },
	};

	/* enter IDLE mode */
	/* FIXME: shouldn't we go to SLEEP? sent email to zydas */
	ret = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90),
				RF_RV_BITS);
	if (ret)
		return ret;

	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}
static void uw2453_clear(struct zd_rf *rf)
{
kfree(rf->priv);
}
/*
 * zd_rf_init_uw2453 - install the UW2453 RF operations
 * @rf: RF instance to set up
 *
 * Hooks up all UW2453 callbacks and allocates the private state used to
 * remember which VCO configuration achieved PLL lock.
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
int zd_rf_init_uw2453(struct zd_rf *rf)
{
	rf->init_hw = uw2453_init_hw;
	rf->set_channel = uw2453_set_channel;
	rf->switch_radio_on = uw2453_switch_radio_on;
	rf->switch_radio_off = uw2453_switch_radio_off;
	rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
	rf->clear = uw2453_clear;
	/* we have our own TX integration code */
	rf->update_channel_int = 0;

	rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL);
	if (!rf->priv)
		return -ENOMEM;

	return 0;
}
| gpl-2.0 |
Split-Screen/android_kernel_lge_v500 | arch/x86/math-emu/reg_convert.c | 14421 | 1629 | /*---------------------------------------------------------------------------+
| reg_convert.c |
| |
| Convert register representation. |
| |
| Copyright (C) 1992,1993,1994,1996,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@suburbia.net |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "fpu_emu.h"
/*
 * Copy *a into *x with the exponent widened to a 16 bit quantity,
 * normalizing (pseudo)denormals on the way. Returns the sign of the
 * source operand. Raises an internal exception if the result is not
 * normalized (top significand bit clear).
 */
int FPU_to_exp16(FPU_REG const *a, FPU_REG *x)
{
	int sign = getsign(a);

	/* Copy the full 64-bit significand in one access.
	 * NOTE(review): relies on sigl/sigh being adjacent and forming a
	 * long long -- long-standing layout assumption of this emulator. */
	*(long long *)&(x->sigl) = *(const long long *)&(a->sigl);

	/* Set up the exponent as a 16 bit quantity. */
	setexponent16(x, exponent(a));

	if (exponent16(x) == EXP_UNDER) {
		/* The number is a de-normal or pseudodenormal. */
		/* We only deal with the significand and exponent. */

		if (x->sigh & 0x80000000) {
			/* Is a pseudodenormal. */
			/* This is non-80486 behaviour because the number
			   loses its 'denormal' identity. */
			addexponent(x, 1);
		} else {
			/* Is a denormal. */
			addexponent(x, 1);
			FPU_normalize_nuo(x);
		}
	}

	/* Internal consistency check: result must be normalized. */
	if (!(x->sigh & 0x80000000)) {
		EXCEPTION(EX_INTERNAL | 0x180);
	}

	return sign;
}
| gpl-2.0 |
itgb/opCloudRouter | qca/src/u-boot/common/cmd_version.c | 86 | 1382 | /*
* Copyright 2000-2009
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <command.h>
#include <version.h>
#include <linux/compiler.h>
const char __weak version_string[] = U_BOOT_VERSION_STRING;
/*
 * version - print the U-Boot version string, plus the compiler and
 * linker versions when the build provides them. Always returns 0.
 */
int do_version(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	printf("\n%s\n", version_string);
#ifdef CC_VERSION_STRING
	puts(CC_VERSION_STRING "\n");
#endif
#ifdef LD_VERSION_STRING
	puts(LD_VERSION_STRING "\n");
#endif
	return 0;
}

/* Register the "version" shell command (no arguments, repeatable). */
U_BOOT_CMD(
	version,	1,		1,	do_version,
	"print monitor, compiler and linker version",
	""
);
RockchipOpensourceCommunity/kernel-rockchip-next | sound/soc/codecs/wm8731.c | 86 | 19660 | /*
* wm8731.c -- WM8731 ALSA SoC Audio driver
*
* Copyright 2005 Openedhand Ltd.
* Copyright 2006-12 Wolfson Microelectronics, plc
*
* Author: Richard Purdie <richard@openedhand.com>
*
* Based on wm8753.c by Liam Girdwood
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/of_device.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8731.h"
#define WM8731_NUM_SUPPLIES 4
static const char *wm8731_supply_names[WM8731_NUM_SUPPLIES] = {
"AVDD",
"HPVDD",
"DCVDD",
"DBVDD",
};
/* codec private data */
struct wm8731_priv {
struct regmap *regmap;
struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
const struct snd_pcm_hw_constraint_list *constraints;
unsigned int sysclk;
int sysclk_type;
int playback_fs;
bool deemph;
struct mutex lock;
};
/*
* wm8731 register cache
*/
static const struct reg_default wm8731_reg_defaults[] = {
{ 0, 0x0097 },
{ 1, 0x0097 },
{ 2, 0x0079 },
{ 3, 0x0079 },
{ 4, 0x000a },
{ 5, 0x0008 },
{ 6, 0x009f },
{ 7, 0x000a },
{ 8, 0x0000 },
{ 9, 0x0000 },
};
static bool wm8731_volatile(struct device *dev, unsigned int reg)
{
return reg == WM8731_RESET;
}
static bool wm8731_writeable(struct device *dev, unsigned int reg)
{
return reg <= WM8731_RESET;
}
/* Issue a device reset by writing 0 to the RESET register. */
#define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)

/* ADC input mux options, selected via APANA bit 2. */
static const char *wm8731_input_select[] = {"Line In", "Mic"};

static SOC_ENUM_SINGLE_DECL(wm8731_insel_enum,
			    WM8731_APANA, 2, wm8731_input_select);
/* Sample rates (Hz) the DAC deemphasis filter supports; index 0 means
 * deemphasis off. The index is shifted into APDIGI bits [2:1] by
 * wm8731_set_deemph(). Table is read-only, so declare it const. */
static const int wm8731_deemph[] = { 0, 32000, 44100, 48000 };
/* Apply the deemphasis setting: when enabled, pick the supported rate
 * closest to the current playback rate; otherwise select "off". */
static int wm8731_set_deemph(struct snd_soc_codec *codec)
{
	struct wm8731_priv *priv = snd_soc_codec_get_drvdata(codec);
	int best = 0;
	int val = 0;
	int i;

	if (priv->deemph) {
		best = 1;
		for (i = 2; i < ARRAY_SIZE(wm8731_deemph); i++) {
			int cand = abs(wm8731_deemph[i] - priv->playback_fs);
			int cur = abs(wm8731_deemph[best] - priv->playback_fs);

			if (cand < cur)
				best = i;
		}
		val = best << 1;
	}

	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
		best, wm8731_deemph[best]);

	return snd_soc_update_bits(codec, WM8731_APDIGI, 0x6, val);
}
/* Control get callback: report the cached deemphasis switch state. */
static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct wm8731_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.integer.value[0] = priv->deemph;

	return 0;
}
/* Control put callback: update the deemphasis switch under the lock and
 * reprogram the codec if the value changed. Returns 1 on change, 0 if
 * unchanged, -EINVAL for an out-of-range value. */
static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct wm8731_priv *priv = snd_soc_codec_get_drvdata(codec);
	int deemph = ucontrol->value.integer.value[0];
	int changed = 0;

	if (deemph > 1)
		return -EINVAL;

	mutex_lock(&priv->lock);
	if (priv->deemph != deemph) {
		priv->deemph = deemph;
		wm8731_set_deemph(codec);
		changed = 1;
	}
	mutex_unlock(&priv->lock);

	return changed;
}
/* Volume TLV ranges: min (0.01dB units), step, mute-on-minimum flag. */
static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0);
static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 2000, 0);

/* Mixer controls exported to userspace. */
static const struct snd_kcontrol_new wm8731_snd_controls[] = {

SOC_DOUBLE_R_TLV("Master Playback Volume", WM8731_LOUT1V, WM8731_ROUT1V,
		 0, 127, 0, out_tlv),
SOC_DOUBLE_R("Master Playback ZC Switch", WM8731_LOUT1V, WM8731_ROUT1V,
	7, 1, 0),

SOC_DOUBLE_R_TLV("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0,
		 in_tlv),
SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1),

SOC_SINGLE_TLV("Mic Boost Volume", WM8731_APANA, 0, 1, 0, mic_tlv),
SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1),

SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1,
	       sidetone_tlv),

SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1),
SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0),

/* Deemphasis is virtual: state lives in the driver, see wm8731_set_deemph() */
SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
		    wm8731_get_deemph, wm8731_put_deemph),
};

/* Output Mixer */
static const struct snd_kcontrol_new wm8731_output_mixer_controls[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8731_APANA, 3, 1, 0),
SOC_DAPM_SINGLE("Mic Sidetone Switch", WM8731_APANA, 5, 1, 0),
SOC_DAPM_SINGLE("HiFi Playback Switch", WM8731_APANA, 4, 1, 0),
};

/* Input mux */
static const struct snd_kcontrol_new wm8731_input_mux_controls =
SOC_DAPM_ENUM("Input Select", wm8731_insel_enum);

/* DAPM widgets; power bits mostly live in the PWR register (active low). */
static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("ACTIVE",WM8731_ACTIVE, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0),
SND_SOC_DAPM_MIXER("Output Mixer", WM8731_PWR, 4, 1,
	&wm8731_output_mixer_controls[0],
	ARRAY_SIZE(wm8731_output_mixer_controls)),
SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8731_PWR, 3, 1),
SND_SOC_DAPM_OUTPUT("LOUT"),
SND_SOC_DAPM_OUTPUT("LHPOUT"),
SND_SOC_DAPM_OUTPUT("ROUT"),
SND_SOC_DAPM_OUTPUT("RHPOUT"),
SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8731_PWR, 2, 1),
SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0, &wm8731_input_mux_controls),
SND_SOC_DAPM_PGA("Line Input", WM8731_PWR, 0, 1, NULL, 0),
SND_SOC_DAPM_MICBIAS("Mic Bias", WM8731_PWR, 1, 1),
SND_SOC_DAPM_INPUT("MICIN"),
SND_SOC_DAPM_INPUT("RLINEIN"),
SND_SOC_DAPM_INPUT("LLINEIN"),
};
/* DAPM route connect check: the on-chip oscillator supply is only
 * needed when the system clock source is the crystal. */
static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
			    struct snd_soc_dapm_widget *sink)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
	struct wm8731_priv *priv = snd_soc_codec_get_drvdata(codec);

	return priv->sysclk_type == WM8731_SYSCLK_XTAL;
}
/* DAPM audio routing graph. */
static const struct snd_soc_dapm_route wm8731_intercon[] = {
	{"DAC", NULL, "OSC", wm8731_check_osc},
	{"ADC", NULL, "OSC", wm8731_check_osc},
	{"DAC", NULL, "ACTIVE"},
	{"ADC", NULL, "ACTIVE"},

	/* output mixer */
	{"Output Mixer", "Line Bypass Switch", "Line Input"},
	{"Output Mixer", "HiFi Playback Switch", "DAC"},
	{"Output Mixer", "Mic Sidetone Switch", "Mic Bias"},

	/* outputs */
	{"RHPOUT", NULL, "Output Mixer"},
	{"ROUT", NULL, "Output Mixer"},
	{"LHPOUT", NULL, "Output Mixer"},
	{"LOUT", NULL, "Output Mixer"},

	/* input mux */
	{"Input Mux", "Line In", "Line Input"},
	{"Input Mux", "Mic", "Mic Bias"},
	{"ADC", NULL, "Input Mux"},

	/* inputs */
	{"Line Input", NULL, "LLINEIN"},
	{"Line Input", NULL, "RLINEIN"},
	{"Mic Bias", NULL, "MICIN"},
};

/* One SRATE-register recipe per supported mclk/rate combination. */
struct _coeff_div {
	u32 mclk;	/* system clock in Hz */
	u32 rate;	/* sample rate in Hz */
	u16 fs;		/* mclk/rate ratio (informational) */
	u8 sr:4;	/* SRATE sample-rate field */
	u8 bosr:1;	/* base oversampling rate bit */
	u8 usb:1;	/* USB (12MHz) clocking mode bit */
};

/* codec mclk clock divider coefficients */
static const struct _coeff_div coeff_div[] = {
	/* 48k */
	{12288000, 48000, 256, 0x0, 0x0, 0x0},
	{18432000, 48000, 384, 0x0, 0x1, 0x0},
	{12000000, 48000, 250, 0x0, 0x0, 0x1},

	/* 32k */
	{12288000, 32000, 384, 0x6, 0x0, 0x0},
	{18432000, 32000, 576, 0x6, 0x1, 0x0},
	{12000000, 32000, 375, 0x6, 0x0, 0x1},

	/* 8k */
	{12288000, 8000, 1536, 0x3, 0x0, 0x0},
	{18432000, 8000, 2304, 0x3, 0x1, 0x0},
	{11289600, 8000, 1408, 0xb, 0x0, 0x0},
	{16934400, 8000, 2112, 0xb, 0x1, 0x0},
	{12000000, 8000, 1500, 0x3, 0x0, 0x1},

	/* 96k */
	{12288000, 96000, 128, 0x7, 0x0, 0x0},
	{18432000, 96000, 192, 0x7, 0x1, 0x0},
	{12000000, 96000, 125, 0x7, 0x0, 0x1},

	/* 44.1k */
	{11289600, 44100, 256, 0x8, 0x0, 0x0},
	{16934400, 44100, 384, 0x8, 0x1, 0x0},
	{12000000, 44100, 272, 0x8, 0x1, 0x1},

	/* 88.2k */
	{11289600, 88200, 128, 0xf, 0x0, 0x0},
	{16934400, 88200, 192, 0xf, 0x1, 0x0},
	{12000000, 88200, 136, 0xf, 0x1, 0x1},
};

/* rates constraints: which sample rates each MCLK can produce */
static const unsigned int wm8731_rates_12000000[] = {
	8000, 32000, 44100, 48000, 96000, 88200,
};

static const unsigned int wm8731_rates_12288000_18432000[] = {
	8000, 32000, 48000, 96000,
};

static const unsigned int wm8731_rates_11289600_16934400[] = {
	8000, 44100, 88200,
};

static const struct snd_pcm_hw_constraint_list wm8731_constraints_12000000 = {
	.list = wm8731_rates_12000000,
	.count = ARRAY_SIZE(wm8731_rates_12000000),
};

static const
struct snd_pcm_hw_constraint_list wm8731_constraints_12288000_18432000 = {
	.list = wm8731_rates_12288000_18432000,
	.count = ARRAY_SIZE(wm8731_rates_12288000_18432000),
};

static const
struct snd_pcm_hw_constraint_list wm8731_constraints_11289600_16934400 = {
	.list = wm8731_rates_11289600_16934400,
	.count = ARRAY_SIZE(wm8731_rates_11289600_16934400),
};
/*
 * Find the coeff_div[] index for the given mclk/rate pair.
 * NOTE(review): silently falls back to index 0 (12.288MHz/48kHz) when
 * no entry matches; the rate constraints installed in wm8731_startup()
 * are what normally keeps unsupported combinations out -- confirm
 * before relying on this path.
 */
static inline int get_coeff(int mclk, int rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
		if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk)
			return i;
	}
	return 0;
}
/*
 * Configure sample rate and word length for a stream. The SRATE value
 * comes from coeff_div[] for the configured sysclk; the deemphasis
 * setting is re-evaluated against the new playback rate.
 */
static int wm8731_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
	/* keep everything but the word-length bits [3:2] */
	u16 iface = snd_soc_read(codec, WM8731_IFACE) & 0xfff3;
	int i = get_coeff(wm8731->sysclk, params_rate(params));
	u16 srate = (coeff_div[i].sr << 2) |
		(coeff_div[i].bosr << 1) | coeff_div[i].usb;

	wm8731->playback_fs = params_rate(params);

	snd_soc_write(codec, WM8731_SRATE, srate);

	/* bit size */
	switch (params_width(params)) {
	case 16:
		break;
	case 20:
		iface |= 0x0004;
		break;
	case 24:
		iface |= 0x0008;
		break;
	}

	wm8731_set_deemph(codec);

	snd_soc_write(codec, WM8731_IFACE, iface);
	return 0;
}
/* Digital mute: toggle the soft-mute bit (bit 3) in APDIGI while
 * preserving the remaining register contents. */
static int wm8731_mute(struct snd_soc_dai *dai, int mute)
{
	struct snd_soc_codec *codec = dai->codec;
	u16 reg = snd_soc_read(codec, WM8731_APDIGI) & 0xfff7;

	if (mute)
		reg |= 0x8;

	snd_soc_write(codec, WM8731_APDIGI, reg);
	return 0;
}
/*
 * Record the system clock source (crystal or MCLK) and frequency and
 * select the matching sample-rate constraint table; a frequency of 0
 * clears the constraint. Returns -EINVAL for unsupported clk_id or
 * frequency values.
 */
static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai,
		int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);

	switch (clk_id) {
	case WM8731_SYSCLK_XTAL:
	case WM8731_SYSCLK_MCLK:
		wm8731->sysclk_type = clk_id;
		break;
	default:
		return -EINVAL;
	}

	switch (freq) {
	case 0:
		wm8731->constraints = NULL;
		break;
	case 12000000:
		wm8731->constraints = &wm8731_constraints_12000000;
		break;
	case 12288000:
	case 18432000:
		wm8731->constraints = &wm8731_constraints_12288000_18432000;
		break;
	case 16934400:
	case 11289600:
		wm8731->constraints = &wm8731_constraints_11289600_16934400;
		break;
	default:
		return -EINVAL;
	}

	wm8731->sysclk = freq;

	/* the OSC supply route depends on sysclk_type (wm8731_check_osc) */
	snd_soc_dapm_sync(&codec->dapm);

	return 0;
}
/*
 * Translate the DAI format flags (master/slave, data format, clock
 * inversion) into the IFACE register layout and write it. Returns
 * -EINVAL for unsupported combinations.
 */
static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 iface = 0;

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		iface |= 0x0040;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	default:
		return -EINVAL;
	}

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		iface |= 0x0002;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= 0x0001;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		iface |= 0x0013;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		iface |= 0x0003;
		break;
	default:
		return -EINVAL;
	}

	/* clock inversion */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= 0x0090;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= 0x0080;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= 0x0010;
		break;
	default:
		return -EINVAL;
	}

	/* set iface */
	snd_soc_write(codec, WM8731_IFACE, iface);
	return 0;
}
/*
 * Bias management: on OFF->STANDBY, re-enable the supplies and resync
 * the register cache before touching the chip; on OFF, power the chip
 * down fully, drop the supplies and mark the cache dirty so it is
 * replayed on the next power-up.
 */
static int wm8731_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
	int ret;
	u16 reg;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;
	case SND_SOC_BIAS_PREPARE:
		break;
	case SND_SOC_BIAS_STANDBY:
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
						    wm8731->supplies);
			if (ret != 0)
				return ret;

			regcache_sync(wm8731->regmap);
		}

		/* Clear PWROFF, gate CLKOUT, everything else as-is */
		reg = snd_soc_read(codec, WM8731_PWR) & 0xff7f;
		snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
		break;
	case SND_SOC_BIAS_OFF:
		snd_soc_write(codec, WM8731_PWR, 0xffff);
		regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
				       wm8731->supplies);
		regcache_mark_dirty(wm8731->regmap);
		break;
	}
	codec->dapm.bias_level = level;
	return 0;
}
/* Stream open: constrain the stream to sample rates reachable from the
 * configured MCLK, when a clock has been set via set_sysclk(). */
static int wm8731_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct wm8731_priv *priv = snd_soc_codec_get_drvdata(dai->codec);

	if (!priv->constraints)
		return 0;

	snd_pcm_hw_constraint_list(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_RATE,
				   priv->constraints);

	return 0;
}
#define WM8731_RATES SNDRV_PCM_RATE_8000_96000

#define WM8731_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
	SNDRV_PCM_FMTBIT_S24_LE)

/* DAI callbacks. */
static const struct snd_soc_dai_ops wm8731_dai_ops = {
	.startup	= wm8731_startup,
	.hw_params	= wm8731_hw_params,
	.digital_mute	= wm8731_mute,
	.set_sysclk	= wm8731_set_dai_sysclk,
	.set_fmt	= wm8731_set_dai_fmt,
};

/* Single full-duplex DAI; symmetric_rates forces capture and playback
 * to run at the same sample rate. */
static struct snd_soc_dai_driver wm8731_dai = {
	.name = "wm8731-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8731_RATES,
		.formats = WM8731_FORMATS,},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8731_RATES,
		.formats = WM8731_FORMATS,},
	.ops = &wm8731_dai_ops,
	.symmetric_rates = 1,
};
/*
 * Codec probe: request and enable the supplies, reset the chip, bring
 * it to STANDBY, clear the volume-update latch bits and the bypass
 * path, then drop the regulator enables taken here (bias management
 * owns them from now on).
 */
static int wm8731_probe(struct snd_soc_codec *codec)
{
	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
	int ret = 0, i;

	for (i = 0; i < ARRAY_SIZE(wm8731->supplies); i++)
		wm8731->supplies[i].supply = wm8731_supply_names[i];

	ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8731->supplies),
				      wm8731->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
				    wm8731->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		return ret;
	}

	ret = wm8731_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
		goto err_regulator_enable;
	}

	wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	/* Latch the update bits */
	snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0);
	snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0);
	snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0);
	snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);

	/* Disable bypass path by default */
	snd_soc_update_bits(codec, WM8731_APANA, 0x8, 0);

	/* Regulators will have been enabled by bias management */
	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);

	return 0;

err_regulator_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);

	return ret;
}

/* power down chip */
static int wm8731_remove(struct snd_soc_codec *codec)
{
	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);

	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);

	return 0;
}
/* ASoC codec driver descriptor: ties together the probe/remove hooks,
 * bias management, controls and the DAPM graph defined above. */
static struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
	.probe =	wm8731_probe,
	.remove =	wm8731_remove,
	.set_bias_level = wm8731_set_bias_level,
	.suspend_bias_off = true,

	.dapm_widgets = wm8731_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm8731_dapm_widgets),
	.dapm_routes = wm8731_intercon,
	.num_dapm_routes = ARRAY_SIZE(wm8731_intercon),
	.controls =	wm8731_snd_controls,
	.num_controls = ARRAY_SIZE(wm8731_snd_controls),
};

static const struct of_device_id wm8731_of_match[] = {
	{ .compatible = "wlf,wm8731", },
	{ }
};

MODULE_DEVICE_TABLE(of, wm8731_of_match);

/* 7-bit register address + 9-bit value, cached except for RESET. */
static const struct regmap_config wm8731_regmap = {
	.reg_bits = 7,
	.val_bits = 9,

	.max_register = WM8731_RESET,
	.volatile_reg = wm8731_volatile,
	.writeable_reg = wm8731_writeable,

	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wm8731_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm8731_reg_defaults),
};
#if defined(CONFIG_SPI_MASTER)
/* SPI bus glue: allocate private data, set up the regmap over SPI and
 * register the codec. */
static int wm8731_spi_probe(struct spi_device *spi)
{
	struct wm8731_priv *wm8731;
	int ret;

	wm8731 = devm_kzalloc(&spi->dev, sizeof(*wm8731), GFP_KERNEL);
	if (wm8731 == NULL)
		return -ENOMEM;

	mutex_init(&wm8731->lock);

	wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
	if (IS_ERR(wm8731->regmap)) {
		ret = PTR_ERR(wm8731->regmap);
		dev_err(&spi->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	spi_set_drvdata(spi, wm8731);

	ret = snd_soc_register_codec(&spi->dev,
			&soc_codec_dev_wm8731, &wm8731_dai, 1);
	if (ret != 0) {
		dev_err(&spi->dev, "Failed to register CODEC: %d\n", ret);
		return ret;
	}

	return 0;
}

static int wm8731_spi_remove(struct spi_device *spi)
{
	snd_soc_unregister_codec(&spi->dev);
	return 0;
}

static struct spi_driver wm8731_spi_driver = {
	.driver = {
		.name	= "wm8731",
		.owner	= THIS_MODULE,
		.of_match_table = wm8731_of_match,
	},
	.probe		= wm8731_spi_probe,
	.remove		= wm8731_spi_remove,
};
#endif /* CONFIG_SPI_MASTER */
#if IS_ENABLED(CONFIG_I2C)
/* I2C bus glue: allocate private data, set up the regmap over I2C and
 * register the codec. Mirrors the SPI path above. */
static int wm8731_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8731_priv *wm8731;
	int ret;

	wm8731 = devm_kzalloc(&i2c->dev, sizeof(struct wm8731_priv),
			      GFP_KERNEL);
	if (wm8731 == NULL)
		return -ENOMEM;

	mutex_init(&wm8731->lock);

	wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
	if (IS_ERR(wm8731->regmap)) {
		ret = PTR_ERR(wm8731->regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	i2c_set_clientdata(i2c, wm8731);

	ret = snd_soc_register_codec(&i2c->dev,
			&soc_codec_dev_wm8731, &wm8731_dai, 1);
	if (ret != 0) {
		dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
		return ret;
	}

	return 0;
}

static int wm8731_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}

static const struct i2c_device_id wm8731_i2c_id[] = {
	{ "wm8731", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8731_i2c_id);

static struct i2c_driver wm8731_i2c_driver = {
	.driver = {
		.name = "wm8731",
		.owner = THIS_MODULE,
		.of_match_table = wm8731_of_match,
	},
	.probe =    wm8731_i2c_probe,
	.remove =   wm8731_i2c_remove,
	.id_table = wm8731_i2c_id,
};
#endif
/* Register the bus drivers for whichever buses are configured. */
static int __init wm8731_modinit(void)
{
	int ret = 0;
#if IS_ENABLED(CONFIG_I2C)
	ret = i2c_add_driver(&wm8731_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8731 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	/* NOTE(review): when both buses are enabled this overwrites any
	 * I2C registration error -- only the SPI result is returned. */
	ret = spi_register_driver(&wm8731_spi_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8731 SPI driver: %d\n",
		       ret);
	}
#endif
	return ret;
}
module_init(wm8731_modinit);

/* Unregister whatever wm8731_modinit() registered. */
static void __exit wm8731_exit(void)
{
#if IS_ENABLED(CONFIG_I2C)
	i2c_del_driver(&wm8731_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8731_spi_driver);
#endif
}
module_exit(wm8731_exit);

MODULE_DESCRIPTION("ASoC WM8731 driver");
MODULE_AUTHOR("Richard Purdie");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DerickBeckwith/linux-stable | mm/vmscan.c | 86 | 109787 | /*
* linux/mm/vmscan.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Swap reorganised 29.12.95, Stephen Tweedie.
* kswapd added: 7.1.96 sct
* Removed kswapd_ctl limits, and swap out as many pages as needed
* to bring the system back to freepages.high: 2.4.97, Rik van Riel.
* Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
* Multiqueue VM started 5.8.00, Rik van Riel.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h> /* for try_to_release_page(),
buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include <linux/swapops.h>
#include <linux/balloon_compaction.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
/* Per-invocation reclaim parameters, passed down the shrink_* paths. */
struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t	*nodemask;
};

/* The LRU list links pages through page->lru; prev is the list head. */
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/* Prefetch helpers: warm a field of the next page on the LRU while
 * processing the current one; no-ops without architecture support. */
#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
unsigned long vm_total_pages;	/* The total number of pages which the VM controls */

/* All registered shrinkers, protected by shrinker_rwsem. */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
/* Global reclaim: no specific memcg is being targeted. */
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}
#else
/* Without CONFIG_MEMCG all reclaim is global. */
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}
#endif
/* Count the LRU pages in @zone that could still be reclaimed:
 * file-backed pages always, anonymous pages only while swap space
 * remains. */
static unsigned long zone_reclaimable_pages(struct zone *zone)
{
	unsigned long pages;

	pages = zone_page_state(zone, NR_ACTIVE_FILE) +
		zone_page_state(zone, NR_INACTIVE_FILE);

	if (get_nr_swap_pages() > 0)
		pages += zone_page_state(zone, NR_ACTIVE_ANON) +
			 zone_page_state(zone, NR_INACTIVE_ANON);

	return pages;
}
/* A zone still counts as reclaimable until it has been scanned six
 * times over without freeing its potentially reclaimable pages. */
bool zone_reclaimable(struct zone *zone)
{
	unsigned long limit = zone_reclaimable_pages(zone) * 6;

	return zone->pages_scanned < limit;
}
/* Size of one LRU list in a lruvec: per-memcg accounting when memcg is
 * enabled, otherwise the plain zone counter. */
static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	if (mem_cgroup_disabled())
		return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);

	return mem_cgroup_get_lru_size(lruvec, lru);
}
/*
* Add a shrinker callback to be called from the vm.
*/
int register_shrinker(struct shrinker *shrinker)
{
size_t size = sizeof(*shrinker->nr_deferred);
/*
* If we only have one possible node in the system anyway, save
* ourselves the trouble and disable NUMA aware behavior. This way we
* will save memory and some small loop time later.
*/
if (nr_node_ids == 1)
shrinker->flags &= ~SHRINKER_NUMA_AWARE;
if (shrinker->flags & SHRINKER_NUMA_AWARE)
size *= nr_node_ids;
shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
if (!shrinker->nr_deferred)
return -ENOMEM;
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
return 0;
}
EXPORT_SYMBOL(register_shrinker);
/*
 * Remove one shrinker from the global list and release its per-node
 * deferred-scan counters.  The write lock guarantees no shrink_slab()
 * walker still holds a reference when we free nr_deferred.
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	/* Safe only after the list removal above is globally visible. */
	kfree(shrinker->nr_deferred);
}
EXPORT_SYMBOL(unregister_shrinker);
/* Default number of objects a shrinker is asked to scan per ->scan_objects() call. */
#define SHRINK_BATCH 128
/*
 * Run one shrinker against a single NUMA node (shrinkctl->nid).
 *
 * Derives a scan target from the page-LRU pressure observed by the
 * caller (@nr_pages_scanned out of @lru_pages), the shrinker's cost
 * estimate (->seeks) and any work deferred by earlier invocations,
 * then calls ->scan_objects() in batches until the target is met or
 * the shrinker returns SHRINK_STOP.
 *
 * Returns the number of objects freed.
 */
static unsigned long
shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
		 unsigned long nr_pages_scanned, unsigned long lru_pages)
{
	unsigned long freed = 0;
	unsigned long long delta;
	long total_scan;
	long max_pass;
	long nr;
	long new_nr;
	int nid = shrinkctl->nid;
	long batch_size = shrinker->batch ? shrinker->batch
					  : SHRINK_BATCH;

	/* Empty cache: nothing to scan, don't touch the deferred count. */
	max_pass = shrinker->count_objects(shrinker, shrinkctl);
	if (max_pass == 0)
		return 0;

	/*
	 * copy the current shrinker scan count into a local variable
	 * and zero it so that other concurrent shrinker invocations
	 * don't also do this scanning work.
	 */
	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);

	total_scan = nr;
	/* Scale page-LRU pressure onto this cache, normalised by its size. */
	delta = (4 * nr_pages_scanned) / shrinker->seeks;
	delta *= max_pass;
	do_div(delta, lru_pages + 1);
	total_scan += delta;
	if (total_scan < 0) {
		/* Arithmetic overflowed (buggy shrinker?); clamp to cache size. */
		printk(KERN_ERR
		"shrink_slab: %pF negative objects to delete nr=%ld\n",
		       shrinker->scan_objects, total_scan);
		total_scan = max_pass;
	}

	/*
	 * We need to avoid excessive windup on filesystem shrinkers
	 * due to large numbers of GFP_NOFS allocations causing the
	 * shrinkers to return -1 all the time. This results in a large
	 * nr being built up so when a shrink that can do some work
	 * comes along it empties the entire cache due to nr >>>
	 * max_pass.  This is bad for sustaining a working set in
	 * memory.
	 *
	 * Hence only allow the shrinker to scan the entire cache when
	 * a large delta change is calculated directly.
	 */
	if (delta < max_pass / 4)
		total_scan = min(total_scan, max_pass / 2);

	/*
	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimate number of
	 * freeable entries.
	 */
	if (total_scan > max_pass * 2)
		total_scan = max_pass * 2;

	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
				nr_pages_scanned, lru_pages,
				max_pass, delta, total_scan);

	/*
	 * Normally, we should not scan less than batch_size objects in one
	 * pass to avoid too frequent shrinker calls, but if the slab has less
	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects, otherwise we can end
	 * up failing allocations although there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
	 * batch_size.
	 *
	 * We detect the "tight on memory" situations by looking at the total
	 * number of objects we want to scan (total_scan). If it is greater
	 * than the total number of objects on slab (max_pass), we must be
	 * scanning at high prio and therefore should try to reclaim as much as
	 * possible.
	 */
	while (total_scan >= batch_size ||
	       total_scan >= max_pass) {
		unsigned long ret;
		unsigned long nr_to_scan = min(batch_size, total_scan);

		shrinkctl->nr_to_scan = nr_to_scan;
		ret = shrinker->scan_objects(shrinker, shrinkctl);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		count_vm_events(SLABS_SCANNED, nr_to_scan);
		total_scan -= nr_to_scan;

		cond_resched();
	}

	/*
	 * move the unused scan count back into the shrinker in a
	 * manner that handles concurrent updates. If we exhausted the
	 * scan, there is no need to do an update.
	 */
	if (total_scan > 0)
		new_nr = atomic_long_add_return(total_scan,
						&shrinker->nr_deferred[nid]);
	else
		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);

	trace_mm_shrink_slab_end(shrinker, freed, nr, new_nr);
	return freed;
}
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object. With this in mind we age equal
 * percentages of the lru and ageable caches. This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increase the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt. It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrinkctl,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long freed = 0;

	/* Guarantee non-zero pressure so deferred shrinker work still runs. */
	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/*
		 * If we would return 0, our callers would understand that we
		 * have nothing else to shrink and give up trying. By returning
		 * 1 we keep it going and assume we'll be able to shrink next
		 * time.
		 */
		freed = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		/* Non NUMA-aware shrinkers are invoked exactly once, as node 0. */
		if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) {
			shrinkctl->nid = 0;
			freed += shrink_slab_node(shrinkctl, shrinker,
					nr_pages_scanned, lru_pages);
			continue;
		}

		/* NUMA-aware shrinkers: once per online node in the scan mask. */
		for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {
			if (node_online(shrinkctl->nid))
				freed += shrink_slab_node(shrinkctl, shrinker,
						nr_pages_scanned, lru_pages);
		}
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return freed;
}
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and the
	 * optional buffer heads at page->private - i.e. exactly two
	 * references once the private one is accounted for.
	 */
	int expected = 2 + page_has_private(page);

	return page_count(page) == expected;
}
static int may_write_to_queue(struct backing_dev_info *bdi,
struct scan_control *sc)
{
if (current->flags & PF_SWAPWRITE)
return 1;
if (!bdi_write_congested(bdi))
return 1;
if (bdi == current->backing_dev_info)
return 1;
return 0;
}
/*
 * We detected a synchronous write error writing a page out. Probably
 * -ENOSPC. We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up. But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	/* Only record the error if the page still belongs to @mapping. */
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}
/* possible outcome of pageout() - note the lock state each verdict implies */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;
/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 *
 * Returns a pageout_t verdict; the page remains locked except on
 * PAGE_SUCCESS, where ->writepage() has unlocked it.
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling. This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	/* The fs cannot write this page at all - don't keep retrying. */
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.  The caller must hold the page lock;
 * on success the refcount has been frozen to 0 and the caller is expected
 * to unfreeze it (see remove_mapping() and shrink_page_list()).
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		/* Snapshot the callback before dropping tree_lock. */
		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}
/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or
 * if someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref
 * on this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!__remove_mapping(mapping, page))
		return 0;

	/*
	 * __remove_mapping() left the refcount frozen at zero.
	 * Unfreezing it to 1 rather than 2 effectively drops the
	 * pagecache reference for us without requiring another
	 * atomic operation.
	 */
	page_unfreeze_refs(page, 1);
	return 1;
}
/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	bool is_unevictable;
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		is_unevictable = false;
		lru_cache_add(page);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		is_unevictable = true;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an evictable
	 * page is on unevictable list, it never be freed. To avoid that,
	 * check after we added it to the list, again.
	 */
	if (is_unevictable && page_evictable(page)) {
		if (!isolate_lru_page(page)) {
			/* Re-isolated it; drop the extra ref and retry. */
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	/* Account rescue/cull transitions between the two list kinds. */
	if (was_unevictable && !is_unevictable)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && is_unevictable)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}
/* Verdicts returned by page_check_references(). */
enum page_references {
	PAGEREF_RECLAIM,	/* reclaim, writeback allowed if dirty */
	PAGEREF_RECLAIM_CLEAN,	/* reclaim only if no writeback is needed */
	PAGEREF_KEEP,		/* leave on the inactive list for now */
	PAGEREF_ACTIVATE,	/* promote to the active list */
};
/*
 * Decide the fate of @page for reclaim based on how recently it was
 * referenced through page tables and via the PG_referenced flag.
 */
static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	/* Count (and clear) pte references before clearing PG_referenced. */
	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}
/*
 * Check if a page is dirty or under writeback, reporting the result via
 * @dirty and @writeback.  Filesystems with buffer-backed pages may refine
 * the page-flag answer through ->is_dirty_writeback.
 */
static void page_check_dirty_writeback(struct page *page,
				       bool *dirty, bool *writeback)
{
	struct address_space *mapping;

	/*
	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them
	 */
	if (!page_is_file_cache(page)) {
		*dirty = false;
		*writeback = false;
		return;
	}

	/* By default assume that the page flags are accurate */
	*dirty = PageDirty(page);
	*writeback = PageWriteback(page);

	/* Verify dirty/writeback state if the filesystem supports it */
	if (!page_has_private(page))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops->is_dirty_writeback)
		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
}
/*
 * shrink_page_list() returns the number of reclaimed pages.
 *
 * Processes the isolated pages on @page_list (all from @zone), trying to
 * unmap, clean and free each one.  Pages that cannot be freed are returned
 * on @page_list.  The ret_nr_* counters are ACCUMULATED into (+=), so the
 * caller must initialise them.  @force_reclaim skips the reference checks
 * (used when the caller has already decided the pages must go).
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      enum ttu_flags ttu_flags,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_unqueued_dirty,
				      unsigned long *ret_nr_congested,
				      unsigned long *ret_nr_writeback,
				      unsigned long *ret_nr_immediate,
				      bool force_reclaim)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_unqueued_dirty = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;
	unsigned long nr_immediate = 0;

	cond_resched();

	mem_cgroup_uncharge_start();
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		enum page_references references = PAGEREF_RECLAIM_CLEAN;
		bool dirty, writeback;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON_PAGE(PageActive(page), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The number of dirty pages determines if a zone is marked
		 * reclaim_congested which affects wait_iff_congested. kswapd
		 * will stall and start writing pages if the tail of the LRU
		 * is all dirty unqueued pages.
		 */
		page_check_dirty_writeback(page, &dirty, &writeback);
		if (dirty || writeback)
			nr_dirty++;

		if (dirty && !writeback)
			nr_unqueued_dirty++;

		/*
		 * Treat this page as congested if the underlying BDI is or if
		 * pages are cycling through the LRU so quickly that the
		 * pages marked for immediate reclaim are making it to the
		 * end of the LRU a second time.
		 */
		mapping = page_mapping(page);
		if ((mapping && bdi_write_congested(mapping->backing_dev_info)) ||
		    (writeback && PageReclaim(page)))
			nr_congested++;

		/*
		 * If a page at the tail of the LRU is under writeback, there
		 * are three cases to consider.
		 *
		 * 1) If reclaim is encountering an excessive number of pages
		 *    under writeback and this page is both under writeback and
		 *    PageReclaim then it indicates that pages are being queued
		 *    for IO but are being recycled through the LRU before the
		 *    IO can complete. Waiting on the page itself risks an
		 *    indefinite stall if it is impossible to writeback the
		 *    page due to IO error or disconnected storage so instead
		 *    note that the LRU is being scanned too quickly and the
		 *    caller can stall after page list has been processed.
		 *
		 * 2) Global reclaim encounters a page, memcg encounters a
		 *    page that is not marked for immediate reclaim or
		 *    the caller does not have __GFP_IO. In this case mark
		 *    the page for immediate reclaim and continue scanning.
		 *
		 *    __GFP_IO is checked  because a loop driver thread might
		 *    enter reclaim, and deadlock if it waits on a page for
		 *    which it is needed to do the write (loop masks off
		 *    __GFP_IO|__GFP_FS for this reason); but more thought
		 *    would probably show more reasons.
		 *
		 *    Don't require __GFP_FS, since we're not going into the
		 *    FS, just waiting on its writeback completion. Worryingly,
		 *    ext4 gfs2 and xfs allocate pages with
		 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
		 *    may_enter_fs here is liable to OOM on them.
		 *
		 * 3) memcg encounters a page that is not already marked
		 *    PageReclaim. memcg does not have any dirty pages
		 *    throttling so we could easily OOM just because too many
		 *    pages are in writeback and there is nothing else to
		 *    reclaim. Wait for the writeback to complete.
		 */
		if (PageWriteback(page)) {
			/* Case 1 above */
			if (current_is_kswapd() &&
			    PageReclaim(page) &&
			    zone_is_reclaim_writeback(zone)) {
				nr_immediate++;
				goto keep_locked;

			/* Case 2 above */
			} else if (global_reclaim(sc) ||
			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here end up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				nr_writeback++;

				goto keep_locked;

			/* Case 3 above */
			} else {
				wait_on_page_writeback(page);
			}
		}

		if (!force_reclaim)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page, page_list))
				goto activate_locked;
			may_enter_fs = 1;

			/* Adding to swap updated mapping */
			mapping = page_mapping(page);
		}

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, ttu_flags)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but only writeback
			 * if many dirty pages have been encountered.
			 */
			if (page_is_file_cache(page) &&
					(!current_is_kswapd() ||
					 !zone_is_reclaim_dirty(zone))) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principal to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		/* Page is ours; batch it for freeing. */
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		/* Mlocked page: drop any swap slot and send it back to the LRU. */
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON_PAGE(PageActive(page), page);
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
	}

	free_hot_cold_page_list(&free_pages, 1);

	/* Hand the survivors back to the caller. */
	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	mem_cgroup_uncharge_end();

	*ret_nr_dirty += nr_dirty;
	*ret_nr_congested += nr_congested;
	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
	*ret_nr_writeback += nr_writeback;
	*ret_nr_immediate += nr_immediate;
	return nr_reclaimed;
}
/*
 * Reclaim the clean, unmapped file pages on @page_list, leaving the rest
 * (and any survivors) on the list for the caller.  Returns the number of
 * pages reclaimed; the caller's NR_ISOLATED_FILE accounting is adjusted.
 *
 * The nr_* output locals must be zero-initialised: shrink_page_list()
 * accumulates into its ret_nr_* parameters with +=, so passing them
 * uninitialized (as the old dummy1..dummy5 were) reads indeterminate
 * values - undefined behavior, even though the results were discarded.
 */
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
	};
	unsigned long ret;
	unsigned long nr_dirty = 0, nr_unqueued_dirty = 0, nr_congested = 0;
	unsigned long nr_writeback = 0, nr_immediate = 0;
	struct page *page, *next;
	LIST_HEAD(clean_pages);

	/* Pull out the candidates; skip dirty pages and isolated balloon pages. */
	list_for_each_entry_safe(page, next, page_list, lru) {
		if (page_is_file_cache(page) && !PageDirty(page) &&
		    !isolated_balloon_page(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	ret = shrink_page_list(&clean_pages, zone, &sc,
			TTU_UNMAP|TTU_IGNORE_ACCESS,
			&nr_dirty, &nr_unqueued_dirty, &nr_congested,
			&nr_writeback, &nr_immediate, true);
	list_splice(&clean_pages, page_list);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
	return ret;
}
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it is cannot write to backing storage
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
	 * that it is possible to migrate without blocking
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}
/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		int nr_pages;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON_PAGE(!PageLRU(page), page);

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			/* A THP counts for all of its subpages. */
			nr_pages = hpage_nr_pages(page);
			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
			list_move(&page->lru, dst);
			nr_taken += nr_pages;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	*nr_scanned = scan;
	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
				    nr_taken, mode, is_file_lru(lru));
	return nr_taken;
}
/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamentnal difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON_PAGE(!page_count(page), page);

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;

		spin_lock_irq(&zone->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		/* Re-check under the lock: the flag may have changed. */
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU
 * list and then get rescheduled.  When there are massive numbers of
 * tasks doing page allocation, such sleeping direct reclaimers may keep
 * piling up on each CPU, the LRU list will go small and be scanned
 * faster than necessary, leading to unnecessary swapping, thrashing and
 * OOM.  Returns non-zero when the caller should throttle.
 */
static int too_many_isolated(struct zone *zone, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	/* kswapd is never throttled here. */
	if (current_is_kswapd())
		return 0;

	/* memcg-targeted reclaim is throttled by other means. */
	if (!global_reclaim(sc))
		return 0;

	inactive = zone_page_state(zone,
			file ? NR_INACTIVE_FILE : NR_INACTIVE_ANON);
	isolated = zone_page_state(zone,
			file ? NR_ISOLATED_FILE : NR_ISOLATED_ANON);

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
		inactive >>= 3;

	return isolated > inactive;
}
/*
 * Return the pages on @page_list to their LRU lists, freeing any page
 * whose last reference we hold.  Called with zone->lru_lock held; the
 * lock is dropped and re-taken around operations that may sleep or
 * must run without it.  On return, @page_list holds the pages to free.
 */
static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	struct zone *zone = lruvec_zone(lruvec);
	LIST_HEAD(pages_to_free);

	/*
	 * Put back any unfreeable pages.
	 */
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
		if (unlikely(!page_evictable(page))) {
			/* putback_lru_page() must run without the lru_lock. */
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (put_page_testzero(page)) {
			/* We held the last reference; take it back off the LRU. */
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				/* Compound destructor may sleep; drop the lock. */
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);
		}
	}

	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
	list_splice(&pages_to_free, page_list);
}
/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
/*
 * Isolate up to @nr_to_scan pages from the inactive @lru of @lruvec,
 * reclaim what can be reclaimed via shrink_page_list(), and put the
 * remainder back.  Returns the number of pages reclaimed.  May throttle
 * (congestion_wait / wait_iff_congested) based on writeback and dirty
 * page counters reported back by shrink_page_list().
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		     struct scan_control *sc, enum lru_list lru)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_unqueued_dirty = 0;
	unsigned long nr_writeback = 0;
	unsigned long nr_immediate = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	/* Back off while too many pages of this type are already isolated */
	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);

	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);

	if (global_reclaim(sc)) {
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
	}
	spin_unlock_irq(&zone->lru_lock);

	if (nr_taken == 0)
		return 0;

	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
				&nr_writeback, &nr_immediate,
				false);

	spin_lock_irq(&zone->lru_lock);

	reclaim_stat->recent_scanned[file] += nr_taken;

	if (global_reclaim(sc)) {
		if (current_is_kswapd())
			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
					       nr_reclaimed);
		else
			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
					       nr_reclaimed);
	}

	putback_inactive_pages(lruvec, &page_list);

	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);

	spin_unlock_irq(&zone->lru_lock);

	/* page_list now holds only freeable pages, see putback_inactive_pages() */
	free_hot_cold_page_list(&page_list, 1);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate. Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device. The
	 * only option is to throttle from reclaim context which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
	 *
	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
	 * of pages under pages flagged for immediate reclaim and stall if any
	 * are encountered in the nr_immediate check below.
	 */
	if (nr_writeback && nr_writeback == nr_taken)
		zone_set_flag(zone, ZONE_WRITEBACK);

	/*
	 * memcg will stall in page writeback so only consider forcibly
	 * stalling for global reclaim
	 */
	if (global_reclaim(sc)) {
		/*
		 * Tag a zone as congested if all the dirty pages scanned were
		 * backed by a congested BDI and wait_iff_congested will stall.
		 */
		if (nr_dirty && nr_dirty == nr_congested)
			zone_set_flag(zone, ZONE_CONGESTED);

		/*
		 * If dirty pages are scanned that are not queued for IO, it
		 * implies that flushers are not keeping up. In this case, flag
		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
		 * pages from reclaim context. It will forcibly stall in the
		 * next check.
		 */
		if (nr_unqueued_dirty == nr_taken)
			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);

		/*
		 * In addition, if kswapd scans pages marked for
		 * immediate reclaim and under writeback (nr_immediate), it
		 * implies that pages are cycling through the LRU faster than
		 * they are written so also forcibly stall.
		 */
		if (nr_unqueued_dirty == nr_taken || nr_immediate)
			congestion_wait(BLK_RW_ASYNC, HZ/10);
	}

	/*
	 * Stall direct reclaim for IO completions if underlying BDIs or zone
	 * is congested. Allow kswapd to continue until it starts encountering
	 * unqueued dirty pages or cycling through the LRU too quickly.
	 */
	if (!sc->hibernation_mode && !current_is_kswapd())
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),
		nr_scanned, nr_reclaimed,
		sc->priority,
		trace_shrink_flags(file));
	return nr_reclaimed;
}
/*
* This moves pages from the active list to the inactive list.
*
* We move them the other way if the page is referenced by one or more
* processes, from rmap.
*
* If the pages are mostly unmapped, the processing is fast and it is
* appropriate to hold zone->lru_lock across the whole operation. But if
* the pages are mapped, the processing is slow (page_referenced()) so we
* should drop zone->lru_lock around each page. It's impossible to balance
* this, so instead we remove the pages from the LRU while processing them.
* It is safe to rely on PG_active against the non-LRU pages in here because
* nobody will play with that bit on a non-LRU page.
*
* The downside is that we have to touch page->_count against each page.
* But we had to alter page->flags anyway.
*/
/*
 * Splice the isolated pages on @list back onto @lru of @lruvec, updating
 * LRU sizes and vmstat.  Pages whose last reference is dropped here are
 * collected on @pages_to_free (compound pages are destroyed directly).
 * Called with zone->lru_lock held; it is dropped only around the compound
 * page destructor.
 */
static void move_active_pages_to_lru(struct lruvec *lruvec,
				     struct list_head *list,
				     struct list_head *pages_to_free,
				     enum lru_list lru)
{
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long pgmoved = 0;
	struct page *page;
	int nr_pages;

	while (!list_empty(list)) {
		page = lru_to_page(list);
		/* per-page lookup: memcg pages can belong to different lruvecs */
		lruvec = mem_cgroup_page_lruvec(page, zone);

		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);

		nr_pages = hpage_nr_pages(page);
		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
		list_move(&page->lru, &lruvec->lists[lru]);
		pgmoved += nr_pages;

		if (put_page_testzero(page)) {
			/* last reference dropped: remove from the LRU again */
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				/* destructor may sleep; drop the LRU lock */
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, pages_to_free);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}
/*
 * Scan up to @nr_to_scan pages off the active @lru: pages referenced via
 * executable file mappings are rotated back to the active list, everything
 * else is deactivated onto the inactive list.  See the comment above
 * move_active_pages_to_lru() for the put-back half.
 */
static void shrink_active_list(unsigned long nr_to_scan,
			       struct lruvec *lruvec,
			       struct scan_control *sc,
			       enum lru_list lru)
{
	unsigned long nr_taken;
	unsigned long nr_scanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	unsigned long nr_rotated = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);
	if (global_reclaim(sc))
		zone->pages_scanned += nr_scanned;

	reclaim_stat->recent_scanned[file] += nr_taken;

	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	/* Process the snipped-off pages without holding the LRU lock */
	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page))) {
			putback_lru_page(page);
			continue;
		}

		if (unlikely(buffer_heads_over_limit)) {
			/* try to strip buffer heads to relieve lowmem pressure */
			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}

		if (page_referenced(page, 0, sc->target_mem_cgroup,
				    &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list. So
			 * that executable code get better chances to stay in
			 * memory under moderate memory pressure. Anon pages
			 * are not likely to be evicted by use-once streaming
			 * IO, plus JVM can create lots of anon VM_EXEC pages,
			 * so we ignore them here.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&zone->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as rotated,
	 * even though only some of them are actually re-activated. This
	 * helps balance scan pressure between file and anonymous pages in
	 * get_scan_ratio.
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;

	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	/* l_hold now holds only freeable pages collected by the moves above */
	free_hot_cold_page_list(&l_hold, 1);
}
#ifdef CONFIG_SWAP
/*
 * Zone-global check: is the inactive anon list too small relative to the
 * active one?  Uses the zone's precomputed inactive_ratio.
 */
static int inactive_anon_is_low_global(struct zone *zone)
{
	unsigned long nr_active = zone_page_state(zone, NR_ACTIVE_ANON);
	unsigned long nr_inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	return nr_inactive * zone->inactive_ratio < nr_active;
}
/**
* inactive_anon_is_low - check if anonymous pages need to be deactivated
* @lruvec: LRU vector to check
*
* Returns true if the zone does not have enough inactive anon pages,
* meaning some active anon pages need to be deactivated.
*/
/*
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * Returns non-zero if there are too few inactive anon pages, meaning some
 * active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct lruvec *lruvec)
{
	/*
	 * If we don't have swap space, anonymous page deactivation
	 * is pointless.
	 */
	if (!total_swap_pages)
		return 0;

	return mem_cgroup_disabled() ?
		inactive_anon_is_low_global(lruvec_zone(lruvec)) :
		mem_cgroup_inactive_anon_is_low(lruvec);
}
#else
/* !CONFIG_SWAP: anon pages can never be reclaimed, so never deactivate. */
static inline int inactive_anon_is_low(struct lruvec *lruvec)
{
	return 0;
}
#endif
/**
* inactive_file_is_low - check if file pages need to be deactivated
* @lruvec: LRU vector to check
*
* When the system is doing streaming IO, memory pressure here
* ensures that active file pages get deactivated, until more
* than half of the file pages are on the inactive list.
*
* Once we get to that situation, protect the system's working
* set from being evicted by disabling active file page aging.
*
* This uses a different ratio than the anonymous pages, because
* the page cache uses a use-once replacement algorithm.
*/
/*
 * inactive_file_is_low - check if file pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * Returns non-zero while the active file list is larger than the inactive
 * one, i.e. until at least half of the file pages are inactive.  Uses a
 * different ratio than anon because the page cache follows a use-once
 * replacement pattern.
 */
static int inactive_file_is_low(struct lruvec *lruvec)
{
	unsigned long nr_active = get_lru_size(lruvec, LRU_ACTIVE_FILE);

	return nr_active > get_lru_size(lruvec, LRU_INACTIVE_FILE);
}
/* Dispatch the "inactive list too small?" check by LRU type. */
static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
{
	return is_file_lru(lru) ? inactive_file_is_low(lruvec)
				: inactive_anon_is_low(lruvec);
}
/*
 * Shrink one LRU list.  Inactive lists are reclaimed directly; active
 * lists are only aged (deactivated) when their inactive counterpart has
 * grown too small, and aging reclaims nothing by itself.
 */
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
				 struct lruvec *lruvec, struct scan_control *sc)
{
	if (!is_active_lru(lru))
		return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);

	if (inactive_list_is_low(lruvec, lru))
		shrink_active_list(nr_to_scan, lruvec, sc, lru);
	return 0;
}
/* Effective swappiness: global knob for global reclaim, else the memcg's. */
static int vmscan_swappiness(struct scan_control *sc)
{
	return global_reclaim(sc) ? vm_swappiness
				  : mem_cgroup_swappiness(sc->target_mem_cgroup);
}
/* How get_scan_count() balances pressure between the anon and file LRUs. */
enum scan_balance {
	SCAN_EQUAL,	/* scan both types relative to list size */
	SCAN_FRACT,	/* scan proportionally to swappiness/rotation ratios */
	SCAN_ANON,	/* scan only the anon lists */
	SCAN_FILE,	/* scan only the file lists */
};
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned. The relative value of each set of LRU lists is determined
* by looking at the fraction of the pages scanned we did rotate back
* onto the active list instead of evict.
*
* nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
* nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
*/
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
			   unsigned long *nr)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	u64 fraction[2];
	u64 denominator = 0;	/* gcc */
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long anon_prio, file_prio;
	enum scan_balance scan_balance;
	unsigned long anon, file, free;
	bool force_scan = false;
	unsigned long ap, fp;
	enum lru_list lru;

	/*
	 * If the zone or memcg is small, nr[l] can be 0. This
	 * results in no scanning on this priority and a potential
	 * priority drop. Global direct reclaim can go to the next
	 * zone and tends to have no problems. Global kswapd is for
	 * zone balancing and it needs to scan a minimum amount. When
	 * reclaiming for a memcg, a priority drop can cause high
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
	if (current_is_kswapd() && !zone_reclaimable(zone))
		force_scan = true;
	if (!global_reclaim(sc))
		force_scan = true;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Global reclaim will swap to prevent OOM even with no
	 * swappiness, but memcg users want to use this knob to
	 * disable swapping for individual groups completely when
	 * using the memory controller's swap limit feature would be
	 * too expensive.
	 */
	if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Do not apply any pressure balancing cleverness when the
	 * system is close to OOM, scan both anon and file equally
	 * (unless the swappiness setting disagrees with swapping).
	 */
	if (!sc->priority && vmscan_swappiness(sc)) {
		scan_balance = SCAN_EQUAL;
		goto out;
	}

	anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
		get_lru_size(lruvec, LRU_INACTIVE_ANON);
	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
		get_lru_size(lruvec, LRU_INACTIVE_FILE);

	/*
	 * If it's foreseeable that reclaiming the file cache won't be
	 * enough to get the zone back into a desirable shape, we have
	 * to swap. Better start now and leave the - probably heavily
	 * thrashing - remaining file pages alone.
	 */
	if (global_reclaim(sc)) {
		free = zone_page_state(zone, NR_FREE_PAGES);
		if (unlikely(file + free <= high_wmark_pages(zone))) {
			scan_balance = SCAN_ANON;
			goto out;
		}
	}

	/*
	 * There is enough inactive page cache, do not reclaim
	 * anything from the anonymous working set right now.
	 */
	if (!inactive_file_is_low(lruvec)) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	scan_balance = SCAN_FRACT;

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = vmscan_swappiness(sc);
	file_prio = 200 - anon_prio;

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages. We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	spin_lock_irq(&zone->lru_lock);
	/* Decay both averages once a quarter of the list has been scanned */
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
	}

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;
	spin_unlock_irq(&zone->lru_lock);

	fraction[0] = ap;
	fraction[1] = fp;
	denominator = ap + fp + 1;
out:
	/* Turn the chosen balance into per-list scan targets */
	for_each_evictable_lru(lru) {
		int file = is_file_lru(lru);
		unsigned long size;
		unsigned long scan;

		size = get_lru_size(lruvec, lru);
		scan = size >> sc->priority;

		if (!scan && force_scan)
			scan = min(size, SWAP_CLUSTER_MAX);

		switch (scan_balance) {
		case SCAN_EQUAL:
			/* Scan lists relative to size */
			break;
		case SCAN_FRACT:
			/*
			 * Scan types proportional to swappiness and
			 * their relative recent reclaim efficiency.
			 */
			scan = div64_u64(scan * fraction[file], denominator);
			break;
		case SCAN_FILE:
		case SCAN_ANON:
			/* Scan one type exclusively */
			if ((scan_balance == SCAN_FILE) != file)
				scan = 0;
			break;
		default:
			/* Look ma, no brain */
			BUG();
		}
		nr[lru] = scan;
	}
}
/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
/*
 * This is a basic per-zone page freer. Used by both kswapd and direct
 * reclaim.  Shrinks all evictable LRU lists of @lruvec according to the
 * targets computed by get_scan_count(), rebalancing the remaining anon/file
 * scan amounts once nr_to_reclaim has been met.
 */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long targets[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	enum lru_list lru;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct blk_plug plug;
	bool scan_adjusted = false;

	get_scan_count(lruvec, sc, nr);

	/* Record the original scan target for proportional adjustments later */
	memcpy(targets, nr, sizeof(nr));

	blk_start_plug(&plug);
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		unsigned long nr_anon, nr_file, percentage;
		unsigned long nr_scanned;

		/* Take each list in SWAP_CLUSTER_MAX-sized bites */
		for_each_evictable_lru(lru) {
			if (nr[lru]) {
				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
				nr[lru] -= nr_to_scan;

				nr_reclaimed += shrink_list(lru, nr_to_scan,
							    lruvec, sc);
			}
		}

		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
			continue;

		/*
		 * For global direct reclaim, reclaim only the number of pages
		 * requested. Less care is taken to scan proportionally as it
		 * is more important to minimise direct reclaim stall latency
		 * than it is to properly age the LRU lists.
		 */
		if (global_reclaim(sc) && !current_is_kswapd())
			break;

		/*
		 * For kswapd and memcg, reclaim at least the number of pages
		 * requested. Ensure that the anon and file LRUs shrink
		 * proportionally what was requested by get_scan_count(). We
		 * stop reclaiming one LRU and reduce the amount scanning
		 * proportional to the original scan target.
		 */
		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];

		if (nr_file > nr_anon) {
			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
						targets[LRU_ACTIVE_ANON] + 1;
			lru = LRU_BASE;
			percentage = nr_anon * 100 / scan_target;
		} else {
			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
						targets[LRU_ACTIVE_FILE] + 1;
			lru = LRU_FILE;
			percentage = nr_file * 100 / scan_target;
		}

		/* Stop scanning the smaller of the LRU */
		nr[lru] = 0;
		nr[lru + LRU_ACTIVE] = 0;

		/*
		 * Recalculate the other LRU scan count based on its original
		 * scan target and the percentage scanning already complete
		 */
		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		lru += LRU_ACTIVE;
		nr_scanned = targets[lru] - nr[lru];
		nr[lru] = targets[lru] * (100 - percentage) / 100;
		nr[lru] -= min(nr[lru], nr_scanned);

		scan_adjusted = true;
	}
	blk_finish_plug(&plug);
	sc->nr_reclaimed += nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(lruvec))
		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
				   sc, LRU_ACTIVE_ANON);

	throttle_vm_writeout(sc->gfp_mask);
}
/* Use reclaim/compaction for costly allocs or under memory pressure */
/* Use reclaim/compaction for costly allocs or under memory pressure */
static bool in_reclaim_compaction(struct scan_control *sc)
{
	if (!IS_ENABLED(CONFIG_COMPACTION) || !sc->order)
		return false;

	return sc->order > PAGE_ALLOC_COSTLY_ORDER ||
	       sc->priority < DEF_PRIORITY - 2;
}
/*
* Reclaim/compaction is used for high-order allocation requests. It reclaims
* order-0 pages before compacting the zone. should_continue_reclaim() returns
* true if more pages should be reclaimed such that when the page allocator
* calls try_to_compact_zone() that it will have enough free pages to succeed.
* It will give up earlier than that if there is difficulty reclaiming pages.
*/
static inline bool should_continue_reclaim(struct zone *zone,
					unsigned long nr_reclaimed,
					unsigned long nr_scanned,
					struct scan_control *sc)
{
	unsigned long pages_for_compaction;
	unsigned long inactive_lru_pages;

	/* If not in reclaim/compaction mode, stop */
	if (!in_reclaim_compaction(sc))
		return false;

	/* Consider stopping depending on scan and reclaim activity */
	if (sc->gfp_mask & __GFP_REPEAT) {
		/*
		 * For __GFP_REPEAT allocations, stop reclaiming if the
		 * full LRU list has been scanned and we are still failing
		 * to reclaim pages. This full LRU scan is potentially
		 * expensive but a __GFP_REPEAT caller really wants to succeed
		 */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/*
		 * For non-__GFP_REPEAT allocations which can presumably
		 * fail without consequence, stop if we failed to reclaim
		 * any pages from the last SWAP_CLUSTER_MAX number of
		 * pages that were scanned. This will return to the
		 * caller faster at the risk that reclaim/compaction and
		 * the resulting allocation attempt fail
		 */
		if (!nr_reclaimed)
			return false;
	}

	/*
	 * If we have not reclaimed enough pages for compaction and the
	 * inactive lists are large enough, continue reclaiming
	 */
	pages_for_compaction = (2UL << sc->order);
	inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
	/* anon pages only count when there is swap to put them in */
	if (get_nr_swap_pages() > 0)
		inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
	if (sc->nr_reclaimed < pages_for_compaction &&
			inactive_lru_pages > pages_for_compaction)
		return true;

	/* If compaction would go ahead or the allocation would succeed, stop */
	switch (compaction_suitable(zone, sc->order)) {
	case COMPACT_PARTIAL:
	case COMPACT_CONTINUE:
		return false;
	default:
		return true;
	}
}
/*
 * Shrink @zone: walk every memcg in the target hierarchy and shrink its
 * lruvec for this zone, repeating while should_continue_reclaim() asks
 * for more work on behalf of reclaim/compaction.
 */
static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_reclaimed, nr_scanned;

	do {
		struct mem_cgroup *root = sc->target_mem_cgroup;
		struct mem_cgroup_reclaim_cookie reclaim = {
			.zone = zone,
			.priority = sc->priority,
		};
		struct mem_cgroup *memcg;

		/* snapshot the counters to measure this round's progress */
		nr_reclaimed = sc->nr_reclaimed;
		nr_scanned = sc->nr_scanned;

		memcg = mem_cgroup_iter(root, NULL, &reclaim);
		do {
			struct lruvec *lruvec;

			lruvec = mem_cgroup_zone_lruvec(zone, memcg);

			shrink_lruvec(lruvec, sc);

			/*
			 * Direct reclaim and kswapd have to scan all memory
			 * cgroups to fulfill the overall scan target for the
			 * zone.
			 *
			 * Limit reclaim, on the other hand, only cares about
			 * nr_to_reclaim pages to be reclaimed and it will
			 * retry with decreasing priority if one round over the
			 * whole hierarchy is not sufficient.
			 */
			if (!global_reclaim(sc) &&
					sc->nr_reclaimed >= sc->nr_to_reclaim) {
				mem_cgroup_iter_break(root, memcg);
				break;
			}
			memcg = mem_cgroup_iter(root, memcg, &reclaim);
		} while (memcg);

		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
			   sc->nr_scanned - nr_scanned,
			   sc->nr_reclaimed - nr_reclaimed);

	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
					 sc->nr_scanned - nr_scanned, sc));
}
/* Returns true if compaction should go ahead for a high-order request */
/* Returns true if compaction should go ahead for a high-order request */
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
	unsigned long balance_gap, watermark;
	bool watermark_ok;

	/* Do not consider compaction for orders reclaim is meant to satisfy */
	if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * Compaction takes time to run and there are potentially other
	 * callers using the pages just freed. Continue reclaiming until
	 * there is a buffer of free pages available to give compaction
	 * a reasonable chance of completing and allocating the page
	 */
	balance_gap = min(low_wmark_pages(zone),
		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
			KSWAPD_ZONE_BALANCE_GAP_RATIO);
	/* require headroom for the order-sized block on top of the gap */
	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);

	/*
	 * If compaction is deferred, reclaim up to a point where
	 * compaction will have a chance of success when re-enabled
	 */
	if (compaction_deferred(zone, sc->order))
		return watermark_ok;

	/* If compaction is not ready to start, keep reclaiming */
	if (!compaction_suitable(zone, sc->order))
		return false;

	return watermark_ok;
}
/*
* This is the direct reclaim path, for page-allocating processes. We only
* try to reclaim pages from zones which will satisfy the caller's allocation
* request.
*
* We reclaim from a zone even if that zone is over high_wmark_pages(zone).
* Because:
* a) The caller may be trying to free *extra* pages to satisfy a higher-order
* allocation or
* b) The target zone may be at high_wmark_pages(zone) but the lower zones
* must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
* zone defense algorithm.
*
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*
* This function returns true if a zone is being reclaimed for a costly
* high-order allocation and compaction is ready to begin. This indicates to
* the caller that it should consider retrying the allocation instead of
* further reclaim.
*/
static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	bool aborted_reclaim = false;

	/*
	 * If the number of buffer_heads in the machine exceeds the maximum
	 * allowed level, force direct reclaim to scan the highmem zone as
	 * highmem pages could be pinning lowmem pages storing buffer_heads
	 */
	if (buffer_heads_over_limit)
		sc->gfp_mask |= __GFP_HIGHMEM;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(sc->gfp_mask), sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Take care memory controller reclaiming has small influence
		 * to global LRU.
		 */
		if (global_reclaim(sc)) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;
			if (sc->priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;	/* Let kswapd poll it */
			if (IS_ENABLED(CONFIG_COMPACTION)) {
				/*
				 * If we already have plenty of memory free for
				 * compaction in this zone, don't free any more.
				 * Even though compaction is invoked for any
				 * non-zero order, only frequent costly order
				 * reclamation is disruptive enough to become a
				 * noticeable problem, like transparent huge
				 * page allocations.
				 */
				if (compaction_ready(zone, sc)) {
					aborted_reclaim = true;
					continue;
				}
			}
			/*
			 * This steals pages from memory cgroups over softlimit
			 * and returns the number of reclaimed pages and
			 * scanned pages. This works for global memory pressure
			 * and balancing, not for a memcg's limit.
			 */
			nr_soft_scanned = 0;
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
			sc->nr_reclaimed += nr_soft_reclaimed;
			sc->nr_scanned += nr_soft_scanned;
			/* TODO: a check here could avoid a redundant shrink_zone() */
		}

		shrink_zone(zone, sc);
	}

	return aborted_reclaim;
}
/* All zones in zonelist are unreclaimable? */
/*
 * All zones in zonelist are unreclaimable?  Returns false as soon as one
 * populated, cpuset-allowed zone still looks reclaimable.
 */
static bool all_unreclaimable(struct zonelist *zonelist,
		struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			gfp_zone(sc->gfp_mask), sc->nodemask) {
		if (!populated_zone(zone) ||
		    !cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;
		if (zone_reclaimable(zone))
			return false;
	}

	return true;
}
/*
* This is the main entry point to direct page reclaim.
*
* If a full scan of the inactive list fails to free enough memory then we
* are "out of memory" and something needs to be killed.
*
* If the caller is !__GFP_FS then the probability of a failure is reasonably
* high - the zone may be full of dirty or under-writeback pages, which this
* caller can't do much about. We kick the writeback threads and take explicit
* naps in the hope that some of these pages can be written. But if the
* allocating task holds filesystem locks which prevent writeout this might not
* work, and the allocation attempt will fail.
*
* returns: 0, if no pages reclaimed
* else, the number of pages reclaimed
*/
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					  struct scan_control *sc,
					  struct shrink_control *shrink)
{
	unsigned long total_scanned = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct zoneref *z;
	struct zone *zone;
	unsigned long writeback_threshold;
	bool aborted_reclaim;

	delayacct_freepages_start();

	if (global_reclaim(sc))
		count_vm_event(ALLOCSTALL);

	/* Iterate with increasing pressure until enough is reclaimed */
	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		aborted_reclaim = shrink_zones(zonelist, sc);

		/*
		 * Don't shrink slabs when reclaiming memory from over limit
		 * cgroups but do shrink slab at least once when aborting
		 * reclaim for compaction to avoid unevenly scanning file/anon
		 * LRU pages over slab pages.
		 */
		if (global_reclaim(sc)) {
			unsigned long lru_pages = 0;

			nodes_clear(shrink->nodes_to_scan);
			for_each_zone_zonelist(zone, z, zonelist,
					gfp_zone(sc->gfp_mask)) {
				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
					continue;

				lru_pages += zone_reclaimable_pages(zone);
				node_set(zone_to_nid(zone),
					 shrink->nodes_to_scan);
			}

			shrink_slab(shrink, sc->nr_scanned, lru_pages);
			if (reclaim_state) {
				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		total_scanned += sc->nr_scanned;
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			goto out;

		/*
		 * If we're getting trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;

		/*
		 * Try to write back as many pages as we just scanned. This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice. But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout. So in laptop mode, write out the whole world.
		 */
		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
		if (total_scanned > writeback_threshold) {
			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
						WB_REASON_TRY_TO_FREE_PAGES);
			sc->may_writepage = 1;
		}
	} while (--sc->priority >= 0 && !aborted_reclaim);

out:
	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/*
	 * As hibernation is going on, kswapd is freezed so that it can't mark
	 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
	 * check.
	 */
	if (oom_killer_disabled)
		return 0;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (aborted_reclaim)
		return 1;

	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
		return 1;

	return 0;
}
/*
 * Check whether the lower zones of @pgdat hold enough free memory above
 * their min watermarks for PFMEMALLOC allocations to make progress.
 *
 * Returns true when free pages exceed half the summed min watermarks of
 * the populated zones up to and including ZONE_NORMAL.  As a side effect,
 * wakes kswapd when the watermark is not met and throttled processes are
 * waiting on it.
 */
static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
{
	struct zone *zone;
	unsigned long pfmemalloc_reserve = 0;
	unsigned long free_pages = 0;
	int i;
	bool wmark_ok;

	for (i = 0; i <= ZONE_NORMAL; i++) {
		zone = &pgdat->node_zones[i];
		/* Empty zones contribute no reserves; don't skew the sums */
		if (!populated_zone(zone))
			continue;
		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * If this node has no lower-zone reserves at all (e.g. only highmem
	 * or movable zones are populated), throttling on them would never
	 * be lifted: 0 > 0/2 is false forever.  Do not throttle.
	 */
	if (!pfmemalloc_reserve)
		return true;

	wmark_ok = free_pages > pfmemalloc_reserve / 2;

	/* kswapd must be awake if processes are being throttled */
	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
		pgdat->classzone_idx = min(pgdat->classzone_idx,
					   (enum zone_type)ZONE_NORMAL);
		wake_up_interruptible(&pgdat->kswapd_wait);
	}

	return wmark_ok;
}
/*
* Throttle direct reclaimers if backing storage is backed by the network
* and the PFMEMALLOC reserve for the preferred node is getting dangerously
* depleted. kswapd will continue to make progress and wake the processes
* when the low watermark is reached.
*
* Returns true if a fatal signal was delivered during throttling. If this
* happens, the page allocator should not consider triggering the OOM killer.
*/
/*
 * Block the current task on pgdat->pfmemalloc_wait if the preferred
 * node's PFMEMALLOC reserves are dangerously low, per the header comment
 * above. Returns true only if a fatal signal was delivered, in which
 * case the allocator must not trigger the OOM killer for this attempt.
 */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
					nodemask_t *nodemask)
{
	struct zone *zone;
	int high_zoneidx = gfp_zone(gfp_mask);
	pg_data_t *pgdat;

	/*
	 * Kernel threads should not be throttled as they may be indirectly
	 * responsible for cleaning pages necessary for reclaim to make forward
	 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could force other
	 * processes to block on log_wait_commit().
	 */
	if (current->flags & PF_KTHREAD)
		goto out;

	/*
	 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory
	 */
	if (fatal_signal_pending(current))
		goto out;

	/* Check if the pfmemalloc reserves are ok */
	first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
	/*
	 * NOTE(review): zone is assumed non-NULL here. If the zonelist can
	 * be empty for this gfp_zone (e.g. all zones excluded), this is a
	 * NULL dereference — confirm against the callers.
	 */
	pgdat = zone->zone_pgdat;
	if (pfmemalloc_watermark_ok(pgdat))
		goto out;

	/* Account for the throttling */
	count_vm_event(PGSCAN_DIRECT_THROTTLE);

	/*
	 * If the caller cannot enter the filesystem, it's possible that it
	 * is due to the caller holding an FS lock or performing a journal
	 * transaction in the case of a filesystem like ext[3|4]. In this case,
	 * it is not safe to block on pfmemalloc_wait as kswapd could be
	 * blocked waiting on the same lock. Instead, throttle for up to a
	 * second before continuing.
	 */
	if (!(gfp_mask & __GFP_FS)) {
		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
			pfmemalloc_watermark_ok(pgdat), HZ);
		goto check_pending;
	}

	/* Throttle until kswapd wakes the process */
	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
		pfmemalloc_watermark_ok(pgdat));

check_pending:
	if (fatal_signal_pending(current))
		return true;

out:
	return false;
}
/*
 * Main entry point for direct reclaim from the page allocator.
 *
 * Returns the number of pages reclaimed, or 1 (without reclaiming) when
 * the task was throttled and received a fatal signal, so the allocator
 * does not OOM-kill on this attempt.
 */
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		/* Strip IO/FS bits if the task is in a noio context */
		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
		.may_writepage = !laptop_mode,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.may_unmap = 1,
		.may_swap = 1,
		.order = order,
		.priority = DEF_PRIORITY,
		.target_mem_cgroup = NULL,	/* global, not memcg, reclaim */
		.nodemask = nodemask,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};

	/*
	 * Do not enter reclaim if fatal signal was delivered while throttled.
	 * 1 is returned so that the page allocator does not OOM kill at this
	 * point.
	 */
	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
		return 1;

	trace_mm_vmscan_direct_reclaim_begin(order,
				sc.may_writepage,
				gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}
#ifdef CONFIG_MEMCG
/*
 * Reclaim pages from one memcg's LRU on a single zone (soft limit
 * reclaim). Scanning runs at priority 0 — see the NOTE below. Reports
 * the number of pages scanned via *nr_scanned and returns the number
 * reclaimed.
 */
unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned)
{
	struct scan_control sc = {
		.nr_scanned = 0,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.order = 0,
		.priority = 0,
		.target_mem_cgroup = memcg,
	};
	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);

	/* Keep only the caller's reclaim-relevant gfp bits on a movable base */
	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);

	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
						      sc.may_writepage,
						      sc.gfp_mask);

	/*
	 * NOTE: Although we can get the priority field, using it
	 * here is not a good idea, since it limits the pages we can scan.
	 * if we don't reclaim here, the shrink_zone from balance_pgdat
	 * will pick up pages from other mem cgroup's as well. We hack
	 * the priority and make it zero.
	 */
	shrink_lruvec(lruvec, &sc);

	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);

	*nr_scanned = sc.nr_scanned;
	return sc.nr_reclaimed;
}
/*
 * Full-priority reclaim on behalf of a memory cgroup (e.g. for charge
 * failures or limit enforcement). Picks a victim node for the memcg and
 * runs do_try_to_free_pages() against that node's zonelist. Returns the
 * number of pages reclaimed.
 */
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
					   gfp_t gfp_mask,
					   bool noswap)
{
	struct zonelist *zonelist;
	unsigned long nr_reclaimed;
	int nid;
	struct scan_control sc = {
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = !noswap,
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.order = 0,
		.priority = DEF_PRIORITY,
		.target_mem_cgroup = memcg,
		.nodemask = NULL, /* we don't care the placement */
		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};

	/*
	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
	 * take care of from where we get pages. So the node where we start the
	 * scan does not need to be the current node.
	 */
	nid = mem_cgroup_select_victim_node(memcg);

	zonelist = NODE_DATA(nid)->node_zonelists;

	trace_mm_vmscan_memcg_reclaim_begin(0,
					    sc.may_writepage,
					    sc.gfp_mask);

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);

	return nr_reclaimed;
}
#endif
/*
 * Background aging of the active anon list for every memcg on this zone:
 * deactivate a batch when the inactive anon list is low, so pages get a
 * chance to be referenced before being reclaimed.
 */
static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
	struct mem_cgroup *memcg = NULL;

	/* No swap means anon pages cannot be reclaimed — nothing to age. */
	if (!total_swap_pages)
		return;

	while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL) {
		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);

		if (inactive_anon_is_low(lruvec))
			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
					   sc, LRU_ACTIVE_ANON);
	}
}
/*
 * A zone is balanced when it meets its high watermark (plus an optional
 * gap) and, for high-order requests with compaction built in, when
 * compaction would be able to run.
 */
static bool zone_balanced(struct zone *zone, int order,
			  unsigned long balance_gap, int classzone_idx)
{
	bool wmark_met;

	wmark_met = zone_watermark_ok_safe(zone, order,
					   high_wmark_pages(zone) + balance_gap,
					   classzone_idx, 0);
	if (!wmark_met)
		return false;

	/* High-order: also require that compaction stands a chance. */
	if (IS_ENABLED(CONFIG_COMPACTION) && order &&
	    !compaction_suitable(zone, order))
		return false;

	return true;
}
/*
* pgdat_balanced() is used when checking if a node is balanced.
*
* For order-0, all zones must be balanced!
*
* For high-order allocations only zones that meet watermarks and are in a
* zone allowed by the callers classzone_idx are added to balanced_pages. The
* total of balanced pages must be at least 25% of the zones allowed by
* classzone_idx for the node to be considered balanced. Forcing all zones to
* be balanced for high orders can cause excessive reclaim when there are
* imbalanced zones.
* The choice of 25% is due to
* o a 16M DMA zone that is balanced will not balance a zone on any
* reasonable sized machine
* o On all other machines, the top zone must be at least a reasonable
* percentage of the middle zones. For example, on 32-bit x86, highmem
 *   would need to be at least 256M for it to balance a whole node.
* Similarly, on x86-64 the Normal zone would need to be at least 1G
* to balance a node on its own. These seemed like reasonable ratios.
*/
static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
	unsigned long total = 0;
	unsigned long balanced = 0;
	int i;

	/* Check the watermarks of every zone allowed by classzone_idx. */
	for (i = 0; i <= classzone_idx; i++) {
		struct zone *z = pgdat->node_zones + i;

		if (!populated_zone(z))
			continue;

		total += z->managed_pages;

		/*
		 * Special case: balance_pgdat() skips unreclaimable zones
		 * after DEF_PRIORITY and effectively treats them as
		 * balanced, so they must count as balanced here too.
		 */
		if (!zone_reclaimable(z)) {
			balanced += z->managed_pages;
			continue;
		}

		if (zone_balanced(z, order, 0, i))
			balanced += z->managed_pages;
		else if (!order)
			/* Order-0 requires every zone to be balanced. */
			return false;
	}

	/* High-order: 25% of allowed pages balanced is good enough. */
	return order ? balanced >= (total >> 2) : true;
}
/*
* Prepare kswapd for sleeping. This verifies that there are no processes
* waiting in throttle_direct_reclaim() and that watermarks have been met.
*
* Returns true if kswapd is ready to sleep
*/
static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
				 int classzone_idx)
{
	/* A direct reclaimer woke kswapd within HZ/10 — premature sleep. */
	if (remaining != 0)
		return false;

	/*
	 * Two races can leave tasks stranded on pfmemalloc_wait: kswapd may
	 * check its watermarks just before a task throttles, or a large
	 * process may exit and balance the zones after tasks throttled,
	 * making kswapd miss the wakeup. Before sleeping, wake any waiters;
	 * they will wake kswapd and throttle again if still necessary.
	 */
	if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
		wake_up(&pgdat->pfmemalloc_wait);
		return false;
	}

	return pgdat_balanced(pgdat, order, classzone_idx);
}
/*
* kswapd shrinks the zone by the number of pages required to reach
* the high watermark.
*
* Returns true if kswapd scanned at least the requested number of pages to
* reclaim or if the lack of progress was due to pages under writeback.
* This is used to determine if the scanning priority needs to be raised.
*/
static bool kswapd_shrink_zone(struct zone *zone,
			       int classzone_idx,
			       struct scan_control *sc,
			       unsigned long lru_pages,
			       unsigned long *nr_attempted)
{
	int testorder = sc->order;
	unsigned long balance_gap;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct shrink_control shrink = {
		.gfp_mask = sc->gfp_mask,
	};
	bool lowmem_pressure;

	/* Reclaim above the high watermark. */
	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));

	/*
	 * Kswapd reclaims only single pages with compaction enabled. Trying
	 * too hard to reclaim until contiguous free pages have become
	 * available can hurt performance by evicting too much useful data
	 * from memory. Do not reclaim more than needed for compaction.
	 */
	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
			compaction_suitable(zone, sc->order) !=
				COMPACT_SKIPPED)
		testorder = 0;	/* judge balance at order-0 instead */

	/*
	 * We put equal pressure on every zone, unless one zone has way too
	 * many pages free already. The "too many pages" is defined as the
	 * high wmark plus a "gap" where the gap is either the low
	 * watermark or 1% of the zone, whichever is smaller.
	 */
	balance_gap = min(low_wmark_pages(zone),
		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
		KSWAPD_ZONE_BALANCE_GAP_RATIO);

	/*
	 * If there is no low memory pressure or the zone is balanced then no
	 * reclaim is necessary
	 */
	lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
	if (!lowmem_pressure && zone_balanced(zone, testorder,
						balance_gap, classzone_idx))
		return true;

	shrink_zone(zone, sc);
	/* Shrink slab caches on this zone's node only. */
	nodes_clear(shrink.nodes_to_scan);
	node_set(zone_to_nid(zone), shrink.nodes_to_scan);

	reclaim_state->reclaimed_slab = 0;
	shrink_slab(&shrink, sc->nr_scanned, lru_pages);
	sc->nr_reclaimed += reclaim_state->reclaimed_slab;

	/* Account for the number of pages attempted to reclaim */
	*nr_attempted += sc->nr_to_reclaim;

	/* A full scan was attempted, so clear the writeback stall flag. */
	zone_clear_flag(zone, ZONE_WRITEBACK);

	/*
	 * If a zone reaches its high watermark, consider it to be no longer
	 * congested. It's possible there are dirty pages backed by congested
	 * BDIs but as pressure is relieved, speculatively avoid congestion
	 * waits.
	 */
	if (zone_reclaimable(zone) &&
	    zone_balanced(zone, testorder, 0, classzone_idx)) {
		zone_clear_flag(zone, ZONE_CONGESTED);
		zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
	}

	return sc->nr_scanned >= sc->nr_to_reclaim;
}
/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
*
* Returns the final order kswapd was reclaiming at
*
* There is special handling here for zones which are full of pinned pages.
* This can happen if the pages are all mlocked, or if they are all used by
* device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
* What we do is to detect the case where all pages in the zone have been
* scanned twice and there has been zero successful reclaim. Mark the zone as
* dead and from now on, only perform a short scan. Basically we're polling
* the zone for when the problem goes away.
*
* kswapd scans the zones in the highmem->normal->dma direction. It skips
* zones which have free_pages > high_wmark_pages(zone), but once a zone is
* found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
* lower zones regardless of the number of free pages in the lower zones. This
* interoperates with the page allocator fallback scheme to ensure that aging
* of pages is balanced across the zones.
*/
static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
							int *classzone_idx)
{
	int i;
	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
		.may_swap = 1,
		.may_writepage = !laptop_mode,
		.order = order,
		.target_mem_cgroup = NULL,	/* global reclaim */
	};
	count_vm_event(PAGEOUTRUN);

	/* One pass per priority level until the node is balanced. */
	do {
		unsigned long lru_pages = 0;
		unsigned long nr_attempted = 0;
		bool raise_priority = true;
		bool pgdat_needs_compaction = (order > 0);

		sc.nr_reclaimed = 0;

		/*
		 * Scan in the highmem->dma direction for the highest
		 * zone which needs scanning
		 */
		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			/* Skip unreclaimable zones once pressure has risen. */
			if (sc.priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;

			/*
			 * Do some background aging of the anon list, to give
			 * pages a chance to be referenced before reclaiming.
			 */
			age_active_anon(zone, &sc);

			/*
			 * If the number of buffer_heads in the machine
			 * exceeds the maximum allowed level and this node
			 * has a highmem zone, force kswapd to reclaim from
			 * it to relieve lowmem pressure.
			 */
			if (buffer_heads_over_limit && is_highmem_idx(i)) {
				end_zone = i;
				break;
			}

			if (!zone_balanced(zone, order, 0, 0)) {
				end_zone = i;
				break;
			} else {
				/*
				 * If balanced, clear the dirty and congested
				 * flags
				 */
				zone_clear_flag(zone, ZONE_CONGESTED);
				zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY);
			}
		}

		/* Every zone was balanced — nothing to reclaim. */
		if (i < 0)
			goto out;

		/* Total up LRU pages and check if compaction is needed. */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			lru_pages += zone_reclaimable_pages(zone);

			/*
			 * If any zone is currently balanced then kswapd will
			 * not call compaction as it is expected that the
			 * necessary pages are already available.
			 */
			if (pgdat_needs_compaction &&
					zone_watermark_ok(zone, order,
						low_wmark_pages(zone),
						*classzone_idx, 0))
				pgdat_needs_compaction = false;
		}

		/*
		 * If we're getting trouble reclaiming, start doing writepage
		 * even in laptop mode.
		 */
		if (sc.priority < DEF_PRIORITY - 2)
			sc.may_writepage = 1;

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction. This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;

			if (sc.priority != DEF_PRIORITY &&
			    !zone_reclaimable(zone))
				continue;

			sc.nr_scanned = 0;

			nr_soft_scanned = 0;
			/*
			 * Call soft limit reclaim before calling shrink_zone.
			 */
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
							order, sc.gfp_mask,
							&nr_soft_scanned);
			sc.nr_reclaimed += nr_soft_reclaimed;

			/*
			 * There should be no need to raise the scanning
			 * priority if enough pages are already being scanned
			 * that that high watermark would be met at 100%
			 * efficiency.
			 */
			if (kswapd_shrink_zone(zone, end_zone, &sc,
					lru_pages, &nr_attempted))
				raise_priority = false;
		}

		/*
		 * If the low watermark is met there is no need for processes
		 * to be throttled on pfmemalloc_wait as they should not be
		 * able to safely make forward progress. Wake them
		 */
		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
				pfmemalloc_watermark_ok(pgdat))
			wake_up(&pgdat->pfmemalloc_wait);

		/*
		 * Fragmentation may mean that the system cannot be rebalanced
		 * for high-order allocations in all zones. If twice the
		 * allocation size has been reclaimed and the zones are still
		 * not balanced then recheck the watermarks at order-0 to
		 * prevent kswapd reclaiming excessively. Assume that a
		 * process requested a high-order can direct reclaim/compact.
		 */
		if (order && sc.nr_reclaimed >= 2UL << order)
			order = sc.order = 0;

		/* Check if kswapd should be suspending */
		if (try_to_freeze() || kthread_should_stop())
			break;

		/*
		 * Compact if necessary and kswapd is reclaiming at least the
		 * high watermark number of pages as requested
		 */
		if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
			compact_pgdat(pgdat, order);

		/*
		 * Raise priority if scanning rate is too low or there was no
		 * progress in reclaiming pages
		 */
		if (raise_priority || !sc.nr_reclaimed)
			sc.priority--;
	} while (sc.priority >= 1 &&
		 !pgdat_balanced(pgdat, order, *classzone_idx));

out:
	/*
	 * Return the order we were reclaiming at so prepare_kswapd_sleep()
	 * makes a decision on the order we were last reclaiming at. However,
	 * if another caller entered the allocator slow path while kswapd
	 * was awake, order will remain at the higher level
	 */
	*classzone_idx = end_zone;
	return order;
}
/*
 * Put kswapd to sleep once the node looks balanced: first a short HZ/10
 * nap to detect premature wakeups, then — if still balanced — a full
 * sleep until explicitly woken by wakeup_kswapd().
 */
static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
{
	long remaining = 0;
	DEFINE_WAIT(wait);

	if (freezing(current) || kthread_should_stop())
		return;

	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);

	/* Try to sleep for a short interval */
	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
		remaining = schedule_timeout(HZ/10);
		finish_wait(&pgdat->kswapd_wait, &wait);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
	}

	/*
	 * After a short sleep, check if it was a premature sleep. If not, then
	 * go fully to sleep until explicitly woken up.
	 */
	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);

		/*
		 * vmstat counters are not perfectly accurate and the estimated
		 * value for counters such as NR_FREE_PAGES can deviate from the
		 * true value by nr_online_cpus * threshold. To avoid the zone
		 * watermarks being breached while under pressure, we reduce the
		 * per-cpu vmstat threshold while kswapd is awake and restore
		 * them before going back to sleep.
		 */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);

		/*
		 * Compaction records what page blocks it recently failed to
		 * isolate pages from and skips them in the future scanning.
		 * When kswapd is going to sleep, it is reasonable to assume
		 * that pages and compaction may succeed so reset the cache.
		 */
		reset_isolation_suitable(pgdat);

		if (!kthread_should_stop())
			schedule();

		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
	} else {
		/* Premature wakeup — account which watermark cut it short. */
		if (remaining)
			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
		else
			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
	}
	finish_wait(&pgdat->kswapd_wait, &wait);
}
/*
* The background pageout daemon, started as a kernel thread
* from the init process.
*
* This basically trickles out pages so that we have _some_
* free memory available even if there is no other activity
* that frees anything up. This is needed for things like routing
* etc, where we otherwise might have all activity going on in
* asynchronous contexts that cannot page things out.
*
* If there are applications that are active memory-allocators
* (most normal use), this basically shouldn't matter.
*/
static int kswapd(void *p)
{
	unsigned long order, new_order;
	unsigned balanced_order;
	int classzone_idx, new_classzone_idx;
	int balanced_classzone_idx;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	lockdep_set_current_reclaim_state(GFP_KERNEL);

	/* Prefer running on the CPUs local to this node's memory. */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
	set_freezable();

	order = new_order = 0;
	balanced_order = 0;
	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
	balanced_classzone_idx = classzone_idx;
	for ( ; ; ) {
		bool ret;

		/*
		 * If the last balance_pgdat was unsuccessful it's unlikely a
		 * new request of a similar or harder type will succeed soon
		 * so consider going to sleep on the basis we reclaimed at
		 */
		if (balanced_classzone_idx >= new_classzone_idx &&
					balanced_order == new_order) {
			new_order = pgdat->kswapd_max_order;
			new_classzone_idx = pgdat->classzone_idx;
			pgdat->kswapd_max_order =  0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		if (order < new_order || classzone_idx > new_classzone_idx) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation or has tighter zone constraints
			 */
			order = new_order;
			classzone_idx = new_classzone_idx;
		} else {
			kswapd_try_to_sleep(pgdat, balanced_order,
						balanced_classzone_idx);
			/* Pick up any request queued while we slept. */
			order = pgdat->kswapd_max_order;
			classzone_idx = pgdat->classzone_idx;
			new_order = order;
			new_classzone_idx = classzone_idx;
			pgdat->kswapd_max_order = 0;
			pgdat->classzone_idx = pgdat->nr_zones - 1;
		}

		ret = try_to_freeze();
		if (kthread_should_stop())
			break;

		/*
		 * We can speed up thawing tasks if we don't call balance_pgdat
		 * after returning from the refrigerator
		 */
		if (!ret) {
			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
			balanced_classzone_idx = classzone_idx;
			balanced_order = balance_pgdat(pgdat, order,
						&balanced_classzone_idx);
		}
	}

	current->reclaim_state = NULL;
	return 0;
}
/*
* A zone is low on free memory, so wake its kswapd task to service it.
*/
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	/* Respect cpuset memory placement restrictions. */
	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
		return;
	pgdat = zone->zone_pgdat;
	/* Record the hardest outstanding request for kswapd to act on. */
	if (pgdat->kswapd_max_order < order) {
		pgdat->kswapd_max_order = order;
		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
	}
	/* kswapd is already running if nobody is waiting on its queue. */
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	/* Already balanced — no wakeup needed. */
	if (zone_balanced(zone, order, 0, 0))
		return;

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
#ifdef CONFIG_HIBERNATION
/*
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
* freed pages.
*
* Rather than trying to age LRUs the aim is to preserve the overall
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.may_swap = 1,
		.may_unmap = 1,
		.may_writepage = 1,
		.nr_to_reclaim = nr_to_reclaim,
		.hibernation_mode = 1,	/* stop once nr_to_reclaim is met */
		.order = 0,
		.priority = DEF_PRIORITY,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;

	/* Allow dipping into reserves; record slab freed via reclaim_state. */
	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
/* It's optimal to keep kswapds on the same CPUs as their memory, but
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
/*
 * CPU-hotplug notifier: when a CPU comes online, re-pin each node's
 * kswapd to its node-local CPUs if at least one of them is now up.
 */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
		return NOTIFY_OK;

	for_each_node_state(nid, N_MEMORY) {
		pg_data_t *pgdat = NODE_DATA(nid);
		const struct cpumask *mask = cpumask_of_node(pgdat->node_id);

		/* One of our CPUs online: restore mask */
		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
			set_cpus_allowed_ptr(pgdat->kswapd, mask);
	}
	return NOTIFY_OK;
}
/*
* This kswapd start function will be called by init and node-hot-add.
 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
*/
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	struct task_struct *tsk;

	/* Already running for this node — nothing to do. */
	if (pgdat->kswapd)
		return 0;

	tsk = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (!IS_ERR(tsk)) {
		pgdat->kswapd = tsk;
		return 0;
	}

	/* failure at boot is fatal */
	BUG_ON(system_state == SYSTEM_BOOTING);
	pr_err("Failed to start kswapd on node %d\n", nid);
	pgdat->kswapd = NULL;
	return PTR_ERR(tsk);
}
/*
* Called by memory hotplug when all memory in a node is offlined. Caller must
* hold lock_memory_hotplug().
*/
void kswapd_stop(int nid)
{
	struct task_struct *tsk = NODE_DATA(nid)->kswapd;

	if (!tsk)
		return;

	/* Stop the thread and forget it so kswapd_run() can restart it. */
	kthread_stop(tsk);
	NODE_DATA(nid)->kswapd = NULL;
}
/* Boot-time init: size the swap readahead and start kswapd on every
 * memory node, then register the CPU-hotplug affinity callback. */
static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
 		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)
#ifdef CONFIG_NUMA
/*
* Zone reclaim mode
*
* If non-zero call zone_reclaim when the number of free pages falls below
* the watermarks.
*/
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0		/* zone reclaim disabled */
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
/*
 * Estimate how many file-LRU pages on this zone are not mapped into any
 * process — i.e. the portion of the page cache that zone reclaim can
 * drop without unmapping anything.
 */
static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long on_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
			       zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * FILE_MAPPED can exceed the file LRU totals because tmpfs pages
	 * accounted as ANON also count as FILE_MAPPED, so guard against
	 * underflow.
	 */
	if (on_lru <= mapped)
		return 0;
	return on_lru - mapped;
}
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long reclaimable;
	long unavailable = 0;

	/*
	 * With RECLAIM_SWAP every file page is potentially reclaimable;
	 * otherwise swapcache-like pages must be excluded and
	 * zone_unmapped_file_pages() gives the better estimate.
	 */
	reclaimable = (zone_reclaim_mode & RECLAIM_SWAP) ?
		zone_page_state(zone, NR_FILE_PAGES) :
		zone_unmapped_file_pages(zone);

	/* Without RECLAIM_WRITE, dirty pages can't be cleaned — drop them. */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		unavailable += zone_page_state(zone, NR_FILE_DIRTY);

	/* Clamp so a stale delta can never drive the result negative. */
	if (unlikely(unavailable > reclaimable))
		unavailable = reclaimable;

	return reclaimable - unavailable;
}
/*
* Try to free up some pages from this zone through reclaim.
*/
/*
 * Try to free up some pages from this zone through reclaim.
 * Returns non-zero when at least nr_pages (1 << order) were reclaimed.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
		.order = order,
		.priority = ZONE_RECLAIM_PRIORITY,
	};
	struct shrink_control shrink = {
		.gfp_mask = sc.gfp_mask,
	};
	unsigned long nr_slab_pages0, nr_slab_pages1;

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink zone with increasing
		 * priorities until we have enough memory freed.
		 */
		do {
			shrink_zone(zone, &sc);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
	if (nr_slab_pages0 > zone->min_slab_pages) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we take the current
		 * number of slab pages and shake the slab until it is reduced
		 * by the same nr_pages that we used for reclaiming unmapped
		 * pages.
		 */
		nodes_clear(shrink.nodes_to_scan);
		node_set(zone_to_nid(zone), shrink.nodes_to_scan);
		for (;;) {
			unsigned long lru_pages = zone_reclaimable_pages(zone);

			/* No reclaimable slab or very low memory pressure */
			if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
				break;

			/* Freed enough memory */
			nr_slab_pages1 = zone_page_state(zone,
							NR_SLAB_RECLAIMABLE);
			if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
				break;
		}

		/*
		 * Update nr_reclaimed by the number of slab pages we
		 * reclaimed from this zone.
		 */
		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
		if (nr_slab_pages1 < nr_slab_pages0)
			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	lockdep_clear_current_reclaim_state();
	return sc.nr_reclaimed >= nr_pages;
}
/*
 * Entry point for NUMA zone reclaim. Decides whether reclaiming this
 * zone is worthwhile and permitted, serialises concurrent attempts with
 * ZONE_RECLAIM_LOCKED, then delegates to __zone_reclaim(). Returns one
 * of the ZONE_RECLAIM_* results.
 */
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (!zone_reclaimable(zone))
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	/* One reclaimer per zone at a time. */
	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
#endif
/*
* page_evictable - test whether a page is evictable
* @page: the page to test
*
* Test whether page is evictable--i.e., should be placed on active/inactive
* lists vs unevictable list.
*
* Reasons page might not be evictable:
* (1) page's mapping marked unevictable
* (2) page is part of an mlocked VMA
*
*/
/*
 * A page is evictable only when neither its mapping is marked
 * unevictable nor the page itself is mlocked.
 */
int page_evictable(struct page *page)
{
	int mapping_ok = !mapping_unevictable(page_mapping(page));

	return mapping_ok && !PageMlocked(page);
}
#ifdef CONFIG_SHMEM
/**
* check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
* @pages: array of pages to check
* @nr_pages: number of pages to check
*
* Checks pages for evictability and moves them to the appropriate lru list.
*
* This function is only used for SysV IPC SHM_UNLOCK.
*/
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct zone *zone = NULL;	/* zone whose lru_lock is held */
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		pgscanned++;
		pagezone = page_zone(page);
		/* Batch lru_lock acquisition: only re-lock on zone change. */
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Skip pages off the LRU or already considered evictable. */
		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON_PAGE(PageActive(page), page);
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	if (zone) {
		/* Safe as __count: still under the last zone's lru_lock. */
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&zone->lru_lock);
	}
}
#endif /* CONFIG_SHMEM */
/*
 * One-time deprecation warning emitted whenever the defunct
 * scan_unevictable_pages sysctl/node attribute is touched;
 * printk_once() limits it to the first use per boot.
 */
static void warn_scan_unevictable_pages(void)
{
	printk_once(KERN_WARNING
		    "%s: The scan_unevictable_pages sysctl/node-interface has been "
		    "disabled for lack of a legitimate use case. If you have "
		    "one, please send an email to linux-mm@kvack.org.\n",
		    current->comm);
}
/*
 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
 * all nodes' unevictable lists for evictable pages
 */
unsigned long scan_unevictable_pages;

/*
 * The sysctl is retained only for ABI compatibility: a write emits a
 * one-time deprecation warning, the user buffer is parsed/consumed as
 * usual, and the value is immediately forced back to zero.
 */
int scan_unevictable_handler(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *length, loff_t *ppos)
{
	warn_scan_unevictable_pages();
	/* consume the buffer so the write appears to succeed */
	proc_doulongvec_minmax(table, write, buffer, length, ppos);
	scan_unevictable_pages = 0;	/* never let a non-zero value stick */
	return 0;
}
#ifdef CONFIG_NUMA
/*
* per node 'scan_unevictable_pages' attribute. On demand re-scan of
* a specified node's per zone unevictable lists for evictable pages.
*/
/*
 * Per-node 'scan_unevictable_pages' show method.  The interface is
 * permanently disabled, so every read reports zero (after emitting
 * the one-time deprecation warning).
 */
static ssize_t read_scan_unevictable_node(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	warn_scan_unevictable_pages();

	return sprintf(buf, "0\n");
}
/*
 * Per-node 'scan_unevictable_pages' store method.  The interface is
 * permanently disabled: emit the one-time deprecation warning and
 * discard the input.
 *
 * Fix: return @count instead of a hard-coded 1.  A sysfs store must
 * report how many bytes it consumed; returning a short count makes a
 * well-behaved writer (e.g. "echo 1 > ..." writing 2 bytes) loop,
 * re-submitting the remainder of its buffer.
 */
static ssize_t write_scan_unevictable_node(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	warn_scan_unevictable_pages();
	return count;
}
static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
read_scan_unevictable_node,
write_scan_unevictable_node);
/*
 * Create the per-node scan_unevictable_pages sysfs attribute.
 * Returns 0 on success or a negative errno from device_create_file().
 */
int scan_unevictable_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
/* Remove the per-node scan_unevictable_pages sysfs attribute. */
void scan_unevictable_unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
}
#endif
| gpl-2.0 |
v1ron/linux-mainline | drivers/net/wireless/ath/ath9k/rng.c | 86 | 2748 | /*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/hw_random.h>
#include <linux/kthread.h>
#include "ath9k.h"
#include "hw.h"
#include "ar9003_phy.h"
#define ATH9K_RNG_BUF_SIZE 320
#define ATH9K_RNG_ENTROPY(x) (((x) * 8 * 320) >> 10) /* quality: 320/1024 */
/*
 * Harvest entropy words from the baseband ADC.
 *
 * The PHY test observation mux is pointed at the ADC, then up to
 * @buf_size pairs of 16-bit samples are read back.  A pair is only
 * accepted when both halves are non-zero, differ from each other and
 * from the previous sample, and are not all-ones -- filtering out a
 * stuck or idle ADC.  The last sample is persisted in sc->rng_last so
 * the filter carries across calls.
 *
 * Returns the number of bytes stored into @buf.
 */
static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
{
	int i, j;
	u32 v1, v2, rng_last = sc->rng_last;
	struct ath_hw *ah = sc->sc_ah;

	ath9k_ps_wakeup(sc);

	/* route the ADC output onto the PHY test observation bus */
	REG_RMW_FIELD(ah, AR_PHY_TEST, AR_PHY_TEST_BBB_OBS_SEL, 1);
	REG_CLR_BIT(ah, AR_PHY_TEST, AR_PHY_TEST_RX_OBS_SEL_BIT5);
	REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, AR_PHY_TEST_CTL_RX_OBS_SEL, 0);

	for (i = 0, j = 0; i < buf_size; i++) {
		v1 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff;
		v2 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff;

		/* wait for data ready */
		if (v1 && v2 && rng_last != v1 && v1 != v2 && v1 != 0xffff &&
		    v2 != 0xffff)
			buf[j++] = (v1 << 16) | v2;

		rng_last = v2;
	}

	ath9k_ps_restore(sc);

	sc->rng_last = rng_last;

	return j << 2;	/* accepted 32-bit words -> bytes */
}
/*
 * Kernel thread that feeds ADC noise into the hwrng input pool.
 * Started by ath9k_rng_start(); runs until kthread_stop() is called
 * from ath9k_rng_stop() or the initial allocation fails.
 */
static int ath9k_rng_kthread(void *data)
{
	int bytes_read;
	struct ath_softc *sc = data;
	u32 *rng_buf;

	rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rng_buf)
		goto out;

	while (!kthread_should_stop()) {
		bytes_read = ath9k_rng_data_read(sc, rng_buf,
						 ATH9K_RNG_BUF_SIZE);
		/* nothing usable sampled: back off briefly and retry */
		if (unlikely(!bytes_read)) {
			msleep_interruptible(10);
			continue;
		}

		/* sleep until entropy bits under write_wakeup_threshold */
		add_hwgenerator_randomness((void *)rng_buf, bytes_read,
					   ATH9K_RNG_ENTROPY(bytes_read));
	}

	kfree(rng_buf);
out:
	sc->rng_task = NULL;	/* mark the thread as gone */

	return 0;
}
/*
 * Start the hw-rng harvesting thread, unless it is already running
 * or the chip is older than AR9300 (pre-Gen-II parts are skipped).
 */
void ath9k_rng_start(struct ath_softc *sc)
{
	if (sc->rng_task || !AR_SREV_9300_20_OR_LATER(sc->sc_ah))
		return;

	sc->rng_task = kthread_run(ath9k_rng_kthread, sc, "ath9k-hwrng");
	if (IS_ERR(sc->rng_task))
		sc->rng_task = NULL;
}
/* Stop the hw-rng harvesting thread if one is running. */
void ath9k_rng_stop(struct ath_softc *sc)
{
	struct task_struct *task = sc->rng_task;

	if (task)
		kthread_stop(task);
}
| gpl-2.0 |
matthiasbock/VideoLAN-for-Allwinner-A10-VPU | test/libvlc/media_list.c | 86 | 4219 | /*
* media_list.c - libvlc smoke test
*
* $Id$
*/
/**********************************************************************
* Copyright (C) 2007 Rémi Denis-Courmont. *
* This program is free software; you can redistribute and/or modify *
* it under the terms of the GNU General Public License as published *
* by the Free Software Foundation; version 2 of the license, or (at *
* your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. *
* See the GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, you can get it from: *
* http://www.gnu.org/copyleft/gpl.html *
**********************************************************************/
#include "test.h"
/*
 * Smoke test for the libvlc media_list API: exercises add, insert,
 * remove, count, index lookup and item retrieval, including the error
 * paths for out-of-range indices and items never inserted.
 */
static void test_media_list (const char ** argv, int argc)
{
    libvlc_instance_t *vlc;
    libvlc_media_t *md1, *md2, *md3, *md4;
    libvlc_media_list_t *ml;
    int ret;

    log ("Testing media_list\n");

    vlc = libvlc_new (argc, argv);
    assert (vlc != NULL);

    ml = libvlc_media_list_new (vlc);
    assert (ml != NULL);

    /* three distinct media objects backed by the same dummy path */
    md1 = libvlc_media_new_path (vlc, "/dev/null");
    assert (md1 != NULL);
    md2 = libvlc_media_new_path (vlc, "/dev/null");
    assert (md2 != NULL);
    md3 = libvlc_media_new_path (vlc, "/dev/null");
    assert (md3 != NULL);

    ret = libvlc_media_list_add_media (ml, md1);
    assert (!ret);
    ret = libvlc_media_list_add_media (ml, md2);
    assert (!ret);

    assert( libvlc_media_list_count (ml) == 2 );
    assert( libvlc_media_list_index_of_item (ml, md1) == 0 );
    assert( libvlc_media_list_index_of_item (ml, md2) == 1 );

    ret = libvlc_media_list_remove_index (ml, 0);  /* removing first item */
    assert (!ret);

    /* test if second item was moved on first place */
    assert( libvlc_media_list_index_of_item (ml, md2) == 0 );

    ret = libvlc_media_list_add_media (ml, md1);  /* add 2 items */
    assert (!ret);
    ret = libvlc_media_list_add_media (ml, md1);
    assert (!ret);

    /* there should be 3 pieces */
    assert( libvlc_media_list_count (ml) == 3 );

    ret = libvlc_media_list_insert_media (ml, md3, 2);
    assert (!ret);

    /* there should be 4 pieces */
    assert( libvlc_media_list_count (ml) == 4 );

    /* test inserting on right place */
    assert( libvlc_media_list_index_of_item (ml, md3) == 2 );

    /* test right returning descriptor*/
    assert ( libvlc_media_list_item_at_index (ml, 0) == md2 );
    assert ( libvlc_media_list_item_at_index (ml, 2) == md3 );

    /* test if give errors, when it should */
    /* have 4 items, so index 4 should give error */
    ret = libvlc_media_list_remove_index (ml, 4);
    assert (ret == -1);

    ret = libvlc_media_list_remove_index (ml, 100);
    assert (ret == -1);

    ret = libvlc_media_list_remove_index (ml, -1);
    assert (ret == -1);

    /* getting non valid items */
    libvlc_media_t * p_non_exist =
        libvlc_media_list_item_at_index (ml, 4);
    assert (p_non_exist == NULL);

    p_non_exist = libvlc_media_list_item_at_index (ml, 100);
    assert (p_non_exist == NULL);

    p_non_exist = libvlc_media_list_item_at_index (ml, -1);
    assert (p_non_exist == NULL);

    md4 = libvlc_media_new_path (vlc, "/dev/null");
    assert (md4 != NULL);

    /* try to find non inserted item */
    int i_non_exist = 0;
    i_non_exist = libvlc_media_list_index_of_item (ml, md4);
    assert ( i_non_exist == -1 );

    /* drop our local references, then the list and the instance */
    libvlc_media_release (md1);
    libvlc_media_release (md2);
    libvlc_media_release (md3);
    libvlc_media_release (md4);

    libvlc_media_list_release (ml);

    libvlc_release (vlc);
}
/*
 * Entry point: run the media_list smoke test with the default libvlc
 * arguments supplied by the test harness.
 */
int main (void)
{
    test_init();

    test_media_list (test_defaults_args, test_defaults_nargs);

    return 0;
}
| gpl-2.0 |
ISTweak/android_kernel_panasonic_p01d-cm9 | arch/arm/mach-ixp4xx/avila-setup.c | 854 | 4634 | /*
* arch/arm/mach-ixp4xx/avila-setup.c
*
* Gateworks Avila board-setup
*
* Author: Michael-Luke Jones <mlj28@cam.ac.uk>
*
* Based on ixdp-setup.c
* Copyright (C) 2003-2005 MontaVista Software, Inc.
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/i2c-gpio.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
/* GPIO lines used to bit-bang the board's I2C bus */
#define AVILA_SDA_PIN	7
#define AVILA_SCL_PIN	6

/* CFI NOR flash; the memory window is filled in by avila_init() */
static struct flash_platform_data avila_flash_data = {
	.map_name	= "cfi_probe",
	.width		= 2,
};

static struct resource avila_flash_resource = {
	.flags		= IORESOURCE_MEM,
};

static struct platform_device avila_flash = {
	.name		= "IXP4XX-Flash",
	.id		= 0,
	.dev		= {
		.platform_data = &avila_flash_data,
	},
	.num_resources	= 1,
	.resource	= &avila_flash_resource,
};

/* bit-banged I2C adapter on the GPIO pins defined above */
static struct i2c_gpio_platform_data avila_i2c_gpio_data = {
	.sda_pin	= AVILA_SDA_PIN,
	.scl_pin	= AVILA_SCL_PIN,
};

static struct platform_device avila_i2c_gpio = {
	.name		= "i2c-gpio",
	.id		= 0,
	.dev		= {
		.platform_data	= &avila_i2c_gpio_data,
	},
};

/* two on-chip 8250-compatible UARTs, memory-mapped */
static struct resource avila_uart_resources[] = {
	{
		.start		= IXP4XX_UART1_BASE_PHYS,
		.end		= IXP4XX_UART1_BASE_PHYS + 0x0fff,
		.flags		= IORESOURCE_MEM
	},
	{
		.start		= IXP4XX_UART2_BASE_PHYS,
		.end		= IXP4XX_UART2_BASE_PHYS + 0x0fff,
		.flags		= IORESOURCE_MEM
	}
};

static struct plat_serial8250_port avila_uart_data[] = {
	{
		.mapbase	= IXP4XX_UART1_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART1,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{ },	/* terminator */
};

static struct platform_device avila_uart = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev.platform_data	= avila_uart_data,
	.num_resources		= 2,
	.resource		= avila_uart_resources
};

/*
 * CompactFlash/PATA controller.  The first two (memory) resources are
 * filled in by avila_init() from the expansion-bus windows.
 */
static struct resource avila_pata_resources[] = {
	{
		.flags	= IORESOURCE_MEM
	},
	{
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "intrq",
		.start	= IRQ_IXP4XX_GPIO12,
		.end	= IRQ_IXP4XX_GPIO12,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ixp4xx_pata_data avila_pata_data = {
	.cs0_bits	= 0xbfff0043,
	.cs1_bits	= 0xbfff0043,
};

static struct platform_device avila_pata = {
	.name			= "pata_ixp4xx_cf",
	.id			= 0,
	.dev.platform_data	= &avila_pata_data,
	.num_resources		= ARRAY_SIZE(avila_pata_resources),
	.resource		= avila_pata_resources,
};

/* devices registered unconditionally; avila_pata is added separately */
static struct platform_device *avila_devices[] __initdata = {
	&avila_i2c_gpio,
	&avila_flash,
	&avila_uart
};
/*
 * Board init: wire up the expansion-bus windows and register the
 * platform devices.  The PATA device is registered last, after its
 * resources have been filled in.
 */
static void __init avila_init(void)
{
	ixp4xx_sys_init();

	/* flash lives on expansion-bus chip select 0 */
	avila_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
	avila_flash_resource.end =
		IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;

	platform_add_devices(avila_devices, ARRAY_SIZE(avila_devices));

	/* CF/PATA windows on chip selects 1 and 2 */
	avila_pata_resources[0].start = IXP4XX_EXP_BUS_BASE(1);
	avila_pata_resources[0].end = IXP4XX_EXP_BUS_END(1);

	avila_pata_resources[1].start = IXP4XX_EXP_BUS_BASE(2);
	avila_pata_resources[1].end = IXP4XX_EXP_BUS_END(2);

	avila_pata_data.cs0_cfg = IXP4XX_EXP_CS1;
	avila_pata_data.cs1_cfg = IXP4XX_EXP_CS2;

	platform_device_register(&avila_pata);
}
MACHINE_START(AVILA, "Gateworks Avila Network Platform")
/* Maintainer: Deepak Saxena <dsaxena@plexity.net> */
.phys_io = IXP4XX_PERIPHERAL_BASE_PHYS,
.io_pg_offst = ((IXP4XX_PERIPHERAL_BASE_VIRT) >> 18) & 0xfffc,
.map_io = ixp4xx_map_io,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = avila_init,
MACHINE_END
/*
* Loft is functionally equivalent to Avila except that it has a
* different number for the maximum PCI devices. The MACHINE
* structure below is identical to Avila except for the comment.
*/
#ifdef CONFIG_MACH_LOFT
MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
/* Maintainer: Tom Billman <kernel@giantshoulderinc.com> */
.phys_io = IXP4XX_PERIPHERAL_BASE_PHYS,
.io_pg_offst = ((IXP4XX_PERIPHERAL_BASE_VIRT) >> 18) & 0xfffc,
.map_io = ixp4xx_map_io,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = avila_init,
MACHINE_END
#endif
| gpl-2.0 |
Zex/linux | lib/list_sort.c | 854 | 6985 |
#define pr_fmt(fmt) "list_sort_test: " fmt
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>
#define MAX_LIST_LENGTH_BITS 20
/*
* Returns a list organized in an intermediate format suited
* to chaining of merge() calls: null-terminated, no reserved or
* sentinel head node, "prev" links not maintained.
*/
/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head;
	struct list_head *tail = &head;

	for (;;) {
		/* one input exhausted: append the remainder of the other */
		if (!a) {
			tail->next = b;
			break;
		}
		if (!b) {
			tail->next = a;
			break;
		}
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}

	return head.next;
}
/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;	/* deliberately u8: wraps to 0 every 256 iterations */

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	/* close the cycle back through the list head */
	tail->next = head;
	head->prev = tail;
}
/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(nlog(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	/* break the cycle: work on a NULL-terminated singly-linked list */
	head->prev->next = NULL;
	list = head->next;

	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		/*
		 * Binary-counter merge: part[lev] holds a sorted run of
		 * 2^lev nodes; merging `cur` in propagates like a carry.
		 */
		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			/* overly long list: fold into the last usable slot */
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	/* fold all pending runs together, then restore the prev links */
	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
#ifdef CONFIG_TEST_LIST_SORT
#include <linux/slab.h>
#include <linux/random.h>
/*
* The pattern of set bits in the list length determines which cases
* are hit in list_sort().
*/
#define TEST_LIST_LEN (512+128+2) /* not including head */
#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C
struct debug_el {
unsigned int poison1;
struct list_head list;
unsigned int poison2;
int value;
unsigned serial;
};
/* Array, containing pointers to all elements in the test list */
static struct debug_el **elts __initdata;
/*
 * Integrity check for two elements handed to the comparison callback:
 * serials must be in range, the pointers must match the elts[]
 * bookkeeping array (no phantom nodes), and the poison words around
 * each embedded list_head must be intact.  Returns 0 if sane,
 * -EINVAL (after logging) otherwise.
 */
static int __init check(struct debug_el *ela, struct debug_el *elb)
{
	if (ela->serial >= TEST_LIST_LEN) {
		pr_err("error: incorrect serial %d\n", ela->serial);
		return -EINVAL;
	}
	if (elb->serial >= TEST_LIST_LEN) {
		pr_err("error: incorrect serial %d\n", elb->serial);
		return -EINVAL;
	}
	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
		pr_err("error: phantom element\n");
		return -EINVAL;
	}
	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
		pr_err("error: bad poison: %#x/%#x\n",
			ela->poison1, ela->poison2);
		return -EINVAL;
	}
	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
		pr_err("error: bad poison: %#x/%#x\n",
			elb->poison1, elb->poison2);
		return -EINVAL;
	}
	return 0;
}
/*
 * list_sort() comparison callback for the self-test: orders by
 * ->value and runs the element integrity check as a side effect.
 */
static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct debug_el *ela = container_of(a, struct debug_el, list);
	struct debug_el *elb = container_of(b, struct debug_el, list);

	check(ela, elb);

	return ela->value - elb->value;
}
/*
 * Module self-test: build a TEST_LIST_LEN element list of random
 * values (with deliberate duplicates), sort it, then verify link
 * integrity, ordering, stability and element identity.
 */
static int __init list_sort_test(void)
{
	int i, count = 1, err = -ENOMEM;
	struct debug_el *el;
	struct list_head *cur;
	LIST_HEAD(head);

	pr_debug("start testing list_sort()\n");

	elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
	if (!elts) {
		pr_err("error: cannot allocate memory\n");
		return err;
	}

	for (i = 0; i < TEST_LIST_LEN; i++) {
		el = kmalloc(sizeof(*el), GFP_KERNEL);
		if (!el) {
			pr_err("error: cannot allocate memory\n");
			goto exit;
		}
		/* force some equivalencies */
		el->value = prandom_u32() % (TEST_LIST_LEN / 3);
		el->serial = i;		/* original position, for stability check */
		el->poison1 = TEST_POISON1;
		el->poison2 = TEST_POISON2;
		elts[i] = el;
		list_add_tail(&el->list, &head);
	}

	list_sort(NULL, &head, cmp);

	err = -EINVAL;
	for (cur = head.next; cur->next != &head; cur = cur->next) {
		struct debug_el *el1;
		int cmp_result;

		/* prev links must have been restored by the sort */
		if (cur->next->prev != cur) {
			pr_err("error: list is corrupted\n");
			goto exit;
		}

		cmp_result = cmp(NULL, cur, cur->next);
		if (cmp_result > 0) {
			pr_err("error: list is not sorted\n");
			goto exit;
		}

		el = container_of(cur, struct debug_el, list);
		el1 = container_of(cur->next, struct debug_el, list);
		/* equal values must keep their original relative order */
		if (cmp_result == 0 && el->serial >= el1->serial) {
			pr_err("error: order of equivalent elements not "
				"preserved\n");
			goto exit;
		}

		if (check(el, el1)) {
			pr_err("error: element check failed\n");
			goto exit;
		}
		count++;
	}
	if (head.prev != cur) {
		pr_err("error: list is corrupted\n");
		goto exit;
	}

	if (count != TEST_LIST_LEN) {
		pr_err("error: bad list length %d", count);
		goto exit;
	}

	err = 0;
exit:
	/* free every element via the side array, then the array itself */
	for (i = 0; i < TEST_LIST_LEN; i++)
		kfree(elts[i]);
	kfree(elts);
	return err;
}
#endif /* CONFIG_TEST_LIST_SORT */
| gpl-2.0 |
tifler/linux-mainline | drivers/ata/sata_promise.c | 1110 | 34835 | /*
* sata_promise.c - Promise SATA
*
* Maintained by: Tejun Heo <tj@kernel.org>
* Mikael Pettersson
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
* Copyright 2003-2004 Red Hat, Inc.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware information only available under NDA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
#define DRV_VERSION "2.12"
enum {
PDC_MAX_PORTS = 4,
PDC_MMIO_BAR = 3,
PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
PDC_FLASH_CTL = 0x44, /* Flash control register */
PDC_PCI_CTL = 0x48, /* PCI control/status reg */
PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */
PDC_CYLINDER_LOW = 0x10, /* Cylinder low reg (per port) */
PDC_CYLINDER_HIGH = 0x14, /* Cylinder high reg (per port) */
PDC_DEVICE = 0x18, /* Device/Head reg (per port) */
PDC_COMMAND = 0x1C, /* Command/status reg (per port) */
PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
PDC_SATA_ERROR = 0x04,
PDC_PHYMODE4 = 0x14,
PDC_LINK_LAYER_ERRORS = 0x6C,
PDC_FPDMA_CTLSTAT = 0xD8,
PDC_INTERNAL_DEBUG_1 = 0xF8, /* also used for PATA */
PDC_INTERNAL_DEBUG_2 = 0xFC, /* also used for PATA */
/* PDC_FPDMA_CTLSTAT bit definitions */
PDC_FPDMA_CTLSTAT_RESET = 1 << 3,
PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG = 1 << 10,
PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG = 1 << 11,
/* PDC_GLOBAL_CTL bit definitions */
PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */
PDC_SH_ERR = (1 << 9), /* PCI error while loading S/G table */
PDC_DH_ERR = (1 << 10), /* PCI error while loading data */
PDC2_HTO_ERR = (1 << 12), /* host bus timeout */
PDC2_ATA_HBA_ERR = (1 << 13), /* error during SATA DATA FIS transmission */
PDC2_ATA_DMA_CNT_ERR = (1 << 14), /* DMA DATA FIS size differs from S/G count */
PDC_OVERRUN_ERR = (1 << 19), /* S/G byte count larger than HD requires */
PDC_UNDERRUN_ERR = (1 << 20), /* S/G byte count less than HD requires */
PDC_DRIVE_ERR = (1 << 21), /* drive error */
PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */
PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */
PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR,
PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
PDC2_ATA_DMA_CNT_ERR,
PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
PDC1_ERR_MASK | PDC2_ERR_MASK,
board_2037x = 0, /* FastTrak S150 TX2plus */
board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */
board_20319 = 2, /* FastTrak S150 TX4 */
board_20619 = 3, /* FastTrak TX4000 */
board_2057x = 4, /* SATAII150 Tx2plus */
board_2057x_pata = 5, /* SATAII150 Tx2plus PATA port */
board_40518 = 6, /* SATAII150 Tx4 */
PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
/* Sequence counter control registers bit definitions */
PDC_SEQCNTRL_INT_MASK = (1 << 5), /* Sequence Interrupt Mask */
/* Feature register values */
PDC_FEATURE_ATAPI_PIO = 0x00, /* ATAPI data xfer by PIO */
PDC_FEATURE_ATAPI_DMA = 0x01, /* ATAPI data xfer by DMA */
/* Device/Head register values */
PDC_DEVICE_SATA = 0xE0, /* Device/Head value for SATA devices */
/* PDC_CTLSTAT bit definitions */
PDC_DMA_ENABLE = (1 << 7),
PDC_IRQ_DISABLE = (1 << 10),
PDC_RESET = (1 << 11), /* HDMA reset */
PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING,
/* ap->flags bits */
PDC_FLAG_GEN_II = (1 << 24),
PDC_FLAG_SATA_PATA = (1 << 25), /* supports SATA + PATA */
PDC_FLAG_4_PORTS = (1 << 26), /* 4 ports */
};
struct pdc_port_priv {
u8 *pkt;
dma_addr_t pkt_dma;
};
struct pdc_host_priv {
spinlock_t hard_reset_lock;
};
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
static void pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
static void pdc_freeze(struct ata_port *ap);
static void pdc_sata_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static void pdc_sata_thaw(struct ata_port *ap);
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
static int pdc_sata_cable_detect(struct ata_port *ap);
static struct scsi_host_template pdc_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = PDC_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
};
static const struct ata_port_operations pdc_common_ops = {
.inherits = &ata_sff_port_ops,
.sff_tf_load = pdc_tf_load_mmio,
.sff_exec_command = pdc_exec_command_mmio,
.check_atapi_dma = pdc_check_atapi_dma,
.qc_prep = pdc_qc_prep,
.qc_issue = pdc_qc_issue,
.sff_irq_clear = pdc_irq_clear,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
.error_handler = pdc_error_handler,
};
static struct ata_port_operations pdc_sata_ops = {
.inherits = &pdc_common_ops,
.cable_detect = pdc_sata_cable_detect,
.freeze = pdc_sata_freeze,
.thaw = pdc_sata_thaw,
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
.port_start = pdc_sata_port_start,
.hardreset = pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
and ->freeze/thaw that ignore the hotplug controls. */
static struct ata_port_operations pdc_old_sata_ops = {
.inherits = &pdc_sata_ops,
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.check_atapi_dma = pdc_old_sata_check_atapi_dma,
};
static struct ata_port_operations pdc_pata_ops = {
.inherits = &pdc_common_ops,
.cable_detect = pdc_pata_cable_detect,
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.port_start = pdc_common_port_start,
.softreset = pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
[board_2037x] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_SATA_PATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_old_sata_ops,
},
[board_2037x_pata] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_pata_ops,
},
[board_20319] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_4_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_old_sata_ops,
},
[board_20619] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
PDC_FLAG_4_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_pata_ops,
},
[board_2057x] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_sata_ops,
},
[board_2057x_pata] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
PDC_FLAG_GEN_II,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_pata_ops,
},
[board_40518] =
{
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &pdc_sata_ops,
},
};
static const struct pci_device_id pdc_ata_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
{ } /* terminate list */
};
static struct pci_driver pdc_ata_pci_driver = {
.name = DRV_NAME,
.id_table = pdc_ata_pci_tbl,
.probe = pdc_ata_init_one,
.remove = ata_pci_remove_one,
};
/*
 * Common port init for SATA and PATA ports: set up the BMDMA PRD
 * table, then allocate the per-port private data and its DMA-coherent
 * command packet buffer (all managed, so no explicit teardown needed).
 */
static int pdc_common_port_start(struct ata_port *ap)
{
	struct device *host_dev = ap->host->dev;
	struct pdc_port_priv *priv;
	int ret;

	/* we use the same prd table as bmdma, allocate it */
	ret = ata_bmdma_port_start(ap);
	if (ret)
		return ret;

	priv = devm_kzalloc(host_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pkt = dmam_alloc_coherent(host_dev, 128, &priv->pkt_dma,
					GFP_KERNEL);
	if (!priv->pkt)
		return -ENOMEM;

	ap->private_data = priv;

	return 0;
}
/*
 * SATA port init: common setup plus a PHYMODE4 tweak that is only
 * needed on second-generation (SATAII) chips.
 */
static int pdc_sata_port_start(struct ata_port *ap)
{
	int rc;

	rc = pdc_common_port_start(ap);
	if (rc)
		return rc;

	/* fix up PHYMODE4 align timing */
	if (ap->flags & PDC_FLAG_GEN_II) {
		void __iomem *sata_mmio = ap->ioaddr.scr_addr;
		unsigned int tmp;

		tmp = readl(sata_mmio + PDC_PHYMODE4);
		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
		writel(tmp, sata_mmio + PDC_PHYMODE4);
	}

	return 0;
}
/*
 * Acknowledge the FPDMA "DMA setup" and "set device bits" interrupt
 * flags (bits 10 and 11 of PDC_FPDMA_CTLSTAT).
 */
static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u32 tmp;

	tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
	tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;

	/* It's not allowed to write to the entire FPDMA_CTLSTAT register
	   when NCQ is running. So do a byte-sized write to bits 10 and 11. */
	writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
}
/*
 * Pulse the FPDMA reset bit: assert it for 100us, deassert it, then
 * clear any interrupt flags the reset may have raised.
 */
static void pdc_fpdma_reset(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u8 tmp;

	/* only the low byte of the register is manipulated here */
	tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp &= 0x7F;
	tmp |= PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
	udelay(100);
	tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */

	pdc_fpdma_clear_interrupt_flag(ap);
}
/* Wait (bounded, ~10ms worst case) for the ASIC to leave the packet
 * command phase; the phase is reported in the low nibble of the
 * internal DEBUG_2 register. */
static void pdc_not_at_command_packet_phase(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	unsigned int attempt;
	u32 dbg;

	for (attempt = 0; attempt < 100; attempt++) {
		writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
		dbg = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
		if ((dbg & 0xF) != 1)
			return;		/* no longer in packet phase */
		udelay(100);
	}
}
/* Write-1-to-clear the latched SATA error and link-layer error bits. */
static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;

	writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
	writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
}
/* Reset the port's packet/DMA engine via the CTLSTAT reset bit.
 * The poll loop re-asserts PDC_RESET until the chip latches it (up to
 * 11 tries, 100us apart); the bit is then cleared to release reset.
 * Gen II SATA ports additionally get an FPDMA reset and error-latch
 * clear. */
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* Gen II chips must not be reset mid packet-command phase */
	if (ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(ap);

	tmp = readl(ata_ctlstat_mmio);
	tmp |= PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);

	/* wait for the reset bit to read back as set */
	for (i = 11; i > 0; i--) {
		tmp = readl(ata_ctlstat_mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, ata_ctlstat_mmio);
	}

	/* release reset */
	tmp &= ~PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);
	readl(ata_ctlstat_mmio); /* flush */

	if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
		pdc_fpdma_reset(ap);
		pdc_clear_internal_debug_record_error_register(ap);
	}
}
/* Report the PATA cable type: bit 0 of CTLSTAT byte 3 set means a
 * 40-wire cable, otherwise 80-wire. */
static int pdc_pata_cable_detect(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

	if (readb(ata_mmio + PDC_CTLSTAT + 3) & 0x01)
		return ATA_CBL_PATA40;

	return ATA_CBL_PATA80;
}
/* SATA ports always report a SATA "cable". */
static int pdc_sata_cable_detect(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
/* Read one SATA SCR register (SStatus/SError/SControl); the registers
 * are laid out as consecutive 32-bit words at scr_addr. */
static int pdc_sata_scr_read(struct ata_link *link,
			     unsigned int sc_reg, u32 *val)
{
	void __iomem *scr_mmio = link->ap->ioaddr.scr_addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = readl(scr_mmio + sc_reg * 4);
	return 0;
}
/* Write one SATA SCR register; layout as in pdc_sata_scr_read(). */
static int pdc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	void __iomem *scr_mmio = link->ap->ioaddr.scr_addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	writel(val, scr_mmio + sc_reg * 4);
	return 0;
}
/* Build the controller's ATAPI command packet in pp->pkt.  The packet
 * is a byte-coded program: each register write is a (control, value)
 * byte pair, finishing with the CDB payload.  The exact byte offsets
 * are fixed by the hardware format; do not reorder. */
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	dma_addr_t sg_table = ap->bmdma_prd_dma;
	unsigned int cdb_len = qc->dev->cdb_len;
	u8 *cdb = qc->cdb;
	struct pdc_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt;
	__le32 *buf32 = (__le32 *) buf;
	unsigned int dev_sel, feature;

	/* set control bits (byte 0), zero delay seq id (byte 3),
	 * and seq id (byte 2)
	 */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_DMA:
		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			buf32[0] = cpu_to_le32(PDC_PKT_READ);
		else
			buf32[0] = 0;
		break;
	case ATAPI_PROT_NODATA:
		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
		break;
	default:
		BUG();
		break;
	}
	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
	buf32[2] = 0;				/* no next-packet */

	/* select drive */
	if (sata_scr_valid(&ap->link))
		dev_sel = PDC_DEVICE_SATA;
	else
		dev_sel = qc->tf.device;

	buf[12] = (1 << 5) | ATA_REG_DEVICE;
	buf[13] = dev_sel;
	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
	buf[15] = dev_sel; /* once more, waiting for BSY to clear */

	buf[16] = (1 << 5) | ATA_REG_NSECT;
	buf[17] = qc->tf.nsect;
	buf[18] = (1 << 5) | ATA_REG_LBAL;
	buf[19] = qc->tf.lbal;

	/* set feature and byte counter registers */
	if (qc->tf.protocol != ATAPI_PROT_DMA)
		feature = PDC_FEATURE_ATAPI_PIO;
	else
		feature = PDC_FEATURE_ATAPI_DMA;

	buf[20] = (1 << 5) | ATA_REG_FEATURE;
	buf[21] = feature;
	buf[22] = (1 << 5) | ATA_REG_BYTEL;
	buf[23] = qc->tf.lbam;
	buf[24] = (1 << 5) | ATA_REG_BYTEH;
	buf[25] = qc->tf.lbah;

	/* send ATAPI packet command 0xA0 */
	buf[26] = (1 << 5) | ATA_REG_CMD;
	buf[27] = qc->tf.command;

	/* select drive and check DRQ */
	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
	buf[29] = dev_sel;

	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
	BUG_ON(cdb_len & ~0x1E);

	/* append the CDB as the final part */
	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
	memcpy(buf+31, cdb, cdb_len);
}
/**
 *	pdc_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *	Make sure hardware does not choke on it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	const u32 SG_COUNT_ASIC_BUG = 41*4;
	unsigned int si, idx;
	u32 len;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split any segment that crosses a 64K boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* ASIC workaround: a final PRD entry longer than
	 * SG_COUNT_ASIC_BUG bytes must be split in two. */
	len = le32_to_cpu(prd[idx - 1].flags_len);
	if (len > SG_COUNT_ASIC_BUG) {
		u32 addr;

		VPRINTK("Splitting last PRD.\n");

		addr = le32_to_cpu(prd[idx - 1].addr);
		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);

		addr = addr + len - SG_COUNT_ASIC_BUG;
		len = SG_COUNT_ASIC_BUG;
		prd[idx].addr = cpu_to_le32(addr);
		prd[idx].flags_len = cpu_to_le32(len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

		idx++;
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/* Prepare the controller-specific packet and/or PRD table for a queued
 * command; which pieces are built depends on the taskfile protocol.
 * The fallthroughs are intentional: the DMA variants build the S/G
 * table and then share the packet construction with NODATA. */
static void pdc_qc_prep(struct ata_queued_cmd *qc)
{
	struct pdc_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	VPRINTK("ENTER\n");

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc_fill_sg(qc);
		/*FALLTHROUGH*/
	case ATA_PROT_NODATA:
		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
				   qc->dev->devno, pp->pkt);
		if (qc->tf.flags & ATA_TFLAG_LBA48)
			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
		else
			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
		pdc_pkt_footer(&qc->tf, pp->pkt, i);
		break;
	case ATAPI_PROT_PIO:
		pdc_fill_sg(qc);
		break;
	case ATAPI_PROT_DMA:
		pdc_fill_sg(qc);
		/*FALLTHROUGH*/
	case ATAPI_PROT_NODATA:
		pdc_atapi_pkt(qc);
		break;
	default:
		break;
	}
}
/* A SATAII TX4 is a 2nd-generation chip with four ports. */
static int pdc_is_sataii_tx4(unsigned long flags)
{
	const unsigned long wanted = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;

	return (flags & wanted) == wanted;
}
/* Translate a libata port number to the hardware ATA channel number.
 * Only the SATAII TX4 scrambles the mapping; other chips are 1:1. */
static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
					  int is_sataii_tx4)
{
	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2 };

	if (is_sataii_tx4)
		return sataii_tx4_port_remap[port_no];

	return port_no;
}
/* Number of ports on the chip this port belongs to (4 or 2). */
static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
{
	if (ap->flags & PDC_FLAG_4_PORTS)
		return 4;

	return 2;
}
/* Find this port's index in the host's port array and map it to the
 * hardware ATA channel number.  BUGs if the port is not in the array. */
static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
{
	const struct ata_host *host = ap->host;
	unsigned int nr_ports = pdc_sata_nr_ports(ap);
	unsigned int slot = 0;

	while (slot < nr_ports && host->ports[slot] != ap)
		slot++;
	BUG_ON(slot >= nr_ports);

	return pdc_port_no_to_ata_no(slot, pdc_is_sataii_tx4(ap->flags));
}
/* Freeze a port: mask its interrupt and stop the DMA engine. */
static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 ctl;

	ctl = readl(ata_mmio + PDC_CTLSTAT);
	ctl = (ctl | PDC_IRQ_DISABLE) & ~PDC_DMA_ENABLE;
	writel(ctl, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}
/* Freeze a SATA port: first mask its hotplug event sources in the
 * shared PLUG_CSR register, then do the common freeze.
 *
 * Locking:
 * 1) hotplug register accesses must be serialised via host->lock
 * 2) ap->lock == &ap->host->lock
 * 3) ->freeze() and ->thaw() are called with ap->lock held
 */
static void pdc_sata_freeze(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 csr;

	/* set the plug + unplug mask bits for this channel */
	csr = readl(host_mmio + hotplug_offset);
	csr |= 0x11 << (ata_no + 16);
	writel(csr, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */

	pdc_freeze(ap);
}
/* Thaw a port: ack any pending IRQ, then unmask the port interrupt. */
static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 ctl;

	/* a read of the command register acks the IRQ */
	readl(ata_mmio + PDC_COMMAND);

	/* turn the interrupt back on */
	ctl = readl(ata_mmio + PDC_CTLSTAT);
	writel(ctl & ~PDC_IRQ_DISABLE, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}
/* Thaw a SATA port: common thaw, then re-arm hotplug events for this
 * channel (clear latched status, unmask).  Locking: see
 * pdc_sata_freeze(). */
static void pdc_sata_thaw(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 csr;

	pdc_thaw(ap);

	csr = readl(host_mmio + hotplug_offset);
	csr |= 0x11 << ata_no;			/* write-1-to-clear status */
	csr &= ~(0x11 << (ata_no + 16));	/* unmask events */
	writel(csr, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */
}
/* PATA softreset: quiesce the packet engine first, then run the
 * generic SFF softreset sequence. */
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	pdc_reset_port(link->ap);

	return ata_sff_softreset(link, class, deadline);
}
/* Recover the hardware channel number from the MMIO layout:
 * each channel's register block sits at host_mmio + 0x200 + n * 0x80. */
static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];

	/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
	return (ata_mmio - host_mmio - 0x200) / 0x80;
}
/* Pulse this channel's reset bit in byte 1 of the PCI control register
 * (clear for 100us, then set again).  The register is shared by all
 * channels on the chip, so accesses are serialised with
 * hpriv->hard_reset_lock. */
static void pdc_hard_reset_port(struct ata_port *ap)
{
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
	unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
	struct pdc_host_priv *hpriv = ap->host->private_data;
	u8 tmp;

	spin_lock(&hpriv->hard_reset_lock);

	tmp = readb(pcictl_b1_mmio);
	tmp &= ~(0x10 << ata_no);	/* assert channel reset */
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */
	udelay(100);
	tmp |= (0x10 << ata_no);	/* deassert channel reset */
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */

	spin_unlock(&hpriv->hard_reset_lock);
}
/* SATA hardreset: hard-reset the channel and packet engine, then run
 * the standard libata hardreset. */
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	if (link->ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(link->ap);
	/* hotplug IRQs should have been masked by pdc_sata_freeze() */
	pdc_hard_reset_port(link->ap);
	pdc_reset_port(link->ap);

	/* sata_promise can't reliably acquire the first D2H Reg FIS
	 * after hardreset.  Do non-waiting hardreset and request
	 * follow-up SRST.
	 */
	return sata_std_hardreset(link, class, deadline);
}
/* EH entry: reset the packet engine (unless the port is frozen, in
 * which case the reset happens during the EH reset phase), then run
 * the generic SFF error handler. */
static void pdc_error_handler(struct ata_port *ap)
{
	if ((ap->pflags & ATA_PFLAG_FROZEN) == 0)
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}
/* After an internal command: make the DMA engine forget a failure. */
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(qc->ap);
}
/* Error interrupt: translate latched controller error bits into libata
 * AC_ERR_* flags on the active qc, record SError on SATA ports, then
 * reset the packet engine and abort the port.
 * Context: interrupt handler, host lock held. */
static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
			   u32 port_status, u32 err_mask)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int ac_err_mask = 0;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
	port_status &= err_mask;

	if (port_status & PDC_DRIVE_ERR)
		ac_err_mask |= AC_ERR_DEV;
	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
		ac_err_mask |= AC_ERR_OTHER;
	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
		ac_err_mask |= AC_ERR_ATA_BUS;
	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
		ac_err_mask |= AC_ERR_HOST_BUS;

	if (sata_scr_valid(&ap->link)) {
		u32 serror;

		pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
		ehi->serror |= serror;
	}

	qc->err_mask |= ac_err_mask;

	pdc_reset_port(ap);

	ata_port_abort(ap);
}
/* Per-port interrupt service: check the global status register for
 * latched errors first; otherwise complete the active packet-protocol
 * command.  Returns nonzero if the interrupt was handled. */
static unsigned int pdc_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	unsigned int handled = 0;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 port_status, err_mask;

	/* pick the error bits that exist on this chip generation */
	err_mask = PDC_ERR_MASK;
	if (ap->flags & PDC_FLAG_GEN_II)
		err_mask &= ~PDC1_ERR_MASK;
	else
		err_mask &= ~PDC2_ERR_MASK;

	/* check for errors */
	port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
	if (unlikely(port_status & err_mask)) {
		pdc_error_intr(ap, qc, port_status, err_mask);
		return 1;
	}

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
	case ATAPI_PROT_DMA:
	case ATAPI_PROT_NODATA:
		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
		ata_qc_complete(qc);
		handled = 1;
		break;
	default:
		/* not a protocol we drive through the packet engine */
		ap->stats.idle_irq++;
		break;
	}

	return handled;
}
/* Ack the port interrupt; reading the command register clears it. */
static void pdc_irq_clear(struct ata_port *ap)
{
	readl(ap->ioaddr.cmd_addr + PDC_COMMAND);
}
/* Shared interrupt handler: gather (and ack) hotplug flags and the
 * SEQID interrupt mask, then dispatch each port's hotplug event or
 * completed packet command.  Runs under host->lock. */
static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp;
	unsigned int handled = 0;
	void __iomem *host_mmio;
	unsigned int hotplug_offset, ata_no;
	u32 hotplug_status;
	int is_sataii_tx4;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	host_mmio = host->iomap[PDC_MMIO_BAR];

	spin_lock(&host->lock);

	/* read and clear hotplug flags for all ports */
	if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
		hotplug_offset = PDC2_SATA_PLUG_CSR;
		hotplug_status = readl(host_mmio + hotplug_offset);
		if (hotplug_status & 0xff)
			writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
		hotplug_status &= 0xff;	/* clear uninteresting bits */
	} else
		hotplug_status = 0;

	/* reading should also clear interrupts */
	mask = readl(host_mmio + PDC_INT_SEQMASK);

	/* all-ones usually means the chip is gone (e.g. surprise removal) */
	if (mask == 0xffffffff && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 2\n");
		goto done_irq;
	}

	mask &= 0xffff;		/* only 16 SEQIDs possible */
	if (mask == 0 && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 3\n");
		goto done_irq;
	}

	writel(mask, host_mmio + PDC_INT_SEQMASK);

	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);

	for (i = 0; i < host->n_ports; i++) {
		VPRINTK("port %u\n", i);
		ap = host->ports[i];

		/* check for a plug or unplug event */
		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		tmp = hotplug_status & (0x11 << ata_no);
		if (tmp) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			ata_ehi_hotplugged(ehi);
			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
			ata_port_freeze(ap);
			++handled;
			continue;
		}

		/* check for a packet interrupt (SEQIDs are port_no + 1) */
		tmp = mask & (1 << (i + 1));
		if (tmp) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc_host_intr(ap, qc);
		}
	}

	VPRINTK("EXIT\n");

done_irq:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
/* Start execution of a prepared command packet: ring the per-port
 * SEQID doorbell, stamp the packet with its sequence id, then hand the
 * packet's DMA address to the engine. */
static void pdc_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	unsigned int port_no = ap->port_no;
	u8 seq = (u8) (port_no + 1);	/* SEQIDs are 1-based */

	VPRINTK("ENTER, ap %p\n", ap);

	writel(0x00000001, host_mmio + (seq * 4));
	readl(host_mmio + (seq * 4));	/* flush */

	pp->pkt[2] = seq;
	wmb();			/* flush PRD, pkt writes */

	writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
	readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
}
/* Issue a queued command: protocols the packet engine can drive go
 * through pdc_packet_start(); polled and CDB-interrupt cases fall back
 * to the generic SFF issue path.  Fallthroughs are intentional. */
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATAPI_PROT_NODATA:
		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		pdc_packet_start(qc);
		return 0;
	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}
/* Taskfile load; DMA protocols must go through the packet engine, so
 * reaching here with one is a driver bug. */
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}
/* Command issue via taskfile; same DMA-protocol caveat as
 * pdc_tf_load_mmio(). */
static void pdc_exec_command_mmio(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}
/* Decide whether an ATAPI command may use DMA.  Returns nonzero to
 * force PIO, zero to allow DMA. */
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1;	/* atapi dma off by default */

	/* Only whitelisted opcodes may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
		break;
	default:
		break;
	}

	/* LBAs -45150 (FFFF4FA2) through -1 (FFFFFFFF) must use PIO */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba = ((unsigned int)scsicmd[2] << 24) |
				   ((unsigned int)scsicmd[3] << 16) |
				   ((unsigned int)scsicmd[4] << 8) |
				   scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}

	return pio;
}
/* First generation chips cannot use ATAPI DMA on SATA ports:
 * always force PIO. */
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;
}
/* Fill in the MMIO taskfile register addresses for one port; all
 * shadow registers live at fixed offsets from the channel base. */
static void pdc_ata_setup_port(struct ata_port *ap,
			       void __iomem *base, void __iomem *scr_addr)
{
	ap->ioaddr.cmd_addr		= base;
	ap->ioaddr.data_addr		= base;
	ap->ioaddr.feature_addr		=
	ap->ioaddr.error_addr		= base + 0x4;
	ap->ioaddr.nsect_addr		= base + 0x8;
	ap->ioaddr.lbal_addr		= base + 0xc;
	ap->ioaddr.lbam_addr		= base + 0x10;
	ap->ioaddr.lbah_addr		= base + 0x14;
	ap->ioaddr.device_addr		= base + 0x18;
	ap->ioaddr.command_addr		=
	ap->ioaddr.status_addr		= base + 0x1c;
	ap->ioaddr.altstatus_addr	=
	ap->ioaddr.ctl_addr		= base + 0x38;
	ap->ioaddr.scr_addr		= scr_addr;
}
/* One-time controller init: flash-control tweaks, hotplug interrupt
 * setup, and (gen 1 only) TBG clock / slew-rate adjustments. */
static void pdc_host_init(struct ata_host *host)
{
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
	int hotplug_offset;
	u32 tmp;

	if (is_gen2)
		hotplug_offset = PDC2_SATA_PLUG_CSR;
	else
		hotplug_offset = PDC_SATA_PLUG_CSR;

	/*
	 * Except for the hotplug stuff, this is voodoo from the
	 * Promise driver.  Label this entire section
	 * "TODO: figure out why we do this"
	 */

	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
	tmp = readl(host_mmio + PDC_FLASH_CTL);
	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
	if (!is_gen2)
		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
	writel(tmp, host_mmio + PDC_FLASH_CTL);

	/* clear plug/unplug flags for all ports */
	tmp = readl(host_mmio + hotplug_offset);
	writel(tmp | 0xff, host_mmio + hotplug_offset);

	tmp = readl(host_mmio + hotplug_offset);
	if (is_gen2)	/* unmask plug/unplug ints */
		writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
	else		/* mask plug/unplug ints */
		writel(tmp | 0xff0000, host_mmio + hotplug_offset);

	/* don't initialise TBG or SLEW on 2nd generation chips */
	if (is_gen2)
		return;

	/* reduce TBG clock to 133 Mhz. */
	tmp = readl(host_mmio + PDC_TBG_MODE);
	tmp &= ~0x30000; /* clear bit 17, 16*/
	tmp |= 0x10000;  /* set bit 17:16 = 0:1 */
	writel(tmp, host_mmio + PDC_TBG_MODE);

	readl(host_mmio + PDC_TBG_MODE);	/* flush */
	msleep(10);

	/* adjust slew rate control register. */
	tmp = readl(host_mmio + PDC_SLEW_CTL);
	tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
	tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
	writel(tmp, host_mmio + PDC_SLEW_CTL);
}
/* PCI probe: enable the device, map BARs, work out how many ports the
 * chip has (including a trailing PATA port on SATA+PATA chips),
 * allocate and wire up the libata host, then activate it.
 * Returns 0 or a negative errno; resources are devres-managed. */
static int pdc_ata_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
	const struct ata_port_info *ppi[PDC_MAX_PORTS];
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	void __iomem *host_mmio;
	int n_ports, i, rc;
	int is_sataii_tx4;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* enable and acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];

	/* determine port configuration and setup host */
	n_ports = 2;
	if (pi->flags & PDC_FLAG_4_PORTS)
		n_ports = 4;
	for (i = 0; i < n_ports; i++)
		ppi[i] = pi;

	if (pi->flags & PDC_FLAG_SATA_PATA) {
		/* flash-control bit 7 clear => extra PATA port present */
		u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
		if (!(tmp & 0x80))
			ppi[n_ports++] = pi + 1;
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}
	hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	spin_lock_init(&hpriv->hard_reset_lock);
	host->private_data = hpriv;
	host->iomap = pcim_iomap_table(pdev);

	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		/* remap port index to the hardware channel number */
		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		unsigned int ata_offset = 0x200 + ata_no * 0x80;
		unsigned int scr_offset = 0x400 + ata_no * 0x100;

		pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
	}

	/* initialize adapter */
	pdc_host_init(host);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* start host, request IRQ and attach */
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
				 &pdc_ata_sht);
}
/* standard module registration and metadata */
module_pci_driver(pdc_ata_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
sayeed99/flareM_old | drivers/usb/musb/musb_gadget_ep0.c | 3158 | 27724 | /*
* MUSB OTG peripheral driver ep0 handling
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include "musb_core.h"
/* ep0 is always musb->endpoints[0].ep_in */
#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
/*
* locking note: we use only the controller lock, for simpler correctness.
* It's always held with IRQs blocked.
*
* It protects the ep0 request queue as well as ep0_state, not just the
* controller and indexed registers. And that lock stays held unless it
* needs to be dropped to allow reentering this driver ... like upcalls to
* the gadget driver, or adjusting endpoint halt status.
*/
/* Human-readable name for an ep0 state-machine stage (debug output). */
static char *decode_ep0stage(u8 stage)
{
	switch (stage) {
	case MUSB_EP0_STAGE_IDLE:
		return "idle";
	case MUSB_EP0_STAGE_SETUP:
		return "setup";
	case MUSB_EP0_STAGE_TX:
		return "in";
	case MUSB_EP0_STAGE_RX:
		return "out";
	case MUSB_EP0_STAGE_ACKWAIT:
		return "wait";
	case MUSB_EP0_STAGE_STATUSIN:
		return "in/status";
	case MUSB_EP0_STAGE_STATUSOUT:
		return "out/status";
	}
	return "?";
}
/* handle a standard GET_STATUS request
* Context: caller holds controller lock
*/
static int service_tx_status_request(
struct musb *musb,
const struct usb_ctrlrequest *ctrlrequest)
{
void __iomem *mbase = musb->mregs;
int handled = 1;
u8 result[2], epnum = 0;
const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
result[1] = 0;
switch (recip) {
case USB_RECIP_DEVICE:
result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
if (musb->g.is_otg) {
result[0] |= musb->g.b_hnp_enable
<< USB_DEVICE_B_HNP_ENABLE;
result[0] |= musb->g.a_alt_hnp_support
<< USB_DEVICE_A_ALT_HNP_SUPPORT;
result[0] |= musb->g.a_hnp_support
<< USB_DEVICE_A_HNP_SUPPORT;
}
break;
case USB_RECIP_INTERFACE:
result[0] = 0;
break;
case USB_RECIP_ENDPOINT: {
int is_in;
struct musb_ep *ep;
u16 tmp;
void __iomem *regs;
epnum = (u8) ctrlrequest->wIndex;
if (!epnum) {
result[0] = 0;
break;
}
is_in = epnum & USB_DIR_IN;
if (is_in) {
epnum &= 0x0f;
ep = &musb->endpoints[epnum].ep_in;
} else {
ep = &musb->endpoints[epnum].ep_out;
}
regs = musb->endpoints[epnum].regs;
if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
handled = -EINVAL;
break;
}
musb_ep_select(mbase, epnum);
if (is_in)
tmp = musb_readw(regs, MUSB_TXCSR)
& MUSB_TXCSR_P_SENDSTALL;
else
tmp = musb_readw(regs, MUSB_RXCSR)
& MUSB_RXCSR_P_SENDSTALL;
musb_ep_select(mbase, 0);
result[0] = tmp ? 1 : 0;
} break;
default:
/* class, vendor, etc ... delegate */
handled = 0;
break;
}
/* fill up the fifo; caller updates csr0 */
if (handled > 0) {
u16 len = le16_to_cpu(ctrlrequest->wLength);
if (len > 2)
len = 2;
musb_write_fifo(&musb->endpoints[0], len, result);
}
return handled;
}
/*
* handle a control-IN request, the end0 buffer contains the current request
* that is supposed to be a standard control request. Assumes the fifo to
* be at least 2 bytes long.
*
* @return 0 if the request was NOT HANDLED,
* < 0 when error
* > 0 when the request is processed
*
* Context: caller holds controller lock
*/
static int
service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
{
	int handled = 0;	/* not handled */

	/* only standard GET_STATUS is serviced locally; everything else
	 * (including class/vendor requests) is delegated */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
		return 0;

	if (ctrlrequest->bRequest == USB_REQ_GET_STATUS)
		handled = service_tx_status_request(musb, ctrlrequest);
	/* case USB_REQ_SYNC_FRAME: not handled */

	return handled;
}
/*
* Context: caller holds controller lock
*/
/* Give the completed ep0 request back to the gadget driver with
 * status 0; ep0 always lives at endpoints[0].ep_in. */
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
}
/*
* Tries to start B-device HNP negotiation if enabled via sysfs
*/
static inline void musb_try_b_hnp_enable(struct musb *musb)
{
	void __iomem *mbase = musb->mregs;

	dev_dbg(musb->controller, "HNP: Setting HR\n");

	/* set the Host Request bit; the controller drives HNP itself */
	musb_writeb(mbase, MUSB_DEVCTL,
		    musb_readb(mbase, MUSB_DEVCTL) | MUSB_DEVCTL_HR);
}
/*
* Handle all control requests with no DATA stage, including standard
* requests such as:
* USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
* always delegated to the gadget driver
* USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
* always handled here, except for class/vendor/... features
*
* Context: caller holds controller lock
*/
/* Service SET_ADDRESS / CLEAR_FEATURE / SET_FEATURE locally; delegate
 * everything else (SET_CONFIGURATION, class/vendor, ...) by returning 0.
 * Returns 1 when handled, 0 to delegate, -EINVAL to stall. */
static int
service_zero_data_request(struct musb *musb,
		struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int handled = -EINVAL;
	void __iomem *mbase = musb->mregs;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	/* the gadget driver handles everything except what we MUST handle */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* change it after the status stage */
			musb->set_address = true;
			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
			handled = 1;
			break;

		case USB_REQ_CLEAR_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				if (ctrlrequest->wValue
						!= USB_DEVICE_REMOTE_WAKEUP)
					break;
				musb->may_wakeup = 0;
				handled = 1;
				break;
			case USB_RECIP_INTERFACE:
				break;
			case USB_RECIP_ENDPOINT:{
				/* CLEAR_FEATURE(ENDPOINT_HALT): un-stall the
				 * endpoint and restart any queued request */
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				struct musb_request	*request;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				handled = 1;
				/* Ignore request if endpoint is wedged */
				if (musb_ep->wedged)
					break;

				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					csr |= MUSB_TXCSR_CLRDATATOG |
					       MUSB_TXCSR_P_WZC_BITS;
					csr &= ~(MUSB_TXCSR_P_SENDSTALL |
						 MUSB_TXCSR_P_SENTSTALL |
						 MUSB_TXCSR_TXPKTRDY);
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_CLRDATATOG |
					       MUSB_RXCSR_P_WZC_BITS;
					csr &= ~(MUSB_RXCSR_P_SENDSTALL |
						 MUSB_RXCSR_P_SENTSTALL);
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* Maybe start the first request in the queue */
				request = next_request(musb_ep);
				if (!musb_ep->busy && request) {
					dev_dbg(musb->controller, "restarting the request\n");
					musb_ep_restart(musb, request);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				} break;
			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;

		case USB_REQ_SET_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				handled = 1;
				switch (ctrlrequest->wValue) {
				case USB_DEVICE_REMOTE_WAKEUP:
					musb->may_wakeup = 1;
					break;
				case USB_DEVICE_TEST_MODE:
					/* only valid at high speed with a
					 * zero low byte in wIndex */
					if (musb->g.speed != USB_SPEED_HIGH)
						goto stall;
					if (ctrlrequest->wIndex & 0xff)
						goto stall;

					switch (ctrlrequest->wIndex >> 8) {
					case 1:
						pr_debug("TEST_J\n");
						/* TEST_J */
						musb->test_mode_nr =
							MUSB_TEST_J;
						break;
					case 2:
						/* TEST_K */
						pr_debug("TEST_K\n");
						musb->test_mode_nr =
							MUSB_TEST_K;
						break;
					case 3:
						/* TEST_SE0_NAK */
						pr_debug("TEST_SE0_NAK\n");
						musb->test_mode_nr =
							MUSB_TEST_SE0_NAK;
						break;
					case 4:
						/* TEST_PACKET */
						pr_debug("TEST_PACKET\n");
						musb->test_mode_nr =
							MUSB_TEST_PACKET;
						break;
					case 0xc0:
						/* TEST_FORCE_HS */
						pr_debug("TEST_FORCE_HS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HS;
						break;
					case 0xc1:
						/* TEST_FORCE_FS */
						pr_debug("TEST_FORCE_FS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_FS;
						break;
					case 0xc2:
						/* TEST_FIFO_ACCESS */
						pr_debug("TEST_FIFO_ACCESS\n");
						musb->test_mode_nr =
							MUSB_TEST_FIFO_ACCESS;
						break;
					case 0xc3:
						/* TEST_FORCE_HOST */
						pr_debug("TEST_FORCE_HOST\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HOST;
						break;
					default:
						goto stall;
					}

					/* enter test mode after irq */
					if (handled > 0)
						musb->test_mode = true;
					break;
				case USB_DEVICE_B_HNP_ENABLE:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.b_hnp_enable = 1;
					musb_try_b_hnp_enable(musb);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_hnp_support = 1;
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_alt_hnp_support = 1;
					break;
				case USB_DEVICE_DEBUG_MODE:
					handled = 0;
					break;
stall:
				default:
					handled = -EINVAL;
					break;
				}
				break;

			case USB_RECIP_INTERFACE:
				break;

			case USB_RECIP_ENDPOINT:{
				/* SET_FEATURE(ENDPOINT_HALT): stall the
				 * endpoint (and flush its FIFO) */
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue	!= USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
						csr |= MUSB_TXCSR_FLUSHFIFO;
					csr |= MUSB_TXCSR_P_SENDSTALL
						| MUSB_TXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_P_SENDSTALL
						| MUSB_RXCSR_FLUSHFIFO
						| MUSB_RXCSR_CLRDATATOG
						| MUSB_RXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;

			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;
		default:
			/* delegate SET_CONFIGURATION, etc */
			handled = 0;
		}
	} else
		handled = 0;
	return handled;
}
/* we have an ep0out data packet
 * Context: caller holds controller lock
 */
static void ep0_rxstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*request;
	struct usb_request	*req;
	u16			count, csr;

	request = next_ep0_request(musb);
	/* next_ep0_request() may return NULL; computing &request->request
	 * on a NULL pointer is undefined behavior (it only appeared to
	 * work because 'request' is the first member of the struct).
	 */
	req = request ? &request->request : NULL;

	/* read packet and ack; or stall because of gadget driver bug:
	 * should have provided the rx buffer before setup() returned.
	 */
	if (req) {
		void *buf = req->buf + req->actual;
		unsigned len = req->length - req->actual;

		/* read the buffer */
		count = musb_readb(regs, MUSB_COUNT0);
		if (count > len) {
			req->status = -EOVERFLOW;
			count = len;
		}
		if (count > 0) {
			musb_read_fifo(&musb->endpoints[0], count, buf);
			req->actual += count;
		}
		csr = MUSB_CSR0_P_SVDRXPKTRDY;
		/* a short packet (< 64) or a full buffer ends the DATA stage */
		if (count < 64 || req->actual == req->length) {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			csr |= MUSB_CSR0_P_DATAEND;
		} else
			req = NULL;
	} else
		csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;

	/* Completion handler may choose to stall, e.g. because the
	 * message just received holds invalid data.
	 */
	if (req) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, req);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}
/*
 * transmitting to the host (IN), this code might be called from IRQ
 * and from kernel thread.
 *
 * Context: caller holds controller lock
 */
static void ep0_txstate(struct musb *musb)
{
	void __iomem *regs = musb->control_ep->regs;
	struct musb_request *req = next_ep0_request(musb);
	struct usb_request *request;
	u16 csr = MUSB_CSR0_TXPKTRDY;
	u8 *fifo_src;
	u8 fifo_count;

	if (!req) {
		/* WARN_ON(1); */
		dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
		return;
	}

	request = &req->request;

	/* load the data: at most one ep0 FIFO's worth per pass */
	fifo_src = (u8 *) request->buf + request->actual;
	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
		request->length - request->actual);
	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
	request->actual += fifo_count;

	/* update the flags: a short packet, or a full transfer with no
	 * ZLP requested, ends the DATA stage
	 */
	if (fifo_count < MUSB_MAX_END0_PACKET
			|| (request->actual == request->length
				&& !request->zero)) {
		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
		csr |= MUSB_CSR0_P_DATAEND;
	} else
		request = NULL;	/* more to send; don't give back yet */

	/* report completions as soon as the fifo's loaded; there's no
	 * win in waiting till this last packet gets acked.  (other than
	 * very precise fault reporting, needed by USB TMC; possible with
	 * this hardware, but not usable from portable gadget drivers.)
	 */
	if (request) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, request);
		/* giveback may have consumed the pending ack (e.g. a stall) */
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}

	/* send it out, triggering a "txpktrdy cleared" irq */
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}
/*
 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 * Fields are left in USB byte-order.
 *
 * Context: caller holds controller lock.
 */
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
	struct musb_request *r;
	void __iomem *regs = musb->control_ep->regs;

	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);

	/* NOTE: earlier 2.6 versions changed setup packets to host
	 * order, but now USB packets always stay in USB byte order.
	 */
	dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
		req->bRequestType,
		req->bRequest,
		le16_to_cpu(req->wValue),
		le16_to_cpu(req->wIndex),
		le16_to_cpu(req->wLength));

	/* clean up any leftover transfers */
	r = next_ep0_request(musb);
	if (r)
		musb_g_ep0_giveback(musb, &r->request);

	/* For zero-data requests we want to delay the STATUS stage to
	 * avoid SETUPEND errors.  If we read data (OUT), delay accepting
	 * packets until there's a buffer to store them in.
	 *
	 * If we write data, the controller acts happier if we enable
	 * the TX FIFO right away, and give the controller a moment
	 * to switch modes...
	 */
	musb->set_address = false;
	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
	if (req->wLength == 0) {
		if (req->bRequestType & USB_DIR_IN)
			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
	} else if (req->bRequestType & USB_DIR_IN) {
		musb->ep0_state = MUSB_EP0_STAGE_TX;
		/* ack the SETUP immediately, then busy-wait for the core to
		 * drop RXPKTRDY before loading the TX FIFO
		 */
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
		while ((musb_readw(regs, MUSB_CSR0)
				& MUSB_CSR0_RXPKTRDY) != 0)
			cpu_relax();
		musb->ackpend = 0;
	} else
		musb->ep0_state = MUSB_EP0_STAGE_RX;
}
/* Hand a control request to the bound gadget driver's setup() callback.
 * The controller lock is dropped across the callback (annotated below),
 * since the driver may sleep or re-enter the queue path.
 * Returns the driver's result, or -EOPNOTSUPP with no driver bound.
 */
static int
forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int rc = -EOPNOTSUPP;

	if (musb->gadget_driver) {
		spin_unlock(&musb->lock);
		rc = musb->gadget_driver->setup(&musb->g, ctrlrequest);
		spin_lock(&musb->lock);
	}
	return rc;
}
/*
 * Handle peripheral ep0 interrupt
 *
 * Context: irq handler; we won't re-enter the driver that way.
 */
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16 csr;
	u16 len;
	void __iomem *mbase = musb->mregs;
	void __iomem *regs = musb->endpoints[0].regs;
	irqreturn_t retval = IRQ_NONE;

	musb_ep_select(mbase, 0); /* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
	len = musb_readb(regs, MUSB_COUNT0);

	dev_dbg(musb->controller, "csr %04x, count %d, ep0stage %s\n",
			csr, len, decode_ep0stage(musb->ep0_state));

	if (csr & MUSB_CSR0_P_DATAEND) {
		/*
		 * If DATAEND is set we should not call the callback,
		 * hence the status stage is not complete.
		 */
		return IRQ_HANDLED;
	}

	/* I sent a stall.. need to acknowledge it now.. */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		/* re-read: clearing SENTSTALL may expose other status bits */
		csr = musb_readw(regs, MUSB_CSR0);
	}

	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		/* Transition into the early status phase */
		switch (musb->ep0_state) {
		case MUSB_EP0_STAGE_TX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
			break;
		case MUSB_EP0_STAGE_RX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			break;
		default:
			ERR("SetupEnd came in a wrong ep0stage %s\n",
			    decode_ep0stage(musb->ep0_state));
		}
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE: request may need completion */
	}

	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */

		/* update address (if needed) only @ the end of the
		 * status phase per usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}

		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			dev_dbg(musb->controller, "entering TESTMODE\n");

			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);

			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct musb_request *req;

			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, &req->request);
		}

		/*
		 * In case when several interrupts can get coalesced,
		 * check to see if we've already received a SETUP packet...
		 */
		if (csr & MUSB_CSR0_RXPKTRDY)
			goto setup;

		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;

	case MUSB_EP0_STAGE_IDLE:
		/*
		 * This state is typically (but not always) indiscernible
		 * from the status states since the corresponding interrupts
		 * tend to happen within too little period of time (with only
		 * a zero-length packet in between) and so get coalesced...
		 */
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_SETUP:
setup:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest setup;
			int handled = 0;

			/* a SETUP packet is always exactly 8 bytes */
			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;

			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8 power;

				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;

			}

			switch (musb->ep0_state) {

			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);

				/*
				 * We're expecting no data in any case, so
				 * always set the DATAEND bit -- doing this
				 * here helps avoid SetupEnd interrupt coming
				 * in the idle stage when we're stalling...
				 */
				musb->ackpend |= MUSB_CSR0_P_DATAEND;

				/* status stage might be immediate */
				if (handled > 0)
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				break;

			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;

			/* sequence #2 (OUT from host), always forward */
			default: /* MUSB_EP0_STAGE_RX */
				break;
			}

			dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
				handled, csr,
				decode_ep0stage(musb->ep0_state));

			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up:  csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;

			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				musb_ep_select(mbase, 0);
stall:
				dev_dbg(musb->controller, "stall (%d)\n", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;

	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen. But happens with tusb6010 with
		 * g_file_storage and high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;

	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	}

	return retval;
}
/* ep0 is always enabled by the hardware; an explicit enable from the
 * gadget core is a caller error, so reject it.
 */
static int
musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
{
	(void)ep;
	(void)desc;
	return -EINVAL;
}
/* ep0 can never be disabled; reject the attempt. */
static int musb_g_ep0_disable(struct usb_ep *e)
{
	(void)e;
	return -EINVAL;
}
/* Queue a request on endpoint zero.  Only one request may be pending,
 * and only while ep0 is in a DATA stage (RX/TX) or awaiting a
 * zero-length ACK; otherwise -EBUSY / -EINVAL respectively.
 */
static int
musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
{
	struct musb_ep *ep;
	struct musb_request *req;
	struct musb *musb;
	int status;
	unsigned long lockflags;
	void __iomem *regs;

	if (!e || !r)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	regs = musb->control_ep->regs;

	req = to_musb_request(r);
	req->musb = musb;
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->tx = ep->is_in;

	spin_lock_irqsave(&musb->lock, lockflags);

	/* ep0 handles at most one request at a time */
	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	/* requests are legal only in DATA or zero-length ACK stages */
	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_RX: /* control-OUT data */
	case MUSB_EP0_STAGE_TX: /* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */
		status = 0;
		break;
	default:
		dev_dbg(musb->controller, "ep0 request queued in state %d\n",
			musb->ep0_state);
		status = -EINVAL;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&req->list, &ep->req_list);

	dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
		ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
		req->request.length);

	musb_ep_select(musb->mregs, 0);

	/* sequence #1, IN ... start writing the data */
	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
		ep0_txstate(musb);

	/* sequence #3, no-data ... issue IN status */
	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
		if (req->request.length)
			status = -EINVAL;
		else {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			musb_writew(regs, MUSB_CSR0,
					musb->ackpend | MUSB_CSR0_P_DATAEND);
			musb->ackpend = 0;
			musb_g_ep0_giveback(ep->musb, r);
		}

	/* else for sequence #2 (OUT), caller provides a buffer
	 * before the next packet arrives.  deferred responses
	 * (after SETUP is acked) are racey.
	 */
	} else if (musb->ackpend) {
		musb_writew(regs, MUSB_CSR0, musb->ackpend);
		musb->ackpend = 0;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
/* Dequeue is deliberately unsupported on ep0. */
static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	(void)ep;
	(void)req;
	return -EINVAL;
}
/* Protocol-stall endpoint zero.  Only halting (value != 0) is
 * supported, only while no request is queued, and only from states
 * where a stall is legal per the control-transfer sequence.
 */
static int musb_g_ep0_halt(struct usb_ep *e, int value)
{
	struct musb_ep *ep;
	struct musb *musb;
	void __iomem *base, *regs;
	unsigned long flags;
	int status;
	u16 csr;

	if (!e || !value)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	base = musb->mregs;
	regs = musb->control_ep->regs;
	status = 0;

	spin_lock_irqsave(&musb->lock, flags);

	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	musb_ep_select(base, 0);
	csr = musb->ackpend;

	switch (musb->ep0_state) {

	/* Stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MUSB_EP0_STAGE_TX: /* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
	case MUSB_EP0_STAGE_RX: /* control-OUT data */
		/* in DATA stages, read live CSR0 instead of ackpend */
		csr = musb_readw(regs, MUSB_CSR0);
		/* FALLTHROUGH */

	/* It's also OK to issue stalls during callbacks when a non-empty
	 * DATA stage buffer has been read (or even written).
	 */
	case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */
	case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */

		csr |= MUSB_CSR0_P_SENDSTALL;
		musb_writew(regs, MUSB_CSR0, csr);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		musb->ackpend = 0;
		break;
	default:
		dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state);
		status = -EINVAL;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/* Endpoint-zero operations exposed to the gadget core.  ep0 is special:
 * enable/disable and dequeue always fail, and halt only issues a
 * protocol stall in legal control-transfer states.
 */
const struct usb_ep_ops musb_g_ep0_ops = {
	.enable = musb_g_ep0_enable,
	.disable = musb_g_ep0_disable,
	.alloc_request = musb_alloc_request,
	.free_request = musb_free_request,
	.queue = musb_g_ep0_queue,
	.dequeue = musb_g_ep0_dequeue,
	.set_halt = musb_g_ep0_halt,
};
| gpl-2.0 |
zanezam/boeffla-kernel-oos-bacon | arch/arm/mach-msm/rpc_fsusb.c | 3414 | 5734 | /* Copyright (c) 2009, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/err.h>
#include <linux/module.h>
#include <mach/rpc_hsusb.h>
#include <mach/msm_hsusb.h>
#include <mach/msm_rpcrouter.h>
#include <mach/board.h>
#define PM_APP_OTG_PROG 0x30000080
#define PM_APP_OTG_VERS 0x00010001
#define PM_APP_OTG_INIT_PHY 17
#define PM_APP_OTG_RESET_PHY 18
#define PM_APP_OTG_SUSPEND_PHY 7
#define PM_APP_OTG_RESUME_PHY 8
#define PM_APP_OTG_DEV_DISCONNECTED 9
#define PM_APP_OTG_SET_WAKEUP 10
#define PM_APP_OTG_ACQUIRE_BUS 3
#define PM_APP_OTG_RELINQUISH_BUS 4
#define PM_APP_OTG_INIT_DONE_CB_PROC 1
#define PM_APP_OTG_HOST_INIT_CB_PROC 3
#define PM_APP_OTG_REMOTE_DEV_LOST_CB_PROC 8
#define PM_APP_OTG_REMOTE_DEV_RESUMED_CB_PROC 9
#define PM_APP_OTG_ERROR_NOTIFY_CB_PROC 11
#define NUM_OF_CALLBACKS 11
static struct msm_rpc_client *client;
static struct msm_otg_ops *host_ops;
/* Marshal big-endian argument payloads for PM_APP_OTG RPC procedures
 * into 'buf'.  'data' points at the procedure number being invoked;
 * returns the number of bytes written (0 for procedures with no args).
 */
static int msm_fsusb_rpc_arg(struct msm_rpc_client *client,
			     void *buf, void *data)
{
	int i, size = 0;
	uint32_t proc = *(uint32_t *)data;

	switch (proc) {
	case PM_APP_OTG_INIT_PHY: {
		/* one dummy (non-NULL) token per remote callback slot */
		for (i = 0; i < NUM_OF_CALLBACKS; i++) {
			*((uint32_t *)buf) = cpu_to_be32(0x11111111);
			size += sizeof(uint32_t);
			buf += sizeof(uint32_t);
		}
		/* sleep_assert callback function will be registered locally */
		*((uint32_t *)buf) = cpu_to_be32(0xffffffff);
		size += sizeof(uint32_t);
		break;
	}
	case PM_APP_OTG_SET_WAKEUP: {
		/* boolean: enable remote wakeup */
		*((uint32_t *)buf) = cpu_to_be32(1);
		size += sizeof(uint32_t);
		break;
	}
	case PM_APP_OTG_ACQUIRE_BUS: {
		*((uint32_t *)buf) = cpu_to_be32(0xffffffff);
		size += sizeof(uint32_t);
		break;
	}
	default:
		pr_info("%s: No arguments expected\n", __func__);
	}

	return size;
}
/* Synchronous RPC to the modem: initialize the FS USB PHY.
 * msm_fsusb_rpc_arg() marshals the callback-slot tokens for this call.
 */
int msm_fsusb_init_phy(void)
{
	uint32_t proc = PM_APP_OTG_INIT_PHY;

	return msm_rpc_client_req(client, PM_APP_OTG_INIT_PHY,
				  msm_fsusb_rpc_arg, &proc,
				  NULL, NULL, -1);
}
EXPORT_SYMBOL(msm_fsusb_init_phy);
/* Synchronous RPC to the modem: reset the FS USB PHY (no arguments). */
int msm_fsusb_reset_phy(void)
{
	return msm_rpc_client_req(client, PM_APP_OTG_RESET_PHY,
				  NULL, NULL, NULL, NULL, -1);
}
EXPORT_SYMBOL(msm_fsusb_reset_phy);
/* Synchronous RPC to the modem: suspend the FS USB PHY (no arguments). */
int msm_fsusb_suspend_phy(void)
{
	return msm_rpc_client_req(client, PM_APP_OTG_SUSPEND_PHY,
				  NULL, NULL, NULL, NULL, -1);
}
EXPORT_SYMBOL(msm_fsusb_suspend_phy);
/* Synchronous RPC to the modem: resume the FS USB PHY (no arguments). */
int msm_fsusb_resume_phy(void)
{
	return msm_rpc_client_req(client, PM_APP_OTG_RESUME_PHY,
				  NULL, NULL, NULL, NULL, -1);
}
EXPORT_SYMBOL(msm_fsusb_resume_phy);
/* Synchronous RPC to the modem: report that the remote device has
 * disconnected (no arguments).
 */
int msm_fsusb_remote_dev_disconnected(void)
{
	return msm_rpc_client_req(client, PM_APP_OTG_DEV_DISCONNECTED,
				  NULL, NULL, NULL, NULL, -1);
}
EXPORT_SYMBOL(msm_fsusb_remote_dev_disconnected);
/* Synchronous RPC to the modem: enable remote wakeup.  The argument
 * callback marshals the boolean payload for this procedure.
 */
int msm_fsusb_set_remote_wakeup(void)
{
	uint32_t proc = PM_APP_OTG_SET_WAKEUP;

	return msm_rpc_client_req(client, PM_APP_OTG_SET_WAKEUP,
				  msm_fsusb_rpc_arg, &proc,
				  NULL, NULL, -1);
}
EXPORT_SYMBOL(msm_fsusb_set_remote_wakeup);
/* RPC to the modem: take ownership of the USB bus. */
static int msm_fsusb_acquire_bus(void)
{
	uint32_t proc = PM_APP_OTG_ACQUIRE_BUS;

	return msm_rpc_client_req(client, PM_APP_OTG_ACQUIRE_BUS,
				  msm_fsusb_rpc_arg, &proc,
				  NULL, NULL, -1);
}
/* RPC to the modem: give up ownership of the USB bus (no arguments). */
static int msm_fsusb_relinquish_bus(void)
{
	return msm_rpc_client_req(client, PM_APP_OTG_RELINQUISH_BUS,
				  NULL, NULL, NULL, NULL, -1);
}
/* Restart a USB session: drop any existing bus ownership, then
 * (re)acquire it.  Failures are logged but not propagated.
 */
static void msm_fsusb_request_session(void)
{
	if (msm_fsusb_relinquish_bus() < 0)
		pr_err("relinquish_bus rpc failed\n");

	if (msm_fsusb_acquire_bus() < 0)
		pr_err("acquire_bus rpc failed\n");
}
/* Dispatch incoming RPC callbacks from the modem.  Acks the request
 * first, then acts on the procedure number.  Returns 0, or the error
 * from sending the accepted reply.
 *
 * NOTE(review): host_ops is assumed non-NULL here because callbacks can
 * only arrive after msm_fsusb_rpc_init() stored it -- confirm no
 * callback can race a failed/absent init.
 */
static int msm_fsusb_cb_func(struct msm_rpc_client *client,
			     void *buffer, int in_size)
{
	struct rpc_request_hdr *req;
	int rc;

	req = buffer;

	/* ack the callback before handling it */
	msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid),
				     RPC_ACCEPTSTAT_SUCCESS);
	rc = msm_rpc_send_accepted_reply(client, 0);
	if (rc) {
		pr_err("%s: sending reply failed: %d\n", __func__, rc);
		return rc;
	}
	switch (be32_to_cpu(req->procedure)) {
	case PM_APP_OTG_INIT_DONE_CB_PROC: {
		pr_debug("pm_app_otg_init_done callback received\n");
		msm_fsusb_request_session();
		break;
	}
	case PM_APP_OTG_HOST_INIT_CB_PROC: {
		pr_debug("pm_app_otg_host_init_cb_proc callback received\n");
		host_ops->request(host_ops->handle, REQUEST_START);
		break;
	}
	case PM_APP_OTG_REMOTE_DEV_LOST_CB_PROC: {
		pr_debug("pm_app_otg_remote_dev_lost_cb_proc"
			 " callback received\n");
		msm_fsusb_acquire_bus();
		host_ops->request(host_ops->handle, REQUEST_STOP);
		break;
	}
	case PM_APP_OTG_REMOTE_DEV_RESUMED_CB_PROC: {
		pr_debug("pm_app_otg_remote_dev_resumed_cb_proc"
			 " callback received\n");
		host_ops->request(host_ops->handle, REQUEST_RESUME);
		break;
	}
	case PM_APP_OTG_ERROR_NOTIFY_CB_PROC: {
		pr_err("pm_app_otg_error_notify_cb_proc callback received\n");
		break;
	}
	default:
		/* req->procedure is wire (big-endian) order; convert before
		 * printing, otherwise the logged value is byte-swapped.
		 */
		pr_err("%s: unknown callback(proc = %d) received\n",
		       __func__, be32_to_cpu(req->procedure));
	}
	return 0;
}
int msm_fsusb_rpc_init(struct msm_otg_ops *ops)
{
host_ops = ops;
client = msm_rpc_register_client("fsusb",
PM_APP_OTG_PROG,
PM_APP_OTG_VERS, 1,
msm_fsusb_cb_func);
if (IS_ERR(client)) {
pr_err("%s: couldn't open rpc client\n", __func__);
return PTR_ERR(client);
}
return 0;
}
EXPORT_SYMBOL(msm_fsusb_rpc_init);
/* Tear down the RPC client registered in msm_fsusb_rpc_init(). */
void msm_fsusb_rpc_deinit(void)
{
	msm_rpc_unregister_client(client);
}
EXPORT_SYMBOL(msm_fsusb_rpc_deinit);
| gpl-2.0 |
psyke83/kernel_samsung_gio2europa | drivers/w1/masters/matrox_w1.c | 4438 | 6028 | /*
* matrox_w1.c
*
* Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/types.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include "../w1.h"
#include "../w1_int.h"
#include "../w1_log.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
static struct pci_device_id matrox_w1_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
{ },
};
MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
static int __devinit matrox_w1_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit matrox_w1_remove(struct pci_dev *);
static struct pci_driver matrox_w1_pci_driver = {
.name = "matrox_w1",
.id_table = matrox_w1_tbl,
.probe = matrox_w1_probe,
.remove = __devexit_p(matrox_w1_remove),
};
/*
* Matrox G400 DDC registers.
*/
#define MATROX_G400_DDC_CLK (1<<4)
#define MATROX_G400_DDC_DATA (1<<1)
#define MATROX_BASE 0x3C00
#define MATROX_STATUS 0x1e14
#define MATROX_PORT_INDEX_OFFSET 0x00
#define MATROX_PORT_DATA_OFFSET 0x0A
#define MATROX_GET_CONTROL 0x2A
#define MATROX_GET_DATA 0x2B
#define MATROX_CURSOR_CTL 0x06
struct matrox_device
{
void __iomem *base_addr;
void __iomem *port_index;
void __iomem *port_data;
u8 data_mask;
unsigned long phys_addr;
void __iomem *virt_addr;
unsigned long found;
struct w1_bus_master *bus_master;
};
static u8 matrox_w1_read_ddc_bit(void *);
static void matrox_w1_write_ddc_bit(void *, u8);
/*
* These functions read and write DDC Data bit.
*
* Using tristate pins, since i can't find any open-drain pin in whole motherboard.
* Unfortunately we can't connect to Intel's 82801xx IO controller
* since we don't know motherboard schema, which has pretty unused(may be not) GPIO.
*
* I've heard that PIIX also has open drain pin.
*
* Port mapping.
*/
/* Indexed register read: select the register through the index port,
 * then read its value from the data port.
 */
static __inline__ u8 matrox_w1_read_reg(struct matrox_device *dev, u8 reg)
{
	u8 val;

	writeb(reg, dev->port_index);
	val = readb(dev->port_data);
	barrier();

	return val;
}
/* Indexed register write: select the register through the index port,
 * write the value to the data port, then fence the writes.
 */
static __inline__ void matrox_w1_write_reg(struct matrox_device *dev, u8 reg, u8 val)
{
	writeb(reg, dev->port_index);
	writeb(val, dev->port_data);
	wmb();
}
/* Drive the DDC data line.  The logic is inverted because the pin is
 * emulated open-drain with a tristate output: writing a 1 means "drive
 * low" via the data mask, writing a 0 releases the line (pulled high).
 */
static void matrox_w1_write_ddc_bit(void *data, u8 bit)
{
	u8 ret;
	struct matrox_device *dev = data;

	/* invert: bit set -> drive the masked pin, bit clear -> release */
	if (bit)
		bit = 0;
	else
		bit = dev->data_mask;

	/* read-modify-write only the data bit in the control register */
	ret = matrox_w1_read_reg(dev, MATROX_GET_CONTROL);
	matrox_w1_write_reg(dev, MATROX_GET_CONTROL, ((ret & ~dev->data_mask) | bit));
	matrox_w1_write_reg(dev, MATROX_GET_DATA, 0x00);
}
/* Sample the DDC data line via the GET_DATA register. */
static u8 matrox_w1_read_ddc_bit(void *data)
{
	struct matrox_device *dev = data;

	return matrox_w1_read_reg(dev, MATROX_GET_DATA);
}
/* Put the GPIO pair into a known state: data register all-ones,
 * control register cleared (line released).
 */
static void matrox_w1_hw_init(struct matrox_device *dev)
{
	matrox_w1_write_reg(dev, MATROX_GET_DATA, 0xFF);
	matrox_w1_write_reg(dev, MATROX_GET_CONTROL, 0x00);
}
/* Probe a Matrox G400: map its register BAR, wire up the GPIO-based
 * 1-wire bit operations, and register a w1 bus master.
 * Returns 0 on success or a negative errno; all resources acquired so
 * far are released on the error path.
 */
static int __devinit matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct matrox_device *dev;
	int err;

	assert(pdev != NULL);
	assert(ent != NULL);

	if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
		return -ENODEV;

	/* single allocation carries the device plus its bus_master */
	dev = kzalloc(sizeof(struct matrox_device) +
		      sizeof(struct w1_bus_master), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev,
			"%s: Failed to create new matrox_device object.\n",
			__func__);
		return -ENOMEM;
	}

	dev->bus_master = (struct w1_bus_master *)(dev + 1);

	/*
	 * True for G400, for some other we need resource 0, see drivers/video/matrox/matroxfb_base.c
	 */
	dev->phys_addr = pci_resource_start(pdev, 1);

	dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
	if (!dev->virt_addr) {
		dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
			__func__, dev->phys_addr, 16384);
		err = -EIO;
		goto err_out_free_device;
	}

	dev->base_addr = dev->virt_addr + MATROX_BASE;
	dev->port_index = dev->base_addr + MATROX_PORT_INDEX_OFFSET;
	dev->port_data = dev->base_addr + MATROX_PORT_DATA_OFFSET;
	dev->data_mask = (MATROX_G400_DDC_DATA);

	matrox_w1_hw_init(dev);

	dev->bus_master->data = dev;
	dev->bus_master->read_bit = &matrox_w1_read_ddc_bit;
	dev->bus_master->write_bit = &matrox_w1_write_ddc_bit;

	err = w1_add_master_device(dev->bus_master);
	if (err)
		goto err_out_free_device;

	pci_set_drvdata(pdev, dev);

	/* 'found' gates the teardown work in matrox_w1_remove() */
	dev->found = 1;

	dev_info(&pdev->dev, "Matrox G400 GPIO transport layer for 1-wire.\n");

	return 0;

err_out_free_device:
	if (dev->virt_addr)
		iounmap(dev->virt_addr);
	kfree(dev);

	return err;
}
/* Undo matrox_w1_probe(): unregister the bus master and unmap the
 * registers only when probe completed ('found' set); the device struct
 * is always freed.
 */
static void __devexit matrox_w1_remove(struct pci_dev *pdev)
{
	struct matrox_device *priv = pci_get_drvdata(pdev);

	assert(priv != NULL);

	if (priv->found) {
		w1_remove_master_device(priv->bus_master);
		iounmap(priv->virt_addr);
	}
	kfree(priv);
}
/* Module entry: register the PCI driver; the core calls our probe. */
static int __init matrox_w1_init(void)
{
	return pci_register_driver(&matrox_w1_pci_driver);
}
/* Module exit: unregister the PCI driver (triggers remove for bound devices). */
static void __exit matrox_w1_fini(void)
{
	pci_unregister_driver(&matrox_w1_pci_driver);
}
module_init(matrox_w1_init);
module_exit(matrox_w1_fini);
| gpl-2.0 |
AuxXxi/caf_kernel | arch/openrisc/kernel/idle.c | 4694 | 1886 | /*
* OpenRISC idle.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Idle daemon for or32. Idle daemon will handle any action
* that needs to be taken when the system becomes idle.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
void (*powersave) (void) = NULL;
/* Default (no-op) idle hook; barrier() only keeps the compiler from
 * optimizing the call site away / reordering around it.
 */
static inline void pm_idle(void)
{
	barrier();
}
/* Per-CPU idle loop: sleep (via the optional 'powersave' hook) until
 * work arrives, then schedule.  The nohz-tick / RCU enter-exit pairing
 * and the irq-disable around the powersave call are order-sensitive.
 */
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			/* stop polling before sleeping so wakers send an IPI */
			clear_thread_flag(TIF_POLLING_NRFLAG);

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			/* re-check under irqs-off to avoid a lost-wakeup race */
			if (!need_resched() && powersave != NULL)
				powersave();
			start_critical_timings();
			local_irq_enable();
			set_thread_flag(TIF_POLLING_NRFLAG);
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
| gpl-2.0 |
nbars/Custom-Kernel-SM-P600 | kernel-src/drivers/scsi/qla4xxx/ql4_iocb.c | 4950 | 14246 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include <scsi/scsi_tcq.h>
/* Return 1 when the request ring can hold 'req_cnt' more entries
 * (plus a two-entry safety margin), else 0.  Refreshes the cached
 * free-entry count from the hardware shadow out-pointer when the
 * cached value looks insufficient.
 */
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t out;

	if ((req_cnt + 2) >= ha->req_q_count) {
		out = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < out)
			ha->req_q_count = out - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
						(ha->request_in - out);
	}

	return ((req_cnt + 2) < ha->req_q_count) ? 1 : 0;
}
/* Bump the request-queue in-index and entry pointer, wrapping both
 * back to the start of the ring when the end is reached.
 */
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	ha->request_in++;
	ha->request_ptr++;
	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	}
}
/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 * - returns the current request_in pointer (if queue not full)
 * - advances the request_in pointer
 * - checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		return QLA_ERROR;

	/* hand out the current slot zeroed, then claim it */
	*queue_entry = ha->request_ptr;
	memset(*queue_entry, 0, sizeof(**queue_entry));
	qla4xxx_advance_req_ring_ptr(ha);
	ha->req_q_count -= req_cnt;

	return QLA_SUCCESS;
}
/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 * Returns QLA_SUCCESS, or QLA_ERROR if no request-ring slot was free.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);

	/* entry must be visible in memory before ringing the doorbell */
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
/* Claim the current request-ring slot as a continuation IOCB, advance
 * the ring, and fill in the continuation header defaults.
 */
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont;

	cont = (struct continuation_t1_entry *)ha->request_ptr;
	qla4xxx_advance_req_ring_ptr(ha);

	cont->hdr.entryType = ET_CONTINUE;
	cont->hdr.entryCount = 1;
	cont->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont;
}
/* How many IOCBs are needed for 'dsds' data segments: one command
 * entry holds COMMAND_SEG segments; each continuation entry holds
 * CONTINUE_SEG more (rounding the overflow up).
 */
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t extra, iocbs = 1;

	if (dsds > COMMAND_SEG) {
		extra = dsds - COMMAND_SEG;
		iocbs += extra / CONTINUE_SEG;
		if (extra % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}
/* Fill the command IOCB (and continuation IOCBs as needed) with the
 * DMA scatter-gather segments of 'srb'.  'tot_dsds' is the mapped
 * segment count.  Allocates continuation entries from the request
 * ring on the fly once the command entry's segment slots run out.
 */
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
	struct command_t3_entry *cmd_entry,
	uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd =
				(struct data_seg_a64 *)
				&cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		/* 64-bit DMA address split into low/high little-endian words */
		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}
/**
 * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 *
 * The doorbell write carries the new request-in index.  The old local
 * "dbval" computation was dead code (its result was never used anywhere)
 * and has been removed.
 **/
void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
{
        qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}
/**
 * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha)
{
        writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out);
        /* read back to flush the posted MMIO write */
        readl(&ha->qla4_8xxx_reg->rsp_q_out);
}
/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
        writel(ha->request_in, &ha->reg->req_q_in);
        /* read back to flush the posted MMIO write */
        readl(&ha->reg->req_q_in);
}
/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
        writel(ha->response_out, &ha->reg->rsp_q_out);
        /* read back to flush the posted MMIO write */
        readl(&ha->reg->rsp_q_out);
}
/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 *
 * Returns QLA_SUCCESS when the IOCB was queued, QLA_ERROR otherwise
 * (adapter offline, DMA mapping failure, or no ring/IOCB space).
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
        struct scsi_cmnd *cmd = srb->cmd;
        struct ddb_entry *ddb_entry;
        struct command_t3_entry *cmd_entry;

        int nseg;
        uint16_t tot_dsds;
        uint16_t req_cnt;

        unsigned long flags;
        uint32_t index;
        char tag[2];

        /* Get real lun and adapter */
        ddb_entry = srb->ddb;

        tot_dsds = 0;

        /* Acquire hardware specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* the block-layer request tag doubles as the IOCB handle */
        index = (uint32_t)cmd->request->tag;

        /*
         * Check to see if adapter is online before placing request on
         * request queue.  If a reset occurs and a request is in the queue,
         * the firmware will still attempt to process the request, retrieving
         * garbage for pointers.
         */
        if (!test_bit(AF_ONLINE, &ha->flags)) {
                DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
                              "Do not issue command.\n",
                              ha->host_no, __func__));
                goto queuing_error;
        }

        /* Calculate the number of request entries needed. */
        nseg = scsi_dma_map(cmd);
        if (nseg < 0)
                goto queuing_error;
        tot_dsds = nseg;

        req_cnt = qla4xxx_calc_request_entries(tot_dsds);
        if (!qla4xxx_space_in_req_ring(ha, req_cnt))
                goto queuing_error;

        /* total iocbs active */
        if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
                goto queuing_error;

        /* Build command packet */
        cmd_entry = (struct command_t3_entry *) ha->request_ptr;
        memset(cmd_entry, 0, sizeof(struct command_t3_entry));
        cmd_entry->hdr.entryType = ET_COMMAND;
        cmd_entry->handle = cpu_to_le32(index);
        cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

        int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
        cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
        memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
        cmd_entry->hdr.entryCount = req_cnt;

        /* Set data transfer direction control flags
         * NOTE: Look at data_direction bits iff there is data to be
         *       transferred, as the data direction bit is sometimes filled
         *       in when there is no data to be transferred */
        cmd_entry->control_flags = CF_NO_DATA;
        if (scsi_bufflen(cmd)) {
                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        cmd_entry->control_flags = CF_WRITE;
                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                        cmd_entry->control_flags = CF_READ;

                /* accumulate transfer stats; fold full MiB into the
                 * total_mbytes counter, keep the remainder in bytes */
                ha->bytes_xfered += scsi_bufflen(cmd);
                if (ha->bytes_xfered & ~0xFFFFF){
                        ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
                        ha->bytes_xfered &= 0xFFFFF;
                }
        }

        /* Set tagged queueing control flags */
        cmd_entry->control_flags |= CF_SIMPLE_TAG;
        if (scsi_populate_tag_msg(cmd, tag))
                switch (tag[0]) {
                case MSG_HEAD_TAG:
                        cmd_entry->control_flags |= CF_HEAD_TAG;
                        break;
                case MSG_ORDERED_TAG:
                        cmd_entry->control_flags |= CF_ORDERED_TAG;
                        break;
                }

        qla4xxx_advance_req_ring_ptr(ha);
        qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
        /* ensure the IOCB is visible in memory before ringing the doorbell */
        wmb();

        srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

        /* update counters */
        srb->state = SRB_ACTIVE_STATE;
        srb->flags |= SRB_DMA_VALID;

        /* Track IOCB used */
        ha->iocb_cnt += req_cnt;
        srb->iocb_cnt = req_cnt;
        ha->req_q_count -= req_cnt;

        ha->isp_ops->queue_iocb(ha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_SUCCESS;

queuing_error:
        /* unmap only if scsi_dma_map() succeeded (tot_dsds set) */
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_ERROR;
}
/*
 * qla4xxx_send_passthru0 - queue a PASSTHRU0 IOCB carrying an iSCSI PDU
 * @task: iscsi task whose request/response buffers were set up by the caller
 *
 * Builds one passthru IOCB under ha->hardware_lock and rings the doorbell.
 * Returns QLA_SUCCESS, or QLA_ERROR when the request ring is full.
 */
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
        struct passthru0 *passthru_iocb;
        struct iscsi_session *sess = task->conn->session;
        struct ddb_entry *ddb_entry = sess->dd_data;
        struct scsi_qla_host *ha = ddb_entry->ha;
        struct ql4_task_data *task_data = task->dd_data;
        uint16_t ctrl_flags = 0;
        unsigned long flags;
        int ret = QLA_ERROR;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        task_data->iocb_req_cnt = 1;
        /* Put the IOCB on the request queue */
        if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
                goto queuing_error;

        passthru_iocb = (struct passthru0 *) ha->request_ptr;

        memset(passthru_iocb, 0, sizeof(struct passthru0));
        passthru_iocb->hdr.entryType = ET_PASSTHRU0;
        passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
        passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
        passthru_iocb->handle = task->itt;
        passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
        passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

        /* Setup the out & in DSDs */
        if (task_data->req_len) {
                /* PDU payload follows the iscsi_hdr in req_buffer */
                memcpy((uint8_t *)task_data->req_buffer +
                       sizeof(struct iscsi_hdr), task->data, task->data_count);
                ctrl_flags |= PT_FLAG_SEND_BUFFER;
                passthru_iocb->out_dsd.base.addrLow =
                                        cpu_to_le32(LSDW(task_data->req_dma));
                passthru_iocb->out_dsd.base.addrHigh =
                                        cpu_to_le32(MSDW(task_data->req_dma));
                passthru_iocb->out_dsd.count =
                                        cpu_to_le32(task->data_count +
                                                    sizeof(struct iscsi_hdr));
        }

        if (task_data->resp_len) {
                passthru_iocb->in_dsd.base.addrLow =
                                        cpu_to_le32(LSDW(task_data->resp_dma));
                passthru_iocb->in_dsd.base.addrHigh =
                                        cpu_to_le32(MSDW(task_data->resp_dma));
                passthru_iocb->in_dsd.count =
                                        cpu_to_le32(task_data->resp_len);
        }

        ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
        passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

        /* Update the request pointer */
        qla4xxx_advance_req_ring_ptr(ha);
        /* make the IOCB visible before ringing the doorbell */
        wmb();

        /* Track IOCB used */
        ha->iocb_cnt += task_data->iocb_req_cnt;
        ha->req_q_count -= task_data->iocb_req_cnt;
        ha->isp_ops->queue_iocb(ha);
        ret = QLA_SUCCESS;

queuing_error:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return ret;
}
/*
 * qla4xxx_get_new_mrb - allocate a zeroed mailbox request block bound to @ha.
 *
 * Returns the new mrb, or NULL on allocation failure.  Caller owns the
 * allocation until it is handed off (or kfree'd).
 */
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
        struct mrb *new_mrb = kzalloc(sizeof(*new_mrb), GFP_KERNEL);

        if (new_mrb)
                new_mrb->ha = ha;
        return new_mrb;
}
/*
 * qla4xxx_send_mbox_iocb - queue a mailbox-command IOCB built from @in_mbox
 * @ha:      adapter
 * @mrb:     mailbox request block tracking the outstanding command
 * @in_mbox: input mailbox values (32 bytes copied verbatim into the IOCB)
 *
 * On success the mrb is published in ha->active_mrb_array[] under the chosen
 * handle, so ownership passes to the completion path.  Returns QLA_SUCCESS,
 * or QLA_ERROR when no request-ring slot is available.
 */
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
                                  uint32_t *in_mbox)
{
        int rval = QLA_SUCCESS;
        uint32_t i;
        unsigned long flags;
        uint32_t index = 0;

        /* Acquire hardware specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Get pointer to the queue entry for the marker */
        rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
        if (rval != QLA_SUCCESS)
                goto exit_mbox_iocb;

        index = ha->mrb_index;
        /* get valid mrb index: scan for a free handle, wrapping within
         * [1, MAX_MRB) — slot 0 is never used.
         * NOTE(review): if every slot is occupied the loop falls through
         * and the last probed slot is silently overwritten; confirm the
         * array cannot be exhausted in practice. */
        for (i = 0; i < MAX_MRB; i++) {
                index++;
                if (index == MAX_MRB)
                        index = 1;
                if (ha->active_mrb_array[index] == NULL) {
                        ha->mrb_index = index;
                        break;
                }
        }

        mrb->iocb_cnt = 1;
        ha->active_mrb_array[index] = mrb;
        mrb->mbox->handle = index;
        mrb->mbox->hdr.entryType = ET_MBOX_CMD;
        mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
        memcpy(mrb->mbox->in_mbox, in_mbox, 32);
        mrb->mbox_cmd = in_mbox[0];
        /* make the IOCB visible before ringing the doorbell */
        wmb();

        ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
}
/*
 * qla4xxx_ping_iocb - issue a MBOX_CMD_PING via a mailbox IOCB
 * @ha:           adapter
 * @options:      ping options passed in mailbox 1
 * @payload_size: ping payload size passed in mailbox 6
 * @pid:          caller-supplied id stored in the mrb for completion matching
 * @ipaddr:       16-byte address buffer (copied into mailboxes 2..5)
 *
 * On success the mrb is owned by ha->active_mrb_array[] (published by
 * qla4xxx_send_mbox_iocb) and is intentionally NOT freed here; it is only
 * freed on the error paths.  Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
                      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
        uint32_t in_mbox[8];
        struct mrb *mrb = NULL;
        int rval = QLA_SUCCESS;

        memset(in_mbox, 0, sizeof(in_mbox));

        mrb = qla4xxx_get_new_mrb(ha);
        if (!mrb) {
                DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
                                  __func__));
                rval = QLA_ERROR;
                goto exit_ping;
        }

        in_mbox[0] = MBOX_CMD_PING;
        in_mbox[1] = options;
        /* 16-byte address split across four consecutive 32-bit mailboxes */
        memcpy(&in_mbox[2], &ipaddr[0], 4);
        memcpy(&in_mbox[3], &ipaddr[4], 4);
        memcpy(&in_mbox[4], &ipaddr[8], 4);
        memcpy(&in_mbox[5], &ipaddr[12], 4);
        in_mbox[6] = payload_size;

        mrb->pid = pid;
        rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
        if (rval != QLA_SUCCESS)
                goto exit_ping;

        return rval;
exit_ping:
        kfree(mrb);
        return rval;
}
| gpl-2.0 |
VegaDevTeam/android_kernel_pantech_ef52s | drivers/edac/edac_device_sysfs.c | 4950 | 24218 | /*
* file for managing the edac_device subsystem of devices for EDAC
*
* (C) 2007 SoftwareBitMaker
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Written Doug Thompson <norsk5@xmission.com>
*
*/
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include "edac_core.h"
#include "edac_module.h"
#define EDAC_DEVICE_SYMLINK "device"
#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
/*
 * Set of edac_device_ctl_info attribute store/show functions
 */

/* 'log_ue' — whether uncorrectable errors are logged (0/1) */
static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
                                        *ctl_info, char *data)
{
        return sprintf(data, "%u\n", ctl_info->log_ue);
}

static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
                                        *ctl_info, const char *data,
                                        size_t count)
{
        /* if parameter is zero, turn off flag, if non-zero turn on flag */
        ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);

        return count;
}

/* 'log_ce' — whether correctable errors are logged (0/1) */
static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
                                        *ctl_info, char *data)
{
        return sprintf(data, "%u\n", ctl_info->log_ce);
}

static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
                                        *ctl_info, const char *data,
                                        size_t count)
{
        /* if parameter is zero, turn off flag, if non-zero turn on flag */
        ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);

        return count;
}

/* 'panic_on_ue' — panic the machine on an uncorrectable error (0/1) */
static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
                                                *ctl_info, char *data)
{
        return sprintf(data, "%u\n", ctl_info->panic_on_ue);
}

static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
                                                 *ctl_info, const char *data,
                                                 size_t count)
{
        /* if parameter is zero, turn off flag, if non-zero turn on flag */
        ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);

        return count;
}

/* 'poll_msec' show and store functions — polling period in milliseconds */
static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
                                        *ctl_info, char *data)
{
        return sprintf(data, "%u\n", ctl_info->poll_msec);
}

static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
                                        *ctl_info, const char *data,
                                        size_t count)
{
        unsigned long value;

        /* get the value and enforce that it is non-zero, must be at least
         * one millisecond for the delay period, between scans
         * Then cancel last outstanding delay for the work request
         * and set a new one.
         */
        value = simple_strtoul(data, NULL, 0);
        edac_device_reset_delay_period(ctl_info, value);

        return count;
}
/* edac_device_ctl_info specific attribute structure */
struct ctl_info_attribute {
        struct attribute attr;
        ssize_t(*show) (struct edac_device_ctl_info *, char *);
        ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
};

#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
#define to_ctl_info_attr(a) container_of(a,struct ctl_info_attribute,attr)

/* Function to 'show' fields from the edac_dev 'ctl_info' structure
 * — generic sysfs dispatcher that forwards to the attribute's show() */
static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
                                struct attribute *attr, char *buffer)
{
        struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
        struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);

        if (ctl_info_attr->show)
                return ctl_info_attr->show(edac_dev, buffer);
        return -EIO;
}

/* Function to 'store' fields into the edac_dev 'ctl_info' structure
 * — generic sysfs dispatcher that forwards to the attribute's store() */
static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
                                struct attribute *attr,
                                const char *buffer, size_t count)
{
        struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
        struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);

        if (ctl_info_attr->store)
                return ctl_info_attr->store(edac_dev, buffer, count);
        return -EIO;
}

/* edac_dev file operations for an 'ctl_info' */
static const struct sysfs_ops device_ctl_info_ops = {
        .show = edac_dev_ctl_info_show,
        .store = edac_dev_ctl_info_store
};

/* convenience constructor for a named ctl_info attribute */
#define CTL_INFO_ATTR(_name,_mode,_show,_store)        \
static struct ctl_info_attribute attr_ctl_info_##_name = {      \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
        .show   = _show,                                        \
        .store  = _store,                                       \
};
/* Declare the various ctl_info attributes here and their respective ops */
CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
        edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
        edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
        edac_device_ctl_panic_on_ue_show,
        edac_device_ctl_panic_on_ue_store);
CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
        edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);

/* Base Attributes of the EDAC_DEVICE ECC object
 * — NULL-terminated list installed as the main kobject's defaults */
static struct ctl_info_attribute *device_ctrl_attr[] = {
        &attr_ctl_info_panic_on_ue,
        &attr_ctl_info_log_ue,
        &attr_ctl_info_log_ce,
        &attr_ctl_info_poll_msec,
        NULL,
};
/*
 * edac_device_ctrl_master_release
 *
 * called when the reference count for the 'main' kobj
 * for a edac_device control struct reaches zero
 *
 * Reference count model:
 *      One 'main' kobject for each control structure allocated.
 *      That main kobj is initially set to one AND
 *      the reference count for the EDAC 'core' module is
 *      bumped by one, thus added 'keep in memory' dependency.
 *
 *      Each new internal kobj (in instances and blocks) then
 *      bumps the 'main' kobject.
 *
 *      When they are released their release functions decrement
 *      the 'main' kobj.
 *
 *      When the main kobj reaches zero (0) then THIS function
 *      is called which then decrements the EDAC 'core' module.
 *      When the module reference count reaches zero then the
 *      module no longer has dependency on keeping the release
 *      function code in memory and module can be unloaded.
 *
 *      This will support several control objects as well, each
 *      with its own 'main' kobj.
 */
static void edac_device_ctrl_master_release(struct kobject *kobj)
{
        struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);

        debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);

        /* decrement the EDAC CORE module ref count */
        module_put(edac_dev->owner);

        /* free the control struct containing the 'main' kobj
         * passed in to this routine
         */
        kfree(edac_dev);
}

/* ktype for the main (master) kobject */
static struct kobj_type ktype_device_ctrl = {
        .release = edac_device_ctrl_master_release,
        .sysfs_ops = &device_ctl_info_ops,
        .default_attrs = (struct attribute **)device_ctrl_attr,
};
/*
 * edac_device_register_sysfs_main_kobj
 *
 *      perform the high level setup for the new edac_device instance
 *
 * Return:  0 SUCCESS
 *         !0 FAILURE
 *
 * On success the control struct may only be freed through
 * edac_device_unregister_sysfs_main_kobj() (kobject release path).
 */
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
        struct bus_type *edac_subsys;
        int err;

        debugf1("%s()\n", __func__);

        /* get the /sys/devices/system/edac reference */
        edac_subsys = edac_get_sysfs_subsys();
        if (edac_subsys == NULL) {
                debugf1("%s() no edac_subsys error\n", __func__);
                err = -ENODEV;
                goto err_out;
        }

        /* Point to the 'edac_subsys' this instance 'reports' to */
        edac_dev->edac_subsys = edac_subsys;

        /* Init the devices's kobject */
        memset(&edac_dev->kobj, 0, sizeof(struct kobject));

        /* Record which module 'owns' this control structure
         * and bump the ref count of the module
         */
        edac_dev->owner = THIS_MODULE;

        if (!try_module_get(edac_dev->owner)) {
                err = -ENODEV;
                goto err_mod_get;
        }

        /* register: creates /sys/.../edac/<name> with ktype_device_ctrl */
        err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
                                   &edac_subsys->dev_root->kobj,
                                   "%s", edac_dev->name);
        if (err) {
                debugf1("%s()Failed to register '.../edac/%s'\n",
                        __func__, edac_dev->name);
                goto err_kobj_reg;
        }
        kobject_uevent(&edac_dev->kobj, KOBJ_ADD);

        /* At this point, to 'free' the control struct,
         * edac_device_unregister_sysfs_main_kobj() must be used
         */

        debugf4("%s() Registered '.../edac/%s' kobject\n",
                __func__, edac_dev->name);

        return 0;

        /* Error exit stack */
err_kobj_reg:
        module_put(edac_dev->owner);

err_mod_get:
        edac_put_sysfs_subsys();

err_out:
        return err;
}
/*
 * edac_device_unregister_sysfs_main_kobj:
 *      the '..../edac/<name>' kobject
 *
 * Drops the main kobject reference and the subsys reference taken by
 * edac_device_register_sysfs_main_kobj().
 */
void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
{
        debugf0("%s()\n", __func__);
        debugf4("%s() name of kobject is: %s\n",
                __func__, kobject_name(&dev->kobj));

        /*
         * Unregister the edac device's kobject and
         * allow for reference count to reach 0 at which point
         * the callback will be called to:
         *   a) module_put() this module
         *   b) 'kfree' the memory
         */
        kobject_put(&dev->kobj);
        edac_put_sysfs_subsys();
}
/* edac_dev -> instance information */

/*
 * Set of low-level instance attribute show functions
 */
static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
                                char *data)
{
        return sprintf(data, "%u\n", instance->counters.ue_count);
}

static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
                                char *data)
{
        return sprintf(data, "%u\n", instance->counters.ce_count);
}

#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
#define to_instance_attr(a) container_of(a,struct instance_attribute,attr)

/* DEVICE instance kobject release() function */
static void edac_device_ctrl_instance_release(struct kobject *kobj)
{
        struct edac_device_instance *instance;

        debugf1("%s()\n", __func__);

        /* map from this kobj to the main control struct
         * and then dec the main kobj count
         */
        instance = to_instance(kobj);
        kobject_put(&instance->ctl->kobj);
}

/* instance specific attribute structure */
struct instance_attribute {
        struct attribute attr;
        ssize_t(*show) (struct edac_device_instance *, char *);
        ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
};

/* Function to 'show' fields from the edac_dev 'instance' structure
 * — generic sysfs dispatcher forwarding to the attribute's show() */
static ssize_t edac_dev_instance_show(struct kobject *kobj,
                                struct attribute *attr, char *buffer)
{
        struct edac_device_instance *instance = to_instance(kobj);
        struct instance_attribute *instance_attr = to_instance_attr(attr);

        if (instance_attr->show)
                return instance_attr->show(instance, buffer);
        return -EIO;
}

/* Function to 'store' fields into the edac_dev 'instance' structure
 * — generic sysfs dispatcher forwarding to the attribute's store() */
static ssize_t edac_dev_instance_store(struct kobject *kobj,
                                struct attribute *attr,
                                const char *buffer, size_t count)
{
        struct edac_device_instance *instance = to_instance(kobj);
        struct instance_attribute *instance_attr = to_instance_attr(attr);

        if (instance_attr->store)
                return instance_attr->store(instance, buffer, count);
        return -EIO;
}

/* edac_dev file operations for an 'instance' */
static const struct sysfs_ops device_instance_ops = {
        .show = edac_dev_instance_show,
        .store = edac_dev_instance_store
};

/* convenience constructor for a named instance attribute */
#define INSTANCE_ATTR(_name,_mode,_show,_store)        \
static struct instance_attribute attr_instance_##_name = {      \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
        .show   = _show,                                        \
        .store  = _store,                                       \
};

/*
 * Define attributes visible for the edac_device instance object
 *      Each contains a pointer to a show and an optional set
 *      function pointer that does the low level output/input
 */
INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);

/* list of edac_dev 'instance' attributes */
static struct instance_attribute *device_instance_attr[] = {
        &attr_instance_ce_count,
        &attr_instance_ue_count,
        NULL,
};

/* The 'ktype' for each edac_dev 'instance' */
static struct kobj_type ktype_instance_ctrl = {
        .release = edac_device_ctrl_instance_release,
        .sysfs_ops = &device_instance_ops,
        .default_attrs = (struct attribute **)device_instance_attr,
};
/* edac_dev -> instance -> block information */

#define to_block(k) container_of(k, struct edac_device_block, kobj)
#define to_block_attr(a) \
        container_of(a, struct edac_dev_sysfs_block_attribute, attr)

/*
 * Set of low-level block attribute show functions
 */
static ssize_t block_ue_count_show(struct kobject *kobj,
                                        struct attribute *attr, char *data)
{
        struct edac_device_block *block = to_block(kobj);

        return sprintf(data, "%u\n", block->counters.ue_count);
}

static ssize_t block_ce_count_show(struct kobject *kobj,
                                        struct attribute *attr, char *data)
{
        struct edac_device_block *block = to_block(kobj);

        return sprintf(data, "%u\n", block->counters.ce_count);
}

/* DEVICE block kobject release() function */
static void edac_device_ctrl_block_release(struct kobject *kobj)
{
        struct edac_device_block *block;

        debugf1("%s()\n", __func__);

        /* get the container of the kobj */
        block = to_block(kobj);

        /* map from 'block kobj' to 'block->instance->controller->main_kobj'
         * now 'release' the block kobject
         */
        kobject_put(&block->instance->ctl->kobj);
}

/* Function to 'show' fields from the edac_dev 'block' structure
 * — generic sysfs dispatcher forwarding to the attribute's show() */
static ssize_t edac_dev_block_show(struct kobject *kobj,
                                struct attribute *attr, char *buffer)
{
        struct edac_dev_sysfs_block_attribute *block_attr =
                                                to_block_attr(attr);

        if (block_attr->show)
                return block_attr->show(kobj, attr, buffer);
        return -EIO;
}

/* Function to 'store' fields into the edac_dev 'block' structure
 * — generic sysfs dispatcher forwarding to the attribute's store() */
static ssize_t edac_dev_block_store(struct kobject *kobj,
                                struct attribute *attr,
                                const char *buffer, size_t count)
{
        struct edac_dev_sysfs_block_attribute *block_attr;

        block_attr = to_block_attr(attr);

        if (block_attr->store)
                return block_attr->store(kobj, attr, buffer, count);
        return -EIO;
}

/* edac_dev file operations for a 'block' */
static const struct sysfs_ops device_block_ops = {
        .show = edac_dev_block_show,
        .store = edac_dev_block_store
};

/* convenience constructor for a named block attribute */
#define BLOCK_ATTR(_name,_mode,_show,_store)        \
static struct edac_dev_sysfs_block_attribute attr_block_##_name = {     \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
        .show   = _show,                                        \
        .store  = _store,                                       \
};

BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);

/* list of edac_dev 'block' attributes */
static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
        &attr_block_ce_count,
        &attr_block_ue_count,
        NULL,
};

/* The 'ktype' for each edac_dev 'block' */
static struct kobj_type ktype_block_ctrl = {
        .release = edac_device_ctrl_block_release,
        .sysfs_ops = &device_block_ops,
        .default_attrs = (struct attribute **)device_block_attr,
};
/* block ctor/dtor code */

/*
 * edac_device_create_block
 *
 * Register one 'block' kobject beneath @instance, holding an extra
 * reference on the main control kobject (released by the block's
 * release callback).  Optional driver-supplied block attributes are
 * created as sysfs files.  Returns 0 or a negative errno.
 */
static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
                                struct edac_device_instance *instance,
                                struct edac_device_block *block)
{
        int i;
        int err;
        struct edac_dev_sysfs_block_attribute *sysfs_attrib;
        struct kobject *main_kobj;

        debugf4("%s() Instance '%s' inst_p=%p  block '%s'  block_p=%p\n",
                __func__, instance->name, instance, block->name, block);
        debugf4("%s() block kobj=%p  block kobj->parent=%p\n",
                __func__, &block->kobj, &block->kobj.parent);

        /* init this block's kobject */
        memset(&block->kobj, 0, sizeof(struct kobject));

        /* bump the main kobject's reference count for this controller
         * and this instance is dependent on the main
         */
        main_kobj = kobject_get(&edac_dev->kobj);
        if (!main_kobj) {
                err = -ENODEV;
                goto err_out;
        }

        /* Add this block's kobject */
        err = kobject_init_and_add(&block->kobj, &ktype_block_ctrl,
                                   &instance->kobj,
                                   "%s", block->name);
        if (err) {
                debugf1("%s() Failed to register instance '%s'\n",
                        __func__, block->name);
                /* drop the main-kobj ref taken above; init_and_add failed
                 * so the block's release callback will not run */
                kobject_put(main_kobj);
                err = -ENODEV;
                goto err_out;
        }

        /* If there are driver level block attributes, then added them
         * to the block kobject
         */
        sysfs_attrib = block->block_attributes;
        if (sysfs_attrib && block->nr_attribs) {
                for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {

                        debugf4("%s() creating block attrib='%s' "
                                "attrib->%p to kobj=%p\n",
                                __func__,
                                sysfs_attrib->attr.name,
                                sysfs_attrib, &block->kobj);

                        /* Create each block_attribute file */
                        err = sysfs_create_file(&block->kobj,
                                                &sysfs_attrib->attr);
                        if (err)
                                goto err_on_attrib;
                }
        }
        kobject_uevent(&block->kobj, KOBJ_ADD);

        return 0;

        /* Error unwind stack */
err_on_attrib:
        /* releases the block kobj, which in turn drops the main-kobj ref */
        kobject_put(&block->kobj);

err_out:
        return err;
}
/*
 * edac_device_delete_block(edac_dev,block);
 *
 * Remove any driver-supplied attribute files, then drop the block's
 * kobject (its release callback also drops the main-kobj reference).
 */
static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
                                struct edac_device_block *block)
{
        struct edac_dev_sysfs_block_attribute *sysfs_attrib;
        int i;

        /* if this block has 'attributes' then we need to iterate over the list
         * and 'remove' the attributes on this block
         */
        sysfs_attrib = block->block_attributes;
        if (sysfs_attrib && block->nr_attribs) {
                for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {

                        /* remove each block_attrib file */
                        sysfs_remove_file(&block->kobj,
                                (struct attribute *) sysfs_attrib);
                }
        }

        /* unregister this block's kobject, SEE:
         *      edac_device_ctrl_block_release() callback operation
         */
        kobject_put(&block->kobj);
}
/* instance ctor/dtor code */

/*
 * edac_device_create_instance
 *      create just one instance of an edac_device 'instance'
 *
 * Registers the instance kobject under the main edac_device kobject
 * (holding an extra main-kobj reference) and then registers all of the
 * instance's blocks.  Returns 0 or a negative errno; on error nothing
 * is left registered.
 */
static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
                                int idx)
{
        int i, j;
        int err;
        struct edac_device_instance *instance;
        struct kobject *main_kobj;

        instance = &edac_dev->instances[idx];

        /* Init the instance's kobject */
        memset(&instance->kobj, 0, sizeof(struct kobject));

        instance->ctl = edac_dev;

        /* bump the main kobject's reference count for this controller
         * and this instance is dependent on the main
         */
        main_kobj = kobject_get(&edac_dev->kobj);
        if (!main_kobj) {
                err = -ENODEV;
                goto err_out;
        }

        /* Formally register this instance's kobject under the edac_device */
        err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
                                   &edac_dev->kobj, "%s", instance->name);
        if (err != 0) {
                debugf2("%s() Failed to register instance '%s'\n",
                        __func__, instance->name);
                /* init_and_add failed, so the instance release callback
                 * will not run; drop the main-kobj ref explicitly */
                kobject_put(main_kobj);
                goto err_out;
        }

        debugf4("%s() now register '%d' blocks for instance %d\n",
                __func__, instance->nr_blocks, idx);

        /* register all blocks of this instance */
        for (i = 0; i < instance->nr_blocks; i++) {
                err = edac_device_create_block(edac_dev, instance,
                                                &instance->blocks[i]);
                if (err) {
                        /* If any fail, remove all previous ones */
                        for (j = 0; j < i; j++)
                                edac_device_delete_block(edac_dev,
                                                        &instance->blocks[j]);
                        goto err_release_instance_kobj;
                }
        }
        kobject_uevent(&instance->kobj, KOBJ_ADD);

        debugf4("%s() Registered instance %d '%s' kobject\n",
                __func__, idx, instance->name);

        return 0;

        /* error unwind stack */
err_release_instance_kobj:
        kobject_put(&instance->kobj);

err_out:
        return err;
}
/*
 * edac_device_remove_instance
 *      remove an edac_device instance
 *
 * Deletes all blocks of instance @idx, then drops the instance kobject
 * (whose release callback drops the main-kobj reference).
 */
static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
                                int idx)
{
        struct edac_device_instance *instance;
        int i;

        instance = &edac_dev->instances[idx];

        /* unregister all blocks in this instance */
        for (i = 0; i < instance->nr_blocks; i++)
                edac_device_delete_block(edac_dev, &instance->blocks[i]);

        /* unregister this instance's kobject, SEE:
         *      edac_device_ctrl_instance_release() for callback operation
         */
        kobject_put(&instance->kobj);
}
/*
 * edac_device_create_instances
 *      create the first level of 'instances' for this device
 *      (ie  'cache' might have 'cache0', 'cache1', 'cache2', etc
 *
 * Returns 0 on success; on failure every instance created so far is
 * torn down again and the error is returned.
 */
static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
{
        int inst, err;

        debugf0("%s()\n", __func__);

        for (inst = 0; inst < edac_dev->nr_instances; inst++) {
                err = edac_device_create_instance(edac_dev, inst);
                if (err) {
                        /* unwind the instances registered before this one */
                        while (--inst >= 0)
                                edac_device_delete_instance(edac_dev, inst);
                        return err;
                }
        }

        return 0;
}
/*
 * edac_device_delete_instances(edac_dev);
 *      unregister all the kobjects of the instances
 */
static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
{
        int idx = 0;

        /* tear down each registered instance in creation order */
        while (idx < edac_dev->nr_instances)
                edac_device_delete_instance(edac_dev, idx++);
}
/* edac_dev sysfs ctor/dtor code */

/*
 * edac_device_add_main_sysfs_attributes
 *      add some attributes to this instance's main kobject
 *
 * Walks the driver's NULL-name-terminated attribute array (if any) and
 * creates a sysfs file for each entry.  Returns 0, or the first
 * sysfs_create_file() error.
 */
static int edac_device_add_main_sysfs_attributes(
                        struct edac_device_ctl_info *edac_dev)
{
        struct edac_dev_sysfs_attribute *attrib = edac_dev->sysfs_attributes;
        int err = 0;

        if (attrib) {
                for (; attrib->attr.name != NULL; attrib++) {
                        err = sysfs_create_file(&edac_dev->kobj,
                                                (struct attribute *) attrib);
                        if (err)
                                break;
                }
        }

        return err;
}
/*
 * edac_device_remove_main_sysfs_attributes
 *      remove any attributes to this instance's main kobject
 *
 * Mirrors edac_device_add_main_sysfs_attributes(): removes every file
 * named in the driver's NULL-name-terminated attribute array.
 */
static void edac_device_remove_main_sysfs_attributes(
                        struct edac_device_ctl_info *edac_dev)
{
        struct edac_dev_sysfs_attribute *attrib = edac_dev->sysfs_attributes;

        /* nothing to do when the driver supplied no main attributes */
        if (!attrib)
                return;

        for (; attrib->attr.name != NULL; attrib++)
                sysfs_remove_file(&edac_dev->kobj,
                                  (struct attribute *) attrib);
}
/*
 * edac_device_create_sysfs() Constructor
 *
 * accept a created edac_device control structure
 * and 'export' it to sysfs. The 'main' kobj should already have been
 * created. 'instance' and 'block' kobjects should be registered
 * along with any 'block' attributes from the low driver. In addition,
 * the main attributes (if any) are connected to the main kobject of
 * the control structure.
 *
 * Return:
 *      0       Success
 *      !0      Failure
 */
int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
{
        int err;
        struct kobject *edac_kobj = &edac_dev->kobj;

        debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);

        /* go create any main attributes callers wants */
        err = edac_device_add_main_sysfs_attributes(edac_dev);
        if (err) {
                debugf0("%s() failed to add sysfs attribs\n", __func__);
                goto err_out;
        }

        /* create a symlink from the edac device
         * to the platform 'device' being used for this
         */
        err = sysfs_create_link(edac_kobj,
                                &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
        if (err) {
                debugf0("%s() sysfs_create_link() returned err= %d\n",
                        __func__, err);
                goto err_remove_main_attribs;
        }

        /* Create the first level instance directories
         * In turn, the nested blocks beneath the instances will
         * be registered as well
         */
        err = edac_device_create_instances(edac_dev);
        if (err) {
                debugf0("%s() edac_device_create_instances() "
                        "returned err= %d\n", __func__, err);
                goto err_remove_link;
        }

        debugf4("%s() create-instances done, idx=%d\n",
                __func__, edac_dev->dev_idx);

        return 0;

        /* Error unwind stack */
err_remove_link:
        /* remove the sym link */
        sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);

err_remove_main_attribs:
        edac_device_remove_main_sysfs_attributes(edac_dev);

err_out:
        return err;
}
/*
 * edac_device_remove_sysfs() destructor
 *
 * given an edac_device struct, tear down the kobject resources
 * in the reverse order of edac_device_create_sysfs().
 */
void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
{
        debugf0("%s()\n", __func__);

        /* remove any main attributes for this device */
        edac_device_remove_main_sysfs_attributes(edac_dev);

        /* remove the device sym link */
        sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);

        /* walk the instance/block kobject tree, deconstructing it */
        edac_device_delete_instances(edac_dev);
}
| gpl-2.0 |
Marvell-Semi/EBU_mainline_public | drivers/ata/pata_jmicron.c | 9046 | 4695 | /*
* pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
* PATA port of the controller. The SATA ports are
* driven by AHCI in the usual configuration although
* this driver can handle other setups if we need it.
*
* (c) 2006 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_jmicron"
#define DRV_VERSION "0.1.5"
typedef enum {
PORT_PATA0 = 0,
PORT_PATA1 = 1,
PORT_SATA = 2,
} port_type;
/**
 *	jmicron_pre_reset	-	check for 40/80 pin
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Perform the PATA port setup we need.
 *
 *	On the Jmicron 361/363 there is a single PATA port that can be mapped
 *	either as primary or secondary (or neither). We don't do any policy
 *	and setup here. We assume that has been done by init_one and the
 *	BIOS.
 */
static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 ctrl, ctrl5;
	int phys_port = ap->port_no;
	port_type mapping[2] = { PORT_SATA, PORT_SATA };

	/* Bail out early if the BIOS left our port disabled */
	pci_read_config_dword(pdev, 0x40, &ctrl);
	if (!(ctrl & (1 << (4 * ap->port_no))))
		return -ENOENT;

	/*
	 * Two basic layouts exist: both SATA ports merged as master/slave
	 * with the secondary channel as PATA, or SATA on both channels.
	 */
	if (ctrl & (1 << 23))
		mapping[1] = PORT_PATA0;

	/* The 365/366 may route the second PATA port to the primary channel */
	pci_read_config_dword(pdev, 0x80, &ctrl5);
	if (ctrl5 & (1 << 24))
		mapping[0] = PORT_PATA1;

	/* The two ports may then be logically swapped by the firmware */
	if (ctrl & (1 << 22))
		phys_port ^= 1;

	/*
	 * Now that we know which physical port this is, we can do the
	 * cable checking for it; no other plumbing is needed.
	 */
	switch (mapping[phys_port]) {
	case PORT_PATA0:
		if (!(ctrl & (1 << 5)))
			return -ENOENT;
		/* Bit 3: 40/80 pin primary */
		ap->cbl = (ctrl & (1 << 3)) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
		break;
	case PORT_PATA1:
		/* Bit 21 is set if the port is enabled */
		if (!(ctrl5 & (1 << 21)))
			return -ENOENT;
		/* Bit 19: 40/80 pin secondary */
		ap->cbl = (ctrl5 & (1 << 19)) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
		break;
	case PORT_SATA:
		ap->cbl = ATA_CBL_SATA;
		break;
	}
	return ata_sff_prereset(link, deadline);
}
/* No PIO or DMA methods needed for this device */
static struct scsi_host_template jmicron_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Standard BMDMA operations; only the prereset hook is specialized */
static struct ata_port_operations jmicron_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.prereset		= jmicron_pre_reset,
};

/**
 *	jmicron_init_one - Register Jmicron ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in jmicron_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */
static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* One fixed port configuration: PIO4, MWDMA2, up to UDMA5 */
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,

		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,

		.port_ops	= &jmicron_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}

/* Match any JMicron device whose PCI class is IDE storage */
static const struct pci_device_id jmicron_pci_tbl[] = {
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
	{ }	/* terminate list */
};

static struct pci_driver jmicron_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= jmicron_pci_tbl,
	.probe		= jmicron_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	/* Generic libata PCI suspend/resume handlers are sufficient */
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};

/* Module entry point: register the PCI driver */
static int __init jmicron_init(void)
{
	return pci_register_driver(&jmicron_pci_driver);
}

/* Module exit point: unregister the PCI driver */
static void __exit jmicron_exit(void)
{
	pci_unregister_driver(&jmicron_pci_driver);
}

module_init(jmicron_init);
module_exit(jmicron_exit);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
OliverG96/android_kernel_samsung_golden | drivers/ata/pata_jmicron.c | 9046 | 4695 | /*
* pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
* PATA port of the controller. The SATA ports are
* driven by AHCI in the usual configuration although
* this driver can handle other setups if we need it.
*
* (c) 2006 Red Hat
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_jmicron"
#define DRV_VERSION "0.1.5"
typedef enum {
PORT_PATA0 = 0,
PORT_PATA1 = 1,
PORT_SATA = 2,
} port_type;
/**
* jmicron_pre_reset - check for 40/80 pin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
* Perform the PATA port setup we need.
*
* On the Jmicron 361/363 there is a single PATA port that can be mapped
* either as primary or secondary (or neither). We don't do any policy
* and setup here. We assume that has been done by init_one and the
* BIOS.
*/
static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 control;		/* PCI config dword 0x40: port mapping/enable */
	u32 control5;		/* PCI config dword 0x80: 365/366 extras */
	int port_mask = 1 << (4 * ap->port_no);
	int port = ap->port_no;
	port_type port_map[2];

	/* Check if our port is enabled */
	pci_read_config_dword(pdev, 0x40, &control);
	if ((control & port_mask) == 0)
		return -ENOENT;

	/* There are two basic mappings. One has the two SATA ports merged
	   as master/slave and the secondary as PATA, the other has only the
	   SATA port mapped */
	if (control & (1 << 23)) {
		port_map[0] = PORT_SATA;
		port_map[1] = PORT_PATA0;
	} else {
		port_map[0] = PORT_SATA;
		port_map[1] = PORT_SATA;
	}

	/* The 365/366 may have this bit set to map the second PATA port
	   as the internal primary channel */
	pci_read_config_dword(pdev, 0x80, &control5);
	if (control5 & (1 << 24))
		port_map[0] = PORT_PATA1;

	/* The two ports may then be logically swapped by the firmware */
	if (control & (1 << 22))
		port = port ^ 1;

	/*
	 * Now we know which physical port we are talking about we can
	 * actually do our cable checking etc. Thankfully we don't need
	 * to do the plumbing for other cases.
	 */
	switch (port_map[port]) {
	case PORT_PATA0:
		/* Bit 5 clear means the PATA port is disabled */
		if ((control & (1 << 5)) == 0)
			return -ENOENT;
		if (control & (1 << 3))		/* 40/80 pin primary */
			ap->cbl = ATA_CBL_PATA40;
		else
			ap->cbl = ATA_CBL_PATA80;
		break;
	case PORT_PATA1:
		/* Bit 21 is set if the port is enabled */
		if ((control5 & (1 << 21)) == 0)
			return -ENOENT;
		if (control5 & (1 << 19))	/* 40/80 pin secondary */
			ap->cbl = ATA_CBL_PATA40;
		else
			ap->cbl = ATA_CBL_PATA80;
		break;
	case PORT_SATA:
		/* SATA links need no 40/80-wire cable detection */
		ap->cbl = ATA_CBL_SATA;
		break;
	}
	return ata_sff_prereset(link, deadline);
}
/* No PIO or DMA methods needed for this device */
static struct scsi_host_template jmicron_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Inherit generic BMDMA ops; override only the prereset hook */
static struct ata_port_operations jmicron_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.prereset		= jmicron_pre_reset,
};

/**
 *	jmicron_init_one - Register Jmicron ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in jmicron_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */
static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Single fixed port configuration for all matched devices */
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,

		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,

		.port_ops	= &jmicron_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
}

/* Claim every JMicron function with an IDE storage class code */
static const struct pci_device_id jmicron_pci_tbl[] = {
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
	{ }	/* terminate list */
};

static struct pci_driver jmicron_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= jmicron_pci_tbl,
	.probe		= jmicron_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};

/* Register the driver with the PCI core at module load */
static int __init jmicron_init(void)
{
	return pci_register_driver(&jmicron_pci_driver);
}

/* Unregister on module unload */
static void __exit jmicron_exit(void)
{
	pci_unregister_driver(&jmicron_pci_driver);
}

module_init(jmicron_init);
module_exit(jmicron_exit);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
shanzin/M7_Lollipop_Kernel | sound/oss/aedsp16.c | 14166 | 36434 | /*
sound/oss/aedsp16.c
Audio Excel DSP 16 software configuration routines
Copyright (C) 1995,1996,1997,1998 Riccardo Facchetti (fizban@tin.it)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Include the main OSS Lite header file. It include all the os, OSS Lite, etc
* headers needed by this source.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include "sound_config.h"
/*
READ THIS
This module started to configure the Audio Excel DSP 16 Sound Card.
Now works with the SC-6000 (old aedsp16) and new SC-6600 based cards.
NOTE: I have NO idea about Audio Excel DSP 16 III. If someone owns this
audio card and want to see the kernel support for it, please contact me.
Audio Excel DSP 16 is an SB pro II, Microsoft Sound System and MPU-401
compatible card.
It is software-only configurable (no jumpers to hard-set irq/dma/mpu-irq),
so before this module, the only way to configure the DSP under linux was
boot the MS-DOS loading the sound.sys device driver (this driver soft-
configure the sound board hardware by massaging someone of its registers),
and then ctrl-alt-del to boot linux with the DSP configured by the DOS
driver.
This module works configuring your Audio Excel DSP 16's irq, dma and
mpu-401-irq. The OSS Lite routines rely on the fact that if the
hardware is there, they can detect it. The problem with AEDSP16 is
that no hardware can be found by the probe routines if the sound card
is not configured properly. Sometimes the kernel probe routines can find
an SBPRO even when the card is not configured (this is the standard setup
of the card), but the SBPRO emulation don't work well if the card is not
properly initialized. For this reason
aedsp16_init_board()
routine is called before the OSS Lite probe routines try to detect the
hardware.
NOTE (READ THE NOTES TOO, THEY CONTAIN USEFUL INFORMATION)
NOTE: Now it works with SC-6000 and SC-6600 based audio cards. The new cards
have no jumper switch at all. No more WSS or MPU-401 I/O port switches. They
have to be configured by software.
NOTE: The driver is merged with the new OSS Lite sound driver. It works
as a lowlevel driver.
The Audio Excel DSP 16 Sound Card emulates both SBPRO and MSS;
the OSS Lite sound driver can be configured for SBPRO and MSS cards
at the same time, but the aedsp16 can't be two cards!!
When we configure it, we have to choose the SBPRO or the MSS emulation
for AEDSP16. We also can install a *REAL* card of the other type (see [1]).
NOTE: If someone can test the combination AEDSP16+MSS or AEDSP16+SBPRO
please let me know if it works.
The MPU-401 support can be compiled in together with one of the other
two operating modes.
NOTE: This is something like plug-and-play: we have only to plug
the AEDSP16 board in the socket, and then configure and compile
a kernel that uses the AEDSP16 software configuration capability.
No jumper setting is needed!
For example, if you want AEDSP16 to be an SBPro, on irq 10, dma 3
you have just to make config the OSS Lite package, configuring
the AEDSP16 sound card, then activating the SBPro emulation mode
and at last configuring IRQ and DMA.
Compile the kernel and run it.
NOTE: This means for SC-6000 cards that you can choose irq and dma,
but not the I/O addresses. To change I/O addresses you have to set
them with jumpers. For SC-6600 cards you have no jumpers so you have
to set up your full card configuration in the make config.
You can change the irq/dma/mirq settings WITHOUT THE NEED to open
your computer and massage the jumpers (there are no irq/dma/mirq
jumpers to be configured anyway, only I/O BASE values have to be
configured with jumpers)
For some incomprehensible reason, the card default of irq 7, dma 1,
don't work for me. Seems to be an IRQ or DMA conflict. Under heavy
HDD work, the kernel start to erupt out a lot of messages like:
'Sound: DMA timed out - IRQ/DRQ config error?'
For what I can say, I have NOT any conflict at irq 7 (under linux I'm
using the lp polling driver), and dma line 1 is unused as stated by
/proc/dma. I can suppose this is a bug of AEDSP16. I know my hardware so
I'm pretty sure I have not any conflict, but may be I'm wrong. Who knows!
Anyway a setting of irq 10, dma 3 works really fine.
NOTE: if someone can use AEDSP16 with irq 7, dma 1, please let me know
the emulation mode, all the installed hardware and the hardware
configuration (irq and dma settings of all the hardware).
This init module should work with SBPRO+MSS, when one of the two is
the AEDSP16 emulation and the other the real card. (see [1])
For example:
AEDSP16 (0x220) in SBPRO emu (0x220) + real MSS + other
AEDSP16 (0x220) in MSS emu + real SBPRO (0x240) + other
MPU401 should work. (see [2])
[1]
---
Date: Mon, 29 Jul 1997 08:35:40 +0100
From: Mr S J Greenaway <sjg95@unixfe.rl.ac.uk>
[...]
Just to let you know got my Audio Excel (emulating a MSS) working
with my original SB16, thanks for the driver!
[...]
---
[2] Not tested by me for lack of hardware.
TODO, WISHES AND TECH
- About I/O ports allocation -
Request the 2x0h region (port base) in any case if we are using this card.
NOTE: the "aedsp16 (base)" string with which we are requesting the aedsp16
port base region (see code) does not mean necessarily that we are emulating
sbpro. Even if this region is the sbpro I/O ports region, we use this
region to access the control registers of the card, and if emulating
sbpro, I/O sbpro registers too. If we are emulating MSS, the sbpro
registers are not used, in no way, to emulate an sbpro: they are
used only for configuration purposes.
Started Fri Mar 17 16:13:18 MET 1995
v0.1 (ALPHA, was a user-level program called AudioExcelDSP16.c)
- Initial code.
v0.2 (ALPHA)
- Cleanups.
- Integrated with Linux voxware v 2.90-2 kernel sound driver.
- SoundBlaster Pro mode configuration.
- Microsoft Sound System mode configuration.
- MPU-401 mode configuration.
v0.3 (ALPHA)
- Cleanups.
- Rearranged the code to let aedsp16_init_board be more general.
- Erased the REALLY_SLOW_IO. We don't need it. Erased the linux/io.h
inclusion too. We rely on os.h
- Used the to get a variable
len string (we are not sure about the len of Copyright string).
This works with any SB and compatible.
- Added the code to request_region at device init (should go in
the main body of voxware).
v0.4 (BETA)
- Better configure.c patch for aedsp16 configuration (better
logic of inclusion of AEDSP16 support)
- Modified the conditional compilation to better support more than
one sound card of the emulated type (read the NOTES above)
- Moved the sb init routine from the attach to the very first
probe in sb_card.c
- Rearrangements and cleanups
- Wiped out some unnecessary code and variables: this is kernel
code so it is better save some TEXT and DATA
- Fixed the request_region code. We must allocate the aedsp16 (sbpro)
I/O ports in any case because they are used to access the DSP
configuration registers and we can not allow anyone to get them.
v0.5
- cleanups on comments
- prep for diffs against v3.0-proto-950402
v0.6
- removed the request_region()s when compiling the MODULE sound.o
because we are not allowed (by the actual voxware structure) to
release_region()
v0.7 (pre ALPHA, not distributed)
- started porting this module to kernel 1.3.84. Dummy probe/attach
routines.
v0.8 (ALPHA)
- attached all the init routines.
v0.9 (BETA)
- Integrated with linux-pre2.0.7
- Integrated with configuration scripts.
- Cleaned up and beautified the code.
v0.9.9 (BETA)
- Thanks to Piercarlo Grandi: corrected the conditional compilation code.
Now only the code configured is compiled in, with some memory saving.
v0.9.10
- Integration into the sound/lowlevel/ section of the sound driver.
- Re-organized the code.
v0.9.11 (not distributed)
- Rewritten the init interface-routines to initialize the AEDSP16 in
one shot.
- More cosmetics.
- SC-6600 support.
- More soft/hard configuration.
v0.9.12
- Refined the v0.9.11 code with conditional compilation to distinguish
between SC-6000 and SC-6600 code.
v1.0.0
- Prep for merging with OSS Lite and Linux kernel 2.1.13
- Corrected a bug in request/check/release region calls (thanks to the
new kernel exception handling).
v1.1
- Revamped for integration with new modularized sound drivers: to enhance
the flexibility of modular version, I have removed all the conditional
compilation for SBPRO, MPU and MSS code. Now it is all managed with
the ae_config structure.
v1.2
- Module informations added.
- Removed aedsp16_delay_10msec(), now using mdelay(10)
- All data and funcs moved to .*.init section.
v1.3
Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/27
- got rid of check_region
Known Problems:
- Audio Excel DSP 16 III don't work with this driver.
Credits:
Many thanks to Gerald Britton <gbritton@CapAccess.org>. He helped me a
lot in testing the 0.9.11 and 0.9.12 versions of this driver.
*/
#define VERSION "1.3" /* Version of Audio Excel DSP 16 driver */
#undef AEDSP16_DEBUG /* Define this to 1 to enable debug code */
#undef AEDSP16_DEBUG_MORE /* Define this to 1 to enable more debug */
#undef AEDSP16_INFO /* Define this to 1 to enable info code */
#if defined(AEDSP16_DEBUG)
# define DBG(x) printk x
# if defined(AEDSP16_DEBUG_MORE)
# define DBG1(x) printk x
# else
# define DBG1(x)
# endif
#else
# define DBG(x)
# define DBG1(x)
#endif
/*
* Misc definitions
*/
#define TRUE 1
#define FALSE 0
/*
* Region Size for request/check/release region.
*/
#define IOBASE_REGION_SIZE 0x10
/*
* Hardware related defaults
*/
#define DEF_AEDSP16_IOB 0x220 /* 0x220(default) 0x240 */
#define DEF_AEDSP16_IRQ 7 /* 5 7(default) 9 10 11 */
#define DEF_AEDSP16_MRQ 0 /* 5 7 9 10 0(default), 0 means disable */
#define DEF_AEDSP16_DMA 1 /* 0 1(default) 3 */
/*
* Commands of AEDSP16's DSP (SBPRO+special).
* Some of them are COMMAND_xx, in the future they may change.
*/
#define WRITE_MDIRQ_CFG 0x50 /* Set M&I&DRQ mask (the real config) */
#define COMMAND_52 0x52 /* */
#define READ_HARD_CFG 0x58 /* Read Hardware Config (I/O base etc) */
#define COMMAND_5C 0x5c /* */
#define COMMAND_60 0x60 /* */
#define COMMAND_66 0x66 /* */
#define COMMAND_6C 0x6c /* */
#define COMMAND_6E 0x6e /* */
#define COMMAND_88 0x88 /* */
#define DSP_INIT_MSS 0x8c /* Enable Microsoft Sound System mode */
#define COMMAND_C5 0xc5 /* */
#define GET_DSP_VERSION 0xe1 /* Get DSP Version */
#define GET_DSP_COPYRIGHT 0xe3 /* Get DSP Copyright */
/*
* Offsets of AEDSP16 DSP I/O ports. The offset is added to base I/O port
* to have the actual I/O port.
* Register permissions are:
* (wo) == Write Only
* (ro) == Read Only
* (w-) == Write
* (r-) == Read
*/
#define DSP_RESET 0x06 /* offset of DSP RESET (wo) */
#define DSP_READ 0x0a /* offset of DSP READ (ro) */
#define DSP_WRITE 0x0c /* offset of DSP WRITE (w-) */
#define DSP_COMMAND 0x0c /* offset of DSP COMMAND (w-) */
#define DSP_STATUS 0x0c /* offset of DSP STATUS (r-) */
#define DSP_DATAVAIL 0x0e /* offset of DSP DATA AVAILABLE (ro) */
#define RETRY 10 /* Various retry values on I/O opera- */
#define STATUSRETRY 1000 /* tions. Sometimes we have to */
#define HARDRETRY 500000 /* wait for previous cmd to complete */
/*
* Size of character arrays that store name and version of sound card
*/
#define CARDNAMELEN 15 /* Size of the card's name in chars */
#define CARDVERLEN 10 /* Size of the card's version in chars */
#define CARDVERDIGITS 2 /* Number of digits in the version */
#if defined(CONFIG_SC6600)
/*
* Bitmapped flags of hard configuration
*/
/*
* Decode macros (xl == low byte, xh = high byte)
*/
#define IOBASE(xl) ((xl & 0x01)?0x240:0x220)
#define JOY(xl) (xl & 0x02)
#define MPUADDR(xl) ( \
(xl & 0x0C)?0x330: \
(xl & 0x08)?0x320: \
(xl & 0x04)?0x310: \
0x300)
#define WSSADDR(xl) ((xl & 0x10)?0xE80:0x530)
#define CDROM(xh) (xh & 0x20)
#define CDROMADDR(xh) (((xh & 0x1F) << 4) + 0x200)
/*
* Encode macros
*/
#define BLDIOBASE(xl, val) { \
xl &= ~0x01; \
if (val == 0x240) \
xl |= 0x01; \
}
#define BLDJOY(xl, val) { \
xl &= ~0x02; \
if (val == 1) \
xl |= 0x02; \
}
#define BLDMPUADDR(xl, val) { \
xl &= ~0x0C; \
switch (val) { \
case 0x330: \
xl |= 0x0C; \
break; \
case 0x320: \
xl |= 0x08; \
break; \
case 0x310: \
xl |= 0x04; \
break; \
case 0x300: \
xl |= 0x00; \
break; \
default: \
xl |= 0x00; \
break; \
} \
}
#define BLDWSSADDR(xl, val) { \
xl &= ~0x10; \
if (val == 0xE80) \
xl |= 0x10; \
}
#define BLDCDROM(xh, val) { \
xh &= ~0x20; \
if (val == 1) \
xh |= 0x20; \
}
#define BLDCDROMADDR(xh, val) { \
int tmp = val; \
tmp -= 0x200; \
tmp >>= 4; \
tmp &= 0x1F; \
xh |= tmp; \
xh &= 0x7F; \
xh |= 0x40; \
}
#endif /* CONFIG_SC6600 */
/*
* Bit mapped flags for calling aedsp16_init_board(), and saving the current
* emulation mode.
*/
#define INIT_NONE (0 )
#define INIT_SBPRO (1<<0)
#define INIT_MSS (1<<1)
#define INIT_MPU401 (1<<2)
static int soft_cfg __initdata = 0; /* bitmapped config */
static int soft_cfg_mss __initdata = 0; /* bitmapped mss config */
static int ver[CARDVERDIGITS] __initdata = {0, 0}; /* DSP Ver:
hi->ver[0] lo->ver[1] */
#if defined(CONFIG_SC6600)
static int hard_cfg[2] /* lo<-hard_cfg[0] hi<-hard_cfg[1] */
__initdata = { 0, 0};
#endif /* CONFIG_SC6600 */
#if defined(CONFIG_SC6600)
/* Decoded hard configuration */
struct d_hcfg {
int iobase;
int joystick;
int mpubase;
int wssbase;
int cdrom;
int cdrombase;
};
static struct d_hcfg decoded_hcfg __initdata = {0, };
#endif /* CONFIG_SC6600 */
/* orVals contain the values to be or'ed */
struct orVals {
	int val;		/* irq|mirq|dma value being looked up */
	int or;			/* soft_cfg |= TheStruct.or */
};

/* aedsp16_info contain the audio card configuration */
struct aedsp16_info {
	int base_io;		/* base I/O address for accessing card */
	int irq;		/* irq value for DSP I/O */
	int mpu_irq;		/* irq for mpu401 interface I/O */
	int dma;		/* dma value for DSP I/O */
	int mss_base;		/* base I/O for Microsoft Sound System */
	int mpu_base;		/* base I/O for MPU-401 emulation */
	int init;		/* Initialization status of the card */
};

/*
 * Magic values that the DSP will eat when configuring irq/mirq/dma.
 * Each table maps a resource number to the bit pattern OR'ed into
 * soft_cfg, and is terminated by an all-zero entry.
 */
/* DSP IRQ conversion array */
static struct orVals orIRQ[] __initdata = {
	{0x05, 0x28},
	{0x07, 0x08},
	{0x09, 0x10},
	{0x0a, 0x18},
	{0x0b, 0x20},
	{0x00, 0x00}	/* table terminator */
};

/* MPU-401 IRQ conversion array */
static struct orVals orMIRQ[] __initdata = {
	{0x05, 0x04},
	{0x07, 0x44},
	{0x09, 0x84},
	{0x0a, 0xc4},
	{0x00, 0x00}	/* table terminator */
};

/* DMA Channels conversion array */
static struct orVals orDMA[] __initdata = {
	{0x00, 0x01},
	{0x01, 0x02},
	{0x03, 0x03},
	{0x00, 0x00}	/* table terminator */
};

/* Compile-time defaults; MSS/MPU base I/O start unset (-1) */
static struct aedsp16_info ae_config = {
	DEF_AEDSP16_IOB,
	DEF_AEDSP16_IRQ,
	DEF_AEDSP16_MRQ,
	DEF_AEDSP16_DMA,
	-1,
	-1,
	INIT_NONE
};
/*
* Buffers to store audio card information
*/
static char DSPCopyright[CARDNAMELEN + 1] __initdata = {0, };
static char DSPVersion[CARDVERLEN + 1] __initdata = {0, };
/*
 * Poll the DSP "data available" register until bit 7 goes high or
 * the retry budget runs out.  Returns TRUE when a byte is ready to
 * be read from the DSP, FALSE on timeout.
 */
static int __init aedsp16_wait_data(int port)
{
	int budget = STATUSRETRY;
	unsigned char status = 0;

	DBG1(("aedsp16_wait_data (0x%x): ", port));

	/* Bit 7 of DATAVAIL signals pending data from the DSP. */
	do {
		status = inb(port + DSP_DATAVAIL);
	} while (!(status & 0x80) && budget--);

	if (status & 0x80) {
		DBG1(("success.\n"));
		return TRUE;
	}

	DBG1(("failure.\n"));
	return FALSE;
}
/*
 * Read one byte from the DSP READ port, first waiting for the DSP
 * to signal data available.  Returns the byte read, or -1 on timeout.
 */
static int __init aedsp16_read(int port)
{
	int data;

	DBG((" Read DSP Byte (0x%x): ", port));

	/* Nothing to fetch if the DSP never raises "data available". */
	if (aedsp16_wait_data(port) == FALSE) {
		DBG(("failure.\n"));
		return -1;
	}

	data = inb(port + DSP_READ);
	DBG(("read [0x%x]/{%c}.\n", data, data));

	return data;
}
/* After a reset the DSP answers 0xaa; anything else means failure. */
static int __init aedsp16_test_dsp(int port)
{
	if (aedsp16_read(port) == 0xaa)
		return TRUE;
	return FALSE;
}
/*
 * Pulse the DSP RESET register (1 then 0) and verify the DSP comes
 * back alive by checking for its 0xaa reset acknowledgement.
 * Returns TRUE on success, FALSE otherwise.
 */
static int __init aedsp16_dsp_reset(int port)
{
	/*
	 * Reset DSP
	 */

	DBG(("Reset DSP:\n"));

	outb(1, (port + DSP_RESET));
	udelay(10);
	outb(0, (port + DSP_RESET));
	udelay(10);
	/* NOTE(review): two consecutive udelay(10) calls — presumably a
	   deliberate 20us settle time after deasserting reset; confirm
	   against the hardware datasheet before "simplifying". */
	udelay(10);

	if (aedsp16_test_dsp(port) == TRUE) {
		DBG(("success.\n"));
		return TRUE;
	} else
		DBG(("failure.\n"));
	return FALSE;
}
/*
 * Write one command/data byte to the DSP.  The STATUS port is polled
 * until the DSP reports it can accept data (bit 7 clear), then the
 * byte is written to the COMMAND port.  Returns 0 on success, -1
 * when the retry budget is exhausted.
 */
static int __init aedsp16_write(int port, int cmd)
{
	unsigned char status;
	int budget = HARDRETRY;

	DBG((" Write DSP Byte (0x%x) [0x%x]: ", port, cmd));

	do {
		status = inb(port + DSP_STATUS);
		/* DSP is ready to receive when bit 7 of status is clear. */
		if (!(status & 0x80)) {
			outb(cmd, port + DSP_COMMAND);
			DBG(("success.\n"));
			return 0;
		}
	} while (budget--);

	DBG(("timeout.\n"));
	printk("[AEDSP16] DSP Command (0x%x) timeout.\n", cmd);

	return -1;
}
#if defined(CONFIG_SC6600)
#if defined(AEDSP16_INFO) || defined(AEDSP16_DEBUG)
/* Pretty-print the decoded SC-6600 hard configuration (debug/info builds). */
void __init aedsp16_pinfo(void) {
	DBG(("\n Base address:  %x\n", decoded_hcfg.iobase));
	DBG((" Joystick    : %s present\n", decoded_hcfg.joystick?"":" not"));
	DBG((" WSS addr    :  %x\n", decoded_hcfg.wssbase));
	DBG((" MPU-401 addr:  %x\n", decoded_hcfg.mpubase));
	DBG((" CDROM       : %s present\n", (decoded_hcfg.cdrom!=4)?"":" not"));
	DBG((" CDROMADDR   :  %x\n\n", decoded_hcfg.cdrombase));
}
#endif
/*
 * Decode the two raw SC-6600 hard-configuration bytes (hard_cfg[])
 * into decoded_hcfg, then overwrite the decoded values with the
 * configuration requested by the kernel (ae_config / CONFIG_SC6600_*),
 * ready to be re-encoded and written back to the card.
 */
static void __init aedsp16_hard_decode(void) {

	DBG((" aedsp16_hard_decode: 0x%x, 0x%x\n", hard_cfg[0], hard_cfg[1]));

	/*
	 * Decode Cfg Bytes.
	 */
	decoded_hcfg.iobase	= IOBASE(hard_cfg[0]);
	decoded_hcfg.joystick	= JOY(hard_cfg[0]);
	decoded_hcfg.wssbase	= WSSADDR(hard_cfg[0]);
	decoded_hcfg.mpubase	= MPUADDR(hard_cfg[0]);
	decoded_hcfg.cdrom	= CDROM(hard_cfg[1]);
	decoded_hcfg.cdrombase	= CDROMADDR(hard_cfg[1]);

#if defined(AEDSP16_INFO) || defined(AEDSP16_DEBUG)
	printk(" Original sound card configuration:\n");
	aedsp16_pinfo();
#endif

	/*
	 * Now set up the real kernel configuration.
	 */
	decoded_hcfg.iobase	= ae_config.base_io;
	decoded_hcfg.wssbase	= ae_config.mss_base;
	decoded_hcfg.mpubase	= ae_config.mpu_base;

#if defined(CONFIG_SC6600_JOY)
	decoded_hcfg.joystick = CONFIG_SC6600_JOY;	/* Enable */
#endif
#if defined(CONFIG_SC6600_CDROM)
	decoded_hcfg.cdrom = CONFIG_SC6600_CDROM;	/* 4:N-3:I-2:G-1:P-0:S */
#endif
#if defined(CONFIG_SC6600_CDROMBASE)
	decoded_hcfg.cdrombase = CONFIG_SC6600_CDROMBASE; /* 0 Disable */
#endif

#if defined(AEDSP16_DEBUG)
	DBG((" New Values:\n"));
	aedsp16_pinfo();
#endif

	DBG(("success.\n"));
}
/*
 * Re-encode decoded_hcfg back into the two hard_cfg[] bytes expected
 * by the DSP (the inverse of aedsp16_hard_decode()).
 */
static void __init aedsp16_hard_encode(void) {

	DBG((" aedsp16_hard_encode: 0x%x, 0x%x\n", hard_cfg[0], hard_cfg[1]));

	hard_cfg[0] = 0;
	hard_cfg[1] = 0;

	/* NOTE(review): bit 5 is unconditionally set; its meaning is not
	   documented in this file — presumably a fixed "must be 1" bit in
	   the config byte. Confirm against the SC-6600 documentation. */
	hard_cfg[0] |= 0x20;

	BLDIOBASE (hard_cfg[0], decoded_hcfg.iobase);
	BLDWSSADDR(hard_cfg[0], decoded_hcfg.wssbase);
	BLDMPUADDR(hard_cfg[0], decoded_hcfg.mpubase);
	BLDJOY(hard_cfg[0], decoded_hcfg.joystick);
	BLDCDROM(hard_cfg[1], decoded_hcfg.cdrom);
	BLDCDROMADDR(hard_cfg[1], decoded_hcfg.cdrombase);

#if defined(AEDSP16_DEBUG)
	aedsp16_pinfo();
#endif

	DBG((" aedsp16_hard_encode: 0x%x, 0x%x\n", hard_cfg[0], hard_cfg[1]));
	DBG(("success.\n"));
}
/*
 * Push the hard configuration to the DSP with the fixed command
 * sequence 0x6C, 0x5C, <cfg low byte>, <cfg high byte>, 0xC5.
 * The order of these writes is the hardware protocol — do not
 * reorder.  Returns TRUE on success, FALSE if any write times out.
 */
static int __init aedsp16_hard_write(int port) {

	DBG(("aedsp16_hard_write:\n"));

	if (aedsp16_write(port, COMMAND_6C)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_6C);
		DBG(("failure.\n"));
		return FALSE;
	}
	if (aedsp16_write(port, COMMAND_5C)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_5C);
		DBG(("failure.\n"));
		return FALSE;
	}
	if (aedsp16_write(port, hard_cfg[0])) {
		printk("[AEDSP16] DATA 0x%x: failed!\n", hard_cfg[0]);
		DBG(("failure.\n"));
		return FALSE;
	}
	if (aedsp16_write(port, hard_cfg[1])) {
		printk("[AEDSP16] DATA 0x%x: failed!\n", hard_cfg[1]);
		DBG(("failure.\n"));
		return FALSE;
	}
	if (aedsp16_write(port, COMMAND_C5)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_C5);
		DBG(("failure.\n"));
		return FALSE;
	}

	DBG(("success.\n"));

	return TRUE;
}
/*
 * Query the DSP for its hardware configuration (READ_HARD_CFG) and
 * store the two returned bytes in hard_cfg[].  A third byte is read
 * and discarded to drain the DSP's reply.  Returns TRUE on success,
 * FALSE on any command or read failure.
 */
static int __init aedsp16_hard_read(int port) {

	DBG(("aedsp16_hard_read:\n"));

	if (aedsp16_write(port, READ_HARD_CFG)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", READ_HARD_CFG);
		DBG(("failure.\n"));
		return FALSE;
	}
	/* First reply byte: low configuration byte */
	if ((hard_cfg[0] = aedsp16_read(port)) == -1) {
		printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
			READ_HARD_CFG);
		DBG(("failure.\n"));
		return FALSE;
	}
	/* Second reply byte: high configuration byte */
	if ((hard_cfg[1] = aedsp16_read(port)) == -1) {
		printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
			READ_HARD_CFG);
		DBG(("failure.\n"));
		return FALSE;
	}
	/* Third reply byte is read only to flush it; value is unused */
	if (aedsp16_read(port) == -1) {
		printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
			READ_HARD_CFG);
		DBG(("failure.\n"));
		return FALSE;
	}

	DBG(("success.\n"));

	return TRUE;
}
/*
 * Write the SC-6600 extended configuration: build the extcfg bitmask
 * from the CD-ROM and MPU-401 settings and send it via command 0x66;
 * for cdrom type 3, additionally read a value back with command 0x52,
 * mask its top bit, and write it back through command 0x60.
 * Returns TRUE on success, FALSE on any failure.
 */
static int __init aedsp16_ext_cfg_write(int port) {
	int extcfg, val;

	if (aedsp16_write(port, COMMAND_66)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_66);
		return FALSE;
	}

	/* Start from the base mask, then clear bits per configuration.
	   Exact bit meanings are not documented here; the structure below
	   mirrors the DOS driver's behavior. */
	extcfg = 7;
	if (decoded_hcfg.cdrom != 2)
		extcfg = 0x0F;
	if ((decoded_hcfg.cdrom == 4) ||
	    (decoded_hcfg.cdrom == 3))
		extcfg &= ~2;	/* no CD-ROM interface for types 3/4 */
	if (decoded_hcfg.cdrombase == 0)
		extcfg &= ~2;	/* CD-ROM disabled */
	if (decoded_hcfg.mpubase == 0)
		extcfg &= ~1;	/* MPU-401 disabled */

	if (aedsp16_write(port, extcfg)) {
		printk("[AEDSP16] Write extcfg: failed!\n");
		return FALSE;
	}
	if (aedsp16_write(port, 0)) {
		printk("[AEDSP16] Write extcfg: failed!\n");
		return FALSE;
	}
	if (decoded_hcfg.cdrom == 3) {
		if (aedsp16_write(port, COMMAND_52)) {
			printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_52);
			return FALSE;
		}
		if ((val = aedsp16_read(port)) == -1) {
			printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n"
					, COMMAND_52);
			return FALSE;
		}
		val &= 0x7F;	/* clear the top bit before writing back */
		if (aedsp16_write(port, COMMAND_60)) {
			printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_60);
			return FALSE;
		}
		if (aedsp16_write(port, val)) {
			printk("[AEDSP16] Write val: failed!\n");
			return FALSE;
		}
	}

	return TRUE;
}
#endif /* CONFIG_SC6600 */
/*
 * Program the (M)IRQ and DMA soft configuration: send the
 * WRITE_MDIRQ_CFG command followed by the bitmapped soft_cfg byte.
 * Returns TRUE on success, FALSE on write timeout.
 */
static int __init aedsp16_cfg_write(int port) {
	if (aedsp16_write(port, WRITE_MDIRQ_CFG)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", WRITE_MDIRQ_CFG);
		return FALSE;
	}
	if (aedsp16_write(port, soft_cfg)) {
		printk("[AEDSP16] Initialization of (M)IRQ and DMA: failed!\n");
		return FALSE;
	}
	return TRUE;
}
/*
 * Switch the card into Microsoft Sound System mode: send the
 * DSP_INIT_MSS command, re-program the (M)IRQ/DMA soft config, and
 * finally write the MSS-specific config byte directly to the MSS
 * base port.  Returns TRUE on success, FALSE on failure.
 */
static int __init aedsp16_init_mss(int port)
{
	DBG(("aedsp16_init_mss:\n"));

	/* settle delay before switching the DSP mode */
	mdelay(10);

	if (aedsp16_write(port, DSP_INIT_MSS)) {
		printk("[AEDSP16] aedsp16_init_mss [0x%x]: failed!\n",
				DSP_INIT_MSS);
		DBG(("failure.\n"));
		return FALSE;
	}

	mdelay(10);

	if (aedsp16_cfg_write(port) == FALSE)
		return FALSE;

	/* MSS config byte goes straight to the MSS base I/O port */
	outb(soft_cfg_mss, ae_config.mss_base);

	DBG(("success.\n"));

	return TRUE;
}
/*
 * Run the full board setup command sequence.  For SC-6600 cards this
 * first reads back the hard configuration and a command-0x52 value,
 * then (for all cards) issues command 0x88 with retries, programs the
 * (M)IRQ/DMA soft config, and — SC-6600 only — writes back the saved
 * 0x52 value, the DSP version bytes, and the re-encoded hard config.
 * The command order is the hardware protocol; do not reorder.
 * Returns TRUE on success, FALSE on any failure.
 */
static int __init aedsp16_setup_board(int port) {
	int loop = RETRY;

#if defined(CONFIG_SC6600)
	int val = 0;

	if (aedsp16_hard_read(port) == FALSE) {
		printk("[AEDSP16] aedsp16_hard_read: failed!\n");
		return FALSE;
	}

	if (aedsp16_write(port, COMMAND_52)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_52);
		return FALSE;
	}

	/* Save the 0x52 reply; it is written back via command 0x60 below */
	if ((val = aedsp16_read(port)) == -1) {
		printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
				COMMAND_52);
		return FALSE;
	}
#endif

	/* Issue command 0x88 until the DSP answers, up to RETRY times */
	do {
		if (aedsp16_write(port, COMMAND_88)) {
			printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_88);
			return FALSE;
		}
		mdelay(10);
	} while ((aedsp16_wait_data(port) == FALSE) && loop--);

	/* Drain the 0x88 reply byte; value is unused */
	if (aedsp16_read(port) == -1) {
		printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
				COMMAND_88);
		return FALSE;
	}

#if !defined(CONFIG_SC6600)
	if (aedsp16_write(port, COMMAND_5C)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_5C);
		return FALSE;
	}
#endif

	if (aedsp16_cfg_write(port) == FALSE)
		return FALSE;

#if defined(CONFIG_SC6600)
	/* Write back the value saved from command 0x52 */
	if (aedsp16_write(port, COMMAND_60)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_60);
		return FALSE;
	}
	if (aedsp16_write(port, val)) {
		printk("[AEDSP16] DATA 0x%x: failed!\n", val);
		return FALSE;
	}
	/* Send the previously probed DSP version (hi, lo) to the card */
	if (aedsp16_write(port, COMMAND_6E)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_6E);
		return FALSE;
	}
	if (aedsp16_write(port, ver[0])) {
		printk("[AEDSP16] DATA 0x%x: failed!\n", ver[0]);
		return FALSE;
	}
	if (aedsp16_write(port, ver[1])) {
		printk("[AEDSP16] DATA 0x%x: failed!\n", ver[1]);
		return FALSE;
	}

	if (aedsp16_hard_write(port) == FALSE) {
		printk("[AEDSP16] aedsp16_hard_write: failed!\n");
		return FALSE;
	}

	if (aedsp16_write(port, COMMAND_5C)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_5C);
		return FALSE;
	}

#if defined(THIS_IS_A_THING_I_HAVE_NOT_TESTED_YET)
	if (aedsp16_cfg_write(port) == FALSE)
		return FALSE;
#endif

#endif

	return TRUE;
}
/*
 * Program the card's default soft configuration:
 * 0x0A == IRQ 7, DMA 1, MIRQ 0.
 * Returns TRUE on success, FALSE on I/O failure.
 */
static int __init aedsp16_stdcfg(int port)
{
	if (aedsp16_write(port, WRITE_MDIRQ_CFG) != 0) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", WRITE_MDIRQ_CFG);
		return FALSE;
	}
	if (aedsp16_write(port, 0x0A) != 0) {
		printk("[AEDSP16] aedsp16_stdcfg: failed!\n");
		return FALSE;
	}
	return TRUE;
}
/*
 * Read the CARDVERDIGITS version bytes from the DSP into ver[] and
 * format them as "major.minor" into DSPVersion.
 * Returns TRUE on success, FALSE on any I/O failure.
 */
static int __init aedsp16_dsp_version(int port)
{
	int len = 0;
	int ret;

	DBG(("Get DSP Version:\n"));
	/*
	 * Fix: issue the command on the port we were given instead of
	 * hard-coding ae_config.base_io, so the function honors its
	 * parameter.  The only current caller passes ae_config.base_io,
	 * so behavior is unchanged for existing callers.
	 */
	if (aedsp16_write(port, GET_DSP_VERSION)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", GET_DSP_VERSION);
		DBG(("failed.\n"));
		return FALSE;
	}
	do {
		if ((ret = aedsp16_read(port)) == -1) {
			DBG(("failed.\n"));
			return FALSE;
		}
		/*
		 * We already know how many ints are stored (2), so we know
		 * when the string is finished.
		 */
		ver[len++] = ret;
	} while (len < CARDVERDIGITS);
	/* NOTE(review): assumes DSPVersion (CARDVERLEN+1 bytes) is large
	 * enough for two %d values -- confirm against the header. */
	sprintf(DSPVersion, "%d.%d", ver[0], ver[1]);

	DBG(("success.\n"));
	return TRUE;
}
/*
 * Read the copyright/name string from the DSP into DSPCopyright (at
 * most CARDNAMELEN bytes).  The card reports no length, so a read
 * timeout after at least one byte simply terminates the string and is
 * not treated as an error.
 * Returns TRUE on success, FALSE if not even one byte could be read.
 */
static int __init aedsp16_dsp_copyright(int port)
{
	int len = 0;
	int ret;

	DBG(("Get DSP Copyright:\n"));
	/*
	 * Fix: use the port parameter instead of hard-coding
	 * ae_config.base_io (identical behavior for the current caller,
	 * which passes ae_config.base_io).
	 */
	if (aedsp16_write(port, GET_DSP_COPYRIGHT)) {
		printk("[AEDSP16] CMD 0x%x: failed!\n", GET_DSP_COPYRIGHT);
		DBG(("failed.\n"));
		return FALSE;
	}
	do {
		if ((ret = aedsp16_read(port)) == -1) {
			/*
			 * If no more data available, return to the caller, no
			 * error if len>0.  We have no other way to know when
			 * the string is finished.
			 */
			if (len)
				break;
			DBG(("failed.\n"));
			return FALSE;
		}
		DSPCopyright[len++] = ret;
	} while (len < CARDNAMELEN);

	DBG(("success.\n"));
	return TRUE;
}
/*
 * Translate the user-selected IRQ, MPU IRQ and DMA values into the bit
 * masks the card understands (soft_cfg for SBPro mode, soft_cfg_mss for
 * MSS mode) and clear the copyright/version string buffers.
 */
static void __init aedsp16_init_tables(void)
{
	int i;

	memset(DSPCopyright, 0, CARDNAMELEN + 1);
	memset(DSPVersion, 0, CARDVERLEN + 1);

	/* IRQ lookup: tables are terminated by a zero .or entry */
	for (i = 0; orIRQ[i].or != 0; i++) {
		if (orIRQ[i].val == ae_config.irq) {
			soft_cfg |= orIRQ[i].or;
			soft_cfg_mss |= orIRQ[i].or;
		}
	}
	/* The MPU IRQ table is keyed directly by its .or value */
	for (i = 0; orMIRQ[i].or != 0; i++) {
		if (orMIRQ[i].or == ae_config.mpu_irq)
			soft_cfg |= orMIRQ[i].or;
	}
	/* DMA lookup */
	for (i = 0; orDMA[i].or != 0; i++) {
		if (orDMA[i].val == ae_config.dma) {
			soft_cfg |= orDMA[i].or;
			soft_cfg_mss |= orDMA[i].or;
		}
	}
}
/*
 * Full board initialization: reset the DSP, read and sanity-check the
 * copyright string, fetch the firmware version, program the standard
 * soft configuration and (on SC-6600 boards) the hardware/extended
 * configuration, then run the final setup and the optional MSS init.
 * Returns TRUE on success, FALSE as soon as any step fails.
 */
static int __init aedsp16_init_board(void)
{
	aedsp16_init_tables();

	if (aedsp16_dsp_reset(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_dsp_reset: failed!\n");
		return FALSE;
	}
	if (aedsp16_dsp_copyright(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_dsp_copyright: failed!\n");
		return FALSE;
	}
	/*
	 * My AEDSP16 card return SC-6000 in DSPCopyright, so
	 * if we have something different, we have to be warned.
	 */
	if (strcmp("SC-6000", DSPCopyright))
		printk("[AEDSP16] Warning: non SC-6000 audio card!\n");
	if (aedsp16_dsp_version(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_dsp_version: failed!\n");
		return FALSE;
	}
	if (aedsp16_stdcfg(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_stdcfg: failed!\n");
		return FALSE;
	}
#if defined(CONFIG_SC6600)
	if (aedsp16_hard_read(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_hard_read: failed!\n");
		return FALSE;
	}
	aedsp16_hard_decode();
	aedsp16_hard_encode();
	if (aedsp16_hard_write(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_hard_write: failed!\n");
		return FALSE;
	}
	if (aedsp16_ext_cfg_write(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_ext_cfg_write: failed!\n");
		return FALSE;
	}
#endif /* CONFIG_SC6600 */
	if (aedsp16_setup_board(ae_config.base_io) == FALSE) {
		printk("[AEDSP16] aedsp16_setup_board: failed!\n");
		return FALSE;
	}
	if (ae_config.mss_base != -1) {
		if (ae_config.init & INIT_MSS) {
			if (aedsp16_init_mss(ae_config.base_io) == FALSE) {
				/*
				 * Fix: the two adjacent string literals used
				 * to concatenate without a separating space
				 * ("...initializeMicrosoft...").
				 */
				printk("[AEDSP16] Can not initialize "
				       "Microsoft Sound System mode.\n");
				return FALSE;
			}
		}
	}
#if !defined(MODULE) || defined(AEDSP16_INFO) || defined(AEDSP16_DEBUG)
	/* Report what was initialized, e.g. "[MPU401 MSS]" */
	printk("Audio Excel DSP 16 init v%s (%s %s) [",
	       VERSION, DSPCopyright,
	       DSPVersion);
	if (ae_config.mpu_base != -1) {
		if (ae_config.init & INIT_MPU401) {
			printk("MPU401");
			if ((ae_config.init & INIT_MSS) ||
			    (ae_config.init & INIT_SBPRO))
				printk(" ");
		}
	}
	if (ae_config.mss_base == -1) {
		if (ae_config.init & INIT_SBPRO) {
			printk("SBPro");
			if (ae_config.init & INIT_MSS)
				printk(" ");
		}
	}
	if (ae_config.mss_base != -1)
		if (ae_config.init & INIT_MSS)
			printk("MSS");
	printk("]\n");
#endif /* MODULE || AEDSP16_INFO || AEDSP16_DEBUG */
	mdelay(10);
	return TRUE;
}
/*
 * Mark the card as configured for SBPro emulation.
 * Returns FALSE if the card is already in MSS mode (MSS and SBPro
 * emulation are mutually exclusive) or already set up for SBPro.
 */
static int __init init_aedsp16_sb(void)
{
	DBG(("init_aedsp16_sb: "));

	/* Refuse if either emulation mode is already configured */
	if (ae_config.init & (INIT_MSS | INIT_SBPRO))
		return FALSE;

	ae_config.init |= INIT_SBPRO;
	DBG(("done.\n"));
	return TRUE;
}
/* Drop the SBPro-emulation flag; SBPro mode holds no I/O region of its own. */
static void uninit_aedsp16_sb(void)
{
	DBG(("uninit_aedsp16_sb: "));
	ae_config.init &= ~INIT_SBPRO;
	DBG(("done.\n"));
}
/*
 * Mark the card as configured for MSS emulation and claim the base I/O
 * region if nobody holds it yet.
 * Returns FALSE if SBPro or MSS is already configured (the board cannot
 * emulate both at once) or if the I/O region is busy.
 */
static int __init init_aedsp16_mss(void)
{
	DBG(("init_aedsp16_mss: "));

	/* MSS and SBPro are mutually exclusive; a double MSS init is refused too */
	if (ae_config.init & (INIT_SBPRO | INIT_MSS))
		return FALSE;

	/*
	 * The card's control registers live in the CONFIG_AEDSP16_BASE
	 * region, so claim it now -- unless the MPU-401 setup already did.
	 */
	if (!(ae_config.init & INIT_MPU401) &&
	    !request_region(ae_config.base_io, IOBASE_REGION_SIZE,
			    "aedsp16 (base)")) {
		printk("AEDSP16 BASE I/O port region is already in use.\n");
		return FALSE;
	}

	ae_config.init |= INIT_MSS;
	DBG(("done.\n"));
	return TRUE;
}
/*
 * Clear the MSS flag and release the base I/O region -- unless the
 * MPU-401 part is still active, in which case it keeps the region.
 */
static void uninit_aedsp16_mss(void)
{
	DBG(("uninit_aedsp16_mss: "));
	if ((!(ae_config.init & INIT_MPU401)) &&
	    (ae_config.init & INIT_MSS)) {
		release_region(ae_config.base_io, IOBASE_REGION_SIZE);
		DBG(("AEDSP16 base region released.\n"));
	}
	ae_config.init &= ~INIT_MSS;
	DBG(("done.\n"));
}
/*
 * Mark the card as configured for MPU-401 and claim the base I/O region
 * if neither MSS nor SBPro already holds it.
 * Returns FALSE on double init or when the I/O region is busy.
 */
static int __init init_aedsp16_mpu(void)
{
	DBG(("init_aedsp16_mpu: "));

	if (ae_config.init & INIT_MPU401)
		return FALSE;

	/*
	 * The card's control registers live in the CONFIG_AEDSP16_BASE
	 * region, so claim it now -- unless the audio setup (MSS or
	 * SBPro) already did.
	 */
	if (!(ae_config.init & (INIT_MSS | INIT_SBPRO)) &&
	    !request_region(ae_config.base_io, IOBASE_REGION_SIZE,
			    "aedsp16 (base)")) {
		printk("AEDSP16 BASE I/O port region is already in use.\n");
		return FALSE;
	}

	ae_config.init |= INIT_MPU401;
	DBG(("done.\n"));
	return TRUE;
}
/*
 * Clear the MPU-401 flag and release the base I/O region -- unless the
 * audio part (MSS or SBPro) is still active and keeps the region.
 */
static void uninit_aedsp16_mpu(void)
{
	DBG(("uninit_aedsp16_mpu: "));
	if ((!(ae_config.init & (INIT_MSS | INIT_SBPRO))) &&
	    (ae_config.init & INIT_MPU401)) {
		release_region(ae_config.base_io, IOBASE_REGION_SIZE);
		DBG(("AEDSP16 base region released.\n"));
	}
	ae_config.init &= ~INIT_MPU401;
	DBG(("done.\n"));
}
/*
 * Configure every requested sub-device (SBPro emulation, MPU-401, MSS
 * emulation) and, if at least one succeeded, program the board.
 * Returns TRUE when the board was initialized, FALSE otherwise.
 */
static int __init init_aedsp16(void)
{
	int initialized = FALSE;

	DBG(("Initializing BASE[0x%x] IRQ[%d] DMA[%d] MIRQ[%d]\n",
	     ae_config.base_io, ae_config.irq, ae_config.dma,
	     ae_config.mpu_irq));

	/* SBPro emulation is used only when no MSS I/O base was given */
	if (ae_config.mss_base == -1) {
		if (init_aedsp16_sb() != FALSE)
			initialized = TRUE;
		else
			uninit_aedsp16_sb();
	}

	if (ae_config.mpu_base != -1) {
		if (init_aedsp16_mpu() != FALSE)
			initialized = TRUE;
		else
			uninit_aedsp16_mpu();
	}

	/*
	 * In the sequence of init routines, the MSS init MUST be the last!
	 * This because of the special register programming the MSS mode needs.
	 * A board reset would disable the MSS mode restoring the default SBPRO
	 * mode.
	 */
	if (ae_config.mss_base != -1) {
		if (init_aedsp16_mss() != FALSE)
			initialized = TRUE;
		else
			uninit_aedsp16_mss();
	}

	if (initialized)
		initialized = aedsp16_init_board();
	return initialized;
}
/*
 * Undo init_aedsp16(): tear down whichever audio emulation was
 * configured (MSS when an MSS base was given, SBPro otherwise) and the
 * MPU-401 part, releasing the base I/O region along the way.
 */
static void __exit uninit_aedsp16(void)
{
	if (ae_config.mss_base != -1)
		uninit_aedsp16_mss();
	else
		uninit_aedsp16_sb();
	if (ae_config.mpu_base != -1)
		uninit_aedsp16_mpu();
}
/* Module/boot parameters; -1 means "not configured".  io, irq and dma
 * are mandatory (checked in do_init_aedsp16()); the rest are optional. */
static int __initdata io = -1;
static int __initdata irq = -1;
static int __initdata dma = -1;
static int __initdata mpu_irq = -1;
static int __initdata mss_base = -1;
static int __initdata mpu_base = -1;

module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O base address (0x220 0x240)");
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "IRQ line (5 7 9 10 11)");
module_param(dma, int, 0);
MODULE_PARM_DESC(dma, "dma line (0 1 3)");
module_param(mpu_irq, int, 0);
MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ line (5 7 9 10 0)");
module_param(mss_base, int, 0);
MODULE_PARM_DESC(mss_base, "MSS emulation I/O base address (0x530 0xE80)");
module_param(mpu_base, int, 0);
MODULE_PARM_DESC(mpu_base,"MPU-401 I/O base address (0x300 0x310 0x320 0x330)");
MODULE_AUTHOR("Riccardo Facchetti <fizban@tin.it>");
MODULE_DESCRIPTION("Audio Excel DSP 16 Driver Version " VERSION);
MODULE_LICENSE("GPL");
/*
 * Module entry point: validate the mandatory parameters, publish them
 * in the driver-wide ae_config and run the full initialization.
 * Returns 0 on success, -EINVAL on missing parameters or init failure.
 */
static int __init do_init_aedsp16(void)
{
	printk("Audio Excel DSP 16 init driver Copyright (C) Riccardo Facchetti 1995-98\n");

	/* io, irq and dma have no usable defaults: refuse to load without them */
	if (io == -1 || dma == -1 || irq == -1) {
		printk(KERN_INFO "aedsp16: I/O, IRQ and DMA are mandatory\n");
		return -EINVAL;
	}

	/* Copy the module parameters into the driver-wide configuration */
	ae_config.base_io = io;
	ae_config.irq = irq;
	ae_config.dma = dma;
	ae_config.mss_base = mss_base;
	ae_config.mpu_base = mpu_base;
	ae_config.mpu_irq = mpu_irq;

	if (init_aedsp16() == FALSE) {
		printk(KERN_ERR "aedsp16: initialization failed\n");
		/*
		 * XXX
		 * What error should we return here ?
		 */
		return -EINVAL;
	}
	return 0;
}
/* Module exit: release everything claimed at init time. */
static void __exit cleanup_aedsp16(void) {
	uninit_aedsp16();
}
module_init(do_init_aedsp16);
module_exit(cleanup_aedsp16);
#ifndef MODULE
/*
 * Parse the "aedsp16=io,irq,dma,mss_io,mpu_io,mpu_irq" boot option.
 * Fix: get_options() stores the number of parsed values in ints[0] and
 * leaves the remaining slots untouched; the old code unconditionally
 * read all six slots of the uninitialized stack array, so a short
 * option string clobbered the -1 "unset" defaults with garbage.  Only
 * the values the user actually supplied are assigned now.
 */
static int __init setup_aedsp16(char *str)
{
	/* io, irq, dma, mss_io, mpu_io, mpu_irq */
	int ints[7];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] >= 1)
		io = ints[1];
	if (ints[0] >= 2)
		irq = ints[2];
	if (ints[0] >= 3)
		dma = ints[3];
	if (ints[0] >= 4)
		mss_base = ints[4];
	if (ints[0] >= 5)
		mpu_base = ints[5];
	if (ints[0] >= 6)
		mpu_irq = ints[6];
	return 1;
}
__setup("aedsp16=", setup_aedsp16);
#endif
| gpl-2.0 |
mati865/vlc | modules/gui/skins2/win32/win32_graphics.cpp | 87 | 12047 | /*****************************************************************************
* win32_graphics.cpp
*****************************************************************************
* Copyright (C) 2003 the VideoLAN team
* $Id$
*
* Authors: Cyril Deguet <asmax@via.ecp.fr>
* Olivier Teulière <ipkiss@via.ecp.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#ifdef WIN32_SKINS
#define WINVER 0x500
#include "win32_factory.hpp"
#include "win32_graphics.hpp"
#include "win32_window.hpp"
#include "../src/generic_bitmap.hpp"
#ifndef AC_SRC_ALPHA
#define AC_SRC_ALPHA 1
#endif
// Build an offscreen drawing surface: a memory DC holding a screen-compatible
// bitmap of width x height pixels, plus an (initially empty) transparency mask.
Win32Graphics::Win32Graphics( intf_thread_t *pIntf, int width, int height ):
    OSGraphics( pIntf ), m_width( width ), m_height( height ), m_hDC( NULL )
{
    HBITMAP hBmp;
    HDC hDC = GetDC( NULL );
    hBmp = CreateCompatibleBitmap( hDC, m_width, m_height );
    ReleaseDC( NULL, hDC );
    m_hDC = CreateCompatibleDC( NULL );
    SelectObject( m_hDC, hBmp );
    // NOTE(review): hBmp is still selected into m_hDC here, and per MSDN
    // DeleteObject() fails on a bitmap selected into a DC -- this call
    // probably does nothing and the bitmap is leaked; confirm, and if so
    // keep the SelectObject() return value and delete hBmp in the destructor.
    DeleteObject( hBmp );
    // Create the mask (empty: the whole surface starts transparent)
    m_mask = CreateRectRgn( 0, 0, 0, 0 );
}
// Release the offscreen DC and the transparency mask region.
Win32Graphics::~Win32Graphics()
{
    DeleteDC( m_hDC );
    DeleteObject( m_mask );
}
void Win32Graphics::clear( int xDest, int yDest, int width, int height )
{
if( width <= 0 || height <= 0 )
{
// Clear the transparency mask
DeleteObject( m_mask );
m_mask = CreateRectRgn( 0, 0, 0, 0 );
}
else
{
HRGN mask = CreateRectRgn( xDest, yDest,
xDest + width, yDest + height );
CombineRgn( m_mask, m_mask, mask, RGN_DIFF );
DeleteObject( mask );
}
}
// Alpha-blend a sub-rectangle of a 32bpp bitmap onto the internal DC at
// (xDest, yDest), and extend the transparency mask with every pixel whose
// alpha is non-zero.  The 'blend' parameter is accepted but ignored.
void Win32Graphics::drawBitmap( const GenericBitmap &rBitmap,
                                int xSrc, int ySrc, int xDest, int yDest,
                                int width, int height, bool blend )
{
    (void)blend;
    // check and adapt to source if needed
    if( !checkBoundaries( 0, 0, rBitmap.getWidth(), rBitmap.getHeight(),
                          xSrc, ySrc, width, height ) )
    {
        msg_Err( getIntf(), "empty source! pls, debug your skin" );
        return;
    }

    // check destination
    if( !checkBoundaries( 0, 0, m_width, m_height,
                          xDest, yDest, width, height ) )
    {
        msg_Err( getIntf(), "out of reach destination! pls, debug your skin" );
        return;
    }

    // Get a buffer on the image data
    uint8_t *pBmpData = rBitmap.getData();
    if( pBmpData == NULL )
    {
        // Nothing to draw
        return;
    }

    void *pBits;     // pointer to DIB section

    // Fill a BITMAPINFO structure: 32bpp top-down DIB (negative height)
    BITMAPINFO bmpInfo;
    memset( &bmpInfo, 0, sizeof( bmpInfo ) );
    bmpInfo.bmiHeader.biSize = sizeof( BITMAPINFOHEADER );
    bmpInfo.bmiHeader.biWidth = width;
    bmpInfo.bmiHeader.biHeight = -height;
    bmpInfo.bmiHeader.biPlanes = 1;
    bmpInfo.bmiHeader.biBitCount = 32;
    bmpInfo.bmiHeader.biCompression = BI_RGB;
    bmpInfo.bmiHeader.biSizeImage = width * height * 4;

    // Create a DIB (Device Independent Bitmap) and associate it with
    // a temporary DC
    HDC hDC = CreateCompatibleDC( m_hDC );
    HBITMAP hBmp = CreateDIBSection( hDC, &bmpInfo, DIB_RGB_COLORS,
                                     &pBits, NULL, 0 );
    SelectObject( hDC, hBmp );

    // Mask for transparency
    HRGN mask = CreateRectRgn( 0, 0, 0, 0 );

    // Skip the first lines of the image (source bytes are read as B,G,R,A)
    pBmpData += 4 * ySrc * rBitmap.getWidth();

    // Copy the bitmap on the image and compute the mask
    for( int y = 0; y < height; y++ )
    {
        // Skip uninteresting bytes at the beginning of the line
        pBmpData += 4 * xSrc;
        // Flag to say whether the previous pixel on the line was visible
        bool wasVisible = false;
        // Beginning of the current visible segment on the line
        int visibleSegmentStart = 0;
        for( int x = 0; x < width; x++ )
        {
            uint8_t b = *(pBmpData++);
            uint8_t g = *(pBmpData++);
            uint8_t r = *(pBmpData++);
            uint8_t a = *(pBmpData++);
            // Draw the pixel
            ((UINT32 *)pBits)[x + y * width] =
                (a << 24) | (r << 16) | (g << 8) | b;
            if( a > 0 )
            {
                // Pixel is visible
                if( ! wasVisible )
                {
                    // Beginning of a visible segment
                    visibleSegmentStart = x;
                }
                wasVisible = true;
            }
            else
            {
                // Pixel is transparent
                if( wasVisible )
                {
                    // End of a visible segment: add it to the mask
                    addSegmentInRegion( mask, visibleSegmentStart, x, y );
                }
                wasVisible = false;
            }
        }
        if( wasVisible )
        {
            // End of a visible segment: add it to the mask
            addSegmentInRegion( mask, visibleSegmentStart, width, y );
        }
        // Skip uninteresting bytes at the end of the line
        pBmpData += 4 * (rBitmap.getWidth() - width - xSrc);
    }

    // Apply the mask to the internal DC so the blend only touches
    // visible pixels
    OffsetRgn( mask, xDest, yDest );
    SelectClipRgn( m_hDC, mask );

    BLENDFUNCTION bf;      // structure for alpha blending
    bf.BlendOp = AC_SRC_OVER;
    bf.BlendFlags = 0;
    bf.SourceConstantAlpha = 0xff;  // don't use constant alpha
    bf.AlphaFormat = AC_SRC_ALPHA;  // per-pixel alpha from the DIB

    // Blend the image onto the internal DC
    if( !AlphaBlend( m_hDC, xDest, yDest, width, height, hDC, 0, 0,
                     width, height, bf ) )
    {
        msg_Err( getIntf(), "AlphaBlend() failed" );
    }

    // Add the bitmap mask to the global graphics mask
    CombineRgn( m_mask, m_mask, mask, RGN_OR );

    // Do cleanup
    DeleteObject( hBmp );
    DeleteObject( mask );
    DeleteDC( hDC );
}
// Copy a sub-rectangle of another Win32Graphics onto this one, clipped by
// the source's transparency mask, and merge that mask into ours.
void Win32Graphics::drawGraphics( const OSGraphics &rGraphics, int xSrc,
                                  int ySrc, int xDest, int yDest, int width,
                                  int height )
{
    // check and adapt to source if needed
    if( !checkBoundaries( 0, 0, rGraphics.getWidth(), rGraphics.getHeight(),
                          xSrc, ySrc, width, height ) )
    {
        msg_Err( getIntf(), "nothing to draw from graphics source" );
        return;
    }

    // check destination
    if( !checkBoundaries( 0, 0, m_width, m_height,
                          xDest, yDest, width, height ) )
    {
        msg_Err( getIntf(), "out of reach destination! pls, debug your skin" );
        return;
    }

    // Create the mask for transparency: restrict the source's visible
    // region to the copied rectangle, then shift it to destination space
    HRGN mask = CreateRectRgn( xSrc, ySrc, xSrc + width, ySrc + height );
    CombineRgn( mask, ((Win32Graphics&)rGraphics).getMask(), mask, RGN_AND );
    OffsetRgn( mask, xDest - xSrc, yDest - ySrc );

    // Copy the image
    HDC srcDC = ((Win32Graphics&)rGraphics).getDC();
    SelectClipRgn( m_hDC, mask );
    BitBlt( m_hDC, xDest, yDest, width, height, srcDC, xSrc, ySrc, SRCCOPY );

    // Add the source mask to the mask of the graphics
    CombineRgn( m_mask, mask, m_mask, RGN_OR );
    DeleteObject( mask );
}
void Win32Graphics::fillRect( int left, int top, int width, int height,
uint32_t color )
{
// Update the mask with the rectangle area
HRGN newMask = CreateRectRgn( left, top, left + width, top + height );
CombineRgn( m_mask, m_mask, newMask, RGN_OR );
SelectClipRgn( m_hDC, m_mask );
DeleteObject( newMask );
// Create a brush with the color
int red = (color & 0xff0000) >> 16;
int green = (color & 0xff00) >> 8;
int blue = color & 0xff;
HBRUSH hBrush = CreateSolidBrush( RGB( red, green, blue ) );
// Draw the rectangle
RECT r;
r.left = left;
r.top = top;
r.right = left + width;
r.bottom = top + height;
FillRect( m_hDC, &r, hBrush );
DeleteObject( hBrush );
}
void Win32Graphics::drawRect( int left, int top, int width, int height,
uint32_t color )
{
// Update the mask with the rectangle
HRGN l1 = CreateRectRgn( left, top, left + width, top + 1 );
HRGN l2 = CreateRectRgn( left + width - 1, top,
left + width, top + height );
HRGN l3 = CreateRectRgn( left, top + height - 1,
left + width, top + height );
HRGN l4 = CreateRectRgn( left, top, left + 1, top + height );
CombineRgn( m_mask, m_mask, l1, RGN_OR );
CombineRgn( m_mask, m_mask, l2, RGN_OR );
CombineRgn( m_mask, m_mask, l3, RGN_OR );
CombineRgn( m_mask, m_mask, l4, RGN_OR );
DeleteObject( l1 );
DeleteObject( l2 );
DeleteObject( l3 );
DeleteObject( l4 );
SelectClipRgn( m_hDC, m_mask );
// Create a pen with the color
int red = (color & 0xff0000) >> 16;
int green = (color & 0xff00) >> 8;
int blue = color & 0xff;
HPEN hPen = CreatePen( PS_SOLID, 0, RGB( red, green, blue ) );
SelectObject( m_hDC, hPen );
// Draw the rectangle
MoveToEx( m_hDC, left, top, NULL );
LineTo( m_hDC, left + width - 1, top );
LineTo( m_hDC, left + width - 1, top + height - 1 );
LineTo( m_hDC, left, top + height - 1 );
LineTo( m_hDC, left, top );
// Delete the pen
DeleteObject( hPen );
}
// Shape the given window with this surface's transparency mask, so only
// the visible pixels belong to the window.
void Win32Graphics::applyMaskToWindow( OSWindow &rWindow )
{
    // Get window handle
    HWND hWnd = ((Win32Window&)rWindow).getHandle();

    // Apply the mask
    // We need to copy the mask, because SetWindowRgn modifies it in our back
    // (per MSDN the system takes ownership of the region handed to it)
    HRGN mask = CreateRectRgn( 0, 0, 0, 0 );
    CombineRgn( mask, m_mask, NULL, RGN_COPY );
    SetWindowRgn( hWnd, mask, TRUE );
}
// Blit a sub-rectangle of the internal surface straight onto the window.
void Win32Graphics::copyToWindow( OSWindow &rWindow, int xSrc, int ySrc,
                                  int width, int height, int xDest, int yDest )
{
    HWND hWnd = ((Win32Window&)rWindow).getHandle();
    HDC wndDC = GetDC( hWnd );

    // Draw image on window
    BitBlt( wndDC, xDest, yDest, width, height, m_hDC, xSrc, ySrc, SRCCOPY );

    // Release window device context
    ReleaseDC( hWnd, wndDC );
}
// A point is "hit" when it lies inside the visible (non-transparent) region.
bool Win32Graphics::hit( int x, int y ) const
{
    return PtInRegion( m_mask, x, y ) ? true : false;
}
// Merge the horizontal pixel segment [start, end) of the given line
// into the region rMask.
void Win32Graphics::addSegmentInRegion( HRGN &rMask, int start,
                                        int end, int line )
{
    HRGN segment = CreateRectRgn( start, line, end, line + 1 );
    CombineRgn( rMask, segment, rMask, RGN_OR );
    DeleteObject( segment );
}
// Clamp a target rectangle (in/out parameters) against a source area.
// A non-positive target width/height is first replaced by the source size.
// Returns true and updates the target with the intersection when the two
// rectangles overlap; returns false (target left as-is) when they don't.
bool Win32Graphics::checkBoundaries( int x_src, int y_src,
                                     int w_src, int h_src,
                                     int& x_target, int& y_target,
                                     int& w_target, int& h_target )
{
    // set valid width and height
    w_target = (w_target > 0) ? w_target : w_src;
    h_target = (h_target > 0) ? h_target : h_src;

    // clip source if needed
    rect srcRegion( x_src, y_src, w_src, h_src );
    rect targetRegion( x_target, y_target, w_target, h_target );
    rect inter;
    if( rect::intersect( srcRegion, targetRegion, &inter ) )
    {
        x_target = inter.x;
        y_target = inter.y;
        w_target = inter.width;
        h_target = inter.height;
        return true;
    }
    return false;
}
#endif
| gpl-2.0 |
kbauskar/percona-server | storage/ndb/test/src/NdbRestarts.cpp | 87 | 25090 | /*
Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <NdbRestarts.hpp>
#include <NDBT.hpp>
#include <string.h>
#include <NdbSleep.h>
#include <kernel/ndb_limits.h>
#include <signaldata/DumpStateOrd.hpp>
#include <NdbEnv.h>
#include <NDBT_Test.hpp>
#define F_ARGS NDBT_Context* ctx, NdbRestarter& _restarter, const NdbRestarts::NdbRestart* _restart
int restartRandomNodeGraceful(F_ARGS);
int restartRandomNodeAbort(F_ARGS);
int restartRandomNodeError(F_ARGS);
int restartRandomNodeInitial(F_ARGS);
int restartNFDuringNR(F_ARGS);
int restartMasterNodeError(F_ARGS);
int twoNodeFailure(F_ARGS);
int fiftyPercentFail(F_ARGS);
int twoMasterNodeFailure(F_ARGS);
int restartAllNodesGracfeul(F_ARGS);
int restartAllNodesAbort(F_ARGS);
int restartAllNodesError9999(F_ARGS);
int fiftyPercentStopAndWait(F_ARGS);
int restartNodeDuringLCP(F_ARGS);
int stopOnError(F_ARGS);
int getRandomNodeId(NdbRestarter& _restarter);
/**
 * Table of all known restart scenarios.  Each entry gives:
 *  - the name used to look the scenario up,
 *  - its category (NODE_RESTART / MULTIPLE_NODE_RESTART / SYSTEM_RESTART),
 *  - the function implementing it,
 *  - the minimum number of data nodes it requires.
 */
const NdbRestarts::NdbRestart NdbRestarts::m_restarts[] = {
  /* ---- NODE RESTARTS: one node restarted ---- */
  /* Graceful shutdown/restart of a randomly selected node */
  NdbRestart("RestartRandomNode", NODE_RESTART,
             restartRandomNodeGraceful, 2),
  /* Immediate (abort) shutdown of a randomly selected node */
  NdbRestart("RestartRandomNodeAbort", NODE_RESTART,
             restartRandomNodeAbort, 2),
  /* Crash a randomly selected node via error insert */
  NdbRestart("RestartRandomNodeError", NODE_RESTART,
             restartRandomNodeError, 2),
  /* Crash the master node via error insert */
  NdbRestart("RestartMasterNodeError", NODE_RESTART,
             restartMasterNodeError, 2),
  /* Restart a random node without its filesystem (initial restart) */
  NdbRestart("RestartRandomNodeInitial", NODE_RESTART,
             restartRandomNodeInitial, 2),
  /* Restart a random node, then crash it again while it is restarting */
  NdbRestart("RestartNFDuringNR", NODE_RESTART,
             restartNFDuringNR, 2),
  /* Set StopOnError and crash the node by sending SYSTEM_ERROR to it */
  NdbRestart("StopOnError", NODE_RESTART,
             stopOnError, 1),

  /* ---- MULTIPLE NODE RESTARTS ---- */
  /* Crash two random nodes (different node groups) with a small delay */
  NdbRestart("TwoNodeFailure", MULTIPLE_NODE_RESTART,
             twoNodeFailure, 4),
  /* Crash two would-be master nodes with a small delay between them */
  NdbRestart("TwoMasterNodeFailure", MULTIPLE_NODE_RESTART,
             twoMasterNodeFailure, 4),
  NdbRestart("FiftyPercentFail", MULTIPLE_NODE_RESTART,
             fiftyPercentFail, 2),

  /* ---- SYSTEM RESTARTS ---- */
  /* Graceful restart of all nodes */
  NdbRestart("RestartAllNodes", SYSTEM_RESTART,
             restartAllNodesGracfeul, 1),
  /* Immediate restart of all nodes, no graceful shutdown */
  NdbRestart("RestartAllNodesAbort", SYSTEM_RESTART,
             restartAllNodesAbort, 1),
  /* Restart all nodes with error insert 9999
   * TODO! We can later add more errors like 9998, 9997 etc. */
  NdbRestart("RestartAllNodesError9999", SYSTEM_RESTART,
             restartAllNodesError9999, 1),
  /* Stop 50% of the nodes, wait a random number of minutes, stop the
   * rest, then start everything again */
  NdbRestart("FiftyPercentStopAndWait", SYSTEM_RESTART,
             fiftyPercentStopAndWait, 2),
  /* Restart a master node during LCP with error inserts */
  NdbRestart("RestartNodeDuringLCP", NODE_RESTART,
             restartNodeDuringLCP, 2),
};

const int NdbRestarts::m_NoOfRestarts = sizeof(m_restarts) / sizeof(NdbRestart);

/* Table of named error inserts usable by the tests */
const NdbRestarts::NdbErrorInsert NdbRestarts::m_errors[] = {
  NdbErrorInsert("Error9999", 9999)
};

const int NdbRestarts::m_NoOfErrors = sizeof(m_errors) / sizeof(NdbErrorInsert);
// Build one restart descriptor for the m_restarts table.
// NOTE(review): _arg1 is accepted but never stored -- the "m_arg1 = arg1;"
// assignment is commented out, so any value passed here is silently
// dropped; confirm against NdbRestarts.hpp whether m_arg1 is still used.
NdbRestarts::NdbRestart::NdbRestart(const char* _name,
                                    NdbRestartType _type,
                                    restartFunc* _func,
                                    int _requiredNodes,
                                    int _arg1){
  m_name = _name;
  m_type = _type;
  m_restartFunc = _func;
  m_numRequiredNodes = _requiredNodes;
  //  m_arg1 = arg1;
}
// Number of entries in the restart table.
int NdbRestarts::getNumRestarts(){
  return m_NoOfRestarts;
}

// Look a restart up by table index; NULL for an out-of-range index.
const NdbRestarts::NdbRestart* NdbRestarts::getRestart(int _num){
  return (_num < m_NoOfRestarts) ? &m_restarts[_num] : NULL;
}
// Look a restart up by name (linear scan); logs and returns NULL when
// no entry matches.
const NdbRestarts::NdbRestart* NdbRestarts::getRestart(const char* _name){
  for(const NdbRestart* r = m_restarts; r != m_restarts + m_NoOfRestarts; r++){
    if (strcmp(r->m_name, _name) == 0)
      return r;
  }
  g_err << "The restart \""<< _name << "\" not found in NdbRestarts" << endl;
  return NULL;
}
/*
 * Execute the given restart scenario and wait for the cluster to come
 * back.  Returns NDBT_OK when the restart was performed (or skipped
 * because the cluster is too small) and the cluster restarted within
 * _timeout seconds; NDBT_FAILED otherwise.  _timeout == 0 waits forever.
 */
int NdbRestarts::executeRestart(NDBT_Context* ctx,
                                const NdbRestarts::NdbRestart* _restart,
                                unsigned int _timeout){
  // Check that there are enough nodes in the cluster
  // for this test
  NdbRestarter restarter;
  if (_restart->m_numRequiredNodes > restarter.getNumDbNodes()){
    g_err << "This test requires " << _restart->m_numRequiredNodes << " nodes "
          << "there are only "<< restarter.getNumDbNodes() <<" nodes in cluster"
          << endl;
    // Note: a too-small cluster is reported as OK, not as a failure
    return NDBT_OK;
  }

  if (restarter.waitClusterStarted(120) != 0){
    // If the cluster is not started when we shall perform the restart,
    // the restart can not be executed and the test fails
    return NDBT_FAILED;
  }

  int res = _restart->m_restartFunc(ctx, restarter, _restart);

  // Sleep a little waiting for nodes to react to command
  NdbSleep_SecSleep(2);

  if (_timeout == 0){
    // If timeout == 0 wait for ever
    while(restarter.waitClusterStarted(60) != 0)
      g_err << "Cluster is not started after restart. Waiting 60s more..."
            << endl;
  } else {
    if (restarter.waitClusterStarted(_timeout) != 0){
      g_err<<"Cluster failed to start" << endl;
      res = NDBT_FAILED;
    }
  }

  return res;
}
// Resolve the descriptor by table index, then delegate.
int NdbRestarts::executeRestart(NDBT_Context* ctx,
                                int _num,
                                unsigned int _timeout){
  const NdbRestarts::NdbRestart* r = getRestart(_num);
  return (r == NULL) ? NDBT_FAILED : executeRestart(ctx, r, _timeout);
}

// Resolve the descriptor by name, then delegate.
int NdbRestarts::executeRestart(NDBT_Context* ctx,
                                const char* _name,
                                unsigned int _timeout){
  const NdbRestarts::NdbRestart* r = getRestart(_name);
  return (r == NULL) ? NDBT_FAILED : executeRestart(ctx, r, _timeout);
}
// Print every restart of the given category with its node requirement.
void NdbRestarts::listRestarts(NdbRestartType _type){
  for(int i = 0; i < m_NoOfRestarts; i++){
    if (m_restarts[i].m_type != _type)
      continue;
    ndbout << " " << m_restarts[i].m_name << ", min "
           << m_restarts[i].m_numRequiredNodes
           << " nodes"<< endl;
  }
}

// Print all restarts, one section per category.
void NdbRestarts::listRestarts(){
  ndbout << "NODE RESTARTS" << endl;
  listRestarts(NODE_RESTART);
  ndbout << "MULTIPLE NODE RESTARTS" << endl;
  listRestarts(MULTIPLE_NODE_RESTART);
  ndbout << "SYSTEM RESTARTS" << endl;
  listRestarts(SYSTEM_RESTART);
}
// Build one error-insert descriptor (symbolic name + error code).
NdbRestarts::NdbErrorInsert::NdbErrorInsert(const char* _name,
                                            int _errorNo){
  m_errorNo = _errorNo;
  m_name = _name;
}

// Number of entries in the error-insert table.
int NdbRestarts::getNumErrorInserts(){
  return m_NoOfErrors;
}

// Look an error insert up by index; NULL for an out-of-range index.
const NdbRestarts::NdbErrorInsert* NdbRestarts::getError(int _num){
  return (_num < m_NoOfErrors) ? &m_errors[_num] : NULL;
}

// Pick one of the error inserts uniformly at random.
const NdbRestarts::NdbErrorInsert* NdbRestarts::getRandomError(){
  return &m_errors[myRandom48(m_NoOfErrors)];
}
/**
*
* IMPLEMENTATION OF THE DIFFERENT RESTARTS
* Each function should perform it's action
* and the returned NDBT_OK or NDBT_FAILED
*
*/
/*
 * Evaluate b once; if it yields zero, print the message m together with
 * file/line info and make the ENCLOSING function return NDBT_FAILED.
 * Not wrapped in do { } while(0): use it only as a stand-alone statement.
 */
#define CHECK(b, m) { int _xx = b; if (!(_xx)) { \
  ndbout << "ERR: "<< m \
         << " " << "File: " << __FILE__ \
         << " (Line: " << __LINE__ << ")" << "- " << _xx << endl; \
  return NDBT_FAILED; } }
// Gracefully restart one randomly chosen data node.
int restartRandomNodeGraceful(F_ARGS){
  myRandom48Init((long)NdbTick_CurrentMillisecond());
  int nodeId = _restarter.getDbNodeId(myRandom48(_restarter.getNumDbNodes()));

  g_info << _restart->m_name << ": node = "<<nodeId << endl;

  CHECK(_restarter.restartOneDbNode(nodeId) == 0,
        "Could not restart node "<<nodeId);
  return NDBT_OK;
}
// Abort (no graceful stop) one randomly chosen data node.
int restartRandomNodeAbort(F_ARGS){
  myRandom48Init((long)NdbTick_CurrentMillisecond());
  int nodeId = _restarter.getDbNodeId(myRandom48(_restarter.getNumDbNodes()));

  g_info << _restart->m_name << ": node = "<<nodeId << endl;

  CHECK(_restarter.restartOneDbNode(nodeId, false, false, true) == 0,
        "Could not restart node "<<nodeId);
  return NDBT_OK;
}
// Crash one randomly chosen data node by injecting error 9999.
int restartRandomNodeError(F_ARGS){
  myRandom48Init((long)NdbTick_CurrentMillisecond());
  int nodeId = _restarter.getDbNodeId(myRandom48(_restarter.getNumDbNodes()));

  ndbout << _restart->m_name << ": node = "<<nodeId << endl;

  CHECK(_restarter.insertErrorInNode(nodeId, 9999) == 0,
        "Could not restart node "<<nodeId);
  return NDBT_OK;
}
// Crash the first node in the node list via error insert.
// NOTE(review): unlike the other error-insert restarts this uses code
// 39999 rather than 9999 -- presumably a master-specific crash insert;
// confirm against the NDB error-insert documentation.  Also note the
// node is the first listed one, not necessarily the current master.
int restartMasterNodeError(F_ARGS){
  int nodeId = _restarter.getDbNodeId(0);
  g_info << _restart->m_name << ": node = "<<nodeId << endl;
  CHECK(_restarter.insertErrorInNode(nodeId, 39999) == 0,
        "Could not restart node "<<nodeId);
  return NDBT_OK;
}
// Restart one randomly chosen data node with an initial restart
// (i.e. without its filesystem).
int restartRandomNodeInitial(F_ARGS){
  myRandom48Init((long)NdbTick_CurrentMillisecond());
  int nodeId = _restarter.getDbNodeId(myRandom48(_restarter.getNumDbNodes()));

  g_info << _restart->m_name << ": node = "<<nodeId << endl;

  CHECK(_restarter.restartOneDbNode(nodeId, true) == 0,
        "Could not restart node "<<nodeId);
  return NDBT_OK;
}
/*
 * Crash two data nodes from different node groups with error insert
 * 9999, separated by a random delay (up to 3s), wait for both to reach
 * the no-start state and start them again.
 */
int twoNodeFailure(F_ARGS){
  myRandom48Init((long)NdbTick_CurrentMillisecond());
  int randomId = myRandom48(_restarter.getNumDbNodes());
  int n[2];
  n[0] = _restarter.getDbNodeId(randomId);
  // Pick the second victim from another node group so the cluster survives
  n[1] = _restarter.getRandomNodeOtherNodeGroup(n[0], rand());
  g_info << _restart->m_name << ": node = "<< n[0] << endl;

  // NOTE(review): presumably makes the nodes stay in no-start after the
  // error insert instead of restarting -- confirm the DumpStateOrd semantics
  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
  CHECK(_restarter.dumpStateOneNode(n[0], val2, 2) == 0,
        "Failed to dump");
  CHECK(_restarter.dumpStateOneNode(n[1], val2, 2) == 0,
        "Failed to dump");
  CHECK(_restarter.insertErrorInNode(n[0], 9999) == 0,
        "Could not restart node "<< n[0]);

  // Create random value, max 3 secs
  int max = 3000;
  int ms = (myRandom48(max)) + 1;
  g_info << "Waiting for " << ms << "(" << max
         << ") ms " << endl;
  NdbSleep_MilliSleep(ms);

  g_info << _restart->m_name << ": node = "<< n[1] << endl;
  CHECK(_restarter.insertErrorInNode(n[1], 9999) == 0,
        "Could not restart node "<< n[1]);

  CHECK(_restarter.waitNodesNoStart(n, 2) == 0,
        "Failed to wait nostart");

  _restarter.startNodes(n, 2);

  return NDBT_OK;
}
/*
 * Crash the current master and the next master candidate from a
 * different node group within a random (max 3 s) interval, wait for
 * both to reach the not-started state, then start them again.
 */
int twoMasterNodeFailure(F_ARGS){
int n[2];
n[0] = _restarter.getMasterNodeId();
n[1] = n[0];
/* Walk the master-succession order until a candidate in a different
 * node group is found.
 * NOTE(review): if every candidate shares n[0]'s node group (e.g. a
 * single-node-group cluster) this loop never terminates -- confirm the
 * test is only scheduled on multi-group configurations. */
do {
n[1] = _restarter.getNextMasterNodeId(n[1]);
} while(_restarter.getNodeGroup(n[0]) == _restarter.getNodeGroup(n[1]));
g_info << _restart->m_name << ": ";
g_info << "node0 = "<< n[0] << "(" << _restarter.getNodeGroup(n[0]) << ") ";
g_info << "node1 = "<< n[1] << "(" << _restarter.getNodeGroup(n[1]) << ") ";
g_info << endl;
/* RestartOnErrorInsert=1: error 9999 leaves the node in not-started
 * state instead of auto-restarting. */
int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
CHECK(_restarter.dumpStateOneNode(n[0], val2, 2) == 0,
"Failed to dump");
CHECK(_restarter.dumpStateOneNode(n[1], val2, 2) == 0,
"Failed to dump");
/* Crash the master first ... */
CHECK(_restarter.insertErrorInNode(n[0], 9999) == 0,
"Could not restart node "<< n[0]);
// Create random value, max 3 secs
int max = 3000;
int ms = (myRandom48(max)) + 1;
g_info << "Waiting for " << ms << "(" << max
<< ") ms " << endl;
NdbSleep_MilliSleep(ms);
/* ... then the new master candidate while takeover is in progress. */
g_info << _restart->m_name << ": node = "<< n[1] << endl;
CHECK(_restarter.insertErrorInNode(n[1], 9999) == 0,
"Could not restart node "<< n[1]);
CHECK(_restarter.waitNodesNoStart(n, 2) == 0,
"Failed to wait nostart");
/* NOTE(review): startNodes() return value is ignored here. */
_restarter.startNodes(n, 2);
return NDBT_OK;
}
/*
 * Fill _nodes with roughly half of the cluster's data nodes and return
 * how many were selected.  Nodes are picked as every second entry of the
 * node-id list (indices 0, 2, 4, ...), not by node-id parity.
 * TODO Check nodegroup and return one node from each.
 */
int get50PercentOfNodes(NdbRestarter& restarter,
                        int * _nodes){
  const int count = restarter.getNumDbNodes() / 2;
  assert(count <= MAX_NDB_NODES);

  int idx = 0;
  for (int slot = 0; slot < count; slot++){
    _nodes[slot] = restarter.getDbNodeId(idx);
    idx += 2;
  }
  return count;
}
/* Stop ~50% of the data nodes (nostart+abort), then start them all again. */
int fiftyPercentFail(F_ARGS){
  int victims[MAX_NDB_NODES];
  const int cnt = get50PercentOfNodes(_restarter, victims);

  /* Take each selected node down with nostart=true, abort=true. */
  for (int k = 0; k < cnt; k++){
    g_info << "Stopping node "<< victims[k] << endl;
    const int rc = _restarter.restartOneDbNode(victims[k], false, true, true);
    CHECK(rc == 0, "Could not stop node: "<< victims[k]);
  }

  CHECK(_restarter.waitNodesNoStart(victims, cnt) == 0,
        "waitNodesNoStart");

  /* Bring the whole cluster back. */
  ndbout << "Starting all nodes" << endl;
  CHECK(_restarter.startAll() == 0,
        "Could not start all nodes");
  return NDBT_OK;
}
/* Gracefully restart every node in the cluster.
 * (Name typo "Gracfeul" is part of the public interface and kept.) */
int restartAllNodesGracfeul(F_ARGS){
  g_info << _restart->m_name << endl;

  const int rc = _restarter.restartAll();
  CHECK(rc == 0, "Could not restart all nodes");
  return NDBT_OK;
}
/* Restart every node in the cluster with abort (no graceful shutdown). */
int restartAllNodesAbort(F_ARGS){
  g_info << _restart->m_name << endl;

  /* initial=false, nostart=false, abort=true */
  const int rc = _restarter.restartAll(false, false, true);
  CHECK(rc == 0, "Could not restart all nodes");
  return NDBT_OK;
}
/*
 * Crash the entire cluster with error insert 9999, wait until every node
 * is in the not-started state, then start all nodes again.
 */
int restartAllNodesError9999(F_ARGS){
  g_info << _restart->m_name << endl;
  /* RestartOnErrorInsert=1 so the crash leaves nodes in not-started state. */
  int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 } ;
  CHECK(_restarter.dumpStateAllNodes(val, 2) == 0,
        "failed to set RestartOnErrorInsert");
  CHECK(_restarter.insertErrorInAllNodes(932) == 0,
        "Failed to set error 932 (auto-restart on arbit error)");
  // Restart with error insert
  CHECK(_restarter.insertErrorInAllNodes(9999) == 0,
        "Could not restart all nodes ");
  CHECK(_restarter.waitClusterNoStart() == 0,
        "Failed to wait not started");
  /* Bug fix: the startAll() result was previously ignored, so a failed
   * cluster start still reported NDBT_OK. */
  CHECK(_restarter.startAll() == 0,
        "startAll failed");
  return NDBT_OK;
}
/*
 * Stop ~50% of the data nodes (nostart+abort), wait for them to reach the
 * not-started state, sleep a random 1..120 s, then restart the cluster.
 */
int fiftyPercentStopAndWait(F_ARGS){
  int victims[MAX_NDB_NODES];
  const int cnt = get50PercentOfNodes(_restarter, victims);

  /* Take each selected node down with nostart=true, abort=true. */
  for (int k = 0; k < cnt; k++){
    g_info << "Stopping node "<< victims[k] << endl;
    const int rc = _restarter.restartOneDbNode(victims[k], false, true, true);
    CHECK(rc == 0, "Could not stop node: "<< victims[k]);
  }

  CHECK(_restarter.waitNodesNoStart(victims, cnt) == 0,
        "waitNodesNoStart");

  /* Hold the cluster in the degraded state for a random while. */
  const int maxSecs = 120;
  const int secs = myRandom48(maxSecs) + 1;
  g_info << "Waiting for " << secs << "(" << maxSecs
         << ") secs " << endl;
  NdbSleep_SecSleep(secs);

  /* Graceful full-cluster restart brings everything back. */
  CHECK(_restarter.restartAll() == 0,
        "Could not restart all nodes");
  g_info << _restart->m_name << endl;
  return NDBT_OK;
}
/*
 * NDB kernel error-insert codes used by restartNFDuringNR() to provoke a
 * node failure while the node is performing node recovery.  The entries
 * below the "LCP" marker target local-checkpoint handling.
 * NOTE(review): the meaning of each individual code is defined by the NDB
 * kernel sources -- verify there before changing the list.
 */
int
NFDuringNR_codes[] = {
7121,
5027,
7172,
6000,
6001,
7171,
7130,
7133,
7138,
7154,
7144,
5026,
7139,
7132,
5045,
7195, 7196,7197,7198,7199,
//LCP
8000,
8001,
5010,
7022,
7024,
7016,
7017,
5002
};
/*
 * For each code in NFDuringNR_codes: stop a random node (nostart+abort),
 * arm the code as an error insert together with RestartOnErrorInsert=1,
 * start the node so the insert fires during its recovery, then start it
 * once more and wait for it to rejoin the cluster.
 */
int restartNFDuringNR(F_ARGS){
  myRandom48Init((long)NdbTick_CurrentMillisecond());
  int i;
  const int sz = sizeof(NFDuringNR_codes)/sizeof(NFDuringNR_codes[0]);
  for(i = 0; i<sz; i++){
    int randomId = myRandom48(_restarter.getNumDbNodes());
    int nodeId = _restarter.getDbNodeId(randomId);
    int error = NFDuringNR_codes[i];

    g_err << _restart->m_name << ": node = " << nodeId
          << " error code = " << error << endl;

    /* Stop the victim with nostart+abort so it waits for a start order. */
    CHECK(_restarter.restartOneDbNode(nodeId, false, true, true) == 0,
          "Could not restart node "<< nodeId);
    CHECK(_restarter.waitNodesNoStart(&nodeId, 1) == 0,
          "waitNodesNoStart failed");

    /* Arm the error insert; RestartOnErrorInsert=1 keeps the node in
       not-started state when the insert crashes it. */
    int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 } ;
    CHECK(_restarter.dumpStateOneNode(nodeId, val, 2) == 0,
          "failed to set RestartOnErrorInsert");
    CHECK(_restarter.insertErrorInNode(nodeId, error) == 0,
          "failed to set error insert");

    /* Start the node; the insert should fire during node recovery. */
    CHECK(_restarter.startNodes(&nodeId, 1) == 0,
          "failed to start node");
    NdbSleep_SecSleep(3);
    CHECK(_restarter.waitNodesNoStart(&nodeId, 1) == 0,
          "waitNodesNoStart failed");

    /* Clean start; wait for the node to rejoin. */
    CHECK(_restarter.startNodes(&nodeId, 1) == 0,
          "failed to start node");
    CHECK(_restarter.waitNodesStarted(&nodeId, 1) == 0,
          "waitNodesStarted failed");
  }
  return NDBT_OK;

#if 0
  /*
   * Bug fix: everything below was unreachable dead code sitting after the
   * return above (a second test variant that additionally crashes a node
   * in another node group during recovery, gated on USER=ejonore).  It is
   * now under #if 0 so it no longer has to compile; re-enable it
   * deliberately if the variant is wanted again.
   */
  if(_restarter.getNumDbNodes() < 4)
    return NDBT_OK;

  char buf[256];
  if(NdbEnv_GetEnv("USER", buf, 256) == 0 || strcmp(buf, "ejonore") != 0)
    return NDBT_OK;

  for(i = 0; i<sz && !ctx->isTestStopped(); i++){
    const int randomId = myRandom48(_restarter.getNumDbNodes());
    int nodeId = _restarter.getDbNodeId(randomId);
    const int error = NFDuringNR_codes[i];
    const int masterNodeId = _restarter.getMasterNodeId();
    CHECK(masterNodeId > 0, "getMasterNodeId failed");
    int crashNodeId = 0;
    do {
      int rand = myRandom48(1000);
      crashNodeId = _restarter.getRandomNodeOtherNodeGroup(nodeId, rand);
    } while(crashNodeId == masterNodeId);
    CHECK(crashNodeId > 0, "getMasterNodeId failed");
    g_info << _restart->m_name << " restarting node = " << nodeId
           << " error code = " << error
           << " crash node = " << crashNodeId << endl;
    CHECK(_restarter.restartOneDbNode(nodeId, false, true, true) == 0,
          "Could not restart node "<< nodeId);
    CHECK(_restarter.waitNodesNoStart(&nodeId, 1) == 0,
          "waitNodesNoStart failed");
    int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
    CHECK(_restarter.dumpStateOneNode(crashNodeId, val, 2) == 0,
          "failed to set RestartOnErrorInsert");
    CHECK(_restarter.insertErrorInNode(crashNodeId, error) == 0,
          "failed to set error insert");
    CHECK(_restarter.startNodes(&nodeId, 1) == 0,
          "failed to start node");
    CHECK(_restarter.waitClusterStarted() == 0,
          "waitClusterStarted failed");
  }
  return NDBT_OK;
#endif
}
/* Error-insert codes that crash the *master* node at various points of
 * the local-checkpoint protocol (used by restartNodeDuringLCP). */
int
NRDuringLCP_Master_codes[] = {
7009, // Insert system error in master when local checkpoint is idle.
7010, // Insert system error in master when local checkpoint is in the
// state clcpStatus = CALCULATE_KEEP_GCI.
7013, // Insert system error in master when local checkpoint is in the
// state clcpStatus = COPY_GCI before sending COPY_GCIREQ.
7014, // Insert system error in master when local checkpoint is in the
// state clcpStatus = TC_CLOPSIZE before sending TC_CLOPSIZEREQ.
7015, // Insert system error in master when local checkpoint is in the
// state clcpStatus = START_LCP_ROUND before sending START_LCP_ROUND.
7019, // Insert system error in master when local checkpoint is in the
// state clcpStatus = IDLE before sending CONTINUEB(ZCHECK_TC_COUNTER).
7075, // Master. Don't send any LCP_FRAG_ORD(last=true)
// And crash when all have "not" been sent
7021, // Crash in master when receiving START_LCP_REQ
7023, // Crash in master when sending START_LCP_CONF
7025, // Crash in master when receiving LCP_FRAG_REP
7026, // Crash in master when changing state to LCP_TAB_COMPLETED
7027 // Crash in master when changing state to LCP_TAB_SAVED
};
/* Error-insert codes that crash a *non-master* node at various points of
 * the local-checkpoint protocol (used by restartNodeDuringLCP). */
int
NRDuringLCP_NonMaster_codes[] = {
7020, // Insert system error in local checkpoint participant at reception
// of COPY_GCIREQ.
8000, // Crash particpant when receiving TCGETOPSIZEREQ
8001, // Crash particpant when receiving TC_CLOPSIZEREQ
5010, // Crash any when receiving LCP_FRAGORD
7022, // Crash in !master when receiving START_LCP_REQ
7024, // Crash in !master when sending START_LCP_CONF
7016, // Crash in !master when receiving LCP_FRAG_REP
7017, // Crash in !master when changing state to LCP_TAB_COMPLETED
7018 // Crash in !master when changing state to LCP_TAB_SAVED
};
/*
 * Crash nodes in the middle of a local checkpoint (LCP) and let them
 * restart.  First iterates the master-targeted error codes
 * (NRDuringLCP_Master_codes), then the non-master codes
 * (NRDuringLCP_NonMaster_codes).  Between rounds the LCP interval is
 * forced to its minimum so the next LCP starts quickly.
 */
int restartNodeDuringLCP(F_ARGS) {
int i;
// Master
/* Force minimum time between LCPs on all nodes so the error inserts
 * (which fire inside LCP handling) are hit promptly. */
int val = DumpStateOrd::DihMinTimeBetweenLCP;
CHECK(_restarter.dumpStateAllNodes(&val, 1) == 0,
"Failed to set LCP to min value"); // Set LCP to min val
int sz = sizeof(NRDuringLCP_Master_codes)/
sizeof(NRDuringLCP_Master_codes[0]);
for(i = 0; i<sz; i++) {
int error = NRDuringLCP_Master_codes[i];
int masterNodeId = _restarter.getMasterNodeId();
CHECK(masterNodeId > 0, "getMasterNodeId failed");
ndbout << _restart->m_name << " restarting master node = " << masterNodeId
<< " error code = " << error << endl;
{
/* NOTE(review): this inner `val` shadows the outer one (different
 * contents); intentional but easy to misread. */
int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
CHECK(_restarter.dumpStateAllNodes(val, 2) == 0,
"failed to set RestartOnErrorInsert");
}
/* The insert crashes the master inside LCP handling; with
 * RestartOnErrorInsert=1 it ends up in not-started state. */
CHECK(_restarter.insertErrorInNode(masterNodeId, error) == 0,
"failed to set error insert");
CHECK(_restarter.waitNodesNoStart(&masterNodeId, 1, 300) == 0,
"failed to wait no start");
CHECK(_restarter.startNodes(&masterNodeId, 1) == 0,
"failed to start node");
CHECK(_restarter.waitClusterStarted(300) == 0,
"waitClusterStarted failed");
{
/* Re-apply min LCP interval to the restarted node. */
int val = DumpStateOrd::DihMinTimeBetweenLCP;
CHECK(_restarter.dumpStateOneNode(masterNodeId, &val, 1) == 0,
"failed to set error insert");
}
}
// NON-Master
sz = sizeof(NRDuringLCP_NonMaster_codes)/
sizeof(NRDuringLCP_NonMaster_codes[0]);
for(i = 0; i<sz; i++) {
int error = NRDuringLCP_NonMaster_codes[i];
/* Pick any node except the current master. */
int nodeId = getRandomNodeId(_restarter);
int masterNodeId = _restarter.getMasterNodeId();
CHECK(masterNodeId > 0, "getMasterNodeId failed");
while (nodeId == masterNodeId) {
nodeId = getRandomNodeId(_restarter);
}
ndbout << _restart->m_name << " restarting non-master node = " << nodeId
<< " error code = " << error << endl;
int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
CHECK(_restarter.dumpStateAllNodes(val, 2) == 0,
"failed to set RestartOnErrorInsert");
CHECK(_restarter.insertErrorInNode(nodeId, error) == 0,
"failed to set error insert");
CHECK(_restarter.waitNodesNoStart(&nodeId, 1, 300) == 0,
"failed to wait no start");
CHECK(_restarter.startNodes(&nodeId, 1) == 0,
"failed to start node");
CHECK(_restarter.waitClusterStarted(300) == 0,
"waitClusterStarted failed");
{
/* Re-apply min LCP interval to the restarted node. */
int val = DumpStateOrd::DihMinTimeBetweenLCP;
CHECK(_restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
"failed to set error insert");
}
}
return NDBT_OK;
}
/*
 * Trigger NdbcntrTestStopOnError on a random node and verify the cluster
 * is (still/again) fully started afterwards.
 * NOTE(review): the do { } while (false) wrapper runs exactly once; it
 * may exist for a CHECK variant that uses `break` on failure -- confirm
 * the CHECK macro's definition before removing it.
 */
int stopOnError(F_ARGS){
myRandom48Init((long)NdbTick_CurrentMillisecond());
int randomId = myRandom48(_restarter.getNumDbNodes());
int nodeId = _restarter.getDbNodeId(randomId);
do {
g_info << _restart->m_name << ": node = " << nodeId
<< endl;
CHECK(_restarter.waitClusterStarted(300) == 0,
"waitClusterStarted failed");
/* Ask the node to exercise its stop-on-error path. */
int val = DumpStateOrd::NdbcntrTestStopOnError;
CHECK(_restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
"failed to set NdbcntrTestStopOnError");
NdbSleep_SecSleep(3);
/* The cluster is expected to recover to fully-started. */
CHECK(_restarter.waitClusterStarted(300) == 0,
"waitClusterStarted failed");
} while (false);
return NDBT_OK;
}
/* Return the node id of a randomly chosen data node.
 * NOTE(review): the RNG is re-seeded from the current millisecond on
 * every call, so calls within the same millisecond return the same node
 * (see the while-loop in restartNodeDuringLCP, which can spin until the
 * clock ticks).  Consider seeding once instead -- verify callers first. */
int getRandomNodeId(NdbRestarter& _restarter) {
myRandom48Init((long)NdbTick_CurrentMillisecond());
int randomId = myRandom48(_restarter.getNumDbNodes());
int nodeId = _restarter.getDbNodeId(randomId);
return nodeId;
}
| gpl-2.0 |
davidmueller13/Audax_Kernel | drivers/net/wireless/ipsecdrvtl/bq.c | 87 | 23349 | /*
'src_nic_poclink.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Sun Nov 16 20:01:26 2014
*/
#include"cobf.h"
#ifdef _WIN32
#if defined( UNDER_CE) && defined( bb341) || ! defined( bb343)
#define bb335 1
#define bb351 1
#else
#define bb340 bb348
#define bb331 1
#define bb345 1
#endif
#define bb333 1
#include"uncobf.h"
#include<ndis.h>
#include"cobf.h"
#ifdef UNDER_CE
#include"uncobf.h"
#include<ndiswan.h>
#include"cobf.h"
#endif
#include"uncobf.h"
#include<stdio.h>
#include<basetsd.h>
#include"cobf.h"
bba bbs bbl bbf, *bb1;bba bbs bbe bbq, *bb93;bba bb135 bb123, *bb358;
bba bbs bbl bb39, *bb72;bba bbs bb135 bbk, *bb59;bba bbe bbu, *bb134;
bba bbh bbf*bb89;
#ifdef bb311
bba bbd bb60, *bb122;
#endif
#else
#include"uncobf.h"
#include<linux/module.h>
#include<linux/ctype.h>
#include<linux/time.h>
#include<linux/slab.h>
#include"cobf.h"
#ifndef bb116
#define bb116
#ifdef _WIN32
#include"uncobf.h"
#include<wtypes.h>
#include"cobf.h"
#else
#ifdef bb112
#include"uncobf.h"
#include<linux/types.h>
#include"cobf.h"
#else
#include"uncobf.h"
#include<stddef.h>
#include<sys/types.h>
#include"cobf.h"
#endif
#endif
#ifdef _WIN32
bba bb119 bb215;
#else
bba bbe bbu, *bb134, *bb282;
#define bb202 1
#define bb201 0
bba bb275 bb292, *bb279, *bb217;bba bbe bb246, *bb262, *bb285;bba bbs
bbq, *bb93, *bb253;bba bb5 bb266, *bb288;bba bbs bb5 bb265, *bb284;
bba bb5 bb113, *bb211;bba bbs bb5 bb63, *bb287;bba bb63 bb219, *bb240
;bba bb63 bb252, *bb261;bba bb113 bb119, *bb251;bba bb256 bb259;bba
bb277 bb123;bba bb218 bb83;bba bb117 bb114;bba bb117 bb206;
#ifdef bb208
bba bb209 bb39, *bb72;bba bb250 bbk, *bb59;bba bb205 bbd, *bb29;bba
bb207 bb56, *bb120;
#else
bba bb232 bb39, *bb72;bba bb272 bbk, *bb59;bba bb276 bbd, *bb29;bba
bb239 bb56, *bb120;
#endif
bba bb39 bbf, *bb1, *bb249;bba bbk bb221, *bb264, *bb216;bba bbk bb269
, *bb271, *bb227;bba bbd bb60, *bb122, *bb273;bba bb83 bb37, *bb233, *
bb223;bba bbd bb230, *bb255, *bb260;bba bb114 bb248, *bb236, *bb237;
bba bb56 bb244, *bb289, *bb278;
#define bb141 bbb
bba bbb*bb210, *bb77;bba bbh bbb*bb286;bba bbl bb242;bba bbl*bb243;
bba bbh bbl*bb82;
#if defined( bb112)
bba bbe bb111;
#endif
bba bb111 bb19;bba bb19*bb257;bba bbh bb19*bb190;
#if defined( bb238) || defined( bb214)
bba bb19 bb36;bba bb19 bb118;
#else
bba bbl bb36;bba bbs bbl bb118;
#endif
bba bbh bb36*bb234;bba bb36*bb224;bba bb60 bb274, *bb281;bba bbb*
bb106;bba bb106*bb235;
#define bb213( bb35) bbi bb35##__ { bbe bb225; }; bba bbi bb35##__ * \
bb35
bba bbi{bb37 bb189,bb212,bb222,bb263;}bb220, *bb254, *bb283;bba bbi{
bb37 bb8,bb193;}bb245, *bb270, *bb231;bba bbi{bb37 bb228,bb268;}bb267
, *bb291, *bb241;
#endif
bba bbh bbf*bb89;
#endif
bba bbf bb101;
#define IN
#define OUT
#ifdef _DEBUG
#define bb146( bbc) bb32( bbc)
#else
#define bb146( bbc) ( bbb)( bbc)
#endif
bba bbe bb161, *bb173;
#define bb226 0
#define bb318 1
#define bb300 2
#define bb320 3
#define bb347 4
bba bbe bb357;bba bbb*bb121;
#endif
#ifdef _WIN32
#ifndef UNDER_CE
#define bb31 bb344
#define bb43 bb339
bba bbs bb5 bb31;bba bb5 bb43;
#endif
#else
#endif
#ifdef _WIN32
bbb*bb128(bb31 bb47);bbb bb109(bbb* );bbb*bb137(bb31 bb159,bb31 bb47);
#else
#define bb128( bbc) bb147(1, bbc, bb140)
#define bb109( bbc) bb342( bbc)
#define bb137( bbc, bbn) bb147( bbc, bbn, bb140)
#endif
#ifdef _WIN32
#define bb32( bbc) bb346( bbc)
#else
#ifdef _DEBUG
bbe bb144(bbh bbl*bb96,bbh bbl*bb25,bbs bb229);
#define bb32( bbc) ( bbb)(( bbc) || ( bb144(# bbc, __FILE__, __LINE__ \
)))
#else
#define bb32( bbc) (( bbb)0)
#endif
#endif
bb43 bb302(bb43*bb322);
#ifndef _WIN32
bbe bb327(bbh bbl*bbg);bbe bb324(bbh bbl*bb20,...);
#endif
#ifdef _WIN32
bba bb355 bb97;
#define bb139( bbc) bb330( bbc)
#define bb142( bbc) bb350( bbc)
#define bb133( bbc) bb356( bbc)
#define bb132( bbc) bb332( bbc)
#else
bba bb337 bb97;
#define bb139( bbc) ( bbb)( * bbc = bb334( bbc))
#define bb142( bbc) (( bbb)0)
#define bb133( bbc) bb349( bbc)
#define bb132( bbc) bb354( bbc)
#endif
#ifdef UNDER_CE
#define bb1916 64
#endif
#define bb951 bb53(0x0800)
#define bb1141 bb53(0x0806)
#define bb952 bb53(0x01f4)
#define bb948 bb53(0x1194)
#define bb1104 bb53(0x4000)
#define bb1140 bb53(0x2000)
#define bb1111 bb53(0x1FFF)
#define bb1066( bb8) (( bb8) & bb53(0x2000 | 0x1FFF))
#define bb1025( bb8) ((( bb196( bb8)) & 0x1FFF) << 3)
#define bb979( bb8) ((( bb8) & bb53(0x1FFF)) == 0)
#define bb493( bb8) (( bb8) & bb53(0x2000))
#define bb1030( bb8) (!( bb493( bb8)))
#pragma pack(push, 1)
bba bbi{bbf bb371[6 ];bbf bb1011[6 ];bbk bb373;}bb364, *bb381;bba bbi{
bbf bb445[6 ];bbk bb373;}bb1085, *bb1093;bba bbi{bbf bb958:4 ;bbf bb1090
:4 ;bbf bb1073;bbk bb370;bbk bb895;bbk bb576;bbf bb1004;bbf bb247;bbk
bb612;bbd bb310;bbd bb204;}bb326, *bb315;bba bbi{bbk bb1035;bbk bb1043
;bbf bb1038;bbf bb1046;bbk bb1060;bbf bb1084[6 ];bbd bb1039;bbf bb1037
[6 ];bbd bb1064;}bb1074, *bb1080;
#pragma pack(pop)
bba bbi{bbk bb280;bbk bb425;bbk bb1009;bbk bb319;}bb412, *bb352;bba
bbi{bbk bb280;bbk bb579;bbd bb546;bbd bb924;bbf bb92;bbf bb170;bbk
bb158;bbk bb319;bbk bb1020;}bb490, *bb313;bba bbi{bbf bb1079;bbf
bb1069;bbf bb1063;bbf bb1040;bbd bb1062;bbk bb1075;bbk bb374;bbd
bb1033;bbd bb1056;bbd bb1065;bbd bb1059;bbf bb1088[16 ];bbf bb1076[64 ]
;bbf bb25[128 ];bbf bb1034[64 ];}bb1083, *bb1072;bba bbi{bbd bb310;bbd
bb204;bbf bb917;bbf bb247;bbk bb915;}bb601, *bb564;
#if defined( _WIN32)
#define bb53( bbc) (((( bbc) & 0XFF00) >> 8) | ((( bbc) & 0X00FF) << \
8))
#define bb196( bbc) ( bb53( bbc))
#define bb443( bbc) (((( bbc) & 0XFF000000) >> 24) | ((( bbc) & \
0X00FF0000) >> 8) | ((( bbc) & 0X0000FF00) << 8) | ((( bbc) & \
0X000000FF) << 24))
#define bb494( bbc) ( bb443( bbc))
#endif
bbk bb930(bbh bbb*bb301);bbk bb680(bbh bbb*bb513,bbe bb21);bba bb83
bb4;bb10{bb98=0 ,bb363=-12000 ,bb338=-11999 ,bb377=-11998 ,bb663=-11997 ,
bb704=-11996 ,bb717=-11995 ,bb683=-11994 ,bb781=-11992 ,bb784=-11991 ,
bb716=-11990 ,bb730=-11989 ,bb833=-11988 ,bb633=-11987 ,bb887=-11986 ,
bb635=-11985 ,bb692=-11984 ,bb622=-11983 ,bb652=-11982 ,bb841=-11981 ,
bb903=-11980 ,bb672=-11979 ,bb815=-11978 ,bb835=-11977 ,bb584=-11976 ,
bb853=-11975 ,bb758=-11960 ,bb629=-11959 ,bb681=-11500 ,bb706=-11499 ,
bb641=-11498 ,bb789=-11497 ,bb876=-11496 ,bb757=-11495 ,bb678=-11494 ,
bb768=-11493 ,bb837=-11492 ,bb902=-11491 ,bb761=-11490 ,bb728=-11489 ,
bb664=-11488 ,bb685=-11487 ,bb848=-11486 ,bb660=-11485 ,bb661=-11484 ,
bb901=-11483 ,bb898=-11482 ,bb733=-11481 ,bb838=-11480 ,bb754=-11479 ,
bb643=-11478 ,bb752=-11477 ,bb755=-11476 ,bb624=-11475 ,bb799=-11474 ,
bb772=-11473 ,bb800=-11472 ,bb638=-11460 ,bb821=-11450 ,bb699=-11449 ,
bb700=-11448 ,bb727=-11447 ,bb832=-11446 ,bb766=-11445 ,bb886=-11444 ,
bb628=-11443 ,bb801=-11440 ,bb779=-11439 ,bb774=-11438 ,bb791=-11437 ,
bb861=-11436 ,bb662=-11435 ,bb804=-11420 ,bb531=-11419 ,bb565=-11418 ,
bb677=-11417 ,bb858=-11416 ,bb809=-11415 ,bb786=-11414 ,bb726=-11413 ,
bb854=-11412 ,bb634=-11411 ,bb671=-11410 ,bb767=-11409 ,bb899=-11408 ,
bb697=-11407 ,bb906=-11406 ,bb657=-11405 ,bb711=-11404 ,bb656=-11403 ,
bb751=-11402 ,bb738=-11401 ,bb670=-11400 ,bb882=-11399 ,bb780=-11398 ,
bb753=-11397 ,bb675=-11396 ,bb630=-11395 ,bb829=-11394 ,bb856=-11393 ,
bb796=-11392 ,bb871=-11391 ,bb828=-11390 ,bb723=-11389 ,bb842=-11388 ,
bb703=-11387 ,bb900=-11386 ,bb693=-11385 ,bb694=-11384 ,bb803=-11383 ,
bb646=-11382 ,bb892=-11381 ,bb731=-11380 ,bb776=-11379 ,bb805=-11378 ,
bb742=-11377 ,bb794=-11376 ,bb748=-11375 ,bb810=-11374 ,bb808=-11373 ,
bb676=-11372 ,bb862=-11371 ,bb896=-11370 ,bb625=-11369 ,bb839=-11368 ,
bb746=-11367 ,bb707=-11366 ,bb714=-11365 ,bb843=-11364 ,bb647=-11363 ,
bb387=-11350 ,bb885=bb387,bb705=-11349 ,bb658=-11348 ,bb686=-11347 ,bb642
=-11346 ,bb644=-11345 ,bb877=-11344 ,bb684=-11343 ,bb881=-11342 ,bb826=-
11341 ,bb750=-11340 ,bb689=-11339 ,bb395=-11338 ,bb792=-11337 ,bb688=bb395
,bb777=-11330 ,bb807=-11329 ,bb844=-11328 ,bb775=-11327 ,bb749=-11326 ,
bb648=-11325 ,bb811=-11324 ,bb640=-11320 ,bb814=-11319 ,bb859=-11318 ,
bb715=-11317 ,bb627=-11316 ,bb713=-11315 ,bb620=-11314 ,bb718=-11313 ,
bb637=-11312 ,bb639=-11300 ,bb771=-11299 ,bb745=-11298 ,bb696=-11297 ,
bb668=-11296 ,bb797=-11295 ,bb894=-11294 ,bb653=-11293 ,bb890=-11292 ,
bb666=-11291 ,bb619=-11290 ,bb709=-11289 ,bb874=-11288 ,bb631=-11287 ,
bb787=-11286 ,bb651=-11285 ,bb813=-11284 ,bb872=-11283 ,bb698=-11282 ,
bb695=-11281 ,bb708=-11280 ,bb818=-11279 ,bb732=-11250 ,bb830=-11249 ,
bb849=-11248 ,bb736=-11247 ,bb655=-11246 ,bb701=-11245 ,bb868=-11244 ,
bb743=-11243 ,bb621=-11242 ,bb802=-11240 ,bb649=-11239 ,bb724=-11238 ,
bb783=-11237 ,bb820=-11150 ,bb691=-11100 ,bb836=-11099 ,bb744=-11098 ,
bb864=-11097 ,bb793=-11096 ,bb785=-11095 ,bb759=-11094 ,bb617=-11093 ,
bb825=-11092 ,bb673=-11091 ,bb762=-11090 ,bb870=-11089 ,bb845=-11088 ,
bb908=-11087 ,bb831=-11086 ,bb712=-11085 ,bb893=-11050 ,bb740=-11049 ,
bb682=-10999 ,bb897=-10998 ,bb888=-10997 ,bb741=-10996 ,bb904=-10995 ,
bb674=-10994 ,bb880=-10993 ,bb846=-10992 ,bb636=-10991 ,bb618=-10990 ,
bb770=-10989 ,bb616=-10988 ,bb883=-10979 ,bb857=-10978 ,bb875=-10977 ,
bb878=-10976 ,bb734=-10975 ,bb798=-10974 ,};bba bbi bb452{bb1 bb74;bbd
bb125;bbd bb181;bbi bb452*bb94;}bbx;bb4 bb464(bbx*bb669,bbd bb911,bbx
*bb869,bbd bb823,bbd bb541);bb4 bb524(bbx*bbj,bbd bb92,bbh bbb*bb96,
bbd bb47);bb4 bb575(bbx*bbj,bbd bb92,bbb*bb130,bbd bb47);bbu bb819(
bbx*bbj,bbd bb92,bbh bbb*bb96,bbd bb47);bb4 bb596(bbx*bb86,bbf bb102,
bbx*bb58);bb4 bb679(bbx*bb86,bbu bb177,bbf*bb410);bb4 bb940(bbx*bb58,
bbf*bb386);bb4 bb957(bbh bbf*bb386,bbx*bb58);bb4 bb537(bbx*bb51,bbf
bb102,bbd*bb943);bb4 bb925(bbx*bb86,bbf bb102,bbf bb410,bbx*bb58);bbd
bb519(bbx*bb51);bbk bb544(bbx*bb51);bbb bb522(bbk bb151,bbx*bb51);bbb
bb540(bbx*bb51);bbb bb977(bbx*bb51,bbd*bb27);bbb bb1002(bbx*bb51,bbd*
bb27);bbb bb994(bbx*bb51,bbd bb27);bbb bb927(bbx*bb51,bbd bb27);bbb
bb985(bbx*bb51);bbu bb1018(bbf*bb51);bba bbi bb1019*bb989;bba bbi
bb1027*bb1029;bba bbi bb993*bb1026;bba bbi bb1001*bb1016;bba bbi
bb1024*bb1022;bba bbi bb990*bb987;bba bb10{bb551=0 ,bb578=1 ,bb589=2 ,
bb756=3 ,bb587=4 ,bb563=5 ,bb572=6 ,bb573=7 ,bb592=9 ,}bb421;bba bb10{bb615
=0 ,bb991,bb600,bb1014,bb910,bb926,bb909,bb920,bb928,bb929,bb918,}
bb517;
#pragma pack(push, 8)
#ifdef _MSC_VER
#pragma warning (disable:4200)
#endif
bba bbf bb178[4 ];bba bb10{bb1652=0 ,bb1467=1 ,}bb1394;bba bb10{bb1529=0
,bb1721=1 ,bb1557=2 ,bb1440=3 ,bb1660=4 ,bb1494=5 ,bb1636=6 ,bb1517=7 ,
bb1609=8 ,bb1521=9 ,bb1678=10 ,bb1510=11 ,bb1697=12 ,bb1713=13 ,bb1720=14 ,
bb1424=15 ,bb1453=16 ,bb1395=17 ,bb1602=18 ,bb1692=19 ,bb1643=20 ,bb1495=21
,bb1508=22 ,bb1475=23 ,bb1607=24 ,bb1608=25 ,bb1449=26 ,bb1581=27 ,bb1376=
28 ,bb1708=29 ,bb1688=30 ,bb1632=16300 ,bb1616=16301 ,bb1729=16384 ,bb1538=
24576 ,bb1465=24577 ,bb1439=24578 ,bb1480=34793 ,bb1382=40500 ,}bb626;bba
bb10{bb1462=0 ,bb1526=1 ,bb1456=2 ,bb1426=3 ,bb1701=4 ,bb1388=5 ,bb1671=6 ,
bb1476=7 ,bb1530=8 ,bb1400=9 ,bb1445=21 ,bb1490=22 ,bb1500=23 ,bb1447=24 ,
bb1542=25 ,bb1512=26 ,bb1463=27 ,bb1384=28 ,bb1481=29 ,bb1491=80 ,}bb632;
bba bb10{bb1639=0 ,bb1700=1 ,bb1552=2 ,bb1486=3 ,bb1523=4 ,}bb1663;bba bb10
{bb1687=0 ,bb1349=1 ,bb1167=2 ,bb1226=3 ,bb1290=4 ,bb1050=61440 ,bb1359=
61441 ,bb1117=61443 ,bb1301=61444 ,}bb473;bba bb10{bb1702=0 ,bb1498=1 ,
bb1563=2 ,}bb1681;bba bb10{bb1391=0 ,bb1727,bb1441,bb1454,bb1567,bb1499
,bb1637,bb1466,bb1606,bb1496,bb1397,bb1696,}bb729;bba bb10{bb1677=0 ,
bb1365=2 ,bb1327=3 ,bb1377=4 ,bb1321=9 ,bb1298=12 ,bb1363=13 ,bb1310=14 ,
bb1350=249 ,}bb623;bba bb10{bb1360=0 ,bb1300=1 ,bb1286=2 ,bb1429=3 ,bb1640
=4 ,bb1358=5 ,bb1330=12 ,bb1319=13 ,bb1369=14 ,bb1287=61440 ,}bb481;bba bb10
{bb1294=1 ,bb1308=2 ,bb1338=3 ,bb1541=4 ,bb1550=5 ,bb1450=6 ,bb1430=7 ,
bb1471=8 ,bb1455=9 ,bb1540=10 ,bb1304=11 ,bb396=12 ,bb1339=13 ,bb394=240 ,
bb1345=(128 <<16 )|bb394,bb1342=(192 <<16 )|bb394,bb1329=(256 <<16 )|bb394,
bb1303=(128 <<16 )|bb396,bb1295=(192 <<16 )|bb396,bb1366=(256 <<16 )|bb396,
}bb687;bba bb10{bb1297=0 ,bb1505=1 ,bb1368=2 ,bb1328=3 ,bb1460=4 ,}bb884;
bba bb10{bb1437=0 ,bb1574=1 ,bb1192=2 ,bb603=3 ,bb1235=4 ,}bb710;bba bb10{
bb1577=0 ,bb1528=1 ,bb1407=2 ,bb1473=5 ,bb1710=7 ,}bb488;bba bb10{bb1427=0
,bb1518=1 ,bb1603=2 ,bb1714=3 ,bb1484=4 ,bb1685=5 ,bb1648=6 ,bb391=7 ,bb1545
=65001 ,bb399=240 ,bb1487=(128 <<16 )|bb399,bb1506=(192 <<16 )|bb399,bb1515
=(256 <<16 )|bb399,bb1544=(128 <<16 )|bb391,bb1560=(192 <<16 )|bb391,bb1617
=(256 <<16 )|bb391,}bb795;bba bb10{bb1719=0 ,bb1461=1 ,bb1662=2 ,bb1573=3 ,
bb1474=4 ,bb1531=5 ,bb1568=6 ,bb1646=65001 ,}bb879;bba bb10{bb1679=0 ,
bb1527=1 ,bb1661=2 ,bb1555=3 ,bb1657=4 ,bb1613=5 ,bb1558=64221 ,bb1626=
64222 ,bb1659=64223 ,bb1675=64224 ,bb1711=65001 ,bb1683=65002 ,bb1553=
65003 ,bb1442=65004 ,bb1723=65005 ,bb1489=65006 ,bb1514=65007 ,bb1479=
65008 ,bb1712=65009 ,bb1478=65010 ,}bb891;bba bb10{bb1699=0 ,bb1416=1 ,
bb1434=2 ,}bb850;bba bb10{bb1410=0 ,bb1634=1 ,bb1635=2 ,bb1686=3 ,}bb860;
bba bb10{bb1594=0 ,bb1420=1 ,bb1436=2 ,bb1649=3 ,bb1601=4 ,bb1642=5 ,bb1504
=21 ,bb1570=6 ,bb1618=7 ,bb1537=8 ,bb1381=1000 ,}bb477;bba bb10{bb1412=0 ,
bb1667=1 ,bb1669=2 ,}bb725;bba bb10{bb1666=0 ,bb1629=1 ,bb1718=2 ,bb1438=3
,bb1472=4 ,}bb667;bba bb10{bb1532=0 ,bb1674=1 ,bb1393=1001 ,bb1715=1002 ,}
bb822;bba bb10{bb1562=0 ,bb1136=1 ,bb1078=2 ,bb1052=3 ,bb1113=4 ,bb1131=5 ,
bb1095=6 ,bb1698=100 ,bb1583=101 ,}bb480;bba bbi bb400{bb687 bb154;bb481
bb590;bb473 bb57;}bb400;bba bbi bb390{bb623 bb1352;bb481 bb590;bb473
bb57;}bb390;bba bbi bb389{bb884 bb1007;}bb389;bba bbi bb482{bb891
bb1628;bb879 bb414;bb795 bb154;bbu bb1488;bb488 bb650;}bb482;bba bbi
bb484{bbu bb610;bb400 bb308;bbu bb764;bb390 bb558;bbu bb773;bb389
bb604;bb488 bb650;}bb484;bba bbi bb446{bb178 bb954;bb178 bb1213;bb710
bb102;bb556{bbi{bb390 bb45;bbf bb552[64 ];bbf bb559[64 ];}bb558;bbi{
bb400 bb45;bbf bb1219[32 ];bbf bb1229[32 ];bbf bb552[64 ];bbf bb559[64 ];
bbf bb1203[16 ];}bb308;bbi{bb389 bb45;}bb604;}bb316;}bb446;bba bbi{bbd
bb851,bb580;bbf bb1142:1 ;bbf bb1157:1 ;bbf bb102;bbk bb437;}bb186;bba
bbi bb510{bbd bb11;bb186 bbc[64 *2 ];}bb510;
#ifdef UNDER_CE
bba bb43 bb378;
#else
bba bb83 bb378;
#endif
bba bbi bb199{bbi bb199*bb1469, *bb1389;bbd bb27;bbd bb1118;bb186
bb919[64 ];bb480 bb506;bbd bb1354;bbk bb1070;bbd bb553;bbd bb852;bbd
bb806;bbf bb489;bbf bb1332;bbf bb1130;bbd bb1031;bbd bb1383;bb378
bb569;bbk bb1279;bb446 bb402[3 ];bb378 bb1569;bbf bb1507[40 ];bbd bb595
;bbd bb1579;}bb199;bba bbi bb393{bbi bb393*bb1726;bb186 bb476;}bb393;
bba bbi bb735{bbu bb479;bbu bb489;bbd bb27;bbd bb595;bbf bb1516;bbk
bb1597;bbf*bb1549;bbd bb1422;bbf*bb1582;bbd bb1717;bbf*bb1379;bbd
bb1414;bbu bb1477;bbu bb1572;bb393*bb130;bbu bb1482;bb667 bb1522;bbd
bb1596;bb850 bb1709;bb480 bb506;bbk bb1373;bbd bb1534;bb822 bb1401;
bbd bb1650;bbd bb1622;bb729 bb1419;bbf*bb1408;bbd bb1415;bb477 bb867;
bbd bb1654;bbd bb1625;bbd bb1411;bbd bb1707;bbd bb1497;bb482*bb1539;
bbd bb1458;bb484*bb1509;bbd bb1399;bbd bb1533;bbd bb1653;}bb735;bba
bbi bb720{bbu bb479;bbd bb27;bb186 bb476;}bb720;bba bbi bb865{bb199*
bb317;bbu bb1571;bbf*bb1703;bbd bb1670;}bb865;bba bbi bb873{bbd bb27;
bb186 bb476;bbf bb1435;bbf bb1448;}bb873;bba bbi bb782{bbu bb479;bbu
bb1121;bbd bb27;bbf*bb1627;bbd bb1543;}bb782;bba bbi bb659{bbd bb27;
bbk bb1730;bbk bb1731;bbd bb151;bbf*bb48;}bb659;bba bbi bb824{bbu
bb1590;bbd bb27;bbd bb553;bbd bb852;bbd bb806;}bb824;bba bbi bb905{
bb626 bb1492;bbd bb27;bb632 bb1323;bbu bb1559;}bb905;bba bbi bb816{
bbf bb1676;bbf bb1392;bbf bb1691;bbf bb1589;bbf bb1576;bbf bb1605;bbf
bb1587;bbf bb1459;bbf bb1375;bbf bb1525;bbf bb1413;bbf bb1705;bbf
bb1728;bbf bb1409;bbf bb1405;bbf bb1443;bbf bb1621;bbf bb1385;bbf
bb1451;bbf bb511;bbf bb1551;bbf bb1614;bbf bb1535;bbf bb1690;bbf
bb1418;bbf bb1432;bbf bb1417;}bb816;bba bbi bb690{bbu bb1647;bbd bb487
;bbd bb1610;bb860 bb1428;bbk bb1633;bbu bb1520;bbu bb1565;bbu bb1655;
bbu bb1452;bbu bb1592;bbu bb1598;bbu bb1398;bbl bb1623[128 ];bbl bb1672
[128 ];bbl bb1599[128 ];bbl bb1421[256 ];bbl bb1638[128 ];bbl bb1446[128 ]
;bbd bb1593;bbf bb1566[8 ];bbf bb1406[8 ];}bb690;bba bbi bb654{bbd bb27
;bbd bb1387;}bb654;bba bbi bb855{bbd bb27;bbu bb489;}bb855;bba bbi
bb765{bbu bb1716;bbd bb513;bbd bb1161;}bb765;bba bbi bb747{bbd bb27;
bb477 bb867;bb725 bb1604;bbf*bb1580;bbd bb1591;}bb747;bba bb10{bb1404
=0 ,bb1556,bb1673,bb1386,bb1615,bb1536,bb1600,bb1680,bb1524,bb1585,
bb1586,bb1695,bb1706,bb1656,bb1402,bb1588,bb1464,bb1403,bb1624,bb1641
,}bb645;bba bbi bb1651 bb840;bba bb4( *bb1554)(bb840*bb1378,bbb*
bb1694,bb645 bb323,bbb*bb74);
#pragma pack(pop)
#ifdef _WIN32
#ifdef UNDER_CE
#define bb468 bb1704 bb599("1:")
#else
#define bb468 bb599("\\\\.\\IPSecTL")
#endif
#else
#define bb602 "ipsecdrvtl"
#define bb468 "/dev/" bb602
#ifndef bb116
#define bb116
#ifdef _WIN32
#include"uncobf.h"
#include<wtypes.h>
#include"cobf.h"
#else
#ifdef bb112
#include"uncobf.h"
#include<linux/types.h>
#include"cobf.h"
#else
#include"uncobf.h"
#include<stddef.h>
#include<sys/types.h>
#include"cobf.h"
#endif
#endif
#ifdef _WIN32
bba bb119 bb215;
#else
bba bbe bbu, *bb134, *bb282;
#define bb202 1
#define bb201 0
bba bb275 bb292, *bb279, *bb217;bba bbe bb246, *bb262, *bb285;bba bbs
bbq, *bb93, *bb253;bba bb5 bb266, *bb288;bba bbs bb5 bb265, *bb284;
bba bb5 bb113, *bb211;bba bbs bb5 bb63, *bb287;bba bb63 bb219, *bb240
;bba bb63 bb252, *bb261;bba bb113 bb119, *bb251;bba bb256 bb259;bba
bb277 bb123;bba bb218 bb83;bba bb117 bb114;bba bb117 bb206;
#ifdef bb208
bba bb209 bb39, *bb72;bba bb250 bbk, *bb59;bba bb205 bbd, *bb29;bba
bb207 bb56, *bb120;
#else
bba bb232 bb39, *bb72;bba bb272 bbk, *bb59;bba bb276 bbd, *bb29;bba
bb239 bb56, *bb120;
#endif
bba bb39 bbf, *bb1, *bb249;bba bbk bb221, *bb264, *bb216;bba bbk bb269
, *bb271, *bb227;bba bbd bb60, *bb122, *bb273;bba bb83 bb37, *bb233, *
bb223;bba bbd bb230, *bb255, *bb260;bba bb114 bb248, *bb236, *bb237;
bba bb56 bb244, *bb289, *bb278;
#define bb141 bbb
bba bbb*bb210, *bb77;bba bbh bbb*bb286;bba bbl bb242;bba bbl*bb243;
bba bbh bbl*bb82;
#if defined( bb112)
bba bbe bb111;
#endif
bba bb111 bb19;bba bb19*bb257;bba bbh bb19*bb190;
#if defined( bb238) || defined( bb214)
bba bb19 bb36;bba bb19 bb118;
#else
bba bbl bb36;bba bbs bbl bb118;
#endif
bba bbh bb36*bb234;bba bb36*bb224;bba bb60 bb274, *bb281;bba bbb*
bb106;bba bb106*bb235;
#define bb213( bb35) bbi bb35##__ { bbe bb225; }; bba bbi bb35##__ * \
bb35
bba bbi{bb37 bb189,bb212,bb222,bb263;}bb220, *bb254, *bb283;bba bbi{
bb37 bb8,bb193;}bb245, *bb270, *bb231;bba bbi{bb37 bb228,bb268;}bb267
, *bb291, *bb241;
#endif
bba bbh bbf*bb89;
#endif
#include"uncobf.h"
#include<linux/ioctl.h>
#include"cobf.h"
bba bbi{bb1 bb1346;bbd bb1322;bb1 bb1239;bbd bb1146;bbd bb448;}bb1202
;
#define bb1357 1
#endif
#pragma pack(push, 8)
bb10{bb1361=3 ,bb1355,bb1356,bb1425,};bba bbi{bbf bb104[4 ];}bb1284;bba
bbi{bbf bb104[4 ];}bb1234;bba bbi{bbd bb935;bbd bb27;}bb1269;bba bbi{
bbd bb131;bbf bb1218[8 ];}bb411;bba bb10{bb1222=0 ,bb1233,bb1251,bb1231
,bb1372}bb1252;bba bbi{bbf bb1122;bbd bb1071;bbf bb1364;}bb486;
#pragma pack(pop)
#pragma pack(push, 8)
bb10{bb1127=-5000 ,bb1108=-4000 ,bb998=-4999 ,bb988=-4998 ,bb1017=-4997 ,
bb981=-4996 ,bb1094=-4995 ,bb1087=-4994 ,bb1100=-4993 ,bb1015=-4992 ,
bb1081=-4991 };bb4 bb1133(bb4 bb1137,bbd bb1119,bbl*bb1102);bba bbi{
bb199 bb180;bbd bb1197;bbd bb1086;bbd bb1371;bbd bb1092;bbd bb1238;
bbd bb1278;bbd bb1276;bbd bb1237;bbd bb1247;bbd bb1280;bbd bb1249;bbu
bb1223;bb43 bb569,bb1158,bb1170;bbf bb371[6 ];}bb160;bba bbi bb478{bbi
bb478*bb94;bbf bb102;bbk bb1275;bbk bb1277;bbk bb1271;bbk bb1273;}
bb427;bba bbi bb778{bbi bb778*bb94;bbi bb478*bb1096;bbd bb27;bbf bb371
[6 ];}bb409;bba bb10{bb1147=0 ,bb1575,bb1042,bb1003,bb1012}bb203;bba bbi
{bbd bb383;bbd bb448;bbd bb512;bb411*bb923;bb97 bb966;}bb305;bba bbi{
bb486*bb460;bb409*bb1143;bbd bb585;bb427*bb538;bb97 bb605;bbq bb1120;
bbq bb534;bb160*bb508;bbu bb1258;bbk bb1171;bbk bb1110;bb305 bb1047;}
bb33, *bb1493;
#pragma pack(pop)
bba bbi bb983 bb1334, *bb79;bba bbi bb817{bbi bb817*bb321;bb1 bb470;
bbq bb561;bbd bb27;bbk bb437;bbq bb92;bb1 bb314;bbq bb456;bb1 bb535;
bbq bb548;bb1 bb1502;bb101 bb1367;bbf bb1307[6 ];bb101 bb964;bb101
bb1105;bb101 bb521;bb101 bb533;}bb174, *bb85;bba bbi bb863{bbi bb863*
bb94;bb174*bb321;bbd bb27;bbk bb542;bbk bb1470;bbq bb1444;bbq bb1584;
bbk bb1431;}bb1457, *bb457;bbu bb1266(bb33* *bb1215);bbb bb1285(bb33*
bbj);bb203 bb1267(bb33*bb108,bb381 bb450,bb315 bb138,bb352 bb413,
bb313 bb200);bb203 bb1246(bb33*bb108,bb381 bb450,bb315 bb138,bb352
bb413,bb313 bb200);bb203 bb1256(bb33*bb108,bb174*bb48,bb79 bb76);
bb203 bb1245(bb33*bb108,bb174*bb48,bb79 bb76);bb4 bb1243(bb33*bb108,
bb174*bb48,bbd*bb104);bb4 bb1155(bb79 bb76,bb33*bb108,bb174*bb48,
bb160*bb317,bbu bb594,bbu bb945);bba bbi bb1887{bb121 bb1908;bb121
bb1919;bb33*bb972;}bb1045, *bb1890;bbr bb1045 bb949;bbi bb983{bb121
bb1882;bbq bb1892;bbd bb1952;bb85 bb999;bb85 bb1935;bb85 bb1858;bb85
bb1891;bb85 bb1938;bb457 bb1857;bb457 bb1951;bb457 bb1903;bb97 bb1123
;bb101 bb1904;bb101 bb1945;bb101 bb1925;bb121 bb1948;bb121 bb1871;};
bbr bb79 bb1956;bbr bb97 bb1913;bbd bb1863(bbb*bb520,bbb*bb1879,bb161
*bb1135);bb161 bb1943(bb121 bb1955,bb121 bb1907,bb77 bb535,bbq bb548
,bb77 bb1116,bbq bb1106,bbq bb1124);
#ifdef UNDER_CE
#define bb591 16
#define bb1144 32
#else
#define bb591 128
#define bb1144 256
#endif
#define bb1109 bb591 *2
#define bb560 ( bb1109 * 2)
#define bb1900 bb560 * 2
#define bb1860 bb560 * 2
bbr bbq bb946;bb161 bb1782(bb60 bb980,bbb*bb40,bbq bb1091,bb122 bb1684
);bb141 bb1921(IN bb79 bb76,IN bb121 bb1915,IN bb1 bb535,IN bbq bb548
,IN bb77 bb1116,IN bbq bb1106,IN bbq bb1124);bb141 bb1905(IN bb79 bb76
);bbd bb1899(bb77 bb520,bb121 bb1917,bb77 bb1909,bbq bb1958,bb77
bb1854,bbq bb1850,bbq bb1912,bb161*bb1135);bbb bb1232(bb79 bb76,bb85*
bb543,bb85 bb48);bb85 bb1264(bb79 bb76,bb85*bb543);bbu bb1779(bb79
bb76);bbb bb1793(bb79 bb76);bb85 bb1468(bb173 bb360,bb79 bb76);bb85
bb1828(bb173 bb360,bb79 bb76);bb85 bb1772(bb173 bb360,bb79 bb76);
bb141 bb1665(bb79 bb76,bb85 bb48);bb141 bb1798(bb79 bb76,bb85 bb48);
bb141 bb1845(bb79 bb76,bb85 bb48);bbb bb1910(bb33*bbj,bbd bb294,bbh
bbf bb1185[6 ]);bbu bb1922(bb33*bbj,bbd bb294,bb409*bb428);bbb bb2036(
bb33*bbj);bbb bb2004(bb33*bbj,bbd bb294,bbh bbf bb1185[6 ],bbf bb102,
bbk bb404,bbk bb407);bbu bb2012(bb33*bbj,bbd bb294,bbf bb102,bbk bb404
,bbk bb407);bbu bb1853(bb33*bbj,bbf bb102,bbk bb404,bbk bb407);bbb
bb1987(bb33*bbj,bb427*bb538,bbq bb585);bb4 bb1777(bb33*bbj,bb486*
bb460);bbb bb2029(bb33*bbj);bbu bb1870(bb305*bbj,bbq bb512);bbb bb1848
(bb305*bbj);bbb bb1960(bb305*bbj);bbu bb1790(bb305*bbj,bb411*bb827);
bbu bb1930(bb305*bbj,bb411*bb827);bbb bb1933(bb33*bbj,bb178 bb104);
bbb bb1869(bb33*bbj,bb178 bb104);bbb bb1819(bb33*bbj,bbd bb27,bbd
bb935);bbu bb1868(bbd bb294);bb160*bb1789(bb33*bbj,bbd bb294,bbu bb594
);bb160*bb1844(bb33*bbj,bbd bb294,bbd bb104);bb160*bb1918(bb33*bbj,
bb178 bb104);bbb bb1937(bb510*bb40);bb160*bb1934(bb33*bbj,bb199*bb180
);bbb bb1881(bb33*bbj,bb178 bb104);bbb bb1859(bb33*bbj,bb178 bb104);
bbb bb1980(bb33*bbj);bbb bb1796(bb33*bbj);bbu bb1266(bb33* *bb1215){
bb33*bb108=bb137(1 ,bb12( *bb108));bbm(!bb108)bb99 bb2369;bbm(!bb1870(
&bb108->bb1047,16 ))bb99 bb2369;bb139(&bb108->bb605);{bb486 bb1886;
bb1886.bb1364=0 ;bb1886.bb1122=0 ;bb1886.bb1071=bb1222;bb1777(bb108,&
bb1886);}bb108->bb585=0 ;bb108->bb538=0 ;bb108->bb1110=bb948;bb108->
bb1171=bb952; *bb1215=bb108;bb2 1 ;bb2369:bbm(bb108)bb109(bb108); *
bb1215=bb90;bb2 0 ;}bbb bb1285(bb33*bbj){bbm(!bbj)bb2;bb1796(bbj);
bb2036(bbj);bb1987(bbj,bb90,0 );bb2029(bbj);bb142(&bbj->bb605);bb1848(
&bbj->bb1047);bb109(bbj);}
| gpl-2.0 |
XXMrHyde/android_kernel_motorola_msm8610 | drivers/net/usb/qmi_wwan.c | 343 | 23503 | /*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
/* The name of the CDC Device Management driver */
#define DM_DRIVER "cdc_wdm"
/*
* This driver supports wwan (3G/LTE/?) devices using a vendor
* specific management protocol called Qualcomm MSM Interface (QMI) -
* in addition to the more common AT commands over serial interface
* management
*
* QMI is wrapped in CDC, using CDC encapsulated commands on the
* control ("master") interface of a two-interface CDC Union
* resembling standard CDC ECM. The devices do not use the control
* interface for any other CDC messages. Most likely because the
* management protocol is used in place of the standard CDC
* notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
*
* Handling a protocol like QMI is out of the scope for any driver.
* It can be exported as a character device using the cdc-wdm driver,
* which will enable userspace applications ("modem managers") to
* handle it. This may be required to use the network interface
* provided by the driver.
*
* These devices may alternatively/additionally be configured using AT
* commands on any of the serial interfaces driven by the option driver
*
* This driver binds only to the data ("slave") interface to enable
* the cdc-wdm driver to bind to the control interface. It still
* parses the CDC functional descriptors on the control interface to
* a) verify that this is indeed a handled interface (CDC Union
* header lists it as slave)
* b) get MAC address and other ethernet config from the CDC Ethernet
* header
* c) enable user bind requests against the control interface, which
* is the common way to bind to CDC Ethernet Control Model type
* interfaces
* d) provide a hint to the user about which interface is the
* corresponding management interface
*/
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status = -1;
struct usb_interface *control = NULL;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
struct usb_cdc_union_desc *cdc_union = NULL;
struct usb_cdc_ether_desc *cdc_ether = NULL;
u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
u32 found = 0;
atomic_t *pmcount = (void *)&dev->data[1];
atomic_set(pmcount, 0);
/*
* assume a data interface has no additional descriptors and
* that the control and data interface are numbered
* consecutively - this holds for the Huawei device at least
*/
if (len == 0 && desc->bInterfaceNumber > 0) {
control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
if (!control)
goto err;
buf = control->cur_altsetting->extra;
len = control->cur_altsetting->extralen;
dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
dev_name(&control->dev));
}
while (len > 3) {
struct usb_descriptor_header *h = (void *)buf;
/* ignore any misplaced descriptors */
if (h->bDescriptorType != USB_DT_CS_INTERFACE)
goto next_desc;
/* buf[2] is CDC descriptor subtype */
switch (buf[2]) {
case USB_CDC_HEADER_TYPE:
if (found & 1 << USB_CDC_HEADER_TYPE) {
dev_dbg(&intf->dev, "extra CDC header\n");
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
goto err;
}
break;
case USB_CDC_UNION_TYPE:
if (found & 1 << USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "extra CDC union\n");
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
goto err;
}
cdc_union = (struct usb_cdc_union_desc *)buf;
break;
case USB_CDC_ETHERNET_TYPE:
if (found & 1 << USB_CDC_ETHERNET_TYPE) {
dev_dbg(&intf->dev, "extra CDC ether\n");
goto err;
}
if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
dev_dbg(&intf->dev, "CDC ether len %u\n", h->bLength);
goto err;
}
cdc_ether = (struct usb_cdc_ether_desc *)buf;
break;
}
/*
* Remember which CDC functional descriptors we've seen. Works
* for all types we care about, of which USB_CDC_ETHERNET_TYPE
* (0x0f) is the highest numbered
*/
if (buf[2] < 32)
found |= 1 << buf[2];
next_desc:
len -= h->bLength;
buf += h->bLength;
}
/* did we find all the required ones? */
if ((found & required) != required) {
dev_err(&intf->dev, "CDC functional descriptors missing\n");
goto err;
}
/* give the user a helpful hint if trying to bind to the wrong interface */
if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) {
dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n",
dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev));
goto err;
}
/* errors aren't fatal - we can live with the dynamic address */
if (cdc_ether) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
/* success! point the user to the management interface */
if (control)
dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n",
dev_name(&control->dev));
/* XXX: add a sysfs symlink somewhere to help management applications find it? */
/* collect bulk endpoints now that we know intf == "data" interface */
status = usbnet_get_endpoints(dev, intf);
err:
return status;
}
/* using a counter to merge subdriver requests with our own into a combined state */
/* Merge power management requests from the cdc-wdm subdriver with our
 * own by refcounting users of remote wakeup.  The counter lives in
 * dev->data[1]; remote wakeup is needed while it is non-zero.
 */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
	atomic_t *pmcount = (void *)&dev->data[1];
	int edge;
	int rv;

	dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on);

	/* only act on the 0 -> 1 and 1 -> 0 transitions of the counter */
	edge = on ? (atomic_add_return(1, pmcount) == 1)
		  : atomic_dec_and_test(pmcount);
	if (!edge)
		return 0;

	/* need autopm_get/put here to ensure the usbcore sees the new value */
	rv = usb_autopm_get_interface(dev->intf);
	if (rv < 0)
		return rv;

	dev->intf->needs_remote_wakeup = on;
	usb_autopm_put_interface(dev->intf);
	return 0;
}
/* cdc-wdm subdriver callback: forward its power management requests
 * into the shared refcounting scheme above.
 */
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
{
	struct usbnet *dev = usb_get_intfdata(intf);

	/* can be called while disconnecting */
	return dev ? qmi_wwan_manage_power(dev, on) : 0;
}
/* Some devices combine the "control" and "data" functions into a
* single interface with all three endpoints: interrupt + bulk in and
* out
*
* Setting up cdc-wdm as a subdriver owning the interrupt endpoint
* will let it provide userspace access to the encapsulated QMI
* protocol without interfering with the usbnet operations.
*/
/* Bind to a combined control+data interface (interrupt + bulk in/out).
 *
 * Registers cdc-wdm as a subdriver owning the interrupt endpoint so
 * userspace gets access to the encapsulated QMI protocol, then hides
 * that endpoint from usbnet by clearing dev->status.
 *
 * Uses dev->data[0] to remember the subdriver and dev->data[1] as the
 * shared power-management counter.  Returns 0 on success, negative
 * errno otherwise.
 */
static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
{
	int rv;
	struct usb_driver *subdriver = NULL;
	atomic_t *pmcount = (void *)&dev->data[1];

	/* ZTE makes devices where the interface descriptors and endpoint
	 * configurations of two or more interfaces are identical, even
	 * though the functions are completely different. If set, then
	 * driver_info->data is a bitmap of acceptable interface numbers
	 * allowing us to bind to one such interface without binding to
	 * all of them
	 */
	if (dev->driver_info->data &&
	    !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) {
		dev_info(&intf->dev, "not on our whitelist - ignored");
		rv = -ENODEV;
		goto err;
	}

	atomic_set(pmcount, 0);

	/* collect all three endpoints */
	rv = usbnet_get_endpoints(dev, intf);
	if (rv < 0)
		goto err;

	/* require interrupt endpoint for subdriver */
	if (!dev->status) {
		rv = -EINVAL;
		goto err;
	}

	/* 512-byte buffer matches the wMaxCommandSize cdc-wdm expects here */
	subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
	if (IS_ERR(subdriver)) {
		rv = PTR_ERR(subdriver);
		goto err;
	}

	/* can't let usbnet use the interrupt endpoint */
	dev->status = NULL;

	/* save subdriver struct for suspend/resume wrappers */
	dev->data[0] = (unsigned long)subdriver;

err:
	return rv;
}
/* Undo qmi_wwan_bind_shared(): let the cdc-wdm subdriver (saved in
 * dev->data[0]) disconnect, then drop our reference to it.
 */
static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
{
	struct usb_driver *wdm = (void *)dev->data[0];

	if (wdm && wdm->disconnect)
		wdm->disconnect(intf);

	dev->data[0] = (unsigned long)NULL;
}
/* suspend/resume wrappers calling both usbnet and the cdc-wdm
* subdriver if present.
*
* NOTE: cdc-wdm also supports pre/post_reset, but we cannot provide
* wrappers for those without adding usbnet reset support first.
*/
/* Suspend usbnet and, when present, the cdc-wdm subdriver.
 * If the subdriver refuses to suspend, the usbnet suspend is rolled
 * back so the device is left fully awake.
 */
static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usbnet *dev = usb_get_intfdata(intf);
	struct usb_driver *subdriver = (void *)dev->data[0];
	int ret;

	ret = usbnet_suspend(intf, message);
	if (ret < 0)
		return ret;

	if (subdriver && subdriver->suspend) {
		ret = subdriver->suspend(intf, message);
		if (ret < 0)
			usbnet_resume(intf);
	}

	return ret;
}
static int qmi_wwan_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct usb_driver *subdriver = (void *)dev->data[0];
int ret = 0;
if (subdriver && subdriver->resume)
ret = subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend)
subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
}
/* Two-interface CDC-style devices: qmi_wwan_bind() parses the CDC
 * functional descriptors found on the control interface. */
static const struct driver_info qmi_wwan_info = {
	.description	= "QMI speaking wwan device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind,
	.manage_power	= qmi_wwan_manage_power,
};

/* Single combined control+data interface; cdc-wdm handles the
 * interrupt endpoint (see qmi_wwan_bind_shared). */
static const struct driver_info qmi_wwan_shared = {
	.description	= "QMI speaking wwan device with combined interface",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
};

/* The qmi_wwan_force_intN variants below are identical except for
 * .data, which restricts binding to the given interface number. */
static const struct driver_info qmi_wwan_force_int0 = {
	.description	= "Qualcomm WWAN/QMI device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
	.data		= BIT(0), /* interface whitelist bitmap */
};

static const struct driver_info qmi_wwan_force_int1 = {
	.description	= "Qualcomm WWAN/QMI device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
	.data		= BIT(1), /* interface whitelist bitmap */
};

static const struct driver_info qmi_wwan_force_int2 = {
	.description	= "Qualcomm WWAN/QMI device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
	.data		= BIT(2), /* interface whitelist bitmap */
};

static const struct driver_info qmi_wwan_force_int3 = {
	.description	= "Qualcomm WWAN/QMI device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
	.data		= BIT(3), /* interface whitelist bitmap */
};

static const struct driver_info qmi_wwan_force_int4 = {
	.description	= "Qualcomm WWAN/QMI device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
	.data		= BIT(4), /* interface whitelist bitmap */
};

/* Sierra Wireless provide equally useless interface descriptors
 * Devices in QMI mode can be switched between two different
 * configurations:
 *   a) USB interface #8 is QMI/wwan
 *   b) USB interfaces #8, #19 and #20 are QMI/wwan
 *
 * Both configurations provide a number of other interfaces (serial++),
 * some of which have the same endpoint configuration as we expect, so
 * a whitelist or blacklist is necessary.
 *
 * FIXME: The below whitelist should include BIT(20).  It does not
 * because I cannot get it to work...
 */
static const struct driver_info qmi_wwan_sierra = {
	.description	= "Sierra Wireless wwan/QMI device",
	.flags		= FLAG_WWAN,
	.bind		= qmi_wwan_bind_shared,
	.unbind		= qmi_wwan_unbind_shared,
	.manage_power	= qmi_wwan_manage_power,
	.data		= BIT(8) | BIT(19), /* interface whitelist bitmap */
};
/* Huawei devices are matched on vendor id + interface info only */
#define HUAWEI_VENDOR_ID	0x12D1

/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
#define QMI_GOBI1K_DEVICE(vend, prod) \
	USB_DEVICE(vend, prod), \
	.driver_info = (unsigned long)&qmi_wwan_force_int3

/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
#define QMI_GOBI_DEVICE(vend, prod) \
	USB_DEVICE(vend, prod), \
	.driver_info = (unsigned long)&qmi_wwan_force_int0
/* Device match table.  Entries matched on interface info pick up the
 * plain or shared driver_info; vendor-specific entries use the
 * whitelist variants declared above. */
static const struct usb_device_id products[] = {
	{	/* Huawei E392, E398 and possibly others sharing both device id and more... */
		.match_flags	    = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = HUAWEI_VENDOR_ID,
		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
		.bInterfaceSubClass = 1,
		.bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
		.driver_info        = (unsigned long)&qmi_wwan_info,
	},
	{	/* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
		.match_flags	    = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = HUAWEI_VENDOR_ID,
		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
		.bInterfaceSubClass = 1,
		.bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
		.driver_info        = (unsigned long)&qmi_wwan_info,
	},
	{	/* Huawei E392, E398 and possibly others in "Windows mode"
		 * using a combined control and data interface without any CDC
		 * functional descriptors
		 */
		.match_flags	    = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = HUAWEI_VENDOR_ID,
		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
		.bInterfaceSubClass = 1,
		.bInterfaceProtocol = 17,
		.driver_info        = (unsigned long)&qmi_wwan_shared,
	},
	{	/* Pantech UML290 */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x106c,
		.idProduct          = 0x3718,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xf0,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_shared,
	},
	{	/* Pantech UML290 - newer firmware */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x106c,
		.idProduct          = 0x3718,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xf1,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_shared,
	},
	{	/* ZTE MF820D */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x0167,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE MF821D */
		.match_flags        = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x0326,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE (Vodafone) K3520-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x0055,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int1,
	},
	{	/* ZTE (Vodafone) K3565-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x0063,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE (Vodafone) K3570-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x1008,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE (Vodafone) K3571-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x1010,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE (Vodafone) K3765-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x2002,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE (Vodafone) K4505-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x0104,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
	},
	{	/* ZTE (Vodafone) K5006-Z */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x1018,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int3,
	},
	{	/* ZTE MF60 */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x19d2,
		.idProduct          = 0x1402,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_force_int2,
	},
	{	/* Sierra Wireless MC77xx in QMI mode */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x1199,
		.idProduct          = 0x68a2,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_sierra,
	},
	{	/* Sierra Wireless MC7700 */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x0f3d,
		.idProduct          = 0x68a2,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_sierra,
	},
	{	/* Sierra Wireless MC7750 */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x114f,
		.idProduct          = 0x68a2,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_sierra,
	},
	{	/* Sierra Wireless EM7700 */
		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor           = 0x1199,
		.idProduct          = 0x901c,
		.bInterfaceClass    = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
		.driver_info        = (unsigned long)&qmi_wwan_sierra,
	},

	/* Gobi 1000 devices */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
	{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */
	{QMI_GOBI1K_DEVICE(0x03f0, 0x371d)},	/* HP un2430 Mobile Broadband Module */
	{QMI_GOBI1K_DEVICE(0x04da, 0x250d)},	/* Panasonic Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x413c, 0x8172)},	/* Dell Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x1410, 0xa001)},	/* Novatel Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},	/* Asus Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},	/* ONDA Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},	/* Generic Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9002)},	/* Generic Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9202)},	/* Generic Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9203)},	/* Generic Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9222)},	/* Generic Gobi Modem device */
	{QMI_GOBI1K_DEVICE(0x05c6, 0x9009)},	/* Generic Gobi Modem device */

	/* Gobi 2000 and 3000 devices */
	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
	{QMI_GOBI_DEVICE(0x05c6, 0x9245)},	/* Samsung Gobi 2000 Modem device (VL176) */
	{QMI_GOBI_DEVICE(0x03f0, 0x251d)},	/* HP Gobi 2000 Modem device (VP412) */
	{QMI_GOBI_DEVICE(0x05c6, 0x9215)},	/* Acer Gobi 2000 Modem device (VP413) */
	{QMI_GOBI_DEVICE(0x05c6, 0x9265)},	/* Asus Gobi 2000 Modem device (VR305) */
	{QMI_GOBI_DEVICE(0x05c6, 0x9235)},	/* Top Global Gobi 2000 Modem device (VR306) */
	{QMI_GOBI_DEVICE(0x05c6, 0x9275)},	/* iRex Technologies Gobi 2000 Modem device (VR307) */
	{QMI_GOBI_DEVICE(0x1199, 0x68a5)},	/* Sierra Wireless Modem */
	{QMI_GOBI_DEVICE(0x1199, 0x68a9)},	/* Sierra Wireless Modem */
	{QMI_GOBI_DEVICE(0x1199, 0x9001)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9002)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9003)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9004)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9005)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9006)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9007)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9008)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9009)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x900a)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
	{QMI_GOBI_DEVICE(0x1199, 0x9011)},	/* Sierra Wireless Gobi 2000 Modem device (MC8305) */
	{QMI_GOBI_DEVICE(0x16d8, 0x8002)},	/* CMDTech Gobi 2000 Modem device (VU922) */
	{QMI_GOBI_DEVICE(0x05c6, 0x9205)},	/* Gobi 2000 Modem device */
	{QMI_GOBI_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */
	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
	{QMI_GOBI_DEVICE(0x1199, 0x901b)},	/* Sierra Wireless MC7770 */
	{ }					/* END */
};
MODULE_DEVICE_TABLE(usb, products);
/* USB driver glue.  probe/disconnect go straight to the generic usbnet
 * core; suspend/resume use the wrappers above so any cdc-wdm subdriver
 * is suspended and resumed alongside usbnet. */
static struct usb_driver qmi_wwan_driver = {
	.name		      = "qmi_wwan",
	.id_table	      = products,
	.probe		      = usbnet_probe,
	.disconnect	      = usbnet_disconnect,
	.suspend	      = qmi_wwan_suspend,
	.resume		      = qmi_wwan_resume,
	.reset_resume         = qmi_wwan_resume,
	.supports_autosuspend = 1,
};
/* Module registration boilerplate: register/deregister qmi_wwan_driver
 * on load/unload.  module_usb_driver() expands to the same
 * __init/__exit usb_register()/usb_deregister() pair the driver
 * previously spelled out by hand. */
module_usb_driver(qmi_wwan_driver);
MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ali-filth/android_kernel_samsung_msm8226 | arch/arm/mach-msm/qdsp5v2/audio_evrc.c | 343 | 43107 | /*
* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
*
* This code also borrows from audio_aac.c, which is
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org.
*/
#include <asm/atomic.h>
#include <asm/ioctls.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/earlysuspend.h>
#include <linux/memory_alloc.h>
#include <linux/msm_audio.h>
#include <linux/slab.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/audpp.h>
#include <mach/debug_mm.h>
#include <mach/msm_memtypes.h>
/* Hold 30 packets of 24 bytes each and 14 bytes of meta in */
#define BUFSZ 734
#define DMASZ (BUFSZ * 2)
#define AUDDEC_DEC_EVRC 12
#define PCM_BUFSZ_MIN 1624 /* 100ms worth of data and
and 24 bytes of meta out */
#define PCM_BUF_MAX_COUNT 5
/* DSP only accepts 5 buffers at most
* but support 2 buffers currently
*/
#define EVRC_DECODED_FRSZ 320 /* EVRC 20ms 8KHz mono PCM size */
#define ROUTING_MODE_FTRT 1
#define ROUTING_MODE_RT 2
/* Decoder status received from AUDPPTASK */
#define AUDPP_DEC_STATUS_SLEEP 0
#define AUDPP_DEC_STATUS_INIT 1
#define AUDPP_DEC_STATUS_CFG 2
#define AUDPP_DEC_STATUS_PLAY 3
#define AUDEVRC_METAFIELD_MASK 0xFFFF0000
#define AUDEVRC_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */
#define AUDEVRC_EOS_FLG_MASK 0x01
#define AUDEVRC_EOS_NONE 0x0 /* No EOS detected */
#define AUDEVRC_EOS_SET 0x1 /* EOS set in meta field */
#define AUDEVRC_EVENT_NUM 10 /* Default number of pre-allocated event packets */
/* One buffer exchanged with the DSP; used for both the write direction
 * (encoded EVRC input, out[]) and the read direction (decoded PCM,
 * in[]). */
struct buffer {
	void *data;		/* kernel virtual address */
	unsigned size;
	unsigned used;		/* Input usage actual DSP produced PCM size */
	unsigned addr;		/* address handed to / compared against the DSP */
	unsigned short mfield_sz; /*only useful for data has meta field */
};

#ifdef CONFIG_HAS_EARLYSUSPEND
/* Links an early-suspend notifier node back to its audio session */
struct audevrc_suspend_ctl {
	struct early_suspend node;
	struct audio *audio;
};
#endif

/* Event queued on audio->event_queue for delivery to userspace */
struct audevrc_event{
	struct list_head list;
	int event_type;
	union msm_audio_event_payload payload;
};
/* Per-open state of one EVRC playback/decode session. */
struct audio {
	struct buffer out[2];		/* double-buffered encoded input to the DSP */

	spinlock_t dsp_lock;		/* protects in[]/out[] bookkeeping touched
					 * from the DSP event path */

	uint8_t out_head;
	uint8_t out_tail;
	uint8_t out_needed; /* number of buffers the dsp is waiting for */

	atomic_t out_bytes;

	struct mutex lock;
	struct mutex write_lock;
	wait_queue_head_t write_wait;

	/* Host PCM section */
	struct buffer in[PCM_BUF_MAX_COUNT];
	struct mutex read_lock;
	wait_queue_head_t read_wait;	/* Wait queue for read */
	char *read_data;	/* pointer to reader buffer */
	int32_t read_phys;	/* physical address of reader buffer */
	uint8_t read_next;	/* index to input buffers to be read next */
	uint8_t fill_next;	/* index to buffer that DSP should be filling */
	uint8_t pcm_buf_count;	/* number of pcm buffer allocated */
	/* ---- End of Host PCM section */

	struct msm_adsp_module *audplay;

	/* data allocated for various buffers */
	char *data;
	int32_t phys; /* physical address of write buffer */
	void *map_v_read;	/* kernel mapping of the read buffer */
	void *map_v_write;	/* kernel mapping of the write buffer */

	int mfield; /* meta field embedded in data */
	int rflush; /* Read flush */
	int wflush; /* Write flush */
	uint8_t opened:1;
	uint8_t enabled:1;
	uint8_t running:1;
	uint8_t stopped:1;	/* set when stopped, cleared on flush */
	uint8_t pcm_feedback:1;	/* 0 = tunnel mode, nonzero = host PCM - TODO confirm */
	uint8_t buf_refresh:1;
	int teos; /* valid only if tunnel mode & no data left for decoder */
	enum msm_aud_decoder_state dec_state;	/* Represents decoder state */

	const char *module_name;
	unsigned queue_id;
	uint16_t dec_id;	/* decoder instance id used with audpp_* calls */
	uint32_t read_ptr_offset;
	int16_t source;		/* bitmap of routed devices (see evrc_listner) */

#ifdef CONFIG_HAS_EARLYSUSPEND
	struct audevrc_suspend_ctl suspend_ctl;
#endif

#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
#endif

	wait_queue_head_t wait;		/* decoder state-change wait (see audevrc_disable) */
	struct list_head free_event_queue;
	struct list_head event_queue;
	wait_queue_head_t event_wait;
	spinlock_t event_queue_lock;
	struct mutex get_event_lock;
	int event_abort;
	/* AV sync Info */
	int avsync_flag;	/* Flag to indicate feedback from DSP */
	wait_queue_head_t avsync_wait;	/* Wait queue for AV Sync Message */
	/* flags, 48 bits sample/bytes counter per channel */
	uint16_t avsync[AUDPP_AVSYNC_CH_COUNT * AUDPP_AVSYNC_NUM_WORDS + 1];

	uint32_t device_events;	/* auddev event mask this session listens for */

	int eq_enable;
	int eq_needs_commit;
	struct audpp_cmd_cfg_object_params_eqalizer eq;
	struct audpp_cmd_cfg_object_params_volume vol_pan;
};
static int auddec_dsp_config(struct audio *audio, int enable);
static void audpp_cmd_cfg_adec_params(struct audio *audio);
static void audpp_cmd_cfg_routing_mode(struct audio *audio);
static void audevrc_send_data(struct audio *audio, unsigned needed);
static void audevrc_dsp_event(void *private, unsigned id, uint16_t *msg);
static void audevrc_config_hostpcm(struct audio *audio);
static void audevrc_buffer_refresh(struct audio *audio);
#ifdef CONFIG_HAS_EARLYSUSPEND
static void audevrc_post_event(struct audio *audio, int type,
union msm_audio_event_payload payload);
#endif
/* must be called with audio->lock held */
/* Enable the ADSP decoder task and postprocessor for this session.
 * Caller must hold audio->lock.  Returns 0 when already enabled or on
 * success, -ENODEV when either DSP module cannot be brought up.
 */
static int audevrc_enable(struct audio *audio)
{
	if (audio->enabled)
		return 0;

	/* reset per-run output bookkeeping before touching the DSP */
	audio->out_tail = 0;
	audio->out_needed = 0;
	audio->dec_state = MSM_AUD_DECODER_STATE_NONE;

	if (msm_adsp_enable(audio->audplay)) {
		MM_ERR("msm_adsp_enable(audplay) failed\n");
		return -ENODEV;
	}

	if (audpp_enable(audio->dec_id, audevrc_dsp_event, audio)) {
		MM_ERR("audpp_enable() failed\n");
		/* roll back the audplay enable done above */
		msm_adsp_disable(audio->audplay);
		return -ENODEV;
	}

	audio->enabled = 1;
	return 0;
}
/* Callback registered with the audio device control layer: tracks
 * device routing changes and per-stream volume updates for this
 * decoder session.
 */
static void evrc_listner(u32 evt_id, union auddev_evt_data *evt_payload,
		void *private_data)
{
	struct audio *audio = private_data;

	if (evt_id == AUDDEV_EVT_DEV_RDY) {
		MM_DBG(":AUDDEV_EVT_DEV_RDY\n");
		audio->source |= (0x1 << evt_payload->routing_id);
		if (audio->running == 1 && audio->enabled == 1)
			audpp_route_stream(audio->dec_id, audio->source);
	} else if (evt_id == AUDDEV_EVT_DEV_RLS) {
		MM_DBG(":AUDDEV_EVT_DEV_RLS\n");
		audio->source &= ~(0x1 << evt_payload->routing_id);
		if (audio->running == 1 && audio->enabled == 1)
			audpp_route_stream(audio->dec_id, audio->source);
	} else if (evt_id == AUDDEV_EVT_STREAM_VOL_CHG) {
		audio->vol_pan.volume = evt_payload->session_vol;
		MM_DBG(":AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d\n",
				audio->vol_pan.volume);
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
					POPP);
	} else {
		MM_ERR(":ERROR:wrong event\n");
	}
}
/*
 * audevrc_disable - tear down the decoder path.
 * Must be called with audio->lock held.
 *
 * Sends the disable config to the DSP and waits (bounded by
 * MSM_AUD_DECODER_WAIT_MS) for the decoder to leave the NONE state.
 * Any state other than CLOSE at that point is treated as an error.
 * Sleeping readers/writers are woken so they can observe shutdown.
 *
 * Returns 0 on clean close, -ETIMEDOUT if the DSP never answered, or
 * -EFAULT if the decoder ended up in an unexpected state.
 */
static int audevrc_disable(struct audio *audio)
{
	int rc = 0;
	if (audio->enabled) {
		audio->enabled = 0;
		audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
		auddec_dsp_config(audio, 0);
		/* wait for the DSP to acknowledge the disable request */
		rc = wait_event_interruptible_timeout(audio->wait,
			audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
			msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
		if (rc == 0)
			rc = -ETIMEDOUT;
		else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE)
			rc = -EFAULT;
		else
			rc = 0;
		wake_up(&audio->write_wait);
		wake_up(&audio->read_wait);
		msm_adsp_disable(audio->audplay);
		audpp_disable(audio->dec_id, audio);
		audio->out_needed = 0;
	}
	return rc;
}
/* ------------------- dsp --------------------- */
/*
 * audevrc_update_pcm_buf_entry - process an AUDPLAY_MSG_BUFFER_UPDATE.
 *
 * payload[1] holds the number of completed PCM buffers; each completed
 * buffer is described by an (address, byte count) pair starting at
 * payload[2].  Buffers are expected back in fill order; on a mismatch
 * we log and stop processing the message.  Called from DSP message
 * context; takes dsp_lock.
 */
static void audevrc_update_pcm_buf_entry(struct audio *audio,
					uint32_t *payload)
{
	uint8_t index;
	unsigned long flags;
	/* buffer-done messages are stale while a read flush is pending */
	if (audio->rflush)
		return;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	for (index = 0; index < payload[1]; index++) {
		if (audio->in[audio->fill_next].addr
				== payload[2 + index * 2]) {
			MM_DBG("in[%d] ready\n", audio->fill_next);
			audio->in[audio->fill_next].used =
				payload[3 + index * 2];
			if ((++audio->fill_next) == audio->pcm_buf_count)
				audio->fill_next = 0;
		} else {
			/* Fix: report the address the DSP actually returned
			 * (payload[2 + index * 2]); the previous code logged
			 * payload[1 + index * 2], which is not the address
			 * field that was compared above.
			 */
			MM_ERR("expected=%x ret=%x\n",
				audio->in[audio->fill_next].addr,
				payload[2 + index * 2]);
			break;
		}
	}
	if (audio->in[audio->fill_next].used == 0) {
		/* next buffer is free: hand it to the DSP right away */
		audevrc_buffer_refresh(audio);
	} else {
		/* reader has not drained it yet; refresh later from read() */
		MM_DBG("read cannot keep up\n");
		audio->buf_refresh = 1;
	}
	wake_up(&audio->read_wait);
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/*
 * Low-level message handler for the audplay ADSP task.  Copies the
 * message out of the shared queue via getevent() and dispatches on id.
 */
static void audplay_dsp_event(void *data, unsigned id, size_t len,
			void (*getevent) (void *ptr, size_t len))
{
	struct audio *audio = data;
	uint32_t msg[28];	/* sized to hold any audplay message */
	getevent(msg, sizeof(msg));
	MM_DBG("msg_id=%x\n", id);
	switch (id) {
	case AUDPLAY_MSG_DEC_NEEDS_DATA:
		/* decoder consumed a buffer - try to feed the next one */
		audevrc_send_data(audio, 1);
		break;
	case AUDPLAY_MSG_BUFFER_UPDATE:
		MM_DBG("\n"); /* Macro prints the file name and function */
		/* PCM (non-tunnel) buffer completed on the read side */
		audevrc_update_pcm_buf_entry(audio, msg);
		break;
	case ADSP_MESSAGE_ID:
		MM_DBG("Received ADSP event: module enable(audplaytask)\n");
		break;
	default:
		MM_ERR("unexpected message from decoder \n");
	}
}
/*
 * audpp event handler for this decoder session.  Drives the decoder
 * state machine (audio->dec_state) and wakes threads blocked on
 * audio->wait, write_wait or avsync_wait as the DSP reports progress.
 * Runs in DSP message context.
 */
static void audevrc_dsp_event(void *private, unsigned id, uint16_t *msg)
{
	struct audio *audio = private;
	switch (id) {
	case AUDPP_MSG_STATUS_MSG:{
		unsigned status = msg[1];
		switch (status) {
		case AUDPP_DEC_STATUS_SLEEP: {
			uint16_t reason = msg[2];
			MM_DBG("decoder status:sleep reason = \
				0x%04x\n", reason);
			if ((reason == AUDPP_MSG_REASON_MEM)
				|| (reason ==
				AUDPP_MSG_REASON_NODECODER)) {
				/* out of DSP resources: fail pending start */
				audio->dec_state =
					MSM_AUD_DECODER_STATE_FAILURE;
				wake_up(&audio->wait);
			} else if (reason == AUDPP_MSG_REASON_NONE) {
				/* decoder is in disable state */
				audio->dec_state =
					MSM_AUD_DECODER_STATE_CLOSE;
				wake_up(&audio->wait);
			}
			break;
		}
		case AUDPP_DEC_STATUS_INIT:
			MM_DBG("decoder status: init \n");
			/* non-tunnel mode must configure routing first */
			if (audio->pcm_feedback)
				audpp_cmd_cfg_routing_mode(audio);
			else
				audpp_cmd_cfg_adec_params(audio);
			break;
		case AUDPP_DEC_STATUS_CFG:
			MM_DBG("decoder status: cfg \n");
			break;
		case AUDPP_DEC_STATUS_PLAY:
			MM_DBG("decoder status: play \n");
			audpp_route_stream(audio->dec_id,
					audio->source);
			if (audio->pcm_feedback) {
				audevrc_config_hostpcm(audio);
				audevrc_buffer_refresh(audio);
			}
			/* decoder fully up: release AUDIO_START waiter */
			audio->dec_state =
				MSM_AUD_DECODER_STATE_SUCCESS;
			wake_up(&audio->wait);
			break;
		default:
			MM_ERR("unknown decoder status \n");
		}
		break;
	}
	case AUDPP_MSG_CFG_MSG:
		if (msg[0] == AUDPP_MSG_ENA_ENA) {
			MM_DBG("CFG_MSG ENABLE\n");
			auddec_dsp_config(audio, 1);
			audio->out_needed = 0;
			audio->running = 1;
			/* re-apply cached volume/pan and EQ settings */
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
					POPP);
			audpp_dsp_set_eq(audio->dec_id, audio->eq_enable,
					&audio->eq, POPP);
		} else if (msg[0] == AUDPP_MSG_ENA_DIS) {
			MM_DBG("CFG_MSG DISABLE\n");
			audio->running = 0;
		} else {
			MM_DBG("CFG_MSG %d?\n", msg[0]);
		}
		break;
	case AUDPP_MSG_ROUTING_ACK:
		MM_DBG("ROUTING_ACK\n");
		/* routing set; now send the codec parameters */
		audpp_cmd_cfg_adec_params(audio);
		break;
	case AUDPP_MSG_FLUSH_ACK:
		MM_DBG("FLUSH_ACK\n");
		audio->wflush = 0;
		audio->rflush = 0;
		wake_up(&audio->write_wait);
		/* restart PCM feedback after the flush completed */
		if (audio->pcm_feedback)
			audevrc_buffer_refresh(audio);
		break;
	case AUDPP_MSG_PCMDMAMISSED:
		MM_DBG("PCMDMAMISSED\n");
		/* decoder starved - used by fsync() as end-of-stream */
		audio->teos = 1;
		wake_up(&audio->write_wait);
		break;
	case AUDPP_MSG_AVSYNC_MSG:
		MM_DBG("AUDPP_MSG_AVSYNC_MSG\n");
		memcpy(&audio->avsync[0], msg, sizeof(audio->avsync));
		audio->avsync_flag = 1;
		wake_up(&audio->avsync_wait);
		break;
	default:
		MM_ERR("UNKNOWN (%d)\n", id);
	}
}
/* adsp ops: route all audplay task messages to audplay_dsp_event() */
struct msm_adsp_ops audplay_adsp_ops_evrc = {
	.event = audplay_dsp_event,
};

/* Write a command into this instance's audplay task queue. */
#define audplay_send_queue0(audio, cmd, len) \
	msm_adsp_write(audio->audplay, audio->queue_id, \
			cmd, len)
static int auddec_dsp_config(struct audio *audio, int enable)
{
struct audpp_cmd_cfg_dec_type cfg_dec_cmd;
memset(&cfg_dec_cmd, 0, sizeof(cfg_dec_cmd));
cfg_dec_cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE;
if (enable)
cfg_dec_cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC |
AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_EVRC;
else
cfg_dec_cmd.dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC |
AUDPP_CMD_DIS_DEC_V;
cfg_dec_cmd.dm_mode = 0x0;
cfg_dec_cmd.stream_id = audio->dec_id;
return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd));
}
static void audpp_cmd_cfg_adec_params(struct audio *audio)
{
struct audpp_cmd_cfg_adec_params_evrc cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
cmd.common.length = sizeof(cmd);
cmd.common.dec_id = audio->dec_id;
cmd.common.input_sampling_frequency = 8000;
cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V;
audpp_send_queue2(&cmd, sizeof(cmd));
}
/*
 * audpp_cmd_cfg_routing_mode - select FTRT (non-tunnel/PCM feedback)
 * or RT (tunnel) routing for this decoder object.
 */
static void audpp_cmd_cfg_routing_mode(struct audio *audio)
{
	struct audpp_cmd_routing_mode cmd;

	MM_DBG("\n"); /* Macro prints the file name and function */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDPP_CMD_ROUTING_MODE;
	cmd.object_number = audio->dec_id;
	cmd.routing_mode = audio->pcm_feedback ?
			ROUTING_MODE_FTRT : ROUTING_MODE_RT;
	audpp_send_queue1(&cmd, sizeof(cmd));
}
/*
 * audplay_dsp_send_data_avail - tell the DSP that output buffer idx
 * holds len bytes of bitstream.  With meta-fields enabled, the
 * decoder_id field carries the meta-field size (in 16-bit words)
 * behind AUDEVRC_METAFIELD_MASK instead of the plain decoder id.
 */
static int audplay_dsp_send_data_avail(struct audio *audio,
					unsigned idx, unsigned len)
{
	struct audplay_cmd_bitstream_data_avail_nt2 cmd;

	cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2;
	cmd.decoder_id = audio->mfield ?
		(AUDEVRC_METAFIELD_MASK |
		 (audio->out[idx].mfield_sz >> 1)) :
		audio->dec_id;
	cmd.buf_ptr = audio->out[idx].addr;
	cmd.buf_size = len / 2;	/* DSP counts 16-bit words */
	cmd.partition_number = 0;
	return audplay_send_queue0(audio, &cmd, sizeof(cmd));
}
static void audevrc_buffer_refresh(struct audio *audio)
{
struct audplay_cmd_buffer_refresh refresh_cmd;
refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH;
refresh_cmd.num_buffers = 1;
refresh_cmd.buf0_address = audio->in[audio->fill_next].addr;
refresh_cmd.buf0_length = audio->in[audio->fill_next].size;
refresh_cmd.buf_read_count = 0;
MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address,
refresh_cmd.buf0_length);
audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd));
}
static void audevrc_config_hostpcm(struct audio *audio)
{
struct audplay_cmd_hpcm_buf_cfg cfg_cmd;
MM_DBG("\n"); /* Macro prints the file name and function */
cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG;
cfg_cmd.max_buffers = 1;
cfg_cmd.byte_swap = 0;
cfg_cmd.hostpcm_config = (0x8000) | (0x4000);
cfg_cmd.feedback_frequency = 1;
cfg_cmd.partition_number = 0;
audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd));
}
/*
 * audevrc_send_data - feed the DSP from the two ping-pong output
 * buffers.  frame->used == 0xffffffff marks the buffer currently
 * in flight on the DSP.  Called both from the DSP "needs data"
 * callback (needed=1) and from write()/EOS paths (needed=0).
 * Takes dsp_lock.
 */
static void audevrc_send_data(struct audio *audio, unsigned needed)
{
	struct buffer *frame;
	unsigned long flags;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (!audio->running)
		goto done;
	if (needed && !audio->wflush) {
		/* We were called from the callback because the DSP
		 * requested more data. Note that the DSP does want
		 * more data, and if a buffer was in-flight, mark it
		 * as available (since the DSP must now be done with
		 * it).
		 */
		audio->out_needed = 1;
		frame = audio->out + audio->out_tail;
		if (frame->used == 0xffffffff) {
			MM_DBG("frame %d free\n", audio->out_tail);
			frame->used = 0;
			audio->out_tail ^= 1;	/* flip ping-pong index */
			wake_up(&audio->write_wait);
		}
	}
	if (audio->out_needed) {
		/* If the DSP currently wants data and we have a
		 * buffer available, we will send it and reset
		 * the needed flag. We'll mark the buffer as in-flight
		 * so that it won't be recycled until the next buffer
		 * is requested
		 */
		frame = audio->out + audio->out_tail;
		if (frame->used) {
			BUG_ON(frame->used == 0xffffffff);
			MM_DBG("frame %d busy\n", audio->out_tail);
			audplay_dsp_send_data_avail(audio, audio->out_tail,
					frame->used);
			frame->used = 0xffffffff;	/* now in flight */
			audio->out_needed = 0;
		}
	}
done:
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/* ------------------- device --------------------- */
/*
 * audevrc_flush - reset the write-side (output) ping-pong state:
 * both buffers empty, head/tail at buffer 0, nothing owed to the DSP.
 */
static void audevrc_flush(struct audio *audio)
{
	int i;

	for (i = 0; i < 2; i++)
		audio->out[i].used = 0;
	audio->out_head = 0;
	audio->out_tail = 0;
	audio->out_needed = 0;
	atomic_set(&audio->out_bytes, 0);
}
/*
 * audevrc_flush_pcm_buf - reset the read-side (PCM capture) ring:
 * mark every buffer empty and rewind both cursors.
 */
static void audevrc_flush_pcm_buf(struct audio *audio)
{
	uint8_t i;

	for (i = 0; i < PCM_BUF_MAX_COUNT; i++)
		audio->in[i].used = 0;
	audio->buf_refresh = 0;
	audio->read_next = 0;
	audio->fill_next = 0;
}
/*
 * audevrc_ioport_reset - unblock and reset both I/O directions.
 * Wakes sleepers first so they drop out of their waits, then flushes
 * each side under its own mutex.  Also releases any AUDIO_GET_STATS
 * waiter stuck on avsync_wait.
 */
static void audevrc_ioport_reset(struct audio *audio)
{
	/* Make sure read/write thread are free from
	 * sleep and knowing that system is not able
	 * to process io request at the moment
	 */
	wake_up(&audio->write_wait);
	mutex_lock(&audio->write_lock);
	audevrc_flush(audio);
	mutex_unlock(&audio->write_lock);
	wake_up(&audio->read_wait);
	mutex_lock(&audio->read_lock);
	audevrc_flush_pcm_buf(audio);
	mutex_unlock(&audio->read_lock);
	/* release any thread blocked waiting for an AV sync report */
	audio->avsync_flag = 1;
	wake_up(&audio->avsync_wait);
}
/*
 * audevrc_events_pending - wait condition for the event queue: true
 * when an event is queued or an abort was requested.
 */
static int audevrc_events_pending(struct audio *audio)
{
	unsigned long flags;
	int pending;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	pending = !list_empty(&audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	return pending || audio->event_abort;
}
/*
 * audevrc_reset_event_queue - free every node on both the pending and
 * free event lists.  Called on release; each iteration detaches the
 * current head (which is the cursor element) before freeing it.
 */
static void audevrc_reset_event_queue(struct audio *audio)
{
	unsigned long flags;
	struct audevrc_event *drv_evt;
	struct list_head *ptr, *next;
	spin_lock_irqsave(&audio->event_queue_lock, flags);
	list_for_each_safe(ptr, next, &audio->event_queue) {
		drv_evt = list_first_entry(&audio->event_queue,
				struct audevrc_event, list);
		list_del(&drv_evt->list);
		kfree(drv_evt);
	}
	list_for_each_safe(ptr, next, &audio->free_event_queue) {
		drv_evt = list_first_entry(&audio->free_event_queue,
				struct audevrc_event, list);
		list_del(&drv_evt->list);
		kfree(drv_evt);
	}
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	return;
}
/*
 * audevrc_process_event_req - back end of AUDIO_GET_EVENT.
 * Copies the user's request (for its timeout), blocks until an event
 * is queued or an abort arrives, then pops the oldest event, copies it
 * to userspace and recycles the node onto the free list.
 *
 * Returns 0 on success, -ETIMEDOUT, -ENODEV on abort, -EFAULT on bad
 * user pointers, or -1 if the queue was unexpectedly empty.
 */
static long audevrc_process_event_req(struct audio *audio, void __user *arg)
{
	long rc;
	struct msm_audio_event usr_evt;
	struct audevrc_event *drv_evt = NULL;
	int timeout;
	unsigned long flags;
	if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
		return -EFAULT;
	timeout = (int) usr_evt.timeout_ms;
	if (timeout > 0) {
		rc = wait_event_interruptible_timeout(
			audio->event_wait, audevrc_events_pending(audio),
			msecs_to_jiffies(timeout));
		if (rc == 0)
			return -ETIMEDOUT;
	} else {
		/* timeout <= 0 means wait indefinitely */
		rc = wait_event_interruptible(
			audio->event_wait, audevrc_events_pending(audio));
	}
	if (rc < 0)
		return rc;
	if (audio->event_abort) {
		audio->event_abort = 0;	/* abort is one-shot */
		return -ENODEV;
	}
	rc = 0;
	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->event_queue)) {
		drv_evt = list_first_entry(&audio->event_queue,
				struct audevrc_event, list);
		list_del(&drv_evt->list);
	}
	if (drv_evt) {
		usr_evt.event_type = drv_evt->event_type;
		usr_evt.event_payload = drv_evt->payload;
		/* node goes back to the free pool for reuse */
		list_add_tail(&drv_evt->list, &audio->free_event_queue);
	} else
		rc = -1;
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
		rc = -EFAULT;
	return rc;
}
/*
 * audio_enable_eq - latch the EQ enable flag and push it to the DSP.
 * A no-op when the flag is unchanged and no commit is pending; when
 * the decoder is not running the setting is only cached (it gets
 * re-applied from the CFG_MSG enable path).  Always returns 0.
 */
static int audio_enable_eq(struct audio *audio, int enable)
{
	if (enable == audio->eq_enable && !audio->eq_needs_commit)
		return 0;

	audio->eq_enable = enable;
	if (!audio->running)
		return 0;

	audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq, POPP);
	audio->eq_needs_commit = 0;
	return 0;
}
/*
 * audio_get_avsync_data - decode the cached AV sync report into stats.
 * The report is only valid if it belongs to this decoder (avsync[0])
 * and the flag says a fresh message arrived.  Interrupts are disabled
 * while reading to avoid racing the DSP event handler updating avsync.
 * Returns 0 on success, -EINVAL when no valid report is available.
 */
static int audio_get_avsync_data(struct audio *audio,
				struct msm_audio_stats *stats)
{
	int rc = -EINVAL;
	unsigned long flags;
	local_irq_save(flags);
	if (audio->dec_id == audio->avsync[0] && audio->avsync_flag) {
		/* av_sync sample count */
		stats->sample_count = (audio->avsync[2] << 16) |
			(audio->avsync[3]);
		/* av_sync byte_count */
		stats->byte_count = (audio->avsync[5] << 16) |
			(audio->avsync[6]);
		audio->avsync_flag = 0;	/* consume the report */
		rc = 0;
	}
	local_irq_restore(flags);
	return rc;
}
/*
 * audevrc_ioctl - main ioctl handler for /dev/msm_evrc.
 *
 * Locking is tiered: AUDIO_GET_STATS and AUDIO_GET_EVENT may block
 * for a long time and so run before audio->lock is taken; the fast
 * volume/pan/EQ commands take only dsp_lock; all remaining commands
 * run under audio->lock.
 */
static long audevrc_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct audio *audio = file->private_data;
	int rc = -EINVAL;
	unsigned long flags = 0;
	uint16_t enable_mask;
	int enable;
	int prev_state;
	MM_DBG("cmd = %d\n", cmd);
	if (cmd == AUDIO_GET_STATS) {
		struct msm_audio_stats stats;
		audio->avsync_flag = 0;
		memset(&stats, 0, sizeof(stats));
		if (audpp_query_avsync(audio->dec_id) < 0)
			return rc;
		/* wait (bounded) for the DSP's AV sync report */
		rc = wait_event_interruptible_timeout(audio->avsync_wait,
				(audio->avsync_flag == 1),
				msecs_to_jiffies(AUDPP_AVSYNC_EVENT_TIMEOUT));
		if (rc < 0)
			return rc;
		else if ((rc > 0) || ((rc == 0) && (audio->avsync_flag == 1))) {
			if (audio_get_avsync_data(audio, &stats) < 0)
				return rc;
			if (copy_to_user((void *)arg, &stats, sizeof(stats)))
				return -EFAULT;
			return 0;
		} else
			return -EAGAIN;
	}
	/* fast-path commands protected only by dsp_lock */
	switch (cmd) {
	case AUDIO_ENABLE_AUDPP:
		if (copy_from_user(&enable_mask, (void *) arg,
				sizeof(enable_mask))) {
			rc = -EFAULT;
			break;
		}
		spin_lock_irqsave(&audio->dsp_lock, flags);
		enable = (enable_mask & EQ_ENABLE) ? 1 : 0;
		audio_enable_eq(audio, enable);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_VOLUME:
		spin_lock_irqsave(&audio->dsp_lock, flags);
		audio->vol_pan.volume = arg;
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
					POPP);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_PAN:
		spin_lock_irqsave(&audio->dsp_lock, flags);
		audio->vol_pan.pan = arg;
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
					POPP);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_EQ:
		/* disable EQ while the table is being overwritten */
		prev_state = audio->eq_enable;
		audio->eq_enable = 0;
		if (copy_from_user(&audio->eq.num_bands, (void *) arg,
				sizeof(audio->eq) -
				(AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) {
			rc = -EFAULT;
			break;
		}
		audio->eq_enable = prev_state;
		audio->eq_needs_commit = 1;
		rc = 0;
		break;
	}
	/* handled by one of the fast-path cases above? */
	if (-EINVAL != rc)
		return rc;
	if (cmd == AUDIO_GET_EVENT) {
		MM_DBG("AUDIO_GET_EVENT\n");
		/* only one reader may block on the event queue at a time */
		if (mutex_trylock(&audio->get_event_lock)) {
			rc = audevrc_process_event_req(audio,
					(void __user *) arg);
			mutex_unlock(&audio->get_event_lock);
		} else
			rc = -EBUSY;
		return rc;
	}
	if (cmd == AUDIO_ABORT_GET_EVENT) {
		audio->event_abort = 1;
		wake_up(&audio->event_wait);
		return 0;
	}
	mutex_lock(&audio->lock);
	switch (cmd) {
	case AUDIO_START:
		MM_DBG("AUDIO_START\n");
		rc = audevrc_enable(audio);
		if (!rc) {
			/* wait until the DSP reports SUCCESS or FAILURE */
			rc = wait_event_interruptible_timeout(audio->wait,
				audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
				msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
			MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
			if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS)
				rc = -ENODEV;
			else
				rc = 0;
		}
		break;
	case AUDIO_STOP:
		MM_DBG("AUDIO_STOP\n");
		rc = audevrc_disable(audio);
		/* stopped flag kicks sleepers out of read/write waits */
		audio->stopped = 1;
		audevrc_ioport_reset(audio);
		audio->stopped = 0;
		break;
	case AUDIO_FLUSH:
		MM_DBG("AUDIO_FLUSH\n");
		audio->rflush = 1;
		audio->wflush = 1;
		audevrc_ioport_reset(audio);
		if (audio->running) {
			audpp_flush(audio->dec_id);
			/* FLUSH_ACK from the DSP clears wflush */
			rc = wait_event_interruptible(audio->write_wait,
				!audio->wflush);
			if (rc < 0) {
				MM_ERR("AUDIO_FLUSH interrupted\n");
				rc = -EINTR;
			}
		} else {
			/* not running: nothing in flight, clear directly */
			audio->rflush = 0;
			audio->wflush = 0;
		}
		break;
	case AUDIO_SET_CONFIG:{
		struct msm_audio_config config;
		if (copy_from_user
			(&config, (void *)arg, sizeof(config))) {
			rc = -EFAULT;
			break;
		}
		audio->mfield = config.meta_field;
		rc = 0;
		MM_DBG("AUDIO_SET_CONFIG applicable only \
			for meta field configuration\n");
		break;
	}
	case AUDIO_GET_CONFIG:{
		/* EVRC geometry is fixed: 8 kHz mono, two write buffers */
		struct msm_audio_config config;
		config.buffer_size = BUFSZ;
		config.buffer_count = 2;
		config.sample_rate = 8000;
		config.channel_count = 1;
		config.meta_field = 0;
		config.unused[0] = 0;
		config.unused[1] = 0;
		config.unused[2] = 0;
		if (copy_to_user((void *)arg, &config, sizeof(config)))
			rc = -EFAULT;
		else
			rc = 0;
		break;
	}
	case AUDIO_GET_PCM_CONFIG:{
		struct msm_audio_pcm_config config;
		config.pcm_feedback = audio->pcm_feedback;
		config.buffer_count = PCM_BUF_MAX_COUNT;
		config.buffer_size = PCM_BUFSZ_MIN;
		if (copy_to_user((void *)arg, &config, sizeof(config)))
			rc = -EFAULT;
		else
			rc = 0;
		break;
	}
	case AUDIO_SET_PCM_CONFIG:{
		struct msm_audio_pcm_config config;
		if (copy_from_user
			(&config, (void *)arg, sizeof(config))) {
			rc = -EFAULT;
			break;
		}
		/* tunnel/non-tunnel mode is fixed at open() time */
		if (config.pcm_feedback != audio->pcm_feedback) {
			MM_ERR("Not sufficient permission to"
				"change the playback mode\n");
			rc = -EACCES;
			break;
		}
		if ((config.buffer_count > PCM_BUF_MAX_COUNT) ||
			(config.buffer_count == 1))
			config.buffer_count = PCM_BUF_MAX_COUNT;
		if (config.buffer_size < PCM_BUFSZ_MIN)
			config.buffer_size = PCM_BUFSZ_MIN;
		/* Check if pcm feedback is required */
		if ((config.pcm_feedback) && (!audio->read_data)) {
			MM_DBG("allocate PCM buf %d\n",
				config.buffer_count *
				config.buffer_size);
			audio->read_phys =
				allocate_contiguous_ebi_nomap(
					config.buffer_size *
					config.buffer_count,
					SZ_4K);
			if (!audio->read_phys) {
				rc = -ENOMEM;
				break;
			}
			audio->map_v_read = ioremap(
				audio->read_phys,
				config.buffer_size *
				config.buffer_count);
			if (IS_ERR(audio->map_v_read)) {
				MM_ERR("failed to map read"
					" phy address\n");
				rc = -ENOMEM;
				free_contiguous_memory_by_paddr(
					audio->read_phys);
			} else {
				uint8_t index;
				uint32_t offset = 0;
				audio->read_data =
					audio->map_v_read;
				audio->buf_refresh = 0;
				audio->pcm_buf_count =
					config.buffer_count;
				audio->read_next = 0;
				audio->fill_next = 0;
				/* carve the mapping into per-buffer slices */
				for (index = 0;
				index < config.buffer_count;
				index++) {
					audio->in[index].data =
						audio->read_data + offset;
					audio->in[index].addr =
						audio->read_phys + offset;
					audio->in[index].size =
						config.buffer_size;
					audio->in[index].used = 0;
					offset += config.buffer_size;
				}
				MM_DBG("read buf: phy addr \
					0x%08x kernel addr 0x%08x\n",
					audio->read_phys,
					(int)audio->read_data);
				rc = 0;
			}
		} else {
			rc = 0;
		}
		break;
	}
	case AUDIO_PAUSE:
		MM_DBG("AUDIO_PAUSE %ld\n", arg);
		rc = audpp_pause(audio->dec_id, (int) arg);
		break;
	case AUDIO_GET_SESSION_ID:
		if (copy_to_user((void *) arg, &audio->dec_id,
					sizeof(unsigned short)))
			rc = -EFAULT;
		else
			rc = 0;
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&audio->lock);
	return rc;
}
/*
 * audevrc_fsync - drain playback: block until both output buffers are
 * consumed, then until the DSP reports PCMDMAMISSED (teos), i.e. the
 * decoder has run dry.  Only meaningful in tunnel mode; returns
 * -EINVAL in non-tunnel mode or when not running, -EBUSY if a flush
 * interrupts the drain.
 */
/* Only useful in tunnel-mode */
static int audevrc_fsync(struct file *file, loff_t ppos1, loff_t ppos2, int datasync)
{
	struct audio *audio = file->private_data;
	int rc = 0;
	MM_DBG("\n"); /* Macro prints the file name and function */
	if (!audio->running || audio->pcm_feedback) {
		rc = -EINVAL;
		goto done_nolock;
	}
	mutex_lock(&audio->write_lock);
	/* wait for both buffers drained and the DSP asking for data */
	rc = wait_event_interruptible(audio->write_wait,
		(!audio->out[0].used &&
		!audio->out[1].used &&
		audio->out_needed) || audio->wflush);
	if (rc < 0)
		goto done;
	else if (audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	/* pcm dmamiss message is sent continously
	 * when decoder is starved so no race
	 * condition concern
	 */
	audio->teos = 0;
	rc = wait_event_interruptible(audio->write_wait,
		audio->teos || audio->wflush);
	if (audio->wflush)
		rc = -EBUSY;
done:
	mutex_unlock(&audio->write_lock);
done_nolock:
	return rc;
}
/*
 * audevrc_read - deliver decoded PCM to userspace (non-tunnel mode).
 * Waits for the next filled capture buffer; a read must consume a
 * whole buffer (frame boundary), so a request smaller than the
 * buffer's fill level returns what was copied so far.  At most one
 * buffer is handed out per call.  Returns bytes copied, 0 in tunnel
 * mode, or a negative error.
 */
static ssize_t audevrc_read(struct file *file, char __user *buf, size_t count,
			loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	int rc = 0;
	if (!audio->pcm_feedback) {
		return 0;
		/* PCM feedback is not enabled. Nothing to read */
	}
	mutex_lock(&audio->read_lock);
	MM_DBG("\n"); /* Macro prints the file name and function */
	while (count > 0) {
		rc = wait_event_interruptible(audio->read_wait,
			(audio->in[audio->read_next].used > 0) ||
			(audio->stopped) || (audio->rflush));
		MM_DBG("wait terminated \n");
		if (rc < 0)
			break;
		if (audio->stopped || audio->rflush) {
			rc = -EBUSY;
			break;
		}
		if (count < audio->in[audio->read_next].used) {
			/* Read must happen in frame boundary. Since driver does
			 * not know frame size, read count must be greater or
			 * equal to size of PCM samples
			 */
			MM_DBG("read stop - partial frame\n");
			break;
		} else {
			MM_DBG("read from in[%d]\n", audio->read_next);
			if (copy_to_user
				(buf, audio->in[audio->read_next].data,
				audio->in[audio->read_next].used)) {
				MM_ERR("invalid addr %x \n",
					(unsigned int)buf);
				rc = -EFAULT;
				break;
			}
			count -= audio->in[audio->read_next].used;
			buf += audio->in[audio->read_next].used;
			audio->in[audio->read_next].used = 0;
			if ((++audio->read_next) == audio->pcm_buf_count)
				audio->read_next = 0;
			break;
			/* Force to exit while loop
			 * to prevent output thread
			 * sleep too long if data is
			 * not ready at this moment
			 */
		}
	}
	/* don't feed output buffer to HW decoder during flushing
	 * buffer refresh command will be sent once flush completes
	 * send buf refresh command here can confuse HW decoder
	 */
	if (audio->buf_refresh && !audio->rflush) {
		audio->buf_refresh = 0;
		MM_DBG("kick start pcm feedback again\n");
		audevrc_buffer_refresh(audio);
	}
	mutex_unlock(&audio->read_lock);
	/* return bytes consumed, if any, in preference to an error code */
	if (buf > start)
		rc = buf - start;
	MM_DBG("read %d bytes\n", rc);
	return rc;
}
/*
 * audevrc_process_eos - queue a meta-field-only frame carrying the EOS
 * flag.  Waits until the DSP wants data and both output buffers are
 * empty so the EOS frame is guaranteed to be the last thing sent.
 * Returns 0 on success, -EBUSY on stop/flush, -EFAULT on a bad user
 * pointer, or the interrupted wait's error.
 */
static int audevrc_process_eos(struct audio *audio,
		const char __user *buf_start, unsigned short mfield_size)
{
	int rc = 0;
	struct buffer *frame;
	frame = audio->out + audio->out_head;
	rc = wait_event_interruptible(audio->write_wait,
		(audio->out_needed &&
		audio->out[0].used == 0 &&
		audio->out[1].used == 0)
		|| (audio->stopped)
		|| (audio->wflush));
	if (rc < 0)
		goto done;
	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	/* the EOS frame consists solely of the meta field */
	if (copy_from_user(frame->data, buf_start, mfield_size)) {
		rc = -EFAULT;
		goto done;
	}
	frame->mfield_sz = mfield_size;
	audio->out_head ^= 1;	/* advance the ping-pong head */
	frame->used = mfield_size;
	audevrc_send_data(audio, 0);
done:
	return rc;
}
/*
 * audevrc_write - accept EVRC bitstream from userspace.
 * Fills the free ping-pong output buffer and kicks the DSP.  When
 * meta fields are enabled, the first chunk of the user buffer starts
 * with a 16-bit meta-field size; the meta field may carry an EOS flag
 * which, if the buffer holds nothing else, is handled by
 * audevrc_process_eos().  Returns bytes consumed or a negative error;
 * write length must be even (-EINVAL otherwise).
 */
static ssize_t audevrc_write(struct file *file, const char __user *buf,
			size_t count, loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	struct buffer *frame;
	size_t xfer;
	char *cpy_ptr;
	unsigned short mfield_size = 0;
	int rc = 0, eos_condition = AUDEVRC_EOS_NONE;
	MM_DBG("cnt=%d\n", count);
	if (count & 1)
		return -EINVAL;
	mutex_lock(&audio->write_lock);
	while (count > 0) {
		frame = audio->out + audio->out_head;
		cpy_ptr = frame->data;
		rc = wait_event_interruptible(audio->write_wait,
			(frame->used == 0)
			|| (audio->stopped)
			|| (audio->wflush));
		if (rc < 0)
			break;
		if (audio->stopped || audio->wflush) {
			rc = -EBUSY;
			break;
		}
		if (audio->mfield) {
			if (buf == start) {
				/* Processing beginning of user buffer */
				if (__get_user(mfield_size,
					(unsigned short __user *) buf)) {
					rc = -EFAULT;
					break;
				} else if (mfield_size > count) {
					rc = -EINVAL;
					break;
				}
				MM_DBG("mf offset_val %x\n", mfield_size);
				if (copy_from_user(cpy_ptr, buf,
						mfield_size)) {
					rc = -EFAULT;
					break;
				}
				/* Check if EOS flag is set and buffer has
				 * contains just meta field
				 */
				if (cpy_ptr[AUDEVRC_EOS_FLG_OFFSET] &
						AUDEVRC_EOS_FLG_MASK) {
					MM_DBG("eos set\n");
					eos_condition = AUDEVRC_EOS_SET;
					if (mfield_size == count) {
						buf += mfield_size;
						break;
					} else
						cpy_ptr[AUDEVRC_EOS_FLG_OFFSET] &=
							~AUDEVRC_EOS_FLG_MASK;
				}
				/* Check EOS to see if */
				cpy_ptr += mfield_size;
				count -= mfield_size;
				buf += mfield_size;
			} else {
				/* subsequent chunks carry no meta field */
				mfield_size = 0;
				MM_DBG("continuous buffer\n");
			}
			frame->mfield_sz = mfield_size;
		}
		/* copy as much as fits after the meta field */
		xfer = (count > (frame->size - mfield_size)) ?
			(frame->size - mfield_size) : count;
		if (copy_from_user(cpy_ptr, buf, xfer)) {
			rc = -EFAULT;
			break;
		}
		frame->used = xfer + mfield_size;
		audio->out_head ^= 1;	/* advance the ping-pong head */
		count -= xfer;
		buf += xfer;
		audevrc_send_data(audio, 0);
	}
	if (eos_condition == AUDEVRC_EOS_SET)
		rc = audevrc_process_eos(audio, start, mfield_size);
	mutex_unlock(&audio->write_lock);
	/* report bytes consumed, if any, in preference to an error */
	if (!rc) {
		if (buf > start)
			return buf - start;
	}
	return rc;
}
/*
 * audevrc_release - tear down an instance on last close.
 * Unwinds everything audevrc_open() set up: event listener, decoder,
 * DSP module, decoder slot, suspend hook, event queues, and the
 * contiguous write (and optional read) buffers.
 */
static int audevrc_release(struct inode *inode, struct file *file)
{
	struct audio *audio = file->private_data;
	MM_INFO("audio instance 0x%08x freeing\n", (int)audio);
	mutex_lock(&audio->lock);
	auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id);
	audevrc_disable(audio);
	audevrc_flush(audio);
	audevrc_flush_pcm_buf(audio);
	msm_adsp_put(audio->audplay);
	audpp_adec_free(audio->dec_id);
#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&audio->suspend_ctl.node);
#endif
	/* abort any blocked AUDIO_GET_EVENT caller before freeing queues */
	audio->event_abort = 1;
	wake_up(&audio->event_wait);
	audevrc_reset_event_queue(audio);
	iounmap(audio->map_v_write);
	free_contiguous_memory_by_paddr(audio->phys);
	/* read buffers exist only if AUDIO_SET_PCM_CONFIG allocated them */
	if (audio->read_data) {
		iounmap(audio->map_v_read);
		free_contiguous_memory_by_paddr(audio->read_phys);
	}
	mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
	if (audio->dentry)
		debugfs_remove(audio->dentry);
#endif
	kfree(audio);
	return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * audevrc_post_event - queue an event for AUDIO_GET_EVENT readers.
 * Reuses a node from the free pool when possible, otherwise allocates
 * atomically (may be called from non-sleeping context).  Drops the
 * event if allocation fails.
 */
static void audevrc_post_event(struct audio *audio, int type,
		union msm_audio_event_payload payload)
{
	struct audevrc_event *e_node = NULL;
	unsigned long flags;
	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->free_event_queue)) {
		e_node = list_first_entry(&audio->free_event_queue,
				struct audevrc_event, list);
		list_del(&e_node->list);
	} else {
		e_node = kmalloc(sizeof(struct audevrc_event), GFP_ATOMIC);
		if (!e_node) {
			MM_ERR("No mem to post event %d\n", type);
			return;
		}
	}
	e_node->event_type = type;
	e_node->payload = payload;
	list_add_tail(&e_node->list, &audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	wake_up(&audio->event_wait);
}
/*
 * Early-suspend hook: notify userspace via the event queue.
 * NOTE(review): `payload` is passed uninitialized — presumably
 * SUSPEND events carry no payload and consumers ignore it; confirm
 * against the msm_audio event consumers.
 */
static void audevrc_suspend(struct early_suspend *h)
{
	struct audevrc_suspend_ctl *ctl =
		container_of(h, struct audevrc_suspend_ctl, node);
	union msm_audio_event_payload payload;
	MM_DBG("\n"); /* Macro prints the file name and function */
	audevrc_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
}
/*
 * Late-resume hook: notify userspace via the event queue.
 * NOTE(review): `payload` is passed uninitialized, same caveat as
 * audevrc_suspend() — RESUME events apparently carry no payload.
 */
static void audevrc_resume(struct early_suspend *h)
{
	struct audevrc_suspend_ctl *ctl =
		container_of(h, struct audevrc_suspend_ctl, node);
	union msm_audio_event_payload payload;
	MM_DBG("\n"); /* Macro prints the file name and function */
	audevrc_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
}
#endif
#ifdef CONFIG_DEBUG_FS
/*
 * Debugfs open: stash the audio instance for the read handler.
 * Fix: return type changed from ssize_t to int to match the
 * file_operations.open prototype, int (*)(struct inode *,
 * struct file *); the old signature made the .open assignment an
 * incompatible-pointer initialization.
 */
static int audevrc_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * Debugfs read: dump instance state into a static text buffer.
 * NOTE(review): `buffer` is static and shared across instances and
 * concurrent readers with no lock of its own — acceptable for a
 * debug-only interface, but output may interleave.
 */
static ssize_t audevrc_debug_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	const int debug_bufmax = 1024;
	static char buffer[1024];
	int n = 0, i;
	struct audio *audio = file->private_data;
	mutex_lock(&audio->lock);
	n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"enabled %d\n", audio->enabled);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"stopped %d\n", audio->stopped);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_feedback %d\n", audio->pcm_feedback);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_buf_sz %d\n", audio->out[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_buf_count %d \n", audio->pcm_buf_count);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_buf_sz %d \n", audio->in[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"volume %x \n", audio->vol_pan.volume);
	mutex_unlock(&audio->lock);
	/* Following variables are only useful for debugging when
	 * when playback halts unexpectedly. Thus, no mutual exclusion
	 * enforced
	 */
	n += scnprintf(buffer + n, debug_bufmax - n,
			"wflush %d\n", audio->wflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"rflush %d\n", audio->rflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"running %d \n", audio->running);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"dec state %d \n", audio->dec_state);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_needed %d \n", audio->out_needed);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_head %d \n", audio->out_head);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_tail %d \n", audio->out_tail);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[0].used %d \n", audio->out[0].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[1].used %d \n", audio->out[1].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"buffer_refresh %d \n", audio->buf_refresh);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"read_next %d \n", audio->read_next);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"fill_next %d \n", audio->fill_next);
	for (i = 0; i < audio->pcm_buf_count; i++)
		n += scnprintf(buffer + n, debug_bufmax - n,
				"in[%d].size %d \n", i, audio->in[i].used);
	buffer[n] = 0;
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
/* file operations for the per-instance debugfs node */
static const struct file_operations audevrc_debug_fops = {
	.read = audevrc_debug_read,
	.open = audevrc_debug_open,
};
#endif
/*
 * audevrc_open - create a decoder instance.
 * Open mode selects the operating mode: O_RDWR = non-tunnel (PCM
 * feedback), O_WRONLY = tunnel; read-only opens are rejected.
 * Allocates a decoder slot, contiguous DMA write buffers, the
 * audplay DSP module, locks/queues, the auddev listener, an optional
 * debugfs node and suspend hooks, and pre-allocates event nodes.
 * Error paths unwind in reverse order of acquisition.
 */
static int audevrc_open(struct inode *inode, struct file *file)
{
	struct audio *audio = NULL;
	int rc, dec_attrb, decid, i;
	struct audevrc_event *e_node = NULL;
#ifdef CONFIG_DEBUG_FS
	/* 4 bytes represents decoder number, 1 byte for terminate string */
	char name[sizeof "msm_evrc_" + 5];
#endif
	/* Allocate audio instance, set to zero */
	audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
	if (!audio) {
		MM_ERR("no memory to allocate audio instance\n");
		rc = -ENOMEM;
		goto done;
	}
	MM_INFO("audio instance 0x%08x created\n", (int)audio);
	/* Allocate the decoder */
	dec_attrb = AUDDEC_DEC_EVRC;
	if ((file->f_mode & FMODE_WRITE) &&
		(file->f_mode & FMODE_READ)) {
		dec_attrb |= MSM_AUD_MODE_NONTUNNEL;
		audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK;
	} else if ((file->f_mode & FMODE_WRITE) &&
			!(file->f_mode & FMODE_READ)) {
		dec_attrb |= MSM_AUD_MODE_TUNNEL;
		audio->pcm_feedback = TUNNEL_MODE_PLAYBACK;
	} else {
		kfree(audio);
		rc = -EACCES;
		goto done;
	}
	decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
			&audio->queue_id);
	if (decid < 0) {
		MM_ERR("No free decoder available, freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENODEV;
		kfree(audio);
		goto done;
	}
	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
	/* physically contiguous memory for the two DMA write buffers */
	audio->phys = allocate_contiguous_ebi_nomap(DMASZ, SZ_4K);
	if (!audio->phys) {
		MM_ERR("could not allocate write buffers, freeing instance \
				0x%08x\n", (int)audio);
		rc = -ENOMEM;
		audpp_adec_free(audio->dec_id);
		kfree(audio);
		goto done;
	} else {
		audio->map_v_write = ioremap(audio->phys, DMASZ);
		if (IS_ERR(audio->map_v_write)) {
			MM_ERR("failed to map write physical address, freeing \
					instance 0x%08x\n", (int)audio);
			rc = -ENOMEM;
			free_contiguous_memory_by_paddr(audio->phys);
			audpp_adec_free(audio->dec_id);
			kfree(audio);
			goto done;
		}
		audio->data = audio->map_v_write;
		MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
				audio->phys, (int)audio->data);
	}
	rc = msm_adsp_get(audio->module_name, &audio->audplay,
			&audplay_adsp_ops_evrc, audio);
	if (rc) {
		MM_ERR("failed to get %s module, freeing instance 0x%08x\n",
				audio->module_name, (int)audio);
		goto err;
	}
	/* Initialize all locks of audio instance */
	mutex_init(&audio->lock);
	mutex_init(&audio->write_lock);
	mutex_init(&audio->read_lock);
	mutex_init(&audio->get_event_lock);
	spin_lock_init(&audio->dsp_lock);
	init_waitqueue_head(&audio->write_wait);
	init_waitqueue_head(&audio->read_wait);
	INIT_LIST_HEAD(&audio->free_event_queue);
	INIT_LIST_HEAD(&audio->event_queue);
	init_waitqueue_head(&audio->wait);
	init_waitqueue_head(&audio->event_wait);
	spin_lock_init(&audio->event_queue_lock);
	init_waitqueue_head(&audio->avsync_wait);
	/* split the DMA area into the two ping-pong output buffers */
	audio->out[0].data = audio->data + 0;
	audio->out[0].addr = audio->phys + 0;
	audio->out[0].size = BUFSZ;
	audio->out[1].data = audio->data + BUFSZ;
	audio->out[1].addr = audio->phys + BUFSZ;
	audio->out[1].size = BUFSZ;
	audio->vol_pan.volume = 0x3FFF;	/* default volume */
	audevrc_flush(audio);
	file->private_data = audio;
	audio->opened = 1;
	audio->device_events = AUDDEV_EVT_DEV_RDY
				|AUDDEV_EVT_DEV_RLS|
				AUDDEV_EVT_STREAM_VOL_CHG;
	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_DEC,
					audio->dec_id,
					evrc_listner,
					(void *)audio);
	if (rc) {
		MM_ERR("%s: failed to register listner\n", __func__);
		goto event_err;
	}
#ifdef CONFIG_DEBUG_FS
	snprintf(name, sizeof name, "msm_evrc_%04x", audio->dec_id);
	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
			NULL, (void *) audio, &audevrc_debug_fops);
	/* debugfs is best-effort: failure is not fatal */
	if (IS_ERR(audio->dentry))
		MM_DBG("debugfs_create_file failed\n");
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
	audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	audio->suspend_ctl.node.resume = audevrc_resume;
	audio->suspend_ctl.node.suspend = audevrc_suspend;
	audio->suspend_ctl.audio = audio;
	register_early_suspend(&audio->suspend_ctl.node);
#endif
	/* pre-allocate event nodes for the free pool */
	for (i = 0; i < AUDEVRC_EVENT_NUM; i++) {
		e_node = kmalloc(sizeof(struct audevrc_event), GFP_KERNEL);
		if (e_node)
			list_add_tail(&e_node->list, &audio->free_event_queue);
		else {
			MM_ERR("event pkt alloc failed\n");
			break;
		}
	}
done:
	return rc;
event_err:
	msm_adsp_put(audio->audplay);
err:
	iounmap(audio->map_v_write);
	free_contiguous_memory_by_paddr(audio->phys);
	audpp_adec_free(audio->dec_id);
	kfree(audio);
	return rc;
}
/* File operations for the /dev/msm_evrc decoder character device. */
static const struct file_operations audio_evrc_fops = {
.owner = THIS_MODULE,
.open = audevrc_open,
.release = audevrc_release,
.read = audevrc_read,
.write = audevrc_write,
.unlocked_ioctl = audevrc_ioctl,
.fsync = audevrc_fsync,
};
/* Misc device node; MISC_DYNAMIC_MINOR lets the core pick the minor number. */
struct miscdevice audio_evrc_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_evrc",
.fops = &audio_evrc_fops,
};
/* Module init: register the misc device so /dev/msm_evrc appears. */
static int __init audevrc_init(void)
{
return misc_register(&audio_evrc_misc);
}
/* Module exit: remove the misc device. */
static void __exit audevrc_exit(void)
{
misc_deregister(&audio_evrc_misc);
}
module_init(audevrc_init);
module_exit(audevrc_exit);
MODULE_DESCRIPTION("MSM EVRC driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
chhapil/Dorimanx-SG2-I9100-Kernel | drivers/media/i2c/noon010pc30.c | 599 | 20568 | /*
* Driver for SiliconFile NOON010PC30 CIF (1/11") Image Sensor with ISP
*
* Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
* Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* Initial register configuration based on a driver authored by
* HeungJun Kim <riverful.kim@samsung.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <media/noon010pc30.h>
#include <linux/videodev2.h>
#include <linux/module.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable module debug trace. Set to 1 to enable.");
#define MODULE_NAME "NOON010PC30"
/*
* Register offsets within a page
* b15..b8 - page id, b7..b0 - register address
*/
#define POWER_CTRL_REG 0x0001
#define PAGEMODE_REG 0x03
#define DEVICE_ID_REG 0x0004
#define NOON010PC30_ID 0x86
#define VDO_CTL_REG(n) (0x0010 + (n))
#define SYNC_CTL_REG 0x0012
/* Window size and position */
#define WIN_ROWH_REG 0x0013
#define WIN_ROWL_REG 0x0014
#define WIN_COLH_REG 0x0015
#define WIN_COLL_REG 0x0016
#define WIN_HEIGHTH_REG 0x0017
#define WIN_HEIGHTL_REG 0x0018
#define WIN_WIDTHH_REG 0x0019
#define WIN_WIDTHL_REG 0x001A
#define HBLANKH_REG 0x001B
#define HBLANKL_REG 0x001C
#define VSYNCH_REG 0x001D
#define VSYNCL_REG 0x001E
/* VSYNC control */
#define VS_CTL_REG(n) (0x00A1 + (n))
/* page 1 */
#define ISP_CTL_REG(n) (0x0110 + (n))
#define YOFS_REG 0x0119
#define DARK_YOFS_REG 0x011A
#define SAT_CTL_REG 0x0120
#define BSAT_REG 0x0121
#define RSAT_REG 0x0122
/* Color correction */
#define CMC_CTL_REG 0x0130
#define CMC_OFSGH_REG 0x0133
#define CMC_OFSGL_REG 0x0135
#define CMC_SIGN_REG 0x0136
#define CMC_GOFS_REG 0x0137
#define CMC_COEF_REG(n) (0x0138 + (n))
#define CMC_OFS_REG(n) (0x0141 + (n))
/* Gamma correction */
#define GMA_CTL_REG 0x0160
#define GMA_COEF_REG(n) (0x0161 + (n))
/* Lens Shading */
#define LENS_CTRL_REG 0x01D0
#define LENS_XCEN_REG 0x01D1
#define LENS_YCEN_REG 0x01D2
#define LENS_RC_REG 0x01D3
#define LENS_GC_REG 0x01D4
#define LENS_BC_REG 0x01D5
#define L_AGON_REG 0x01D6
#define L_AGOFF_REG 0x01D7
/* Page 3 - Auto Exposure */
#define AE_CTL_REG(n) (0x0310 + (n))
#define AE_CTL9_REG 0x032C
#define AE_CTL10_REG 0x032D
#define AE_YLVL_REG 0x031C
#define AE_YTH_REG(n) (0x031D + (n))
#define AE_WGT_REG 0x0326
#define EXP_TIMEH_REG 0x0333
#define EXP_TIMEM_REG 0x0334
#define EXP_TIMEL_REG 0x0335
#define EXP_MMINH_REG 0x0336
#define EXP_MMINL_REG 0x0337
#define EXP_MMAXH_REG 0x0338
#define EXP_MMAXM_REG 0x0339
#define EXP_MMAXL_REG 0x033A
/* Page 4 - Auto White Balance */
#define AWB_CTL_REG(n) (0x0410 + (n))
#define AWB_ENABE 0x80
#define AWB_WGHT_REG 0x0419
#define BGAIN_PAR_REG(n) (0x044F + (n))
/* Manual white balance, when AWB_CTL2[0]=1 */
#define MWB_RGAIN_REG 0x0466
#define MWB_BGAIN_REG 0x0467
/* The token to mark an array end */
#define REG_TERM 0xFFFF
/* Mapping of a media-bus pixel code to the value written to ISP_CTL_REG(0). */
struct noon010_format {
enum v4l2_mbus_pixelcode code;
enum v4l2_colorspace colorspace;
u16 ispctl1_reg; /* value programmed into ISP_CTL_REG(0) for this format */
};
/* A supported frame size and the matching VDO_CTL_REG(0) value. */
struct noon010_frmsize {
u16 width;
u16 height;
int vid_ctl1; /* value programmed into VDO_CTL_REG(0) for this size */
};
/* Regulator supply names, in the order used for regulator_bulk_*(). */
static const char * const noon010_supply_name[] = {
"vdd_core", "vddio", "vdda"
};
#define NOON010_NUM_SUPPLIES ARRAY_SIZE(noon010_supply_name)
/* Per-device driver state, embedding the v4l2 subdev and control handler. */
struct noon010_info {
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
struct regulator_bulk_data supply[NOON010_NUM_SUPPLIES];
/*
 * GPIO numbers; probe() stores -EINVAL here when a line is absent,
 * so gpio_is_valid() must be checked before use.
 */
u32 gpio_nreset;
u32 gpio_nstby;
/* Protects the struct members below */
struct mutex lock;
const struct noon010_format *curr_fmt;
const struct noon010_frmsize *curr_win;
unsigned int apply_new_cfg:1; /* format/size change pending */
unsigned int streaming:1;
unsigned int hflip:1;
unsigned int vflip:1;
unsigned int power:1; /* sensor currently powered on */
u8 i2c_reg_page; /* last selected register page, -1 = unknown */
};
/* One register address/value pair; addr == REG_TERM ends an array. */
struct i2c_regval {
u16 addr;
u16 val;
};
/* Supported resolutions: CIF, QCIF and SQCIF. */
static const struct noon010_frmsize noon010_sizes[] = {
{
.width = 352,
.height = 288,
.vid_ctl1 = 0,
}, {
.width = 176,
.height = 144,
.vid_ctl1 = 0x10,
}, {
.width = 88,
.height = 72,
.vid_ctl1 = 0x20,
},
};
/* Supported pixel formats; entry 0 (YUYV) is the default fallback. */
static const struct noon010_format noon010_formats[] = {
{
.code = V4L2_MBUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x03,
}, {
.code = V4L2_MBUS_FMT_YVYU8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x02,
}, {
.code = V4L2_MBUS_FMT_VYUY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0,
}, {
.code = V4L2_MBUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x01,
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x40,
},
};
/* Initial register setup written once at power-on (REG_TERM terminated). */
static const struct i2c_regval noon010_base_regs[] = {
{ WIN_COLL_REG, 0x06 }, { HBLANKL_REG, 0x7C },
/* Color correction and saturation */
{ ISP_CTL_REG(0), 0x30 }, { ISP_CTL_REG(2), 0x30 },
{ YOFS_REG, 0x80 }, { DARK_YOFS_REG, 0x04 },
{ SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 },
{ CMC_CTL_REG, 0x0F }, { CMC_OFSGH_REG, 0x3C },
{ CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x3F },
{ CMC_COEF_REG(0), 0x79 }, { CMC_OFS_REG(0), 0x00 },
{ CMC_COEF_REG(1), 0x39 }, { CMC_OFS_REG(1), 0x00 },
{ CMC_COEF_REG(2), 0x00 }, { CMC_OFS_REG(2), 0x00 },
{ CMC_COEF_REG(3), 0x11 }, { CMC_OFS_REG(3), 0x8B },
{ CMC_COEF_REG(4), 0x65 }, { CMC_OFS_REG(4), 0x07 },
{ CMC_COEF_REG(5), 0x14 }, { CMC_OFS_REG(5), 0x04 },
{ CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x9C },
{ CMC_COEF_REG(7), 0x33 }, { CMC_OFS_REG(7), 0x89 },
{ CMC_COEF_REG(8), 0x74 }, { CMC_OFS_REG(8), 0x25 },
/* Automatic white balance */
{ AWB_CTL_REG(0), 0x78 }, { AWB_CTL_REG(1), 0x2E },
{ AWB_CTL_REG(2), 0x20 }, { AWB_CTL_REG(3), 0x85 },
/* Auto exposure */
{ AE_CTL_REG(0), 0xDC }, { AE_CTL_REG(1), 0x81 },
{ AE_CTL_REG(2), 0x30 }, { AE_CTL_REG(3), 0xA5 },
{ AE_CTL_REG(4), 0x40 }, { AE_CTL_REG(5), 0x51 },
{ AE_CTL_REG(6), 0x33 }, { AE_CTL_REG(7), 0x7E },
{ AE_CTL9_REG, 0x00 }, { AE_CTL10_REG, 0x02 },
{ AE_YLVL_REG, 0x44 }, { AE_YTH_REG(0), 0x34 },
{ AE_YTH_REG(1), 0x30 }, { AE_WGT_REG, 0xD5 },
/* Lens shading compensation */
{ LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 },
{ LENS_YCEN_REG, 0x70 }, { LENS_RC_REG, 0x53 },
{ LENS_GC_REG, 0x40 }, { LENS_BC_REG, 0x3E },
{ REG_TERM, 0 },
};
/* Translate a subdev pointer back to the driver state embedding it. */
static inline struct noon010_info *to_noon010(struct v4l2_subdev *sd)
{
	struct noon010_info *info = container_of(sd, struct noon010_info, sd);

	return info;
}
/*
 * Get the subdev backing a control: the control handler (ctrl->handler)
 * is the hdl member embedded in struct noon010_info, which also embeds
 * the subdev.
 */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
return &container_of(ctrl->handler, struct noon010_info, hdl)->sd;
}
/*
 * Select the register page encoded in bits 15..8 of @reg before an access.
 * The page select register itself (in-page offset 0x03, PAGEMODE_REG) is
 * reachable from any page, so no switch is done for it. The currently
 * selected page is cached in info->i2c_reg_page to avoid redundant writes.
 * Returns 0 on success or a negative errno from the SMBus write.
 */
static inline int set_i2c_page(struct noon010_info *info,
struct i2c_client *client, unsigned int reg)
{
u32 page = reg >> 8 & 0xFF;
int ret = 0;
if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
if (!ret)
/* Only update the cache when the switch actually happened. */
info->i2c_reg_page = page;
}
return ret;
}
/*
 * Read one sensor register. @reg_addr carries the page id in bits 15..8
 * and the in-page offset in bits 7..0. Returns the byte read (>= 0) or
 * a negative errno.
 */
static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr)
{
	struct noon010_info *info = to_noon010(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;

	ret = set_i2c_page(info, client, reg_addr);
	if (ret)
		return ret;

	return i2c_smbus_read_byte_data(client, reg_addr & 0xFF);
}
/*
 * Write one sensor register. @reg_addr carries the page id in bits 15..8
 * and the in-page offset in bits 7..0. Returns 0 or a negative errno.
 */
static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val)
{
	struct noon010_info *info = to_noon010(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;

	ret = set_i2c_page(info, client, reg_addr);
	if (ret)
		return ret;

	return i2c_smbus_write_byte_data(client, reg_addr & 0xFF, val);
}
/*
 * Write a REG_TERM-terminated array of register/value pairs, stopping at
 * the first failure. Returns 0 on success or the first error code.
 */
static inline int noon010_bulk_write_reg(struct v4l2_subdev *sd,
					 const struct i2c_regval *msg)
{
	int ret = 0;

	for (; msg->addr != REG_TERM && !ret; msg++)
		ret = cam_i2c_write(sd, msg->addr, msg->val);

	return ret;
}
/*
 * Device reset and sleep mode control.
 * POWER_CTRL_REG: 0xF0 = normal operation, 0xF1 = sleep; when @reset is
 * set, bit 1 is pulsed first to trigger a soft reset before the final
 * mode is written. A successful reset invalidates the cached i2c page
 * since the chip reverts to its default page.
 */
static int noon010_power_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep)
{
struct noon010_info *info = to_noon010(sd);
u8 reg = sleep ? 0xF1 : 0xF0;
int ret = 0;
if (reset) {
ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
udelay(20);
}
if (!ret) {
ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
if (reset && !ret)
/* Page selection is lost across reset; force a re-select. */
info->i2c_reg_page = -1;
}
return ret;
}
/*
 * Enable or disable automatic white balance by updating both AWB control
 * registers. Returns 0 on success or the first I2C error.
 */
static int noon010_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
{
	int ret = cam_i2c_write(sd, AWB_CTL_REG(1), on ? 0x2E : 0x2F);

	if (ret)
		return ret;

	return cam_i2c_write(sd, AWB_CTL_REG(0), on ? 0xFB : 0x7B);
}
/*
 * Set horizontal/vertical image flip via VDO_CTL_REG(1) and cache the new
 * state in info->hflip/vflip on success.
 * Bit 0 = hflip, bit 1 = vflip; bit 7 is set on write — presumably an
 * "apply/update" strobe, TODO confirm against the datasheet.
 * Called with struct noon010_info.lock mutex held.
 */
static int noon010_set_flip(struct v4l2_subdev *sd, int hflip, int vflip)
{
struct noon010_info *info = to_noon010(sd);
int reg, ret;
reg = cam_i2c_read(sd, VDO_CTL_REG(1));
if (reg < 0)
return reg;
/* Clear the flip bits (0,1) and bit 7, keep the remaining settings. */
reg &= 0x7C;
if (hflip)
reg |= 0x01;
if (vflip)
reg |= 0x02;
ret = cam_i2c_write(sd, VDO_CTL_REG(1), reg | 0x80);
if (!ret) {
info->hflip = hflip;
info->vflip = vflip;
}
return ret;
}
/*
 * Program the currently selected frame size and pixel format into the
 * sensor. Returns 0 on success or the first I2C error.
 */
static int noon010_set_params(struct v4l2_subdev *sd)
{
	struct noon010_info *info = to_noon010(sd);
	int ret;

	ret = cam_i2c_write(sd, VDO_CTL_REG(0), info->curr_win->vid_ctl1);
	if (!ret)
		ret = cam_i2c_write(sd, ISP_CTL_REG(0),
				    info->curr_fmt->ispctl1_reg);
	return ret;
}
/*
 * Find the supported frame size nearest to mf->width/height (minimum sum
 * of absolute differences; first minimum wins on ties). On success the
 * requested dimensions are replaced with the matched ones and, if @size
 * is non-NULL, *size points at the matching table entry.
 */
static int noon010_try_frame_size(struct v4l2_mbus_framefmt *mf,
				  const struct noon010_frmsize **size)
{
	const struct noon010_frmsize *match = NULL;
	unsigned int min_err = ~0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(noon010_sizes); i++) {
		const struct noon010_frmsize *fsize = &noon010_sizes[i];
		int err = abs(fsize->width - mf->width)
			+ abs(fsize->height - mf->height);

		if (err < min_err) {
			min_err = err;
			match = fsize;
		}
	}
	if (!match)
		return -EINVAL;

	mf->width = match->width;
	mf->height = match->height;
	if (size)
		*size = match;
	return 0;
}
/*
 * Power the sensor up: drive both GPIOs low, enable the regulators, then
 * run the reset/standby sequencing below. The exact toggle/delay pattern
 * follows the sensor's power-up timing — presumably per the NOON010PC30
 * datasheet; TODO confirm the timing values if this is ever changed.
 * Called with info.lock mutex held.
 */
static int power_enable(struct noon010_info *info)
{
int ret;
if (info->power) {
v4l2_info(&info->sd, "%s: sensor is already on\n", __func__);
return 0;
}
/* Hold the chip in standby/reset while supplies come up. */
if (gpio_is_valid(info->gpio_nstby))
gpio_set_value(info->gpio_nstby, 0);
if (gpio_is_valid(info->gpio_nreset))
gpio_set_value(info->gpio_nreset, 0);
ret = regulator_bulk_enable(NOON010_NUM_SUPPLIES, info->supply);
if (ret)
return ret;
/* Release reset, then standby, then pulse reset once more. */
if (gpio_is_valid(info->gpio_nreset)) {
msleep(50);
gpio_set_value(info->gpio_nreset, 1);
}
if (gpio_is_valid(info->gpio_nstby)) {
udelay(1000);
gpio_set_value(info->gpio_nstby, 1);
}
if (gpio_is_valid(info->gpio_nreset)) {
udelay(1000);
gpio_set_value(info->gpio_nreset, 0);
msleep(100);
gpio_set_value(info->gpio_nreset, 1);
msleep(20);
}
info->power = 1;
v4l2_dbg(1, debug, &info->sd, "%s: sensor is on\n", __func__);
return 0;
}
/*
 * Power the sensor down: disable the regulators first, then drive the
 * standby and reset GPIOs low. No-op (returns 0) if already off.
 * Called with info.lock mutex held.
 */
static int power_disable(struct noon010_info *info)
{
int ret;
if (!info->power) {
v4l2_info(&info->sd, "%s: sensor is already off\n", __func__);
return 0;
}
ret = regulator_bulk_disable(NOON010_NUM_SUPPLIES, info->supply);
if (ret)
return ret;
if (gpio_is_valid(info->gpio_nstby))
gpio_set_value(info->gpio_nstby, 0);
if (gpio_is_valid(info->gpio_nreset))
gpio_set_value(info->gpio_nreset, 0);
info->power = 0;
v4l2_dbg(1, debug, &info->sd, "%s: sensor is off\n", __func__);
return 0;
}
/*
 * v4l2_ctrl_ops.s_ctrl: apply a control change to the hardware.
 * Supported: auto white balance and manual red/blue balance gains.
 * Returns 0 on success, -EINVAL for unknown controls, or an I2C error.
 */
static int noon010_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct noon010_info *info = to_noon010(sd);
int ret = 0;
v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
__func__, ctrl->id, ctrl->val);
mutex_lock(&info->lock);
/*
 * If the device is not powered up by the host driver do
 * not apply any controls to H/W at this time. Instead
 * the controls will be restored right after power-up
 * (see noon010_s_power() -> v4l2_ctrl_handler_setup()).
 */
if (!info->power)
goto unlock;
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
ret = noon010_enable_autowhitebalance(sd, ctrl->val);
break;
case V4L2_CID_BLUE_BALANCE:
ret = cam_i2c_write(sd, MWB_BGAIN_REG, ctrl->val);
break;
case V4L2_CID_RED_BALANCE:
ret = cam_i2c_write(sd, MWB_RGAIN_REG, ctrl->val);
break;
default:
ret = -EINVAL;
}
unlock:
mutex_unlock(&info->lock);
return ret;
}
/*
 * Pad op: enumerate the supported media bus pixel codes.
 * Returns -EINVAL once code->index runs past the format table.
 */
static int noon010_enum_mbus_code(struct v4l2_subdev *sd,
				  struct v4l2_subdev_fh *fh,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index < ARRAY_SIZE(noon010_formats)) {
		code->code = noon010_formats[code->index].code;
		return 0;
	}
	return -EINVAL;
}
/*
 * Pad op: return the current (ACTIVE) or negotiated (TRY) format.
 * TRY formats are taken from the file-handle state; the active format
 * is built from curr_win/curr_fmt under the device lock.
 */
static int noon010_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
if (fh) {
mf = v4l2_subdev_get_try_format(fh, 0);
fmt->format = *mf;
}
return 0;
}
mf = &fmt->format;
mutex_lock(&info->lock);
mf->width = info->curr_win->width;
mf->height = info->curr_win->height;
mf->code = info->curr_fmt->code;
mf->colorspace = info->curr_fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
mutex_unlock(&info->lock);
return 0;
}
/*
 * Return the table entry whose code matches mf->code, defaulting to
 * entry 0 (YUYV). Note the loop is deliberate: `while (--i)` scans
 * indices ARRAY_SIZE-1 down to 1 and stops at 0 without testing it, so
 * when nothing matches, i == 0 and the first table entry is used as the
 * fallback. mf->code is rewritten to the chosen entry's code.
 */
static const struct noon010_format *noon010_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
int i = ARRAY_SIZE(noon010_formats);
while (--i)
if (mf->code == noon010_formats[i].code)
break;
mf->code = noon010_formats[i].code;
return &noon010_formats[i];
}
/*
 * Pad op: set the format. The request is first clamped to the nearest
 * supported format/size. TRY formats are only stored in the file-handle
 * state; ACTIVE formats update curr_fmt/curr_win and mark a pending
 * reconfiguration, but are refused with -EBUSY while streaming.
 */
static int noon010_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
struct noon010_info *info = to_noon010(sd);
const struct noon010_frmsize *size = NULL;
const struct noon010_format *nf;
struct v4l2_mbus_framefmt *mf;
int ret = 0;
nf = noon010_try_fmt(sd, &fmt->format);
noon010_try_frame_size(&fmt->format, &size);
fmt->format.colorspace = V4L2_COLORSPACE_JPEG;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
if (fh) {
mf = v4l2_subdev_get_try_format(fh, 0);
*mf = fmt->format;
}
return 0;
}
mutex_lock(&info->lock);
if (!info->streaming) {
/* Applied lazily in noon010_s_stream() via apply_new_cfg. */
info->apply_new_cfg = 1;
info->curr_fmt = nf;
info->curr_win = size;
} else {
ret = -EBUSY;
}
mutex_unlock(&info->lock);
return ret;
}
/*
 * Load the base register set, program the current format/size and set
 * the default flip (hflip on, vflip off). Stops at the first error.
 * Called with struct noon010_info.lock mutex held.
 */
static int noon010_base_config(struct v4l2_subdev *sd)
{
	int ret;

	ret = noon010_bulk_write_reg(sd, noon010_base_regs);
	if (ret)
		return ret;

	ret = noon010_set_params(sd);
	if (ret)
		return ret;

	return noon010_set_flip(sd, 1, 0);
}
/*
 * Core op: switch sensor power. On power-up the base configuration is
 * written and the control handler state is replayed to the hardware
 * (controls set while off are cached only — see noon010_s_ctrl()).
 * On power-down the chip is put to sleep before cutting the supplies;
 * its return value is intentionally ignored.
 */
static int noon010_s_power(struct v4l2_subdev *sd, int on)
{
struct noon010_info *info = to_noon010(sd);
int ret;
mutex_lock(&info->lock);
if (on) {
ret = power_enable(info);
if (!ret)
ret = noon010_base_config(sd);
} else {
noon010_power_ctrl(sd, false, true);
ret = power_disable(info);
}
mutex_unlock(&info->lock);
/* Restore the controls state */
if (!ret && on)
ret = v4l2_ctrl_handler_setup(&info->hdl);
return ret;
}
/*
 * Video op: start/stop streaming by toggling the chip's sleep mode.
 * A pending format/size change (apply_new_cfg) is programmed when
 * streaming starts.
 */
static int noon010_s_stream(struct v4l2_subdev *sd, int on)
{
struct noon010_info *info = to_noon010(sd);
int ret = 0;
mutex_lock(&info->lock);
/* "!x != !y" compares the two as normalized booleans: state change? */
if (!info->streaming != !on) {
ret = noon010_power_ctrl(sd, false, !on);
if (!ret)
info->streaming = on;
}
if (!ret && on && info->apply_new_cfg) {
ret = noon010_set_params(sd);
if (!ret)
info->apply_new_cfg = 0;
}
mutex_unlock(&info->lock);
return ret;
}
static int noon010_log_status(struct v4l2_subdev *sd)
{
struct noon010_info *info = to_noon010(sd);
v4l2_ctrl_handler_log_status(&info->hdl, sd->name);
return 0;
}
/*
 * Internal op called on subdev node open: seed the file handle's TRY
 * format with the default frame size and pixel format (table entry 0).
 */
static int noon010_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(fh, 0);

	try_fmt->width = noon010_sizes[0].width;
	try_fmt->height = noon010_sizes[0].height;
	try_fmt->code = noon010_formats[0].code;
	try_fmt->colorspace = V4L2_COLORSPACE_JPEG;
	try_fmt->field = V4L2_FIELD_NONE;

	return 0;
}
/* Per-open initialization of the TRY format. */
static const struct v4l2_subdev_internal_ops noon010_subdev_internal_ops = {
.open = noon010_open,
};
/* Control operations (set only; values are read back from the handler). */
static const struct v4l2_ctrl_ops noon010_ctrl_ops = {
.s_ctrl = noon010_s_ctrl,
};
static const struct v4l2_subdev_core_ops noon010_core_ops = {
.s_power = noon010_s_power,
.log_status = noon010_log_status,
};
static struct v4l2_subdev_pad_ops noon010_pad_ops = {
.enum_mbus_code = noon010_enum_mbus_code,
.get_fmt = noon010_get_fmt,
.set_fmt = noon010_set_fmt,
};
static struct v4l2_subdev_video_ops noon010_video_ops = {
.s_stream = noon010_s_stream,
};
/* Top-level subdev operations table registered with the v4l2 core. */
static const struct v4l2_subdev_ops noon010_ops = {
.core = &noon010_core_ops,
.pad = &noon010_pad_ops,
.video = &noon010_video_ops,
};
/*
 * Return 0 if NOON010PC30L sensor type was detected or -ENODEV otherwise.
 * Powers the sensor up just long enough to read the device id register;
 * the power_disable() return value is ignored (detection result wins).
 * Note: the raw i2c_smbus call is used since the id register lives in
 * page 0, which is the reset default.
 */
static int noon010_detect(struct i2c_client *client, struct noon010_info *info)
{
int ret;
ret = power_enable(info);
if (ret)
return ret;
ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG);
if (ret < 0)
dev_err(&client->dev, "I2C read failed: 0x%X\n", ret);
power_disable(info);
return ret == NOON010PC30_ID ? 0 : -ENODEV;
}
/*
 * I2C probe: allocate driver state, register the subdev and controls,
 * claim GPIOs and regulators, initialize the media pad, and finally
 * verify the chip id. All allocations use devm_*, so the error path only
 * needs to undo the control handler and subdev registration.
 */
static int noon010_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct noon010_info *info;
struct v4l2_subdev *sd;
const struct noon010pc30_platform_data *pdata
= client->dev.platform_data;
int ret;
int i;
if (!pdata) {
dev_err(&client->dev, "No platform data!\n");
return -EIO;
}
info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
mutex_init(&info->lock);
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &noon010_ops);
strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
sd->internal_ops = &noon010_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
/* Three controls: AWB switch plus manual red/blue balance gains. */
v4l2_ctrl_handler_init(&info->hdl, 3);
v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
V4L2_CID_RED_BALANCE, 0, 127, 1, 64);
v4l2_ctrl_new_std(&info->hdl, &noon010_ctrl_ops,
V4L2_CID_BLUE_BALANCE, 0, 127, 1, 64);
sd->ctrl_handler = &info->hdl;
ret = info->hdl.error;
if (ret)
goto np_err;
/* Unknown page; gpios default to invalid until requested below. */
info->i2c_reg_page = -1;
info->gpio_nreset = -EINVAL;
info->gpio_nstby = -EINVAL;
info->curr_fmt = &noon010_formats[0];
info->curr_win = &noon010_sizes[0];
if (gpio_is_valid(pdata->gpio_nreset)) {
ret = devm_gpio_request_one(&client->dev, pdata->gpio_nreset,
GPIOF_OUT_INIT_LOW,
"NOON010PC30 NRST");
if (ret) {
dev_err(&client->dev, "GPIO request error: %d\n", ret);
goto np_err;
}
info->gpio_nreset = pdata->gpio_nreset;
gpio_export(info->gpio_nreset, 0);
}
if (gpio_is_valid(pdata->gpio_nstby)) {
ret = devm_gpio_request_one(&client->dev, pdata->gpio_nstby,
GPIOF_OUT_INIT_LOW,
"NOON010PC30 NSTBY");
if (ret) {
dev_err(&client->dev, "GPIO request error: %d\n", ret);
goto np_err;
}
info->gpio_nstby = pdata->gpio_nstby;
gpio_export(info->gpio_nstby, 0);
}
for (i = 0; i < NOON010_NUM_SUPPLIES; i++)
info->supply[i].supply = noon010_supply_name[i];
ret = devm_regulator_bulk_get(&client->dev, NOON010_NUM_SUPPLIES,
info->supply);
if (ret)
goto np_err;
info->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
if (ret < 0)
goto np_err;
ret = noon010_detect(client, info);
if (!ret)
return 0;
np_err:
v4l2_ctrl_handler_free(&info->hdl);
v4l2_device_unregister_subdev(sd);
return ret;
}
/*
 * I2C remove: tear down in reverse of probe — subdev, control handler,
 * media entity. Memory and GPIO/regulator resources are devm-managed.
 */
static int noon010_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct noon010_info *info = to_noon010(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&sd->entity);
return 0;
}
/* I2C device id table: matched against the board's i2c_board_info name. */
static const struct i2c_device_id noon010_id[] = {
{ MODULE_NAME, 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, noon010_id);
static struct i2c_driver noon010_i2c_driver = {
.driver = {
.name = MODULE_NAME
},
.probe = noon010_probe,
.remove = noon010_remove,
.id_table = noon010_id,
};
module_i2c_driver(noon010_i2c_driver);
MODULE_DESCRIPTION("Siliconfile NOON010PC30 camera driver");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
larks/linux-rcu | arch/s390/appldata/appldata_mem.c | 599 | 4196 | /*
* arch/s390/appldata/appldata_mem.c
*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects data related to memory management.
*
* Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/io.h>
#include "appldata.h"
#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
/*
* Memory data
*
* This is accessed as binary data by z/VM. If changes to it can't be avoided,
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
*
* The record layout is documented in the Linux for zSeries Device Drivers
* book:
* http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
/*
 * Record layout consumed as raw binary by z/VM — do not reorder or resize
 * fields without bumping the structure version (see the block comment
 * above and appldata_base.c).
 */
static struct appldata_mem_data {
u64 timestamp; /* TOD clock value at collection time */
u32 sync_count_1; /* after VM collected the record data, */
u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
same. If not, the record has been updated on
the Linux side while VM was collecting the
(possibly corrupt) data */
u64 pgpgin; /* data read from disk */
u64 pgpgout; /* data written to disk */
u64 pswpin; /* pages swapped in */
u64 pswpout; /* pages swapped out */
u64 sharedram; /* sharedram is currently set to 0 */
u64 totalram; /* total main memory size */
u64 freeram; /* free main memory size */
u64 totalhigh; /* total high memory size */
u64 freehigh; /* free high memory size */
u64 bufferram; /* memory reserved for buffers, free cache */
u64 cached; /* size of (used) cache, w/o buffers */
u64 totalswap; /* total swap space size */
u64 freeswap; /* free swap space */
// New in 2.6 -->
u64 pgalloc; /* page allocations */
u64 pgfault; /* page faults (major+minor) */
u64 pgmajfault; /* page faults (major only) */
// <-- New in 2.6
} __attribute__((packed)) appldata_mem_data;
/*
 * appldata_get_mem_data()
 *
 * Gather memory statistics into the z/VM monitor record. The two
 * sync_count fields are bumped before and after filling the record so
 * z/VM can detect (and discard) a record it read mid-update.
 */
static void appldata_get_mem_data(void *data)
{
/*
 * don't put large structures on the stack, we are
 * serialized through the appldata_ops_mutex and can use static
 */
static struct sysinfo val;
unsigned long ev[NR_VM_EVENT_ITEMS];
struct appldata_mem_data *mem_data;
mem_data = data;
mem_data->sync_count_1++;
all_vm_events(ev);
/* pgpgin/pgpgout are halved — presumably converting the event counter
 * units to what the z/VM record expects; TODO confirm. */
mem_data->pgpgin = ev[PGPGIN] >> 1;
mem_data->pgpgout = ev[PGPGOUT] >> 1;
mem_data->pswpin = ev[PSWPIN];
mem_data->pswpout = ev[PSWPOUT];
mem_data->pgalloc = ev[PGALLOC_NORMAL];
#ifdef CONFIG_ZONE_DMA
mem_data->pgalloc += ev[PGALLOC_DMA];
#endif
mem_data->pgfault = ev[PGFAULT];
mem_data->pgmajfault = ev[PGMAJFAULT];
si_meminfo(&val);
mem_data->sharedram = val.sharedram;
/* P2K(): page counts converted to KB for the record. */
mem_data->totalram = P2K(val.totalram);
mem_data->freeram = P2K(val.freeram);
mem_data->totalhigh = P2K(val.totalhigh);
mem_data->freehigh = P2K(val.freehigh);
mem_data->bufferram = P2K(val.bufferram);
mem_data->cached = P2K(global_page_state(NR_FILE_PAGES)
- val.bufferram);
si_swapinfo(&val);
mem_data->totalswap = P2K(val.totalswap);
mem_data->freeswap = P2K(val.freeswap);
mem_data->timestamp = get_clock();
mem_data->sync_count_2++;
}
/* Registration descriptor for the appldata base layer. */
static struct appldata_ops ops = {
.name = "mem",
.record_nr = APPLDATA_RECORD_MEM_ID,
.size = sizeof(struct appldata_mem_data),
.callback = &appldata_get_mem_data,
.data = &appldata_mem_data,
.owner = THIS_MODULE,
.mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
};
/*
 * appldata_mem_init()
 *
 * init_data, register ops
 */
static int __init appldata_mem_init(void)
{
return appldata_register_ops(&ops);
}
/*
 * appldata_mem_exit()
 *
 * unregister ops
 */
static void __exit appldata_mem_exit(void)
{
appldata_unregister_ops(&ops);
}
module_init(appldata_mem_init);
module_exit(appldata_mem_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, MEMORY statistics");
| gpl-2.0 |
MoKee/android_kernel_mediatek_sprout | arch/x86/kernel/acpi/sleep.c | 1623 | 3637 | /*
* sleep.c - x86-specific ACPI sleep support.
*
* Copyright (C) 2001-2003 Patrick Mochel
* Copyright (C) 2001-2003 Pavel Machek <pavel@ucw.cz>
*/
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/realmode.h>
#include "../../realmode/rm/wakeup.h"
#include "sleep.h"
unsigned long acpi_realmode_flags;
#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
static char temp_stack[4096];
#endif
/**
 * acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 *
 * Fills in the real-mode wakeup header (GDT, CR0/CR4, EFER,
 * MISC_ENABLE, entry point) so the trampoline can restore enough
 * processor state on resume, then calls do_suspend_lowlevel() which
 * saves the rest and enters the ACPI sleep state.
 */
int acpi_suspend_lowlevel(void)
{
struct wakeup_header *header =
(struct wakeup_header *) __va(real_mode_header->wakeup_header);
if (header->signature != WAKEUP_HEADER_SIGNATURE) {
printk(KERN_ERR "wakeup header does not match\n");
return -EINVAL;
}
header->video_mode = saved_video_mode;
header->pmode_behavior = 0;
#ifndef CONFIG_64BIT
native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
/*
 * We have to check that we can write back the value, and not
 * just read it. At least on 90 nm Pentium M (Family 6, Model
 * 13), reading an invalid MSR is not guaranteed to trap, see
 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
 * nm process with 512-KB L2 Cache Specification Update".
 */
if (!rdmsr_safe(MSR_EFER,
&header->pmode_efer_low,
&header->pmode_efer_high) &&
!wrmsr_safe(MSR_EFER,
header->pmode_efer_low,
header->pmode_efer_high))
header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */
header->pmode_cr0 = read_cr0();
/* CR4 exists only on CPUs with CPUID; record it conditionally. */
if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
header->pmode_cr4 = read_cr4();
header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
}
/* Same read-back check as for EFER above. */
if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
&header->pmode_misc_en_low,
&header->pmode_misc_en_high) &&
!wrmsr_safe(MSR_IA32_MISC_ENABLE,
header->pmode_misc_en_low,
header->pmode_misc_en_high))
header->pmode_behavior |=
(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
header->realmode_flags = acpi_realmode_flags;
/* Magic cookies let the wakeup code verify the header is intact. */
header->real_magic = 0x12345678;
#ifndef CONFIG_64BIT
header->pmode_entry = (u32)&wakeup_pmode_return;
header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
/* 64-bit resume goes through the SMP boot path: fresh stack/GDT/GS. */
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id());
#endif
initial_code = (unsigned long)wakeup_long64;
saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */
do_suspend_lowlevel();
return 0;
}
/*
 * Parse the "acpi_sleep=" kernel command line option, a comma-separated
 * list of: s3_bios, s3_mode, s3_beep (real-mode wakeup flags),
 * s4_nohwsig, nonvs, nonvs_s3 and old_ordering.
 *
 * Note the options are matched as prefixes of each token. "nonvs_s3"
 * must therefore be tested BEFORE "nonvs" with an else-if: the plain
 * strncmp(str, "nonvs", 5) also matches the token "nonvs_s3", and the
 * original independent checks wrongly invoked both acpi_nvs_nosave()
 * and acpi_nvs_nosave_s3() for "nonvs_s3".
 */
static int __init acpi_sleep_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "s3_bios", 7) == 0)
			acpi_realmode_flags |= 1;
		if (strncmp(str, "s3_mode", 7) == 0)
			acpi_realmode_flags |= 2;
		if (strncmp(str, "s3_beep", 7) == 0)
			acpi_realmode_flags |= 4;
#ifdef CONFIG_HIBERNATION
		if (strncmp(str, "s4_nohwsig", 10) == 0)
			acpi_no_s4_hw_signature();
#endif
		/* Check the longer option first: "nonvs" is its prefix. */
		if (strncmp(str, "nonvs_s3", 8) == 0)
			acpi_nvs_nosave_s3();
		else if (strncmp(str, "nonvs", 5) == 0)
			acpi_nvs_nosave();
		if (strncmp(str, "old_ordering", 12) == 0)
			acpi_old_suspend_ordering();
		/* Advance past the next comma and any separator run. */
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}
__setup("acpi_sleep=", acpi_sleep_setup);
| gpl-2.0 |
teleofis/OpenWRT | DLpatch/linux-3.18.29/arch/microblaze/mm/init.c | 1623 | 11479 | /*
* Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;
#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
#else
static int init_bootmem_done;
#endif /* CONFIG_MMU */
char *klimit = _end;
/*
* Initialize the bootmem system and give it all the memory we
* have available.
*/
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
/* Walk the kernel page tables to the PTE mapping @vaddr. */
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
vaddr), vaddr);
}
/*
 * Set up the kmap machinery: create the page table page backing the
 * persistent kmap area and cache the PTE/protection used by kmap_atomic.
 */
static void __init highmem_init(void)
{
pr_debug("%x\n", (u32)PKMAP_BASE);
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
kmap_prot = PAGE_KERNEL;
}
/*
 * Release all non-reserved highmem pages (those above max_low_pfn) to
 * the page allocator.
 */
static void highmem_setup(void)
{
unsigned long pfn;
for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
struct page *page = pfn_to_page(pfn);
/* FIXME not sure about */
if (!memblock_is_reserved(pfn << PAGE_SHIFT))
free_highmem_page(page);
}
}
#endif /* CONFIG_HIGHMEM */
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 * Here we only clear the fixmap slots (MMU) and hand the zone sizes to
 * the core memory allocator.
 */
static void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
int idx;
/* Setup fixmaps */
for (idx = 0; idx < __end_of_fixed_addresses; idx++)
clear_fixmap(idx);
#endif
/* Clear every zone size first */
memset(zones_size, 0, sizeof(zones_size));
#ifdef CONFIG_HIGHMEM
highmem_init();
zones_size[ZONE_DMA] = max_low_pfn;
zones_size[ZONE_HIGHMEM] = max_pfn;
#else
zones_size[ZONE_DMA] = max_pfn;
#endif
/* We don't have holes in memory map */
free_area_init_nodes(zones_size);
}
/*
 * Discover main memory, reserve the kernel image (noMMU), compute the
 * pfn limits, initialize bootmem and mirror the memblock reservations
 * into it, then call paging_init().
 */
void __init setup_memory(void)
{
	unsigned long map_size;
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find main memory where is the kernel */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			/* noMMU: PAGE_OFFSET tracks the RAM base */
			PAGE_OFFSET = memory_start;
			pr_info("%s: Main mem: 0x%x, size 0x%08x\n",
				__func__, (u32) memory_start,
					(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_size);
	}

	/* reservation of region where is the kernel */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* ALIGN can be remove because _end in vmlinux.lds.S is align */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	pr_info("%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
			+ kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif
	/*
	 * Kernel:
	 * start: base phys address of kernel - page align
	 * end: base phys address of kernel - page align
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	max_mapnr = memory_size >> PAGE_SHIFT;
	/* 64-bit arithmetic: the end of RAM may sit exactly at 4GB */
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
	map_size = init_bootmem_node(NODE_DATA(0),
		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
	/* Keep the bitmap itself out of the allocator */
	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);

	/* Add active regions with valid PFNs */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
					(end_pfn - start_pfn) << PAGE_SHIFT,
					&memblock.memory, 0);
	}

	/* free bootmem is whole main memory */
	free_bootmem_with_active_regions(0, max_low_pfn);

	/* reserve allocate blocks */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;

		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
			 (u32) reg->base, (u32) reg->size, top,
					memory_start + lowmem_size - 1);

		if (top <= (memory_start + lowmem_size - 1)) {
			/* entirely within lowmem: reserve as-is */
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		} else if (reg->base < (memory_start + lowmem_size - 1)) {
			/* straddles the lowmem limit: clip to lowmem */
			unsigned long trunc_size = memory_start + lowmem_size -
				reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_MMU
	init_bootmem_done = 1;
#endif
	paging_init();
}
#ifdef CONFIG_BLK_DEV_INITRD
/* Return the init RAM disk's pages to the page allocator. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
/* Free the kernel's __init text/data once boot is complete. */
void free_initmem(void)
{
	free_initmem_default(-1);
}
/*
 * Final memory bring-up: release all bootmem (and highmem, if any) to
 * the buddy allocator, print the virtual layout and flag completion via
 * mem_init_done (consulted by the *_maybe_bootmem helpers below).
 */
void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* this will put all memory onto the freelists */
	free_all_bootmem();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}
#ifndef CONFIG_MMU
/* noMMU: a pfn is RAM iff it falls inside the single RAM range. */
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
/* MMU: only direct-mapped lowmem pfns count as RAM here. */
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}
/*
* Check for command-line options that affect what MMU_init will do.
*/
/*
 * Parse the "mem=" kernel command-line option and, if it requests less
 * memory than is present, shrink memory_size and the first memblock
 * region to match. No-op when the option is absent or larger than RAM.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p;

	/* Look for mem= option on command line.
	 * (The previous initialization of p to cmd_line was a dead
	 * store; strstr() assigns it unconditionally.) */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		/* Only honour a limit that actually shrinks memory */
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
	/* Load 0x10000000 into the ZPR via r11 (declared clobbered). */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}
/*
* MMU_init sets up the basic memory mappings for the kernel,
* including both RAM and possibly some I/O regions,
* and sets up the page tables and the MMU hardware ready to go.
*/
/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	/* Sanity-check the memory layout handed over by the bootloader */
	if (!memblock.reserved.cnt) {
		pr_emerg("Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		/* Clamp the direct-mapped portion; without HIGHMEM the
		 * rest of RAM is simply unusable. */
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;
		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after that, the entire linear mapping will be available */
	/* This will also cause that unflatten device tree will be allocated
	 * inside 768MB limit */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}
/*
 * Hand out one zero-order page during early boot.
 * This is only called until mem_init is done.
 */
void __init *early_get_page(void)
{
	/*
	 * Before bootmem is up, carve the page straight out of memblock.
	 * The allocation must stay below memory_start + kernel_tlb
	 * because only that range is mapped by head.S.
	 */
	if (!init_bootmem_done)
		return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
						memory_start + kernel_tlb));

	return alloc_bootmem_pages(PAGE_SIZE);
}
#endif /* CONFIG_MMU */
/*
 * Allocate @size bytes from the slab once mem_init() has run, or from
 * bootmem before that. @mask is only honoured on the slab path.
 */
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	return mem_init_done ? kmalloc(size, mask) : alloc_bootmem(size);
}
/*
 * Zeroed counterpart of alloc_maybe_bootmem(): kzalloc() after
 * mem_init(), bootmem (explicitly cleared) before it.
 */
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		return kzalloc(size, mask);

	p = alloc_bootmem(size);
	if (p)
		memset(p, 0, size);
	return p;
}
| gpl-2.0 |
Anik1199/android_kernel_mediatek_sprout | net/sunrpc/auth_gss/svcauth_gss.c | 1879 | 47314 | /*
* Neil Brown <neilb@cse.unsw.edu.au>
* J. Bruce Fields <bfields@umich.edu>
* Andy Adamson <andros@umich.edu>
* Dug Song <dugsong@monkey.org>
*
* RPCSEC_GSS server authentication.
* This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
* (gssapi)
*
* The RPCSEC_GSS involves three stages:
* 1/ context creation
* 2/ data exchange
* 3/ context destruction
*
* Context creation is handled largely by upcalls to user-space.
* In particular, GSS_Accept_sec_context is handled by an upcall
* Data exchange is handled entirely within the kernel
* In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
* Context destruction is handled in-kernel
* GSS_Delete_sec_context is in-kernel
*
* Context creation is initiated by a RPCSEC_GSS_INIT request arriving.
* The context handle and gss_token are used as a key into the rpcsec_init cache.
* The content of this cache includes some of the outputs of GSS_Accept_sec_context,
* being major_status, minor_status, context_handle, reply_token.
* These are sent back to the client.
 * Sequence window management is handled by the kernel. The window size is currently
* a compile time constant.
*
* When user-space is happy that a context is established, it places an entry
* in the rpcsec_context cache. The key for this cache is the context_handle.
* The content includes:
* uid/gidlist - for determining access rights
* mechanism type
* mechanism specific information, such as a key
*
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/user_namespace.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>
#include "gss_rpc_upcall.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
/* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
* into replies.
*
* Key is context handle (\x if empty) and gss_token.
* Content is major_status minor_status (integers) context_handle, reply_token.
*
*/
/* Two netobjs are equal iff they have the same length and bytes. */
static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
{
	if (a->len != b->len)
		return 0;
	return memcmp(a->data, b->data, a->len) == 0;
}
#define	RSI_HASHBITS	6
#define	RSI_HASHMAX	(1<<RSI_HASHBITS)

/*
 * One entry in the rpcsec_init cache: maps an RPCSEC_GSS_{,CONT_}INIT
 * request (keyed by in_handle + in_token) to the result of the
 * user-space GSS_Accept_sec_context upcall.
 */
struct rsi {
	struct cache_head	h;
	struct xdr_netobj	in_handle, in_token;
	struct xdr_netobj	out_handle, out_token;
	int			major_status, minor_status;
};
static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
/* Release the four buffers owned by @rsii (not the struct itself). */
static void rsi_free(struct rsi *rsii)
{
	kfree(rsii->in_handle.data);
	kfree(rsii->in_token.data);
	kfree(rsii->out_handle.data);
	kfree(rsii->out_token.data);
}
/* kref release callback: free an rsi entry once unreferenced. */
static void rsi_put(struct kref *ref)
{
	struct rsi *rsii = container_of(ref, struct rsi, h.ref);

	rsi_free(rsii);
	kfree(rsii);
}
/* Hash over both lookup keys; must agree with rsi_match(). */
static inline int rsi_hash(struct rsi *item)
{
	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
	     ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
}
/* Cache match callback: compare both keys, handle first. */
static int rsi_match(struct cache_head *a, struct cache_head *b)
{
	struct rsi *lhs = container_of(a, struct rsi, h);
	struct rsi *rhs = container_of(b, struct rsi, h);

	if (!netobj_equal(&lhs->in_handle, &rhs->in_handle))
		return 0;
	return netobj_equal(&lhs->in_token, &rhs->in_token);
}
/*
 * Fill @dst with a kernel copy of @src/@len (NULL data for len == 0).
 * Returns 0 on success, -ENOMEM if the copy could not be allocated.
 */
static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
{
	dst->len = len;
	if (!len) {
		dst->data = NULL;
		return 0;
	}
	dst->data = kmemdup(src, len, GFP_KERNEL);
	return dst->data ? 0 : -ENOMEM;
}
/* Duplicate one netobj into another; -ENOMEM on allocation failure. */
static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
{
	return dup_to_netobj(dst, src->data, src->len);
}
/*
 * Cache 'init' callback: transfer ownership of the lookup keys from the
 * temporary @citem into the freshly allocated @cnew (no copying), and
 * start with empty output fields.
 */
static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct rsi *new = container_of(cnew, struct rsi, h);
	struct rsi *item = container_of(citem, struct rsi, h);

	new->out_handle.data = NULL;
	new->out_handle.len = 0;
	new->out_token.data = NULL;
	new->out_token.len = 0;
	new->in_handle.len = item->in_handle.len;
	item->in_handle.len = 0;
	new->in_token.len = item->in_token.len;
	item->in_token.len = 0;
	new->in_handle.data = item->in_handle.data;
	item->in_handle.data = NULL;
	new->in_token.data = item->in_token.data;
	item->in_token.data = NULL;
}
/*
 * Cache 'update' callback: move the upcall results (out_handle,
 * out_token, statuses) from @citem into the cached entry @cnew.
 */
static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
	struct rsi *new = container_of(cnew, struct rsi, h);
	struct rsi *item = container_of(citem, struct rsi, h);

	BUG_ON(new->out_handle.data || new->out_token.data);
	new->out_handle.len = item->out_handle.len;
	item->out_handle.len = 0;
	new->out_token.len = item->out_token.len;
	item->out_token.len = 0;
	new->out_handle.data = item->out_handle.data;
	item->out_handle.data = NULL;
	new->out_token.data = item->out_token.data;
	item->out_token.data = NULL;

	new->major_status = item->major_status;
	new->minor_status = item->minor_status;
}
/* Allocate a blank rsi entry for the cache layer; NULL on OOM. */
static struct cache_head *rsi_alloc(void)
{
	struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);

	return rsii ? &rsii->h : NULL;
}
/*
 * Format the upcall request for user space: hex-encoded in_handle and
 * in_token, terminated with a newline.
 */
static void rsi_request(struct cache_detail *cd,
		       struct cache_head *h,
		       char **bpp, int *blen)
{
	struct rsi *rsii = container_of(h, struct rsi, h);

	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
	(*bpp)[-1] = '\n';	/* replace the trailing separator */
}
/*
 * Parse the downcall written by user space after GSS_Accept_sec_context:
 * "in_handle in_token expiry major minor out_handle out_token", and
 * update the matching rsi cache entry.
 * Returns 0 on success, otherwise a negative errno.
 */
static int rsi_parse(struct cache_detail *cd,
		    char *mesg, int mlen)
{
	/* context token expiry major minor context token */
	char *buf = mesg;
	char *ep;
	int len;
	struct rsi rsii, *rsip = NULL;
	time_t expiry;
	int status = -EINVAL;

	memset(&rsii, 0, sizeof(rsii));
	/* handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_handle, buf, len))
		goto out;

	/* token */
	len = qword_get(&mesg, buf, mlen);
	status = -EINVAL;
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_token, buf, len))
		goto out;

	/* find (or insert) the entry keyed by handle+token */
	rsip = rsi_lookup(cd, &rsii);
	if (!rsip)
		goto out;

	rsii.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	/* major/minor */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0)
		goto out;
	rsii.major_status = simple_strtoul(buf, &ep, 10);
	if (*ep)
		goto out;
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0)
		goto out;
	rsii.minor_status = simple_strtoul(buf, &ep, 10);
	if (*ep)
		goto out;

	/* out_handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.out_handle, buf, len))
		goto out;

	/* out_token */
	len = qword_get(&mesg, buf, mlen);
	status = -EINVAL;
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.out_token, buf, len))
		goto out;
	rsii.h.expiry_time = expiry;
	rsip = rsi_update(cd, &rsii, rsip);
	status = 0;
out:
	/* rsi_update/rsi_init took ownership of any moved buffers;
	 * rsi_free() only releases what is still attached to rsii. */
	rsi_free(&rsii);
	if (rsip)
		cache_put(&rsip->h, cd);
	else
		status = -ENOMEM;
	return status;
}
/* Template for the per-net rpcsec_init (context-creation upcall) cache. */
static struct cache_detail rsi_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= RSI_HASHMAX,
	.name           = "auth.rpcsec.init",
	.cache_put      = rsi_put,
	.cache_request  = rsi_request,
	.cache_parse    = rsi_parse,
	.match		= rsi_match,
	.init		= rsi_init,
	.update		= update_rsi,
	.alloc		= rsi_alloc,
};
/* Find (or insert a stub for) @item in the rpcsec_init cache. */
static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
{
	struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
						    rsi_hash(item));

	return ch ? container_of(ch, struct rsi, h) : NULL;
}
/* Replace @old with @new in the rpcsec_init cache. */
static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
{
	struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
						    rsi_hash(new));

	return ch ? container_of(ch, struct rsi, h) : NULL;
}
/*
* The rpcsec_context cache is used to store a context that is
* used in data exchange.
* The key is a context handle. The content is:
* uid, gidlist, mechanism, service-set, mech-specific-data
*/
#define	RSC_HASHBITS	10
#define	RSC_HASHMAX	(1<<RSC_HASHBITS)

#define GSS_SEQ_WIN	128

/* Replay-detection window state for RFC 2203 sequence numbers. */
struct gss_svc_seq_data {
	/* highest seq number seen so far: */
	int			sd_max;
	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
	 * sd_win is nonzero iff sequence number i has been seen already: */
	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
	spinlock_t		sd_lock;
};
/*
 * One established security context: maps a context handle to the
 * client's credentials, GSS mechanism context and replay window.
 */
struct rsc {
	struct cache_head	h;
	struct xdr_netobj	handle;
	struct svc_cred		cred;
	struct gss_svc_seq_data	seqdata;
	struct gss_ctx		*mechctx;
};
static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);
/* Release everything owned by @rsci: handle, GSS context, credentials. */
static void rsc_free(struct rsc *rsci)
{
	kfree(rsci->handle.data);
	if (rsci->mechctx)
		gss_delete_sec_context(&rsci->mechctx);
	free_svc_cred(&rsci->cred);
}
/* kref release callback: free an rsc entry once unreferenced. */
static void rsc_put(struct kref *ref)
{
	struct rsc *rsci = container_of(ref, struct rsc, h.ref);

	rsc_free(rsci);
	kfree(rsci);
}
/* Hash the context handle; must agree with rsc_match(). */
static inline int
rsc_hash(struct rsc *rsci)
{
	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
}
/* Cache match callback for established contexts: compare handles. */
static int
rsc_match(struct cache_head *a, struct cache_head *b)
{
	struct rsc *lhs = container_of(a, struct rsc, h);
	struct rsc *rhs = container_of(b, struct rsc, h);

	return netobj_equal(&lhs->handle, &rhs->handle);
}
/*
 * Cache 'init' callback: steal the handle from the temporary @ctmp and
 * start with an empty mechanism context and credentials.
 */
static void
rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
	struct rsc *new = container_of(cnew, struct rsc, h);
	struct rsc *tmp = container_of(ctmp, struct rsc, h);

	new->handle.len = tmp->handle.len;
	tmp->handle.len = 0;
	new->handle.data = tmp->handle.data;
	tmp->handle.data = NULL;
	new->mechctx = NULL;
	new->cred.cr_group_info = NULL;
	new->cred.cr_principal = NULL;
}
/*
 * Cache 'update' callback: move the mechanism context and credentials
 * from @ctmp into the cached entry and reset the replay window.
 */
static void
update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
{
	struct rsc *new = container_of(cnew, struct rsc, h);
	struct rsc *tmp = container_of(ctmp, struct rsc, h);

	new->mechctx = tmp->mechctx;
	tmp->mechctx = NULL;
	memset(&new->seqdata, 0, sizeof(new->seqdata));
	spin_lock_init(&new->seqdata.sd_lock);
	new->cred = tmp->cred;
	tmp->cred.cr_group_info = NULL;
	new->cred.cr_principal = tmp->cred.cr_principal;
	tmp->cred.cr_principal = NULL;
}
/* Allocate a blank rsc entry for the cache layer; NULL on OOM. */
static struct cache_head *
rsc_alloc(void)
{
	struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);

	return rsci ? &rsci->h : NULL;
}
/*
 * Parse a context downcall from user space and install the resulting
 * security context in the rsc cache. A negative entry carries only
 * "handle expiry"; a positive one adds uid, gid, the supplementary
 * group list, the mechanism name/data and optionally the principal.
 * Returns 0 on success or a negative errno.
 */
static int rsc_parse(struct cache_detail *cd,
		    char *mesg, int mlen)
{
	/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
	char *buf = mesg;
	int id;
	int len, rv;
	struct rsc rsci, *rscp = NULL;
	time_t expiry;
	int status = -EINVAL;
	struct gss_api_mech *gm = NULL;

	memset(&rsci, 0, sizeof(rsci));
	/* context handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsci.handle, buf, len))
		goto out;

	rsci.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	rscp = rsc_lookup(cd, &rsci);
	if (!rscp)
		goto out;

	/* uid, or NEGATIVE */
	rv = get_int(&mesg, &id);
	if (rv == -EINVAL)
		goto out;
	if (rv == -ENOENT)
		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
	else {
		int N, i;

		/*
		 * NOTE: we skip uid_valid()/gid_valid() checks here:
		 * instead, * -1 id's are later mapped to the
		 * (export-specific) anonymous id by nfsd_setuser.
		 *
		 * (But supplementary gid's get no such special
		 * treatment so are checked for validity here.)
		 */
		/* uid */
		rsci.cred.cr_uid = make_kuid(&init_user_ns, id);

		/* gid */
		if (get_int(&mesg, &id))
			goto out;
		rsci.cred.cr_gid = make_kgid(&init_user_ns, id);

		/* number of additional gid's */
		if (get_int(&mesg, &N))
			goto out;
		status = -ENOMEM;
		rsci.cred.cr_group_info = groups_alloc(N);
		if (rsci.cred.cr_group_info == NULL)
			goto out;

		/* gid's */
		status = -EINVAL;
		for (i=0; i<N; i++) {
			kgid_t kgid;
			if (get_int(&mesg, &id))
				goto out;
			kgid = make_kgid(&init_user_ns, id);
			if (!gid_valid(kgid))
				goto out;
			GROUP_AT(rsci.cred.cr_group_info, i) = kgid;
		}

		/* mech name */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		gm = gss_mech_get_by_name(buf);
		status = -EOPNOTSUPP;
		if (!gm)
			goto out;

		status = -EINVAL;
		/* mech-specific data: */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
						NULL, GFP_KERNEL);
		if (status)
			goto out;

		/* get client name */
		len = qword_get(&mesg, buf, mlen);
		if (len > 0) {
			rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
			if (!rsci.cred.cr_principal) {
				status = -ENOMEM;
				goto out;
			}
		}

	}
	rsci.h.expiry_time = expiry;
	rscp = rsc_update(cd, &rsci, rscp);
	status = 0;
out:
	gss_mech_put(gm);
	/* update_rsc/rsc_init took ownership of moved fields; rsc_free()
	 * only releases what is still attached to the local rsci. */
	rsc_free(&rsci);
	if (rscp)
		cache_put(&rscp->h, cd);
	else
		status = -ENOMEM;
	return status;
}
/* Template for the per-net rpcsec context (established contexts) cache.
 * Downcall-only: no .cache_request, entries are pushed by user space. */
static struct cache_detail rsc_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= RSC_HASHMAX,
	.name		= "auth.rpcsec.context",
	.cache_put	= rsc_put,
	.cache_parse	= rsc_parse,
	.match		= rsc_match,
	.init		= rsc_init,
	.update		= update_rsc,
	.alloc		= rsc_alloc,
};
/* Find (or insert a stub for) @item in the context cache. */
static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
{
	struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
						    rsc_hash(item));

	return ch ? container_of(ch, struct rsc, h) : NULL;
}
/* Replace @old with @new in the context cache. */
static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
{
	struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
						    rsc_hash(new));

	return ch ? container_of(ch, struct rsc, h) : NULL;
}
/*
 * Find an established context by @handle. On success the returned rsc
 * carries a reference that the caller must drop with cache_put();
 * returns NULL when absent or when the entry fails cache_check().
 */
static struct rsc *
gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
{
	struct rsc rsci;
	struct rsc *found;

	memset(&rsci, 0, sizeof(rsci));
	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
		return NULL;
	found = rsc_lookup(cd, &rsci);
	rsc_free(&rsci);
	if (!found)
		return NULL;
	if (cache_check(cd, &found->h, NULL))
		return NULL;
	return found;
}
/* Implements sequence number algorithm as specified in RFC 2203. */
/*
 * Returns 1 if @seq_num is fresh (and records it in the window),
 * 0 if it is a replay or too old, meaning the request must be dropped.
 */
static int
gss_check_seq_num(struct rsc *rsci, int seq_num)
{
	struct gss_svc_seq_data *sd = &rsci->seqdata;

	spin_lock(&sd->sd_lock);
	if (seq_num > sd->sd_max) {
		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
			/* window jumps forward wholesale: reset it */
			memset(sd->sd_win,0,sizeof(sd->sd_win));
			sd->sd_max = seq_num;
		} else while (sd->sd_max < seq_num) {
			/* slide the window forward one slot at a time */
			sd->sd_max++;
			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
		}
		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
		goto ok;
	} else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) {
		/* fell out of the window: too old to track */
		goto drop;
	}
	/* sd_max - GSS_SEQ_WIN < seq_num <= sd_max */
	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
		goto drop;	/* bit already set: replay */
ok:
	spin_unlock(&sd->sd_lock);
	return 1;
drop:
	spin_unlock(&sd->sd_lock);
	return 0;
}
/* Round @i up to the next multiple of four (XDR quad alignment). */
static inline u32 round_up_to_quad(u32 i)
{
	return ((i + 3) >> 2) << 2;
}
/*
 * Pull a netobj (length word + opaque data) off the head of @argv,
 * advancing the iovec past the quad-aligned payload. The data is NOT
 * copied; o->data points into the request buffer.
 * Returns 0 on success, -1 on a short buffer.
 */
static inline int
svc_safe_getnetobj(struct kvec *argv, struct xdr_netobj *o)
{
	int l;

	if (argv->iov_len < 4)
		return -1;
	o->len = svc_getnl(argv);
	l = round_up_to_quad(o->len);
	/* NOTE(review): o->len comes off the wire; for values near
	 * UINT_MAX round_up_to_quad() wraps and l is an int -- confirm
	 * the iov_len comparison below rejects such requests. */
	if (argv->iov_len < l)
		return -1;
	o->data = argv->iov_base;
	argv->iov_base += l;
	argv->iov_len -= l;
	return 0;
}
/*
 * Append a netobj (length word + data, zero-padded to a quad boundary)
 * to @resv. Returns 0 on success, -1 if it would overflow the page.
 */
static inline int
svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
{
	u8 *p;

	if (resv->iov_len + 4 > PAGE_SIZE)
		return -1;
	svc_putnl(resv, o->len);
	p = resv->iov_base + resv->iov_len;
	resv->iov_len += round_up_to_quad(o->len);
	if (resv->iov_len > PAGE_SIZE)
		return -1;
	memcpy(p, o->data, o->len);
	memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
	return 0;
}
/*
* Verify the checksum on the header and return SVC_OK on success.
* Otherwise, return SVC_DROP (in the case of a bad sequence number)
* or return SVC_DENIED and indicate error in authp.
*/
static int
gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
		__be32 *rpcstart, struct rpc_gss_wire_cred *gc, __be32 *authp)
{
	struct gss_ctx		*ctx_id = rsci->mechctx;
	struct xdr_buf		rpchdr;
	struct xdr_netobj	checksum;
	u32			flavor = 0;
	struct kvec		*argv = &rqstp->rq_arg.head[0];
	struct kvec		iov;

	/* data to compute the checksum over: */
	iov.iov_base = rpcstart;
	iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;
	xdr_buf_from_iov(&iov, &rpchdr);

	/* verifier flavor must be RPC_AUTH_GSS followed by the MIC */
	*authp = rpc_autherr_badverf;
	if (argv->iov_len < 4)
		return SVC_DENIED;
	flavor = svc_getnl(argv);
	if (flavor != RPC_AUTH_GSS)
		return SVC_DENIED;
	if (svc_safe_getnetobj(argv, &checksum))
		return SVC_DENIED;

	if (rqstp->rq_deferred) /* skip verification of revisited request */
		return SVC_OK;
	if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
		*authp = rpcsec_gsserr_credproblem;
		return SVC_DENIED;
	}

	if (gc->gc_seq > MAXSEQ) {
		dprintk("RPC: svcauth_gss: discarding request with "
			"large sequence number %d\n", gc->gc_seq);
		*authp = rpcsec_gsserr_ctxproblem;
		return SVC_DENIED;
	}
	if (!gss_check_seq_num(rsci, gc->gc_seq)) {
		dprintk("RPC: svcauth_gss: discarding request with "
			"old sequence number %d\n", gc->gc_seq);
		return SVC_DROP;
	}
	return SVC_OK;
}
/*
 * Append an AUTH_NULL verifier (flavor + zero length) to the response;
 * used when no GSS context is available. Returns 0 or -1 on overflow.
 */
static int
gss_write_null_verf(struct svc_rqst *rqstp)
{
	__be32     *p;

	svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL);
	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
	/* don't really need to check if head->iov_len > PAGE_SIZE ... */
	*p++ = 0;
	if (!xdr_ressize_check(rqstp, p))
		return -1;
	return 0;
}
/*
 * Append an RPC_AUTH_GSS verifier to the response head: a MIC computed
 * with @ctx_id over the XDR-encoded @seq. Returns 0 on success, -1 on
 * MIC failure or response overflow.
 */
static int
gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
{
	__be32			xdr_seq;
	u32			maj_stat;
	struct xdr_buf		verf_data;
	struct xdr_netobj	mic;
	__be32			*p;
	struct kvec		iov;

	svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
	xdr_seq = htonl(seq);

	iov.iov_base = &xdr_seq;
	iov.iov_len = sizeof(xdr_seq);
	xdr_buf_from_iov(&iov, &verf_data);
	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
	mic.data = (u8 *)(p + 1);	/* MIC bytes go after the length word */
	maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
	if (maj_stat != GSS_S_COMPLETE)
		return -1;
	*p++ = htonl(mic.len);
	/* zero the quad-alignment padding after the MIC */
	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
	p += XDR_QUADLEN(mic.len);
	if (!xdr_ressize_check(rqstp, p))
		return -1;
	return 0;
}
/* An auth domain backed by one GSS pseudoflavor (e.g. krb5i). */
struct gss_domain {
	struct auth_domain	h;
	u32			pseudoflavor;
};
/*
 * Map (mechanism, service) to its registered auth domain name (e.g.
 * "gss/krb5i") and look it up. Returns a referenced domain or NULL.
 */
static struct auth_domain *
find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
{
	char *name;

	name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
	if (!name)
		return NULL;
	return auth_domain_find(name);
}
static struct auth_ops svcauthops_gss;
/* Return the pseudoflavor behind a GSS auth domain. */
u32 svcauth_gss_flavor(struct auth_domain *dom)
{
	struct gss_domain *gd = container_of(dom, struct gss_domain, h);

	return gd->pseudoflavor;
}

EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
/*
 * Register an auth domain for @pseudoflavor under @name so that exports
 * can be specified as e.g. "gss/krb5".
 *
 * Returns 0 on success, -ENOMEM on allocation failure and -EADDRINUSE
 * when a domain with this name is already registered (previously this
 * path silently returned 0 while discarding the caller's domain).
 */
int
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
	struct gss_domain	*new;
	struct auth_domain	*test;
	int			stat = -ENOMEM;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out;
	kref_init(&new->h.ref);
	new->h.name = kstrdup(name, GFP_KERNEL);
	if (!new->h.name)
		goto out_free_dom;
	new->h.flavour = &svcauthops_gss;
	new->pseudoflavor = pseudoflavor;

	test = auth_domain_lookup(name, &new->h);
	if (test != &new->h) {
		/* Duplicate registration: keep the existing domain and
		 * report the clash to the caller. */
		stat = -EADDRINUSE;
		auth_domain_put(test);
		kfree(new->h.name);
		goto out_free_dom;
	}
	return 0;

out_free_dom:
	kfree(new);
out:
	return stat;
}

EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
/*
 * Read the big-endian 32-bit word at offset @base of @buf into
 * host-order *@obj. Returns 0 or the read_bytes_from_xdr_buf() error.
 */
static inline int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
{
	__be32  raw;
	int     status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
/* It would be nice if this bit of code could be shared with the client.
* Obstacles:
* The client shouldn't malloc(), would have to pass in own memory.
* The server uses base of head iovec as read pointer, while the
* client uses separate pointer. */
/*
 * Strip and verify the rpc_gss_integ_data wrapper (RFC 2203 5.3.2.2):
 * verify the MIC over the databody, check the embedded sequence number
 * against @seq, and trim the MIC from the end of @buf.
 * Returns 0 on success, -EINVAL on any failure.
 */
static int
unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
{
	int stat = -EINVAL;
	u32 integ_len, maj_stat;
	struct xdr_netobj mic;
	struct xdr_buf integ_buf;

	/* Did we already verify the signature on the original pass through? */
	if (rqstp->rq_deferred)
		return 0;

	integ_len = svc_getnl(&buf->head[0]);
	if (integ_len & 3)
		return stat;
	if (integ_len > buf->len)
		return stat;
	/* The length checks above are meant to make these BUG()s
	 * unreachable for wire-controlled input. */
	if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
		BUG();
	/* copy out mic... */
	if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
		BUG();
	if (mic.len > RPC_MAX_AUTH_SIZE)
		return stat;
	mic.data = kmalloc(mic.len, GFP_KERNEL);
	if (!mic.data)
		return stat;
	if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len))
		goto out;
	maj_stat = gss_verify_mic(ctx, &integ_buf, &mic);
	if (maj_stat != GSS_S_COMPLETE)
		goto out;
	if (svc_getnl(&buf->head[0]) != seq)
		goto out;
	/* trim off the mic at the end before returning */
	xdr_buf_trim(buf, mic.len + 4);
	stat = 0;
out:
	kfree(mic.data);
	return stat;
}
/* Total number of bytes held by @buf: head + page data + tail. */
static inline int
total_buf_len(struct xdr_buf *buf)
{
	return buf->page_len + buf->head[0].iov_len + buf->tail[0].iov_len;
}
/*
 * After @pad bytes of privacy padding were dropped from buf->len, keep
 * head[0] consistent when all the data lives in the head.
 */
static void
fix_priv_head(struct xdr_buf *buf, int pad)
{
	if (buf->page_len == 0) {
		/* We need to adjust head and buf->len in tandem in this
		 * case to make svc_defer() work--it finds the original
		 * buffer start using buf->len - buf->head[0].iov_len. */
		buf->head[0].iov_len -= pad;
	}
}
/*
 * Decrypt the rpc_gss_priv_data wrapper (RFC 2203 5.3.2.3), realign the
 * plaintext to a 4-byte boundary and verify the embedded sequence
 * number against @seq. Returns 0 on success, -EINVAL on any failure.
 */
static int
unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
{
	u32 priv_len, maj_stat;
	int pad, saved_len, remaining_len, offset;

	/* decryption rewrites the buffer, so zero-copy is off the table */
	rqstp->rq_splice_ok = 0;

	priv_len = svc_getnl(&buf->head[0]);
	if (rqstp->rq_deferred) {
		/* Already decrypted last time through! The sequence number
		 * check at out_seq is unnecessary but harmless: */
		goto out_seq;
	}
	/* buf->len is the number of bytes from the original start of the
	 * request to the end, where head[0].iov_len is just the bytes
	 * not yet read from the head, so these two values are different: */
	remaining_len = total_buf_len(buf);
	if (priv_len > remaining_len)
		return -EINVAL;
	pad = remaining_len - priv_len;
	buf->len -= pad;
	fix_priv_head(buf, pad);

	/* Maybe it would be better to give gss_unwrap a length parameter: */
	saved_len = buf->len;
	buf->len = priv_len;
	maj_stat = gss_unwrap(ctx, 0, buf);
	pad = priv_len - buf->len;
	buf->len = saved_len;
	buf->len -= pad;
	/* The upper layers assume the buffer is aligned on 4-byte boundaries.
	 * In the krb5p case, at least, the data ends up offset, so we need to
	 * move it around. */
	/* XXX: This is very inefficient. It would be better to either do
	 * this while we encrypt, or maybe in the receive code, if we can peak
	 * ahead and work out the service and mechanism there. */
	offset = buf->head[0].iov_len % 4;
	if (offset) {
		buf->buflen = RPCSVC_MAXPAYLOAD;
		xdr_shift_buf(buf, offset);
		fix_priv_head(buf, pad);
	}
	if (maj_stat != GSS_S_COMPLETE)
		return -EINVAL;
out_seq:
	if (svc_getnl(&buf->head[0]) != seq)
		return -EINVAL;
	return 0;
}
/* Per-request RPCSEC_GSS state, hung off rqstp->rq_auth_data. */
struct gss_svc_data {
	/* decoded gss client cred: */
	struct rpc_gss_wire_cred	clcred;
	/* save a pointer to the beginning of the encoded verifier,
	 * for use in encryption/checksumming in svcauth_gss_release: */
	__be32				*verf_start;
	struct rsc			*rsci;
};
/*
 * Resolve rq_gssclient from the context's mechanism/service, then run
 * the regular unix client matching. Returns an SVC_* disposition.
 */
static int
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
	struct rsc *rsci = svcdata->rsci;
	struct rpc_gss_wire_cred *gc = &svcdata->clcred;
	int stat;

	/*
	 * A gss export can be specified either by:
	 * export *(sec=krb5,rw)
	 * or by
	 * export gss/krb5(rw)
	 * The latter is deprecated; but for backwards compatibility reasons
	 * the nfsd code will still fall back on trying it if the former
	 * doesn't work; so we try to make both available to nfsd, below.
	 */
	rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
	if (rqstp->rq_gssclient == NULL)
		return SVC_DENIED;
	stat = svcauth_unix_set_client(rqstp);
	if (stat == SVC_DROP || stat == SVC_CLOSE)
		return stat;
	return SVC_OK;
}
/*
 * Write the verifier for an INIT reply: a MIC over the initial sequence
 * window if the context was established, otherwise a NULL verifier (and
 * GSS_S_NO_CONTEXT when the new context cannot be found in the cache).
 */
static inline int
gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
		struct xdr_netobj *out_handle, int *major_status)
{
	struct rsc *rsci;
	int        rc;

	if (*major_status != GSS_S_COMPLETE)
		return gss_write_null_verf(rqstp);
	rsci = gss_svc_searchbyctx(cd, out_handle);
	if (rsci == NULL) {
		*major_status = GSS_S_NO_CONTEXT;
		return gss_write_null_verf(rqstp);
	}
	rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
	cache_put(&rsci->h, cd);
	return rc;
}
/*
 * Parse the (expected-NULL) verifier of a context-init request and
 * duplicate the client's context handle for the upcall.  On success
 * *authp is left at rpc_autherr_badverf for the caller's benefit.
 */
static inline int
gss_read_common_verf(struct rpc_gss_wire_cred *gc,
		     struct kvec *argv, __be32 *authp,
		     struct xdr_netobj *in_handle)
{
	u32 flavor, verf_len;

	/* Read the verifier; should be NULL: */
	*authp = rpc_autherr_badverf;
	if (argv->iov_len < 2 * 4)
		return SVC_DENIED;
	flavor = svc_getnl(argv);
	if (flavor != RPC_AUTH_NULL)
		return SVC_DENIED;
	verf_len = svc_getnl(argv);
	if (verf_len != 0)
		return SVC_DENIED;

	/* Marshal context handle and token for upcall: */
	*authp = rpc_autherr_badcred;
	if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
		return SVC_DENIED;
	if (dup_netobj(in_handle, &gc->gc_ctx))
		return SVC_CLOSE;

	*authp = rpc_autherr_badverf;
	return 0;
}
/*
 * Legacy-upcall flavor of verifier parsing: after the common checks,
 * copy the GSS token out of the request for the rsi cache key.  On any
 * failure the already-duplicated handle is released.
 */
static inline int
gss_read_verf(struct rpc_gss_wire_cred *gc,
	      struct kvec *argv, __be32 *authp,
	      struct xdr_netobj *in_handle,
	      struct xdr_netobj *in_token)
{
	struct xdr_netobj raw_token;
	int err;

	err = gss_read_common_verf(gc, argv, authp, in_handle);
	if (err)
		return err;

	if (svc_safe_getnetobj(argv, &raw_token)) {
		err = SVC_DENIED;
		goto out_free_handle;
	}
	if (dup_netobj(in_token, &raw_token)) {
		err = SVC_CLOSE;
		goto out_free_handle;
	}
	return 0;

out_free_handle:
	kfree(in_handle->data);
	return err;
}
/* Ok this is really heavily depending on a set of semantics in
 * how rqstp is set up by svc_recv and pages laid down by the
 * server when reading a request. We are basically guaranteed that
 * the token lays all down linearly across a set of pages, starting
 * at iov_base in rq_arg.head[0] which happens to be the first of a
 * set of pages stored in rq_pages[].
 * rq_arg.head[0].iov_base will provide us the page_base to pass
 * to the upcall.
 */
static inline int
gss_read_proxy_verf(struct svc_rqst *rqstp,
		    struct rpc_gss_wire_cred *gc, __be32 *authp,
		    struct xdr_netobj *in_handle,
		    struct gssp_in_token *in_token)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	u32 token_len;
	int err;

	err = gss_read_common_verf(gc, argv, authp, in_handle);
	if (err)
		return err;

	/* token must fit in what svc_recv actually received */
	token_len = svc_getnl(argv);
	if (token_len > argv->iov_len + rqstp->rq_arg.page_len)
		return SVC_DENIED;

	/* hand the token to gss-proxy in place, as pages + offset */
	in_token->pages = rqstp->rq_pages;
	in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
	in_token->page_len = token_len;
	return 0;
}
/*
 * Encode the body of a context-init reply: accept status, context
 * handle, GSS major/minor status, sequence window and output token.
 * Returns -1 (without rolling anything back) if the reply would
 * exceed size_limit.
 */
static inline int
gss_write_resv(struct kvec *resv, size_t size_limit,
	       struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
	       int major_status, int minor_status)
{
	/* accept status + handle */
	if (resv->iov_len + 4 > size_limit)
		goto out_err;
	svc_putnl(resv, RPC_SUCCESS);
	if (svc_safe_putnetobj(resv, out_handle))
		goto out_err;

	/* major, minor, sequence window, token */
	if (resv->iov_len + 3 * 4 > size_limit)
		goto out_err;
	svc_putnl(resv, major_status);
	svc_putnl(resv, minor_status);
	svc_putnl(resv, GSS_SEQ_WIN);
	if (svc_safe_putnetobj(resv, out_token))
		goto out_err;

	return 0;
out_err:
	return -1;
}
/*
 * Having read the cred already and found we're in the context
 * initiation case, read the verifier and initiate (or check the results
 * of) upcalls to userspace for help with context initiation.  If
 * the upcall results are available, write the verifier and result.
 * Otherwise, drop the request pending an answer to the upcall.
 *
 * Returns one of the SVC_* dispositions for svcauth_gss_accept().
 */
static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
			struct rpc_gss_wire_cred *gc, __be32 *authp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct rsi *rsip, rsikey;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);

	/* build the rsi cache key from the client's handle and token */
	memset(&rsikey, 0, sizeof(rsikey));
	ret = gss_read_verf(gc, argv, authp,
			    &rsikey.in_handle, &rsikey.in_token);
	if (ret)
		return ret;

	/* Perform upcall, or find upcall result: */
	rsip = rsi_lookup(sn->rsi_cache, &rsikey);
	rsi_free(&rsikey);
	if (!rsip)
		return SVC_CLOSE;
	if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
		/* No upcall result: */
		return SVC_CLOSE;

	ret = SVC_CLOSE;
	/* Got an answer to the upcall; use it: */
	if (gss_write_init_verf(sn->rsc_cache, rqstp,
				&rsip->out_handle, &rsip->major_status))
		goto out;
	if (gss_write_resv(resv, PAGE_SIZE,
			&rsip->out_handle, &rsip->out_token,
			rsip->major_status, rsip->minor_status))
		goto out;

	ret = SVC_COMPLETE;
out:
	/* drop the reference taken by rsi_lookup() */
	cache_put(&rsip->h, sn->rsi_cache);
	return ret;
}
/*
 * Turn a successful gss-proxy upcall result into an rsc cache entry.
 *
 * Generates a new unique context handle (returned to the caller via
 * @handle so it can be sent to the client), duplicates it into the rsc
 * cache key, steals the credentials from @ud and imports the
 * mech-specific security context.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the original marked the entry CACHE_NEGATIVE when no creds were
 * found but then stored the *uninitialized* local `expiry` into
 * rsci.h.expiry_time.  Don't cache anything in that case; just fail the
 * context initiation with -EINVAL.
 */
static int gss_proxy_save_rsc(struct cache_detail *cd,
				struct gssp_upcall_data *ud,
				uint64_t *handle)
{
	struct rsc rsci, *rscp = NULL;
	static atomic64_t ctxhctr;
	long long ctxh;
	struct gss_api_mech *gm = NULL;
	time_t expiry;
	int status;

	memset(&rsci, 0, sizeof(rsci));
	/* context handle */
	status = -ENOMEM;
	/* the handle needs to be just a unique id,
	 * use a static counter */
	ctxh = atomic64_inc_return(&ctxhctr);

	/* make a copy for the caller */
	*handle = ctxh;

	/* make a copy for the rsc cache */
	if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
		goto out;
	rscp = rsc_lookup(cd, &rsci);
	if (!rscp)
		goto out;

	/* creds */
	if (!ud->found_creds) {
		/* userspace seems buggy, we should always get at least a
		 * mapping to nobody; don't cache an entry whose expiry
		 * time would be uninitialized - fail the init instead */
		dprintk("RPC: No creds found!\n");
		status = -EINVAL;
		goto out;
	}

	/* steal creds */
	rsci.cred = ud->creds;
	memset(&ud->creds, 0, sizeof(struct svc_cred));

	/* get mech handle from OID */
	status = -EOPNOTSUPP;
	gm = gss_mech_get_by_OID(&ud->mech_oid);
	if (!gm)
		goto out;

	/* mech-specific data: */
	status = gss_import_sec_context(ud->out_handle.data,
					ud->out_handle.len,
					gm, &rsci.mechctx,
					&expiry, GFP_KERNEL);
	if (status)
		goto out;

	rsci.h.expiry_time = expiry;
	rscp = rsc_update(cd, &rsci, rscp);
	status = 0;
out:
	gss_mech_put(gm);
	rsc_free(&rsci);
	if (rscp)
		cache_put(&rscp->h, cd);
	else
		status = -ENOMEM;
	return status;
}
/*
 * Context initiation via the gss-proxy daemon: synchronous upcall
 * instead of the legacy rsi-cache based rpc.svcgssd path.  On success
 * the reply (verifier + init results) is fully encoded here and
 * SVC_COMPLETE is returned; any failure closes the connection.
 */
static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
			struct rpc_gss_wire_cred *gc, __be32 *authp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct xdr_netobj cli_handle;
	struct gssp_upcall_data ud;
	uint64_t handle;
	int status;
	int ret;
	struct net *net = rqstp->rq_xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	memset(&ud, 0, sizeof(ud));
	ret = gss_read_proxy_verf(rqstp, gc, authp,
				  &ud.in_handle, &ud.in_token);
	if (ret)
		return ret;

	ret = SVC_CLOSE;

	/* Perform synchronous upcall to gss-proxy */
	status = gssp_accept_sec_context_upcall(net, &ud);
	if (status)
		goto out;

	dprintk("RPC: svcauth_gss: gss major status = %d\n",
			ud.major_status);

	switch (ud.major_status) {
	case GSS_S_CONTINUE_NEEDED:
		/* multi-leg init: echo gss-proxy's handle back as-is */
		cli_handle = ud.out_handle;
		break;
	case GSS_S_COMPLETE:
		/* context is done: cache it and hand out our own handle */
		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
		if (status)
			goto out;
		/* cli_handle points at the local `handle`; it is only
		 * used below, before this function returns */
		cli_handle.data = (u8 *)&handle;
		cli_handle.len = sizeof(handle);
		break;
	default:
		ret = SVC_CLOSE;
		goto out;
	}

	/* Got an answer to the upcall; use it: */
	if (gss_write_init_verf(sn->rsc_cache, rqstp,
				&cli_handle, &ud.major_status))
		goto out;
	if (gss_write_resv(resv, PAGE_SIZE,
			&cli_handle, &ud.out_token,
			ud.major_status, ud.minor_status))
		goto out;

	ret = SVC_COMPLETE;
out:
	gssp_free_upcall_data(&ud);
	return ret;
}
DEFINE_SPINLOCK(use_gssp_lock);
/*
 * Report whether this network namespace uses gss-proxy for context
 * establishment.  If no explicit choice was made before the first
 * request arrives, the answer is latched to "no" under use_gssp_lock.
 */
static bool use_gss_proxy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	/* -1 means "not decided yet"; anything else is final */
	if (sn->use_gss_proxy != -1)
		return sn->use_gss_proxy;
	spin_lock(&use_gssp_lock);
	/*
	 * If you wanted gss-proxy, you should have said so before
	 * starting to accept requests:
	 */
	sn->use_gss_proxy = 0;
	spin_unlock(&use_gssp_lock);
	return 0;
}
#ifdef CONFIG_PROC_FS
/*
 * Record the context-establishment mechanism for this namespace
 * (0 = legacy upcall, 1 = gss-proxy).  Fails with -EBUSY if a
 * conflicting choice was already latched.  Always wakes readers
 * waiting in wait_for_gss_proxy().
 */
static int set_gss_proxy(struct net *net, int type)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	WARN_ON_ONCE(type != 0 && type != 1);
	spin_lock(&use_gssp_lock);
	if (sn->use_gss_proxy == -1 || sn->use_gss_proxy == type)
		sn->use_gss_proxy = type;
	else
		ret = -EBUSY;
	spin_unlock(&use_gssp_lock);
	wake_up(&sn->gssp_wq);
	return ret;
}
/*
 * A namespace is "ready" for read_gssp() once the gss-proxy decision
 * is final: either gss-proxy is off, or it is on and a client to the
 * daemon has been set up.
 */
static inline bool gssp_ready(struct sunrpc_net *sn)
{
	if (sn->use_gss_proxy == -1)
		return false;		/* no decision made yet */
	if (sn->use_gss_proxy == 0)
		return true;		/* gss-proxy disabled */
	if (sn->use_gss_proxy == 1)
		return sn->gssp_clnt;	/* need the daemon client too */
	WARN_ON_ONCE(1);
	return false;
}
/*
 * Block until the gss-proxy decision is usable (see gssp_ready()).
 * Honors O_NONBLOCK with -EAGAIN and may return -ERESTARTSYS if the
 * sleep is interrupted by a signal.
 */
static int wait_for_gss_proxy(struct net *net, struct file *file)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (file->f_flags & O_NONBLOCK && !gssp_ready(sn))
		return -EAGAIN;
	return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn));
}
/*
 * Handle a write to /proc/net/rpc/use-gss-proxy.  The only accepted
 * input is the string "1": enable gss-proxy for this namespace and set
 * up the upcall client.  Any other value yields -EINVAL.
 */
static ssize_t write_gssp(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct net *net = PDE_DATA(file->f_path.dentry->d_inode);
	char kbuf[20];
	unsigned long val;
	int err;

	/* single short write at offset 0 only */
	if (*ppos || count > sizeof(kbuf) - 1)
		return -EINVAL;
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	kbuf[count] = '\0';

	err = kstrtoul(kbuf, 0, &val);
	if (err)
		return err;
	if (val != 1)
		return -EINVAL;

	err = set_gss_proxy(net, 1);
	if (err)
		return err;
	err = set_gssp_clnt(net);
	if (err)
		return err;
	return count;
}
/*
 * Handle a read of /proc/net/rpc/use-gss-proxy: report the current
 * setting as "0\n" or "1\n".  Blocks (unless O_NONBLOCK) until the
 * gss-proxy decision is final.
 */
static ssize_t read_gssp(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct net *net = PDE_DATA(file->f_path.dentry->d_inode);
	unsigned long pos = *ppos;
	char out[10];
	size_t avail;
	int err;

	err = wait_for_gss_proxy(net, file);
	if (err)
		return err;

	snprintf(out, sizeof(out), "%d\n", use_gss_proxy(net));
	avail = strlen(out);
	if (pos >= avail)
		return 0;

	/* clamp to what the caller asked for and what is left */
	avail -= pos;
	if (avail > count)
		avail = count;
	if (copy_to_user(buf, out + pos, avail))
		return -EFAULT;
	*ppos += avail;
	return avail;
}
/* /proc/net/rpc/use-gss-proxy: write "1" to enable gss-proxy; reads
 * return the current setting (blocking until a decision is made). */
static const struct file_operations use_gss_proxy_ops = {
	.open = nonseekable_open,
	.write = write_gssp,
	.read = read_gssp,
};
/*
 * Create the per-net /proc/net/rpc/use-gss-proxy control file and
 * initialize the gss-proxy decision to "undecided" (-1).
 * Returns 0 on success or -ENOMEM.
 */
static int create_use_gss_proxy_proc_entry(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct proc_dir_entry **p = &sn->use_gssp_proc;

	sn->use_gss_proxy = -1;
	*p = proc_create_data("use-gss-proxy", S_IFREG|S_IRUSR|S_IWUSR,
			      sn->proc_net_rpc,
			      &use_gss_proxy_ops, net);
	if (!*p)
		return -ENOMEM;
	init_gssp_clnt(sn);
	return 0;
}
/*
 * Remove the per-net use-gss-proxy proc entry and tear down the
 * gss-proxy upcall client.  Safe to call when the entry was never
 * created (sn->use_gssp_proc is NULL).
 */
static void destroy_use_gss_proxy_proc_entry(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (!sn->use_gssp_proc)
		return;
	remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
	clear_gssp_clnt(sn);
}
#else /* CONFIG_PROC_FS */
/* Without procfs there is no way to enable gss-proxy; these stubs keep
 * gss_svc_init_net()/gss_svc_shutdown_net() unconditional. */
static int create_use_gss_proxy_proc_entry(struct net *net)
{
	return 0;
}

static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
#endif /* CONFIG_PROC_FS */
/*
 * Accept an rpcsec packet.
 * If context establishment, punt to user space
 * If data exchange, verify/decrypt
 * If context destruction, handle here
 * In the context establishment and destruction case we encode
 * response here and return SVC_COMPLETE.
 */
static int
svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	u32 crlen;
	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc;
	struct rsc *rsci = NULL;
	__be32 *rpcstart;
	/* remembered so auth_err can roll the reply back to this point */
	__be32 *reject_stat = resv->iov_base + resv->iov_len;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);

	dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
			argv->iov_len);

	*authp = rpc_autherr_badcred;
	/* lazily allocate the per-request gss state on first use */
	if (!svcdata)
		svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
	if (!svcdata)
		goto auth_err;
	rqstp->rq_auth_data = svcdata;
	svcdata->verf_start = NULL;
	svcdata->rsci = NULL;
	gc = &svcdata->clcred;

	/* start of rpc packet is 7 u32's back from here:
	 * xid direction rpcversion prog vers proc flavour
	 */
	rpcstart = argv->iov_base;
	rpcstart -= 7;

	/* credential is:
	 *   version(==1), proc(0,1,2,3), seq, service (1,2,3), handle
	 * at least 5 u32s, and is preceded by length, so that makes 6.
	 */
	if (argv->iov_len < 5 * 4)
		goto auth_err;
	crlen = svc_getnl(argv);
	if (svc_getnl(argv) != RPC_GSS_VERSION)
		goto auth_err;
	gc->gc_proc = svc_getnl(argv);
	gc->gc_seq = svc_getnl(argv);
	gc->gc_svc = svc_getnl(argv);
	if (svc_safe_getnetobj(argv, &gc->gc_ctx))
		goto auth_err;
	/* the cred length must match exactly what we just consumed */
	if (crlen != round_up_to_quad(gc->gc_ctx.len) + 5 * 4)
		goto auth_err;

	/* control procedures are only valid on RPC procedure 0 (NULL) */
	if ((gc->gc_proc != RPC_GSS_PROC_DATA) && (rqstp->rq_proc != 0))
		goto auth_err;

	*authp = rpc_autherr_badverf;
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_INIT:
	case RPC_GSS_PROC_CONTINUE_INIT:
		/* context establishment: punt to user space */
		if (use_gss_proxy(SVC_NET(rqstp)))
			return svcauth_gss_proxy_init(rqstp, gc, authp);
		else
			return svcauth_gss_legacy_init(rqstp, gc, authp);
	case RPC_GSS_PROC_DATA:
	case RPC_GSS_PROC_DESTROY:
		/* Look up the context, and check the verifier: */
		*authp = rpcsec_gsserr_credproblem;
		rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
		if (!rsci)
			goto auth_err;
		switch (gss_verify_header(rqstp, rsci, rpcstart, gc, authp)) {
		case SVC_OK:
			break;
		case SVC_DENIED:
			goto auth_err;
		case SVC_DROP:
			goto drop;
		}
		break;
	default:
		*authp = rpc_autherr_rejectedcred;
		goto auth_err;
	}

	/* now act upon the command: */
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_DESTROY:
		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		/* expire the context now and mark it dead in the cache */
		rsci->h.expiry_time = get_seconds();
		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
		if (resv->iov_len + 4 > PAGE_SIZE)
			goto drop;
		svc_putnl(resv, RPC_SUCCESS);
		goto complete;
	case RPC_GSS_PROC_DATA:
		*authp = rpcsec_gsserr_ctxproblem;
		/* remember where the verifier begins so that
		 * svcauth_gss_release() can wrap the reply later */
		svcdata->verf_start = resv->iov_base + resv->iov_len;
		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		rqstp->rq_cred = rsci->cred;
		get_group_info(rsci->cred.cr_group_info);
		*authp = rpc_autherr_badcred;
		switch (gc->gc_svc) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			/* placeholders for length and seq. number: */
			svc_putnl(resv, 0);
			svc_putnl(resv, 0);
			if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
					gc->gc_seq, rsci->mechctx))
				goto garbage_args;
			break;
		case RPC_GSS_SVC_PRIVACY:
			/* placeholders for length and seq. number: */
			svc_putnl(resv, 0);
			svc_putnl(resv, 0);
			if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
					gc->gc_seq, rsci->mechctx))
				goto garbage_args;
			break;
		default:
			goto auth_err;
		}
		/* hand our context reference over to svcauth_gss_release() */
		svcdata->rsci = rsci;
		cache_get(&rsci->h);
		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
					rsci->mechctx->mech_type,
					GSS_C_QOP_DEFAULT,
					gc->gc_svc);
		ret = SVC_OK;
		goto out;
	}
garbage_args:
	ret = SVC_GARBAGE;
	goto out;
auth_err:
	/* Restore write pointer to its original value: */
	xdr_ressize_check(rqstp, reject_stat);
	ret = SVC_DENIED;
	goto out;
complete:
	ret = SVC_COMPLETE;
	goto out;
drop:
	ret = SVC_DROP;
out:
	if (rsci)
		cache_put(&rsci->h, sn->rsc_cache);
	return ret;
}
/*
 * Locate the point in the reply where integrity/privacy wrapping should
 * start: just past the verifier and the accept_stat that
 * svcauth_gss_accept() wrote.  Returns NULL when the reply carries an
 * error status and must not be wrapped.  Consumes gsd->verf_start so a
 * second call (release can run twice) does nothing.
 */
static __be32 *
svcauth_gss_prepare_to_wrap(struct xdr_buf *resbuf, struct gss_svc_data *gsd)
{
	__be32 *p;
	u32 verf_len;

	p = gsd->verf_start;
	gsd->verf_start = NULL;

	/* If the reply stat is nonzero, don't wrap: */
	if (*(p-1) != rpc_success)
		return NULL;
	/* Skip the verifier: */
	p += 1;
	verf_len = ntohl(*p++);
	p += XDR_QUADLEN(verf_len);
	/* move accept_stat to right place: */
	memcpy(p, p + 2, 4);
	/* Also don't wrap if the accept stat is nonzero: */
	if (*p != rpc_success) {
		/* drop the two placeholder words added for wrapping */
		resbuf->head[0].iov_len -= 2 * 4;
		return NULL;
	}
	p++;
	return p;
}
/*
 * Wrap the reply for RPC_GSS_SVC_INTEGRITY: fill in the length and
 * sequence-number placeholders, then compute a MIC over the reply body
 * and append it (quad-aligned) to the tail.  Returns 0 on success or
 * when no wrapping is needed, -EINVAL on failure.
 */
static inline int
svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	struct kvec *resv;
	__be32 *p;
	int integ_offset, integ_len;
	int stat = -EINVAL;

	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
	if (p == NULL)
		goto out;
	/* everything after the placeholders gets integrity-protected */
	integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
	integ_len = resbuf->len - integ_offset;
	BUG_ON(integ_len % 4);
	*p++ = htonl(integ_len);
	*p++ = htonl(gc->gc_seq);
	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
				integ_len))
		BUG();
	/* the MIC goes into the tail; create one in the head page if the
	 * reply has no tail yet */
	if (resbuf->tail[0].iov_base == NULL) {
		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			goto out_err;
		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
						+ resbuf->head[0].iov_len;
		resbuf->tail[0].iov_len = 0;
		resv = &resbuf->tail[0];
	} else {
		resv = &resbuf->tail[0];
	}
	/* +4 leaves room for the netobj length word written below */
	mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
	if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
		goto out_err;
	svc_putnl(resv, mic.len);
	/* zero-fill up to the next XDR quad boundary */
	memset(mic.data + mic.len, 0,
			round_up_to_quad(mic.len) - mic.len);
	resv->iov_len += XDR_QUADLEN(mic.len) << 2;
	/* not strictly required: */
	resbuf->len += XDR_QUADLEN(mic.len) << 2;
	BUG_ON(resv->iov_len > PAGE_SIZE);
out:
	stat = 0;
out_err:
	return stat;
}
/*
 * Wrap the reply for RPC_GSS_SVC_PRIVACY: write the sequence number,
 * encrypt everything after it in place with gss_wrap(), then fill in
 * the length word and pad the tail to a quad boundary.  Returns 0 on
 * success or when no wrapping is needed, -ENOMEM on failure.
 */
static inline int
svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	struct page **inpages = NULL;
	__be32 *p, *len;
	int offset;
	int pad;

	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
	if (p == NULL)
		return 0;
	/* remember where the length word goes; its value is known only
	 * after gss_wrap() has run */
	len = p++;
	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
	*p++ = htonl(gc->gc_seq);
	inpages = resbuf->pages;
	/* XXX: Would be better to write some xdr helper functions for
	 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */

	/*
	 * If there is currently tail data, make sure there is
	 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
	 * the page, and move the current tail data such that
	 * there is RPC_MAX_AUTH_SIZE slack space available in
	 * both the head and tail.
	 */
	if (resbuf->tail[0].iov_base) {
		BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
							+ PAGE_SIZE);
		BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
		if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			return -ENOMEM;
		memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
			resbuf->tail[0].iov_base,
			resbuf->tail[0].iov_len);
		resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
	}
	/*
	 * If there is no current tail data, make sure there is
	 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
	 * allotted page, and set up tail information such that there
	 * is RPC_MAX_AUTH_SIZE slack space available in both the
	 * head and tail.
	 */
	if (resbuf->tail[0].iov_base == NULL) {
		if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			return -ENOMEM;
		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
		resbuf->tail[0].iov_len = 0;
	}
	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
		return -ENOMEM;
	*len = htonl(resbuf->len - offset);
	/* pad the wrapped data out to an XDR quad boundary */
	pad = 3 - ((resbuf->len - offset - 1)&3);
	p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
	memset(p, 0, pad);
	resbuf->tail[0].iov_len += pad;
	resbuf->len += pad;
	return 0;
}
/*
 * Post-processing hook run after the reply has been built: wrap the
 * reply according to the negotiated service level, then drop all
 * per-request references (client domains, group info, context cache
 * entry) regardless of outcome.
 */
static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *resbuf = &rqstp->rq_res;
	int stat = -EINVAL;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);

	/* only data-exchange replies carry a wrappable body */
	if (gc->gc_proc != RPC_GSS_PROC_DATA)
		goto out;
	/* Release can be called twice, but we only wrap once. */
	if (gsd->verf_start == NULL)
		goto out;
	/* normally not set till svc_send, but we need it here: */
	/* XXX: what for? Do we mess it up the moment we call svc_putu32
	 * or whatever? */
	resbuf->len = total_buf_len(resbuf);
	switch (gc->gc_svc) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		stat = svcauth_gss_wrap_resp_integ(rqstp);
		if (stat)
			goto out_err;
		break;
	case RPC_GSS_SVC_PRIVACY:
		stat = svcauth_gss_wrap_resp_priv(rqstp);
		if (stat)
			goto out_err;
		break;
	/*
	 * For any other gc_svc value, svcauth_gss_accept() already set
	 * the auth_error appropriately; just fall through:
	 */
	}
out:
	stat = 0;
out_err:
	/* unconditional cleanup of per-request references */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_gssclient)
		auth_domain_put(rqstp->rq_gssclient);
	rqstp->rq_gssclient = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;
	if (gsd->rsci)
		cache_put(&gsd->rsci->h, sn->rsc_cache);
	gsd->rsci = NULL;
	return stat;
}
/*
 * Free a gss auth_domain once its last reference is gone; the
 * auth_domain is embedded in a gss_domain, so free the container.
 */
static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
	kfree(dom->name);
	kfree(gd);
}
/* Server-side auth_ops implementing the RPC_AUTH_GSS flavor. */
static struct auth_ops svcauthops_gss = {
	.name = "rpcsec_gss",
	.owner = THIS_MODULE,
	.flavour = RPC_AUTH_GSS,
	.accept = svcauth_gss_accept,
	.release = svcauth_gss_release,
	.domain_release = svcauth_gss_domain_release,
	.set_client = svcauth_gss_set_client,
};
/*
 * Create and register the per-net rsi (context-init upcall) cache.
 * Returns 0 on success or a negative errno; on registration failure
 * the freshly created cache is destroyed again.
 */
static int rsi_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsi_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	err = cache_register_net(cd, net);
	if (err)
		goto out_destroy;

	sn->rsi_cache = cd;
	return 0;

out_destroy:
	cache_destroy_net(cd, net);
	return err;
}
/*
 * Tear down the per-net rsi cache: detach it from sunrpc_net, then
 * purge its entries, unregister and free it.
 */
static void rsi_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsi_cache;

	sn->rsi_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}
/*
 * Create and register the per-net rsc (established contexts) cache.
 * Returns 0 on success or a negative errno; on registration failure
 * the freshly created cache is destroyed again.
 */
static int rsc_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsc_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	err = cache_register_net(cd, net);
	if (err)
		goto out_destroy;

	sn->rsc_cache = cd;
	return 0;

out_destroy:
	cache_destroy_net(cd, net);
	return err;
}
/*
 * Tear down the per-net rsc cache: detach it from sunrpc_net, then
 * purge its entries, unregister and free it.
 */
static void rsc_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsc_cache;

	sn->rsc_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}
/*
 * Per-net-namespace setup for the RPCSEC_GSS server: create the rsc
 * (contexts) and rsi (init-upcall) caches and the use-gss-proxy proc
 * entry.  Returns 0 on success or a negative errno, after unwinding
 * anything already set up.
 *
 * Fix: if create_use_gss_proxy_proc_entry() failed, the old unwind
 * called destroy_use_gss_proxy_proc_entry() (for an entry that was
 * never created) and leaked the rsi cache.  Destroy the rsi cache at
 * out2 instead, mirroring the setup order.
 */
int
gss_svc_init_net(struct net *net)
{
	int rv;

	rv = rsc_cache_create_net(net);
	if (rv)
		return rv;
	rv = rsi_cache_create_net(net);
	if (rv)
		goto out1;
	rv = create_use_gss_proxy_proc_entry(net);
	if (rv)
		goto out2;
	return 0;
out2:
	rsi_cache_destroy_net(net);
out1:
	rsc_cache_destroy_net(net);
	return rv;
}
/*
 * Per-net teardown: remove the use-gss-proxy proc entry first, then
 * destroy the rsi and rsc caches (reverse of gss_svc_init_net()).
 */
void
gss_svc_shutdown_net(struct net *net)
{
	destroy_use_gss_proxy_proc_entry(net);
	rsi_cache_destroy_net(net);
	rsc_cache_destroy_net(net);
}
/* Register the server-side RPC_AUTH_GSS authentication flavor. */
int
gss_svc_init(void)
{
	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
}
/* Unregister the server-side RPC_AUTH_GSS authentication flavor. */
void
gss_svc_shutdown(void)
{
	svc_auth_unregister(RPC_AUTH_GSS);
}
| gpl-2.0 |
joel-porquet/tsar-linux | drivers/mtd/mtdsuper.c | 2135 | 5592 | /* MTD-based superblock management
*
* Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved.
* Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
*
* Written by: David Howells <dhowells@redhat.com>
* David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/mtd/super.h>
#include <linux/namei.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/major.h>
/*
 * compare superblocks to see if they're equivalent
 * - they are if the underlying MTD device is the same
 */
static int get_sb_mtd_compare(struct super_block *sb, void *_mtd)
{
	struct mtd_info *mtd = _mtd;

	if (sb->s_mtd != mtd) {
		pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
			 sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
		return 0;
	}

	pr_debug("MTDSB: Match on device %d (\"%s\")\n",
		 mtd->index, mtd->name);
	return 1;
}
/*
 * mark the superblock by the MTD device it is using
 * - set the device number to be the correct MTD block device for persistence
 *   of NFS exports
 */
static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
{
	struct mtd_info *mtd = _mtd;

	sb->s_mtd = mtd;
	/* a stable dev_t (mtdblock major + device index) keeps NFS file
	 * handles valid across remounts */
	sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
	sb->s_bdi = mtd->backing_dev_info;
	return 0;
}
/*
 * get a superblock on an MTD-backed filesystem
 * - on success the mtd reference passes to the superblock (it is set in
 *   sb->s_mtd by get_sb_mtd_set and dropped later by kill_mtd_super());
 *   on error, or when the device is already mounted, the reference is
 *   dropped here
 */
static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
			  const char *dev_name, void *data,
			  struct mtd_info *mtd,
			  int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;
	int ret;

	sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd);
	if (IS_ERR(sb))
		goto out_error;

	if (sb->s_root)
		goto already_mounted;

	/* fresh new superblock */
	pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
		 mtd->index, mtd->name);

	ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
	if (ret < 0) {
		deactivate_locked_super(sb);
		return ERR_PTR(ret);
	}

	/* go */
	sb->s_flags |= MS_ACTIVE;
	return dget(sb->s_root);

	/* new mountpoint for an already mounted superblock */
already_mounted:
	pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
		 mtd->index, mtd->name);
	put_mtd_device(mtd);
	return dget(sb->s_root);

out_error:
	put_mtd_device(mtd);
	return ERR_CAST(sb);
}
/*
 * get a superblock on an MTD-backed filesystem by MTD device number
 */
static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags,
			 const char *dev_name, void *data, int mtdnr,
			 int (*fill_super)(struct super_block *, void *, int))
{
	struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);

	if (IS_ERR(mtd)) {
		pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
		return ERR_CAST(mtd);
	}

	/* mount_mtd_aux() takes over the mtd reference */
	return mount_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super);
}
/*
 * set up an MTD-based superblock
 *
 * dev_name may be "mtd<nr>", "mtd:<name>" or (when CONFIG_BLOCK is set)
 * the path of an mtdblockN device node.  Returns the root dentry of the
 * mounted filesystem or an ERR_PTR.
 */
struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data,
	       int (*fill_super)(struct super_block *, void *, int))
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	int ret, major;
#endif
	int mtdnr;

	if (!dev_name)
		return ERR_PTR(-EINVAL);

	pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);

	/* the preferred way of mounting in future; especially when
	 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
	 * by name, so that we don't require block device support to be present
	 * in the kernel. */
	if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
		if (dev_name[3] == ':') {
			struct mtd_info *mtd;

			/* mount by MTD device name */
			pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
				 dev_name + 4);

			mtd = get_mtd_device_nm(dev_name + 4);
			if (!IS_ERR(mtd))
				return mount_mtd_aux(
					fs_type, flags,
					dev_name, data, mtd,
					fill_super);

			printk(KERN_NOTICE "MTD:"
			       " MTD device with name \"%s\" not found.\n",
			       dev_name + 4);

		} else if (isdigit(dev_name[3])) {
			/* mount by MTD device number name */
			char *endptr;

			mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
			if (!*endptr) {
				/* It was a valid number */
				pr_debug("MTDSB: mtd%%d, mtdnr %d\n",
					 mtdnr);
				return mount_mtd_nr(fs_type, flags,
						    dev_name, data,
						    mtdnr, fill_super);
			}
		}
	}

#ifdef CONFIG_BLOCK
	/* try the old way - the hack where we allowed users to mount
	 * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
	 */
	bdev = lookup_bdev(dev_name);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
		return ERR_PTR(ret);
	}
	pr_debug("MTDSB: lookup_bdev() returned 0\n");

	ret = -EINVAL;

	/* only the device number is used; the bdev itself is put back */
	major = MAJOR(bdev->bd_dev);
	mtdnr = MINOR(bdev->bd_dev);
	bdput(bdev);

	if (major != MTD_BLOCK_MAJOR)
		goto not_an_MTD_device;

	return mount_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super);

not_an_MTD_device:
#endif /* CONFIG_BLOCK */

	if (!(flags & MS_SILENT))
		printk(KERN_NOTICE
		       "MTD: Attempt to mount non-MTD device \"%s\"\n",
		       dev_name);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mount_mtd);
/*
 * destroy an MTD-based superblock: shut down the VFS side, then drop
 * the MTD device reference the superblock has been holding since mount
 */
void kill_mtd_super(struct super_block *sb)
{
	generic_shutdown_super(sb);
	put_mtd_device(sb->s_mtd);
	sb->s_mtd = NULL;
}
EXPORT_SYMBOL_GPL(kill_mtd_super);
| gpl-2.0 |
TEAM-RAZOR-DEVICES/kernel_asus_moorefield | fs/xfs/xfs_attr_remote.c | 2135 | 15249 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
/*
 * Each contiguous block has a header, so it is not just a simple attribute
 * length to FSB conversion.
 */
int
xfs_attr3_rmt_blocks(
	struct xfs_mount *mp,
	int		attrlen)
{
	int		buflen;

	/* pre-CRC filesystems store the value with no per-block header */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return XFS_B_TO_FSB(mp, attrlen);

	/* round up to whole per-block payloads */
	buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
	return (attrlen + buflen - 1) / buflen;
}
/*
 * Checking of the remote attribute header is split into two parts. The
 * verifier does CRC, location and bounds checking, the unpacking function
 * checks the attribute parameters and owner.
 */
static bool
xfs_attr3_rmt_hdr_ok(
	struct xfs_mount	*mp,
	void			*ptr,
	xfs_ino_t		ino,
	uint32_t		offset,
	uint32_t		size,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	/* block number, value offset, byte count and owning inode must
	 * all match what the caller expects */
	return be64_to_cpu(rmt->rm_blkno) == bno &&
	       be32_to_cpu(rmt->rm_offset) == offset &&
	       be32_to_cpu(rmt->rm_bytes) == size &&
	       be64_to_cpu(rmt->rm_owner) == ino;
}
/*
 * Structural checks on one remote-attribute block header: magic number,
 * filesystem UUID, self-referencing block number, payload fitting inside
 * the block, offset+length within the XATTR_SIZE_MAX limit, and a
 * non-zero owner.  Only meaningful on CRC-enabled (v5) filesystems;
 * returns false for anything else.
 */
static bool
xfs_attr3_rmt_verify(
	struct xfs_mount	*mp,
	void			*ptr,
	int			fsbsize,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
		return false;
	if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_uuid))
		return false;
	if (be64_to_cpu(rmt->rm_blkno) != bno)
		return false;
	/* payload cannot exceed the block minus its header */
	if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
		return false;
	if (be32_to_cpu(rmt->rm_offset) +
	    be32_to_cpu(rmt->rm_bytes) >= XATTR_SIZE_MAX)
		return false;
	if (rmt->rm_owner == 0)
		return false;
	return true;
}
/*
 * Read verifier for remote attribute buffers: walk the buffer one
 * filesystem block at a time, checking the CRC and then the header of
 * each block.  Any mismatch flags the whole buffer EFSCORRUPTED.
 * No-op on non-CRC (pre-v5) filesystems.
 */
static void
xfs_attr3_rmt_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	char		*ptr;
	int		len;
	bool		corrupt = false;
	xfs_daddr_t	bno;

	/* no verification of non-crc buffers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	ptr = bp->b_addr;
	bno = bp->b_bn;
	len = BBTOB(bp->b_length);
	ASSERT(len >= XFS_LBSIZE(mp));

	while (len > 0) {
		if (!xfs_verify_cksum(ptr, XFS_LBSIZE(mp),
				      XFS_ATTR3_RMT_CRC_OFF)) {
			corrupt = true;
			break;
		}
		if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
			corrupt = true;
			break;
		}
		/* advance one filesystem block */
		len -= XFS_LBSIZE(mp);
		ptr += XFS_LBSIZE(mp);
		bno += mp->m_bsize;
	}

	if (corrupt) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	} else
		ASSERT(len == 0);
}
/*
 * Write verifier for remote attribute buffers: validate each block's
 * header, stamp the log item's LSN into it when attached, then compute
 * the per-block CRC.  A bad header marks the buffer EFSCORRUPTED and
 * aborts the write.  No-op on non-CRC (pre-v5) filesystems.
 */
static void
xfs_attr3_rmt_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	char		*ptr;
	int		len;
	xfs_daddr_t	bno;

	/* no verification of non-crc buffers */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	ptr = bp->b_addr;
	bno = bp->b_bn;
	len = BBTOB(bp->b_length);
	ASSERT(len >= XFS_LBSIZE(mp));

	while (len > 0) {
		if (!xfs_attr3_rmt_verify(mp, ptr, XFS_LBSIZE(mp), bno)) {
			XFS_CORRUPTION_ERROR(__func__,
					    XFS_ERRLEVEL_LOW, mp, bp->b_addr);
			xfs_buf_ioerror(bp, EFSCORRUPTED);
			return;
		}
		/* record the LSN before checksumming so it is covered */
		if (bip) {
			struct xfs_attr3_rmt_hdr *rmt;

			rmt = (struct xfs_attr3_rmt_hdr *)ptr;
			rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
		}
		xfs_update_cksum(ptr, XFS_LBSIZE(mp), XFS_ATTR3_RMT_CRC_OFF);

		/* advance one filesystem block */
		len -= XFS_LBSIZE(mp);
		ptr += XFS_LBSIZE(mp);
		bno += mp->m_bsize;
	}
	ASSERT(len == 0);
}
/* read/write verifiers for remote attribute value buffers */
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
	.verify_read = xfs_attr3_rmt_read_verify,
	.verify_write = xfs_attr3_rmt_write_verify,
};
/*
 * Initialize the remote-attribute header of one block.  Returns the
 * number of header bytes written: 0 on non-CRC filesystems (no header),
 * otherwise sizeof(struct xfs_attr3_rmt_hdr).  rm_lsn and the CRC are
 * filled in later by the write verifier.
 */
STATIC int
xfs_attr3_rmt_hdr_set(
	struct xfs_mount	*mp,
	void			*ptr,
	xfs_ino_t		ino,
	uint32_t		offset,
	uint32_t		size,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return 0;

	rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
	rmt->rm_offset = cpu_to_be32(offset);
	rmt->rm_bytes = cpu_to_be32(size);
	uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_uuid);
	rmt->rm_owner = cpu_to_be64(ino);
	rmt->rm_blkno = cpu_to_be64(bno);

	return sizeof(struct xfs_attr3_rmt_hdr);
}
/*
 * Helper functions to copy attribute data in and out of the one disk extents
 */

/*
 * Copy attribute value data out of a remote-extent buffer into *dst,
 * one filesystem block at a time.  On CRC filesystems each block's
 * header is validated against the expected owner/offset/length first.
 * Advances *offset, *valuelen and *dst as it consumes data.  Returns 0,
 * or EFSCORRUPTED on a header mismatch.
 */
STATIC int
xfs_attr_rmtval_copyout(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_ino_t	ino,
	int		*offset,
	int		*valuelen,
	char		**dst)
{
	char		*src = bp->b_addr;
	xfs_daddr_t	bno = bp->b_bn;
	int		len = BBTOB(bp->b_length);

	ASSERT(len >= XFS_LBSIZE(mp));

	while (len > 0 && *valuelen > 0) {
		int hdr_size = 0;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));

		/* last block may carry less than a full payload */
		byte_cnt = min_t(int, *valuelen, byte_cnt);

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			if (!xfs_attr3_rmt_hdr_ok(mp, src, ino, *offset,
						  byte_cnt, bno)) {
				xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
					bno, *offset, byte_cnt, ino);
				return EFSCORRUPTED;
			}
			hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
		}

		memcpy(*dst, src + hdr_size, byte_cnt);

		/* roll buffer forwards */
		len -= XFS_LBSIZE(mp);
		src += XFS_LBSIZE(mp);
		bno += mp->m_bsize;

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*dst += byte_cnt;
		*offset += byte_cnt;
	}
	return 0;
}
/*
 * Copy an attribute value from *@src into buffer @bp, one filesystem block
 * at a time, writing a remote attribute header at the start of each block
 * on CRC filesystems.  The unused tail of the final block is zeroed so no
 * stale data goes to disk.  @offset, @valuelen and *@src are advanced as
 * data is consumed.
 */
STATIC void
xfs_attr_rmtval_copyin(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_ino_t	ino,
	int		*offset,
	int		*valuelen,
	char		**src)
{
	char		*dst = bp->b_addr;
	xfs_daddr_t	bno = bp->b_bn;
	int		len = BBTOB(bp->b_length);
	ASSERT(len >= XFS_LBSIZE(mp));
	while (len > 0 && *valuelen > 0) {
		int hdr_size;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, XFS_LBSIZE(mp));
		byte_cnt = min(*valuelen, byte_cnt);
		/* hdr_size is 0 on non-CRC filesystems */
		hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
						 byte_cnt, bno);
		memcpy(dst + hdr_size, *src, byte_cnt);
		/*
		 * If this is the last block, zero the remainder of it.
		 * Check that we are actually the last block, too.
		 */
		if (byte_cnt + hdr_size < XFS_LBSIZE(mp)) {
			ASSERT(*valuelen - byte_cnt == 0);
			ASSERT(len == XFS_LBSIZE(mp));
			memset(dst + hdr_size + byte_cnt, 0,
			       XFS_LBSIZE(mp) - hdr_size - byte_cnt);
		}
		/* roll buffer forwards */
		len -= XFS_LBSIZE(mp);
		dst += XFS_LBSIZE(mp);
		bno += mp->m_bsize;
		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*src += byte_cnt;
		*offset += byte_cnt;
	}
}
/*
 * Read the value associated with an attribute from the out-of-line buffer
 * that we stored it in.
 *
 * Maps the remote extents with xfs_bmapi_read(), reads each mapped range
 * through xfs_attr3_rmt_buf_ops (so CRC/header verification happens in the
 * read verifier), and copies the payload into args->value.
 */
int
xfs_attr_rmtval_get(
	struct xfs_da_args	*args)
{
	struct xfs_bmbt_irec	map[ATTR_RMTVALUE_MAPSIZE];
	struct xfs_mount	*mp = args->dp->i_mount;
	struct xfs_buf		*bp;
	xfs_dablk_t		lblkno = args->rmtblkno;
	char			*dst = args->value;
	int			valuelen = args->valuelen;
	int			nmap;
	int			error;
	int			blkcnt = args->rmtblkcnt;
	int			i;
	int			offset = 0;
	trace_xfs_attr_rmtval_get(args);
	/* caller must actually want the value, not just its size */
	ASSERT(!(args->flags & ATTR_KERNOVAL));
	while (valuelen > 0) {
		nmap = ATTR_RMTVALUE_MAPSIZE;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap >= 1);
		for (i = 0; (i < nmap) && (valuelen > 0); i++) {
			xfs_daddr_t	dblkno;
			int		dblkcnt;
			/* remote value blocks are always fully allocated */
			ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
			       (map[i].br_startblock != HOLESTARTBLOCK));
			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
			dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
						   dblkno, dblkcnt, 0, &bp,
						   &xfs_attr3_rmt_buf_ops);
			if (error)
				return error;
			error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
							&offset, &valuelen,
							&dst);
			xfs_buf_relse(bp);
			if (error)
				return error;
			/* roll attribute extent map forwards */
			lblkno += map[i].br_blockcount;
			blkcnt -= map[i].br_blockcount;
		}
	}
	ASSERT(valuelen == 0);
	return 0;
}
/*
 * Write the value associated with an attribute into the out-of-line buffer
 * that we have defined for it.
 *
 * Two passes: first allocate the remote blocks (rolling the transaction
 * after each allocation), then map the allocated extents again and write
 * the value into them synchronously, so the data is known to be on disk
 * before the INCOMPLETE flag is cleared by the caller.
 *
 * Fixes over the previous version:
 *  - "committed" is now initialised; if xfs_bmapi_write() failed before
 *    xfs_bmap_finish() ran, the old error path ASSERTed on an
 *    uninitialised variable (undefined behaviour on DEBUG kernels).
 *  - stray comma operator after XFS_FSB_TO_DADDR() replaced by ';'.
 */
int
xfs_attr_rmtval_set(
	struct xfs_da_args	*args)
{
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_bmbt_irec	map;
	xfs_dablk_t		lblkno;
	xfs_fileoff_t		lfileoff = 0;
	char			*src = args->value;
	int			blkcnt;
	int			valuelen;
	int			nmap;
	int			error;
	int			offset = 0;
	trace_xfs_attr_rmtval_set(args);
	/*
	 * Find a "hole" in the attribute address space large enough for
	 * us to drop the new attribute's value into. Because CRC enable
	 * attributes have headers, we can't just do a straight byte to FSB
	 * conversion and have to take the header space into account.
	 */
	blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
	error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
						   XFS_ATTR_FORK);
	if (error)
		return error;
	args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff;
	args->rmtblkcnt = blkcnt;
	/*
	 * Roll through the "value", allocating blocks on disk as required.
	 */
	while (blkcnt > 0) {
		int		committed = 0;
		/*
		 * Allocate a single extent, up to the size of the value.
		 */
		xfs_bmap_init(args->flist, args->firstblock);
		nmap = 1;
		error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
				  blkcnt,
				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
				  args->firstblock, args->total, &map, &nmap,
				  args->flist);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			return error;
		}
		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one. We need the inode to be in all transactions.
		 */
		if (committed)
			xfs_trans_ijoin(args->trans, dp, 0);
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));
		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
		/*
		 * Start the next trans in the chain.
		 */
		error = xfs_trans_roll(&args->trans, dp);
		if (error)
			return error;
	}
	/*
	 * Roll through the "value", copying the attribute value to the
	 * already-allocated blocks. Blocks are written synchronously
	 * so that we can know they are all on disk before we turn off
	 * the INCOMPLETE flag.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	valuelen = args->valuelen;
	while (valuelen > 0) {
		struct xfs_buf	*bp;
		xfs_daddr_t	dblkno;
		int		dblkcnt;
		ASSERT(blkcnt > 0);
		xfs_bmap_init(args->flist, args->firstblock);
		nmap = 1;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
				       blkcnt, &map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));
		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
		bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0);
		if (!bp)
			return ENOMEM;
		/* write verifier stamps headers/CRCs on the way out */
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
				       &valuelen, &src);
		error = xfs_bwrite(bp);	/* GROT: NOTE: synchronous write */
		xfs_buf_relse(bp);
		if (error)
			return error;
		/* roll attribute extent map forwards */
		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}
	ASSERT(valuelen == 0);
	return 0;
}
/*
 * Remove the value associated with an attribute by deleting the
 * out-of-line buffer that it is stored on.
 *
 * First invalidates any cached copies of the remote blocks, then punches
 * the extents out of the attribute fork, rolling the transaction chain as
 * it goes.
 *
 * Fixes over the previous version:
 *  - "committed" is now initialised; if xfs_bunmapi() failed before
 *    xfs_bmap_finish() ran, the old error path ASSERTed on an
 *    uninitialised variable (undefined behaviour on DEBUG kernels).
 *  - stray comma operator after XFS_FSB_TO_DADDR() replaced by ';'.
 */
int
xfs_attr_rmtval_remove(
	struct xfs_da_args	*args)
{
	struct xfs_mount	*mp = args->dp->i_mount;
	xfs_dablk_t		lblkno;
	int			blkcnt;
	int			error;
	int			done;
	trace_xfs_attr_rmtval_remove(args);
	/*
	 * Roll through the "value", invalidating the attribute value's blocks.
	 * Note that args->rmtblkcnt is the minimum number of data blocks we'll
	 * see for a CRC enabled remote attribute. Each extent will have a
	 * header, and so we may have more blocks than we realise here. If we
	 * fail to map the blocks correctly, we'll have problems with the buffer
	 * lookups.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	while (blkcnt > 0) {
		struct xfs_bmbt_irec	map;
		struct xfs_buf		*bp;
		xfs_daddr_t		dblkno;
		int			dblkcnt;
		int			nmap;
		/*
		 * Try to remember where we decided to put the value.
		 */
		nmap = 1;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));
		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
		/*
		 * If the "remote" value is in the cache, remove it.
		 */
		bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK);
		if (bp) {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
			bp = NULL;
		}
		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}
	/*
	 * Keep de-allocating extents until the remote-value region is gone.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	done = 0;
	while (!done) {
		int		committed = 0;
		xfs_bmap_init(args->flist, args->firstblock);
		error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
				    XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
				    1, args->firstblock, args->flist,
				    &done);
		if (!error) {
			error = xfs_bmap_finish(&args->trans, args->flist,
						&committed);
		}
		if (error) {
			args->trans = NULL;
			xfs_bmap_cancel(args->flist);
			return error;
		}
		/*
		 * bmap_finish() may have committed the last trans and started
		 * a new one. We need the inode to be in all transactions.
		 */
		if (committed)
			xfs_trans_ijoin(args->trans, args->dp, 0);
		/*
		 * Close out trans and start the next one in the chain.
		 */
		error = xfs_trans_roll(&args->trans, args->dp);
		if (error)
			return error;
	}
	return 0;
}
| gpl-2.0 |
anryl/shooteruICS | drivers/media/video/rj54n1cb0c.c | 2647 | 37269 | /*
* Driver for RJ54N1CB0C CMOS Image Sensor from Sharp
*
* Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/rj54n1cb0c.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-chip-ident.h>
#define RJ54N1_DEV_CODE 0x0400
#define RJ54N1_DEV_CODE2 0x0401
#define RJ54N1_OUT_SEL 0x0403
#define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404
#define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405
#define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406
#define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407
#define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408
#define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409
#define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a
#define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b
#define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c
#define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d
#define RJ54N1_RESIZE_N 0x040e
#define RJ54N1_RESIZE_N_STEP 0x040f
#define RJ54N1_RESIZE_STEP 0x0410
#define RJ54N1_RESIZE_HOLD_H 0x0411
#define RJ54N1_RESIZE_HOLD_L 0x0412
#define RJ54N1_H_OBEN_OFS 0x0413
#define RJ54N1_V_OBEN_OFS 0x0414
#define RJ54N1_RESIZE_CONTROL 0x0415
#define RJ54N1_STILL_CONTROL 0x0417
#define RJ54N1_INC_USE_SEL_H 0x0425
#define RJ54N1_INC_USE_SEL_L 0x0426
#define RJ54N1_MIRROR_STILL_MODE 0x0427
#define RJ54N1_INIT_START 0x0428
#define RJ54N1_SCALE_1_2_LEV 0x0429
#define RJ54N1_SCALE_4_LEV 0x042a
#define RJ54N1_Y_GAIN 0x04d8
#define RJ54N1_APT_GAIN_UP 0x04fa
#define RJ54N1_RA_SEL_UL 0x0530
#define RJ54N1_BYTE_SWAP 0x0531
#define RJ54N1_OUT_SIGPO 0x053b
#define RJ54N1_WB_SEL_WEIGHT_I 0x054e
#define RJ54N1_BIT8_WB 0x0569
#define RJ54N1_HCAPS_WB 0x056a
#define RJ54N1_VCAPS_WB 0x056b
#define RJ54N1_HCAPE_WB 0x056c
#define RJ54N1_VCAPE_WB 0x056d
#define RJ54N1_EXPOSURE_CONTROL 0x058c
#define RJ54N1_FRAME_LENGTH_S_H 0x0595
#define RJ54N1_FRAME_LENGTH_S_L 0x0596
#define RJ54N1_FRAME_LENGTH_P_H 0x0597
#define RJ54N1_FRAME_LENGTH_P_L 0x0598
#define RJ54N1_PEAK_H 0x05b7
#define RJ54N1_PEAK_50 0x05b8
#define RJ54N1_PEAK_60 0x05b9
#define RJ54N1_PEAK_DIFF 0x05ba
#define RJ54N1_IOC 0x05ef
#define RJ54N1_TG_BYPASS 0x0700
#define RJ54N1_PLL_L 0x0701
#define RJ54N1_PLL_N 0x0702
#define RJ54N1_PLL_EN 0x0704
#define RJ54N1_RATIO_TG 0x0706
#define RJ54N1_RATIO_T 0x0707
#define RJ54N1_RATIO_R 0x0708
#define RJ54N1_RAMP_TGCLK_EN 0x0709
#define RJ54N1_OCLK_DSP 0x0710
#define RJ54N1_RATIO_OP 0x0711
#define RJ54N1_RATIO_O 0x0712
#define RJ54N1_OCLK_SEL_EN 0x0713
#define RJ54N1_CLK_RST 0x0717
#define RJ54N1_RESET_STANDBY 0x0718
#define RJ54N1_FWFLG 0x07fe
#define E_EXCLK (1 << 7)
#define SOFT_STDBY (1 << 4)
#define SEN_RSTX (1 << 2)
#define TG_RSTX (1 << 1)
#define DSP_RSTX (1 << 0)
#define RESIZE_HOLD_SEL (1 << 2)
#define RESIZE_GO (1 << 1)
/*
* When cropping, the camera automatically centers the cropped region, there
* doesn't seem to be a way to specify an explicit location of the rectangle.
*/
#define RJ54N1_COLUMN_SKIP 0
#define RJ54N1_ROW_SKIP 0
#define RJ54N1_MAX_WIDTH 1600
#define RJ54N1_MAX_HEIGHT 1200
#define PLL_L 2
#define PLL_N 0x31
/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */
/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
struct rj54n1_datafmt {
	enum v4l2_mbus_pixelcode code;		/* media bus pixel code */
	enum v4l2_colorspace colorspace;	/* fixed colorspace for this code */
};
/* Find a data format by a pixel code in an array */
static const struct rj54n1_datafmt *rj54n1_find_datafmt(
enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt,
int n)
{
int i;
for (i = 0; i < n; i++)
if (fmt[i].code == code)
return fmt + i;
return NULL;
}
/* All media bus formats the sensor supports, with their colorspaces */
static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
	{V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
	{V4L2_MBUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
	{V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
	{V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
	{V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
	{V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
	{V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
	{V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
	{V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
};
/* Divider values programmed into the RJ54N1_RATIO_* registers */
struct rj54n1_clock_div {
	u8 ratio_tg; /* can be 0 or an odd number */
	u8 ratio_t;
	u8 ratio_r;
	u8 ratio_op;
	u8 ratio_o;
};
/* Per-device driver state */
struct rj54n1 {
	struct v4l2_subdev subdev;	/* embedded subdev, see to_rj54n1() */
	struct rj54n1_clock_div clk_div;
	const struct rj54n1_datafmt *fmt;	/* current bus format */
	struct v4l2_rect rect;	/* Sensor window */
	unsigned int tgclk_mhz;	/* TG clock, used in antiflicker calculation */
	bool auto_wb;		/* auto white balance active */
	unsigned short width;	/* Output window */
	unsigned short height;
	unsigned short resize;	/* Sensor * 1024 / resize = Output */
	unsigned short scale;
	u8 bank;		/* cached register bank (high address byte) */
};
/* One register address/value pair for table-driven initialisation */
struct rj54n1_reg_val {
	u16 reg;
	u8 val;
};
static const struct rj54n1_reg_val bank_4[] = {
{0x417, 0},
{0x42c, 0},
{0x42d, 0xf0},
{0x42e, 0},
{0x42f, 0x50},
{0x430, 0xf5},
{0x431, 0x16},
{0x432, 0x20},
{0x433, 0},
{0x434, 0xc8},
{0x43c, 8},
{0x43e, 0x90},
{0x445, 0x83},
{0x4ba, 0x58},
{0x4bb, 4},
{0x4bc, 0x20},
{0x4db, 4},
{0x4fe, 2},
};
static const struct rj54n1_reg_val bank_5[] = {
{0x514, 0},
{0x516, 0},
{0x518, 0},
{0x51a, 0},
{0x51d, 0xff},
{0x56f, 0x28},
{0x575, 0x40},
{0x5bc, 0x48},
{0x5c1, 6},
{0x5e5, 0x11},
{0x5e6, 0x43},
{0x5e7, 0x33},
{0x5e8, 0x21},
{0x5e9, 0x30},
{0x5ea, 0x0},
{0x5eb, 0xa5},
{0x5ec, 0xff},
{0x5fe, 2},
};
static const struct rj54n1_reg_val bank_7[] = {
{0x70a, 0},
{0x714, 0xff},
{0x715, 0xff},
{0x716, 0x1f},
{0x7FE, 2},
};
static const struct rj54n1_reg_val bank_8[] = {
{0x800, 0x00},
{0x801, 0x01},
{0x802, 0x61},
{0x805, 0x00},
{0x806, 0x00},
{0x807, 0x00},
{0x808, 0x00},
{0x809, 0x01},
{0x80A, 0x61},
{0x80B, 0x00},
{0x80C, 0x01},
{0x80D, 0x00},
{0x80E, 0x00},
{0x80F, 0x00},
{0x810, 0x00},
{0x811, 0x01},
{0x812, 0x61},
{0x813, 0x00},
{0x814, 0x11},
{0x815, 0x00},
{0x816, 0x41},
{0x817, 0x00},
{0x818, 0x51},
{0x819, 0x01},
{0x81A, 0x1F},
{0x81B, 0x00},
{0x81C, 0x01},
{0x81D, 0x00},
{0x81E, 0x11},
{0x81F, 0x00},
{0x820, 0x41},
{0x821, 0x00},
{0x822, 0x51},
{0x823, 0x00},
{0x824, 0x00},
{0x825, 0x00},
{0x826, 0x47},
{0x827, 0x01},
{0x828, 0x4F},
{0x829, 0x00},
{0x82A, 0x00},
{0x82B, 0x00},
{0x82C, 0x30},
{0x82D, 0x00},
{0x82E, 0x40},
{0x82F, 0x00},
{0x830, 0xB3},
{0x831, 0x00},
{0x832, 0xE3},
{0x833, 0x00},
{0x834, 0x00},
{0x835, 0x00},
{0x836, 0x00},
{0x837, 0x00},
{0x838, 0x00},
{0x839, 0x01},
{0x83A, 0x61},
{0x83B, 0x00},
{0x83C, 0x01},
{0x83D, 0x00},
{0x83E, 0x00},
{0x83F, 0x00},
{0x840, 0x00},
{0x841, 0x01},
{0x842, 0x61},
{0x843, 0x00},
{0x844, 0x1D},
{0x845, 0x00},
{0x846, 0x00},
{0x847, 0x00},
{0x848, 0x00},
{0x849, 0x01},
{0x84A, 0x1F},
{0x84B, 0x00},
{0x84C, 0x05},
{0x84D, 0x00},
{0x84E, 0x19},
{0x84F, 0x01},
{0x850, 0x21},
{0x851, 0x01},
{0x852, 0x5D},
{0x853, 0x00},
{0x854, 0x00},
{0x855, 0x00},
{0x856, 0x19},
{0x857, 0x01},
{0x858, 0x21},
{0x859, 0x00},
{0x85A, 0x00},
{0x85B, 0x00},
{0x85C, 0x00},
{0x85D, 0x00},
{0x85E, 0x00},
{0x85F, 0x00},
{0x860, 0xB3},
{0x861, 0x00},
{0x862, 0xE3},
{0x863, 0x00},
{0x864, 0x00},
{0x865, 0x00},
{0x866, 0x00},
{0x867, 0x00},
{0x868, 0x00},
{0x869, 0xE2},
{0x86A, 0x00},
{0x86B, 0x01},
{0x86C, 0x06},
{0x86D, 0x00},
{0x86E, 0x00},
{0x86F, 0x00},
{0x870, 0x60},
{0x871, 0x8C},
{0x872, 0x10},
{0x873, 0x00},
{0x874, 0xE0},
{0x875, 0x00},
{0x876, 0x27},
{0x877, 0x01},
{0x878, 0x00},
{0x879, 0x00},
{0x87A, 0x00},
{0x87B, 0x03},
{0x87C, 0x00},
{0x87D, 0x00},
{0x87E, 0x00},
{0x87F, 0x00},
{0x880, 0x00},
{0x881, 0x00},
{0x882, 0x00},
{0x883, 0x00},
{0x884, 0x00},
{0x885, 0x00},
{0x886, 0xF8},
{0x887, 0x00},
{0x888, 0x03},
{0x889, 0x00},
{0x88A, 0x64},
{0x88B, 0x00},
{0x88C, 0x03},
{0x88D, 0x00},
{0x88E, 0xB1},
{0x88F, 0x00},
{0x890, 0x03},
{0x891, 0x01},
{0x892, 0x1D},
{0x893, 0x00},
{0x894, 0x03},
{0x895, 0x01},
{0x896, 0x4B},
{0x897, 0x00},
{0x898, 0xE5},
{0x899, 0x00},
{0x89A, 0x01},
{0x89B, 0x00},
{0x89C, 0x01},
{0x89D, 0x04},
{0x89E, 0xC8},
{0x89F, 0x00},
{0x8A0, 0x01},
{0x8A1, 0x01},
{0x8A2, 0x61},
{0x8A3, 0x00},
{0x8A4, 0x01},
{0x8A5, 0x00},
{0x8A6, 0x00},
{0x8A7, 0x00},
{0x8A8, 0x00},
{0x8A9, 0x00},
{0x8AA, 0x7F},
{0x8AB, 0x03},
{0x8AC, 0x00},
{0x8AD, 0x00},
{0x8AE, 0x00},
{0x8AF, 0x00},
{0x8B0, 0x00},
{0x8B1, 0x00},
{0x8B6, 0x00},
{0x8B7, 0x01},
{0x8B8, 0x00},
{0x8B9, 0x00},
{0x8BA, 0x02},
{0x8BB, 0x00},
{0x8BC, 0xFF},
{0x8BD, 0x00},
{0x8FE, 2},
};
static const struct rj54n1_reg_val bank_10[] = {
{0x10bf, 0x69}
};
/* Clock dividers - these are default register values, divider = register + 1 */
static const struct rj54n1_clock_div clk_div = {
	/* written to the RJ54N1_RATIO_* registers by rj54n1_set_clock() */
	.ratio_tg	= 3 /* default: 5 */,
	.ratio_t	= 4 /* default: 1 */,
	.ratio_r	= 4 /* default: 0 */,
	.ratio_op	= 1 /* default: 5 */,
	.ratio_o	= 9 /* default: 0 */,
};
/* Map an i2c client back to the driver state holding its subdev. */
static struct rj54n1 *to_rj54n1(const struct i2c_client *client)
{
	return container_of(i2c_get_clientdata(client), struct rj54n1, subdev);
}
/*
 * Read a sensor register.  The high byte of @reg selects the register
 * bank: the bank-select register (0xff) is only rewritten when the cached
 * bank differs.  Returns the register value or a negative errno.
 */
static int reg_read(struct i2c_client *client, const u16 reg)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int ret;
	/* set bank */
	if (rj54n1->bank != reg >> 8) {
		dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
		ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
		if (ret < 0)
			return ret;
		rj54n1->bank = reg >> 8;
	}
	return i2c_smbus_read_byte_data(client, reg & 0xff);
}
/*
 * Write a sensor register.  Like reg_read(), switches the bank-select
 * register (0xff) first if the cached bank differs from the high byte of
 * @reg.  Returns 0 or a negative errno.
 */
static int reg_write(struct i2c_client *client, const u16 reg,
		     const u8 data)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int ret;
	/* set bank */
	if (rj54n1->bank != reg >> 8) {
		dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8);
		ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8);
		if (ret < 0)
			return ret;
		rj54n1->bank = reg >> 8;
	}
	dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data);
	return i2c_smbus_write_byte_data(client, reg & 0xff, data);
}
/* Read-modify-write: replace only the bits of @reg selected by @mask. */
static int reg_set(struct i2c_client *client, const u16 reg,
		   const u8 data, const u8 mask)
{
	int old = reg_read(client, reg);
	u8 val;
	if (old < 0)
		return old;
	val = (old & ~mask) | (data & mask);
	return reg_write(client, reg, val);
}
static int reg_write_multiple(struct i2c_client *client,
const struct rj54n1_reg_val *rv, const int n)
{
int i, ret;
for (i = 0; i < n; i++) {
ret = reg_write(client, rv->reg, rv->val);
if (ret < 0)
return ret;
rv++;
}
return 0;
}
/* Enumerate supported media bus formats by index into rj54n1_colour_fmts. */
static int rj54n1_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
			   enum v4l2_mbus_pixelcode *code)
{
	if (index >= ARRAY_SIZE(rj54n1_colour_fmts))
		return -EINVAL;
	*code = rj54n1_colour_fmts[index].code;
	return 0;
}
/*
 * Start/stop streaming by toggling bit 7 of STILL_CONTROL: the bit is
 * cleared when enabling (preview/stream) and set when disabling (still).
 */
static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	/* Switch between preview and still shot modes */
	return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
}
static int rj54n1_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
/* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */
if (flags & SOCAM_PCLK_SAMPLE_RISING)
return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4);
else
return reg_write(client, RJ54N1_OUT_SIGPO, 0);
}
/* Report the bus signalling this sensor supports, filtered by board flags. */
static unsigned long rj54n1_query_bus_param(struct soc_camera_device *icd)
{
	struct soc_camera_link *icl = to_soc_camera_link(icd);
	const unsigned long flags =
		SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING |
		SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
		SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
		SOCAM_DATA_ACTIVE_HIGH;
	return soc_camera_apply_sensor_flags(icl, flags);
}
/*
 * Program a width/height pair into the sensor.  The low bytes go into
 * @reg_x and @reg_y; bits 8..10 of the width (register bits 4..6) and
 * bits 8..10 of the height (register bits 0..2) share the high-bits
 * register @reg_xy.
 */
static int rj54n1_set_rect(struct i2c_client *client,
			   u16 reg_x, u16 reg_y, u16 reg_xy,
			   u32 width, u32 height)
{
	int ret;
	ret = reg_write(client, reg_xy,
			((width >> 4) & 0x70) |
			((height >> 8) & 7));
	if (!ret)
		ret = reg_write(client, reg_x, width & 0xff);
	if (!ret)
		ret = reg_write(client, reg_y, height & 0xff);
	return ret;
}
/*
 * Some commands, specifically certain initialisation sequences, require
 * a commit operation.
 *
 * Pulses the INIT_START register: write 1, give the sensor 10ms to act,
 * then write 0.  Returns 0 or the first register-write error.
 */
static int rj54n1_commit(struct i2c_client *client)
{
	int ret = reg_write(client, RJ54N1_INIT_START, 1);
	msleep(10);
	if (!ret)
		ret = reg_write(client, RJ54N1_INIT_START, 0);
	return ret;
}
static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
s32 *out_w, s32 *out_h);
/*
 * Set the sensor crop window.  The requested rectangle is clamped to the
 * sensor limits, an output size preserving the currently configured resize
 * ratio is derived, and both are handed to rj54n1_sensor_scale(); the
 * cached geometry is updated from the actual values on success.  Note:
 * the hardware centres the crop region itself (see comment at the
 * RJ54N1_COLUMN_SKIP definition), so only the size matters here.
 */
static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	struct v4l2_rect *rect = &a->c;
	int dummy = 0, output_w, output_h,
		input_w = rect->width, input_h = rect->height;
	int ret;
	/* arbitrary minimum width and height, edges unimportant */
	soc_camera_limit_side(&dummy, &input_w,
		     RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH);
	soc_camera_limit_side(&dummy, &input_h,
		     RJ54N1_ROW_SKIP, 8, RJ54N1_MAX_HEIGHT);
	/* output size keeping the current 1024-based resize ratio */
	output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
	output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
	dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n",
		input_w, input_h, rj54n1->resize, output_w, output_h);
	ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
	if (ret < 0)
		return ret;
	/* cache the geometry the hardware actually accepted */
	rj54n1->width = output_w;
	rj54n1->height = output_h;
	rj54n1->resize = ret;
	rj54n1->rect.width = input_w;
	rj54n1->rect.height = input_h;
	return 0;
}
/* Return the currently cached sensor crop window. */
static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
	struct rj54n1 *rj54n1 = to_rj54n1(v4l2_get_subdevdata(sd));
	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	a->c = rj54n1->rect;
	return 0;
}
static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
a->bounds.left = RJ54N1_COLUMN_SKIP;
a->bounds.top = RJ54N1_ROW_SKIP;
a->bounds.width = RJ54N1_MAX_WIDTH;
a->bounds.height = RJ54N1_MAX_HEIGHT;
a->defrect = a->bounds;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
a->pixelaspect.denominator = 1;
return 0;
}
static int rj54n1_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
mf->code = rj54n1->fmt->code;
mf->colorspace = rj54n1->fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
mf->width = rj54n1->width;
mf->height = rj54n1->height;
return 0;
}
/*
* The actual geometry configuration routine. It scales the input window into
* the output one, updates the window sizes and returns an error or the resize
* coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
*/
static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
			       s32 *out_w, s32 *out_h)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
		output_w = *out_w, output_h = *out_h;
	u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
	unsigned int peak, peak_50, peak_60;
	int ret;
	/*
	 * We have a problem with crops, where the window is larger than 512x384
	 * and output window is larger than a half of the input one. In this
	 * case we have to either reduce the input window to equal or below
	 * 512x384 or the output window to equal or below 1/2 of the input.
	 */
	if (output_w > max(512U, input_w / 2)) {
		if (2 * output_w > RJ54N1_MAX_WIDTH) {
			input_w = RJ54N1_MAX_WIDTH;
			output_w = RJ54N1_MAX_WIDTH / 2;
		} else {
			input_w = output_w * 2;
		}
		dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
			input_w, output_w);
	}
	if (output_h > max(384U, input_h / 2)) {
		if (2 * output_h > RJ54N1_MAX_HEIGHT) {
			input_h = RJ54N1_MAX_HEIGHT;
			output_h = RJ54N1_MAX_HEIGHT / 2;
		} else {
			input_h = output_h * 2;
		}
		dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
			input_h, output_h);
	}
	/* Idea: use the read mode for snapshots, handle separate geometries */
	ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
			      RJ54N1_Y_OUTPUT_SIZE_S_L,
			      RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
	if (!ret)
		ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L,
			      RJ54N1_Y_OUTPUT_SIZE_P_L,
			      RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h);
	if (ret < 0)
		return ret;
	/* resize is in 1/1024 units: 1024 means 1:1; upscaling clamps to 1:1 */
	if (output_w > input_w && output_h > input_h) {
		input_w = output_w;
		input_h = output_h;
		resize = 1024;
	} else {
		unsigned int resize_x, resize_y;
		/* per-axis ratios, rounded to nearest */
		resize_x = (input_w * 1024 + output_w / 2) / output_w;
		resize_y = (input_h * 1024 + output_h / 2) / output_h;
		/* We want max(resize_x, resize_y), check if it still fits */
		if (resize_x > resize_y &&
		    (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
			resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
				output_h;
		else if (resize_y > resize_x &&
			 (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
			resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
				output_w;
		else
			resize = max(resize_x, resize_y);
		/* Prohibited value ranges */
		switch (resize) {
		case 2040 ... 2047:
			resize = 2039;
			break;
		case 4080 ... 4095:
			resize = 4079;
			break;
		case 8160 ... 8191:
			resize = 8159;
			break;
		case 16320 ... 16384:
			resize = 16319;
		}
	}
	/* Set scaling */
	ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff);
	if (!ret)
		ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8);
	if (ret < 0)
		return ret;
	/*
	 * Configure a skipping bitmask. The sensor will select a skipping value
	 * among set bits automatically. This is very unclear in the datasheet
	 * too. I was told, in this register one enables all skipping values,
	 * that are required for a specific resize, and the camera selects
	 * automatically, which ones to use. But it is unclear how to identify,
	 * which cropping values are needed. Secondly, why don't we just set all
	 * bits and let the camera choose? Would it increase processing time and
	 * reduce the framerate? Using 0xfffc for INC_USE_SEL doesn't seem to
	 * improve the image quality or stability for larger frames (see comment
	 * above), but I didn't check the framerate.
	 */
	skip = min(resize / 1024, 15U);
	inc_sel = 1 << skip;
	if (inc_sel <= 2)
		inc_sel = 0xc;
	else if (resize & 1023 && skip < 15)
		inc_sel |= 1 << (skip + 1);
	ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc);
	if (!ret)
		ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);
	if (!rj54n1->auto_wb) {
		/* Auto white balance window */
		wb_left = output_w / 16;
		wb_right = (3 * output_w / 4 - 3) / 4;
		wb_top = output_h / 16;
		wb_bottom = (3 * output_h / 4 - 3) / 4;
		/* high bits of the four window coordinates share one register */
		wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
			((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);
		if (!ret)
			ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
		if (!ret)
			ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
		if (!ret)
			ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
		if (!ret)
			ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
		if (!ret)
			ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
	}
	/* Antiflicker */
	peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
		10000;
	peak_50 = peak / 6;
	peak_60 = peak / 5;
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_H,
				((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);
	/* Start resizing */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
				RESIZE_HOLD_SEL | RESIZE_GO | 1);
	if (ret < 0)
		return ret;
	/* Constant taken from manufacturer's example */
	msleep(230);
	ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1);
	if (ret < 0)
		return ret;
	/* report back the sensor window implied by the achieved ratio */
	*in_w = (output_w * resize + 512) / 1024;
	*in_h = (output_h * resize + 512) / 1024;
	*out_w = output_w;
	*out_h = output_h;
	dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
		*in_w, *in_h, resize, output_w, output_h, skip);
	return resize;
}
/*
 * Bring up the sensor clock tree: leave stand-by, program the PLL and the
 * TG/OCLK dividers from rj54n1->clk_div, release the sensor reset, enable
 * the PLL, and verify via CLK_RST that the clock block came out of reset.
 * Returns 0 on success, -EIO if the clock reset read-back fails, or a
 * register-access errno.
 */
static int rj54n1_set_clock(struct i2c_client *client)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int ret;
	/* Enable external clock */
	ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
	/* Leave stand-by. Note: use this when implementing suspend / resume */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);
	if (!ret)
		ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
	if (!ret)
		ret = reg_write(client, RJ54N1_PLL_N, PLL_N);
	/* TGCLK dividers */
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_TG,
				rj54n1->clk_div.ratio_tg);
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_T,
				rj54n1->clk_div.ratio_t);
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_R,
				rj54n1->clk_div.ratio_r);
	/* Enable TGCLK & RAMP */
	if (!ret)
		ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3);
	/* Disable clock output */
	if (!ret)
		ret = reg_write(client, RJ54N1_OCLK_DSP, 0);
	/* Set divisors */
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_OP,
				rj54n1->clk_div.ratio_op);
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_O,
				rj54n1->clk_div.ratio_o);
	/* Enable OCLK */
	if (!ret)
		ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
	/* Use PLL for Timing Generator, write 2 to reserved bits */
	if (!ret)
		ret = reg_write(client, RJ54N1_TG_BYPASS, 2);
	/* Take sensor out of reset */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY,
				E_EXCLK | SEN_RSTX);
	/* Enable PLL */
	if (!ret)
		ret = reg_write(client, RJ54N1_PLL_EN, 1);
	/* Wait for PLL to stabilise */
	msleep(10);
	/* Enable clock to frequency divider */
	if (!ret)
		ret = reg_write(client, RJ54N1_CLK_RST, 1);
	if (!ret)
		ret = reg_read(client, RJ54N1_CLK_RST);
	/* read-back must confirm the value we just wrote */
	if (ret != 1) {
		dev_err(&client->dev,
			"Resetting RJ54N1CB0C clock failed: %d!\n", ret);
		return -EIO;
	}
	/* Start the PLL */
	ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);
	/* Enable OCLK */
	if (!ret)
		ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);
	return ret;
}
/*
 * Full sensor initialisation: clock bring-up, bank register tables,
 * binning/resize defaults, gain, mirroring, auto-exposure area; reads the
 * current auto-WB state into the driver cache, then commits and releases
 * the DSP/TG/sensor resets.  Returns 0 or the first error.
 */
static int rj54n1_reg_init(struct i2c_client *client)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int ret = rj54n1_set_clock(client);
	if (!ret)
		ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7));
	if (!ret)
		ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10));
	/* Set binning divisors */
	if (!ret)
		ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4));
	if (!ret)
		ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf);
	/* Switch to fixed resize mode */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
				RESIZE_HOLD_SEL | 1);
	/* Set gain */
	if (!ret)
		ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);
	/*
	 * Mirror the image back: default is upside down and left-to-right...
	 * Set manual preview / still shot switching
	 */
	if (!ret)
		ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);
	if (!ret)
		ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));
	/* Auto exposure area */
	if (!ret)
		ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
	/* Check current auto WB config */
	if (!ret)
		ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
	if (ret >= 0) {
		/* bit 7 indicates auto white balance; cache it */
		rj54n1->auto_wb = ret & 0x80;
		ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
	}
	if (!ret)
		ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY,
				E_EXCLK | DSP_RSTX | SEN_RSTX);
	/* Commit init */
	if (!ret)
		ret = rj54n1_commit(client);
	/* Take DSP, TG, sensor out of reset */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY,
				E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);
	/* Start register update? Same register as 0x?FE in many bank_* sets */
	if (!ret)
		ret = reg_write(client, RJ54N1_FWFLG, 2);
	/* Constant taken from manufacturer's example */
	msleep(700);
	return ret;
}
/*
 * Validate and adjust a requested format without applying it: unsupported
 * pixel codes fall back to the current format, field and colorspace are
 * forced, and width/height are bounded to the sensor limits (with 2-pixel
 * alignment for the raw Bayer codes, since align is passed as the power-of-2
 * alignment exponent to v4l_bound_align_image()).  Always returns 0.
 */
static int rj54n1_try_fmt(struct v4l2_subdev *sd,
			  struct v4l2_mbus_framefmt *mf)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	const struct rj54n1_datafmt *fmt;
	/* raw Bayer modes need aligned geometry */
	int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
		mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE ||
		mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE ||
		mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE ||
		mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE;
	dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
		__func__, mf->code, mf->width, mf->height);
	fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
				  ARRAY_SIZE(rj54n1_colour_fmts));
	if (!fmt) {
		/* fall back to the currently configured format */
		fmt = rj54n1->fmt;
		mf->code = fmt->code;
	}
	mf->field	= V4L2_FIELD_NONE;
	mf->colorspace	= fmt->colorspace;
	v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
			      &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
	return 0;
}
/*
 * rj54n1_s_fmt() - program the sensor for the requested mbus format.
 *
 * Validates the format via .try_fmt(), re-runs the full register init if
 * the sensor was powered down, selects output mode / byte order for the
 * chosen code, and finally configures the scaler.  On success the chosen
 * geometry is written back into @mf and cached in the driver state.
 */
static int rj54n1_s_fmt(struct v4l2_subdev *sd,
			struct v4l2_mbus_framefmt *mf)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	const struct rj54n1_datafmt *fmt;
	int output_w, output_h, max_w, max_h,
		input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
	int ret;
	/*
	 * The host driver can call us without .try_fmt(), so, we have to take
	 * care ourselves
	 */
	rj54n1_try_fmt(sd, mf);
	/*
	 * Verify if the sensor has just been powered on. TODO: replace this
	 * with proper PM, when a suitable API is available.
	 */
	ret = reg_read(client, RJ54N1_RESET_STANDBY);
	if (ret < 0)
		return ret;
	if (!(ret & E_EXCLK)) {
		/* Clock disabled: the sensor lost its state, re-init it */
		ret = rj54n1_reg_init(client);
		if (ret < 0)
			return ret;
	}
	dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
		__func__, mf->code, mf->width, mf->height);
	/* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
	switch (mf->code) {
	case V4L2_MBUS_FMT_YUYV8_2X8:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		break;
	case V4L2_MBUS_FMT_YVYU8_2X8:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		break;
	case V4L2_MBUS_FMT_RGB565_2X8_LE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		break;
	case V4L2_MBUS_FMT_RGB565_2X8_BE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		break;
	case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
		break;
	case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
		break;
	case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
		break;
	case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
		break;
	case V4L2_MBUS_FMT_SBGGR10_1X10:
		ret = reg_write(client, RJ54N1_OUT_SEL, 5);
		break;
	default:
		ret = -EINVAL;
	}
	/* Special case: a raw mode with 10 bits of data per clock tick */
	if (!ret)
		ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
			      (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2);
	if (ret < 0)
		return ret;
	/* Supported scales 1:1 >= scale > 1:16 */
	max_w = mf->width * (16 * 1024 - 1) / 1024;
	if (input_w > max_w)
		input_w = max_w;
	max_h = mf->height * (16 * 1024 - 1) / 1024;
	if (input_h > max_h)
		input_h = max_h;
	output_w = mf->width;
	output_h = mf->height;
	/* rj54n1_sensor_scale() returns the resize factor on success */
	ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
	if (ret < 0)
		return ret;
	fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
				  ARRAY_SIZE(rj54n1_colour_fmts));
	/* Cache the new configuration for g_fmt/g_crop etc. */
	rj54n1->fmt = fmt;
	rj54n1->resize = ret;
	rj54n1->rect.width = input_w;
	rj54n1->rect.height = input_h;
	rj54n1->width = output_w;
	rj54n1->height = output_h;
	mf->width = output_w;
	mf->height = output_h;
	mf->field = V4L2_FIELD_NONE;
	mf->colorspace = fmt->colorspace;
	return 0;
}
/* Report the chip identity for VIDIOC_DBG_G_CHIP_IDENT. */
static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
			       struct v4l2_dbg_chip_ident *id)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
		return -EINVAL;
	if (id->match.addr != client->addr)
		return -ENODEV;

	id->ident = V4L2_IDENT_RJ54N1CB0C;
	id->revision = 0;

	return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * rj54n1_g_register() - debug read of a single sensor register.
 * Only addresses 0x400..0x1fff are accepted.
 */
static int rj54n1_g_register(struct v4l2_subdev *sd,
			     struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
	    reg->reg < 0x400 || reg->reg > 0x1fff)
		/* Registers > 0x0800 are only available from Sharp support */
		return -EINVAL;
	if (reg->match.addr != client->addr)
		return -ENODEV;
	reg->size = 1;
	reg->val = reg_read(client, reg->reg);
	/* A negative error from reg_read() shows up here as val > 0xff */
	if (reg->val > 0xff)
		return -EIO;
	return 0;
}
/*
 * rj54n1_s_register() - debug write of a single sensor register.
 * Only addresses 0x400..0x1fff are accepted.
 */
static int rj54n1_s_register(struct v4l2_subdev *sd,
			     struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	/* Registers >= 0x0800 are only available from Sharp support */
	if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR ||
	    reg->reg < 0x400 || reg->reg > 0x1fff)
		return -EINVAL;

	if (reg->match.addr != client->addr)
		return -ENODEV;

	return reg_write(client, reg->reg, reg->val) < 0 ? -EIO : 0;
}
#endif
/* V4L2 controls advertised through the soc-camera layer. */
static const struct v4l2_queryctrl rj54n1_controls[] = {
	{
		.id = V4L2_CID_VFLIP,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "Flip Vertically",
		.minimum = 0,
		.maximum = 1,
		.step = 1,
		.default_value = 0,
	}, {
		.id = V4L2_CID_HFLIP,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "Flip Horizontally",
		.minimum = 0,
		.maximum = 1,
		.step = 1,
		.default_value = 0,
	}, {
		/* Mapped to RJ54N1_Y_GAIN; register value is 2 * control */
		.id = V4L2_CID_GAIN,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Gain",
		.minimum = 0,
		.maximum = 127,
		.step = 1,
		.default_value = 66,
		.flags = V4L2_CTRL_FLAG_SLIDER,
	}, {
		.id = V4L2_CID_AUTO_WHITE_BALANCE,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "Auto white balance",
		.minimum = 0,
		.maximum = 1,
		.step = 1,
		.default_value = 1,
	},
};
/* soc-camera operations: bus parameters and the control list above. */
static struct soc_camera_ops rj54n1_ops = {
	.set_bus_param = rj54n1_set_bus_param,
	.query_bus_param = rj54n1_query_bus_param,
	.controls = rj54n1_controls,
	.num_controls = ARRAY_SIZE(rj54n1_controls),
};
/*
 * rj54n1_g_ctrl() - read the current value of a V4L2 control.
 * Flip controls are stored inverted in RJ54N1_MIRROR_STILL_MODE.
 */
static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int reg;

	switch (ctrl->id) {
	case V4L2_CID_VFLIP:
	case V4L2_CID_HFLIP:
		reg = reg_read(client, RJ54N1_MIRROR_STILL_MODE);
		if (reg < 0)
			return -EIO;
		/* bit 0 = vertical, bit 1 = horizontal; set bit == no flip */
		ctrl->value = !(reg &
				(ctrl->id == V4L2_CID_VFLIP ? 1 : 2));
		break;
	case V4L2_CID_GAIN:
		reg = reg_read(client, RJ54N1_Y_GAIN);
		if (reg < 0)
			return -EIO;
		ctrl->value = reg / 2;
		break;
	case V4L2_CID_AUTO_WHITE_BALANCE:
		/* Cached on init and on every s_ctrl */
		ctrl->value = rj54n1->auto_wb;
		break;
	}

	return 0;
}
/*
 * rj54n1_s_ctrl() - apply a new V4L2 control value to the hardware.
 * The control value of the flip controls is the inverse of the
 * corresponding register bit.
 */
static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	const struct v4l2_queryctrl *qctrl =
		soc_camera_find_qctrl(&rj54n1_ops, ctrl->id);
	int ret;

	if (!qctrl)
		return -EINVAL;

	switch (ctrl->id) {
	case V4L2_CID_VFLIP:
		ret = reg_set(client, RJ54N1_MIRROR_STILL_MODE,
			      ctrl->value ? 0 : 1, 1);
		if (ret < 0)
			return -EIO;
		break;
	case V4L2_CID_HFLIP:
		ret = reg_set(client, RJ54N1_MIRROR_STILL_MODE,
			      ctrl->value ? 0 : 2, 2);
		if (ret < 0)
			return -EIO;
		break;
	case V4L2_CID_GAIN:
		if (ctrl->value > qctrl->maximum ||
		    ctrl->value < qctrl->minimum)
			return -EINVAL;
		if (reg_write(client, RJ54N1_Y_GAIN, ctrl->value * 2) < 0)
			return -EIO;
		break;
	case V4L2_CID_AUTO_WHITE_BALANCE:
		/* Auto WB area - whole image */
		if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->value << 7,
			    0x80) < 0)
			return -EIO;
		rj54n1->auto_wb = ctrl->value;
		break;
	}

	return 0;
}
/* Core subdev operations: controls, chip ident, debug register access. */
static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
	.g_ctrl = rj54n1_g_ctrl,
	.s_ctrl = rj54n1_s_ctrl,
	.g_chip_ident = rj54n1_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = rj54n1_g_register,
	.s_register = rj54n1_s_register,
#endif
};
/* Video subdev operations: streaming, format negotiation, cropping. */
static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
	.s_stream = rj54n1_s_stream,
	.s_mbus_fmt = rj54n1_s_fmt,
	.g_mbus_fmt = rj54n1_g_fmt,
	.try_mbus_fmt = rj54n1_try_fmt,
	.enum_mbus_fmt = rj54n1_enum_fmt,
	.g_crop = rj54n1_g_crop,
	.s_crop = rj54n1_s_crop,
	.cropcap = rj54n1_cropcap,
};
/* Top-level subdev ops table registered with the V4L2 core. */
static struct v4l2_subdev_ops rj54n1_subdev_ops = {
	.core = &rj54n1_subdev_core_ops,
	.video = &rj54n1_subdev_video_ops,
};
/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
/*
 * rj54n1_video_probe() - verify the chip is actually an RJ54N1CB0C.
 *
 * Reads the two device-code registers and, on a match, programs the
 * IOCTL pin polarity from the platform data.  Returns 0 on success,
 * -ENODEV if the chip is not found or the interface does not match,
 * or a negative I2C error.
 */
static int rj54n1_video_probe(struct soc_camera_device *icd,
			      struct i2c_client *client,
			      struct rj54n1_pdata *priv)
{
	int data1, data2;
	int ret;
	/* This could be a BUG_ON() or a WARN_ON(), or remove it completely */
	if (!icd->dev.parent ||
	    to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
		return -ENODEV;
	/* Read out the chip version register */
	data1 = reg_read(client, RJ54N1_DEV_CODE);
	data2 = reg_read(client, RJ54N1_DEV_CODE2);
	if (data1 != 0x51 || data2 != 0x10) {
		/* I2C read errors (negative values) also land here */
		ret = -ENODEV;
		dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n",
			 data1, data2);
		goto ei2c;
	}
	/* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
	ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
	if (ret < 0)
		goto ei2c;
	dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n",
		 data1, data2);
ei2c:
	/* Success also flows through here with ret == 0 */
	return ret;
}
/*
 * rj54n1_probe() - I2C probe: validate platform data, allocate driver
 * state, register the V4L2 subdev and detect the chip.
 *
 * On detection failure the subdev ops pointer is cleared and the state
 * is freed again.
 */
static int rj54n1_probe(struct i2c_client *client,
			const struct i2c_device_id *did)
{
	struct rj54n1 *rj54n1;
	struct soc_camera_device *icd = client->dev.platform_data;
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct soc_camera_link *icl;
	struct rj54n1_pdata *rj54n1_priv;
	int ret;
	if (!icd) {
		dev_err(&client->dev, "RJ54N1CB0C: missing soc-camera data!\n");
		return -EINVAL;
	}
	icl = to_soc_camera_link(icd);
	if (!icl || !icl->priv) {
		dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
		return -EINVAL;
	}
	rj54n1_priv = icl->priv;
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_warn(&adapter->dev,
			 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
		return -EIO;
	}
	rj54n1 = kzalloc(sizeof(struct rj54n1), GFP_KERNEL);
	if (!rj54n1)
		return -ENOMEM;
	v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
	icd->ops = &rj54n1_ops;
	/* Default geometry: full sensor area, 1:1 scaling (resize == 1024) */
	rj54n1->clk_div = clk_div;
	rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
	rj54n1->rect.top = RJ54N1_ROW_SKIP;
	rj54n1->rect.width = RJ54N1_MAX_WIDTH;
	rj54n1->rect.height = RJ54N1_MAX_HEIGHT;
	rj54n1->width = RJ54N1_MAX_WIDTH;
	rj54n1->height = RJ54N1_MAX_HEIGHT;
	rj54n1->fmt = &rj54n1_colour_fmts[0];
	rj54n1->resize = 1024;
	/* Timing-generator clock derived from the board's master clock */
	rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
		(clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
	ret = rj54n1_video_probe(icd, client, rj54n1_priv);
	if (ret < 0) {
		icd->ops = NULL;
		kfree(rj54n1);
		return ret;
	}
	return ret;
}
/* Undo rj54n1_probe(): detach soc-camera ops, release the bus, free state. */
static int rj54n1_remove(struct i2c_client *client)
{
	struct soc_camera_device *icd = client->dev.platform_data;
	struct soc_camera_link *icl = to_soc_camera_link(icd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);

	icd->ops = NULL;
	if (icl->free_bus)
		icl->free_bus(icl);
	kfree(rj54n1);

	return 0;
}
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id rj54n1_id[] = {
	{ "rj54n1cb0c", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, rj54n1_id);
/* I2C driver glue. */
static struct i2c_driver rj54n1_i2c_driver = {
	.driver = {
		.name = "rj54n1cb0c",
	},
	.probe = rj54n1_probe,
	.remove = rj54n1_remove,
	.id_table = rj54n1_id,
};
/* Module entry point: register the I2C driver. */
static int __init rj54n1_mod_init(void)
{
	return i2c_add_driver(&rj54n1_i2c_driver);
}
/* Module exit point: unregister the I2C driver. */
static void __exit rj54n1_mod_exit(void)
{
	i2c_del_driver(&rj54n1_i2c_driver);
}
module_init(rj54n1_mod_init);
module_exit(rj54n1_mod_exit);
MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
myjang0507/Polaris | drivers/pwm/pwm-puv3.c | 3415 | 3546 | /*
* linux/arch/unicore32/kernel/pwm.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
* Copyright (C) 2001-2010 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pwm.h>
#include <asm/div64.h>
#include <mach/hardware.h>
/* Per-device state for the PKUnity-v3 PWM block. */
struct puv3_pwm_chip {
	struct pwm_chip chip;	/* embedded generic PWM chip */
	void __iomem *base;	/* mapped OST PWM registers */
	struct clk *clk;	/* "OST_CLK" functional clock */
};
/* Recover our wrapper structure from the embedded pwm_chip. */
static inline struct puv3_pwm_chip *to_puv3(struct pwm_chip *chip)
{
	return container_of(chip, struct puv3_pwm_chip, chip);
}
/*
 * Hardware timing relations:
 * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
 * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
 */
static int puv3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
			   int duty_ns, int period_ns)
{
	unsigned long period_cycles, prescale, pv, dc;
	struct puv3_pwm_chip *puv3 = to_puv3(chip);
	unsigned long long c;
	/* Convert the requested period into clock cycles using 64-bit math */
	c = clk_get_rate(puv3->clk);
	c = c * period_ns;
	do_div(c, 1000000000);
	period_cycles = c;
	if (period_cycles < 1)
		period_cycles = 1;
	/* Split the cycle count into a prescaler and a 10-bit period value */
	prescale = (period_cycles - 1) / 1024;
	pv = period_cycles / (prescale + 1) - 1;
	if (prescale > 63)
		return -EINVAL;	/* period too long for the 6-bit prescaler */
	if (duty_ns == period_ns)
		dc = OST_PWMDCCR_FDCYCLE;	/* 100% duty: full-cycle flag */
	else
		dc = (pv + 1) * duty_ns / period_ns;
	/*
	 * NOTE: the clock to PWM has to be enabled first
	 * before writing to the registers
	 */
	clk_prepare_enable(puv3->clk);
	writel(prescale, puv3->base + OST_PWM_PWCR);
	writel(pv - dc, puv3->base + OST_PWM_DCCR);
	writel(pv, puv3->base + OST_PWM_PCR);
	clk_disable_unprepare(puv3->clk);
	return 0;
}
/* Enabling the PWM amounts to ungating its functional clock. */
static int puv3_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct puv3_pwm_chip *puv3 = to_puv3(chip);
	return clk_prepare_enable(puv3->clk);
}
/* Disabling gates the functional clock again. */
static void puv3_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct puv3_pwm_chip *puv3 = to_puv3(chip);
	clk_disable_unprepare(puv3->clk);
}
/* PWM framework operations for this chip. */
static const struct pwm_ops puv3_pwm_ops = {
	.config = puv3_pwm_config,
	.enable = puv3_pwm_enable,
	.disable = puv3_pwm_disable,
	.owner = THIS_MODULE,
};
/*
 * pwm_probe() - platform probe: map registers, acquire the clock and
 * register one PWM channel with the PWM core.
 *
 * All resources are devm-managed, so the error paths need no explicit
 * cleanup.
 */
static int pwm_probe(struct platform_device *pdev)
{
	struct puv3_pwm_chip *puv3;
	struct resource *r;
	int ret;
	puv3 = devm_kzalloc(&pdev->dev, sizeof(*puv3), GFP_KERNEL);
	if (puv3 == NULL) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}
	puv3->clk = devm_clk_get(&pdev->dev, "OST_CLK");
	if (IS_ERR(puv3->clk))
		return PTR_ERR(puv3->clk);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	puv3->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(puv3->base))
		return PTR_ERR(puv3->base);
	/* Single PWM channel; let the core pick the base number */
	puv3->chip.dev = &pdev->dev;
	puv3->chip.ops = &puv3_pwm_ops;
	puv3->chip.base = -1;
	puv3->chip.npwm = 1;
	ret = pwmchip_add(&puv3->chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
		return ret;
	}
	platform_set_drvdata(pdev, puv3);
	return 0;
}
/* Platform remove: deregister the PWM chip (devm frees the rest). */
static int pwm_remove(struct platform_device *pdev)
{
	struct puv3_pwm_chip *puv3 = platform_get_drvdata(pdev);
	return pwmchip_remove(&puv3->chip);
}
/* Platform driver glue. */
static struct platform_driver puv3_pwm_driver = {
	.driver = {
		.name = "PKUnity-v3-PWM",
	},
	.probe = pwm_probe,
	.remove = pwm_remove,
};
module_platform_driver(puv3_pwm_driver);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
omnirom/android_kernel_google_msm | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3671 | 55348 | /**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
/*
 * Integer clip rectangle. x2/y2 appear to be exclusive bounds (rects
 * with x1 >= x2 or y1 >= y2 are treated as empty by vmw_clip_cliprects).
 */
struct vmw_clip_rect {
	int x1, x2, y1, y2;
};
/**
 * vmw_clip_cliprects - intersect cliprects with a bounding clip.
 *
 * Clip @num_rects number of @rects against @clip, storing the surviving
 * rectangles in @out_rects and their count in @out_num.  Rectangles
 * that become empty after clipping are dropped.
 */
void vmw_clip_cliprects(struct drm_clip_rect *rects,
			int num_rects,
			struct vmw_clip_rect clip,
			SVGASignedRect *out_rects,
			int *out_num)
{
	int in, out = 0;

	for (in = 0; in < num_rects; in++) {
		int left = max_t(int, clip.x1, rects[in].x1);
		int top = max_t(int, clip.y1, rects[in].y1);
		int right = min_t(int, clip.x2, rects[in].x2);
		int bottom = min_t(int, clip.y2, rects[in].y2);

		/* Skip rectangles clipped to nothing. */
		if (left >= right || top >= bottom)
			continue;

		out_rects[out].left = left;
		out_rects[out].top = top;
		out_rects[out].right = right;
		out_rects[out].bottom = bottom;
		out++;
	}

	*out_num = out;
}
/*
 * Release everything a display unit owns: drop cursor backing
 * references and tear down the DRM crtc/encoder/connector objects.
 */
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
* Display Unit Cursor functions
*/
/*
 * vmw_cursor_update_image() - send a DEFINE_ALPHA_CURSOR command.
 *
 * Reserves FIFO space for the command header plus the raw ARGB image
 * (width * height * 4 bytes, copied right after the command struct)
 * and commits it to the device.  Returns 0 on success, -EINVAL for a
 * NULL image, -ENOMEM if the FIFO reservation fails.
 */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;
	if (!image)
		return -EINVAL;
	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}
	memset(cmd, 0, sizeof(*cmd));
	/* Pixel payload follows the command structure in the FIFO */
	memcpy(&cmd[1], image, image_size);
	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
	vmw_fifo_commit(dev_priv, cmd_size);
	return 0;
}
/*
 * vmw_cursor_update_dmabuf() - define the cursor from a dma-buffer.
 *
 * Maps the buffer into the kernel, hands the pixels to
 * vmw_cursor_update_image() and unmaps again.  The buffer object is
 * reserved for the duration of the mapping.
 */
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;
	kmap_offset = 0;
	/* Number of pages covering the width*height*4-byte image */
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}
	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;
	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);
	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);
	return ret;
}
/*
 * vmw_cursor_update_position() - move/show/hide the cursor via the
 * FIFO cursor registers, bumping the cursor count so the device
 * notices the change.
 */
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;
	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	/* Incrementing the count signals the device to re-read the state */
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
/*
 * vmw_du_crtc_cursor_set() - DRM cursor_set callback.
 *
 * Looks up the surface or dma-buffer backing @handle, tears down the
 * previous cursor image and installs the new one.  @handle == 0 hides
 * the cursor.  Returns 0 on success or a negative error code.
 */
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;
	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64))
		return -EINVAL;
	if (handle) {
		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			return -EINVAL;
		}
	}
	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		return -EINVAL;
	}
	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;
		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;
		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       du->hotspot_x, du->hotspot_y);
		/*
		 * Propagate mapping/FIFO errors instead of silently
		 * reporting success with a stale cursor image.
		 */
		if (unlikely(ret != 0))
			return ret;
	} else {
		/* No handle: just hide the cursor */
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}
	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);
	return 0;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
du->cursor_x = x + crtc->x;
du->cursor_y = y + crtc->y;
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
return 0;
}
/*
 * vmw_kms_cursor_snoop() - capture cursor pixel data from a surface DMA.
 *
 * Inspects an in-flight SURFACE_DMA command and, when it targets the
 * snooped cursor surface in a layout we can handle (single page-aligned
 * copy box starting at 0,0), copies the pixel data into
 * srf->snooper.image and bumps the snooper age.  Unsupported layouts
 * are logged and ignored.
 */
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	cmd = container_of(header, struct vmw_dma_cmd, header);
	/* No snooper installed */
	if (!srf->snooper.image)
		return;
	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}
	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}
	/* Copy boxes follow the SurfaceDMA body in the command stream */
	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);
	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle none page aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more then one copy */
		DRM_ERROR("Cant snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}
	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}
	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;
	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		/* Tightly packed 64x64 image: one bulk copy suffices */
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}
	srf->snooper.age++;
	/* we can't call this function from this function since execbuf has
	 * reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */
	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
/*
 * vmw_kms_cursor_post_execbuf() - refresh stale snooped cursor images.
 *
 * After command submission, walk all CRTCs and re-send the cursor image
 * for any display unit whose snooper age no longer matches the cached
 * one (i.e. the guest updated the cursor surface during execbuf).
 */
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}
	mutex_unlock(&dev->mode_config.mutex);
}
/*
* Generic framebuffer code
*/
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
if (handle)
*handle = 0;
return 0;
}
/*
* Surface framebuffer code
*/
/* Downcast a drm_framebuffer embedded in a surface framebuffer. */
#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)
/* A DRM framebuffer backed by a vmwgfx surface. */
struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;	/* common vmwgfx framebuffer part */
	struct vmw_surface *surface;	/* backing surface (referenced) */
	struct vmw_dma_buffer *buffer;
	struct list_head head;		/* entry in the master's fb_surf list */
	struct drm_master *master;	/* master that created this fb */
};
/*
 * Destroy a surface-backed framebuffer: unlink it from the master's
 * list, drop the master, surface and user-object references, and free
 * the wrapper.
 */
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);
	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);
	drm_master_put(&vfbs->master);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	ttm_base_object_unref(&vfbs->base.user_obj);
	kfree(vfbs);
}
/*
 * do_surface_dirty_sou() - flush dirty regions of a surface-backed
 * framebuffer to all screen objects scanning it out.
 *
 * Builds one BLIT_SURFACE_TO_SCREEN command covering the bounding box
 * of all cliprects, then re-clips and re-submits it once per affected
 * display unit.  @inc is 2 when the cliprect array interleaves
 * source/dest pairs (DIRTY_ANNOTATE_COPY), 1 otherwise.  The last
 * submission's fence is returned through @out_fence if non-NULL.
 */
static int do_surface_dirty_sou(struct vmw_private *dev_priv,
				struct drm_file *file_priv,
				struct vmw_framebuffer *framebuffer,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int inc,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;
	/* Collect the display units scanning out this framebuffer */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
			    head) {
		if (crtc->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}
	BUG_ON(!clips || !num_clips);
	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}
	/* Command followed by one blit rect per cliprect */
	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kzalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Temporary fifo memory alloc failed.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}
	/* setup blits pointer */
	blits = (SVGASignedRect *)&cmd[1];
	/* initial clip region */
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;
	/* skip the first clip rect */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}
	/* only need to do this once */
	memset(cmd, 0, fifo_size);
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;
	/* Translate cliprects to be relative to the bounding box */
	clips_ptr = clips;
	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
		tmp[i].x1 = clips_ptr->x1 - left;
		tmp[i].x2 = clips_ptr->x2 - left;
		tmp[i].y1 = clips_ptr->y1 - top;
		tmp[i].y2 = clips_ptr->y2 - top;
	}
	/* do per unit writing, reuse fifo for each */
	for (i = 0; i < num_units; i++) {
		struct vmw_display_unit *unit = units[i];
		struct vmw_clip_rect clip;
		int num;
		clip.x1 = left - unit->crtc.x;
		clip.y1 = top - unit->crtc.y;
		clip.x2 = right - unit->crtc.x;
		clip.y2 = bottom - unit->crtc.y;
		/* skip any crtcs that misses the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;
		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects needs to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;
		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;
		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
		cmd->body.destScreenId = unit->unit;
		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
		/* if no cliprects hit skip this */
		if (num == 0)
			continue;
		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);
		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, out_fence);
		if (unlikely(ret != 0))
			break;
	}
	kfree(cmd);
out_free_tmp:
	kfree(tmp);
	return ret;
}
/*
 * vmw_framebuffer_surface_dirty() - DRM .dirty callback for
 * surface-backed framebuffers.
 *
 * Normalizes the cliprect list (whole framebuffer when none given,
 * skipping source rects for annotated copies) and flushes the regions
 * via do_surface_dirty_sou().  Returns 0 on success or a negative
 * error code.
 */
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;
	/* Only the creating master may flush this framebuffer */
	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;
	/* Require ScreenObject support for 3D */
	if (!dev_priv->sou_priv)
		return -EINVAL;
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;
	if (!num_clips) {
		/* No cliprects: dirty the whole framebuffer */
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}
	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
				   flags, color,
				   clips, num_clips, inc, NULL);
	ttm_read_unlock(&vmaster->lock);
	/*
	 * Propagate errors (e.g. -ENOMEM) from the flush instead of
	 * unconditionally reporting success.
	 */
	return ret;
}
/* DRM framebuffer operations for surface-backed framebuffers. */
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
/* 3D is only supported on HWv8 hosts which supports screen objects */
if (!dev_priv->sou_priv)
return -ENOSYS;
/*
* Sanity checks.
*/
/* Surface must be marked as a scanout. */
if (unlikely(!surface->scanout))
return -EINVAL;
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
surface->sizes[0].height < mode_cmd->height ||
surface->sizes[0].depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
}
switch (mode_cmd->depth) {
case 32:
format = SVGA3D_A8R8G8B8;
break;
case 24:
format = SVGA3D_X8R8G8B8;
break;
case 16:
format = SVGA3D_R5G6B5;
break;
case 15:
format = SVGA3D_A1R5G5B5;
break;
case 8:
format = SVGA3D_LUMINANCE8;
break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
if (unlikely(format != surface->format)) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
goto out_err1;
}
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
goto out_err2;
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
goto out_err3;
}
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
vfbs->surface = surface;
vfbs->base.user_handle = mode_cmd->handle;
vfbs->master = drm_master_get(file_priv->master);
mutex_lock(&vmaster->fb_surf_mutex);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
*out = &vfbs->base;
return 0;
out_err3:
drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
kfree(vfbs);
out_err1:
return ret;
}
/*
 * Dmabuf framebuffer code
 */

/* Downcast from the embedded drm_framebuffer to our dmabuf wrapper. */
#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

/* A kms framebuffer backed by a plain dma buffer rather than a surface. */
struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;	/* referenced while the fb lives */
};
/*
 * Destructor for dmabuf-backed framebuffers: detach from the modesetting
 * core, then release the buffer and user-object references before freeing.
 */
void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfb_dma =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfb_dma->buffer);
	ttm_base_object_unref(&vfb_dma->base.user_obj);

	kfree(vfb_dma);
}
/*
 * Flush dirty regions of a dmabuf framebuffer on the legacy display unit
 * path by emitting one SVGA_CMD_UPDATE per clip rect directly into the fifo.
 * @increment selects every rect (1) or every other rect (2, annotate-copy).
 * Returns 0 on success, -ENOMEM if the fifo reservation fails.
 */
static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment)
{
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	size_t fifo_size;
	int idx;

	fifo_size = sizeof(*cmd) * num_clips;
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	for (idx = 0; idx < num_clips; idx++, clips += increment) {
		cmd[idx].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[idx].body.x = cpu_to_le32(clips->x1);
		cmd[idx].body.y = cpu_to_le32(clips->y1);
		cmd[idx].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[idx].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, fifo_size);
	return 0;
}
/*
 * Emit an SVGA_CMD_DEFINE_GMRFB describing @framebuffer so that subsequent
 * GMRFB blits operate on its backing buffer.  Goes through execbuf so the
 * user handle in body.ptr.gmrId is translated.  Returns 0 or a negative
 * error code from the allocation or execbuf.
 */
static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
				  struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	int depth = framebuffer->base.depth;
	size_t fifo_size;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support, contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	fifo_size = sizeof(*cmd);
	/* Idiom: kzalloc replaces the old kmalloc + memset pair. */
	cmd = kzalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	cmd->body.ptr.gmrId = framebuffer->user_handle;
	cmd->body.ptr.offset = 0;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
				  fifo_size, 0, NULL, NULL);

	kfree(cmd);

	return ret;
}
/*
 * Flush dirty regions of a dmabuf framebuffer on the screen-object path.
 * First (re)defines the GMRFB for @framebuffer, then, for every crtc
 * scanning it out, clips the dirty rects against that crtc and emits a
 * batch of SVGA_CMD_BLIT_GMRFB_TO_SCREEN commands via execbuf.
 * @increment selects every rect (1) or every other rect (2, annotate-copy).
 * @out_fence, if non-NULL, receives the fence of the LAST submitted batch;
 * any previously stored fence is unreferenced first.
 * Returns 0 on success or a negative error code.
 */
static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment,
			       struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	int i, k, num_units, ret;
	struct drm_crtc *crtc;
	size_t fifo_size;

	struct {
		uint32_t header;
		SVGAFifoCmdBlitGMRFBToScreen body;
	} *blits;

	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
	if (unlikely(ret != 0))
		return ret; /* define_gmrfb prints warnings */

	/* Worst-case batch: every clip hits the unit. */
	fifo_size = sizeof(*blits) * num_clips;
	blits = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(blits == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	/* Collect the display units currently scanning out this fb. */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		int hit_num = 0;

		clips_ptr = clips;
		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
			/* Clip coords relative to this crtc's origin. */
			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
			int move_x, move_y;

			/* skip any crtcs that misses the clip region */
			if (clip_x1 >= unit->crtc.mode.hdisplay ||
			    clip_y1 >= unit->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			/* clip size to crtc size */
			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);

			/* translate both src and dest to bring clip into screen */
			move_x = min_t(int, clip_x1, 0);
			move_y = min_t(int, clip_y1, 0);

			/* actual translate done here */
			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
			blits[hit_num].body.destScreenId = unit->unit;
			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
			blits[hit_num].body.destRect.left = clip_x1 - move_x;
			blits[hit_num].body.destRect.top = clip_y1 - move_y;
			blits[hit_num].body.destRect.right = clip_x2;
			blits[hit_num].body.destRect.bottom = clip_y2;
			hit_num++;
		}

		/* no clips hit the crtc */
		if (hit_num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		/* Submit only the blits that actually hit this unit. */
		fifo_size = sizeof(*blits) * hit_num;
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}

	kfree(blits);

	return ret;
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
int ret, increment = 1;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
norect.x2 = framebuffer->width;
norect.y2 = framebuffer->height;
} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
num_clips /= 2;
increment = 2;
}
if (dev_priv->ldu_priv) {
ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment);
} else {
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment, NULL);
}
ttm_read_unlock(&vmaster->lock);
return ret;
}
/* drm framebuffer ops for dmabuf-backed framebuffers. */
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
/**
 * Pin the dmabuffer to the start of vram.
 * Overlays are paused around the move so they don't scan out of a buffer
 * that is being relocated.
 */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	/* This code should not be used with screen objects */
	BUG_ON(dev_priv->sou_priv);

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);

	vmw_overlay_resume_all(dev_priv);

	/* Pin failure is only warned about; this path treats the pin as
	 * best-effort and always reports success. */
	WARN_ON(ret != 0);

	return 0;
}
/*
 * Unpin the framebuffer's backing buffer.  A missing buffer here indicates
 * a driver bug, so warn and treat it as a no-op rather than crash.
 */
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	/* Idiom: WARN_ON() evaluates to its condition, so the redundant
	 * "if (!p) { WARN_ON(!p); ... }" pattern collapses to one test. */
	if (WARN_ON(!vfbd->buffer))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}
/**
 * vmw_kms_new_framebuffer_dmabuf - Wrap a dma buffer in a kms framebuffer.
 * @dev_priv: Device private.
 * @dmabuf: Backing buffer object.  A reference is taken on success.
 * @out: On success, points to the new struct vmw_framebuffer.
 * @mode_cmd: Requested framebuffer geometry and format.
 *
 * On the legacy (non screen-object) path the pin/unpin callbacks are
 * installed so the buffer can be placed at the start of VRAM.
 * Returns 0 on success or a negative error code.
 */
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	/* NOTE(review): height * pitch is computed in unsigned int; very
	 * large user-supplied modes could overflow here — verify callers
	 * bound these values. */
	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->sou_priv) {
		switch (mode_cmd->depth) {
		case 32:
		case 24:
			/* Only support 32 bpp for 32 and 24 depth fbs */
			if (mode_cmd->bpp == 32)
				break;

			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		case 16:
		case 15:
			/* Only support 16 bpp for 16 and 15 depth fbs */
			if (mode_cmd->bpp == 16)
				break;

			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		default:
			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		/* Fix: 'ret' was still 0 here, so this failure path used to
		 * report success to the caller. */
		ret = -EINVAL;
		goto out_err3;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	if (!dev_priv->sou_priv) {
		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	}
	vfbd->base.dmabuf = true;
	vfbd->buffer = dmabuf;
	vfbd->base.user_handle = mode_cmd->handle;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}
/*
* Generic Kernel modesetting functions
*/
/*
 * .fb_create hook: build a vmw framebuffer (surface- or dmabuf-backed,
 * depending on what the user handle resolves to) from a drm_mode_fb_cmd2.
 * Returns the new framebuffer or an ERR_PTR on failure.
 */
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	/* Translate the new-style fb_cmd2 into the legacy fb_cmd the
	 * helpers below expect (single plane/handle only). */
	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
			     &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	/* Create the new framebuffer depending one what we got back */
	if (bo)
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
						     &mode_cmd);
	else if (surface)
		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
						      surface, &vfb, &mode_cmd);
	else
		BUG();

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		/* Drop the user-object ref taken above on failure. */
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}
/* Mode-config hooks; only framebuffer creation is driver-specific. */
static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};
/**
 * vmw_kms_present - Blit a region of a surface to every screen object
 * currently scanning out @vfb.
 * @dev_priv: Device private.
 * @file_priv: File whose handle namespace execbuf uses.
 * @vfb: Framebuffer whose crtcs receive the blits.
 * @surface: Source surface; must be non-NULL.
 * @sid: Surface id written into the command (rewritten per unit, since
 *       execbuf patches the previous value in place).
 * @destX: Destination x offset in virtual-screen coordinates.
 * @destY: Destination y offset in virtual-screen coordinates.
 * @clips: Source clip rects; must be non-NULL and non-empty.
 * @num_clips: Number of entries in @clips.
 *
 * Returns 0 on success or a negative error code.
 */
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	/* Collect the display units scanning out this fb. */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(surface == NULL);
	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	/* Worst-case command size: header plus one rect per clip. */
	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	/* Compute the bounding box of all clips; it becomes the src rect. */
	left = clips->x;
	right = clips->x + clips->w;
	top = clips->y;
	bottom = clips->y + clips->h;

	for (i = 1; i < num_clips; i++) {
		left = min_t(int, left, (int)clips[i].x);
		right = max_t(int, right, (int)clips[i].x + clips[i].w);
		top = min_t(int, top, (int)clips[i].y);
		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
	}

	/* only need to do this once */
	memset(cmd, 0, fifo_size);
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);

	blits = (SVGASignedRect *)&cmd[1];

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	/* Re-express each clip relative to the bounding box. */
	for (i = 0; i < num_clips; i++) {
		tmp[i].x1 = clips[i].x - left;
		tmp[i].x2 = clips[i].x + clips[i].w - left;
		tmp[i].y1 = clips[i].y - top;
		tmp[i].y2 = clips[i].y + clips[i].h - top;
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		struct vmw_clip_rect clip;
		int num;

		/* Destination rect in this crtc's coordinates. */
		clip.x1 = left + destX - unit->crtc.x;
		clip.y1 = top + destY - unit->crtc.y;
		clip.x2 = right + destX - unit->crtc.x;
		clip.y2 = bottom + destY - unit->crtc.y;

		/* skip any crtcs that misses the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects needs to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = sid;

		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, NULL);

		if (unlikely(ret != 0))
			break;
	}

	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}
/**
 * vmw_kms_readback - Read screen contents back into the dmabuf backing @vfb.
 * @dev_priv: Device private.
 * @file_priv: File whose handle namespace execbuf uses.
 * @vfb: Framebuffer to read back into; must be dmabuf-backed.
 * @user_fence_rep: Optional user-space fence representation filled by execbuf.
 * @clips: Rects to read back, in virtual-screen coords; non-NULL, non-empty.
 * @num_clips: Number of entries in @clips.
 *
 * Builds one DEFINE_GMRFB followed by per-unit BLIT_SCREEN_TO_GMRFB
 * commands and submits them in a single execbuf.
 * Returns 0 on success or a negative error code.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, ret, num_units, blits_pos;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;
	struct {
		uint32_t header;
		SVGAFifoCmdBlitScreenToGMRFB body;
	} *blits;

	/* Collect the display units scanning out this fb. */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(dmabuf == NULL);
	BUG_ON(!clips || !num_clips);

	/* take a safe guess at fifo size */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		return -ENOMEM;
	}

	/* Describe the destination GMRFB (the fb's backing buffer). */
	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
	cmd->body.format.colorDepth = vfb->base.depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = vfb->base.pitches[0];
	cmd->body.ptr.gmrId = vfb->user_handle;
	cmd->body.ptr.offset = 0;

	/* Blit commands follow immediately after the GMRFB definition. */
	blits = (void *)&cmd[1];
	blits_pos = 0;
	for (i = 0; i < num_units; i++) {
		struct drm_vmw_rect *c = clips;
		for (k = 0; k < num_clips; k++, c++) {
			/* transform clip coords to crtc origin based coords */
			int clip_x1 = c->x - units[i]->crtc.x;
			int clip_x2 = c->x - units[i]->crtc.x + c->w;
			int clip_y1 = c->y - units[i]->crtc.y;
			int clip_y2 = c->y - units[i]->crtc.y + c->h;
			int dest_x = c->x;
			int dest_y = c->y;

			/* compensate for clipping, we negate
			 * a negative number and add that.
			 */
			if (clip_x1 < 0)
				dest_x += -clip_x1;
			if (clip_y1 < 0)
				dest_y += -clip_y1;

			/* clip */
			clip_x1 = max(clip_x1, 0);
			clip_y1 = max(clip_y1, 0);
			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);

			/* and cull any rects that misses the crtc */
			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
			blits[blits_pos].body.srcScreenId = units[i]->unit;
			blits[blits_pos].body.destOrigin.x = dest_x;
			blits[blits_pos].body.destOrigin.y = dest_y;

			blits[blits_pos].body.srcRect.left = clip_x1;
			blits[blits_pos].body.srcRect.top = clip_y1;
			blits[blits_pos].body.srcRect.right = clip_x2;
			blits[blits_pos].body.srcRect.bottom = clip_y2;
			blits_pos++;
		}
	}
	/* reset size here and use calculated exact size from loops */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
				  0, user_fence_rep, NULL);

	kfree(cmd);

	return ret;
}
/*
 * Initialize the kms side of the driver: set up the drm mode config and
 * bring up a display system, preferring screen objects and falling back
 * to the legacy display unit.  Always returns 0.
 */
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	if (vmw_kms_init_screen_object_display(dev_priv) != 0)
		(void)vmw_kms_init_legacy_display_system(dev_priv);

	return 0;
}
/* Tear down the kms side; mirrors vmw_kms_init(). Always returns 0. */
int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs says we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	/* Close whichever display system vmw_kms_init() brought up. */
	if (dev_priv->sou_priv)
		vmw_kms_close_screen_object_display(dev_priv);
	else
		vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}
/*
 * Ioctl to set the cursor hotspot on one crtc, or on all of them when
 * DRM_VMW_CURSOR_BYPASS_ALL is set.  Returns 0 or -EINVAL for a bad
 * crtc id.
 */
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);

	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		/* Apply the hotspot to every display unit. */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}
		goto out;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	du = vmw_crtc_to_du(obj_to_crtc(obj));
	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
/*
 * Program a display mode into the SVGA device registers.
 * Returns 0 on success, or -EINVAL when the depth the host reports after
 * writing bpp does not match the requested depth.
 */
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	/* Pitch goes through either a register or a fifo slot, depending
	 * on device capabilities. */
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	/* Read back the depth the host derived and verify it matches. */
	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
/*
 * Save the current VGA/SVGA state (mode registers and, when the device
 * supports display topology, the per-display layout) into dev_priv so
 * vmw_kms_restore_vga() can put it back.  Always returns 0.
 */
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	/* Pitch lives in a register or a fifo slot, matching write_svga. */
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		/* Select display i, read its state, then deselect. */
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}
/*
 * Restore the VGA/SVGA state previously captured by vmw_kms_save_vga().
 * Always returns 0.
 */
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	/* Pitch goes back to wherever it was saved from. */
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		/* Select display i, write its saved state, then deselect. */
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
/*
 * Check whether a mode of the given pitch and height fits in VRAM.
 * The product is formed in 64 bits to avoid overflow for large modes.
 */
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	u64 fb_size = (u64)pitch * (u64)height;

	return fb_size < (u64)dev_priv->vram_size;
}
/**
 * Function called by DRM code called with vbl_lock held.
 * No hardware vblank counter exists; always report 0.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}

/**
 * Function called by DRM code called with vbl_lock held.
 * Vblank interrupts are not supported; always fails with -ENOSYS.
 */
int vmw_enable_vblank(struct drm_device *dev, int crtc)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code called with vbl_lock held.
 * Intentionally a no-op; see vmw_enable_vblank().
 */
void vmw_disable_vblank(struct drm_device *dev, int crtc)
{
}
/*
* Small shared kms functions.
*/
/**
 * vmw_du_update_layout - Push a new multi-monitor layout to the connectors.
 * @dev_priv: Device private.
 * @num: Number of entries in @rects.
 * @rects: Per-display-unit preferred position and size, indexed by unit.
 *
 * Units with an entry in @rects become active with the given preferred
 * geometry; units without one fall back to an inactive 800x600 default.
 * Connector status is re-detected for every connector.  Always returns 0.
 * (An old "#if 0" debug dump of the layout was removed as dead code.)
 */
int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			/* This unit has an entry in the new layout. */
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
		} else {
			/* Not mentioned: park at a default size, inactive. */
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
/**
 * vmw_du_page_flip - Page-flip implementation for screen objects.
 * @crtc: Crtc to flip.
 * @fb: New framebuffer to scan out.
 * @event: Pending vblank event to signal when the flip's fence fires.
 *
 * Swaps the crtc's fb, issues a full-screen dirty of the new fb to obtain
 * a fence, and queues @event on that fence.  On any failure before the
 * event is queued, the old fb is restored.  Returns 0 or a negative error.
 */
int vmw_du_page_flip(struct drm_crtc *crtc,
		     struct drm_framebuffer *fb,
		     struct drm_pending_vblank_event *event)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct drm_file *file_priv = event->base.file_priv;
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;

	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->fb = fb;

	/* do a full screen dirty update */
	clips.x1 = clips.y1 = 0;
	clips.x2 = fb->width;
	clips.y2 = fb->height;

	/* The dirty helpers hand back the fence of the last submission. */
	if (vfb->dmabuf)
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
					  0, 0, &clips, 1, 1, &fence);
	else
		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
					   0, 0, &clips, 1, 1, &fence);


	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		/* A flip without a fence cannot signal the event. */
		ret = -EINVAL;
		goto out_no_fence;
	}

	ret = vmw_event_fence_action_queue(file_priv, fence,
					   &event->base,
					   &event->event.tv_sec,
					   &event->event.tv_usec,
					   true);

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	/* Roll back to the previous framebuffer on failure. */
	crtc->fb = old_fb;
	return ret;
}
/* Crtc state save/restore: nothing to do on this virtual hardware. */
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}
/*
 * Load a gamma ramp into the device palette.  Each 16-bit component is
 * truncated to its high byte before being written to the three consecutive
 * palette registers of entry idx.
 */
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int idx;

	for (idx = 0; idx < size; idx++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", idx,
			  r[idx], g[idx], b[idx]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + idx * 3 + 0, r[idx] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + idx * 3 + 1, g[idx] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + idx * 3 + 2, b[idx] >> 8);
	}
}
/* Connector dpms and state save/restore: no-ops on this virtual hardware. */
void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
}

void vmw_du_connector_save(struct drm_connector *connector)
{
}

void vmw_du_connector_restore(struct drm_connector *connector)
{
}
/*
 * Report a connector as connected when its display unit index is below the
 * host's display count and the unit has been marked active by the layout.
 */
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	uint32_t num_displays;

	/* Serialize the register read against other hardware access. */
	mutex_lock(&dev_priv->hw_mutex);
	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
	mutex_unlock(&dev_priv->hw_mutex);

	if (du->unit < num_displays && du->pref_active)
		return connector_status_connected;

	return connector_status_disconnected;
}
/*
 * Built-in mode list offered on every connector, terminated by an
 * all-zero entry.  (Two labels below were corrected to match their
 * modelines: 1400x1050 and 1856x1392.)
 */
static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	const int blank = 50;	/* arbitrary sync/blank interval */

	mode->hsync_start = mode->hdisplay + blank;
	mode->hsync_end = mode->hsync_start + blank;
	mode->htotal = mode->hsync_end + blank;

	mode->vsync_start = mode->vdisplay + blank;
	mode->vsync_end = mode->vsync_start + blank;
	mode->vtotal = mode->vsync_end + blank;

	/* Pixel clock chosen so the mode comes out near 60Hz. */
	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
/*
 * vmw_du_connector_fill_modes - populate the connector's probed mode list.
 *
 * Adds one "preferred" mode sized to the display unit's preferred
 * width/height (with guessed timings), then all builtin modes that fit
 * both the max_width/max_height limits and available VRAM.  Always
 * returns 1 (number of preferred modes considered).
 */
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;

	/* Add preferred mode */
	{
		mode = drm_mode_duplicate(dev, &prefmode);
		if (!mode)
			return 0;
		mode->hdisplay = du->pref_width;
		mode->vdisplay = du->pref_height;
		vmw_guess_mode_timing(mode);

		/*
		 * hdisplay * 2 is passed as the pitch argument here;
		 * NOTE(review): presumably accounts for bytes per pixel —
		 * confirm against vmw_kms_validate_mode_vram().
		 */
		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
					       mode->vdisplay)) {
			drm_mode_probed_add(connector, mode);
		} else {
			drm_mode_destroy(dev, mode);
			mode = NULL;
		}

		/* Drop the previously cached preferred mode, if any. */
		if (du->pref_mode) {
			list_del_init(&du->pref_mode->head);
			drm_mode_destroy(dev, du->pref_mode);
		}

		/* mode might be null here, this is intended */
		du->pref_mode = mode;
	}

	/* Append every builtin mode that fits the limits and VRAM. */
	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	/* Move the prefered mode first, help apps pick the right mode. */
	if (du->pref_mode)
		list_move(&du->pref_mode->head, &connector->probed_modes);

	drm_mode_connector_list_update(connector);

	return 1;
}
/*
 * vmw_du_connector_set_property - connector property setter stub.
 *
 * No connector properties are supported; all writes are accepted and
 * ignored so generic userspace does not see spurious errors.
 */
int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}
/*
 * vmw_kms_update_layout_ioctl - ioctl handler updating the GUI layout.
 *
 * Copies an array of screen rectangles from userspace, validates each
 * against the mode_config limits and hands them to vmw_du_update_layout().
 * With num_outputs == 0 a default single 800x600 rect is applied.
 * Returns 0 on success or a negative errno.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	struct drm_mode_config *mode_config = &dev->mode_config;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	/*
	 * NOTE(review): rects_size may wrap for huge num_outputs, but the
	 * overflow-checked kcalloc() below fails first, so the wrapped size
	 * is never used — verify this invariant if kcalloc is replaced.
	 */
	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	/* Reject any rect extending beyond the configured maximums. */
	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
| gpl-2.0 |
arnavgosain/android_kernel_sony_msm8x27 | drivers/base/platform.c | 4183 | 29390 | /*
* platform.c - platform 'pseudo' bus for legacy devices
*
* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
*
* This file is released under the GPLv2
*
* Please see Documentation/driver-model/platform.txt for more
* information.
*/
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "base.h"
/* Recover the platform_driver from its embedded struct device_driver. */
#define to_platform_driver(drv)	(container_of((drv), struct platform_driver, \
				 driver))

/* Root device that parents all platform devices lacking an explicit parent. */
struct device platform_bus = {
	.init_name	= "platform",
};
EXPORT_SYMBOL_GPL(platform_bus);
/**
 * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
 * @pdev: platform device
 *
 * This is called before platform_device_add() such that any pdev_archdata may
 * be setup before the platform_notifier is called. So if a user needs to
 * manipulate any relevant information in the pdev_archdata they can do:
 *
 *	platform_device_alloc()
 *	... manipulate ...
 *	platform_device_add()
 *
 * And if they don't care they can just call platform_device_register() and
 * everything will just work out.
 */
void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
{
	/* Intentionally empty; architectures override this weak symbol. */
}
/**
 * platform_get_resource - get a resource for a device
 * @dev: platform device
 * @type: resource type
 * @num: resource index
 *
 * Returns the @num'th resource of @type, or NULL if there are fewer
 * than @num + 1 resources of that type.
 */
struct resource *platform_get_resource(struct platform_device *dev,
				       unsigned int type, unsigned int num)
{
	int i;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		/* num-- counts down only across resources of the right type */
		if (type == resource_type(r) && num-- == 0)
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource);
/**
 * platform_get_irq - get an IRQ for a device
 * @dev: platform device
 * @num: IRQ number index
 *
 * Returns the IRQ number, or -ENXIO if the device has no such IRQ
 * resource.
 */
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
	struct resource *res;

	res = platform_get_resource(dev, IORESOURCE_IRQ, num);
	if (!res)
		return -ENXIO;

	return res->start;
}
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
 * platform_get_resource_byname - get a resource for a device by name
 * @dev: platform device
 * @type: resource type
 * @name: resource name
 *
 * Returns the first resource of @type whose name matches @name, or
 * NULL if @name is NULL or no match exists.  Resources without a name
 * are skipped instead of being dereferenced.
 */
struct resource *platform_get_resource_byname(struct platform_device *dev,
					      unsigned int type,
					      const char *name)
{
	int i;

	if (!name)
		return NULL;

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = &dev->resource[i];

		/* r->name may be NULL; strcmp(NULL, ...) would oops. */
		if (type == resource_type(r) && r->name &&
		    !strcmp(r->name, name))
			return r;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
/**
 * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 *
 * Returns the IRQ number, or -ENXIO if no IRQ resource with that name
 * exists.
 */
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
							  name);

	return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
/**
 * platform_add_devices - add a numbers of platform devices
 * @devs: array of platform devices to add
 * @num: number of platform devices in array
 *
 * Registers all devices in order.  On the first failure, every device
 * registered so far is unregistered again and the error is returned.
 */
int platform_add_devices(struct platform_device **devs, int num)
{
	int i, ret = 0;

	for (i = 0; i < num; i++) {
		ret = platform_device_register(devs[i]);
		if (ret) {
			/* Roll back the devices already registered. */
			while (--i >= 0)
				platform_device_unregister(devs[i]);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_add_devices);
/*
 * Allocation wrapper: the device name is stored inline after the pdev.
 * name[1] holds the terminating NUL; platform_device_alloc() over-allocates
 * by strlen(name) so the full string fits.
 */
struct platform_object {
	struct platform_device pdev;
	char name[1];
};
/**
 * platform_device_put - destroy a platform device
 * @pdev: platform device to free
 *
 * Free all memory associated with a platform device.  This function must
 * _only_ be externally called in error cases.  All other usage is a bug.
 */
void platform_device_put(struct platform_device *pdev)
{
	if (pdev)
		put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);

/*
 * Release callback invoked when the embedded device's refcount drops to
 * zero: frees the OF node reference, the copied platform data, mfd cell
 * and resources, then the containing platform_object itself.
 */
static void platform_device_release(struct device *dev)
{
	struct platform_object *pa = container_of(dev, struct platform_object,
						  pdev.dev);

	of_device_node_put(&pa->pdev.dev);
	kfree(pa->pdev.dev.platform_data);
	kfree(pa->pdev.mfd_cell);
	kfree(pa->pdev.resource);
	kfree(pa);
}
/**
 * platform_device_alloc - create a platform device
 * @name: base name of the device we're adding
 * @id: instance id
 *
 * Create a platform device object which can have other objects attached
 * to it, and which will have attached objects freed when it is released.
 * Returns NULL on allocation failure.
 */
struct platform_device *platform_device_alloc(const char *name, int id)
{
	struct platform_object *pa;

	/* struct already holds one byte of name[]; add strlen(name) more. */
	pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL);
	if (pa) {
		strcpy(pa->name, name);
		pa->pdev.name = pa->name;
		pa->pdev.id = id;
		device_initialize(&pa->pdev.dev);
		pa->pdev.dev.release = platform_device_release;
		arch_setup_pdev_archdata(&pa->pdev);
	}

	return pa ? &pa->pdev : NULL;
}
EXPORT_SYMBOL_GPL(platform_device_alloc);
/**
 * platform_device_add_resources - add resources to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add resources to
 * @res: set of resources that needs to be allocated for the device
 * @num: number of resources
 *
 * Add a copy of the resources to the platform device.  The memory
 * associated with the resources will be freed when the platform device is
 * released.  Passing @res == NULL clears any previously attached
 * resources.  Returns 0 or -ENOMEM.
 */
int platform_device_add_resources(struct platform_device *pdev,
				  const struct resource *res, unsigned int num)
{
	struct resource *r = NULL;

	if (res) {
		r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
		if (!r)
			return -ENOMEM;
	}

	/* Replace (and free) any resources attached earlier. */
	kfree(pdev->resource);
	pdev->resource = r;
	pdev->num_resources = num;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_resources);
/**
 * platform_device_add_data - add platform-specific data to a platform device
 * @pdev: platform device allocated by platform_device_alloc to add data to
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Add a copy of platform specific data to the platform device's
 * platform_data pointer.  The memory associated with the platform data
 * will be freed when the platform device is released.  Passing
 * @data == NULL clears any previously attached data.  Returns 0 or -ENOMEM.
 */
int platform_device_add_data(struct platform_device *pdev, const void *data,
			     size_t size)
{
	void *d = NULL;

	if (data) {
		d = kmemdup(data, size, GFP_KERNEL);
		if (!d)
			return -ENOMEM;
	}

	/* Replace (and free) any platform data attached earlier. */
	kfree(pdev->dev.platform_data);
	pdev->dev.platform_data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 *
 * Claims every MEM/IO resource from its parent (or the global iomem/ioport
 * trees) before calling device_add(); on any failure the resources claimed
 * so far are released again.
 */
int platform_device_add(struct platform_device *pdev)
{
	int i, ret = 0;

	if (!pdev)
		return -EINVAL;

	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	/* id == -1 means "only instance"; omit the numeric suffix. */
	if (pdev->id != -1)
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
	else
		dev_set_name(&pdev->dev, "%s", pdev->name);

	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p && insert_resource(p, r)) {
			printk(KERN_ERR
			       "%s: failed to claim resource %d\n",
			       dev_name(&pdev->dev), i);
			ret = -EBUSY;
			goto failed;
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Unwind: release only the resources claimed above (indices < i). */
	while (--i >= 0) {
		struct resource *r = &pdev->resource[i];
		unsigned long type = resource_type(r);

		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
			release_resource(r);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
/**
 * platform_device_del - remove a platform-level device
 * @pdev: platform device we're removing
 *
 * Note that this function will also release all memory- and port-based
 * resources owned by the device (@dev->resource).  This function must
 * _only_ be externally called in error cases.  All other usage is a bug.
 */
void platform_device_del(struct platform_device *pdev)
{
	int i;

	if (pdev) {
		device_del(&pdev->dev);

		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];
			unsigned long type = resource_type(r);

			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
				release_resource(r);
		}
	}
}
EXPORT_SYMBOL_GPL(platform_device_del);
/**
 * platform_device_register - add a platform-level device
 * @pdev: platform device we're adding
 *
 * For statically allocated pdevs: initializes the embedded device and
 * archdata, then adds it.  Dynamically allocated devices should use
 * platform_device_alloc() + platform_device_add() instead.
 */
int platform_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	arch_setup_pdev_archdata(pdev);
	return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);

/**
 * platform_device_unregister - unregister a platform-level device
 * @pdev: platform device we're unregistering
 *
 * Unregistration is done in 2 steps.  First we release all resources
 * and remove it from the subsystem, then we drop reference count by
 * calling platform_device_put().
 */
void platform_device_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);
	platform_device_put(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_unregister);
/**
 * platform_device_register_full - add a platform-level device with
 * resources and platform-specific data
 *
 * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device *platform_device_register_full(
		const struct platform_device_info *pdevinfo)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;

	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
	if (!pdev)
		goto err_alloc;

	pdev->dev.parent = pdevinfo->parent;

	if (pdevinfo->dma_mask) {
		/*
		 * This memory isn't freed when the device is put,
		 * I don't have a nice idea for that though.  Conceptually
		 * dma_mask in struct device should not be a pointer.
		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
		 */
		pdev->dev.dma_mask =
			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
		if (!pdev->dev.dma_mask)
			goto err;

		*pdev->dev.dma_mask = pdevinfo->dma_mask;
		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
	}

	ret = platform_device_add_resources(pdev,
			pdevinfo->res, pdevinfo->num_res);
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev,
			pdevinfo->data, pdevinfo->size_data);
	if (ret)
		goto err;

	ret = platform_device_add(pdev);
	if (ret) {
		/* err labels live inside the if so the success path falls
		 * straight through to the final return. */
err:
		kfree(pdev->dev.dma_mask);

err_alloc:
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
/* Bus-level probe thunk: forwards to the platform driver's probe(). */
static int platform_drv_probe(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	return drv->probe(dev);
}

/* Installed by platform_driver_probe() so late binds always fail. */
static int platform_drv_probe_fail(struct device *_dev)
{
	return -ENXIO;
}

/* Bus-level remove thunk: forwards to the platform driver's remove(). */
static int platform_drv_remove(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	return drv->remove(dev);
}

/* Bus-level shutdown thunk: forwards to the platform driver's shutdown(). */
static void platform_drv_shutdown(struct device *_dev)
{
	struct platform_driver *drv = to_platform_driver(_dev->driver);
	struct platform_device *dev = to_platform_device(_dev);

	drv->shutdown(dev);
}
/**
 * platform_driver_register - register a driver for platform-level devices
 * @drv: platform driver structure
 *
 * Installs the bus-level thunks only for the callbacks the driver
 * actually provides, then registers with the driver core.
 */
int platform_driver_register(struct platform_driver *drv)
{
	drv->driver.bus = &platform_bus_type;
	if (drv->probe)
		drv->driver.probe = platform_drv_probe;
	if (drv->remove)
		drv->driver.remove = platform_drv_remove;
	if (drv->shutdown)
		drv->driver.shutdown = platform_drv_shutdown;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_register);

/**
 * platform_driver_unregister - unregister a driver for platform-level devices
 * @drv: platform driver structure
 */
void platform_driver_unregister(struct platform_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
 * platform_driver_probe - register driver for non-hotpluggable device
 * @drv: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 *
 * Use this instead of platform_driver_register() when you know the device
 * is not hotpluggable and has already been registered, and you want to
 * remove its run-once probe() infrastructure from memory after the driver
 * has bound to the device.
 *
 * One typical use for this would be with drivers for controllers integrated
 * into system-on-chip processors, where the controller devices have been
 * configured as part of board setup.
 *
 * Returns zero if the driver registered and bound to a device, else returns
 * a negative error code and with the driver not registered.
 */
int __init_or_module platform_driver_probe(struct platform_driver *drv,
		int (*probe)(struct platform_device *))
{
	int retval, code;

	/* make sure driver won't have bind/unbind attributes */
	drv->driver.suppress_bind_attrs = true;

	/* temporary section violation during probe() */
	drv->probe = probe;
	retval = code = platform_driver_register(drv);

	/*
	 * Fixup that section violation, being paranoid about code scanning
	 * the list of drivers in order to probe new devices.  Check to see
	 * if the probe was successful, and make sure any forced probes of
	 * new devices fail.
	 */
	spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
	drv->probe = NULL;
	/* no device bound? registration "succeeded" but probing did not */
	if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
		retval = -ENODEV;
	drv->driver.probe = platform_drv_probe_fail;
	spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);

	if (code != retval)
		platform_driver_unregister(drv);
	return retval;
}
EXPORT_SYMBOL_GPL(platform_driver_probe);
/**
 * platform_create_bundle - register driver and create corresponding device
 * @driver: platform driver structure
 * @probe: the driver probe routine, probably from an __init section
 * @res: set of resources that needs to be allocated for the device
 * @n_res: number of resources
 * @data: platform specific data for this platform device
 * @size: size of platform specific data
 *
 * Use this in legacy-style modules that probe hardware directly and
 * register a single platform device and corresponding platform driver.
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
struct platform_device * __init_or_module platform_create_bundle(
			struct platform_driver *driver,
			int (*probe)(struct platform_device *),
			struct resource *res, unsigned int n_res,
			const void *data, size_t size)
{
	struct platform_device *pdev;
	int error;

	/* Device is named after the driver, single instance (id -1). */
	pdev = platform_device_alloc(driver->driver.name, -1);
	if (!pdev) {
		error = -ENOMEM;
		goto err_out;
	}

	error = platform_device_add_resources(pdev, res, n_res);
	if (error)
		goto err_pdev_put;

	error = platform_device_add_data(pdev, data, size);
	if (error)
		goto err_pdev_put;

	error = platform_device_add(pdev);
	if (error)
		goto err_pdev_put;

	error = platform_driver_probe(driver, probe);
	if (error)
		goto err_pdev_del;

	return pdev;

	/* goto-based unwind: release in reverse order of acquisition */
err_pdev_del:
	platform_device_del(pdev);
err_pdev_put:
	platform_device_put(pdev);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(platform_create_bundle);
/* modalias support enables more hands-off userspace setup:
 * (a) environment variable lets new-style hotplug events work once system is
 *     fully running:  "modprobe $MODALIAS"
 * (b) sysfs attribute lets new-style coldplug recover from hotplug events
 *     mishandled before system is fully running:  "modprobe $(cat modalias)"
 */
static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct platform_device	*pdev = to_platform_device(dev);
	int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);

	/* snprintf may report a would-be length > PAGE_SIZE; clamp it. */
	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}

/* Default sysfs attributes for every platform device. */
static struct device_attribute platform_dev_attrs[] = {
	__ATTR_RO(modalias),
	__ATTR_NULL,
};
/*
 * platform_uevent - add MODALIAS to hotplug events for platform devices.
 *
 * Prefers an OF-style modalias when the device has one; otherwise emits
 * the classic "platform:<name>" alias.  Returns 0 or a uevent error code.
 */
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct platform_device	*pdev = to_platform_device(dev);
	int rc;

	/* Some devices have extra OF data and an OF-style MODALIAS */
	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
		       pdev->name);
	return 0;
}
/*
 * Walk a NUL-name-terminated id table; on a hit, cache the entry in
 * pdev->id_entry (so the driver can read it from probe()) and return it.
 */
static const struct platform_device_id *platform_match_id(
			const struct platform_device_id *id,
			struct platform_device *pdev)
{
	while (id->name[0]) {
		if (strcmp(pdev->name, id->name) == 0) {
			pdev->id_entry = id;
			return id;
		}
		id++;
	}
	return NULL;
}
/**
 * platform_match - bind platform device to platform driver.
 * @dev: device.
 * @drv: driver.
 *
 * Platform device IDs are assumed to be encoded like this:
 * "<name><instance>", where <name> is a short description of the type of
 * device, like "pci" or "floppy", and <instance> is the enumerated
 * instance of the device, like '0' or '42'.  Driver IDs are simply
 * "<name>".  So, extract the <name> from the platform_device structure,
 * and compare it against the name of the driver.  Return whether they match
 * or not.
 */
static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct platform_driver *pdrv = to_platform_driver(drv);

	/* Attempt an OF style match first */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try to match against the id table */
	if (pdrv->id_table)
		return platform_match_id(pdrv->id_table, pdev) != NULL;

	/* fall-back to driver name match */
	return (strcmp(pdev->name, drv->name) == 0);
}
#ifdef CONFIG_PM_SLEEP

/* Invoke the driver's legacy (pre-dev_pm_ops) suspend callback, if any. */
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->suspend)
		ret = pdrv->suspend(pdev, mesg);

	return ret;
}

/* Invoke the driver's legacy (pre-dev_pm_ops) resume callback, if any. */
static int platform_legacy_resume(struct device *dev)
{
	struct platform_driver *pdrv = to_platform_driver(dev->driver);
	struct platform_device *pdev = to_platform_device(dev);
	int ret = 0;

	if (dev->driver && pdrv->resume)
		ret = pdrv->resume(pdev);

	return ret;
}

#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND

/* Bus suspend hook: prefer dev_pm_ops, fall back to legacy callbacks. */
int platform_pm_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->suspend)
			ret = drv->pm->suspend(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
	}

	return ret;
}

/* Bus resume hook: prefer dev_pm_ops, fall back to legacy callbacks. */
int platform_pm_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->resume)
			ret = drv->pm->resume(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * Hibernation hooks.  Each follows the same pattern as the suspend path:
 * no-op without a bound driver, use dev_pm_ops when present, otherwise
 * fall back to the legacy suspend/resume callbacks with the matching
 * PMSG_* message.
 */

int platform_pm_freeze(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->freeze)
			ret = drv->pm->freeze(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_FREEZE);
	}

	return ret;
}

int platform_pm_thaw(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->thaw)
			ret = drv->pm->thaw(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

int platform_pm_poweroff(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->poweroff)
			ret = drv->pm->poweroff(dev);
	} else {
		ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
	}

	return ret;
}

int platform_pm_restore(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (!drv)
		return 0;

	if (drv->pm) {
		if (drv->pm->restore)
			ret = drv->pm->restore(dev);
	} else {
		ret = platform_legacy_resume(dev);
	}

	return ret;
}

#endif /* CONFIG_HIBERNATE_CALLBACKS */
/* Generic runtime-PM ops plus the system-sleep ops defined above. */
static const struct dev_pm_ops platform_dev_pm_ops = {
	.runtime_suspend = pm_generic_runtime_suspend,
	.runtime_resume = pm_generic_runtime_resume,
	.runtime_idle = pm_generic_runtime_idle,
	USE_PLATFORM_PM_SLEEP_OPS
};

/* The platform bus itself: matching, uevents, sysfs attrs and PM ops. */
struct bus_type platform_bus_type = {
	.name		= "platform",
	.dev_attrs	= platform_dev_attrs,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.pm		= &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
/*
 * platform_bus_init - register the platform root device and bus type.
 *
 * Called once at boot.  Cleans up leftover early-platform state first;
 * on bus registration failure the root device is unregistered again.
 */
int __init platform_bus_init(void)
{
	int error;

	early_platform_cleanup();

	error = device_register(&platform_bus);
	if (error)
		return error;
	error =  bus_register(&platform_bus_type);
	if (error)
		device_unregister(&platform_bus);
	return error;
}
#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
/*
 * Generic fallback: compute the smallest all-ones DMA mask that covers
 * the highest physical page.  The max address is split into a low and a
 * high 32-bit half; whichever half is populated is rounded up to the
 * next power-of-two minus one.
 */
u64 dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
/* Global registries for the boot-time early platform infrastructure. */
static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);

/**
 * early_platform_driver_register - register early platform driver
 * @epdrv: early_platform driver structure
 * @buf: string passed from early_param()
 *
 * Helper function for early_platform_init() / early_platform_init_buffer().
 * Always returns 0.
 */
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
					  char *buf)
{
	char *tmp;
	int n;

	/* Simply add the driver to the end of the global list.
	 * Drivers will by default be put on the list in compiled-in order.
	 */
	if (!epdrv->list.next) {
		INIT_LIST_HEAD(&epdrv->list);
		list_add_tail(&epdrv->list, &early_platform_driver_list);
	}

	/* If the user has specified device then make sure the driver
	 * gets prioritized. The driver of the last device specified on
	 * command line will be put first on the list.
	 */
	n = strlen(epdrv->pdrv->driver.name);
	if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
		list_move(&epdrv->list, &early_platform_driver_list);

		/* Allow passing parameters after device name */
		if (buf[n] == '\0' || buf[n] == ',')
			epdrv->requested_id = -1;
		else {
			/* Parse "<name>.<id>[,params]" */
			epdrv->requested_id = simple_strtoul(&buf[n + 1],
							     &tmp, 10);

			if (buf[n] != '.' || (tmp == &buf[n + 1])) {
				epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
				n = 0;
			} else
				n += strcspn(&buf[n + 1], ",") + 1;
		}

		if (buf[n] == ',')
			n++;

		/* Copy any trailing parameters into the driver's buffer. */
		if (epdrv->bufsize) {
			memcpy(epdrv->buffer, &buf[n],
			       min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
			epdrv->buffer[epdrv->bufsize - 1] = '\0';
		}
	}

	return 0;
}
/**
 * early_platform_add_devices - adds a number of early platform devices
 * @devs: array of early platform devices to add
 * @num: number of early platform devices in array
 *
 * Used by early architecture code to register early platform devices and
 * their platform data.
 *
 * Note: devres_head is reused as the list linkage here, since devres is
 * not yet in use this early in boot; early_platform_cleanup() resets it.
 */
void __init early_platform_add_devices(struct platform_device **devs, int num)
{
	struct device *dev;
	int i;

	/* simply add the devices to list */
	for (i = 0; i < num; i++) {
		dev = &devs[i]->dev;

		if (!dev->devres_head.next) {
			INIT_LIST_HEAD(&dev->devres_head);
			list_add_tail(&dev->devres_head,
				      &early_platform_device_list);
		}
	}
}
/**
 * early_platform_driver_register_all - register early platform drivers
 * @class_str: string to identify early platform driver class
 *
 * Used by architecture code to register all early platform drivers
 * for a certain class. If omitted then only early platform drivers
 * with matching kernel command line class parameters will be registered.
 */
void __init early_platform_driver_register_all(char *class_str)
{
	/* The "class_str" parameter may or may not be present on the kernel
	 * command line. If it is present then there may be more than one
	 * matching parameter.
	 *
	 * Since we register our early platform drivers using early_param()
	 * we need to make sure that they also get registered in the case
	 * when the parameter is missing from the kernel command line.
	 *
	 * We use parse_early_options() to make sure the early_param() gets
	 * called at least once. The early_param() may be called more than
	 * once since the name of the preferred device may be specified on
	 * the kernel command line. early_platform_driver_register() handles
	 * this case for us.
	 */
	parse_early_options(class_str);
}
/**
 * early_platform_match - find early platform device matching driver
 * @epdrv: early platform driver structure
 * @id: id to match against
 *
 * Returns the first registered early device that both matches the
 * driver and has exactly the requested id, or NULL.
 */
static  __init struct platform_device *
early_platform_match(struct early_platform_driver *epdrv, int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id == id)
				return pd;

	return NULL;
}
/**
 * early_platform_left - check if early platform driver has matching devices
 * @epdrv: early platform driver structure
 * @id: return true if id or above exists
 *
 * Returns 1 if any matching device with id >= @id remains, else 0.
 */
static  __init int early_platform_left(struct early_platform_driver *epdrv,
				       int id)
{
	struct platform_device *pd;

	list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
		if (platform_match(&pd->dev, &epdrv->pdrv->driver))
			if (pd->id >= id)
				return 1;

	return 0;
}
/**
 * early_platform_driver_probe_id - probe drivers matching class_str and id
 * @class_str: string to identify early platform driver class
 * @id: id to match against (-2 means "use each driver's requested id")
 * @nr_probe: number of platform devices to successfully probe before exiting
 *
 * Returns the number of successful probes, or -ENODEV when no matching
 * devices at or above @id remain for any driver of the class.
 */
static int __init early_platform_driver_probe_id(char *class_str,
						 int id,
						 int nr_probe)
{
	struct early_platform_driver *epdrv;
	struct platform_device *match;
	int match_id;
	int n = 0;
	int left = 0;

	list_for_each_entry(epdrv, &early_platform_driver_list, list) {
		/* only use drivers matching our class_str */
		if (strcmp(class_str, epdrv->class_str))
			continue;

		if (id == -2) {
			/* user-requested pass: probe only requested ids */
			match_id = epdrv->requested_id;
			left = 1;

		} else {
			match_id = id;
			left += early_platform_left(epdrv, id);

			/* skip requested id */
			switch (epdrv->requested_id) {
			case EARLY_PLATFORM_ID_ERROR:
			case EARLY_PLATFORM_ID_UNSET:
				break;
			default:
				/* already probed in the id == -2 pass */
				if (epdrv->requested_id == id)
					match_id = EARLY_PLATFORM_ID_UNSET;
			}
		}

		switch (match_id) {
		case EARLY_PLATFORM_ID_ERROR:
			pr_warning("%s: unable to parse %s parameter\n",
				   class_str, epdrv->pdrv->driver.name);
			/* fall-through */
		case EARLY_PLATFORM_ID_UNSET:
			match = NULL;
			break;
		default:
			match = early_platform_match(epdrv, match_id);
		}

		if (match) {
			/*
			 * Set up a sensible init_name to enable
			 * dev_name() and others to be used before the
			 * rest of the driver core is initialized.
			 */
			if (!match->dev.init_name && slab_is_available()) {
				if (match->id != -1)
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s.%d",
							  match->name,
							  match->id);
				else
					match->dev.init_name =
						kasprintf(GFP_KERNEL, "%s",
							  match->name);

				if (!match->dev.init_name)
					return -ENOMEM;
			}

			if (epdrv->pdrv->probe(match))
				pr_warning("%s: unable to probe %s early.\n",
					   class_str, match->name);
			else
				n++;
		}

		if (n >= nr_probe)
			break;
	}

	if (left)
		return n;
	else
		return -ENODEV;
}
/**
 * early_platform_driver_probe - probe a class of registered drivers
 * @class_str: string to identify early platform driver class
 * @nr_probe: number of platform devices to successfully probe before exiting
 * @user_only: only probe user specified early platform devices
 *
 * Used by architecture code to probe registered early platform drivers
 * within a certain class. For probe to happen a registered early platform
 * device matching a registered early platform driver is needed.
 *
 * Iterates ids starting at -2 (the "user requested" pass) and counts
 * successful probes; returns that count.
 */
int __init early_platform_driver_probe(char *class_str,
				       int nr_probe,
				       int user_only)
{
	int k, n, i;

	n = 0;
	for (i = -2; n < nr_probe; i++) {
		k = early_platform_driver_probe_id(class_str, i, nr_probe - n);

		if (k < 0)
			break;

		n += k;

		if (user_only)
			break;
	}

	return n;
}
/**
 * early_platform_cleanup - clean up early platform code
 *
 * Unhooks every early device from the boot-time list and zeroes its
 * devres_head so the real devres machinery can use it afterwards.
 */
void __init early_platform_cleanup(void)
{
	struct platform_device *pd, *pd2;

	/* clean up the devres list used to chain devices */
	list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
				 dev.devres_head) {
		list_del(&pd->dev.devres_head);
		memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
	}
}
| gpl-2.0 |
faux123/htc-m7 | security/integrity/iint.c | 5207 | 3931 | /*
* Copyright (C) 2008 IBM Corporation
*
* Authors:
* Mimi Zohar <zohar@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* File: integrity_iint.c
* - implements the integrity hooks: integrity_inode_alloc,
* integrity_inode_free
* - cache integrity information associated with an inode
* using a rbtree tree.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT;
static DEFINE_SPINLOCK(integrity_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
int iint_initialized;
/*
* __integrity_iint_find - return the iint associated with an inode
*/
static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
{
    struct rb_node *node = integrity_iint_tree.rb_node;

    /* The tree may be reshaped concurrently; callers must hold the lock. */
    assert_spin_locked(&integrity_iint_lock);

    /* Binary search keyed on the inode pointer value. */
    while (node) {
        struct integrity_iint_cache *entry =
            rb_entry(node, struct integrity_iint_cache, rb_node);

        if (inode < entry->inode)
            node = node->rb_left;
        else if (inode > entry->inode)
            node = node->rb_right;
        else
            return entry;
    }

    return NULL;
}
/*
* integrity_iint_find - return the iint associated with an inode
*/
struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
{
    struct integrity_iint_cache *iint;

    /* S_IMA is only set once an iint has been inserted for this inode,
     * so bail out cheaply without taking the lock. */
    if (!IS_IMA(inode))
        return NULL;

    /* Search under the lock; the tree may be modified concurrently. */
    spin_lock(&integrity_iint_lock);
    iint = __integrity_iint_find(inode);
    spin_unlock(&integrity_iint_lock);

    return iint;
}
/* Scrub a cache object's state and hand it back to the slab cache. */
static void iint_free(struct integrity_iint_cache *iint)
{
    iint->flags = 0UL;
    iint->version = 0;
    iint->evm_status = INTEGRITY_UNKNOWN;
    kmem_cache_free(iint_cache, iint);
}
/**
* integrity_inode_alloc - allocate an iint associated with an inode
* @inode: pointer to the inode
*/
int integrity_inode_alloc(struct inode *inode)
{
    struct rb_node **p;
    struct rb_node *new_node, *parent = NULL;
    struct integrity_iint_cache *new_iint, *test_iint;
    int rc;

    /* Allocate outside the spinlock; GFP_NOFS so reclaim cannot recurse
     * back into filesystem code. */
    new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
    if (!new_iint)
        return -ENOMEM;

    new_iint->inode = inode;
    new_node = &new_iint->rb_node;

    mutex_lock(&inode->i_mutex);    /* i_flags */
    spin_lock(&integrity_iint_lock);

    /* Walk the tree (keyed by inode pointer) to find the insertion slot. */
    p = &integrity_iint_tree.rb_node;
    while (*p) {
        parent = *p;
        test_iint = rb_entry(parent, struct integrity_iint_cache,
                             rb_node);
        rc = -EEXIST;
        if (inode < test_iint->inode)
            p = &(*p)->rb_left;
        else if (inode > test_iint->inode)
            p = &(*p)->rb_right;
        else
            /* An iint for this inode is already in the tree. */
            goto out_err;
    }

    /* Mark the inode so IS_IMA() fast paths know an iint exists. */
    inode->i_flags |= S_IMA;
    rb_link_node(new_node, parent, p);
    rb_insert_color(new_node, &integrity_iint_tree);

    spin_unlock(&integrity_iint_lock);
    mutex_unlock(&inode->i_mutex);  /* i_flags */
    return 0;
out_err:
    spin_unlock(&integrity_iint_lock);
    mutex_unlock(&inode->i_mutex);  /* i_flags */
    iint_free(new_iint);
    return rc;
}
/**
* integrity_inode_free - called on security_inode_free
* @inode: pointer to the inode
*
* Free the integrity information(iint) associated with an inode.
*/
void integrity_inode_free(struct inode *inode)
{
    struct integrity_iint_cache *iint;

    /* No S_IMA flag means no iint was ever allocated for this inode. */
    if (!IS_IMA(inode))
        return;

    spin_lock(&integrity_iint_lock);
    iint = __integrity_iint_find(inode);
    /*
     * Defensive: S_IMA set but no tree entry would previously have
     * dereferenced a NULL pointer in rb_erase() below.
     */
    if (!iint) {
        spin_unlock(&integrity_iint_lock);
        return;
    }
    rb_erase(&iint->rb_node, &integrity_iint_tree);
    spin_unlock(&integrity_iint_lock);

    iint_free(iint);
}
/*
 * Slab constructor: runs once per object when a new slab page is
 * allocated.  The explicit version/flags zeroing was redundant -- the
 * memset() already clears the whole object -- so only the non-zero
 * initial state is set afterwards.
 */
static void init_once(void *foo)
{
    struct integrity_iint_cache *iint = foo;

    memset(iint, 0, sizeof *iint);  /* version and flags become 0 here */
    mutex_init(&iint->mutex);
    iint->evm_status = INTEGRITY_UNKNOWN;
}
static int __init integrity_iintcache_init(void)
{
    /*
     * SLAB_PANIC: boot halts if the cache cannot be created, so no
     * error handling is needed here and 0 is always returned.
     */
    iint_cache =
        kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
                          0, SLAB_PANIC, init_once);
    /* Signal other integrity users that the iint cache is ready. */
    iint_initialized = 1;
    return 0;
}
security_initcall(integrity_iintcache_init);
| gpl-2.0 |
andr00ib/kernel_v30c | drivers/mtd/nand/spia.c | 8023 | 4362 | /*
* drivers/mtd/nand/spia.c
*
* Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
*
* 10-29-2001 TG change to support hardwarespecific access
* to controllines (due to change in nand.c)
* page_cache added
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Overview:
* This is a device driver for the NAND flash device found on the
* SPIA board which utilizes the Toshiba TC58V64AFT part. This is
* a 64Mibit (8MiB x 8 bits) NAND flash device.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
/*
* MTD structure for SPIA board
*/
static struct mtd_info *spia_mtd = NULL;
/*
* Values specific to the SPIA board (used with EP7212 processor)
*/
#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
#define SPIA_PEDR 0x0080 /*
* IO offset to Port E data register
* where the CLE, ALE and NCE pins
* are wired to.
*/
#define SPIA_PEDDR 0x00c0 /*
* IO offset to Port E data direction
* register so we can control the IO
* lines.
*/
/*
* Module stuff
*/
static int spia_io_base = SPIA_IO_BASE;
static int spia_fio_base = SPIA_FIO_BASE;
static int spia_pedr = SPIA_PEDR;
static int spia_peddr = SPIA_PEDDR;
module_param(spia_io_base, int, 0);
module_param(spia_fio_base, int, 0);
module_param(spia_pedr, int, 0);
module_param(spia_peddr, int, 0);
/*
* Define partitions for flash device
*/
static const struct mtd_partition partition_info[] = {
{
.name = "SPIA flash partition 1",
.offset = 0,
.size = 2 * 1024 * 1024},
{
.name = "SPIA flash partition 2",
.offset = 2 * 1024 * 1024,
.size = 6 * 1024 * 1024}
};
#define NUM_PARTITIONS 2
/*
* hardware specific access to control-lines
*
* ctrl:
* NAND_CNE: bit 0 -> bit 2
* NAND_CLE: bit 1 -> bit 0
* NAND_ALE: bit 2 -> bit 1
*/
static void spia_hwcontrol(struct mtd_info *mtd, int cmd)
{
struct nand_chip *chip = mtd->priv;
if (ctrl & NAND_CTRL_CHANGE) {
void __iomem *addr = spia_io_base + spia_pedr;
unsigned char bits;
bits = (ctrl & NAND_CNE) << 2;
bits |= (ctrl & NAND_CLE | NAND_ALE) >> 1;
writeb((readb(addr) & ~0x7) | bits, addr);
}
if (cmd != NAND_CMD_NONE)
writeb(cmd, chip->IO_ADDR_W);
}
/*
* Main initialization routine
*/
/*
 * Main initialization routine: allocate the MTD/NAND structures, wire
 * up the board-specific I/O addresses and register the partitions.
 * Returns 0 on success, -ENOMEM or -ENXIO on failure.
 */
static int __init spia_init(void)
{
    struct nand_chip *this;

    /*
     * One zeroed allocation holds both the mtd_info and the nand_chip;
     * kzalloc() replaces the old kmalloc() + two memset() calls.
     */
    spia_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
                       GFP_KERNEL);
    if (!spia_mtd) {
        printk("Unable to allocate SPIA NAND MTD device structure.\n");
        return -ENOMEM;
    }

    /* The nand_chip lives immediately after the mtd_info. */
    this = (struct nand_chip *)(&spia_mtd[1]);

    /* Link the private data with the MTD structure */
    spia_mtd->priv = this;
    spia_mtd->owner = THIS_MODULE;

    /*
     * Set GPIO Port E control register so that the pins are configured
     * to be outputs for controlling the NAND flash.
     */
    (*(volatile unsigned char *)(spia_io_base + spia_peddr)) = 0x07;

    /* Set address of NAND IO lines */
    this->IO_ADDR_R = (void __iomem *)spia_fio_base;
    this->IO_ADDR_W = (void __iomem *)spia_fio_base;
    /* Set address of hardware control function */
    this->cmd_ctrl = spia_hwcontrol;
    /* 15 us command delay time */
    this->chip_delay = 15;

    /* Scan to find existence of the device */
    if (nand_scan(spia_mtd, 1)) {
        kfree(spia_mtd);
        return -ENXIO;
    }

    /* Register the partitions */
    mtd_device_register(spia_mtd, partition_info, NUM_PARTITIONS);

    /* Return happy */
    return 0;
}
module_init(spia_init);
/*
* Clean up routine
*/
static void __exit spia_cleanup(void)
{
    /* Release resources, unregister device (also unregisters partitions) */
    nand_release(spia_mtd);
    /* Free the MTD device structure (nand_chip shares the allocation) */
    kfree(spia_mtd);
}
module_exit(spia_cleanup);
MODULE_LICENSE("GPL");
/* Fixed: author string was missing the closing '>' on the email address. */
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
| gpl-2.0 |
MoKee/android_kernel_samsung_smdk4210 | sound/oss/pas2_mixer.c | 12887 | 8352 |
/*
* sound/oss/pas2_mixer.c
*
* Mixer routines for the Pro Audio Spectrum cards.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*/
/*
* Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
* Bartlomiej Zolnierkiewicz : added __init to pas_init_mixer()
*/
#include <linux/init.h>
#include "sound_config.h"
#include "pas2.h"
#ifndef DEB
#define DEB(what) /* (what) */
#endif
extern int pas_translate_code;
extern char pas_model;
extern int *pas_osp;
extern int pas_audiodev;
static int rec_devices = (SOUND_MASK_MIC); /* Default recording source */
static int mode_control;
#define POSSIBLE_RECORDING_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
SOUND_MASK_CD | SOUND_MASK_ALTPCM)
#define SUPPORTED_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
SOUND_MASK_CD | SOUND_MASK_ALTPCM | SOUND_MASK_IMIX | \
SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_RECLEV)
static int *levels;
static int default_levels[32] =
{
0x3232, /* Master Volume */
0x3232, /* Bass */
0x3232, /* Treble */
0x5050, /* FM */
0x4b4b, /* PCM */
0x3232, /* PC Speaker */
0x4b4b, /* Ext Line */
0x4b4b, /* Mic */
0x4b4b, /* CD */
0x6464, /* Recording monitor */
0x4b4b, /* SB PCM */
0x6464 /* Recording level */
};
/*
 * Write one byte to a PAS mixer register, working around the
 * Revision D (pas_model == 4) MVA508 interface problem.
 */
void
mix_write(unsigned char data, int ioaddr)
{
    /*
     * The Revision D cards have a problem with their MVA508 interface. The
     * kludge-o-rama fix is to make a 16-bit quantity with identical LSB and
     * MSBs out of the output byte and to do a 16-bit out to the mixer port -
     * 1. We need to do this because it isn't timing problem but chip access
     * sequence problem.
     */
    if (pas_model == 4)
    {
        outw(data | (data << 8), (ioaddr + pas_translate_code) - 1);
        /* Dummy write to port 0; NOTE(review): presumably part of the
         * access-sequence kludge above -- confirm against PAS docs. */
        outb((0x80), 0);
    } else
        pas_write(data, ioaddr);
}
/*
 * Program one mixer channel and return the packed (left | right << 8)
 * level pair that was set.  @left_vol/@right_vol are on a 0-100 scale
 * and are rescaled to the register's full-scale value @div; @bits is
 * the hardware channel code; @mixer (0 or 0x20) selects the output or
 * recording-input routing for 0x10-type channels.
 */
static int
mixer_output(int right_vol, int left_vol, int div, int bits,
             int mixer)  /* Input or output mixer */
{
    int left = left_vol * div / 100;
    int right = right_vol * div / 100;

    if (bits & 0x10)
    {
        left |= mixer;
        right |= mixer;
    }
    if (bits == 0x03 || bits == 0x04)
    {
        /* Bass (0x03) and treble (0x04) are mono: program only the
         * left value and report it for both sides. */
        mix_write(0x80 | bits, 0x078B);
        mix_write(left, 0x078B);
        right_vol = left_vol;
    } else
    {
        /* Stereo: select-and-write left (0x20), then right (0x40). */
        mix_write(0x80 | 0x20 | bits, 0x078B);
        mix_write(left, 0x078B);
        mix_write(0x80 | 0x40 | bits, 0x078B);
        mix_write(right, 0x078B);
    }
    return (left_vol | (right_vol << 8));
}
/* Write the mode control register (code 0x05) and cache the new value. */
static void
set_mode(int new_mode)
{
    mix_write(0x80 | 0x05, 0x078B);
    mix_write(new_mode, 0x078B);
    mode_control = new_mode;
}
/*
 * Program one mixer control and cache the result in levels[].
 * @level packs the left volume in bits 0-6 and the right volume in
 * bits 8-14 (0-100 scale).  Returns the value stored, the new
 * recording mask for SOUND_MIXER_RECSRC, or -EINVAL.
 */
static int
pas_mixer_set(int whichDev, unsigned int level)
{
    int left, right, devmask, changed, i, mixer = 0;

    DEB(printk("static int pas_mixer_set(int whichDev = %d, unsigned int level = %X)\n", whichDev, level));

    /* Unpack the left/right halves of the packed level word. */
    left = level & 0x7f;
    right = (level & 0x7f00) >> 8;

    /* Channels that are active recording sources get routed via the
     * recording (0x20) side of the mixer. */
    if (whichDev < SOUND_MIXER_NRDEVICES) {
        if ((1 << whichDev) & rec_devices)
            mixer = 0x20;
        else
            mixer = 0x00;
    }

    switch (whichDev)
    {
    case SOUND_MIXER_VOLUME:    /* Master volume (0-63) */
        levels[whichDev] = mixer_output(right, left, 63, 0x01, 0);
        break;

    /*
     * Note! Bass and Treble are mono devices. Will use just the left
     * channel.
     */
    case SOUND_MIXER_BASS:  /* Bass (0-12) */
        levels[whichDev] = mixer_output(right, left, 12, 0x03, 0);
        break;
    case SOUND_MIXER_TREBLE:    /* Treble (0-12) */
        levels[whichDev] = mixer_output(right, left, 12, 0x04, 0);
        break;

    case SOUND_MIXER_SYNTH: /* Internal synthesizer (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x00, mixer);
        break;
    case SOUND_MIXER_PCM:   /* PAS PCM (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x05, mixer);
        break;
    case SOUND_MIXER_ALTPCM:    /* SB PCM (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x07, mixer);
        break;
    case SOUND_MIXER_SPEAKER:   /* PC speaker (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x06, mixer);
        break;
    case SOUND_MIXER_LINE:  /* External line (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x02, mixer);
        break;
    case SOUND_MIXER_CD:    /* CD (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x03, mixer);
        break;
    case SOUND_MIXER_MIC:   /* External microphone (0-31) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x04, mixer);
        break;
    case SOUND_MIXER_IMIX:  /* Recording monitor (0-31) (Output mixer only) */
        levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x01,
                                        0x00);
        break;
    case SOUND_MIXER_RECLEV:    /* Recording level (0-15) */
        levels[whichDev] = mixer_output(right, left, 15, 0x02, 0);
        break;

    case SOUND_MIXER_RECSRC:
        /* Re-program every channel whose routing changed, then report
         * the new recording-source mask. */
        devmask = level & POSSIBLE_RECORDING_DEVICES;

        changed = devmask ^ rec_devices;
        rec_devices = devmask;

        for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
            if (changed & (1 << i))
            {
                pas_mixer_set(i, levels[i]);
            }
        return rec_devices;
        break;

    default:
        return -EINVAL;
    }
    return (levels[whichDev]);
}
/*****/
static void
pas_mixer_reset(void)
{
    int dev;

    DEB(printk("pas2_mixer.c: void pas_mixer_reset(void)\n"));

    /* Reprogram every mixer channel from the cached level table. */
    for (dev = 0; dev < SOUND_MIXER_NRDEVICES; dev++)
        pas_mixer_set(dev, levels[dev]);

    set_mode(0x04 | 0x01);
}
/*
 * OSS mixer ioctl handler: three card-private ioctls (loudness,
 * enhanced stereo, mute) plus the standard 'M' mixer ioctl family.
 * Returns 0/-EFAULT from the user copies, or -EINVAL for unknown cmds.
 */
static int pas_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
{
    int level,v ;
    int __user *p = (int __user *)arg;

    DEB(printk("pas2_mixer.c: int pas_mixer_ioctl(unsigned int cmd = %X, unsigned int arg = %X)\n", cmd, arg));
    if (cmd == SOUND_MIXER_PRIVATE1) { /* Set loudness bit */
        if (get_user(level, p))
            return -EFAULT;
        if (level == -1)  /* Return current settings */
            level = (mode_control & 0x04);
        else {
            mode_control &= ~0x04;
            if (level)
                mode_control |= 0x04;
            set_mode(mode_control);
        }
        level = !!level;
        return put_user(level, p);
    }
    if (cmd == SOUND_MIXER_PRIVATE2) { /* Set enhance bit */
        if (get_user(level, p))
            return -EFAULT;
        if (level == -1) { /* Return current settings */
            if (!(mode_control & 0x03))
                level = 0;
            else
                level = ((mode_control & 0x03) + 1) * 20;
        } else {
            int i = 0;

            /* Map the requested percentage onto the two enhance bits. */
            level &= 0x7f;
            if (level)
                i = (level / 20) - 1;
            mode_control &= ~0x03;
            mode_control |= i & 0x03;
            set_mode(mode_control);
            if (i)
                i = (i + 1) * 20;
            level = i;
        }
        return put_user(level, p);
    }
    if (cmd == SOUND_MIXER_PRIVATE3) { /* Set mute bit */
        if (get_user(level, p))
            return -EFAULT;
        if (level == -1) /* Return current settings */
            level = !(pas_read(0x0B8A) & 0x20);
        else {
            /* Register 0x0B8A: bit 0x20 cleared means muted. */
            if (level)
                pas_write(pas_read(0x0B8A) & (~0x20), 0x0B8A);
            else
                pas_write(pas_read(0x0B8A) | 0x20, 0x0B8A);

            level = !(pas_read(0x0B8A) & 0x20);
        }
        return put_user(level, p);
    }
    if (((cmd >> 8) & 0xff) == 'M') {
        if (get_user(v, p))
            return -EFAULT;
        if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
            v = pas_mixer_set(cmd & 0xff, v);
        } else {
            /* Read-only queries are answered from cached state. */
            switch (cmd & 0xff) {
            case SOUND_MIXER_RECSRC:
                v = rec_devices;
                break;

            case SOUND_MIXER_STEREODEVS:
                v = SUPPORTED_MIXER_DEVICES & ~(SOUND_MASK_BASS | SOUND_MASK_TREBLE);
                break;

            case SOUND_MIXER_DEVMASK:
                v = SUPPORTED_MIXER_DEVICES;
                break;

            case SOUND_MIXER_RECMASK:
                v = POSSIBLE_RECORDING_DEVICES & SUPPORTED_MIXER_DEVICES;
                break;

            case SOUND_MIXER_CAPS:
                v = 0;  /* No special capabilities */
                break;

            default:
                v = levels[cmd & 0xff];
                break;
            }
        }
        return put_user(v, p);
    }
    return -EINVAL;
}
/* OSS mixer device operations exported for the PAS16 card. */
static struct mixer_operations pas_mixer_operations =
{
    .owner  = THIS_MODULE,
    .id = "PAS16",
    .name   = "Pro Audio Spectrum 16",
    .ioctl  = pas_mixer_ioctl
};
/*
 * Initialise the PAS16 mixer: load (possibly previously saved) volume
 * levels, program the hardware, and register the mixer device with the
 * OSS core.  Always returns 1.
 */
int __init
pas_init_mixer(void)
{
    int d;

    levels = load_mixer_volumes("PAS16_1", default_levels, 1);
    pas_mixer_reset();
    /* sound_alloc_mixerdev() returns -1 when no slot is available;
     * in that case the mixer simply is not exposed. */
    if ((d = sound_alloc_mixerdev()) != -1)
    {
        audio_devs[pas_audiodev]->mixer_dev = d;
        mixer_devs[d] = &pas_mixer_operations;
    }
    return 1;
}
| gpl-2.0 |
sjurbren/modem-ipc | drivers/hwmon/ibmaem.c | 88 | 27679 | /*
* A hwmon driver for the IBM System Director Active Energy Manager (AEM)
* temperature/power/energy sensors and capping functionality.
* Copyright (C) 2008 IBM
*
* Author: Darrick J. Wong <djwong@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ipmi.h>
#include <linux/module.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/kdev_t.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/math64.h>
#include <linux/time.h>
#define REFRESH_INTERVAL (HZ)
#define IPMI_TIMEOUT (30 * HZ)
#define DRVNAME "aem"
#define AEM_NETFN 0x2E
#define AEM_FIND_FW_CMD 0x80
#define AEM_ELEMENT_CMD 0x81
#define AEM_FW_INSTANCE_CMD 0x82
#define AEM_READ_ELEMENT_CFG 0x80
#define AEM_READ_BUFFER 0x81
#define AEM_READ_REGISTER 0x82
#define AEM_WRITE_REGISTER 0x83
#define AEM_SET_REG_MASK 0x84
#define AEM_CLEAR_REG_MASK 0x85
#define AEM_READ_ELEMENT_CFG2 0x86
#define AEM_CONTROL_ELEMENT 0
#define AEM_ENERGY_ELEMENT 1
#define AEM_CLOCK_ELEMENT 4
#define AEM_POWER_CAP_ELEMENT 7
#define AEM_EXHAUST_ELEMENT 9
#define AEM_POWER_ELEMENT 10
#define AEM_MODULE_TYPE_ID 0x0001
#define AEM2_NUM_ENERGY_REGS 2
#define AEM2_NUM_PCAP_REGS 6
#define AEM2_NUM_TEMP_REGS 2
#define AEM2_NUM_SENSORS 14
#define AEM1_NUM_ENERGY_REGS 1
#define AEM1_NUM_SENSORS 3
/* AEM 2.x has more energy registers */
#define AEM_NUM_ENERGY_REGS AEM2_NUM_ENERGY_REGS
/* AEM 2.x needs more sensor files */
#define AEM_NUM_SENSORS AEM2_NUM_SENSORS
#define POWER_CAP 0
#define POWER_CAP_MAX_HOTPLUG 1
#define POWER_CAP_MAX 2
#define POWER_CAP_MIN_WARNING 3
#define POWER_CAP_MIN 4
#define POWER_AUX 5
#define AEM_DEFAULT_POWER_INTERVAL 1000
#define AEM_MIN_POWER_INTERVAL 200
#define UJ_PER_MJ 1000L
static DEFINE_IDA(aem_ida);
static struct platform_driver aem_driver = {
.driver = {
.name = DRVNAME,
.bus = &platform_bus_type,
}
};
struct aem_ipmi_data {
struct completion read_complete;
struct ipmi_addr address;
ipmi_user_t user;
int interface;
struct kernel_ipmi_msg tx_message;
long tx_msgid;
void *rx_msg_data;
unsigned short rx_msg_len;
unsigned char rx_result;
int rx_recv_type;
struct device *bmc_device;
};
struct aem_ro_sensor_template {
char *label;
ssize_t (*show)(struct device *dev,
struct device_attribute *devattr,
char *buf);
int index;
};
struct aem_rw_sensor_template {
char *label;
ssize_t (*show)(struct device *dev,
struct device_attribute *devattr,
char *buf);
ssize_t (*set)(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count);
int index;
};
struct aem_data {
struct list_head list;
struct device *hwmon_dev;
struct platform_device *pdev;
struct mutex lock;
char valid;
unsigned long last_updated; /* In jiffies */
u8 ver_major;
u8 ver_minor;
u8 module_handle;
int id;
struct aem_ipmi_data ipmi;
/* Function and buffer to update sensors */
void (*update)(struct aem_data *data);
struct aem_read_sensor_resp *rs_resp;
/*
* AEM 1.x sensors:
* Available sensors:
* Energy meter
* Power meter
*
* AEM 2.x sensors:
* Two energy meters
* Two power meters
* Two temperature sensors
* Six power cap registers
*/
/* sysfs attrs */
struct sensor_device_attribute sensors[AEM_NUM_SENSORS];
/* energy use in mJ */
u64 energy[AEM_NUM_ENERGY_REGS];
/* power sampling interval in ms */
unsigned long power_period[AEM_NUM_ENERGY_REGS];
/* Everything past here is for AEM2 only */
/* power caps in dW */
u16 pcap[AEM2_NUM_PCAP_REGS];
/* exhaust temperature in C */
u8 temp[AEM2_NUM_TEMP_REGS];
};
/* Data structures returned by the AEM firmware */
struct aem_iana_id {
u8 bytes[3];
};
static struct aem_iana_id system_x_id = {
.bytes = {0x4D, 0x4F, 0x00}
};
/* These are used to find AEM1 instances */
struct aem_find_firmware_req {
struct aem_iana_id id;
u8 rsvd;
__be16 index;
__be16 module_type_id;
} __packed;
struct aem_find_firmware_resp {
struct aem_iana_id id;
u8 num_instances;
} __packed;
/* These are used to find AEM2 instances */
struct aem_find_instance_req {
struct aem_iana_id id;
u8 instance_number;
__be16 module_type_id;
} __packed;
struct aem_find_instance_resp {
struct aem_iana_id id;
u8 num_instances;
u8 major;
u8 minor;
u8 module_handle;
u16 record_id;
} __packed;
/* These are used to query sensors */
struct aem_read_sensor_req {
struct aem_iana_id id;
u8 module_handle;
u8 element;
u8 subcommand;
u8 reg;
u8 rx_buf_size;
} __packed;
struct aem_read_sensor_resp {
struct aem_iana_id id;
u8 bytes[0];
} __packed;
/* Data structures to talk to the IPMI layer */
struct aem_driver_data {
struct list_head aem_devices;
struct ipmi_smi_watcher bmc_events;
struct ipmi_user_hndl ipmi_hndlrs;
};
static void aem_register_bmc(int iface, struct device *dev);
static void aem_bmc_gone(int iface);
static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
static void aem_remove_sensors(struct aem_data *data);
static int aem1_find_sensors(struct aem_data *data);
static int aem2_find_sensors(struct aem_data *data);
static void update_aem1_sensors(struct aem_data *data);
static void update_aem2_sensors(struct aem_data *data);
static struct aem_driver_data driver_data = {
.aem_devices = LIST_HEAD_INIT(driver_data.aem_devices),
.bmc_events = {
.owner = THIS_MODULE,
.new_smi = aem_register_bmc,
.smi_gone = aem_bmc_gone,
},
.ipmi_hndlrs = {
.ipmi_recv_hndl = aem_msg_handler,
},
};
/* Functions to talk to the IPMI layer */
/* Initialize IPMI address, message buffers and user data */
/* Returns 0 on success, -EACCES if the IPMI user cannot be created. */
static int aem_init_ipmi_data(struct aem_ipmi_data *data, int iface,
                              struct device *bmc)
{
    int err;

    init_completion(&data->read_complete);
    data->bmc_device = bmc;

    /* Initialize IPMI address: talk straight to the BMC system interface */
    data->address.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    data->address.channel = IPMI_BMC_CHANNEL;
    data->address.data[0] = 0;
    data->interface = iface;

    /* Initialize message buffers; every request uses the AEM netfn */
    data->tx_msgid = 0;
    data->tx_message.netfn = AEM_NETFN;

    /* Create IPMI messaging interface user */
    err = ipmi_create_user(data->interface, &driver_data.ipmi_hndlrs,
                           data, &data->user);
    if (err < 0) {
        dev_err(bmc, "Unable to register user with IPMI "
            "interface %d\n", data->interface);
        return -EACCES;
    }

    return 0;
}
/* Send an IPMI command */
/* Validate the target address and fire off one IPMI request. */
static int aem_send_message(struct aem_ipmi_data *data)
{
    int err;

    err = ipmi_validate_addr(&data->address, sizeof(data->address));
    if (err) {
        dev_err(data->bmc_device, "validate_addr=%x\n", err);
        return err;
    }

    /* New message id so the handler can match the reply to us. */
    data->tx_msgid++;
    err = ipmi_request_settime(data->user, &data->address, data->tx_msgid,
                               &data->tx_message, data, 0, 0, 0);
    if (err) {
        dev_err(data->bmc_device, "request_settime=%x\n", err);
        return err;
    }

    return 0;
}
/* Dispatch IPMI messages to callers */
static void aem_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
    unsigned short rx_len;
    struct aem_ipmi_data *data = user_msg_data;

    /* Only the reply to the request we just sent is of interest. */
    if (msg->msgid != data->tx_msgid) {
        dev_err(data->bmc_device, "Mismatch between received msgid "
            "(%02x) and transmitted msgid (%02x)!\n",
            (int)msg->msgid,
            (int)data->tx_msgid);
        ipmi_free_recv_msg(msg);
        return;
    }

    data->rx_recv_type = msg->recv_type;
    /* Byte 0 of the payload is the IPMI completion code. */
    if (msg->msg.data_len > 0)
        data->rx_result = msg->msg.data[0];
    else
        data->rx_result = IPMI_UNKNOWN_ERR_COMPLETION_CODE;

    /* Copy the remaining payload, clipped to the caller's buffer size. */
    if (msg->msg.data_len > 1) {
        rx_len = msg->msg.data_len - 1;
        if (data->rx_msg_len < rx_len)
            rx_len = data->rx_msg_len;
        data->rx_msg_len = rx_len;
        memcpy(data->rx_msg_data, msg->msg.data + 1, data->rx_msg_len);
    } else
        data->rx_msg_len = 0;

    ipmi_free_recv_msg(msg);
    /* Wake the thread blocked in wait_for_completion_timeout(). */
    complete(&data->read_complete);
}
/* Sensor support functions */
/* Read a sensor value; must be called with data->lock held */
/* Returns 0 on success, -EINVAL/-ETIMEDOUT/-ENOENT on failure. */
static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
                           void *buf, size_t size)
{
    int rs_size, res;
    struct aem_read_sensor_req rs_req;
    /* Use preallocated rx buffer */
    struct aem_read_sensor_resp *rs_resp = data->rs_resp;
    struct aem_ipmi_data *ipmi = &data->ipmi;

    /* AEM registers are 1, 2, 4 or 8 bytes */
    switch (size) {
    case 1:
    case 2:
    case 4:
    case 8:
        break;
    default:
        return -EINVAL;
    }

    /* Build the element-register read request. */
    rs_req.id = system_x_id;
    rs_req.module_handle = data->module_handle;
    rs_req.element = elt;
    rs_req.subcommand = AEM_READ_REGISTER;
    rs_req.reg = reg;
    rs_req.rx_buf_size = size;

    ipmi->tx_message.cmd = AEM_ELEMENT_CMD;
    ipmi->tx_message.data = (char *)&rs_req;
    ipmi->tx_message.data_len = sizeof(rs_req);

    rs_size = sizeof(*rs_resp) + size;
    ipmi->rx_msg_data = rs_resp;
    ipmi->rx_msg_len = rs_size;

    aem_send_message(ipmi);

    /* Block until aem_msg_handler() completes the exchange. */
    res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
    if (!res) {
        res = -ETIMEDOUT;
        goto out;
    }

    /* Reject failed, short, or foreign (wrong IANA id) responses. */
    if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
        memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
        res = -ENOENT;
        goto out;
    }

    /* Register contents arrive big-endian; convert to host order. */
    switch (size) {
    case 1: {
        u8 *x = buf;

        *x = rs_resp->bytes[0];
        break;
    }
    case 2: {
        u16 *x = buf;

        *x = be16_to_cpup((__be16 *)rs_resp->bytes);
        break;
    }
    case 4: {
        u32 *x = buf;

        *x = be32_to_cpup((__be32 *)rs_resp->bytes);
        break;
    }
    case 8: {
        u64 *x = buf;

        *x = be64_to_cpup((__be64 *)rs_resp->bytes);
        break;
    }
    }
    res = 0;
out:
    return res;
}
/* Update AEM energy registers */
/* Fetch one 64-bit energy meter reading (mJ) into the cache. */
static void update_aem_energy_one(struct aem_data *data, int which)
{
    aem_read_sensor(data, AEM_ENERGY_ELEMENT, which,
                    &data->energy[which], 8);
}

/* Refresh all energy meters; meter 1 exists only on AEM 2.x firmware. */
static void update_aem_energy(struct aem_data *data)
{
    update_aem_energy_one(data, 0);
    if (data->ver_major >= 2)
        update_aem_energy_one(data, 1);
}
/* Update all AEM1 sensors */
/* Update all AEM1 sensors, rate-limited to one refresh per interval. */
static void update_aem1_sensors(struct aem_data *data)
{
    int cache_fresh;

    mutex_lock(&data->lock);
    cache_fresh = data->valid &&
        time_before(jiffies, data->last_updated + REFRESH_INTERVAL);
    if (!cache_fresh)
        update_aem_energy(data);
    mutex_unlock(&data->lock);
}
/* Update all AEM2 sensors */
static void update_aem2_sensors(struct aem_data *data)
{
    int i;

    mutex_lock(&data->lock);
    /* Rate-limit hardware access to one refresh per REFRESH_INTERVAL. */
    if (time_before(jiffies, data->last_updated + REFRESH_INTERVAL) &&
        data->valid)
        goto out;

    update_aem_energy(data);
    /* Two exhaust-temperature registers... */
    aem_read_sensor(data, AEM_EXHAUST_ELEMENT, 0, &data->temp[0], 1);
    aem_read_sensor(data, AEM_EXHAUST_ELEMENT, 1, &data->temp[1], 1);

    /* ...and the six power-cap registers (POWER_CAP..POWER_AUX). */
    for (i = POWER_CAP; i <= POWER_AUX; i++)
        aem_read_sensor(data, AEM_POWER_CAP_ELEMENT, i,
                        &data->pcap[i], 2);
out:
    mutex_unlock(&data->lock);
}
/* Delete an AEM instance */
static void aem_delete(struct aem_data *data)
{
    /* Unlink first so interface scans can no longer find this instance,
     * then tear everything down in reverse order of creation. */
    list_del(&data->list);
    aem_remove_sensors(data);
    kfree(data->rs_resp);
    hwmon_device_unregister(data->hwmon_dev);
    ipmi_destroy_user(data->ipmi.user);
    platform_set_drvdata(data->pdev, NULL);
    platform_device_unregister(data->pdev);
    ida_simple_remove(&aem_ida, data->id);
    kfree(data);
}
/* Probe functions for AEM1 devices */
/* Retrieve version and module handle for an AEM1 instance */
/*
 * Ask the BMC how many AEM1 firmware instances it hosts.
 * Returns the instance count, or -ETIMEDOUT / -ENOENT on a bad reply.
 */
static int aem_find_aem1_count(struct aem_ipmi_data *data)
{
    int res;
    struct aem_find_firmware_req ff_req;
    struct aem_find_firmware_resp ff_resp;

    /* Identify ourselves via the System X IANA id. */
    ff_req.id = system_x_id;
    ff_req.index = 0;
    ff_req.module_type_id = cpu_to_be16(AEM_MODULE_TYPE_ID);

    data->tx_message.cmd = AEM_FIND_FW_CMD;
    data->tx_message.data = (char *)&ff_req;
    data->tx_message.data_len = sizeof(ff_req);

    data->rx_msg_data = &ff_resp;
    data->rx_msg_len = sizeof(ff_resp);

    aem_send_message(data);

    res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT);
    if (!res)
        return -ETIMEDOUT;

    /* Reject failed, short, or foreign responses. */
    if (data->rx_result || data->rx_msg_len != sizeof(ff_resp) ||
        memcmp(&ff_resp.id, &system_x_id, sizeof(system_x_id)))
        return -ENOENT;

    return ff_resp.num_instances;
}
/* Find and initialize one AEM1 instance */
/*
 * Allocate and register one AEM1 instance: platform device, private
 * IPMI user, hwmon device and sysfs sensors.  On any failure the goto
 * ladder below unwinds exactly the steps already completed.
 * Returns 0 on success or a negative errno.
 */
static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
{
    struct aem_data *data;
    int i;
    int res = -ENOMEM;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return res;
    mutex_init(&data->lock);

    /* Copy instance data; AEM1 firmware is always reported as v1.0 */
    data->ver_major = 1;
    data->ver_minor = 0;
    data->module_handle = module_handle;
    for (i = 0; i < AEM1_NUM_ENERGY_REGS; i++)
        data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL;

    /* Create sub-device for this fw instance */
    data->id = ida_simple_get(&aem_ida, 0, 0, GFP_KERNEL);
    if (data->id < 0)
        goto id_err;

    data->pdev = platform_device_alloc(DRVNAME, data->id);
    if (!data->pdev)
        goto dev_err;
    data->pdev->dev.driver = &aem_driver.driver;

    res = platform_device_add(data->pdev);
    if (res)
        goto ipmi_err;

    platform_set_drvdata(data->pdev, data);

    /* Set up a private IPMI interface user for this instance */
    res = aem_init_ipmi_data(&data->ipmi, probe->interface,
                             probe->bmc_device);
    if (res)
        goto ipmi_err;

    /* Register with hwmon */
    data->hwmon_dev = hwmon_device_register(&data->pdev->dev);
    if (IS_ERR(data->hwmon_dev)) {
        dev_err(&data->pdev->dev, "Unable to register hwmon "
            "device for IPMI interface %d\n",
            probe->interface);
        res = PTR_ERR(data->hwmon_dev);
        goto hwmon_reg_err;
    }

    data->update = update_aem1_sensors;
    /* Preallocated rx buffer for aem_read_sensor (max 8-byte payload) */
    data->rs_resp = kzalloc(sizeof(*(data->rs_resp)) + 8, GFP_KERNEL);
    if (!data->rs_resp) {
        res = -ENOMEM;
        goto alloc_resp_err;
    }

    /* Find sensors */
    res = aem1_find_sensors(data);
    if (res)
        goto sensor_err;

    /* Add to our list of AEM devices */
    list_add_tail(&data->list, &driver_data.aem_devices);

    dev_info(data->ipmi.bmc_device, "Found AEM v%d.%d at 0x%X\n",
             data->ver_major, data->ver_minor,
             data->module_handle);
    return 0;

sensor_err:
    kfree(data->rs_resp);
alloc_resp_err:
    hwmon_device_unregister(data->hwmon_dev);
hwmon_reg_err:
    ipmi_destroy_user(data->ipmi.user);
ipmi_err:
    platform_set_drvdata(data->pdev, NULL);
    platform_device_unregister(data->pdev);
dev_err:
    ida_simple_remove(&aem_ida, data->id);
id_err:
    kfree(data);
    return res;
}
/* Find and initialize all AEM1 instances */
/* Discover and initialize every AEM1 instance behind this BMC. */
static void aem_init_aem1(struct aem_ipmi_data *probe)
{
    int count = aem_find_aem1_count(probe);
    int idx;

    /* A negative count (probe error) simply skips the loop. */
    for (idx = 0; idx < count; idx++) {
        int err = aem_init_aem1_inst(probe, idx);

        if (err) {
            dev_err(probe->bmc_device,
                "Error %d initializing AEM1 0x%X\n",
                err, idx);
        }
    }
}
/* Probe functions for AEM2 devices */
/* Retrieve version and module handle for an AEM2 instance */
/*
 * Query the BMC for AEM2 instance @instance_num, filling @fi_resp with
 * its version and module handle.  Returns 0 on success, -ETIMEDOUT, or
 * -ENOENT when the reply is bad or the instance does not exist.
 */
static int aem_find_aem2(struct aem_ipmi_data *data,
                         struct aem_find_instance_resp *fi_resp,
                         int instance_num)
{
    int res;
    struct aem_find_instance_req fi_req;

    /* Identify ourselves via the System X IANA id. */
    fi_req.id = system_x_id;
    fi_req.instance_number = instance_num;
    fi_req.module_type_id = cpu_to_be16(AEM_MODULE_TYPE_ID);

    data->tx_message.cmd = AEM_FW_INSTANCE_CMD;
    data->tx_message.data = (char *)&fi_req;
    data->tx_message.data_len = sizeof(fi_req);

    data->rx_msg_data = fi_resp;
    data->rx_msg_len = sizeof(*fi_resp);

    aem_send_message(data);

    res = wait_for_completion_timeout(&data->read_complete, IPMI_TIMEOUT);
    if (!res)
        return -ETIMEDOUT;

    /* Reject failed/short/foreign replies and out-of-range instances. */
    if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) ||
        memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)) ||
        fi_resp->num_instances <= instance_num)
        return -ENOENT;

    return 0;
}
/* Find and initialize one AEM2 instance */
/*
 * Allocate and register one AEM2 instance described by @fi_resp:
 * platform device, private IPMI user, hwmon device and sysfs sensors.
 * The goto ladder unwinds exactly the steps already completed.
 * Returns 0 on success or a negative errno.
 */
static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
                              struct aem_find_instance_resp *fi_resp)
{
    struct aem_data *data;
    int i;
    int res = -ENOMEM;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return res;
    mutex_init(&data->lock);

    /* Copy instance data reported by the firmware */
    data->ver_major = fi_resp->major;
    data->ver_minor = fi_resp->minor;
    data->module_handle = fi_resp->module_handle;
    for (i = 0; i < AEM2_NUM_ENERGY_REGS; i++)
        data->power_period[i] = AEM_DEFAULT_POWER_INTERVAL;

    /* Create sub-device for this fw instance */
    data->id = ida_simple_get(&aem_ida, 0, 0, GFP_KERNEL);
    if (data->id < 0)
        goto id_err;

    data->pdev = platform_device_alloc(DRVNAME, data->id);
    if (!data->pdev)
        goto dev_err;
    data->pdev->dev.driver = &aem_driver.driver;

    res = platform_device_add(data->pdev);
    if (res)
        goto ipmi_err;

    platform_set_drvdata(data->pdev, data);

    /* Set up a private IPMI interface user for this instance */
    res = aem_init_ipmi_data(&data->ipmi, probe->interface,
                             probe->bmc_device);
    if (res)
        goto ipmi_err;

    /* Register with hwmon */
    data->hwmon_dev = hwmon_device_register(&data->pdev->dev);
    if (IS_ERR(data->hwmon_dev)) {
        dev_err(&data->pdev->dev, "Unable to register hwmon "
            "device for IPMI interface %d\n",
            probe->interface);
        res = PTR_ERR(data->hwmon_dev);
        goto hwmon_reg_err;
    }

    data->update = update_aem2_sensors;
    /* Preallocated rx buffer for aem_read_sensor (max 8-byte payload) */
    data->rs_resp = kzalloc(sizeof(*(data->rs_resp)) + 8, GFP_KERNEL);
    if (!data->rs_resp) {
        res = -ENOMEM;
        goto alloc_resp_err;
    }

    /* Find sensors */
    res = aem2_find_sensors(data);
    if (res)
        goto sensor_err;

    /* Add to our list of AEM devices */
    list_add_tail(&data->list, &driver_data.aem_devices);

    dev_info(data->ipmi.bmc_device, "Found AEM v%d.%d at 0x%X\n",
             data->ver_major, data->ver_minor,
             data->module_handle);
    return 0;

sensor_err:
    kfree(data->rs_resp);
alloc_resp_err:
    hwmon_device_unregister(data->hwmon_dev);
hwmon_reg_err:
    ipmi_destroy_user(data->ipmi.user);
ipmi_err:
    platform_set_drvdata(data->pdev, NULL);
    platform_device_unregister(data->pdev);
dev_err:
    ida_simple_remove(&aem_ida, data->id);
id_err:
    kfree(data);
    return res;
}
/* Find and initialize all AEM2 instances */
static void aem_init_aem2(struct aem_ipmi_data *probe)
{
	struct aem_find_instance_resp fi_resp;
	int instance_num;
	int err;

	/* Query instance 0, 1, ... until the firmware stops answering. */
	for (instance_num = 0;
	     !aem_find_aem2(probe, &fi_resp, instance_num);
	     instance_num++) {
		if (fi_resp.major != 2) {
			dev_err(probe->bmc_device, "Unknown AEM v%d; please "
				"report this to the maintainer.\n",
				fi_resp.major);
			continue;
		}

		/* Instance init failure is logged but does not stop the scan. */
		err = aem_init_aem2_inst(probe, &fi_resp);
		if (err)
			dev_err(probe->bmc_device,
				"Error %d initializing AEM2 0x%X\n",
				err, fi_resp.module_handle);
	}
}
/* Probe a BMC for AEM firmware instances */
static void aem_register_bmc(int iface, struct device *dev)
{
	struct aem_ipmi_data probe;

	/* A short-lived IPMI user is enough for probing; each discovered
	 * instance later sets up its own IPMI state. */
	if (aem_init_ipmi_data(&probe, iface, dev))
		return;

	/* Ignore probe errors; they won't cause problems */
	aem_init_aem1(&probe);
	aem_init_aem2(&probe);

	ipmi_destroy_user(probe.user);
}
/* Handle BMC deletion */
static void aem_bmc_gone(int iface)
{
struct aem_data *p1, *next1;
list_for_each_entry_safe(p1, next1, &driver_data.aem_devices, list)
if (p1->ipmi.interface == iface)
aem_delete(p1);
}
/* sysfs support functions */
/* AEM device name */
/* Report the device name: driver name plus the AEM major version. */
static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct aem_data *d = dev_get_drvdata(dev);

	return sprintf(buf, "%s%d\n", DRVNAME, d->ver_major);
}
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
/* AEM device version */
static ssize_t show_version(struct device *dev,
			    struct device_attribute *devattr,
			    char *buf)
{
	struct aem_data *d = dev_get_drvdata(dev);

	/* Firmware version as "major.minor". */
	return sprintf(buf, "%d.%d\n", d->ver_major, d->ver_minor);
}
static SENSOR_DEVICE_ATTR(version, S_IRUGO, show_version, NULL, 0);
/* Display power use */
/*
 * Average power is measured, not read: sample the energy register, sleep
 * for the configured averaging interval, sample again, and divide the
 * energy delta by the elapsed time.
 */
static ssize_t aem_show_power(struct device *dev,
			      struct device_attribute *devattr,
			      char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);
	u64 before, after, delta, time;
	signed long leftover;
	struct timespec b, a;

	mutex_lock(&data->lock);
	update_aem_energy_one(data, attr->index);
	getnstimeofday(&b);
	before = data->energy[attr->index];

	/* Sleep for the averaging interval (the lock is held for the whole
	 * measurement so nobody disturbs the energy register meanwhile). */
	leftover = schedule_timeout_interruptible(
			msecs_to_jiffies(data->power_period[attr->index])
		   );
	if (leftover) {
		/* Woken early by a signal: abort and report nothing. */
		mutex_unlock(&data->lock);
		return 0;
	}
	update_aem_energy_one(data, attr->index);
	getnstimeofday(&a);
	after = data->energy[attr->index];
	mutex_unlock(&data->lock);

	/* delta is scaled by UJ_PER_MJ and the quotient is energy/ns * s,
	 * i.e. the result is reported in microwatts. */
	time = timespec_to_ns(&a) - timespec_to_ns(&b);
	delta = (after - before) * UJ_PER_MJ;

	return sprintf(buf, "%llu\n",
		(unsigned long long)div64_u64(delta * NSEC_PER_SEC, time));
}
/* Display energy use */
static ssize_t aem_show_energy(struct device *dev,
			       struct device_attribute *devattr,
			       char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);
	unsigned long long energy;

	/* Refresh this energy register under the lock, then report it
	 * scaled by 1000. */
	mutex_lock(&data->lock);
	update_aem_energy_one(data, attr->index);
	energy = data->energy[attr->index];
	mutex_unlock(&data->lock);

	return sprintf(buf, "%llu\n", energy * 1000);
}
/* Display power interval registers */
static ssize_t aem_show_power_period(struct device *dev,
				     struct device_attribute *devattr,
				     char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);

	/* Refresh cached sensor state before reporting the interval. */
	data->update(data);

	return sprintf(buf, "%lu\n", data->power_period[attr->index]);
}
/* Set power interval registers */
static ssize_t aem_set_power_period(struct device *dev,
				    struct device_attribute *devattr,
				    const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);
	unsigned long period;
	int err;

	err = kstrtoul(buf, 10, &period);
	if (err)
		return err;

	/* Reject intervals below the supported minimum. */
	if (period < AEM_MIN_POWER_INTERVAL)
		return -EINVAL;

	mutex_lock(&data->lock);
	data->power_period[attr->index] = period;
	mutex_unlock(&data->lock);

	return count;
}
/* Discover sensors on an AEM device */
/*
 * Create sysfs attribute files for every entry in the NULL-terminated
 * read-only and read-write template tables, filling data->sensors as we
 * go, then add the common "name" and "version" attributes.  Returns 0 on
 * success or the error from device_create_file(); on error, everything
 * created so far is removed again.
 */
static int aem_register_sensors(struct aem_data *data,
				struct aem_ro_sensor_template *ro,
				struct aem_rw_sensor_template *rw)
{
	struct device *dev = &data->pdev->dev;
	struct sensor_device_attribute *sensors = data->sensors;
	int err;

	/* Set up read-only sensors */
	while (ro->label) {
		sysfs_attr_init(&sensors->dev_attr.attr);
		sensors->dev_attr.attr.name = ro->label;
		sensors->dev_attr.attr.mode = S_IRUGO;
		sensors->dev_attr.show = ro->show;
		sensors->index = ro->index;

		err = device_create_file(dev, &sensors->dev_attr);
		if (err) {
			/* Clear the name so aem_remove_sensors() skips this
			 * never-created attribute during unwind. */
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		ro++;
	}

	/* Set up read-write sensors */
	while (rw->label) {
		sysfs_attr_init(&sensors->dev_attr.attr);
		sensors->dev_attr.attr.name = rw->label;
		sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
		sensors->dev_attr.show = rw->show;
		sensors->dev_attr.store = rw->set;
		sensors->index = rw->index;

		err = device_create_file(dev, &sensors->dev_attr);
		if (err) {
			/* Same unwind marker as above. */
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		rw++;
	}

	err = device_create_file(dev, &sensor_dev_attr_name.dev_attr);
	if (err)
		goto error;

	err = device_create_file(dev, &sensor_dev_attr_version.dev_attr);

	return err;

error:
	/* Remove every attribute that was successfully created. */
	aem_remove_sensors(data);
	return err;
}
/* sysfs support functions for AEM2 sensors */

/* Display temperature use */
static ssize_t aem2_show_temp(struct device *dev,
			      struct device_attribute *devattr,
			      char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);

	data->update(data);

	/* Scale by 1000 (hwmon temperature attributes are in millidegrees). */
	return sprintf(buf, "%u\n", data->temp[attr->index] * 1000);
}
/* Display power-capping registers */
static ssize_t aem2_show_pcap_value(struct device *dev,
				    struct device_attribute *devattr,
				    char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct aem_data *data = dev_get_drvdata(dev);

	data->update(data);

	/* Register value scaled by 100000 for the hwmon power interface. */
	return sprintf(buf, "%u\n", data->pcap[attr->index] * 100000);
}
/* Remove sensors attached to an AEM device */
static void aem_remove_sensors(struct aem_data *data)
{
int i;
for (i = 0; i < AEM_NUM_SENSORS; i++) {
if (!data->sensors[i].dev_attr.attr.name)
continue;
device_remove_file(&data->pdev->dev,
&data->sensors[i].dev_attr);
}
device_remove_file(&data->pdev->dev,
&sensor_dev_attr_name.dev_attr);
device_remove_file(&data->pdev->dev,
&sensor_dev_attr_version.dev_attr);
}
/* Sensor probe functions */

/* Description of AEM1 sensors */
/* Read-only entries: {sysfs attribute name, show handler, register index} */
static struct aem_ro_sensor_template aem1_ro_sensors[] = {
{"energy1_input",  aem_show_energy, 0},
{"power1_average", aem_show_power,  0},
{NULL,             NULL,            0},
};

/* Read-write entries: {sysfs attribute name, show handler, store handler,
 * register index} */
static struct aem_rw_sensor_template aem1_rw_sensors[] = {
{"power1_average_interval", aem_show_power_period, aem_set_power_period, 0},
{NULL,                      NULL,                  NULL,                 0},
};

/* Description of AEM2 sensors */
static struct aem_ro_sensor_template aem2_ro_sensors[] = {
{"energy1_input",    aem_show_energy,      0},
{"energy2_input",    aem_show_energy,      1},
{"power1_average",   aem_show_power,       0},
{"power2_average",   aem_show_power,       1},
{"temp1_input",      aem2_show_temp,       0},
{"temp2_input",      aem2_show_temp,       1},

/* Power-cap registers, indexed by the POWER_CAP* enum values. */
{"power4_average",   aem2_show_pcap_value, POWER_CAP_MAX_HOTPLUG},
{"power5_average",   aem2_show_pcap_value, POWER_CAP_MAX},
{"power6_average",   aem2_show_pcap_value, POWER_CAP_MIN_WARNING},
{"power7_average",   aem2_show_pcap_value, POWER_CAP_MIN},

{"power3_average",   aem2_show_pcap_value, POWER_AUX},
{"power_cap",        aem2_show_pcap_value, POWER_CAP},
{NULL,               NULL,                 0},
};

static struct aem_rw_sensor_template aem2_rw_sensors[] = {
{"power1_average_interval", aem_show_power_period, aem_set_power_period, 0},
{"power2_average_interval", aem_show_power_period, aem_set_power_period, 1},
{NULL,                      NULL,                  NULL,                 0},
};
/* Set up AEM1 sensor attrs */
/* Returns 0 on success or a negative error from device_create_file(). */
static int aem1_find_sensors(struct aem_data *data)
{
	return aem_register_sensors(data, aem1_ro_sensors, aem1_rw_sensors);
}

/* Set up AEM2 sensor attrs */
/* Returns 0 on success or a negative error from device_create_file(). */
static int aem2_find_sensors(struct aem_data *data)
{
	return aem_register_sensors(data, aem2_ro_sensors, aem2_rw_sensors);
}
/* Module init/exit routines */
static int __init aem_init(void)
{
	int err;

	err = driver_register(&aem_driver.driver);
	if (err) {
		pr_err("Can't register aem driver\n");
		return err;
	}

	/* Devices are discovered through BMC watcher events; undo the
	 * driver registration if we cannot watch for them. */
	err = ipmi_smi_watcher_register(&driver_data.bmc_events);
	if (err) {
		driver_unregister(&aem_driver.driver);
		return err;
	}

	return 0;
}
static void __exit aem_exit(void)
{
	struct aem_data *pos, *tmp;

	/* Stop new discoveries first, then tear down remaining devices. */
	ipmi_smi_watcher_unregister(&driver_data.bmc_events);
	driver_unregister(&aem_driver.driver);

	list_for_each_entry_safe(pos, tmp, &driver_data.aem_devices, list)
		aem_delete(pos);
}
MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver");
MODULE_LICENSE("GPL");
module_init(aem_init);
module_exit(aem_exit);
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3350-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3655-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3755-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBM3850M2/x3950M2-*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMBladeHC10-*");
| gpl-2.0 |
HCDRJacob/u8800-2.6.32 | arch/x86/kernel/crash.c | 344 | 2471 | /*
* Architecture specific (i386/x86_64) functions for kexec based crash dumps.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
*
* Copyright (C) IBM Corporation, 2004. All rights reserved.
*
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/iommu.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

/*
 * Per-CPU NMI handler used while shooting down the other CPUs for kdump:
 * saves that CPU's register state, disables virtualization extensions,
 * and shuts off its local APIC.
 */
static void kdump_nmi_callback(int cpu, struct die_args *args)
{
	struct pt_regs *regs;
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif

	regs = args->regs;

#ifdef CONFIG_X86_32
	/* If the NMI interrupted kernel mode, rebuild a pt_regs with a
	 * usable ss/esp before saving it. */
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	/* Also silence the local APIC of the CPU running the shootdown. */
	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/* Quiesce interrupt delivery hardware so the capture kernel
	 * starts from a clean state. */
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
#ifdef CONFIG_X86_64
	pci_iommu_shutdown();
#endif

	/* Finally, record the register state of the crashing CPU itself. */
	crash_save_cpu(regs, safe_smp_processor_id());
}
| gpl-2.0 |
anoever/thunderbolt | drivers/acpi/acpica/utosi.c | 600 | 15841 | /******************************************************************************
*
* Module Name: utosi - Support for the _OSI predefined control method
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utosi")
/******************************************************************************
*
* ACPICA policy for new _OSI strings:
*
* It is the stated policy of ACPICA that new _OSI strings will be integrated
* into this module as soon as possible after they are defined. It is strongly
* recommended that all ACPICA hosts mirror this policy and integrate any
* changes to this module as soon as possible. There are several historical
* reasons behind this policy:
*
* 1) New BIOSs tend to test only the case where the host responds TRUE to
* the latest version of Windows, which would respond to the latest/newest
* _OSI string. Not responding TRUE to the latest version of Windows will
* risk executing untested code paths throughout the DSDT and SSDTs.
*
* 2) If a new _OSI string is recognized only after a significant delay, this
* has the potential to cause problems on existing working machines because
* of the possibility that a new and different path through the ASL code
* will be executed.
*
* 3) New _OSI strings are tending to come out about once per year. A delay
* in recognizing a new string for a significant amount of time risks the
* release of another string which only compounds the initial problem.
*
*****************************************************************************/
/*
* Strings supported by the _OSI predefined control method (which is
* implemented internally within this module.)
*
* March 2009: Removed "Linux" as this host no longer wants to respond true
* for this string. Basically, the only safe OS strings are windows-related
* and in many or most cases represent the only test path within the
* BIOS-provided ASL code.
*
* The last element of each entry is used to track the newest version of
* Windows that the BIOS has requested.
*/
static struct acpi_interface_info acpi_default_supported_interfaces[] = {
	/* Entry layout: {Name, Next (linked at init time), Flags, Value}.
	 * Value tracks the Windows version corresponding to each string. */

	/* Operating System Vendor Strings */

	{"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000},	/* Windows 2000 */
	{"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP},	/* Windows XP */
	{"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1},	/* Windows XP SP1 */
	{"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003},	/* Windows Server 2003 */
	{"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2},	/* Windows XP SP2 */
	{"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1},	/* Windows Server 2003 SP1 - Added 03/2006 */
	{"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},	/* Windows vista - Added 03/2006 */
	{"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008},	/* Windows Server 2008 - Added 09/2009 */
	{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1},	/* Windows Vista SP1 - Added 09/2009 */
	{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},	/* Windows Vista SP2 - Added 09/2010 */
	{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},	/* Windows 7 and Server 2008 R2 - Added 09/2009 */
	{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},	/* Windows 8 and Server 2012 - Added 08/2012 */
	{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8},	/* Windows 8.1 and Server 2012 R2 - Added 01/2014 */

	/* Feature Group Strings */

	{"Extended Address Space Descriptor", NULL, ACPI_OSI_FEATURE, 0},

	/*
	 * All "optional" feature group strings (features that are implemented
	 * by the host) should be dynamically modified to VALID by the host via
	 * acpi_install_interface or acpi_update_interfaces. Such optional feature
	 * group strings are set as INVALID by default here.
	 */

	{"Module Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"Processor Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"3.0 Thermal Model", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"3.0 _SCP Extensions", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"Processor Aggregator Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0}
};
/*******************************************************************************
*
* FUNCTION: acpi_ut_initialize_interfaces
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Initialize the global _OSI supported interfaces list
*
******************************************************************************/
acpi_status acpi_ut_initialize_interfaces(void)
{
	acpi_status status;
	u32 i;

	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;

	/* Link the static list of supported interfaces */

	/* Chain each static entry to the next; the final entry keeps its
	 * NULL .next initializer and terminates the list. */
	for (i = 0;
	     i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
	     i++) {
		acpi_default_supported_interfaces[i].next =
		    &acpi_default_supported_interfaces[(acpi_size) i + 1];
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_interface_terminate
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Delete all interfaces in the global list. Sets
* acpi_gbl_supported_interfaces to NULL.
*
******************************************************************************/
acpi_status acpi_ut_interface_terminate(void)
{
	acpi_status status;
	struct acpi_interface_info *next_interface;

	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Pop entries off the head of the global list one at a time. */
	next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		acpi_gbl_supported_interfaces = next_interface->next;

		if (next_interface->flags & ACPI_OSI_DYNAMIC) {

			/* Only interfaces added at runtime can be freed */

			ACPI_FREE(next_interface->name);
			ACPI_FREE(next_interface);
		} else {
			/* Interface is in static list. Reset it to invalid or valid. */

			/* Restore the compile-time default validity so a later
			 * re-initialization starts from a clean state. */
			if (next_interface->flags & ACPI_OSI_DEFAULT_INVALID) {
				next_interface->flags |= ACPI_OSI_INVALID;
			} else {
				next_interface->flags &= ~ACPI_OSI_INVALID;
			}
		}

		next_interface = acpi_gbl_supported_interfaces;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_install_interface
*
* PARAMETERS: interface_name - The interface to install
*
* RETURN: Status
*
* DESCRIPTION: Install the interface into the global interface list.
* Caller MUST hold acpi_gbl_osi_mutex
*
******************************************************************************/
acpi_status acpi_ut_install_interface(acpi_string interface_name)
{
	struct acpi_interface_info *info;

	/* Allocate info block and space for the name string */

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info));
	if (!info) {
		return (AE_NO_MEMORY);
	}

	info->name = ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
	if (!info->name) {
		ACPI_FREE(info);
		return (AE_NO_MEMORY);
	}

	/* Initialize new info and insert at the head of the global list */

	ACPI_STRCPY(info->name, interface_name);
	info->flags = ACPI_OSI_DYNAMIC;
	info->next = acpi_gbl_supported_interfaces;

	acpi_gbl_supported_interfaces = info;
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_remove_interface
*
* PARAMETERS: interface_name - The interface to remove
*
* RETURN: Status
*
* DESCRIPTION: Remove the interface from the global interface list.
* Caller MUST hold acpi_gbl_osi_mutex
*
******************************************************************************/
acpi_status acpi_ut_remove_interface(acpi_string interface_name)
{
	struct acpi_interface_info *previous_interface;
	struct acpi_interface_info *next_interface;

	/* Walk with a trailing pointer so a matching node can be unlinked;
	 * at the head both pointers are equal, which is the special case
	 * handled below. */
	previous_interface = next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		if (!ACPI_STRCMP(interface_name, next_interface->name)) {

			/* Found: name is in either the static list or was added at runtime */

			if (next_interface->flags & ACPI_OSI_DYNAMIC) {

				/* Interface was added dynamically, remove and free it */

				if (previous_interface == next_interface) {
					/* Match is the list head. */
					acpi_gbl_supported_interfaces =
					    next_interface->next;
				} else {
					previous_interface->next =
					    next_interface->next;
				}

				ACPI_FREE(next_interface->name);
				ACPI_FREE(next_interface);
			} else {
				/*
				 * Interface is in static list. If marked invalid, then it
				 * does not actually exist. Else, mark it invalid.
				 */
				if (next_interface->flags & ACPI_OSI_INVALID) {
					return (AE_NOT_EXIST);
				}

				next_interface->flags |= ACPI_OSI_INVALID;
			}

			return (AE_OK);
		}

		previous_interface = next_interface;
		next_interface = next_interface->next;
	}

	/* Interface was not found */

	return (AE_NOT_EXIST);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_update_interfaces
*
* PARAMETERS: action - Actions to be performed during the
* update
*
* RETURN: Status
*
* DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
* strings or/and feature group strings.
* Caller MUST hold acpi_gbl_osi_mutex
*
******************************************************************************/
acpi_status acpi_ut_update_interfaces(u8 action)
{
	struct acpi_interface_info *info;

	for (info = acpi_gbl_supported_interfaces; info; info = info->next) {
		u8 selected;

		/* Feature-group strings are selected by ACPI_FEATURE_STRINGS,
		 * vendor strings by ACPI_VENDOR_STRINGS. */
		if (info->flags & ACPI_OSI_FEATURE) {
			selected = (u8)(action & ACPI_FEATURE_STRINGS);
		} else {
			selected = (u8)(action & ACPI_VENDOR_STRINGS);
		}

		if (!selected) {
			continue;
		}

		if (action & ACPI_DISABLE_INTERFACES) {

			/* Mark the interfaces as invalid */

			info->flags |= ACPI_OSI_INVALID;
		} else {
			/* Mark the interfaces as valid */

			info->flags &= ~ACPI_OSI_INVALID;
		}
	}

	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_interface
*
* PARAMETERS: interface_name - The interface to find
*
* RETURN: struct acpi_interface_info if found. NULL if not found.
*
* DESCRIPTION: Search for the specified interface name in the global list.
* Caller MUST hold acpi_gbl_osi_mutex
*
******************************************************************************/
struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
{
	struct acpi_interface_info *info;

	/* Linear search of the global interface list by exact name match. */
	for (info = acpi_gbl_supported_interfaces; info; info = info->next) {
		if (!ACPI_STRCMP(interface_name, info->name)) {
			return (info);
		}
	}

	return (NULL);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_osi_implementation
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Implementation of the _OSI predefined control method. When
* an invocation of _OSI is encountered in the system AML,
* control is transferred to this function.
*
******************************************************************************/
acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
{
	union acpi_operand_object *string_desc;
	union acpi_operand_object *return_desc;
	struct acpi_interface_info *interface_info;
	acpi_interface_handler interface_handler;
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ut_osi_implementation);

	/* Validate the string input argument (from the AML caller) */

	string_desc = walk_state->arguments[0].object;
	if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a return object */

	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
	if (!return_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Default return value is 0, NOT SUPPORTED */

	return_value = 0;
	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		/* Drop the reference taken by object creation above. */
		acpi_ut_remove_reference(return_desc);
		return_ACPI_STATUS(status);
	}

	/* Lookup the interface in the global _OSI list */

	interface_info = acpi_ut_get_interface(string_desc->string.pointer);
	if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
		/*
		 * The interface is supported.
		 * Update the osi_data if necessary. We keep track of the latest
		 * version of Windows that has been requested by the BIOS.
		 */
		if (interface_info->value > acpi_gbl_osi_data) {
			acpi_gbl_osi_data = interface_info->value;
		}

		/* Supported interfaces return all-ones (ACPI TRUE). */
		return_value = ACPI_UINT32_MAX;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);

	/*
	 * Invoke an optional _OSI interface handler. The host OS may wish
	 * to do some interface-specific handling. For example, warn about
	 * certain interfaces or override the true/false support value.
	 */
	interface_handler = acpi_gbl_interface_handler;
	if (interface_handler) {
		return_value =
		    interface_handler(string_desc->string.pointer,
				      return_value);
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
			      "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
			      string_desc->string.pointer,
			      return_value == 0 ? "not " : ""));

	/* Complete the return object */

	return_desc->integer.value = return_value;
	walk_state->return_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
| gpl-2.0 |
felipito/linux-stable | drivers/acpi/acpica/nsutils.c | 600 | 20251 | /******************************************************************************
*
* Module Name: nsutils - Utilities for accessing ACPI namespace, accessing
* parents and siblings and Scope manipulation
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "amlcode.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsutils")
/* Local prototypes */
#ifdef ACPI_OBSOLETE_FUNCTIONS
acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ns_print_node_pathname
*
* PARAMETERS: node - Object
* message - Prefix message
*
* DESCRIPTION: Print an object's full namespace pathname
* Manages allocation/freeing of a pathname buffer
*
******************************************************************************/
void
acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
			    const char *message)
{
	struct acpi_buffer buffer;
	acpi_status status;

	if (!node) {
		acpi_os_printf("[NULL NAME]");
		return;
	}

	/* Convert handle to full pathname and print it (with supplied message) */

	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;

	status = acpi_ns_handle_to_pathname(node, &buffer);
	if (ACPI_FAILURE(status)) {
		/* Pathname conversion failed; nothing to print or free. */
		return;
	}

	if (message) {
		acpi_os_printf("%s ", message);
	}

	acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
	ACPI_FREE(buffer.pointer);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_type
*
* PARAMETERS: node - Parent Node to be examined
*
* RETURN: Type field from Node whose handle is passed
*
* DESCRIPTION: Return the type of a Namespace node
*
******************************************************************************/
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
{
	ACPI_FUNCTION_TRACE(ns_get_type);

	if (node) {
		return_UINT8(node->type);
	}

	/* A NULL node is a caller bug; warn and report the "any" type. */
	ACPI_WARNING((AE_INFO, "Null Node parameter"));
	return_UINT8(ACPI_TYPE_ANY);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_local
*
* PARAMETERS: type - A namespace object type
*
* RETURN: LOCAL if names must be found locally in objects of the
* passed type, 0 if enclosing scopes should be searched
*
* DESCRIPTION: Returns scope rule for the given object type.
*
******************************************************************************/
u32 acpi_ns_local(acpi_object_type type)
{
	ACPI_FUNCTION_TRACE(ns_local);

	if (acpi_ut_valid_object_type(type)) {
		return_UINT32(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
	}

	/* Type code out of range */

	ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
	return_UINT32(ACPI_NS_NORMAL);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_internal_name_length
*
* PARAMETERS: info - Info struct initialized with the
* external name pointer.
*
* RETURN: None
*
* DESCRIPTION: Calculate the length of the internal (AML) namestring
* corresponding to the external (ASL) namestring.
*
******************************************************************************/
void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
{
	const char *next_external_char;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	next_external_char = info->external_name;
	info->num_carats = 0;
	info->num_segments = 0;
	info->fully_qualified = FALSE;

	/*
	 * For the internal name, the required length is 4 bytes per segment, plus
	 * 1 each for root_prefix, multi_name_prefix_op, segment count, trailing null
	 * (which is not really needed, but no there's harm in putting it there)
	 *
	 * strlen() + 1 covers the first name_seg, which has no path separator
	 */
	if (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
		/* Absolute path: note it and consume the root prefix. */
		info->fully_qualified = TRUE;
		next_external_char++;

		/* Skip redundant root_prefix, like \\_SB.PCI0.SBRG.EC0 */

		while (ACPI_IS_ROOT_PREFIX(*next_external_char)) {
			next_external_char++;
		}
	} else {
		/* Handle Carat prefixes */

		/* Relative path: count leading parent ("^") prefixes. */
		while (ACPI_IS_PARENT_PREFIX(*next_external_char)) {
			info->num_carats++;
			next_external_char++;
		}
	}

	/*
	 * Determine the number of ACPI name "segments" by counting the number of
	 * path separators within the string. Start with one segment since the
	 * segment count is [(# separators) + 1], and zero separators is ok.
	 */
	if (*next_external_char) {
		info->num_segments = 1;
		for (i = 0; next_external_char[i]; i++) {
			if (ACPI_IS_PATH_SEPARATOR(next_external_char[i])) {
				info->num_segments++;
			}
		}
	}

	/* 4 bytes per segment, plus the carats, plus 4 bytes of overhead
	 * (prefix, multi-name op, segment count, trailing null). */
	info->length = (ACPI_NAME_SIZE * info->num_segments) +
	    4 + info->num_carats;

	info->next_external_char = next_external_char;
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_build_internal_name
*
* PARAMETERS: info - Info struct fully initialized
*
* RETURN: Status
*
* DESCRIPTION: Construct the internal (AML) namestring
* corresponding to the external (ASL) namestring.
*
******************************************************************************/
/*
 * Encode the external (ASL) pathname described by 'info' into the internal
 * (AML) form in info->internal_name.  The caller must have run
 * acpi_ns_get_internal_name_length() first so that num_segments, num_carats,
 * fully_qualified, and next_external_char are valid, and must have allocated
 * at least info->length bytes for internal_name.
 *
 * Returns AE_OK, or AE_BAD_PATHNAME if a segment is longer than
 * ACPI_NAME_SIZE characters (i.e. no separator/null where one is required).
 */
acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
{
	u32 num_segments = info->num_segments;
	char *internal_name = info->internal_name;
	const char *external_name = info->next_external_char;
	char *result = NULL;	/* Where the 4-byte segments are written */
	u32 i;

	ACPI_FUNCTION_TRACE(ns_build_internal_name);

	/* Setup the correct prefixes, counts, and pointers */

	if (info->fully_qualified) {
		internal_name[0] = AML_ROOT_PREFIX;

		/* 0/1 segments need no count byte; 2 uses the dual prefix;
		 * 3+ use the multi prefix followed by a segment count byte */

		if (num_segments <= 1) {
			result = &internal_name[1];
		} else if (num_segments == 2) {
			internal_name[1] = AML_DUAL_NAME_PREFIX;
			result = &internal_name[2];
		} else {
			internal_name[1] = AML_MULTI_NAME_PREFIX_OP;
			internal_name[2] = (char)num_segments;
			result = &internal_name[3];
		}
	} else {
		/*
		 * Not fully qualified.
		 * Handle Carats first, then append the name segments
		 */
		i = 0;
		if (info->num_carats) {
			for (i = 0; i < info->num_carats; i++) {
				internal_name[i] = AML_PARENT_PREFIX;
			}
		}

		if (num_segments <= 1) {
			result = &internal_name[i];
		} else if (num_segments == 2) {
			internal_name[i] = AML_DUAL_NAME_PREFIX;
			result = &internal_name[(acpi_size) i + 1];
		} else {
			internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
			internal_name[(acpi_size) i + 1] = (char)num_segments;
			result = &internal_name[(acpi_size) i + 2];
		}
	}

	/* Build the name (minus path separators) */

	for (; num_segments; num_segments--) {
		for (i = 0; i < ACPI_NAME_SIZE; i++) {
			if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
			    (*external_name == 0)) {

				/* Pad the segment with underscore(s) if segment is short */

				result[i] = '_';
			} else {
				/* Convert the character to uppercase and save it */

				result[i] =
				    (char)ACPI_TOUPPER((int)*external_name);
				external_name++;
			}
		}

		/* Now we must have a path separator, or the pathname is bad */

		if (!ACPI_IS_PATH_SEPARATOR(*external_name) &&
		    (*external_name != 0)) {
			return_ACPI_STATUS(AE_BAD_PATHNAME);
		}

		/* Move on the next segment (skip the separator/null) */

		external_name++;
		result += ACPI_NAME_SIZE;
	}

	/* Terminate the string */

	*result = 0;

	if (info->fully_qualified) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Returning [%p] (abs) \"\\%s\"\n",
				  internal_name, internal_name));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Returning [%p] (rel) \"%s\"\n",
				  internal_name, internal_name));
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_internalize_name
*
* PARAMETERS: *external_name - External representation of name
* **Converted name - Where to return the resulting
* internal represention of the name
*
* RETURN: Status
*
* DESCRIPTION: Convert an external representation (e.g. "\_PR_.CPU0")
* to internal form (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
*
*******************************************************************************/
acpi_status
acpi_ns_internalize_name(const char *external_name, char **converted_name)
{
	struct acpi_namestring_info info;
	char *internal_name;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ns_internalize_name);

	/* Reject NULL pointers and empty source strings up front */

	if (!external_name || !converted_name || (*external_name == 0)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Compute how large the internal (AML) form must be */

	info.external_name = external_name;
	acpi_ns_get_internal_name_length(&info);

	/* Allocate a zeroed buffer for the internal name */

	internal_name = ACPI_ALLOCATE_ZEROED(info.length);
	if (!internal_name) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Perform the conversion; on failure the buffer is released here */

	info.internal_name = internal_name;
	status = acpi_ns_build_internal_name(&info);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(internal_name);
		return_ACPI_STATUS(status);
	}

	/* Ownership of the buffer passes to the caller */

	*converted_name = internal_name;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_externalize_name
*
* PARAMETERS: internal_name_length - Lenth of the internal name below
* internal_name - Internal representation of name
* converted_name_length - Where the length is returned
* converted_name - Where the resulting external name
* is returned
*
* RETURN: Status
*
* DESCRIPTION: Convert internal name (e.g. 5c 2f 02 5f 50 52 5f 43 50 55 30)
* to its external (printable) form (e.g. "\_PR_.CPU0")
*
******************************************************************************/
/*
 * Convert an internal (AML-encoded) namestring to its external, printable
 * (ASL) form.  The caller owns the buffer returned in *converted_name and
 * must eventually ACPI_FREE it.  converted_name_length may be NULL if the
 * caller does not need the resulting length.
 */
acpi_status
acpi_ns_externalize_name(u32 internal_name_length,
			 const char *internal_name,
			 u32 * converted_name_length, char **converted_name)
{
	u32 names_index = 0;	/* Offset of the first 4-byte name segment */
	u32 num_segments = 0;
	u32 required_length;
	u32 prefix_length = 0;	/* Bytes of leading '\' or '^' prefix */
	u32 i = 0;
	u32 j = 0;

	ACPI_FUNCTION_TRACE(ns_externalize_name);

	if (!internal_name_length || !internal_name || !converted_name) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Check for a prefix (one '\' | one or more '^') */

	switch (internal_name[0]) {
	case AML_ROOT_PREFIX:
		prefix_length = 1;
		break;
	case AML_PARENT_PREFIX:
		for (i = 0; i < internal_name_length; i++) {
			if (ACPI_IS_PARENT_PREFIX(internal_name[i])) {
				prefix_length = i + 1;
			} else {
				break;
			}
		}

		/* Entire name may consist of parent prefixes */

		if (i == internal_name_length) {
			prefix_length = i;
		}
		break;
	default:
		break;
	}

	/*
	 * Check for object names. Note that there could be 0-255 of these
	 * 4-byte elements.
	 */
	if (prefix_length < internal_name_length) {
		switch (internal_name[prefix_length]) {
		case AML_MULTI_NAME_PREFIX_OP:

			/* <count> 4-byte names */

			names_index = prefix_length + 2;
			num_segments = (u8)
			    internal_name[(acpi_size) prefix_length + 1];
			break;
		case AML_DUAL_NAME_PREFIX:

			/* Two 4-byte names */

			names_index = prefix_length + 1;
			num_segments = 2;
			break;
		case 0:

			/* null_name */

			names_index = 0;
			num_segments = 0;
			break;
		default:

			/* one 4-byte name */

			names_index = prefix_length;
			num_segments = 1;
			break;
		}
	}

	/*
	 * Calculate the length of converted_name, which equals the length
	 * of the prefix, length of all object names, length of any required
	 * punctuation ('.') between object names, plus the NULL terminator.
	 */
	required_length = prefix_length + (4 * num_segments) +
	    ((num_segments > 0) ? (num_segments - 1) : 0) + 1;

	/*
	 * Check to see if we're still in bounds. If not, there's a problem
	 * with internal_name (invalid format).
	 */
	if (required_length > internal_name_length) {
		ACPI_ERROR((AE_INFO, "Invalid internal name"));
		return_ACPI_STATUS(AE_BAD_PATHNAME);
	}

	/* Build the converted_name */

	*converted_name = ACPI_ALLOCATE_ZEROED(required_length);
	if (!(*converted_name)) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Copy the prefix characters verbatim ('\' / '^') */

	j = 0;
	for (i = 0; i < prefix_length; i++) {
		(*converted_name)[j++] = internal_name[i];
	}

	if (num_segments > 0) {
		for (i = 0; i < num_segments; i++) {
			/* '.' separator between (not before) segments */

			if (i > 0) {
				(*converted_name)[j++] = '.';
			}

			/* Copy and validate the 4-char name segment */

			ACPI_MOVE_NAME(&(*converted_name)[j],
				       &internal_name[names_index]);
			acpi_ut_repair_name(&(*converted_name)[j]);

			j += ACPI_NAME_SIZE;
			names_index += ACPI_NAME_SIZE;
		}
	}

	if (converted_name_length) {
		*converted_name_length = (u32) required_length;
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_validate_handle
*
* PARAMETERS: handle - Handle to be validated and typecast to a
* namespace node.
*
* RETURN: A pointer to a namespace node
*
* DESCRIPTION: Convert a namespace handle to a namespace node. Handles special
* cases for the root node.
*
* NOTE: Real integer handles would allow for more verification
* and keep all pointers within this subsystem - however this introduces
* more overhead and has not been necessary to this point. Drivers
* holding handles are typically notified before a node becomes invalid
* due to a table unload.
*
******************************************************************************/
/*
 * Convert an acpi_handle to a namespace node pointer.  NULL and the
 * special ACPI_ROOT_OBJECT handle both map to the root node; any other
 * handle must carry the "named object" descriptor tag, otherwise NULL
 * is returned.
 */
struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{
	ACPI_FUNCTION_ENTRY();

	/* Parameter validation: NULL or root handle -> root node */

	if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
		return (acpi_gbl_root_node);
	}

	/* We can at least attempt to verify the handle via its descriptor tag */

	if (ACPI_GET_DESCRIPTOR_TYPE(handle) != ACPI_DESC_TYPE_NAMED) {
		return (NULL);
	}

	return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_terminate
*
* PARAMETERS: none
*
* RETURN: none
*
* DESCRIPTION: free memory allocated for namespace and ACPI table storage.
*
******************************************************************************/
void acpi_ns_terminate(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ns_terminate);

	/*
	 * Tear down the entire namespace: every node below the root and
	 * all objects attached to those nodes.
	 */
	acpi_ns_delete_namespace_subtree(acpi_gbl_root_node);

	/* The root node itself is deleted while holding the namespace mutex */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	acpi_ns_delete_node(acpi_gbl_root_node);
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_opens_scope
*
* PARAMETERS: type - A valid namespace type
*
* RETURN: NEWSCOPE if the passed type "opens a name scope" according
* to the ACPI specification, else 0
*
******************************************************************************/
u32 acpi_ns_opens_scope(acpi_object_type type)
{
	ACPI_FUNCTION_ENTRY();

	/* In-range types map through the global namespace-properties table */

	if (type <= ACPI_TYPE_LOCAL_MAX) {
		return (((u32)acpi_gbl_ns_properties[type]) & ACPI_NS_NEWSCOPE);
	}

	/* type code out of range */

	ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
	return (ACPI_NS_NORMAL);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_node
*
* PARAMETERS: *pathname - Name to be found, in external (ASL) format. The
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
* prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
* flags - Used to indicate whether to perform upsearch or
* not.
* return_node - Where the Node is returned
*
* DESCRIPTION: Look up a name relative to a given scope and return the
* corresponding Node. NOTE: Scope can be null.
*
* MUTEX: Locks namespace
*
******************************************************************************/
/*
 * Resolve an external-format pathname (relative to prefix_node, or absolute)
 * to its namespace node, returned in *return_node.  A NULL pathname returns
 * the prefix node itself (or the root if prefix_node is NULL).  The
 * namespace mutex is held only around the actual lookup.
 */
acpi_status
acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
		 const char *pathname,
		 u32 flags, struct acpi_namespace_node **return_node)
{
	union acpi_generic_state scope_info;
	acpi_status status;
	char *internal_path;	/* AML-encoded copy of pathname; freed below */

	ACPI_FUNCTION_TRACE_PTR(ns_get_node, ACPI_CAST_PTR(char, pathname));

	/* Simplest case is a null pathname */

	if (!pathname) {
		*return_node = prefix_node;
		if (!prefix_node) {
			*return_node = acpi_gbl_root_node;
		}
		return_ACPI_STATUS(AE_OK);
	}

	/* Quick check for a reference to the root ("\" alone) */

	if (ACPI_IS_ROOT_PREFIX(pathname[0]) && (!pathname[1])) {
		*return_node = acpi_gbl_root_node;
		return_ACPI_STATUS(AE_OK);
	}

	/* Convert path to internal representation (allocates internal_path) */

	status = acpi_ns_internalize_name(pathname, &internal_path);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Must lock namespace during lookup */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		/* Mutex not held here; cleanup only frees the path buffer */
		goto cleanup;
	}

	/* Setup lookup scope (search starting point) */

	scope_info.scope.node = prefix_node;

	/* Lookup the name in the namespace */

	status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
				ACPI_IMODE_EXECUTE,
				(flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
				return_node);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s, %s\n",
				  pathname, acpi_format_exception(status)));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

cleanup:
	ACPI_FREE(internal_path);
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
blackbox87/zte_skate_gb_kernel | arch/parisc/kernel/unaligned.c | 856 | 17658 | /*
* Unaligned memory access handler
*
* Copyright (C) 2001 Randolph Chung <tausq@debian.org>
* Significantly tweaked by LaMont Jones <lamont@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <asm/uaccess.h>
/* #define DEBUG_UNALIGNED 1 */
#ifdef DEBUG_UNALIGNED
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FIXUP_BRANCH(lbl) \
"\tldil L%%" #lbl ", %%r1\n" \
"\tldo R%%" #lbl "(%%r1), %%r1\n" \
"\tbv,n %%r0(%%r1)\n"
/* If you use FIXUP_BRANCH, then you must list this clobber */
#define FIXUP_BRANCH_CLOBBER "r1"
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
#define OPCODE2(a,b) ((a)<<26|(b)<<1)
#define OPCODE3(a,b) ((a)<<26|(b)<<2)
#define OPCODE4(a) ((a)<<26)
#define OPCODE1_MASK OPCODE1(0x3f,1,0xf)
#define OPCODE2_MASK OPCODE2(0x3f,1)
#define OPCODE3_MASK OPCODE3(0x3f,1)
#define OPCODE4_MASK OPCODE4(0x3f)
/* skip LDB - never unaligned (index) */
#define OPCODE_LDH_I OPCODE1(0x03,0,0x1)
#define OPCODE_LDW_I OPCODE1(0x03,0,0x2)
#define OPCODE_LDD_I OPCODE1(0x03,0,0x3)
#define OPCODE_LDDA_I OPCODE1(0x03,0,0x4)
#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5)
#define OPCODE_LDWA_I OPCODE1(0x03,0,0x6)
#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7)
/* skip LDB - never unaligned (short) */
#define OPCODE_LDH_S OPCODE1(0x03,1,0x1)
#define OPCODE_LDW_S OPCODE1(0x03,1,0x2)
#define OPCODE_LDD_S OPCODE1(0x03,1,0x3)
#define OPCODE_LDDA_S OPCODE1(0x03,1,0x4)
#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5)
#define OPCODE_LDWA_S OPCODE1(0x03,1,0x6)
#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7)
/* skip STB - never unaligned */
#define OPCODE_STH OPCODE1(0x03,1,0x9)
#define OPCODE_STW OPCODE1(0x03,1,0xa)
#define OPCODE_STD OPCODE1(0x03,1,0xb)
/* skip STBY - never unaligned */
/* skip STDBY - never unaligned */
#define OPCODE_STWA OPCODE1(0x03,1,0xe)
#define OPCODE_STDA OPCODE1(0x03,1,0xf)
#define OPCODE_FLDWX OPCODE1(0x09,0,0x0)
#define OPCODE_FLDWXR OPCODE1(0x09,0,0x1)
#define OPCODE_FSTWX OPCODE1(0x09,0,0x8)
#define OPCODE_FSTWXR OPCODE1(0x09,0,0x9)
#define OPCODE_FLDWS OPCODE1(0x09,1,0x0)
#define OPCODE_FLDWSR OPCODE1(0x09,1,0x1)
#define OPCODE_FSTWS OPCODE1(0x09,1,0x8)
#define OPCODE_FSTWSR OPCODE1(0x09,1,0x9)
#define OPCODE_FLDDX OPCODE1(0x0b,0,0x0)
#define OPCODE_FSTDX OPCODE1(0x0b,0,0x8)
#define OPCODE_FLDDS OPCODE1(0x0b,1,0x0)
#define OPCODE_FSTDS OPCODE1(0x0b,1,0x8)
#define OPCODE_LDD_L OPCODE2(0x14,0)
#define OPCODE_FLDD_L OPCODE2(0x14,1)
#define OPCODE_STD_L OPCODE2(0x1c,0)
#define OPCODE_FSTD_L OPCODE2(0x1c,1)
#define OPCODE_LDW_M OPCODE3(0x17,1)
#define OPCODE_FLDW_L OPCODE3(0x17,0)
#define OPCODE_FSTW_L OPCODE3(0x1f,0)
#define OPCODE_STW_M OPCODE3(0x1f,1)
#define OPCODE_LDH_L OPCODE4(0x11)
#define OPCODE_LDW_L OPCODE4(0x12)
#define OPCODE_LDWM OPCODE4(0x13)
#define OPCODE_STH_L OPCODE4(0x19)
#define OPCODE_STW_L OPCODE4(0x1A)
#define OPCODE_STWM OPCODE4(0x1B)
#define MAJOR_OP(i) (((i)>>26)&0x3f)
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
#define IM14(i) IM((i),14)
#define ERR_NOTHANDLED -1
#define ERR_PAGEFAULT -2
int unaligned_enabled __read_mostly = 1;
void die_if_kernel (char *str, struct pt_regs *regs, long err);
/*
 * Emulate an unaligned 2-byte (halfword) load from regs->ior in address
 * space regs->isr.  The two bytes are fetched individually via %sr1 and
 * merged (the first byte is deposited into the upper bits with depw),
 * then the result is written to general register 'toreg' (toreg == 0
 * discards the value; gr[0] is never written).
 *
 * Returns 0 on success, or -2 (ERR_PAGEFAULT) if either byte access
 * faulted — the exception-table entries route faults to the fixup at
 * label 4, which loads -2 into the status output.
 */
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
	unsigned long saddr = regs->ior;
	unsigned long val = 0;
	int ret;

	DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
		regs->isr, regs->ior, toreg);

	__asm__ __volatile__ (
"	mtsp	%4, %%sr1\n"
"1:	ldbs	0(%%sr1,%3), %%r20\n"
"2:	ldbs	1(%%sr1,%3), %0\n"
"	depw	%%r20, 23, 24, %0\n"
"	copy	%%r0, %1\n"
"3:	\n"
"	.section .fixup,\"ax\"\n"
"4:	ldi	-2, %1\n"
	FIXUP_BRANCH(3b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
	ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
	: "=r" (val), "=r" (ret)
	: "0" (val), "r" (saddr), "r" (regs->isr)
	: "r20", FIXUP_BRANCH_CLOBBER );

	DPRINTF("val = 0x" RFMT "\n", val);

	if (toreg)
		regs->gr[toreg] = val;

	return ret;
}
/*
 * Emulate an unaligned 4-byte load from regs->ior (space regs->isr).
 * The two aligned words spanning the target address are loaded and the
 * wanted 32 bits are extracted with a variable shift (vshd keyed off the
 * low address bits saved in r19).
 *
 * If 'flop' is set the value goes to FP register word 'toreg' in
 * regs->fr; otherwise it goes to regs->gr[toreg] (toreg == 0 discards).
 *
 * Returns 0 on success, -2 (ERR_PAGEFAULT) if either word access faulted.
 */
static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
{
	unsigned long saddr = regs->ior;
	unsigned long val = 0;
	int ret;

	DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
		regs->isr, regs->ior, toreg);

	__asm__ __volatile__ (
"	zdep	%3,28,2,%%r19\n"		/* r19=(ofs&3)*8 */
"	mtsp	%4, %%sr1\n"
"	depw	%%r0,31,2,%3\n"			/* align the address down */
"1:	ldw	0(%%sr1,%3),%0\n"
"2:	ldw	4(%%sr1,%3),%%r20\n"
"	subi	32,%%r19,%%r19\n"
"	mtctl	%%r19,11\n"
"	vshd	%0,%%r20,%0\n"
"	copy	%%r0, %1\n"
"3:	\n"
"	.section .fixup,\"ax\"\n"
"4:	ldi	-2, %1\n"
	FIXUP_BRANCH(3b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
	ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
	: "=r" (val), "=r" (ret)
	: "0" (val), "r" (saddr), "r" (regs->isr)
	: "r19", "r20", FIXUP_BRANCH_CLOBBER );

	DPRINTF("val = 0x" RFMT "\n", val);

	if (flop)
		((__u32*)(regs->fr))[toreg] = val;
	else if (toreg)
		regs->gr[toreg] = val;

	return ret;
}
/*
 * Emulate an unaligned 8-byte load from regs->ior (space regs->isr).
 *
 * On PA2.0 (CONFIG_PA20) the two aligned doublewords spanning the address
 * are loaded and merged with shrpd.  On 32-bit PA2.0 kernels only FP
 * doubleword loads are handled (-1 / ERR_NOTHANDLED otherwise).  Without
 * CONFIG_PA20, three aligned words are loaded and merged with vshd into
 * a 64-bit value.
 *
 * If 'flop' is set the result goes to regs->fr[toreg]; otherwise to
 * regs->gr[toreg] (toreg == 0 discards the value).
 *
 * Returns 0 on success, -2 (ERR_PAGEFAULT) on a faulting access, or -1
 * for the unhandled 32-bit integer-ldd case noted above.
 */
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
	unsigned long saddr = regs->ior;
	__u64 val = 0;
	int ret;

	DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
		regs->isr, regs->ior, toreg);
#ifdef CONFIG_PA20
#ifndef CONFIG_64BIT
	if (!flop)
		return -1;
#endif
	__asm__ __volatile__ (
"	depd,z	%3,60,3,%%r19\n"		/* r19=(ofs&7)*8 */
"	mtsp	%4, %%sr1\n"
"	depd	%%r0,63,3,%3\n"			/* align the address down */
"1:	ldd	0(%%sr1,%3),%0\n"
"2:	ldd	8(%%sr1,%3),%%r20\n"
"	subi	64,%%r19,%%r19\n"
"	mtsar	%%r19\n"
"	shrpd	%0,%%r20,%%sar,%0\n"
"	copy	%%r0, %1\n"
"3:	\n"
"	.section .fixup,\"ax\"\n"
"4:	ldi	-2, %1\n"
	FIXUP_BRANCH(3b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
	: "=r" (val), "=r" (ret)
	: "0" (val), "r" (saddr), "r" (regs->isr)
	: "r19", "r20", FIXUP_BRANCH_CLOBBER );
#else
	{
		unsigned long valh=0,vall=0;
		__asm__ __volatile__ (
"	zdep	%5,29,2,%%r19\n"		/* r19=(ofs&3)*8 */
"	mtsp	%6, %%sr1\n"
"	dep	%%r0,31,2,%5\n"			/* align the address down */
"1:	ldw	0(%%sr1,%5),%0\n"
"2:	ldw	4(%%sr1,%5),%1\n"
"3:	ldw	8(%%sr1,%5),%%r20\n"
"	subi	32,%%r19,%%r19\n"
"	mtsar	%%r19\n"
"	vshd	%0,%1,%0\n"
"	vshd	%1,%%r20,%1\n"
"	copy	%%r0, %2\n"
"4:	\n"
"	.section .fixup,\"ax\"\n"
"5:	ldi	-2, %2\n"
	FIXUP_BRANCH(4b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b,5b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,5b)
	ASM_EXCEPTIONTABLE_ENTRY(3b,5b)
	: "=r" (valh), "=r" (vall), "=r" (ret)
	: "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
	: "r19", "r20", FIXUP_BRANCH_CLOBBER );
		val=((__u64)valh<<32)|(__u64)vall;
	}
#endif
	DPRINTF("val = 0x%llx\n", val);

	if (flop)
		regs->fr[toreg] = val;
	else if (toreg)
		regs->gr[toreg] = val;

	return ret;
}
/*
 * Emulate an unaligned 2-byte (halfword) store of regs->gr[frreg] to
 * regs->ior (space regs->isr).  The halfword is split into its two bytes
 * (the high byte extracted with extrw,u) and stored individually, so no
 * alignment is required.  frreg == 0 stores zero (gr[0] reads as 0).
 *
 * Returns 0 on success, -2 (ERR_PAGEFAULT) if either byte store faulted.
 */
static int emulate_sth(struct pt_regs *regs, int frreg)
{
	unsigned long val = regs->gr[frreg];
	int ret;

	if (!frreg)
		val = 0;

	DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
		val, regs->isr, regs->ior);

	__asm__ __volatile__ (
"	mtsp %3, %%sr1\n"
"	extrw,u %1, 23, 8, %%r19\n"
"1:	stb %1, 1(%%sr1, %2)\n"
"2:	stb %%r19, 0(%%sr1, %2)\n"
"	copy	%%r0, %0\n"
"3:	\n"
"	.section .fixup,\"ax\"\n"
"4:	ldi	-2, %0\n"
	FIXUP_BRANCH(3b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
	: "=r" (ret)
	: "r" (val), "r" (regs->ior), "r" (regs->isr)
	: "r19", FIXUP_BRANCH_CLOBBER );

	return ret;
}
/*
 * Emulate an unaligned 4-byte store to regs->ior (space regs->isr).
 * The two aligned words spanning the target are read, the stored value
 * is shifted into position (vshd) and merged under a mask built with
 * depwi,z, and both words are written back.
 *
 * The source is FP register word 'frreg' (regs->fr) when 'flop' is set,
 * otherwise regs->gr[frreg]; frreg == 0 stores zero.
 *
 * Returns 0 on success, -2 (ERR_PAGEFAULT) if an access faulted.
 */
static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
{
	unsigned long val;
	int ret;

	if (flop)
		val = ((__u32*)(regs->fr))[frreg];
	else if (frreg)
		val = regs->gr[frreg];
	else
		val = 0;

	DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
		val, regs->isr, regs->ior);

	__asm__ __volatile__ (
"	mtsp %3, %%sr1\n"
"	zdep	%2, 28, 2, %%r19\n"
"	dep	%%r0, 31, 2, %2\n"
"	mtsar	%%r19\n"
"	depwi,z	-2, %%sar, 32, %%r19\n"
"1:	ldw	0(%%sr1,%2),%%r20\n"
"2:	ldw	4(%%sr1,%2),%%r21\n"
"	vshd	%%r0, %1, %%r22\n"
"	vshd	%1, %%r0, %%r1\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%%r22, %%r20, %%r20\n"
"	or	%%r1, %%r21, %%r21\n"
"	stw	%%r20,0(%%sr1,%2)\n"
"	stw	%%r21,4(%%sr1,%2)\n"
"	copy	%%r0, %0\n"
"3:	\n"
"	.section .fixup,\"ax\"\n"
"4:	ldi	-2, %0\n"
	FIXUP_BRANCH(3b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
	: "=r" (ret)
	: "r" (val), "r" (regs->ior), "r" (regs->isr)
	: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );

	/*
	 * BUG FIX: this function previously ended with "return 0;", which
	 * discarded the fault status the asm sequence places in 'ret' (the
	 * .fixup path sets it to -2 on a page fault).  Return 'ret' like
	 * emulate_sth()/emulate_std() do, so handle_unaligned() can raise
	 * the proper signal instead of reporting success after a fault.
	 */
	return ret;
}
/*
 * Emulate an unaligned 8-byte store to regs->ior (space regs->isr).
 *
 * On PA2.0 (CONFIG_PA20) the two aligned doublewords spanning the target
 * are read-modify-written under a shift mask; on 32-bit PA2.0 kernels only
 * FP doubleword stores are handled (-1 otherwise).  Without CONFIG_PA20,
 * the value is split into high/low words and merged into the three aligned
 * words that the unaligned doubleword may span (the middle word at offset
 * 4 is fully overwritten, so only words 0 and 8 need to be read first).
 *
 * Source is regs->fr[frreg] when 'flop' is set, else regs->gr[frreg];
 * frreg == 0 stores zero.
 *
 * Returns 0 on success, -2 (ERR_PAGEFAULT) on a faulting access, or -1
 * for the unhandled 32-bit integer-std case.
 */
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
	__u64 val;
	int ret;

	if (flop)
		val = regs->fr[frreg];
	else if (frreg)
		val = regs->gr[frreg];
	else
		val = 0;

	DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
		val, regs->isr, regs->ior);

#ifdef CONFIG_PA20
#ifndef CONFIG_64BIT
	if (!flop)
		return -1;
#endif
	__asm__ __volatile__ (
"	mtsp %3, %%sr1\n"
"	depd,z	%2, 60, 3, %%r19\n"
"	depd	%%r0, 63, 3, %2\n"
"	mtsar	%%r19\n"
"	depdi,z	-2, %%sar, 64, %%r19\n"
"1:	ldd	0(%%sr1,%2),%%r20\n"
"2:	ldd	8(%%sr1,%2),%%r21\n"
"	shrpd	%%r0, %1, %%sar, %%r22\n"
"	shrpd	%1, %%r0, %%sar, %%r1\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%%r22, %%r20, %%r20\n"
"	or	%%r1, %%r21, %%r21\n"
"3:	std	%%r20,0(%%sr1,%2)\n"
"4:	std	%%r21,8(%%sr1,%2)\n"
"	copy	%%r0, %0\n"
"5:	\n"
"	.section .fixup,\"ax\"\n"
"6:	ldi	-2, %0\n"
	FIXUP_BRANCH(5b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b,6b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,6b)
	ASM_EXCEPTIONTABLE_ENTRY(3b,6b)
	ASM_EXCEPTIONTABLE_ENTRY(4b,6b)
	: "=r" (ret)
	: "r" (val), "r" (regs->ior), "r" (regs->isr)
	: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
#else
	{
		unsigned long valh=(val>>32),vall=(val&0xffffffffl);
		/*
		 * Operands: %0=ret, %1=valh, %2=vall, %3=regs->ior, %4=regs->isr.
		 *
		 * NOTE(review): the "zdep %2"/"dep %2" pair below computes the
		 * shift/alignment from %2 (vall) rather than %3 (the address);
		 * that looks suspicious but is kept as-is here — confirm
		 * against the upstream unaligned-handler fixes before changing.
		 */
		__asm__ __volatile__ (
"	mtsp	%4, %%sr1\n"
"	zdep	%2, 29, 2, %%r19\n"
"	dep	%%r0, 31, 2, %2\n"
"	mtsar	%%r19\n"
"	zvdepi	-2, 32, %%r19\n"
"1:	ldw	0(%%sr1,%3),%%r20\n"
"2:	ldw	8(%%sr1,%3),%%r21\n"
"	vshd	%1, %2, %%r1\n"
"	vshd	%%r0, %1, %1\n"
"	vshd	%2, %%r0, %2\n"
"	and	%%r20, %%r19, %%r20\n"
"	andcm	%%r21, %%r19, %%r21\n"
"	or	%1, %%r20, %1\n"
"	or	%2, %%r21, %2\n"
/* BUG FIX: the base register of the first store was %1 (the data value,
 * valh) instead of %3 (the target address, regs->ior) — the companion
 * stores at labels 4 and 5 both use %3.  Storing through %1 would write
 * to whatever address the data happens to name. */
"3:	stw	%1,0(%%sr1,%3)\n"
"4:	stw	%%r1,4(%%sr1,%3)\n"
"5:	stw	%2,8(%%sr1,%3)\n"
"	copy	%%r0, %0\n"
"6:	\n"
"	.section .fixup,\"ax\"\n"
"7:	ldi	-2, %0\n"
	FIXUP_BRANCH(6b)
"	.previous\n"
	ASM_EXCEPTIONTABLE_ENTRY(1b,7b)
	ASM_EXCEPTIONTABLE_ENTRY(2b,7b)
	ASM_EXCEPTIONTABLE_ENTRY(3b,7b)
	ASM_EXCEPTIONTABLE_ENTRY(4b,7b)
	ASM_EXCEPTIONTABLE_ENTRY(5b,7b)
	: "=r" (ret)
	: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
	: "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER );
	}
#endif

	return ret;
}
void handle_unaligned(struct pt_regs *regs)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
int modify = 0;
int ret = ERR_NOTHANDLED;
struct siginfo si;
register int flop=0; /* true if this is a flop */
/* log a message with pacing */
if (user_mode(regs)) {
if (current->thread.flags & PARISC_UAC_SIGBUS) {
goto force_sigbus;
}
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
__ratelimit(&ratelimit)) {
char buf[256];
sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]);
printk(KERN_WARNING "%s", buf);
#ifdef DEBUG_UNALIGNED
show_regs(regs);
#endif
}
if (!unaligned_enabled)
goto force_sigbus;
}
/* handle modification - OK, it's ugly, see the instruction manual */
switch (MAJOR_OP(regs->iir))
{
case 0x03:
case 0x09:
case 0x0b:
if (regs->iir&0x20)
{
modify = 1;
if (regs->iir&0x1000) /* short loads */
if (regs->iir&0x200)
newbase += IM5_3(regs->iir);
else
newbase += IM5_2(regs->iir);
else if (regs->iir&0x2000) /* scaled indexed */
{
int shift=0;
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
shift= 1; break;
case OPCODE_LDW_I:
shift= 2; break;
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
shift= 3; break;
}
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
} else /* simple indexed */
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
}
break;
case 0x13:
case 0x1b:
modify = 1;
newbase += IM14(regs->iir);
break;
case 0x14:
case 0x1c:
if (regs->iir&8)
{
modify = 1;
newbase += IM14(regs->iir&~0xe);
}
break;
case 0x16:
case 0x1e:
modify = 1;
newbase += IM14(regs->iir&6);
break;
case 0x17:
case 0x1f:
if (regs->iir&4)
{
modify = 1;
newbase += IM14(regs->iir&~4);
}
break;
}
/* TODO: make this cleaner... */
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
case OPCODE_LDH_S:
ret = emulate_ldh(regs, R3(regs->iir));
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
ret = emulate_ldw(regs, R3(regs->iir),0);
break;
case OPCODE_STH:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW:
case OPCODE_STWA:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
#ifdef CONFIG_PA20
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
case OPCODE_LDD_S:
case OPCODE_LDDA_S:
ret = emulate_ldd(regs, R3(regs->iir),0);
break;
case OPCODE_STD:
case OPCODE_STDA:
ret = emulate_std(regs, R2(regs->iir),0);
break;
#endif
case OPCODE_FLDWX:
case OPCODE_FLDWS:
case OPCODE_FLDWXR:
case OPCODE_FLDWSR:
flop=1;
ret = emulate_ldw(regs,FR3(regs->iir),1);
break;
case OPCODE_FLDDX:
case OPCODE_FLDDS:
flop=1;
ret = emulate_ldd(regs,R3(regs->iir),1);
break;
case OPCODE_FSTWX:
case OPCODE_FSTWS:
case OPCODE_FSTWXR:
case OPCODE_FSTWSR:
flop=1;
ret = emulate_stw(regs,FR3(regs->iir),1);
break;
case OPCODE_FSTDX:
case OPCODE_FSTDS:
flop=1;
ret = emulate_std(regs,R3(regs->iir),1);
break;
case OPCODE_LDCD_I:
case OPCODE_LDCW_I:
case OPCODE_LDCD_S:
case OPCODE_LDCW_S:
ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
break;
}
#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
flop=1;
ret = emulate_ldd(regs,R2(regs->iir),1);
break;
case OPCODE_FSTD_L:
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
#endif
}
#endif
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
ret = emulate_ldw(regs, R2(regs->iir),0);
break;
case OPCODE_LDW_M:
ret = emulate_ldw(regs, R2(regs->iir),1);
break;
case OPCODE_FSTW_L:
flop=1;
ret = emulate_stw(regs, R2(regs->iir),1);
break;
case OPCODE_STW_M:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
switch (regs->iir & OPCODE4_MASK)
{
case OPCODE_LDH_L:
ret = emulate_ldh(regs, R2(regs->iir));
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
ret = emulate_ldw(regs, R2(regs->iir),0);
break;
case OPCODE_STH_L:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW_L:
case OPCODE_STWM:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
if (modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
if (ret == ERR_NOTHANDLED)
printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
DPRINTF("ret = %d\n", ret);
if (ret)
{
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
if (ret == ERR_PAGEFAULT)
{
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *)regs->ior;
force_sig_info(SIGSEGV, &si, current);
}
else
{
force_sigbus:
/* couldn't handle it ... */
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void __user *)regs->ior;
force_sig_info(SIGBUS, &si, current);
}
return;
}
/* else we handled it, let life go on. */
regs->gr[0]|=PSW_N;
}
/*
* NB: check_unaligned() is only used for PCXS processors right
* now, so we only check for PA1.1 encodings at this point.
*/
/*
 * NB: check_unaligned() is only used for PCXS processors right now,
 * so only PA1.1 instruction encodings are examined here.
 *
 * Returns non-zero iff the effective address in regs->ior violates the
 * natural alignment of the instruction in regs->iir (1-byte mask for
 * halfword ops, 3-byte mask for word ops, none for everything else).
 */
int
check_unaligned(struct pt_regs *regs)
{
	unsigned long mask = 0UL;

	switch (regs->iir & OPCODE1_MASK) {
	case OPCODE_LDH_I:
	case OPCODE_LDH_S:
	case OPCODE_STH:
		mask = 1UL;
		break;

	case OPCODE_LDW_I:
	case OPCODE_LDWA_I:
	case OPCODE_LDW_S:
	case OPCODE_LDWA_S:
	case OPCODE_STW:
	case OPCODE_STWA:
		mask = 3UL;
		break;

	default:
		/* Not an indexed/short form; try the long-displacement forms */
		switch (regs->iir & OPCODE4_MASK) {
		case OPCODE_LDH_L:
		case OPCODE_STH_L:
			mask = 1UL;
			break;
		case OPCODE_LDW_L:
		case OPCODE_LDWM:
		case OPCODE_STW_L:
		case OPCODE_STWM:
			mask = 3UL;
			break;
		}
		break;
	}

	return (int)(regs->ior & mask);
}
| gpl-2.0 |
royorbs3/Leviathan-V1-Kernel-G925T | drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2136 | 46151 | /*
* cxgb4i.c: Chelsio T4 iSCSI driver.
*
* Copyright (c) 2010 Chelsio Communications, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Karen Xie (kxie@chelsio.com)
* Rakesh Ranjan (rranjan@chelsio.com)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
static unsigned int dbg_level;
#include "../libcxgbi.h"
#define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.1"
#define DRV_MODULE_RELDATE "Aug. 2010"
static char version[] =
DRV_MODULE_DESC " " DRV_MODULE_NAME
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");
/* TCP receive window (bytes) advertised for offloaded iSCSI connections */
static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
/* BUG FIX: corrected user-visible typo "reveive" -> "receive" */
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

/* TCP send window (bytes) for offloaded connections */
static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

/* Threshold at which accumulated RX credits are returned to hardware */
static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
		"RX credits return threshold in bytes (default=10KB)");

/* Upper bound on concurrent offloaded connections */
static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

/* First local port number used for offloaded connections */
static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

/* Signature of per-CPL-message dispatch handlers */
typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

/* ULD callbacks (defined later) registered with the cxgb4 base driver */
static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static int t4_uld_state_change(void *, enum cxgb4_state state);
/*
 * Upper-layer-driver registration with the cxgb4 lower-level driver:
 * cxgb4 calls .add per adapter, .rx_handler per ingress CPL message and
 * .state_change on adapter up/down/detach events.
 */
static const struct cxgb4_uld_info cxgb4i_uld_info = {
	.name = DRV_MODULE_NAME,
	.add = t4_uld_add,
	.rx_handler = t4_uld_rx_handler,
	.state_change = t4_uld_state_change,
};
/*
 * SCSI host template: queue/error-handling entry points are the generic
 * libiscsi implementations; only the sizing knobs are cxgb4i-specific.
 */
static struct scsi_host_template cxgb4i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB4I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	/* DDP placement requires per-sg handling; disable sg clustering */
	.use_clustering	= DISABLE_CLUSTERING,
	.this_id	= -1,
};
/*
 * iSCSI transport ops: a mix of generic libiscsi/iscsi_tcp handlers and
 * cxgbi_* offload-aware wrappers (PDU allocation/xmit, endpoint connect
 * and digest/padding offload capability flags).
 */
static struct iscsi_transport cxgb4i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
				CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible	= cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session	= cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn		= cxgbi_bind_conn,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
static struct scsi_transport_template *cxgb4i_stt;
/*
* CPL (Chelsio Protocol Language) defines a message passing interface between
* the host driver and Chelsio asic.
* The section below implements the CPLs related to the iSCSI TCP connection
* open/close/abort and data send/receive.
*/
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 128
/*
 * Record the tx queue the skb should go out on.  @csk is currently unused
 * here; it is kept in the signature so callers can supply the connection
 * context uniformly.
 */
static inline void set_queue(struct sk_buff *skb, unsigned int queue,
				const struct cxgbi_sock *csk)
{
	skb->queue_mapping = queue;
}
static int push_tx_frames(struct cxgbi_sock *, int);
/*
 * is_ofld_imm - can this packet be sent as immediate data?
 * @skb: the packet
 *
 * A work request may carry its payload inline ("immediate") when the
 * payload plus the offload WR header fits in MAX_IMM_TX_PKT_LEN bytes
 * (the same limit used for Ethernet packets).
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	const unsigned int max_imm_payload =
		MAX_IMM_TX_PKT_LEN - sizeof(struct fw_ofld_tx_data_wr);

	return skb->len <= max_imm_payload;
}
/*
 * send_act_open_req - issue CPL_ACT_OPEN_REQ to start an active TCP open
 * @csk:	offloaded iSCSI connection
 * @skb:	pre-allocated skb carrying the CPL (consumed by the send)
 * @e:		L2T entry (note: the body actually uses csk->l2t, not @e)
 *
 * Packs the connection parameters into opt0/opt2 (keepalive, window
 * scale, MSS index, L2T index, tx channel, source-MAC select, ULP mode
 * iSCSI, rx buffer size in 1KB units) and the atid + rss queue id into
 * the WR tid, then hands the skb to the L2T layer which transmits it
 * once L2/ARP resolution completes.
 */
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
				struct l2t_entry *e)
{
	struct cpl_act_open_req *req;
	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
	unsigned long long opt0;
	unsigned int opt2;
	/* atid in the low 14 bits, rss queue id above it */
	unsigned int qid_atid = ((unsigned int)csk->atid) |
				(((unsigned int)csk->rss_qid) << 14);

	opt0 = KEEP_ALIVE(1) |
		WND_SCALE(wscale) |
		MSS_IDX(csk->mss_idx) |
		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
		TX_CHAN(csk->tx_chan) |
		SMAC_SEL(csk->smac_idx) |
		ULP_MODE(ULP_MODE_ISCSI) |
		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);	/* 1KB units */
	/* NOTE(review): bits 20 and 22 are undocumented magic flags here;
	 * verify their meaning against t4_msg.h before changing. */
	opt2 = RX_CHANNEL(0) |
		RSS_QUEUE_VALID |
		(1 << 20) | (1 << 22) |
		RSS_QUEUE(csk->rss_qid);

	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	req = (struct cpl_act_open_req *)skb->head;
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
							qid_atid));
	/* ports/addresses are already in network byte order in csk */
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
		csk, &req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->atid, csk->rss_qid);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
/*
 * send_close_req - queue a CPL_CLOSE_CON_REQ (graceful close / FIN)
 *
 * Uses the close-request skb pre-allocated by alloc_cpls() (cleared here
 * so it is sent at most once).  The WR is entailed on the write queue so
 * it is ordered behind pending tx data, and flushed immediately if the
 * connection is already established.
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u.\n",
		csk, csk->state, csk->flags, csk->tid);
	csk->cpl_close = NULL;	/* ownership moves to the write queue */
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = 0;

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}
/*
 * ARP-failure callback for an ABORT_REQ: the peer is unreachable, so
 * downgrade the abort to "no RST" and send it anyway via the plain
 * offload path so the HW tid still gets torn down.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
	struct cpl_abort_req *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, abort.\n",
		csk, csk->state, csk->flags, csk->tid);
	req = (struct cpl_abort_req *)skb->data;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * send_abort_req - abort the connection with a CPL_ABORT_REQ (RST)
 *
 * Idempotent: returns early if the connection is already aborting or the
 * pre-allocated abort skb/cdev is gone.  Pending tx data is purged since
 * it will never be sent; rsvd1 tells firmware whether any tx data (and
 * hence a flowc WR) was ever sent on this tid.
 */
static void send_abort_req(struct cxgbi_sock *csk)
{
	struct cpl_abort_req *req;
	struct sk_buff *skb = csk->cpl_abort_req;

	if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;	/* single use */
	req = (struct cpl_abort_req *)skb->head;
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	req->cmd = CPL_ABORT_SEND_RST;
	t4_set_arp_err_handler(skb, csk, abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
/*
 * send_abort_rpl - acknowledge a peer-initiated abort with CPL_ABORT_RPL
 * @rst_status: CPL_ABORT_SEND_RST or CPL_ABORT_NO_RST, echoed to firmware
 *
 * Uses (and consumes) the reply skb pre-allocated by alloc_cpls().
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;	/* single use */
	set_queue(skb, CPL_PRIORITY_DATA, csk);
	INIT_TP_WR(rpl, csk->tid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.  Returns the number
 * of credits sent, or 0 if the atomic skb allocation failed (the caller
 * may retry later since the credits were not consumed).
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						csk->tid));
	/* force an immediate ACK so the peer's window reopens promptly */
	req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}
/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Returns the number of flits (8-byte units) needed for a scatter/gather
 * list holding @n entries: the first entry occupies the 2-flit SGL header,
 * and each subsequent pair of entries takes 3 flits (an odd leftover
 * entry takes 2).
 */
static inline unsigned int sgl_len(unsigned int n)
{
	unsigned int rem = n - 1;	/* entries beyond the first */

	return 2 + rem + rem / 2 + (rem & 1);
}
/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.  Immediate packets are simply their length rounded up
 * to flits; otherwise the headers up to the transport offset are counted
 * plus an SGL covering the page fragments (and the linear tail, if any).
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int nsge;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	nsge = skb_shinfo(skb)->nr_frags;
	if (skb->tail != skb->transport_header)
		nsge++;	/* linear data beyond the headers needs an entry */

	return skb_transport_offset(skb) / 8 + sgl_len(nsge);
}
/*
 * send_tx_flowc_wr - send the one-time FW_FLOWC_WR for a connection
 *
 * The flowc WR tells firmware the per-flow parameters (PF/VF, channel,
 * port, ingress queue, initial send/receive sequence numbers, send
 * buffer and MSS).  It must be the first WR on a tid; the caller
 * (push_tx_frames) accounts 5 tx credits for it.
 *
 * Fix: the GFP_ATOMIC alloc_wr() result was used unchecked, so an
 * allocation failure dereferenced a NULL skb.  Bail out instead; note
 * the caller still charges the flowc credits, so the connection may be
 * degraded, but the kernel no longer oopses.
 */
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;
	struct fw_flowc_wr *flowc;
	int flowclen, i;

	/* 8 parameters + a zero terminator, padded out to 80 bytes */
	flowclen = 80;
	skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
	if (!skb) {
		pr_warn("csk 0x%p, tid 0x%x, flowc alloc failed.\n",
			csk, csk->tid);
		return;
	}
	flowc = (struct fw_flowc_wr *)skb->head;
	flowc->op_to_nparams =
		htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 =
		htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
				FW_WR_FLOWID(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htonl(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htonl(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htonl(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = htonl(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = htonl(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = htonl(csk->advmss);
	flowc->mnemval[8].mnemonic = 0;	/* terminator */
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}
	set_queue(skb, CPL_PRIORITY_DATA, csk);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
		csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
		csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
		csk->advmss);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}
/*
 * make_tx_data_wr - prepend a FW_OFLD_TX_DATA_WR header to a tx skb
 * @dlen:	payload bytes carried in this skb
 * @len:	total PDU length including ULP (digest/padding) extra bytes
 * @credits:	tx credits this WR consumes (goes into LEN16)
 * @compl:	completion requested flag (currently unused; COMPL is
 *		always set below)
 *
 * The ULP submode (header/data digest bits) is taken from the skb cb;
 * SHOVE is set when the write queue is empty so the HW pushes the data
 * out immediately instead of waiting to coalesce.
 */
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				int dlen, int len, u32 credits, int compl)
{
	struct fw_ofld_tx_data_wr *req;
	unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
	unsigned int wr_ulp_mode = 0;

	req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

	if (is_ofld_imm(skb)) {
		/* payload travels inline in the WR */
		req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL(1) |
					FW_WR_IMMDLEN(dlen));
		req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
						FW_WR_LEN16(credits));
	} else {
		req->op_to_immdlen =
			cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
					FW_WR_COMPL(1) |
					FW_WR_IMMDLEN(0));
		req->flowid_len16 =
			cpu_to_be32(FW_WR_FLOWID(csk->tid) |
					FW_WR_LEN16(credits));
	}
	if (submode)
		wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
				FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
	req->tunnel_to_proxy = htonl(wr_ulp_mode |
		 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
	req->plen = htonl(len);
	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
/* ARP-failure handler for tx data WRs: the skb is simply dropped. */
static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
	kfree_skb(skb);
}
/*
 * push_tx_frames - drain the connection's write queue to the HW
 * @req_completion: passed through to make_tx_data_wr (completion request)
 *
 * For each queued skb: compute the tx credits its WR needs (immediate
 * data vs. SGL form), stop when credits run out, move the skb from the
 * write queue to the unacked-WR list, send the one-time flowc WR before
 * the first data WR, prepend the tx data WR header, and hand the skb to
 * the L2T send path.  Returns the total truesize of the skbs sent.
 * Caller must hold csk->lock.
 */
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
			 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int dlen = skb->len;
		int len = skb->len;
		unsigned int credits_needed;

		skb_reset_transport_header(skb);
		if (is_ofld_imm(skb))
			credits_needed = DIV_ROUND_UP(dlen +
					sizeof(struct fw_ofld_tx_data_wr), 16);
		else
			credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
					+ sizeof(struct fw_ofld_tx_data_wr),
					16);

		if (csk->wr_cred < credits_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb %u/%u, wr %d < %u.\n",
				csk, skb->len, skb->data_len,
				credits_needed, csk->wr_cred);
			break;
		}
		__skb_unlink(skb, &csk->write_queue);
		set_queue(skb, CPL_PRIORITY_DATA, csk);
		/* skb->csum is repurposed to remember the WR credit cost */
		skb->csum = credits_needed;
		csk->wr_cred -= credits_needed;
		csk->wr_una_cred += credits_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
			csk, skb->len, skb->data_len, credits_needed,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
				/* first tx: the flowc WR costs 5 credits,
				 * charged to this skb's accounting */
				send_tx_flowc_wr(csk);
				skb->csum += 5;
				csk->wr_cred -= 5;
				csk->wr_una_cred += 5;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, dlen, len, credits_needed,
					req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
			csk, csk->state, csk->flags, csk->tid, skb, len);

		cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
	}
	return total_size;
}
/*
 * Release the connection's active-open tid (atid), if it holds one, and
 * drop the socket reference taken when the atid was allocated.
 */
static inline void free_atid(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

	if (!cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		return;

	cxgb4_free_atid(lldi->tids, csk->atid);
	cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_put(csk);
}
/*
 * do_act_establish - handle CPL_ACT_ESTABLISH (active open completed)
 *
 * Looks the connection up by atid, swaps it over to the firmware-assigned
 * tid (insert tid, free atid), seeds the receive sequence state from the
 * CPL, derives the advertised MSS from the negotiated TCP options, then
 * either starts the pending abort or kicks off any queued tx.
 * Always consumes the skb.
 */
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
		goto rel_skb;
	}
	if (csk->atid != atid) {
		pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
			atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
		csk, csk->state, csk->flags, tid, atid, rcv_isn);

	/* hold a ref for the tid table entry; the atid's ref is dropped */
	cxgbi_sock_get(csk);
	csk->tid = tid;
	cxgb4_insert_tid(lldi->tids, csk, tid);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

	free_atid(csk);

	spin_lock_bh(&csk->lock);
	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	/*
	 * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
	 * pass through opt0.
	 */
	if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
		csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

	/* MSS from the negotiated index, minus 40 bytes of IP+TCP headers,
	 * minus 12 more if TCP timestamps were negotiated */
	csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
	if (GET_TCPOPT_TSTAMP(tcp_opt))
		csk->advmss -= 12;
	if (csk->advmss < 128)
		csk->advmss = 128;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, mss_idx %u, advmss %u.\n",
			csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 0);
		cxgbi_conn_tx_open(csk);
	}
	spin_unlock_bh(&csk->lock);

rel_skb:
	__kfree_skb(skb);
}
/* Map a CPL_ACT_OPEN_RPL status code to a negative errno. */
static int act_open_rpl_status_to_errno(int status)
{
	if (status == CPL_ERR_CONN_RESET)
		return -ECONNREFUSED;
	if (status == CPL_ERR_ARP_MISS)
		return -EHOSTUNREACH;
	if (status == CPL_ERR_CONN_TIMEDOUT)
		return -ETIMEDOUT;
	if (status == CPL_ERR_TCAM_FULL)
		return -ENOMEM;
	if (status == CPL_ERR_CONN_EXIST)
		return -EADDRINUSE;
	return -EIO;
}
/*
 * Timer callback: retry an active open that failed with CPL_ERR_CONN_EXIST.
 * Allocates a fresh CPL_ACT_OPEN_REQ skb and resends; on allocation
 * failure the open is failed with -ENOMEM.  @data is the cxgbi_sock.
 */
static void csk_act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		t4_set_arp_err_handler(skb, csk,
					cxgbi_sock_act_open_req_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
/*
 * do_act_open_rpl - handle CPL_ACT_OPEN_RPL (active open failed/acknowledged)
 *
 * Negative RTX advice is ignored.  Real failures release the HW tid
 * (unless the failure means no tid was ever installed: TCAM full, conn
 * exists, ARP miss).  CPL_ERR_CONN_EXIST schedules a delayed retry via
 * csk_act_open_retry_timer; every other status fails the open with a
 * mapped errno.  Always consumes the skb.
 */
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	unsigned int atid =
		GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
	unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_atid(t, atid);
	if (unlikely(!csk)) {
		pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
		goto rel_skb;
	}

	pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
		atid, tid, status, csk, csk->state, csk->flags);

	if (status == CPL_ERR_RTX_NEG_ADVICE)
		goto rel_skb;

	if (status && status != CPL_ERR_TCAM_FULL &&
	    status != CPL_ERR_CONN_EXIST &&
	    status != CPL_ERR_ARP_MISS)
		cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != csk_act_open_retry_timer) {
		/* four-tuple collision: back off and retry in HZ/2 */
		csk->retry_timer.function = csk_act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
					act_open_rpl_status_to_errno(status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
/*
 * do_peer_close - handle CPL_PEER_CLOSE (peer sent FIN).
 * Looks the connection up by tid and forwards to the generic libcxgbi
 * peer-close state machine.  Always consumes the skb.
 */
static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	unsigned int tid = GET_TID(req);
	struct cxgbi_sock *csk = lookup_tid(lldi->tids, tid);

	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u.\n",
			csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_peer_close(csk);
	}
	__kfree_skb(skb);
}
/*
 * do_close_con_rpl - handle CPL_CLOSE_CON_RPL (our close was acknowledged).
 * Looks the connection up by tid and forwards the final snd_nxt to the
 * generic libcxgbi close handler.  Always consumes the skb.
 */
static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	unsigned int tid = GET_TID(rpl);
	struct cxgbi_sock *csk = lookup_tid(lldi->tids, tid);

	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u.\n",
			csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	}
	__kfree_skb(skb);
}
/*
 * Map a CPL abort reason to a negative errno.  @need_rst is part of the
 * interface but not modified here.
 */
static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				int *need_rst)
{
	if (abort_reason == CPL_ERR_BAD_SYN ||
	    abort_reason == CPL_ERR_CONN_RESET)
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;

	if (abort_reason == CPL_ERR_XMIT_TIMEDOUT ||
	    abort_reason == CPL_ERR_PERSIST_TIMEDOUT ||
	    abort_reason == CPL_ERR_FINWAIT2_TIMEDOUT ||
	    abort_reason == CPL_ERR_KEEPALIVE_TIMEDOUT)
		return -ETIMEDOUT;

	return -EIO;
}
/*
 * do_abort_req_rss - handle CPL_ABORT_REQ_RSS (peer/firmware abort)
 *
 * Negative advice statuses are ignored.  The first abort request only
 * marks the connection as aborting (a duplicate request arrives for the
 * two-message abort handshake); the second one sends the abort reply and,
 * if we do not have our own abort outstanding, records the errno and
 * closes the connection.  Always consumes the skb.
 */
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
	unsigned int tid = GET_TID(req);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	int rst_status = CPL_ABORT_NO_RST;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, req->status);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
		goto rel_skb;

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		/* first of the pair: just note it and wait for the second */
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto done;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
rel_skb:
	__kfree_skb(skb);
}
/*
 * do_abort_rpl_rss - handle CPL_ABORT_RPL_RSS (reply to our abort request)
 *
 * CPL_ERR_ABORT_FAILED means the tid was already gone and is ignored.
 * Otherwise the generic libcxgbi abort-reply handler finishes the abort.
 * (The "csk ? ..." guards in the trace are redundant after the NULL
 * check above, but harmless.)  Always consumes the skb.
 */
static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (!csk)
		goto rel_skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);

	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;

	cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
}
/*
 * do_rx_iscsi_hdr - handle CPL_ISCSI_HDR (received iSCSI PDU header/data)
 *
 * The HW delivers a PDU as (1) a header CPL, optionally followed by
 * (2) a data CPL, then (3) an RX_DATA_DDP status (see do_rx_data_ddp).
 * The first CPL of a PDU becomes csk->skb_ulp_lhdr: its TCP sequence is
 * validated against rcv_nxt and the PDU length is cross-checked against
 * the BHS data-segment length.  Subsequent CPLs are payload and are
 * tagged SKCBF_RX_DATA on the header skb.  Good skbs are queued on the
 * receive queue (NOT freed); error paths abort the connection and free
 * the skb.
 */
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
	unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len,
		pdu_len_ddp);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	/* strip the CPL header, keep only the iSCSI bytes */
	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(*cpl));
	__pskb_trim(skb, ntohs(cpl->len));

	if (!csk->skb_ulp_lhdr) {
		unsigned char *bhs;
		unsigned int hlen, dlen;

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
			csk, csk->state, csk->flags, csk->tid, skb);

		csk->skb_ulp_lhdr = skb;
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

		if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
			pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
				csk->tid, cxgbi_skcb_tcp_seq(skb),
				csk->rcv_nxt);
			goto abort_conn;
		}

		bhs = skb->data;
		hlen = ntohs(cpl->len);
		/* BHS bytes 4..7 hold the 24-bit data segment length */
		dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

		/* pdu_len_ddp includes 40 bytes of IP+TCP headers */
		if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) {
			pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
				"mismatch %u != %u + %u, seq 0x%x.\n",
				csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40,
				hlen, dlen, cxgbi_skcb_tcp_seq(skb));
			goto abort_conn;
		}

		/* on-wire PDU length is padded to a 4-byte boundary */
		cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
		if (dlen)
			cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
		csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
			csk, skb, *bhs, hlen, dlen,
			ntohl(*((unsigned int *)(bhs + 16))),
			ntohl(*((unsigned int *)(bhs + 24))));

	} else {
		struct sk_buff *lskb = csk->skb_ulp_lhdr;

		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
			csk, csk->state, csk->flags, skb, lskb);
	}

	__skb_queue_tail(&csk->receive_queue, skb);
	spin_unlock_bh(&csk->lock);
	return;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
/*
 * do_rx_data_ddp - handle CPL_RX_DATA_DDP (per-PDU completion status)
 *
 * Arrives after the header (and possibly data) CPLs of a PDU.  Transfers
 * the HCRC/DCRC/padding error bits and the DDP'ed-data indication from
 * the status word onto the saved header skb (csk->skb_ulp_lhdr), marks
 * the PDU complete (SKCBF_RX_STATUS) and notifies the connection that a
 * PDU is ready.  The status skb itself is always consumed here.
 */
static void do_rx_data_ddp(struct cxgbi_device *cdev,
			 struct sk_buff *skb)
{
	struct cxgbi_sock *csk;
	struct sk_buff *lskb;
	struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	unsigned int status = ntohl(rpl->ddpvld);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
		csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	if (!csk->skb_ulp_lhdr) {
		/* status without a preceding header CPL: protocol violation */
		pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
		goto abort_conn;
	}

	lskb = csk->skb_ulp_lhdr;
	csk->skb_ulp_lhdr = NULL;	/* PDU complete; start a new one */

	cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

	if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
		pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
			csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
			csk, lskb, status, cxgbi_skcb_flags(lskb));
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
	}
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
	}
	/* data placed directly by DDP: no data CPL was delivered */
	if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
	    !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
			csk, lskb, status);
		cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
	}
	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, lskb 0x%p, f 0x%lx.\n",
		csk, lskb, cxgbi_skcb_flags(lskb));

	cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
	cxgbi_conn_pdu_ready(csk);
	spin_unlock_bh(&csk->lock);
	goto rel_skb;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
rel_skb:
	__kfree_skb(skb);
}
/*
 * do_fw4_ack - handle CPL_FW4_ACK (tx credit return / snd_una update).
 * Forwards the returned credits and new snd_una to the generic libcxgbi
 * WR-ack bookkeeping.  Always consumes the skb.
 */
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	unsigned int tid = GET_TID(rpl);
	struct cxgbi_sock *csk = lookup_tid(lldi->tids, tid);

	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
	} else {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u.\n",
			csk, csk->state, csk->flags, csk->tid);
		cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
					rpl->seq_vld);
	}
	__kfree_skb(skb);
}
/*
 * do_set_tcb_rpl - handle CPL_SET_TCB_RPL (reply to a TCB field update)
 *
 * Logs the status and reports any non-CPL_ERR_NONE result.  Always
 * consumes the skb.
 *
 * Fix: the original logged "can't find conn" for a NULL csk but then
 * fell through and dereferenced csk (csk->state etc.) in the debug
 * trace — a NULL-pointer dereference.  Return early instead.
 */
static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct tid_info *t = lldi->tids;
	struct cxgbi_sock *csk;

	csk = lookup_tid(t, tid);
	if (!csk) {
		pr_err("can't find conn. for tid %u.\n", tid);
		__kfree_skb(skb);
		return;
	}

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, rpl->status);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
			csk, tid, rpl->status);

	__kfree_skb(skb);
}
/*
 * alloc_cpls - pre-allocate the close/abort/abort-reply WR skbs
 *
 * These control WRs are allocated up front (GFP_KERNEL) so the teardown
 * paths never have to allocate under pressure.  On partial failure the
 * already-allocated skbs are freed.  Returns 0 or -ENOMEM.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
					0, GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;

	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpls;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
					0, GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpls;
	return 0;

free_cpls:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}
/*
 * Release the connection's L2T entry, if any, and drop the socket
 * reference that was taken when the entry was acquired.
 */
static inline void l2t_put(struct cxgbi_sock *csk)
{
	if (!csk->l2t)
		return;

	cxgb4_l2t_release(csk->l2t);
	csk->l2t = NULL;
	cxgbi_sock_put(csk);
}
/*
 * release_offload_resources - free everything HW-related for a connection
 *
 * Frees the pre-allocated control skbs, purges any unacked WRs, releases
 * the L2T entry and the atid or tid (whichever is held, each with its
 * associated socket reference), and detaches the dst/cdev pointers.
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct cxgb4_lld_info *lldi;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_free_cpl_skbs(csk);
	/* credits still outstanding => WRs never acked; drop them */
	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}

	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		lldi = cxgbi_cdev_priv(csk->cdev);
		cxgb4_remove_tid(lldi->tids, 0, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}
/*
 * init_act_open - set up and launch an active open for a connection
 *
 * Allocates an atid, resolves the neighbour for the route and grabs an
 * L2T entry, pre-builds the CPL_ACT_OPEN_REQ skb, derives MSS index,
 * tx channel, smac index and tx/rss queue ids from the port, initializes
 * the WR credit state and finally sends the open request.  Returns 0 on
 * success, -EINVAL on any failure (partially acquired resources are
 * released by the caller's closed/teardown path via the csk refs taken
 * here).
 */
static int init_act_open(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct port_info *pi = netdev_priv(ndev);
	struct sk_buff *skb = NULL;
	struct neighbour *n;
	unsigned int step;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
	/* NOTE(review): if csk->atid is an unsigned field this "< 0" test
	 * can never fire -- confirm the atid type in libcxgbi.h */
	if (csk->atid < 0) {
		pr_err("%s, NO atid available.\n", ndev->name);
		return -EINVAL;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);	/* ref held by the atid */

	n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
	if (!n) {
		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
		goto rel_resource;
	}
	csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
	if (!csk->l2t) {
		pr_err("%s, cannot alloc l2t.\n", ndev->name);
		goto rel_resource;
	}
	cxgbi_sock_get(csk);	/* ref held by the l2t entry */

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

	if (!csk->mtu)
		csk->mtu = dst_mtu(csk->dst);
	cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
	csk->tx_chan = cxgb4_port_chan(ndev);
	/* SMT two entries per row */
	csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
	/* spread connections over the port's share of the tx/rx queues */
	step = lldi->ntxq / lldi->nchan;
	csk->txq_idx = cxgb4_port_idx(ndev) * step;
	step = lldi->nrxq / lldi->nchan;
	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
	csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
	csk->wr_una_cred = 0;
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
		csk, pi->port_id, ndev->name, csk->tx_chan,
		csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
		csk->smac_idx);

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	neigh_release(n);
	return 0;

rel_resource:
	if (n)
		neigh_release(n);
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}
/*
 * Dispatch table: ingress CPL opcode -> handler.  Entries not listed are
 * NULL; every handler owns (and eventually frees or queues) its skb.
 */
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req_rss,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_FW4_ACK] = do_fw4_ack,
	[CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
	[CPL_SET_TCB_RPL] = do_set_tcb_rpl,
	[CPL_RX_DATA_DDP] = do_rx_data_ddp,
};
/*
 * Install the offload (connection-level) callbacks on @cdev and create
 * the source-port map used when opening active connections.
 * Returns 0 on success or a negative errno from portmap creation.
 */
int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
	int err;

	/* clamp the module parameter to the hardware limit */
	if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
		cxgb4i_max_connect = CXGB4I_MAX_CONN;

	err = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
					  cxgb4i_max_connect);
	if (err < 0)
		return err;

	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS	4 /* 256/PPOD_SIZE */

/*
 * Build the ULP_TX_MEM_WRITE work-request header used to program
 * pagepods via immediate data.
 * @req:     work request to fill (the immediate-data sub-header follows it)
 * @wr_len:  total work-request length in bytes (already 16-byte aligned)
 * @dlen:    immediate-data payload length in bytes
 * @pm_addr: destination byte address in adapter pagepod memory
 * Both dlen and pm_addr are encoded in 32-byte units (>> 5).
 */
static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
				unsigned int wr_len, unsigned int dlen,
				unsigned int pm_addr)
{
	struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

	INIT_ULPTX_WR(req, wr_len, 0, 0);
	/* bit 23 is a firmware-defined flag -- TODO confirm against fw spec */
	req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
	/* length in 16-byte units, excluding the wr header itself */
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);
}
/*
 * Program @npods pagepods, starting at pagepod index @idx, with one
 * ULP_TX_MEM_WRITE work request carrying the pods as immediate data.
 * When both @hdr and @gl are NULL the pods are cleared instead of set.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
			struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
			unsigned int npods,
			struct cxgbi_gather_list *gl,
			unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	/* byte address of the first pod inside the pagepod memory region */
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	/* wr header + idata sub-header + payload, padded to 16 bytes */
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
			cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	/* each pagepod covers PPOD_PAGES_MAX pages of the gather list */
	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}
/*
 * Program @npods pagepods starting at index @idx, splitting the work
 * into chunks that fit a single immediate-data ULP_MEM_WRITE request.
 * Stops at, and returns, the first error from ddp_ppod_write_idata().
 */
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	unsigned int done = 0;
	int err = 0;

	while (done < npods) {
		unsigned int chunk = npods - done;

		if (chunk > ULPMEM_IDATA_MAX_NPPODS)
			chunk = ULPMEM_IDATA_MAX_NPPODS;
		/* each pod maps 4 gather-list pages, hence 4 * done */
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					   idx + done, chunk, gl, 4 * done);
		if (err < 0)
			break;
		done += chunk;
	}
	return err;
}
/*
 * Clear @npods pagepods starting at index @idx, in chunks sized to the
 * immediate-data work request.  A write failure stops the loop; the
 * operation is best-effort cleanup, so no status is returned.
 */
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			unsigned int idx, unsigned int npods)
{
	unsigned int done = 0;

	while (done < npods) {
		unsigned int chunk = npods - done;

		if (chunk > ULPMEM_IDATA_MAX_NPPODS)
			chunk = ULPMEM_IDATA_MAX_NPPODS;
		if (ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					 idx + done, chunk, NULL, 0) < 0)
			break;
		done += chunk;
	}
}
/*
 * Set the DDP page-size index for a connection via CPL_SET_TCB_FIELD
 * (bits 9:8 of the TCB word).  pg_idx 0 (the default page size) or an
 * out-of-range index is a no-op.  Returns 0, or -ENOMEM if no skb.
 *
 * NOTE(review): the @tid parameter is unused -- the request is built
 * from csk->tid instead.  Presumably they are always equal here, but
 * verify against callers (ddp_setup_conn_digest() does use its tid).
 */
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
/*
 * Enable/disable iSCSI header and data digest offload for connection
 * @tid by setting the ULP submode bits (5:4) of its TCB, and record
 * the resulting digest lengths in the socket.  A no-op (returns 0)
 * when both digests are off.  Returns -ENOMEM if no skb.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* a CRC32C digest adds 4 bytes to each PDU segment */
	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}
/*
 * One-time DDP setup for an adapter: reserve the iSCSI pagepod memory
 * window, program the tag mask and page-size table into hardware, and
 * install the per-connection DDP callbacks on @cdev.
 * Returns 0 on success, -EALREADY if DDP is already set up (after
 * taking an extra reference), or a negative errno from cxgbi_ddp_init().
 */
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		/* already initialized: just take another reference */
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	/* the pagepod region spans the adapter's iscsi memory window */
	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
		" %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}
/*
 * cxgb4 LLD "add" callback, invoked once per adapter.  Allocates a
 * cxgbi_device, copies @lldi into its private area, initializes DDP
 * and offload support, and registers one scsi host per port.
 *
 * NOTE(review): the register-failure path returns NULL while later
 * failures return ERR_PTR(-ENOMEM) regardless of the real error in rc.
 * Callers must cope with both conventions; consider returning
 * ERR_PTR(rc) instead -- verify against the cxgb4 uld_attach code.
 */
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			"t4 0x%p, rxq id #%d: %u.\n",
			cdev, i, lldi->rxq_ids[i]);

	/* keep a private copy of the lld_info for later use */
	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	/* PF number of port 0, pre-shifted for work-request headers */
	cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
				&cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	return ERR_PTR(-ENOMEM);
}
#define RX_PULL_LEN	128

/*
 * cxgb4 LLD receive callback: wrap the ingress response (delivered
 * either inline in the descriptor or via a free-list packet) in an
 * skb and dispatch it to the CPL handler for its opcode.
 * Returns 0 on success, 1 on allocation failure so the LLD can retry.
 */
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		/* CPL is inline in the 64-byte response descriptor:
		 * skip the leading 8-byte RSS header and the trailing
		 * rsp_ctrl, copy the rest out */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		/* sanity check: opcode byte in the response must match
		 * the first byte of the free-list buffer */
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	/* cast only used to read the common opcode/tid word that all
	 * CPL messages start with */
	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}
/*
 * cxgb4 LLD state-change callback.  Only DETACH triggers real work
 * (tearing down the cxgbi device); the other states are logged with
 * TODOs still open (re-init on UP, closing connections on RECOVERY).
 * Always returns 0.
 */
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		/* re-initialize */
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}
/*
 * Module init: register the iSCSI transport, then register with the
 * cxgb4 LLD so t4_uld_add() is called for each T4 adapter.
 *
 * Fix: the return value of cxgb4_register_uld() was previously
 * ignored, leaving the module loaded but non-functional on failure
 * (e.g. -EBUSY when the ULD slot is taken).  Propagate the error and
 * undo the transport registration.
 */
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;

	rc = cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
	if (rc < 0) {
		cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
		return rc;
	}
	return 0;
}
/*
 * Module exit.  Teardown order matters: stop new CPL/ULD activity
 * first, then drop the per-adapter devices, then the iSCSI transport.
 */
static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);
| gpl-2.0 |
dperezde/little-penguin | linux-eudyptula/drivers/media/usb/stk1160/stk1160-core.c | 2648 | 10828 | /*
* STK1160 driver
*
* Copyright (C) 2012 Ezequiel Garcia
* <elezegarcia--a.t--gmail.com>
*
* Based on Easycap driver by R.M. Thomas
* Copyright (C) 2010 R.M. Thomas
* <rmthomas--a.t--sciolus.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* TODO:
*
* 1. (Try to) detect if we must register ac97 mixer
* 2. Support stream at lower speed: lower frame rate or lower frame size.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <media/saa7115.h>
#include "stk1160.h"
#include "stk1160-reg.h"
/* Default video input selected at probe time (index into the gctrl[]
 * table in stk1160_select_input(); out-of-range values are ignored). */
static unsigned int input;
module_param(input, int, 0644);
MODULE_PARM_DESC(input, "Set default input");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ezequiel Garcia");
MODULE_DESCRIPTION("STK1160 driver");

/* Devices supported by this driver */
static struct usb_device_id stk1160_id_table[] = {
	{ USB_DEVICE(0x05e1, 0x0408) },
	{ }
};
MODULE_DEVICE_TABLE(usb, stk1160_id_table);

/* saa7113 I2C address */
static unsigned short saa7113_addrs[] = {
	0x4a >> 1,
	I2C_CLIENT_END
};
/*
 * Read/Write stk registers
 */

/*
 * Read one byte from device register @reg over the control endpoint.
 *
 * The transfer buffer must be heap-allocated (not on-stack) because
 * the USB core may DMA into it.  On success the byte is stored in
 * *value and 0 is returned; on failure *value stays 0 and the
 * usb_control_msg() error is returned.
 */
int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
{
	int ret;
	int pipe = usb_rcvctrlpipe(dev->udev, 0);
	u8 *buf;

	*value = 0;

	buf = kmalloc(sizeof(u8), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * Fix: the timeout argument of usb_control_msg() is in
	 * milliseconds, not jiffies.  Passing HZ meant e.g. only a
	 * 100 ms timeout on HZ=100 kernels; use the USB core's
	 * standard control-request timeout instead.
	 */
	ret = usb_control_msg(dev->udev, pipe, 0x00,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0x00, reg, buf, sizeof(u8), USB_CTRL_GET_TIMEOUT);
	if (ret < 0) {
		stk1160_err("read failed on reg 0x%x (%d)\n",
			reg, ret);
		kfree(buf);
		return ret;
	}

	*value = *buf;
	kfree(buf);
	return 0;
}
/*
 * Write @value to device register @reg over the control endpoint.
 * Returns 0 on success or the usb_control_msg() error.
 */
int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value)
{
	int ret;
	int pipe = usb_sndctrlpipe(dev->udev, 0);

	/*
	 * Fix: usb_control_msg() takes its timeout in milliseconds,
	 * not jiffies -- HZ was wrong (HZ=100 gave only 100 ms).  Use
	 * the USB core's standard control-request timeout.
	 */
	ret = usb_control_msg(dev->udev, pipe, 0x01,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (ret < 0) {
		stk1160_err("write failed on reg 0x%x (%d)\n",
			reg, ret);
		return ret;
	}

	return 0;
}
/*
 * Route the currently selected input (dev->ctl_input) through the
 * saa711x decoder and program the matching STK1160 GCTRL value.
 * Out-of-range input indices are silently ignored.
 */
void stk1160_select_input(struct stk1160 *dev)
{
	static const u8 gctrl[] = {
		0x98, 0x90, 0x88, 0x80, 0x98
	};
	int route = (dev->ctl_input == STK1160_SVIDEO_INPUT) ?
			SAA7115_SVIDEO3 : SAA7115_COMPOSITE0;

	if (dev->ctl_input < ARRAY_SIZE(gctrl)) {
		v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
				route, 0, 0);
		stk1160_write_reg(dev, STK1160_GCTRL, gctrl[dev->ctl_input]);
	}
}
/* TODO: We should break this into pieces */
static void stk1160_reg_reset(struct stk1160 *dev)
{
int i;
static const struct regval ctl[] = {
{STK1160_GCTRL+2, 0x0078},
{STK1160_RMCTL+1, 0x0000},
{STK1160_RMCTL+3, 0x0002},
{STK1160_PLLSO, 0x0010},
{STK1160_PLLSO+1, 0x0000},
{STK1160_PLLSO+2, 0x0014},
{STK1160_PLLSO+3, 0x000E},
{STK1160_PLLFD, 0x0046},
/* Timing generator setup */
{STK1160_TIGEN, 0x0012},
{STK1160_TICTL, 0x002D},
{STK1160_TICTL+1, 0x0001},
{STK1160_TICTL+2, 0x0000},
{STK1160_TICTL+3, 0x0000},
{STK1160_TIGEN, 0x0080},
{0xffff, 0xffff}
};
for (i = 0; ctl[i].reg != 0xffff; i++)
stk1160_write_reg(dev, ctl[i].reg, ctl[i].val);
}
/*
 * v4l2_device release callback: runs when the last reference to the
 * v4l2 device is dropped, which may be well after USB disconnect.
 * Frees everything allocated in probe.
 */
static void stk1160_release(struct v4l2_device *v4l2_dev)
{
	struct stk1160 *dev = container_of(v4l2_dev, struct stk1160, v4l2_dev);

	stk1160_info("releasing all resources\n");

	stk1160_i2c_unregister(dev);

	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(&dev->v4l2_dev);
	kfree(dev->alt_max_pkt_size);
	kfree(dev);
}
/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))

/*
 * Scan usb interface and populate max_pkt_size array
 * with information on each alternate setting.
 * The array should be allocated by the caller.
 * Returns -ENODEV if no audio or video isoc-in endpoint exists.
 */
static int stk1160_scan_usb(struct usb_interface *intf, struct usb_device *udev,
		unsigned int *max_pkt_size)
{
	int i, e, sizedescr, size, ifnum;
	const struct usb_endpoint_descriptor *desc;

	bool has_video = false, has_audio = false;
	const char *speed;

	ifnum = intf->altsetting[0].desc.bInterfaceNumber;

	/* Get endpoints */
	for (i = 0; i < intf->num_altsetting; i++) {

		for (e = 0; e < intf->altsetting[i].desc.bNumEndpoints; e++) {

			/* This isn't clear enough, at least to me */
			desc = &intf->altsetting[i].endpoint[e].desc;
			sizedescr = le16_to_cpu(desc->wMaxPacketSize);
			/* bits 10:0 = packet size; bits 12:11 = extra
			 * high-bandwidth transactions per microframe */
			size = sizedescr & 0x7ff;

			if (udev->speed == USB_SPEED_HIGH)
				size = size * hb_mult(sizedescr);

			if (usb_endpoint_xfer_isoc(desc) &&
			    usb_endpoint_dir_in(desc)) {
				switch (desc->bEndpointAddress) {
				case STK1160_EP_AUDIO:
					has_audio = true;
					break;
				case STK1160_EP_VIDEO:
					has_video = true;
					max_pkt_size[i] = size;
					break;
				}
			}
		}
	}

	/* Is this even possible? */
	if (!(has_audio || has_video)) {
		dev_err(&udev->dev, "no audio or video endpoints found\n");
		return -ENODEV;
	}

	switch (udev->speed) {
	case USB_SPEED_LOW:
		speed = "1.5";
		break;
	case USB_SPEED_FULL:
		speed = "12";
		break;
	case USB_SPEED_HIGH:
		speed = "480";
		break;
	default:
		speed = "unknown";
	}

	/* NOTE(review): the "class %d" field below is actually passed
	 * bInterfaceNumber, not the interface class -- confirm whether
	 * bInterfaceClass was intended. */
	dev_info(&udev->dev, "New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n",
		udev->manufacturer ? udev->manufacturer : "",
		udev->product ? udev->product : "",
		speed,
		le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct),
		ifnum,
		intf->altsetting->desc.bInterfaceNumber);

	/* This should never happen, since we rejected audio interfaces */
	if (has_audio)
		dev_warn(&udev->dev, "audio interface %d found.\n\
This is not implemented by this driver,\
you should use snd-usb-audio instead\n", ifnum);

	if (has_video)
		dev_info(&udev->dev, "video interface %d found\n",
			ifnum);

	/*
	 * Make sure we have 480 Mbps of bandwidth, otherwise things like
	 * video stream wouldn't likely work, since 12 Mbps is generally
	 * not enough even for most streams.
	 */
	if (udev->speed != USB_SPEED_HIGH)
		dev_warn(&udev->dev, "must be connected to a high-speed USB 2.0 port\n\
You may not be able to stream video smoothly\n");

	return 0;
}
/*
 * USB probe: reject audio interfaces, collect endpoint sizes, allocate
 * driver state, then bring up videobuf2, the control handler, the v4l2
 * device, i2c, the saa711x subdevice and finally the video node.  The
 * video node is registered last on purpose so no locking is needed
 * earlier.  Error paths unwind in strict reverse order.
 */
static int stk1160_probe(struct usb_interface *interface,
		const struct usb_device_id *id)
{
	int rc = 0;

	unsigned int *alt_max_pkt_size;	/* array of wMaxPacketSize */
	struct usb_device *udev;
	struct stk1160 *dev;

	udev = interface_to_usbdev(interface);

	/*
	 * Since usb audio class is supported by snd-usb-audio,
	 * we reject audio interface.
	 */
	if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO)
		return -ENODEV;

	/* Alloc an array for all possible max_pkt_size */
	alt_max_pkt_size = kmalloc(sizeof(alt_max_pkt_size[0]) *
			interface->num_altsetting, GFP_KERNEL);
	if (alt_max_pkt_size == NULL)
		return -ENOMEM;

	/*
	 * Scan usb posibilities and populate alt_max_pkt_size array.
	 * Also, check if device speed is fast enough.
	 */
	rc = stk1160_scan_usb(interface, udev, alt_max_pkt_size);
	if (rc < 0) {
		kfree(alt_max_pkt_size);
		return rc;
	}

	dev = kzalloc(sizeof(struct stk1160), GFP_KERNEL);
	if (dev == NULL) {
		kfree(alt_max_pkt_size);
		return -ENOMEM;
	}

	dev->alt_max_pkt_size = alt_max_pkt_size;
	dev->udev = udev;
	dev->num_alt = interface->num_altsetting;
	/* NOTE(review): the module parameter is not range-checked here;
	 * stk1160_select_input() silently ignores out-of-range values */
	dev->ctl_input = input;

	/* We save struct device for debug purposes only */
	dev->dev = &interface->dev;

	usb_set_intfdata(interface, dev);

	/* initialize videobuf2 stuff */
	rc = stk1160_vb2_setup(dev);
	if (rc < 0)
		goto free_err;

	/*
	 * There is no need to take any locks here in probe
	 * because we register the device node as the *last* thing.
	 */
	spin_lock_init(&dev->buf_lock);
	mutex_init(&dev->v4l_lock);
	mutex_init(&dev->vb_queue_lock);

	rc = v4l2_ctrl_handler_init(&dev->ctrl_handler, 0);
	if (rc) {
		stk1160_err("v4l2_ctrl_handler_init failed (%d)\n", rc);
		goto free_err;
	}

	/*
	 * We obtain a v4l2_dev but defer
	 * registration of video device node as the last thing.
	 * There is no need to set the name if we give a device struct
	 */
	dev->v4l2_dev.release = stk1160_release;
	dev->v4l2_dev.ctrl_handler = &dev->ctrl_handler;
	rc = v4l2_device_register(dev->dev, &dev->v4l2_dev);
	if (rc) {
		stk1160_err("v4l2_device_register failed (%d)\n", rc);
		goto free_ctrl;
	}

	rc = stk1160_i2c_register(dev);
	if (rc < 0)
		goto unreg_v4l2;

	/*
	 * To the best of my knowledge stk1160 boards only have
	 * saa7113, but it doesn't hurt to support them all.
	 */
	dev->sd_saa7115 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
		"saa7115_auto", 0, saa7113_addrs);

	stk1160_info("driver ver %s successfully loaded\n",
		STK1160_VERSION);

	/* i2c reset saa711x */
	v4l2_device_call_all(&dev->v4l2_dev, 0, core, reset, 0);
	v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);

	/* reset stk1160 to default values */
	stk1160_reg_reset(dev);

	/* select default input */
	stk1160_select_input(dev);

	stk1160_ac97_register(dev);

	rc = stk1160_video_register(dev);
	if (rc < 0)
		goto unreg_i2c;

	return 0;

unreg_i2c:
	stk1160_i2c_unregister(dev);
unreg_v4l2:
	v4l2_device_unregister(&dev->v4l2_dev);
free_ctrl:
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
free_err:
	kfree(alt_max_pkt_size);
	kfree(dev);
	return rc;
}
/*
 * USB disconnect: stop streaming and unregister the video node while
 * holding both driver mutexes so in-flight v4l2 operations finish
 * first.  Final freeing is deferred to stk1160_release() via the
 * v4l2_device refcount.
 */
static void stk1160_disconnect(struct usb_interface *interface)
{
	struct stk1160 *dev;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/*
	 * Wait until all current v4l2 operation are finished
	 * then deallocate resources
	 */
	mutex_lock(&dev->vb_queue_lock);
	mutex_lock(&dev->v4l_lock);

	/* Here is the only place where isoc get released */
	stk1160_uninit_isoc(dev);

	/* ac97 unregister needs to be done before usb_device is cleared */
	stk1160_ac97_unregister(dev);

	stk1160_clear_queue(dev);

	video_unregister_device(&dev->vdev);
	v4l2_device_disconnect(&dev->v4l2_dev);

	/* This way current users can detect device is gone */
	dev->udev = NULL;

	mutex_unlock(&dev->v4l_lock);
	mutex_unlock(&dev->vb_queue_lock);

	/*
	 * This calls stk1160_release if it's the last reference.
	 * therwise, release is posponed until there are no users left.
	 */
	v4l2_device_put(&dev->v4l2_dev);
}
/* USB driver glue: matches stk1160_id_table, no suspend/resume. */
static struct usb_driver stk1160_usb_driver = {
	.name = "stk1160",
	.id_table = stk1160_id_table,
	.probe = stk1160_probe,
	.disconnect = stk1160_disconnect,
};

module_usb_driver(stk1160_usb_driver);
| gpl-2.0 |
cmartinbaughman/HTC_Shooter_Kernel | drivers/staging/rts_pstor/rtsx_transport.c | 3160 | 19889 | /* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"
#include "debug.h"
/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
 * points to a list of s-g entries and we ignore srb->request_bufflen.
 * For non-scatter-gather transfers, srb->request_buffer points to the
 * transfer buffer itself and srb->request_bufflen is the buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off.
 * Returns the number of bytes actually transferred. */
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *) scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *) scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE-1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {

				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {

				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
/* Copy @buflen bytes from @buffer into the srb's transfer buffer and
 * record any shortfall as the SCSI residue. */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int idx = 0;
	unsigned int off = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &idx, &off,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/* Copy up to @buflen bytes out of the srb's transfer buffer into
 * @buffer and record any shortfall as the SCSI residue. */
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int idx = 0;
	unsigned int off = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &idx, &off,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/***********************************************************************
 * Transport routines
 ***********************************************************************/

/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 * Sets srb->result to SAM_STAT_GOOD, SAM_STAT_CHECK_CONDITION (with the
 * cached sense data copied in), DID_ABORT or DID_ERROR.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		RTSX_DEBUGP("-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTSX_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset.
	 * NOTE(review): the recovery described above is not implemented --
	 * this label currently just returns. */
Handle_Errors:
	return;
}
/*
 * Append one register command to the host command buffer.  The 32-bit
 * encoding is: cmd_type in bits 31:30, register address in 29:16,
 * mask in 15:8, data in 7:0.  Silently drops the command when the
 * buffer is full.
 */
void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cb = (u32 *)(chip->host_cmds_ptr);
	u32 word;

	word = ((u32)(cmd_type & 0x03) << 30) |
		((u32)(reg_addr & 0x3FFF) << 16) |
		((u32)mask << 8) |
		(u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(word);
	spin_unlock_irq(&chip->rtsx->reg_lock);
}
/*
 * Kick off execution of the queued host command buffer without waiting
 * for completion.  The caller must have filled the buffer beforehand
 * via rtsx_add_cmd().
 */
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	/* Fix: use an unsigned literal -- "1 << 31" left-shifts a
	 * signed int into its sign bit, which is undefined behavior
	 * in C (CERT INT32-C). */
	u32 val = 1U << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	/* low 24 bits carry the command-buffer length in bytes */
	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}
/*
 * Execute the queued host command buffer for @card and wait for the
 * completion interrupt.
 *
 * @timeout is in milliseconds.  Returns 0 on success, -ETIMEDOUT if
 * the interrupt never arrives, -EIO on a failed transfer; on error the
 * command engine is stopped via rtsx_stop_cmd().
 */
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	/* Fix: unsigned literal -- "1 << 31" shifts a signed int into
	 * its sign bit, which is undefined behavior in C. */
	u32 val = 1U << 31;
	long timeleft;
	int err = 0;

	/* tell the card-detect logic which card this command targets */
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	/* low 24 bits carry the command-buffer length in bytes */
	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Append an (addr, len) entry to the host scatter-gather descriptor
 * table, splitting entries larger than the 0x80000-byte hardware limit
 * into multiple pieces.  @option flags (SG_VALID/SG_END/SG_TRANS_DATA)
 * are applied to each piece; SG_END is kept only on the final one.
 * Entries beyond the table capacity are silently dropped.
 */
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}

		/* descriptor layout: address in the high dword, length
		 * in bits 31:12, option flags in the low bits */
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}
/*
 * DMA-transfer up to @size bytes of a scatterlist in ADMA mode,
 * resuming from (*index, *offset) within the list and updating both so
 * the caller can continue a partial transfer later.
 *
 * @dma_dir selects the direction (DMA_TO_DEVICE / DMA_FROM_DEVICE);
 * @timeout is in milliseconds.  Returns 0 on success, -EIO / -ENXIO on
 * bad arguments, -ETIMEDOUT or -EIO on transfer failure; on error the
 * card command engine is stopped.
 */
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	/* tell the card-detect logic which card this transfer targets */
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	resid = size;

	chip->sgi = 0;
	/* Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg
	 */
	for (i = 0; i < *index; i++)
		sg = sg_next(sg);

	/* build the descriptor table, consuming up to @resid bytes and
	 * tracking (*index, *offset) as entries are used up */
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* this entry satisfies the rest of the request */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* entry fully consumed; advance to the next one */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid) {
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		} else {
			option = SG_VALID | SG_TRANS_DATA;
		}

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg = sg_next(sg);
	}

	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * rtsx_transfer_sglist_adma - run a scatter-gather DMA transfer in ADMA mode
 * @chip:    adapter instance
 * @card:    card type (SD_CARD/MS_CARD/XD_CARD); selects the card-detect
 *           source and is passed to rtsx_stop_cmd() on failure
 * @sg:      scatterlist to transfer
 * @num_sg:  number of entries in @sg
 * @dma_dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @timeout: completion timeout per chunk, in milliseconds
 *
 * The mapped entries are written into the host SG table in chunks of at
 * most HOST_SG_TBL_BUF_LEN / 8 descriptors; one DMA transaction is
 * triggered and awaited per chunk.
 *
 * Returns 0 on success or a negative errno (-EIO/-ENXIO/-ETIMEDOUT).
 */
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);
	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;
	spin_unlock_irq(&rtsx->reg_lock);

	/* NOTE(review): a zero return (mapping failure) is not checked; it
	 * would trigger one DMA round with an empty SG table — verify. */
	buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		/* Last (possibly partial) chunk vs. a full chunk. */
		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8)) {
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		} else {
			sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
		}

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
				    (unsigned int)addr, len);

			/* Mark the final descriptor of this chunk. */
			if (j == (sg_cnt - 1)) {
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			} else {
				option = SG_VALID | SG_TRANS_DATA;
			}

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);
		init_completion(&trans_done);
		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);
		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/*
		 * BUGFIX: the old code additionally did "sg_ptr += sg_cnt;"
		 * here.  The inner loop above has already advanced sg_ptr
		 * over all sg_cnt entries via sg_next(), so the extra
		 * pointer arithmetic skipped entries on every chunk after
		 * the first (and plain pointer arithmetic is invalid for
		 * chained scatterlists anyway).  Dropped.
		 */
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * rtsx_transfer_buf - DMA a single contiguous buffer to/from the card
 * @chip:    adapter instance
 * @card:    card type (SD_CARD/MS_CARD/XD_CARD); selects the card-detect
 *           source and is passed to rtsx_stop_cmd() on failure
 * @buf:     kernel buffer to transfer (must be DMA-mappable)
 * @len:     number of bytes; only the low 24 bits fit in HDBCTLR
 * @dma_dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @timeout: completion timeout in milliseconds
 *
 * Returns 0 on success or a negative errno.
 */
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	/* BUGFIX: was "(1 << 31)" — left-shifting into the sign bit of a
	 * signed int is undefined behavior; use an unsigned constant. */
	u32 val = 1u << 31;
	long timeleft;

	/* BUGFIX: len is size_t (unsigned), so "len <= 0" could only ever
	 * mean "len == 0"; spell it that way. */
	if ((buf == NULL) || (len == 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
	/* NOTE(review): mapping failure should be detected with
	 * dma_mapping_error() rather than a NULL check — confirm before
	 * changing, as it alters the error path. */
	if (!addr)
		return -ENOMEM;

	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	/* Bit 31 = trigger, bit 29 = direction, low 24 bits = length. */
	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);
	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;
	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * rtsx_transfer_data_partial - transfer part of a request's data
 *
 * Dispatches either to the partial scatter-gather path (when @use_sg is
 * the SG entry count) or to the single-buffer path.  @index/@offset track
 * the resume position inside the scatterlist across calls.  On any error
 * a pending DELINK condition triggers a full card re-initialization.
 */
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
		void *buf, size_t len, int use_sg, unsigned int *index,
		unsigned int *offset, enum dma_data_direction dma_dir,
		int timeout)
{
	int err;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	else
		err = rtsx_transfer_buf(chip, card,
				buf, len, dma_dir, timeout);

	if (err < 0 && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return err;
}
/*
 * rtsx_transfer_data - transfer a complete request's data
 *
 * @use_sg is the number of scatterlist entries when @buf is a
 * scatterlist, or 0 when @buf is a plain kernel buffer of @len bytes.
 * On any error a pending DELINK condition triggers a full card
 * re-initialization.
 */
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err;

	RTSX_DEBUGP("use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	else
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
				timeout);

	if (err < 0 && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return err;
}
| gpl-2.0 |
ReflexBow/ghost | arch/microblaze/mm/highmem.c | 4696 | 2242 | /*
* highmem.c: virtual kernel memory mappings for high memory
*
* PowerPC version, stolen from the i386 version.
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terrabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*
* Reworked for PowerPC by various contributors. Moved from
* highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
*/
#include <linux/highmem.h>
#include <linux/module.h>
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
#include <asm/tlbflush.h>
/*
 * kmap_atomic_prot - temporarily map a (possibly highmem) page with the
 * given protection.
 *
 * Lowmem pages are returned directly via page_address(); highmem pages
 * get a per-CPU fixmap slot.  Must be paired with __kunmap_atomic().
 * pagefault_disable() makes the mapping usable from atomic context.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Grab the next free per-CPU kmap slot and compute its fixmap VA. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be empty; a live PTE means a missing kunmap. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	/* Install the PTE and flush any stale TLB entry for this slot. */
	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
	local_flush_tlb_page(NULL, vaddr);
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
/*
 * __kunmap_atomic - undo a kmap_atomic_prot() mapping.
 *
 * Addresses below the fixmap window were lowmem and never remapped, so
 * only pagefault_enable() is needed.  For fixmap slots the PTE is
 * explicitly cleared only under CONFIG_DEBUG_HIGHMEM; otherwise the slot
 * is simply recycled and overwritten by the next kmap_atomic.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/* Lowmem address: nothing was mapped, just re-enable pagefaults. */
	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		/* The address must match the slot on top of the kmap stack. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
| gpl-2.0 |
binkybear/android_kernel_samsung_exynos5410 | drivers/s390/char/vmcp.c | 4952 | 5248 | /*
* Copyright IBM Corp. 2004,2010
* Interface implementation for communication with the z/VM control program
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* z/VMs CP offers the possibility to issue commands via the diagnose code 8
* this driver implements a character device that issues these commands and
* returns the answer of CP.
*
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
#include "vmcp.h"
static debug_info_t *vmcp_debug;
/*
 * vmcp_open - allocate a per-open session (root only)
 *
 * The CP response buffer itself is allocated lazily on the first write;
 * here only the bookkeeping is set up.
 */
static int vmcp_open(struct inode *inode, struct file *file)
{
	struct vmcp_session *priv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	priv->response = NULL;
	priv->resp_size = 0;
	priv->bufsize = PAGE_SIZE;
	mutex_init(&priv->mutex);
	file->private_data = priv;
	return nonseekable_open(inode, file);
}
/*
 * vmcp_release - tear down the session allocated in vmcp_open()
 *
 * free_pages() tolerates a zero address, so a session that never issued
 * a command (response == NULL) is handled without a special case.
 */
static int vmcp_release(struct inode *inode, struct file *file)
{
	struct vmcp_session *priv = file->private_data;

	file->private_data = NULL;
	free_pages((unsigned long)priv->response, get_order(priv->bufsize));
	kfree(priv);
	return 0;
}
/*
 * vmcp_read - copy the buffered CP response to user space
 *
 * Returns 0 (EOF) if no command has produced a response yet.  The copy
 * is capped at both the stored response size and the current buffer
 * size, since resp_size reported by cpcmd() can exceed the buffer when
 * CP truncated the answer.
 */
static ssize_t
vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
	ssize_t ret;
	size_t size;
	struct vmcp_session *session;

	session = file->private_data;
	/* interruptible: a signal aborts waiting on a concurrent writer */
	if (mutex_lock_interruptible(&session->mutex))
		return -ERESTARTSYS;
	if (!session->response) {
		mutex_unlock(&session->mutex);
		return 0;
	}
	size = min_t(size_t, session->resp_size, session->bufsize);
	ret = simple_read_from_buffer(buff, count, ppos,
					session->response, size);
	mutex_unlock(&session->mutex);
	return ret;
}
/*
 * vmcp_write - issue a CP command via diagnose 8
 *
 * Commands are limited to 240 characters.  The response buffer is
 * allocated lazily as contiguous GFP_DMA pages (diagnose 8 expects
 * adjacent pages in real storage; see the file header comment).  On
 * success the whole @count is consumed and *ppos is reset so that a
 * following read starts at the beginning of the new response.
 */
static ssize_t
vmcp_write(struct file *file, const char __user *buff, size_t count,
	   loff_t *ppos)
{
	char *cmd;
	struct vmcp_session *session;

	if (count > 240)
		return -EINVAL;
	cmd = kmalloc(count + 1, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	if (copy_from_user(cmd, buff, count)) {
		kfree(cmd);
		return -EFAULT;
	}
	cmd[count] = '\0';
	session = file->private_data;
	if (mutex_lock_interruptible(&session->mutex)) {
		kfree(cmd);
		return -ERESTARTSYS;
	}
	/* Allocate the response buffer on first use (or after SETBUF). */
	if (!session->response)
		session->response = (char *)__get_free_pages(GFP_KERNEL
						| __GFP_REPEAT | GFP_DMA,
						get_order(session->bufsize));
	if (!session->response) {
		mutex_unlock(&session->mutex);
		kfree(cmd);
		return -ENOMEM;
	}
	debug_text_event(vmcp_debug, 1, cmd);
	session->resp_size = cpcmd(cmd, session->response, session->bufsize,
				&session->resp_code);
	mutex_unlock(&session->mutex);
	kfree(cmd);
	*ppos = 0;		/* reset the file pointer after a command */
	return count;
}
/*
* These ioctls are available, as the semantics of the diagnose 8 call
* does not fit very well into a Linux call. Diagnose X'08' is described in
* CP Programming Services SC24-6084-00
*
* VMCP_GETCODE: gives the CP return code back to user space
* VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8
* expects adjacent pages in real storage and to make matters worse, we
* dont know the size of the response. Therefore we default to PAGESIZE and
* let userspace to change the response size, if userspace expects a bigger
* response
*/
/*
 * vmcp_ioctl - auxiliary controls for the diagnose 8 interface
 *
 * VMCP_GETCODE: return the CP return code of the last command.
 * VMCP_SETBUF:  free the current response buffer and set a new size for
 *               the next write (rejected above order 8, i.e. 256 pages).
 * VMCP_GETSIZE: return the size of the last response.
 */
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vmcp_session *session;
	int __user *argp;
	int temp;

	session = file->private_data;
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (int __user *)arg;
	if (mutex_lock_interruptible(&session->mutex))
		return -ERESTARTSYS;
	switch (cmd) {
	case VMCP_GETCODE:
		temp = session->resp_code;
		mutex_unlock(&session->mutex);
		return put_user(temp, argp);
	case VMCP_SETBUF:
		free_pages((unsigned long)session->response,
				get_order(session->bufsize));
		session->response = NULL;
		temp = get_user(session->bufsize, argp);
		if (temp != 0) {
			/*
			 * BUGFIX: on a faulted get_user() the old code went
			 * on to validate whatever value (typically 0) had
			 * been stored into session->bufsize and could leave
			 * a zero buffer size for the next write.  Restore
			 * the default instead and return the fault.
			 */
			session->bufsize = PAGE_SIZE;
		} else if (get_order(session->bufsize) > 8) {
			session->bufsize = PAGE_SIZE;
			temp = -EINVAL;
		}
		mutex_unlock(&session->mutex);
		return temp;
	case VMCP_GETSIZE:
		temp = session->resp_size;
		mutex_unlock(&session->mutex);
		return put_user(temp, argp);
	default:
		mutex_unlock(&session->mutex);
		return -ENOIOCTLCMD;
	}
}
/* File operations for the vmcp misc device; each handler serializes on
 * the per-open session mutex. */
static const struct file_operations vmcp_fops = {
	.owner		= THIS_MODULE,
	.open		= vmcp_open,
	.release	= vmcp_release,
	.read		= vmcp_read,
	.write		= vmcp_write,
	.unlocked_ioctl	= vmcp_ioctl,
	.compat_ioctl	= vmcp_ioctl,	/* ioctl handles compat itself */
	.llseek		= no_llseek,
};
/* /dev/vmcp with a dynamically assigned misc minor. */
static struct miscdevice vmcp_dev = {
	.name	= "vmcp",
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &vmcp_fops,
};
/*
 * vmcp_init - register the debug area and the vmcp misc device
 *
 * Silently does nothing when not running under z/VM.  On any failure
 * after debug_register() succeeded, the debug area is unregistered
 * again before returning the error.
 */
static int __init vmcp_init(void)
{
	int rc;

	if (!MACHINE_IS_VM)
		return 0;

	vmcp_debug = debug_register("vmcp", 1, 1, 240);
	if (vmcp_debug == NULL)
		return -ENOMEM;

	rc = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
	if (rc)
		goto out_debug;

	rc = misc_register(&vmcp_dev);
	if (rc)
		goto out_debug;

	return 0;

out_debug:
	debug_unregister(vmcp_debug);
	return rc;
}
device_initcall(vmcp_init);
| gpl-2.0 |
SeoDongMin/Gace_kernel | arch/x86/boot/mkcpustr.c | 9048 | 1251 | /* ----------------------------------------------------------------------- *
*
* Copyright 2008 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
* option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* This is a host program to preprocess the CPU strings into a
* compact format suitable for the setup code.
*/
#include <stdio.h>
#include "../kernel/cpu/capflags.c"
/*
 * Emit the x86_cap_strs[] initializer on stdout: one "\xII\xJJ" prefix
 * plus the flag name per known CPU feature bit, each wrapped in an
 * REQUIRED_MASK preprocessor guard except for the unconditional final
 * entry.
 */
int main(void)
{
	int k;
	const char *str;

	printf("static const char x86_cap_strs[] =\n");

	/* Walk all NCAPINTS*32 feature bits with a single flat index. */
	for (k = 0; k < NCAPINTS * 32; k++) {
		int i = k / 32, j = k % 32;

		str = x86_cap_flags[k];
		if (k == NCAPINTS * 32 - 1) {
			/* The last entry must be unconditional; this
			   also consumes the compiler-added null
			   character */
			if (!str)
				str = "";
			printf("\t\"\\x%02x\\x%02x\"\"%s\"\n",
			       i, j, str);
		} else if (str) {
			printf("#if REQUIRED_MASK%d & (1 << %d)\n"
			       "\t\"\\x%02x\\x%02x\"\"%s\\0\"\n"
			       "#endif\n",
			       i, j, i, j, str);
		}
	}
	printf("\t;\n");

	return 0;
}
| gpl-2.0 |
mmeslab/linux-nctusde | arch/mips/pci/pci-tx4927.c | 9048 | 2643 | /*
* Based on linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx4927.h>
/*
 * tx4927_report_pciclk - report the PCI clock source and rate at boot
 *
 * If PCFG says the internal PCI clock is enabled, the rate is derived
 * from the CPU clock and the CCFG divider mode; otherwise an external
 * clock is assumed.
 *
 * Returns the internal PCI clock in Hz (0 if no divider mode matched —
 * presumably unreachable since the 2-bit mask covers all four cases),
 * or -1 for an external clock.
 */
int __init tx4927_report_pciclk(void)
{
	int pciclk = 0;

	printk(KERN_INFO "PCIC --%s PCICLK:",
	       (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) ?
	       " PCI66" : "");
	if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
		u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);

		/* Internal clock: CPU clock divided per CCFG PCIDIVMODE. */
		switch ((unsigned long)ccfg &
			TX4927_CCFG_PCIDIVMODE_MASK) {
		case TX4927_CCFG_PCIDIVMODE_2_5:
			pciclk = txx9_cpu_clock * 2 / 5; break;
		case TX4927_CCFG_PCIDIVMODE_3:
			pciclk = txx9_cpu_clock / 3; break;
		case TX4927_CCFG_PCIDIVMODE_5:
			pciclk = txx9_cpu_clock / 5; break;
		case TX4927_CCFG_PCIDIVMODE_6:
			pciclk = txx9_cpu_clock / 6; break;
		}
		/* Print as MHz with one decimal, rounded. */
		printk("Internal(%u.%uMHz)",
		       (pciclk + 50000) / 1000000,
		       ((pciclk + 50000) / 100000) % 10);
	} else {
		printk("External");
		pciclk = -1;
	}
	printk("\n");
	return pciclk;
}
/*
 * tx4927_pciclk66_setup - switch the PCI bus to 66MHz operation
 *
 * Asserts M66EN in CCFG and, when the internal PCI clock is in use,
 * halves the divider (5 or 2.5 -> 2.5; 6, 3 or anything else -> 3) so
 * the PCI clock doubles.
 *
 * Returns the resulting internal PCI clock in Hz, or -1 if an external
 * clock is in use (then only M66EN is asserted).
 */
int __init tx4927_pciclk66_setup(void)
{
	int pciclk;

	/* Assert M66EN */
	tx4927_ccfg_set(TX4927_CCFG_PCI66);
	/* Double PCICLK (if possible) */
	if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
		unsigned int pcidivmode = 0;
		u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);

		pcidivmode = (unsigned long)ccfg &
			TX4927_CCFG_PCIDIVMODE_MASK;
		switch (pcidivmode) {
		case TX4927_CCFG_PCIDIVMODE_5:
		case TX4927_CCFG_PCIDIVMODE_2_5:
			pcidivmode = TX4927_CCFG_PCIDIVMODE_2_5;
			pciclk = txx9_cpu_clock * 2 / 5;
			break;
		case TX4927_CCFG_PCIDIVMODE_6:
		case TX4927_CCFG_PCIDIVMODE_3:
		default:
			pcidivmode = TX4927_CCFG_PCIDIVMODE_3;
			pciclk = txx9_cpu_clock / 3;
		}
		/* Write the (possibly) new divider back to CCFG. */
		tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK,
				   pcidivmode);
		printk(KERN_DEBUG "PCICLK: ccfg:%08lx\n",
		       (unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg));
	} else
		pciclk = -1;
	return pciclk;
}
/*
 * tx4927_setup_pcierr_irq - hook the PCI error interrupt handler
 *
 * Failure is only reported; PCI keeps working without error reporting.
 */
void __init tx4927_setup_pcierr_irq(void)
{
	int rc;

	rc = request_irq(TXX9_IRQ_BASE + TX4927_IR_PCIERR,
			 tx4927_pcierr_interrupt, 0, "PCI error",
			 (void *)TX4927_PCIC_REG);
	if (rc)
		printk(KERN_WARNING "Failed to request irq for PCIERR\n");
}
| gpl-2.0 |
NoelMacwan/Kernel-10.4.1.B.0.101 | arch/powerpc/platforms/powermac/udbg_adb.c | 12120 | 5131 | #include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cuda.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/bootx.h>
#include <asm/errno.h>
#include <asm/pmac_feature.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/btext.h>
#include <asm/time.h>
#include <asm/udbg.h>
/*
* This implementation is "special", it can "patch" the current
* udbg implementation and work on top of it. It must thus be
* initialized last
*/
static void (*udbg_adb_old_putc)(char c);
static int (*udbg_adb_old_getc)(void);
static int (*udbg_adb_old_getc_poll)(void);
static enum {
input_adb_none,
input_adb_pmu,
input_adb_cuda,
} input_type = input_adb_none;
int xmon_wants_key, xmon_adb_keycode;
static inline void udbg_adb_poll(void)
{
#ifdef CONFIG_ADB_PMU
if (input_type == input_adb_pmu)
pmu_poll_adb();
#endif /* CONFIG_ADB_PMU */
#ifdef CONFIG_ADB_CUDA
if (input_type == input_adb_cuda)
cuda_poll();
#endif /* CONFIG_ADB_CUDA */
}
#ifdef CONFIG_BOOTX_TEXT
/* Non-zero when btext (boot-time text console) output is usable. */
static int udbg_adb_use_btext;
/* Tracks whether a shift modifier is currently held down. */
static int xmon_adb_shiftstate;
/* ADB keycode -> ASCII translation tables (unshifted / shifted).
 * Indexed by the 7-bit ADB scancode; a 0 entry means "no mapping". */
static unsigned char xmon_keytab[128] =
	"asdfhgzxcv\000bqwer"				/* 0x00 - 0x0f */
	"yt123465=97-80]o"				/* 0x10 - 0x1f */
	"u[ip\rlj'k;\\,/nm."				/* 0x20 - 0x2f */
	"\t `\177\0\033\0\0\0\0\0\0\0\0\0\0"		/* 0x30 - 0x3f */
	"\0.\0*\0+\0\0\0\0\0/\r\0-\0"			/* 0x40 - 0x4f */
	"\0\0000123456789\0\0\0";			/* 0x50 - 0x5f */
static unsigned char xmon_shift_keytab[128] =
	"ASDFHGZXCV\000BQWER"				/* 0x00 - 0x0f */
	"YT!@#$^%+(&_*)}O"				/* 0x10 - 0x1f */
	"U{IP\rLJ\"K:|<?NM>"				/* 0x20 - 0x2f */
	"\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0"		/* 0x30 - 0x3f */
	"\0.\0*\0+\0\0\0\0\0/\r\0-\0"			/* 0x40 - 0x4f */
	"\0\0000123456789\0\0\0";			/* 0x50 - 0x5f */
/*
 * udbg_adb_local_getc - blocking keyboard read over ADB + btext
 *
 * Busy-polls the ADB driver (and any underlying udbg getc_poll) while
 * blinking a block cursor on the btext display.  Raw ADB keycodes are
 * translated through the keytabs above; shift state is tracked here and
 * key-up transitions (high bit set) are ignored.  Returns a character.
 */
static int udbg_adb_local_getc(void)
{
	int k, t, on;

	/* Tell the ADB interrupt path to route keycodes to us. */
	xmon_wants_key = 1;
	for (;;) {
		xmon_adb_keycode = -1;
		t = 0;
		on = 0;
		k = -1;
		do {
			/* Toggle the cursor every 200000 poll rounds. */
			if (--t < 0) {
				on = 1 - on;
				btext_drawchar(on? 0xdb: 0x20);
				btext_drawchar('\b');
				t = 200000;
			}
			udbg_adb_poll();
			if (udbg_adb_old_getc_poll)
				k = udbg_adb_old_getc_poll();
		} while (k == -1 && xmon_adb_keycode == -1);
		if (on)
			btext_drawstring(" \b");	/* erase the cursor */
		/* A character from the underlying udbg wins over ADB. */
		if (k != -1)
			return k;
		k = xmon_adb_keycode;

		/* test for shift keys */
		if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
			xmon_adb_shiftstate = (k & 0x80) == 0;
			continue;
		}
		if (k >= 0x80)
			continue;	/* ignore up transitions */
		k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
		if (k != 0)
			break;
	}
	xmon_wants_key = 0;
	return k;
}
#endif /* CONFIG_BOOTX_TEXT */
/*
 * udbg_adb_getc - blocking character read
 *
 * Prefers the local btext/ADB reader when available, otherwise falls
 * back to the previously installed udbg getc.  Returns -1 when no
 * backend exists.
 */
static int udbg_adb_getc(void)
{
#ifdef CONFIG_BOOTX_TEXT
	if (udbg_adb_use_btext && input_type != input_adb_none)
		return udbg_adb_local_getc();
#endif
	return udbg_adb_old_getc ? udbg_adb_old_getc() : -1;
}
/* getc_poll() is not really used, unless you have the xmon-over modem
* hack that doesn't quite concern us here, thus we just poll the low level
* ADB driver to prevent it from timing out and call back the original poll
* routine.
*/
/*
 * udbg_adb_getc_poll - non-blocking poll: keep the ADB driver alive,
 * then delegate to the previous udbg poll routine if one exists.
 */
static int udbg_adb_getc_poll(void)
{
	int c = -1;

	udbg_adb_poll();
	if (udbg_adb_old_getc_poll)
		c = udbg_adb_old_getc_poll();
	return c;
}
/*
 * udbg_adb_putc - output a character on btext (if active) and on the
 * previously installed udbg output.
 */
static void udbg_adb_putc(char c)
{
#ifdef CONFIG_BOOTX_TEXT
	if (udbg_adb_use_btext)
		btext_drawchar(c);
#endif
	if (udbg_adb_old_putc)
		udbg_adb_old_putc(c);
}
/*
 * udbg_adb_init_early - install btext output as early as possible
 *
 * Only output is hooked here; input needs ADB probing which happens
 * later in udbg_adb_init().
 */
void __init udbg_adb_init_early(void)
{
#ifdef CONFIG_BOOTX_TEXT
	/* force_btext=1: take any display we can find. */
	if (btext_find_display(1) == 0) {
		udbg_adb_use_btext = 1;
		udbg_putc = udbg_adb_putc;
	}
#endif
}
/*
 * udbg_adb_init - layer ADB/btext console I/O on top of the current udbg
 *
 * Captures the previously installed udbg callbacks so ours can chain to
 * them, installs our putc/getc/getc_poll, then probes for an ADB
 * keyboard (PMU or CUDA).  Returns -ENODEV when no ADB keyboard exists;
 * in that case the btext output hook is deliberately left in place.
 */
int __init udbg_adb_init(int force_btext)
{
	struct device_node *np;

	/* Capture existing callbacks */
	udbg_adb_old_putc = udbg_putc;
	udbg_adb_old_getc = udbg_getc;
	udbg_adb_old_getc_poll = udbg_getc_poll;

	/* Check if our early init was already called */
	if (udbg_adb_old_putc == udbg_adb_putc)
		udbg_adb_old_putc = NULL;
#ifdef CONFIG_BOOTX_TEXT
	/* Don't chain to btext directly either; we draw ourselves. */
	if (udbg_adb_old_putc == btext_drawchar)
		udbg_adb_old_putc = NULL;
#endif

	/* Set ours as output */
	udbg_putc = udbg_adb_putc;
	udbg_getc = udbg_adb_getc;
	udbg_getc_poll = udbg_adb_getc_poll;

#ifdef CONFIG_BOOTX_TEXT
	/* Check if we should use btext output */
	if (btext_find_display(force_btext) == 0)
		udbg_adb_use_btext = 1;
#endif

	/* See if there is a keyboard in the device tree with a parent
	 * of type "adb". If not, we return a failure, but we keep the
	 * bext output set for now
	 */
	for (np = NULL; (np = of_find_node_by_name(np, "keyboard")) != NULL;) {
		struct device_node *parent = of_get_parent(np);
		int found = (parent && strcmp(parent->type, "adb") == 0);
		of_node_put(parent);
		if (found)
			break;
	}
	if (np == NULL)
		return -ENODEV;
	of_node_put(np);

	/* Pick whichever low-level ADB controller is present. */
#ifdef CONFIG_ADB_PMU
	if (find_via_pmu())
		input_type = input_adb_pmu;
#endif
#ifdef CONFIG_ADB_CUDA
	if (find_via_cuda())
		input_type = input_adb_cuda;
#endif

	/* Same as above: nothing found, keep btext set for output */
	if (input_type == input_adb_none)
		return -ENODEV;
	return 0;
}
| gpl-2.0 |
liuzhengcn/hc-adm-2.6.36 | drivers/scsi/bfa/bfa_ioc.c | 89 | 45394 | /*
* Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>
BFA_TRC_FILE(CNA, IOC);
/**
* IOC local definitions
*/
#define BFA_IOC_TOV 2000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_HWINIT_MAX 2
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
#define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN \
(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \
(sizeof(struct bfa_trc_mod_s) - \
BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/**
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
#define bfa_ioc_is_optrom(__ioc) \
(bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
* forward declarations
*/
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
/**
* bfa_ioc_sm
*/
/**
* IOC state machine events
*/
enum ioc_event {
IOC_E_ENABLE = 1, /* IOC enable request */
IOC_E_DISABLE = 2, /* IOC disable request */
IOC_E_TIMEOUT = 3, /* f/w response timeout */
IOC_E_FWREADY = 4, /* f/w initialization done */
IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */
IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */
IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */
IOC_E_HBFAIL = 8, /* heartbeat failure */
IOC_E_HWERROR = 9, /* hardware error interrupt */
IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */
IOC_E_DETACH = 11, /* driver detach cleanup */
};
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
* Reset entry actions -- initialize state machine
*/
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	/* Fresh start: clear the retry counter and latch the module-wide
	 * auto-recovery policy for this IOC instance. */
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}
/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Bring-up starts with the firmware version check. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		/* Already in reset; just run the disable completion. */
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		/* Nothing to clean up while in reset. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
* Semaphore should be acquired for version check.
*/
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	/* Start the (asynchronous) h/w semaphore acquisition. */
	bfa_ioc_hw_sem_get(ioc);
}
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			/* Firmware version matches: proceed to h/w init,
			 * keeping the semaphore held. */
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			/* Version mismatch: release the semaphore and wait
			 * for the other function / new firmware. */
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		/* Abandon the pending semaphore request. */
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		/* Spurious while checking; ignored. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
* Notify enable completion callback and generate mismatch AEN.
*/
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	/* Re-check the firmware version when the timer fires. */
	bfa_ioc_timer_start(ioc);
}
/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		/* Periodic retry of the version check. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		/* Ignored; the timer-driven recheck will pick it up. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
* Request for semaphore.
*/
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	/* Request the h/w semaphore again (needed before h/w init). */
	bfa_ioc_hw_sem_get(ioc);
}
/**
 * Awaiting semaphore for h/w initialzation.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		/* Got it; start hardware initialization from scratch. */
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		/* Withdraw the pending semaphore request. */
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	/* Arm the init timeout, then (re)initialize the hardware. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}
/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		/* Firmware came up; go enable the IOC function. */
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		/* Retry a forced reset up to BFA_IOC_HWINIT_MAX times,
		 * then give up and release the semaphore. */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	/* Arm the response timeout and send the enable mailbox command. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		/* Enabled: semaphore no longer needed; fetch attributes. */
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		/* Retry from h/w init (with fwstate forced to UNINIT)
		 * until the retry budget is exhausted. */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		/* Firmware restarted underneath us: resend the enable. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Arm the getattr timeout and request IOC attributes from firmware. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		/* Attributes received: validate WWNs and go operational. */
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through -- treated as an attribute-fetch timeout
		 */
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC is operational: report enable success, start heartbeat monitoring
 * and post the ENABLE AEN.
 */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

/* Operational state: normal running mode of the IOC. */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled; nothing to do. */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Post the DISABLE AEN, arm the timeout and send the disable request. */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOC_E_TIMEOUT:
		/* No firmware ack: force fwstate to FAIL and consider it disabled. */
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

/* Disabled state: IOC idle, can be re-enabled or torn down. */
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Re-enable: go back through semaphore acquisition. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		/* Already disabled; just report completion to the driver. */
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		/* Firmware readiness is irrelevant while disabled. */
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Report enable failure to the driver and arm the retry timer. */
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		/* Retry timer expired: attempt initialization again. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Heartbeat-failure entry: stop firmware, notify all interested parties,
 * flush pending mailbox work and (optionally) schedule auto-recovery.
 */
static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Cannot enable a failed IOC; report failure immediately. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		/* Recovery timer runs only when auto-recovery is enabled. */
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		/* Auto-recovery delay elapsed: restart initialization. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * bfa_ioc_pvt BFA IOC private functions
 */

/*
 * Disable completion: report to the driver and run every registered
 * heartbeat-failure notification callback.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

/* Semaphore poll timer callback: retry acquiring the h/w semaphore. */
void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * Busy-wait (up to BFA_SEM_SPINCNT reads, 2us apart) for the semaphore
 * register to read 0, i.e. for the lock to be granted to this reader.
 * Returns BFA_TRUE on success, BFA_FALSE (after asserting) on timeout.
 */
bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT 3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	/* Timed out: cnt == BFA_SEM_SPINCNT here, so the assert flags it. */
	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

/* Release a semaphore taken with bfa_ioc_sem_get() by writing 1. */
void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
/*
 * Try to take the IOC hardware semaphore. On success send IOC_E_SEMLOCKED
 * to the FSM; otherwise arm a timer (bfa_ioc_sem_timeout) to retry.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_HWSEM_TOV);
}

/* Release the IOC hardware semaphore (write 1 to the register). */
void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

/* Stop the semaphore retry timer armed by bfa_ioc_hw_sem_get(). */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}
/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME 10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	/* Clear the init-enable/done bits now that initialization finished. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
/**
 * Get driver and firmware versions.
 *
 * Reads the firmware image header out of shared memory (via the host
 * page-window register) into @fwhdr, one 32-bit word at a time.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	/* Select the shared-memory page that holds the image header. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
/**
 * Returns TRUE if same.
 *
 * Compares the md5sum of the running firmware header @fwhdr against the
 * md5sum of the driver's bundled firmware image, byte by byte.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			/* Trace the first mismatching byte for diagnosis. */
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	/* Signature and context match: fall back to full md5sum compare. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
 * Conditionally flush any pending message from firmware at start.
 *
 * A non-zero lpu_mbox_cmd indicates an unread message; writing 1 clears it.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}
/*
 * Decide how to bring up the IOC: reuse a firmware that is already valid
 * and running, wait on an init started by the partner function, or boot
 * the driver's firmware image from scratch. @force pretends the current
 * fwstate is UNINIT, forcing a full reboot.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
/* IOC timer callback: forward a TIMEOUT event to the IOC state machine. */
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
/*
 * Write a message of @len bytes to the host-to-LPU mailbox registers
 * (converted with bfa_os_wtole), zero-pad the remainder of the mailbox,
 * then ring the doorbell. Caller guarantees len <= BFI_IOC_MSGLEN_MAX.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	/* Read back to flush the posted write before returning. */
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}
/* Build and send the IOC ENABLE request over the mailbox. */
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

/* Build and send the IOC DISABLE request over the mailbox. */
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

/*
 * Request IOC attributes from firmware; the reply is DMAed into the
 * buffer claimed in bfa_ioc_mem_claim() (ioc->attr_dma).
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
/*
 * Heartbeat poll callback. A heartbeat counter that has not advanced
 * since the previous poll means firmware is hung: log the failure and
 * start recovery. Otherwise remember the new count, service any queued
 * mailbox work, and re-arm the heartbeat timer.
 */
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32 hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);

	if (hb_count == ioc->hb_count) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
			hb_count);
		bfa_ioc_recover(ioc);
		return;
	}
	ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
			ioc, BFA_IOC_HB_TOV);
}
/* Seed the heartbeat counter and arm the periodic heartbeat check. */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

/* Stop heartbeat monitoring (cancels the timer armed above). */
static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}
/**
 * Initiate a full firmware download.
 *
 * Copies the driver's firmware image, chunk by chunk, into adapter
 * shared memory through the host page window, then writes the boot
 * type and boot parameter words last.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	if (bfa_ioc_is_optrom(ioc))
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		/* Fetch the next image chunk when the word index crosses one. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	/* Restore the page window to the base of shared memory. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
		      bfa_os_swap32(boot_param));
}
/* Thin wrapper: (re)initialize IOC hardware; @force forces a full reboot. */
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}
/**
 * Update BFA configuration from firmware configuration.
 *
 * Byte-swaps the multi-byte attribute fields in place (firmware sends
 * them in network order) and advances the FSM with FWRSP_GETATTR.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->card_type = bfa_os_ntohl(attr->card_type);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 *
 * Empties the command queue and clears every per-class handler slot;
 * handlers are registered later via bfa_ioc_mbox_register()/_regisr().
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 *
 * Drains the command queue without sending; used on heartbeat failure.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/**
 * bfa_ioc_public
 */

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	/* Drop any stale firmware message, then download and start the LPU. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 *
 * Sets the module-wide default consumed when an IOC is attached.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

/* Returns TRUE while the IOC FSM is in the operational state. */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
/*
 * Copy one LPU-to-host mailbox message into @mbmsg (word-wise, with
 * byte-order conversion) and acknowledge it, which also de-asserts the
 * mailbox interrupt.
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	/* Read back to flush the posted write. */
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Dispatch a firmware-to-host message of class BFI_MC_IOC: heartbeats
 * are ignored, ready/enable/disable replies drive the FSM, and getattr
 * replies update the cached attributes.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeats are counted via the heartbeat register, not here. */
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	callbacks the IOC invokes on the driver
 * @param[in]	timer_mod	timer module for IOC timers
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	/* Start the FSM in the reset state. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 * @param[in]	mc	BFI message class this IOC function serves
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/**
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}
/* Request IOC enable; re-arms one-shot firmware trace save on failure. */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

/* Request IOC disable. */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/**
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	/* Trace is auto-saved only when auto-recovery is enabled. */
	if (auto_recover)
		return BFA_DBG_FWTRC_LEN;
	return 0;
}
/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before call bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}

/* Translate a shared-memory address into a host page-window page number. */
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/* Translate a shared-memory address into an offset within its page. */
u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}
/**
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[i]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/**
 * Handle mailbox interrupts
 *
 * Fetches the pending firmware message and dispatches it: BFI_MC_IOC
 * messages go to bfa_ioc_isr(); all other classes are routed through
 * the mbhdlr[] handler table populated by bfa_ioc_mbox_register()/
 * bfa_ioc_mbox_regisr().
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;
	int mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	/*
	 * mbhdlr[] holds BFI_MC_MAX entries (indices 0..BFI_MC_MAX-1, as
	 * every registration loop iterates mc < BFI_MC_MAX), so a class of
	 * exactly BFI_MC_MAX must be rejected too. The previous check used
	 * "mc > BFI_MC_MAX", allowing a one-past-the-end table read.
	 */
	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
/* Hardware-error interrupt: forward HWERROR to the IOC state machine. */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/* Put the IOC in FC mode and record its port id from the PCI function. */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}
/* True when a firmware state value counts as "not operational". */
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32 ioc_state;
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;

	/* This IOC's own FSM must already be in the disabled state. */
	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}
/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as
*/
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
struct bfa_ioc_hbfail_notify_s *notify)
{
list_add_tail(¬ify->qe, &ioc->hb_notify_q);
}
/* Manufacturer string reported in adapter attributes. */
#define BFA_MFG_NAME "Brocade"

/*
 * Fill @ad_attr from the cached firmware attributes (ioc->attr) and
 * derived IOC properties.
 */
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
}
/*
 * Classify the IOC: non-CT ASICs and FC-mode CT ASICs are FC; a CT
 * ASIC serving the IOCFC message class is FCoE; anything else must be
 * the LL class (asserted) and is reported as LL.
 */
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;

	if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;

	/* Only LL remains; flag any unexpected message class. */
	bfa_assert(ioc->ioc_mc == BFI_MC_LL);
	return BFA_IOC_TYPE_LL;
}
/* Copy the adapter serial number (fixed-size, zero-filled) into @serial_num. */
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
		      (void *)ioc->attr->brcd_serialnum,
		      BFA_ADAPTER_SERIAL_NUM_LEN);
}

/* Copy the firmware version string into @fw_ver. */
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

/* Format the chip revision as "Rev-<asic_rev>" into @chip_rev. */
void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

/* Copy the option-ROM version string into @optrom_ver. */
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

/* Copy the fixed manufacturer name into @manufacturer. */
void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

/* Format the model string as "<mfg>-<card_type>" into @model. */
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s *ioc_attr;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}
/* Map the current FSM state to the externally visible IOC state enum. */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

/* Fill @ioc_attr with state, port, type, adapter and PCI attributes. */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/**
 * bfa_wwn_public
 */

/* Port WWN from firmware attributes. */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->pwwn;
}

/* Node WWN from firmware attributes. */
wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->nwwn;
}

/* Adapter id: the manufacturing port WWN. */
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return bfa_ioc_get_mfg_mac(ioc);
	else
		return ioc->attr->mac;
}

/* Manufacturing port WWN. */
wwn_t
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

/* Manufacturing node WWN. */
wwn_t
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_nwwn;
}

/*
 * Manufacturing MAC, made unique per PCI function by adding the
 * function number to the last address byte.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}

/* TRUE for FC-mode IOCs and for non-CT (FC-only) ASICs. */
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
/**
 * Send AEN notification
 *
 * Logs the event and fills aen_data with the pwwn/mac appropriate for
 * the IOC type.
 *
 * NOTE(review): aen_data is populated but nothing in this body hands it
 * to an AEN delivery routine -- only the bfa_log() call is emitted.
 * Confirm whether event posting was intentionally stripped here.
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = ioc->logm;
	s32 inst_num = 0;
	enum bfa_ioc_type_e ioc_type;

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));

	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_type;
}
/**
 * Retrieve saved firmware trace from a prior IOC failure.
 *
 * Copies up to *trclen bytes of the auto-saved trace into @trcdata and
 * updates *trclen with the number of bytes actually copied. Returns
 * BFA_STATUS_ENOFSAVE if no save buffer was configured.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/**
 * Clear saved firmware trace
 *
 * Re-arms the one-shot auto-save so the next failure is captured again.
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}
/**
 * Retrieve saved firmware trace from a prior IOC failure.
 *
 * Reads up to BFA_DBG_FWTRC_LEN bytes of the firmware trace for this
 * port out of adapter shared memory into @trcdata, converting each
 * word from network order, and updates *trclen with the copied length.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 pgnum;
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int i, tlen;
	u32 *tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);

	/*
	 *  Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return BFA_STATUS_FAILED;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	/* Restore the page window to the base of shared memory. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 *  release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}
/**
 * Capture the firmware trace into the preallocated dbg_fwsave buffer,
 * if one was configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int len = ioc->dbg_fwsave_len;

	if (!len)
		return;
	bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &len);
}
/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	/* Save the firmware trace only once per arming; see
	 * bfa_ioc_debug_fwsave_clear() which re-arms the flag. */
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);	/* count heartbeat failures */
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);	/* drive the IOC state machine */
}
/*
 * Post an AEN for each zero (invalid) WWN in the attribute block.
 * Link-layer-only IOCs are exempt from the check.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;

	if (!ioc->attr->nwwn)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
	if (!ioc->attr->pwwn)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}
#endif
| gpl-2.0 |
robacklin/busybox | util-linux/mkfs_minix.c | 89 | 19376 | /* vi: set sw=4 ts=4: */
/*
* mkfs.c - make a linux (minix) file-system.
*
* (C) 1991 Linus Torvalds.
*
* Licensed under GPLv2, see file LICENSE in this source tree.
*/
/*
* DD.MM.YY
*
* 24.11.91 - Time began. Used the fsck sources to get started.
*
* 25.11.91 - Corrected some bugs. Added support for ".badblocks"
* The algorithm for ".badblocks" is a bit weird, but
* it should work. Oh, well.
*
* 25.01.92 - Added the -l option for getting the list of bad blocks
* out of a named file. (Dave Rivers, rivers@ponds.uucp)
*
* 28.02.92 - Added %-information when using -c.
*
* 28.02.93 - Added support for other namelengths than the original
* 14 characters so that I can test the new kernel routines..
*
* 09.10.93 - Make exit status conform to that required by fsutil
* (Rik Faith, faith@cs.unc.edu)
*
* 31.10.93 - Added inode request feature, for backup floppies: use
* 32 inodes, for a news partition use more.
* (Scott Heavner, sdh@po.cwru.edu)
*
* 03.01.94 - Added support for file system valid flag.
* (Dr. Wettstein, greg%wind.uucp@plains.nodak.edu)
*
* 30.10.94 - added support for v2 filesystem
* (Andreas Schwab, schwab@issan.informatik.uni-dortmund.de)
*
* 09.11.94 - Added test to prevent overwrite of mounted fs adapted
* from Theodore Ts'o's (tytso@athena.mit.edu) mke2fs
* program. (Daniel Quinlan, quinlan@yggdrasil.com)
*
* 03.20.95 - Clear first 512 bytes of filesystem to make certain that
* the filesystem is not misidentified as a MS-DOS FAT filesystem.
* (Daniel Quinlan, quinlan@yggdrasil.com)
*
* 02.07.96 - Added small patch from Russell King to make the program a
* good deal more portable (janl@math.uio.no)
*
* Usage: mkfs [-c | -l filename ] [-v] [-nXX] [-iXX] device [size-in-blocks]
*
* -c for readability checking (SLOW!)
* -l for getting a list of bad blocks from a file.
* -n for namelength (currently the kernel only uses 14 or 30)
* -i for number of inodes
* -v for v2 filesystem
*
* The device may be a block device or a image of one, but this isn't
* enforced (but it's not much fun on a character device :-).
*
* Modified for BusyBox by Erik Andersen <andersen@debian.org> --
* removed getopt based parser and added a hand rolled one.
*/
//usage:#define mkfs_minix_trivial_usage
//usage: "[-c | -l FILE] [-nXX] [-iXX] BLOCKDEV [KBYTES]"
//usage:#define mkfs_minix_full_usage "\n\n"
//usage: "Make a MINIX filesystem\n"
//usage: "\n -c Check device for bad blocks"
//usage: "\n -n [14|30] Maximum length of filenames"
//usage: "\n -i INODES Number of inodes for the filesystem"
//usage: "\n -l FILE Read bad blocks list from FILE"
//usage: "\n -v Make version 2 filesystem"
#include "libbb.h"
#include <mntent.h>
#include "minix.h"
/* Store the very same times/uids/gids for image consistency */
#if 1
# define CUR_TIME 0
# define GETUID 0
# define GETGID 0
#else
/* Was using this. Is it useful? NB: this will break testsuite */
# define CUR_TIME time(NULL)
# define GETUID getuid()
# define GETGID getgid()
#endif
enum {
MAX_GOOD_BLOCKS = 512,
TEST_BUFFER_BLOCKS = 16,
};
#if !ENABLE_FEATURE_MINIX2
enum { version2 = 0 };
#endif
enum { dev_fd = 3 };
struct globals {
#if ENABLE_FEATURE_MINIX2
smallint version2;
#define version2 G.version2
#endif
char *device_name;
uint32_t total_blocks;
int badblocks;
int namelen;
int dirsize;
int magic;
char *inode_buffer;
char *inode_map;
char *zone_map;
int used_good_blocks;
unsigned long req_nr_inodes;
unsigned currently_testing;
char root_block[BLOCK_SIZE];
char superblock_buffer[BLOCK_SIZE];
char boot_block_buffer[512];
unsigned short good_blocks_table[MAX_GOOD_BLOCKS];
/* check_blocks(): buffer[] was the biggest static in entire bbox */
char check_blocks_buffer[BLOCK_SIZE * TEST_BUFFER_BLOCKS];
unsigned short ind_block1[BLOCK_SIZE >> 1];
unsigned short dind_block1[BLOCK_SIZE >> 1];
unsigned long ind_block2[BLOCK_SIZE >> 2];
unsigned long dind_block2[BLOCK_SIZE >> 2];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)
/* Integer ceiling division: smallest k such that k * unit >= total. */
static ALWAYS_INLINE unsigned div_roundup(unsigned total, unsigned unit)
{
	return (total + unit - 1) / unit;
}
#define INODE_BUF1 (((struct minix1_inode*)G.inode_buffer) - 1)
#define INODE_BUF2 (((struct minix2_inode*)G.inode_buffer) - 1)
#define SB (*(struct minix_superblock*)G.superblock_buffer)
#define SB_INODES (SB.s_ninodes)
#define SB_IMAPS (SB.s_imap_blocks)
#define SB_ZMAPS (SB.s_zmap_blocks)
#define SB_FIRSTZONE (SB.s_firstdatazone)
#define SB_ZONE_SIZE (SB.s_log_zone_size)
#define SB_MAXSIZE (SB.s_max_size)
#define SB_MAGIC (SB.s_magic)
#if !ENABLE_FEATURE_MINIX2
# define SB_ZONES (SB.s_nzones)
# define INODE_BLOCKS div_roundup(SB_INODES, MINIX1_INODES_PER_BLOCK)
#else
# define SB_ZONES (version2 ? SB.s_zones : SB.s_nzones)
# define INODE_BLOCKS div_roundup(SB_INODES, \
(version2 ? MINIX2_INODES_PER_BLOCK : MINIX1_INODES_PER_BLOCK))
#endif
#define INODE_BUFFER_SIZE (INODE_BLOCKS * BLOCK_SIZE)
#define NORM_FIRSTZONE (2 + SB_IMAPS + SB_ZMAPS + INODE_BLOCKS)
/* Before you ask "where they come from?": */
/* setbit/clrbit are supplied by sys/param.h */
/* Test bit i of bitmap a.  Returns 0 or nonzero (not necessarily 1). */
static int minix_bit(const char *a, unsigned i)
{
	return a[i / 8] & (1 << (i % 8));
}
/* Set bit i of bitmap a (setbit() is supplied by sys/param.h). */
static void minix_setbit(char *a, unsigned i)
{
	setbit(a, i);
}
/* Clear bit i of bitmap a (clrbit() is supplied by sys/param.h). */
static void minix_clrbit(char *a, unsigned i)
{
	clrbit(a, i);
}
/* Note: do not assume 0/1, it is 0/nonzero */
#define zone_in_use(x) minix_bit(G.zone_map,(x)-SB_FIRSTZONE+1)
/*#define inode_in_use(x) minix_bit(G.inode_map,(x))*/
#define mark_inode(x) minix_setbit(G.inode_map,(x))
#define unmark_inode(x) minix_clrbit(G.inode_map,(x))
#define mark_zone(x) minix_setbit(G.zone_map,(x)-SB_FIRSTZONE+1)
#define unmark_zone(x) minix_clrbit(G.zone_map,(x)-SB_FIRSTZONE+1)
#ifndef BLKGETSIZE
# define BLKGETSIZE _IO(0x12,96) /* return device size */
#endif
/*
 * Return 1 if one byte can be read at 'offset' on fd, else 0.
 * Used by count_blocks() to probe for the end of the device.
 */
static long valid_offset(int fd, int offset)
{
	char c;

	return lseek(fd, offset, SEEK_SET) >= 0 && read(fd, &c, 1) == 1;
}
/*
 * Binary-search the highest readable byte offset on fd and return the
 * size in bytes: grow 'high' exponentially until it is past the end,
 * then bisect between the last readable and first unreadable offset.
 * NOTE(review): offsets are plain int, so this caps out near 2 GiB and
 * 'high *= 2' can overflow on larger devices - confirm acceptable for
 * the minix fs size limits.
 */
static int count_blocks(int fd)
{
	int high, low;

	low = 0;
	for (high = 1; valid_offset(fd, high); high *= 2)
		low = high;
	while (low < high - 1) {
		const int mid = (low + high) / 2;

		if (valid_offset(fd, mid))
			low = mid;
		else
			high = mid;
	}
	valid_offset(fd, 0);	/* leave the file position at the start */
	return (low + 1);	/* last readable offset + 1 == size in bytes */
}
/*
 * Return the size of 'file' in bytes.  For block devices this uses the
 * BLKGETSIZE ioctl (which reports 512-byte sectors); for regular files
 * it falls back to probing with count_blocks().
 */
static int get_size(const char *file)
{
	int fd;
	long size;

	fd = xopen(file, O_RDWR);
	if (ioctl(fd, BLKGETSIZE, &size) >= 0) {
		close(fd);
		return (size * 512);	/* sectors -> bytes */
	}

	size = count_blocks(fd);
	close(fd);
	return size;
}
/*
 * Write boot sector, superblock, inode/zone bitmaps and the inode
 * table to the device.  msg_eol is set before each xlseek/xwrite so a
 * failure dies with a context-specific message; it is restored to the
 * default "\n" at the end.
 */
static void write_tables(void)
{
	/* Mark the superblock valid. */
	SB.s_state |= MINIX_VALID_FS;
	SB.s_state &= ~MINIX_ERROR_FS;

	msg_eol = "seek to 0 failed";
	xlseek(dev_fd, 0, SEEK_SET);

	msg_eol = "can't clear boot sector";
	xwrite(dev_fd, G.boot_block_buffer, 512);

	msg_eol = "seek to BLOCK_SIZE failed";
	xlseek(dev_fd, BLOCK_SIZE, SEEK_SET);

	msg_eol = "can't write superblock";
	xwrite(dev_fd, G.superblock_buffer, BLOCK_SIZE);

	msg_eol = "can't write inode map";
	xwrite(dev_fd, G.inode_map, SB_IMAPS * BLOCK_SIZE);

	msg_eol = "can't write zone map";
	xwrite(dev_fd, G.zone_map, SB_ZMAPS * BLOCK_SIZE);

	msg_eol = "can't write inodes";
	xwrite(dev_fd, G.inode_buffer, INODE_BUFFER_SIZE);

	msg_eol = "\n";
}
/* Write one BLOCK_SIZE block at block index 'blk' on the target device. */
static void write_block(int blk, char *buffer)
{
	xlseek(dev_fd, blk * BLOCK_SIZE, SEEK_SET);
	xwrite(dev_fd, buffer, BLOCK_SIZE);
}
/*
 * Find the next unused zone (for relocating data away from bad
 * blocks), record it in good_blocks_table and return it.  The scan
 * resumes just past the previously returned block, so the table stays
 * sorted.  Dies when the table is full or no free zone remains.
 */
static int get_free_block(void)
{
	int blk;

	if (G.used_good_blocks + 1 >= MAX_GOOD_BLOCKS)
		bb_error_msg_and_die("too many bad blocks");
	if (G.used_good_blocks)
		blk = G.good_blocks_table[G.used_good_blocks - 1] + 1;
	else
		blk = SB_FIRSTZONE;
	while (blk < SB_ZONES && zone_in_use(blk))
		blk++;
	if (blk >= SB_ZONES)
		bb_error_msg_and_die("not enough good blocks");
	G.good_blocks_table[G.used_good_blocks] = blk;
	G.used_good_blocks++;
	return blk;
}
/* Mark every block recorded in good_blocks_table as used in the zone map. */
static void mark_good_blocks(void)
{
	int idx = 0;

	while (idx < G.used_good_blocks) {
		mark_zone(G.good_blocks_table[idx]);
		idx++;
	}
}
/*
 * Return the next in-use zone strictly after 'zone'; passing 0 starts
 * the scan at the first data zone.  Returns 0 when no in-use zone
 * remains.
 */
static int next(int zone)
{
	int z = zone ? zone : SB_FIRSTZONE - 1;

	for (z = z + 1; z < SB_ZONES; z++) {
		if (zone_in_use(z))
			return z;
	}
	return 0;
}
/*
 * Build the ".badblocks" inode (MINIX_BAD_INO) for a v1 filesystem.
 * Every bad zone is linked into this file - 7 direct zones first,
 * then one indirect and one double-indirect block (each holding 512
 * 16-bit zone numbers) - so the bad blocks can never be handed out
 * to real data.  Indirect blocks themselves come from get_free_block().
 */
static void make_bad_inode(void)
{
	struct minix1_inode *inode = &INODE_BUF1[MINIX_BAD_INO];
	int i, j, zone;
	int ind = 0, dind = 0;
	/* moved to globals to reduce stack usage
	unsigned short ind_block[BLOCK_SIZE >> 1];
	unsigned short dind_block[BLOCK_SIZE >> 1];
	*/
#define ind_block (G.ind_block1)
#define dind_block (G.dind_block1)
#define NEXT_BAD (zone = next(zone))

	if (!G.badblocks)
		return;
	mark_inode(MINIX_BAD_INO);
	inode->i_nlinks = 1;
	/* BTW, setting this makes all images different */
	/* it's harder to check for bugs then - diff isn't helpful :(... */
	inode->i_time = CUR_TIME;
	inode->i_mode = S_IFREG + 0000;
	inode->i_size = G.badblocks * BLOCK_SIZE;

	/* Fill the 7 direct zone slots first. */
	zone = next(0);
	for (i = 0; i < 7; i++) {
		inode->i_zone[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	/* Then one indirect block of 512 zone numbers... */
	inode->i_zone[7] = ind = get_free_block();
	memset(ind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 512; i++) {
		ind_block[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	/* ...and finally a double-indirect block. */
	inode->i_zone[8] = dind = get_free_block();
	memset(dind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 512; i++) {
		write_block(ind, (char *) ind_block);
		dind_block[i] = ind = get_free_block();
		memset(ind_block, 0, BLOCK_SIZE);
		for (j = 0; j < 512; j++) {
			ind_block[j] = zone;
			if (!NEXT_BAD)
				goto end_bad;
		}
	}
	bb_error_msg_and_die("too many bad blocks");
 end_bad:
	/* Flush any partially-filled indirect blocks. */
	if (ind)
		write_block(ind, (char *) ind_block);
	if (dind)
		write_block(dind, (char *) dind_block);
#undef ind_block
#undef dind_block
}
#if ENABLE_FEATURE_MINIX2
/*
 * Build the ".badblocks" inode for a v2 filesystem.  Same scheme as
 * make_bad_inode() but with 32-bit zone numbers (256 per indirect
 * block) and separate a/m/ctime stamps.
 */
static void make_bad_inode2(void)
{
	struct minix2_inode *inode = &INODE_BUF2[MINIX_BAD_INO];
	int i, j, zone;
	int ind = 0, dind = 0;
	/* moved to globals to reduce stack usage
	unsigned long ind_block[BLOCK_SIZE >> 2];
	unsigned long dind_block[BLOCK_SIZE >> 2];
	*/
#define ind_block (G.ind_block2)
#define dind_block (G.dind_block2)

	if (!G.badblocks)
		return;
	mark_inode(MINIX_BAD_INO);
	inode->i_nlinks = 1;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CUR_TIME;
	inode->i_mode = S_IFREG + 0000;
	inode->i_size = G.badblocks * BLOCK_SIZE;

	/* 7 direct zones, then one indirect, then one double-indirect. */
	zone = next(0);
	for (i = 0; i < 7; i++) {
		inode->i_zone[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[7] = ind = get_free_block();
	memset(ind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 256; i++) {
		ind_block[i] = zone;
		if (!NEXT_BAD)
			goto end_bad;
	}
	inode->i_zone[8] = dind = get_free_block();
	memset(dind_block, 0, BLOCK_SIZE);
	for (i = 0; i < 256; i++) {
		write_block(ind, (char *) ind_block);
		dind_block[i] = ind = get_free_block();
		memset(ind_block, 0, BLOCK_SIZE);
		for (j = 0; j < 256; j++) {
			ind_block[j] = zone;
			if (!NEXT_BAD)
				goto end_bad;
		}
	}
	/* Could make triple indirect block here */
	bb_error_msg_and_die("too many bad blocks");
 end_bad:
	/* Flush any partially-filled indirect blocks. */
	if (ind)
		write_block(ind, (char *) ind_block);
	if (dind)
		write_block(dind, (char *) dind_block);
#undef ind_block
#undef dind_block
}
#else
void make_bad_inode2(void);
#endif
/*
 * Build the root directory inode (v1).  The directory entries "." ".."
 * and ".badblocks" were prepared in G.root_block by main(); when no
 * bad blocks were found, the ".badblocks" entry is truncated away.
 */
static void make_root_inode(void)
{
	struct minix1_inode *inode = &INODE_BUF1[MINIX_ROOT_INO];

	mark_inode(MINIX_ROOT_INO);
	inode->i_zone[0] = get_free_block();
	inode->i_nlinks = 2;
	inode->i_time = CUR_TIME;
	if (G.badblocks)
		inode->i_size = 3 * G.dirsize;
	else {
		/* No ".badblocks": zero its inode number, dir ends after ".." */
		G.root_block[2 * G.dirsize] = '\0';
		G.root_block[2 * G.dirsize + 1] = '\0';
		inode->i_size = 2 * G.dirsize;
	}
	inode->i_mode = S_IFDIR + 0755;
	inode->i_uid = GETUID;
	if (inode->i_uid)
		inode->i_gid = GETGID;
	write_block(inode->i_zone[0], G.root_block);
}
#if ENABLE_FEATURE_MINIX2
static void make_root_inode2(void)
{
struct minix2_inode *inode = &INODE_BUF2[MINIX_ROOT_INO];
mark_inode(MINIX_ROOT_INO);
inode->i_zone[0] = get_free_block();
inode->i_nlinks = 2;
inode->i_atime = inode->i_mtime = inode->i_ctime = CUR_TIME;
if (G.badblocks)
inode->i_size = 3 * G.dirsize;
else {
G.root_block[2 * G.dirsize] = '\0';
G.root_block[2 * G.dirsize + 1] = '\0';
inode->i_size = 2 * G.dirsize;
}
inode->i_mode = S_IFDIR + 0755;
inode->i_uid = GETUID;
if (inode->i_uid)
inode->i_gid = GETGID;
write_block(inode->i_zone[0], G.root_block);
}
#else
void make_root_inode2(void);
#endif
/*
* Perform a test of a block; return the number of
* blocks readable.
*/
static size_t do_check(char *buffer, size_t try, unsigned current_block)
{
	ssize_t got;

	/* Seek to the correct loc. */
	msg_eol = "seek failed during testing of blocks";
	xlseek(dev_fd, current_block * BLOCK_SIZE, SEEK_SET);
	msg_eol = "\n";

	/* Try the read */
	got = read(dev_fd, buffer, try * BLOCK_SIZE);
	if (got < 0)
		got = 0;	/* read error: report zero readable blocks */
	try = ((size_t)got) / BLOCK_SIZE;	/* whole blocks actually read */
	if (got & (BLOCK_SIZE - 1))
		fprintf(stderr, "Short read at block %u\n", (unsigned)(current_block + try));
	return try;
}
/*
 * SIGALRM handler: prints check_blocks() progress every 5 seconds and
 * re-arms itself until the scan has passed the last zone.
 */
static void alarm_intr(int alnum UNUSED_PARAM)
{
	if (G.currently_testing >= SB_ZONES)
		return;
	signal(SIGALRM, alarm_intr);
	alarm(5);
	if (!G.currently_testing)
		return;	/* scan has not really started yet */
	printf("%d ...", G.currently_testing);
	fflush_all();
}
/*
 * Scan the whole device for unreadable blocks (-c option), reading
 * TEST_BUFFER_BLOCKS at a time.  Each failing block is marked used in
 * the zone map and counted in G.badblocks.  Progress is printed via
 * the SIGALRM handler.  Dies if a bad block precedes the data area.
 */
static void check_blocks(void)
{
	size_t try, got;

	G.currently_testing = 0;
	signal(SIGALRM, alarm_intr);
	alarm(5);
	while (G.currently_testing < SB_ZONES) {
		msg_eol = "seek failed in check_blocks";
		xlseek(dev_fd, G.currently_testing * BLOCK_SIZE, SEEK_SET);
		msg_eol = "\n";
		try = TEST_BUFFER_BLOCKS;
		if (G.currently_testing + try > SB_ZONES)
			try = SB_ZONES - G.currently_testing;
		got = do_check(G.check_blocks_buffer, try, G.currently_testing);
		G.currently_testing += got;
		if (got == try)
			continue;
		/* The block right after the last readable one is bad. */
		if (G.currently_testing < SB_FIRSTZONE)
			bb_error_msg_and_die("bad blocks before data-area: cannot make fs");
		mark_zone(G.currently_testing);
		G.badblocks++;
		G.currently_testing++;
	}
	alarm(0);
	printf("%d bad block(s)\n", G.badblocks);
}
/*
 * Read a list of bad block numbers, one per line, from 'filename'
 * (-l option) and mark each one used in the zone map.
 *
 * Fixes vs. the previous version:
 *  - the fscanf() result is now checked: a malformed line used to
 *    leave the stream position unchanged so feof() never became true
 *    and the loop spun forever, re-marking a stale blockno;
 *  - "%lu" matches the unsigned long argument ("%ld" was undefined
 *    behavior per the C standard's fprintf/fscanf conversion rules);
 *  - the file is closed when done.
 */
static void get_list_blocks(char *filename)
{
	FILE *listfile;
	unsigned long blockno;

	listfile = xfopen_for_read(filename);
	/* Stop at EOF or at the first line that is not a number. */
	while (fscanf(listfile, "%lu\n", &blockno) == 1) {
		mark_zone(blockno);
		G.badblocks++;
	}
	fclose(listfile);
	printf("%d bad block(s)\n", G.badblocks);
}
/*
 * Fill in the superblock and allocate/initialize the inode and zone
 * bitmaps.  SB_ZMAPS and the first data zone depend on each other, so
 * they are iterated to a fixed point (with a 999-round safety cap).
 */
static void setup_tables(void)
{
	unsigned long inodes;
	unsigned norm_firstzone;
	unsigned sb_zmaps;
	unsigned i;

	/* G was xzalloc'ed by INIT_G(), so these start out zeroed: */
	/* memset(G.superblock_buffer, 0, BLOCK_SIZE); */
	/* memset(G.boot_block_buffer, 0, 512); */
	SB_MAGIC = G.magic;
	SB_ZONE_SIZE = 0;
	SB_MAXSIZE = version2 ? 0x7fffffff : (7 + 512 + 512 * 512) * 1024;
	if (version2)
		SB.s_zones = G.total_blocks;
	else
		SB.s_nzones = G.total_blocks;

	/* some magic nrs: 1 inode / 3 blocks */
	if (G.req_nr_inodes == 0)
		inodes = G.total_blocks / 3;
	else
		inodes = G.req_nr_inodes;
	/* Round up inode count to fill block size */
	if (version2)
		inodes = (inodes + MINIX2_INODES_PER_BLOCK - 1) &
			~(MINIX2_INODES_PER_BLOCK - 1);
	else
		inodes = (inodes + MINIX1_INODES_PER_BLOCK - 1) &
			~(MINIX1_INODES_PER_BLOCK - 1);
	if (inodes > 65535)
		inodes = 65535;	/* inode numbers are 16-bit */
	SB_INODES = inodes;
	SB_IMAPS = div_roundup(SB_INODES + 1, BITS_PER_BLOCK);

	/* Real bad hack but overwise mkfs.minix can be thrown
	 * in infinite loop...
	 * try:
	 * dd if=/dev/zero of=test.fs count=10 bs=1024
	 * mkfs.minix -i 200 test.fs
	 */
	/* This code is not insane: NORM_FIRSTZONE is not a constant,
	 * it is calculated from SB_INODES, SB_IMAPS and SB_ZMAPS */
	i = 999;
	SB_ZMAPS = 0;
	do {
		norm_firstzone = NORM_FIRSTZONE;
		sb_zmaps = div_roundup(G.total_blocks - norm_firstzone + 1, BITS_PER_BLOCK);
		if (SB_ZMAPS == sb_zmaps) goto got_it;
		SB_ZMAPS = sb_zmaps;
		/* new SB_ZMAPS, need to recalc NORM_FIRSTZONE */
	} while (--i);
	bb_error_msg_and_die("incompatible size/inode count, try different -i N");
 got_it:

	SB_FIRSTZONE = norm_firstzone;
	/* Bitmaps start all-ones; valid inodes/zones are then cleared,
	 * leaving the padding bits past the end permanently "used". */
	G.inode_map = xmalloc(SB_IMAPS * BLOCK_SIZE);
	G.zone_map = xmalloc(SB_ZMAPS * BLOCK_SIZE);
	memset(G.inode_map, 0xff, SB_IMAPS * BLOCK_SIZE);
	memset(G.zone_map, 0xff, SB_ZMAPS * BLOCK_SIZE);
	for (i = SB_FIRSTZONE; i < SB_ZONES; i++)
		unmark_zone(i);
	for (i = MINIX_ROOT_INO; i <= SB_INODES; i++)
		unmark_inode(i);
	G.inode_buffer = xzalloc(INODE_BUFFER_SIZE);

	printf("%ld inodes\n", (long)SB_INODES);
	printf("%ld blocks\n", (long)SB_ZONES);
	printf("Firstdatazone=%ld (%ld)\n", (long)SB_FIRSTZONE, (long)norm_firstzone);
	printf("Zonesize=%d\n", BLOCK_SIZE << SB_ZONE_SIZE);
	printf("Maxsize=%ld\n", (long)SB_MAXSIZE);
}
/*
 * mkfs.minix applet entry point: parse options, compute filesystem
 * geometry, optionally collect bad blocks (-c scan or -l list), then
 * write the root/".badblocks" inodes and all on-disk tables.
 */
int mkfs_minix_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mkfs_minix_main(int argc UNUSED_PARAM, char **argv)
{
	unsigned opt;
	char *tmp;
	struct stat statbuf;
	char *str_i;
	char *listfile = NULL;

	INIT_G();

	/* default (changed to 30, per Linus's suggestion, Sun Nov 21 08:05:07 1993) */
	G.namelen = 30;
	G.dirsize = 32;
	G.magic = MINIX1_SUPER_MAGIC2;

	/* Sanity check: inode structures must tile a block exactly. */
	if (INODE_SIZE1 * MINIX1_INODES_PER_BLOCK != BLOCK_SIZE)
		bb_error_msg_and_die("bad inode size");
#if ENABLE_FEATURE_MINIX2
	if (INODE_SIZE2 * MINIX2_INODES_PER_BLOCK != BLOCK_SIZE)
		bb_error_msg_and_die("bad inode size");
#endif

	opt_complementary = "n+"; /* -n N */
	opt = getopt32(argv, "ci:l:n:v", &str_i, &listfile, &G.namelen);
	argv += optind;
	//if (opt & 1) -c
	if (opt & 2) G.req_nr_inodes = xatoul(str_i); // -i
	//if (opt & 4) -l
	if (opt & 8) { // -n
		if (G.namelen == 14) G.magic = MINIX1_SUPER_MAGIC;
		else if (G.namelen == 30) G.magic = MINIX1_SUPER_MAGIC2;
		else bb_show_usage();
		G.dirsize = G.namelen + 2;	/* 2-byte inode no. + name */
	}
	if (opt & 0x10) { // -v
#if ENABLE_FEATURE_MINIX2
		version2 = 1;
#else
		bb_error_msg_and_die("not compiled with minix v2 support");
#endif
	}

	G.device_name = *argv++;
	if (!G.device_name)
		bb_show_usage();
	/* Size argument is in 1 KiB blocks; default: probe the device. */
	if (*argv)
		G.total_blocks = xatou32(*argv);
	else
		G.total_blocks = get_size(G.device_name) / 1024;

	if (G.total_blocks < 10)
		bb_error_msg_and_die("must have at least 10 blocks");

	if (version2) {
		G.magic = MINIX2_SUPER_MAGIC2;
		if (G.namelen == 14)
			G.magic = MINIX2_SUPER_MAGIC;
	} else if (G.total_blocks > 65535)
		G.total_blocks = 65535;	/* v1 zone numbers are 16-bit */

	/* Check if it is mounted */
	if (find_mount_point(G.device_name, 0))
		bb_error_msg_and_die("can't format mounted filesystem");

	xmove_fd(xopen(G.device_name, O_RDWR), dev_fd);
	xfstat(dev_fd, &statbuf, G.device_name);
	if (!S_ISBLK(statbuf.st_mode))
		opt &= ~1; // clear -c (check)

	/* I don't know why someone has special code to prevent mkfs.minix
	 * on IDE devices. Why IDE but not SCSI, etc?... */
#if 0
	else if (statbuf.st_rdev == 0x0300 || statbuf.st_rdev == 0x0340)
		/* what is this? */
		bb_error_msg_and_die("will not try "
			"to make filesystem on '%s'", G.device_name);
#endif

	/* Pre-build the root directory block: ".", ".." and ".badblocks"
	 * entries, each a 2-byte inode number followed by the name. */
	tmp = G.root_block;
	*(short *) tmp = 1;
	strcpy(tmp + 2, ".");
	tmp += G.dirsize;
	*(short *) tmp = 1;
	strcpy(tmp + 2, "..");
	tmp += G.dirsize;
	*(short *) tmp = 2;
	strcpy(tmp + 2, ".badblocks");

	setup_tables();

	if (opt & 1) // -c ?
		check_blocks();
	else if (listfile)
		get_list_blocks(listfile);

	if (version2) {
		make_root_inode2();
		make_bad_inode2();
	} else {
		make_root_inode();
		make_bad_inode();
	}

	mark_good_blocks();
	write_tables();
	return 0;
}
| gpl-2.0 |
IonKiwi/android_kernel_samsung_kccat6 | drivers/usb/gadget/f_rmnet.c | 89 | 37922 | /*
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>
#include "u_ether.h"
#include "u_rmnet.h"
#include "gadget_chips.h"
static unsigned int rmnet_dl_max_pkt_per_xfer = 7;
module_param(rmnet_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rmnet_dl_max_pkt_per_xfer,
"Maximum packets per transfer for DL aggregation");
#define RMNET_NOTIFY_INTERVAL 5
#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
#define ACM_CTRL_DTR (1 << 0)
/* TODO: use separate structures for data and
* control paths
*/
struct f_rmnet {
struct gether gether_port;
struct grmnet port;
int ifc_id;
u8 port_num;
atomic_t online;
atomic_t ctrl_online;
struct usb_composite_dev *cdev;
spinlock_t lock;
/* usb eps*/
struct usb_ep *notify;
struct usb_request *notify_req;
/* control info */
struct list_head cpkt_resp_q;
atomic_t notify_count;
unsigned long cpkts_len;
};
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_qti_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_ctrl_hsuart_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static unsigned int no_data_hsuart_ports;
static struct rmnet_ports {
enum transport_type data_xport;
enum transport_type ctrl_xport;
unsigned data_xport_num;
unsigned ctrl_xport_num;
unsigned port_num;
struct f_rmnet *port;
} rmnet_ports[NR_RMNET_PORTS];
static struct usb_interface_descriptor rmnet_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 3,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
.bInterfaceSubClass = 0xE0,
.bInterfaceProtocol = 0x00,
#else
.bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
#endif
/* .iInterface = DYNAMIC */
};
/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
.bInterval = 1 << RMNET_NOTIFY_INTERVAL,
};
static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(64),
};
static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(64),
};
static struct usb_descriptor_header *rmnet_fs_function[] = {
(struct usb_descriptor_header *) &rmnet_interface_desc,
(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
(struct usb_descriptor_header *) &rmnet_fs_in_desc,
(struct usb_descriptor_header *) &rmnet_fs_out_desc,
NULL,
};
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
.bInterval = RMNET_NOTIFY_INTERVAL + 4,
};
static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(512),
};
static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(512),
};
static struct usb_descriptor_header *rmnet_hs_function[] = {
(struct usb_descriptor_header *) &rmnet_interface_desc,
(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
(struct usb_descriptor_header *) &rmnet_hs_in_desc,
(struct usb_descriptor_header *) &rmnet_hs_out_desc,
NULL,
};
/* Super speed support */
static struct usb_endpoint_descriptor rmnet_ss_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
.bInterval = RMNET_NOTIFY_INTERVAL + 4,
};
static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
.bLength = sizeof rmnet_ss_notify_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 3 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
.wBytesPerInterval = cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
};
static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
.bLength = sizeof rmnet_ss_in_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
};
static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(1024),
};
static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
.bLength = sizeof rmnet_ss_out_comp_desc,
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
/* .bMaxBurst = 0, */
/* .bmAttributes = 0, */
};
static struct usb_descriptor_header *rmnet_ss_function[] = {
(struct usb_descriptor_header *) &rmnet_interface_desc,
(struct usb_descriptor_header *) &rmnet_ss_notify_desc,
(struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
(struct usb_descriptor_header *) &rmnet_ss_in_desc,
(struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
(struct usb_descriptor_header *) &rmnet_ss_out_desc,
(struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
NULL,
};
/* String descriptors */
static struct usb_string rmnet_string_defs[] = {
[0].s = "RmNet",
{ } /* end of list */
};
static struct usb_gadget_strings rmnet_string_table = {
.language = 0x0409, /* en-us */
.strings = rmnet_string_defs,
};
static struct usb_gadget_strings *rmnet_strings[] = {
&rmnet_string_table,
NULL,
};
static void frmnet_ctrl_response_available(struct f_rmnet *dev);
/* ------- misc functions --------------------*/
/* Map a usb_function back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, gether_port.func);
}
/* Map a grmnet control port back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
/*
 * Allocate a usb_request on 'ep' together with a kmalloc'ed transfer
 * buffer of 'len' bytes.  Returns ERR_PTR(-ENOMEM) if either
 * allocation fails (nothing is leaked on the partial-failure path).
 */
static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *req = usb_ep_alloc_request(ep, flags);

	if (req) {
		req->buf = kmalloc(len, flags);
		if (req->buf) {
			req->length = len;
			return req;
		}
		usb_ep_free_request(ep, req);
	}
	return ERR_PTR(-ENOMEM);
}
/* Free a request allocated by frmnet_alloc_req() (buffer first). */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
/*
 * Allocate a control packet with a 'len'-byte payload buffer.
 * Returns ERR_PTR(-ENOMEM) on failure; nothing is leaked when only
 * the payload allocation fails.
 */
static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
{
	struct rmnet_ctrl_pkt *pkt = kzalloc(sizeof(*pkt), flags);

	if (!pkt)
		return ERR_PTR(-ENOMEM);

	pkt->buf = kmalloc(len, flags);
	if (!pkt->buf) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->len = len;
	return pkt;
}
/* Free a packet allocated by rmnet_alloc_ctrl_pkt() (payload first). */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
/* -------------------------------------------*/
/*
 * One-time transport-layer setup for all configured rmnet ports.
 * For each transport in use (BAM/BAM2BAM data, SMD/HSIC/HSUART
 * control, HSIC/HSUART data) this calls the matching *_setup() and
 * distributes the returned base port index across the rmnet_ports[]
 * entries bound to that transport.
 * Returns 0 on success or a negative error from the transport layer.
 */
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;
	u8 base;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
		" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
			no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(FRMNET_CTRL_CLIENT,
				no_ctrl_smd_ports, &base);
		if (ret)
			return ret;
		/* SMD control ports are numbered starting at 'base'. */
		for (i = 0; i < nr_rmnet_ports; i++)
			if (rmnet_ports[i].port)
				rmnet_ports[i].port->port_num += base;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* Hand out consecutive indices to HSIC data ports. */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* Hand out consecutive indices to HSIC control ports. */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_data_hsuart_ports) {
		port_idx = ghsuart_data_setup(no_data_hsuart_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* Hand out consecutive indices to HSUART data ports. */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSUART) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsuart_ports) {
		port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* Hand out consecutive indices to HSUART control ports. */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSUART) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}
static int gport_rmnet_connect(struct f_rmnet *dev, unsigned intf)
{
int ret;
unsigned port_num;
enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
int src_connection_idx = 0, dst_connection_idx = 0;
struct usb_gadget *gadget = dev->cdev->gadget;
void *net;
pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
__func__, xport_to_str(cxport), xport_to_str(dxport),
dev, dev->port_num);
port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
switch (cxport) {
case USB_GADGET_XPORT_SMD:
ret = gsmd_ctrl_connect(&dev->port, port_num);
if (ret) {
pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
__func__, ret);
return ret;
}
break;
case USB_GADGET_XPORT_QTI:
ret = gqti_ctrl_connect(&dev->port, port_num, intf);
if (ret) {
pr_err("%s: gqti_ctrl_connect failed: err:%d\n",
__func__, ret);
return ret;
}
break;
case USB_GADGET_XPORT_HSIC:
ret = ghsic_ctrl_connect(&dev->port, port_num);
if (ret) {
pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
__func__, ret);
return ret;
}
break;
case USB_GADGET_XPORT_HSUART:
ret = ghsuart_ctrl_connect(&dev->port, port_num);
if (ret) {
pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n",
__func__, ret);
return ret;
}
break;
case USB_GADGET_XPORT_NONE:
break;
default:
pr_err("%s: Un-supported transport: %s\n", __func__,
xport_to_str(cxport));
return -ENODEV;
}
port_num = rmnet_ports[dev->port_num].data_xport_num;
switch (dxport) {
case USB_GADGET_XPORT_BAM2BAM:
src_connection_idx = usb_bam_get_connection_idx(gadget->name,
A2_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
port_num);
dst_connection_idx = usb_bam_get_connection_idx(gadget->name,
A2_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
port_num);
if (dst_connection_idx < 0 || src_connection_idx < 0) {
pr_err("%s: usb_bam_get_connection_idx failed\n",
__func__);
gsmd_ctrl_disconnect(&dev->port, port_num);
return -EINVAL;
}
case USB_GADGET_XPORT_BAM:
ret = gbam_connect(&dev->port, port_num,
dxport, src_connection_idx, dst_connection_idx);
if (ret) {
pr_err("%s: gbam_connect failed: err:%d\n",
__func__, ret);
gsmd_ctrl_disconnect(&dev->port, port_num);
return ret;
}
break;
case USB_GADGET_XPORT_BAM2BAM_IPA:
src_connection_idx = usb_bam_get_connection_idx(gadget->name,
IPA_P_BAM, USB_TO_PEER_PERIPHERAL, USB_BAM_DEVICE,
port_num);
dst_connection_idx = usb_bam_get_connection_idx(gadget->name,
IPA_P_BAM, PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE,
port_num);
if (dst_connection_idx < 0 || src_connection_idx < 0) {
pr_err("%s: usb_bam_get_connection_idx failed\n",
__func__);
gsmd_ctrl_disconnect(&dev->port, port_num);
return -EINVAL;
}
ret = gbam_connect(&dev->port, port_num,
dxport, src_connection_idx, dst_connection_idx);
if (ret) {
pr_err("%s: gbam_connect failed: err:%d\n",
__func__, ret);
if (cxport == USB_GADGET_XPORT_QTI)
gqti_ctrl_disconnect(&dev->port, port_num);
else
gsmd_ctrl_disconnect(&dev->port, port_num);
return ret;
}
break;
case USB_GADGET_XPORT_HSIC:
ret = ghsic_data_connect(&dev->port, port_num);
if (ret) {
pr_err("%s: ghsic_data_connect failed: err:%d\n",
__func__, ret);
ghsic_ctrl_disconnect(&dev->port, port_num);
return ret;
}
break;
case USB_GADGET_XPORT_HSUART:
ret = ghsuart_data_connect(&dev->port, port_num);
if (ret) {
pr_err("%s: ghsuart_data_connect failed: err:%d\n",
__func__, ret);
ghsuart_ctrl_disconnect(&dev->port, port_num);
return ret;
}
break;
case USB_GADGET_XPORT_ETHER:
net = gether_connect(&dev->gether_port);
if (IS_ERR(net)) {
pr_err("%s: gether_connect failed: err:%ld\n",
__func__, PTR_ERR(net));
if (cxport == USB_GADGET_XPORT_QTI)
gqti_ctrl_disconnect(&dev->port, port_num);
else
gsmd_ctrl_disconnect(&dev->port, port_num);
return PTR_ERR(net);
}
gether_update_dl_max_pkts_per_xfer(&dev->gether_port,
rmnet_dl_max_pkt_per_xfer);
gether_update_dl_max_xfer_size(&dev->gether_port, 16384);
break;
case USB_GADGET_XPORT_NONE:
break;
default:
pr_err("%s: Un-supported transport: %s\n", __func__,
xport_to_str(dxport));
return -ENODEV;
}
return 0;
}
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
unsigned port_num;
enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
__func__, xport_to_str(cxport), xport_to_str(dxport),
dev, dev->port_num);
port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
switch (cxport) {
case USB_GADGET_XPORT_SMD:
gsmd_ctrl_disconnect(&dev->port, port_num);
break;
case USB_GADGET_XPORT_QTI:
gqti_ctrl_disconnect(&dev->port, port_num);
break;
case USB_GADGET_XPORT_HSIC:
ghsic_ctrl_disconnect(&dev->port, port_num);
break;
case USB_GADGET_XPORT_HSUART:
ghsuart_ctrl_disconnect(&dev->port, port_num);
break;
case USB_GADGET_XPORT_NONE:
break;
default:
pr_err("%s: Un-supported transport: %s\n", __func__,
xport_to_str(cxport));
return -ENODEV;
}
port_num = rmnet_ports[dev->port_num].data_xport_num;
switch (dxport) {
case USB_GADGET_XPORT_BAM:
case USB_GADGET_XPORT_BAM2BAM:
case USB_GADGET_XPORT_BAM2BAM_IPA:
gbam_disconnect(&dev->port, port_num, dxport);
break;
case USB_GADGET_XPORT_HSIC:
ghsic_data_disconnect(&dev->port, port_num);
break;
case USB_GADGET_XPORT_HSUART:
ghsuart_data_disconnect(&dev->port, port_num);
break;
case USB_GADGET_XPORT_ETHER:
gether_disconnect(&dev->gether_port);
break;
case USB_GADGET_XPORT_NONE:
break;
default:
pr_err("%s: Un-supported transport: %s\n", __func__,
xport_to_str(dxport));
return -ENODEV;
}
return 0;
}
/* Release per-speed descriptor copies, the notify request and the name. */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_gadget *gadget = c->cdev->gadget;

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	if (gadget_is_superspeed(gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->fs_descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);
	kfree(f->name);
}
/*
 * Drop every queued control response and reset the outstanding
 * notification counter, all under dev->lock.
 */
static void frmnet_purge_responses(struct f_rmnet *dev)
{
	unsigned long flags;
	struct rmnet_ctrl_pkt *pkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		pkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);
		list_del(&pkt->list);
		rmnet_free_ctrl_pkt(pkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);
}
/*
 * USB suspend hook: flush pending responses; only the BAM2BAM pipes need
 * an explicit transport-level suspend, all other transports are no-ops.
 */
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(dxport),
			dev, dev->port_num);

	frmnet_purge_responses(dev);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM2BAM:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		gbam_suspend(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_HSIC:
	case USB_GADGET_XPORT_HSUART:
	case USB_GADGET_XPORT_ETHER:
	case USB_GADGET_XPORT_NONE:
		/* nothing to suspend for these transports */
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}
/*
 * USB resume hook: only the BAM2BAM pipes need an explicit transport-level
 * resume; every other transport is a no-op.
 */
static void frmnet_resume(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM2BAM:
	case USB_GADGET_XPORT_BAM2BAM_IPA:
		gbam_resume(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_HSIC:
	case USB_GADGET_XPORT_HSUART:
	case USB_GADGET_XPORT_ETHER:
	case USB_GADGET_XPORT_NONE:
		/* nothing to resume for these transports */
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}
static void frmnet_disable(struct usb_function *f)
{
struct f_rmnet *dev = func_to_rmnet(f);
enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
struct usb_composite_dev *cdev = dev->cdev;
pr_debug("%s: port#%d\n", __func__, dev->port_num);
usb_ep_disable(dev->notify);
dev->notify->driver_data = NULL;
atomic_set(&dev->online, 0);
frmnet_purge_responses(dev);
if (dxport == USB_GADGET_XPORT_BAM2BAM_IPA &&
gadget_is_dwc3(cdev->gadget)) {
msm_ep_unconfig(dev->port.out);
msm_ep_unconfig(dev->port.in);
}
gport_rmnet_disconnect(dev);
}
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_rmnet *dev = func_to_rmnet(f);
enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
struct usb_composite_dev *cdev = dev->cdev;
int ret;
struct list_head *cpkt;
pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
if (dev->notify->driver_data) {
pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
usb_ep_disable(dev->notify);
}
ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
if (ret) {
dev->notify->desc = NULL;
ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
dev->notify->name, ret);
return ret;
}
ret = usb_ep_enable(dev->notify);
if (ret) {
pr_err("%s: usb ep#%s enable failed, err#%d\n",
__func__, dev->notify->name, ret);
return ret;
}
dev->notify->driver_data = dev;
if (!dev->port.in->desc || !dev->port.out->desc) {
if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
dev->port.in->desc = NULL;
dev->port.out->desc = NULL;
return -EINVAL;
}
ret = gport_rmnet_connect(dev, intf);
}
if (dxport == USB_GADGET_XPORT_BAM2BAM_IPA &&
gadget_is_dwc3(cdev->gadget)) {
if (msm_ep_config(dev->port.in) ||
msm_ep_config(dev->port.out)) {
pr_err("%s: msm_ep_config failed\n", __func__);
return -EINVAL;
}
} else
pr_debug("Rmnet is being used with non DWC3 core\n");
atomic_set(&dev->online, 1);
/* In case notifications were aborted, but there are pending control
packets in the response queue, re-add the notifications */
list_for_each(cpkt, &dev->cpkt_resp_q)
frmnet_ctrl_response_available(dev);
return ret;
}
/*
 * Queue a CDC RESPONSE_AVAILABLE notification on the interrupt-IN endpoint.
 *
 * dev->notify_count tracks how many notifications are outstanding; only
 * the first increment (0 -> 1) actually queues the shared notify request —
 * later ones are coalesced and re-armed from frmnet_notify_complete().
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event;
	unsigned long flags;
	int ret;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	/* Nothing to do while offline or if the request has been freed. */
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* A notification is already in flight; the completion handler will
	 * re-queue it for us. */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* Build the notification in the shared request buffer. */
	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		/* Queueing failed: undo the count and drop the oldest
		 * pending response so the queue cannot grow unbounded. */
		spin_lock_irqsave(&dev->lock, flags);
		if (!list_empty(&dev->cpkt_resp_q)) {
			atomic_dec(&dev->notify_count);
			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			rmnet_free_ctrl_pkt(cpkt);
		}
		spin_unlock_irqrestore(&dev->lock, flags);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
static void frmnet_connect(struct grmnet *gr)
{
struct f_rmnet *dev;
if (!gr) {
pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
return;
}
dev = port_to_rmnet(gr);
atomic_set(&dev->ctrl_online, 1);
}
static void frmnet_disconnect(struct grmnet *gr)
{
struct f_rmnet *dev;
struct usb_cdc_notification *event;
int status;
if (!gr) {
pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
return;
}
dev = port_to_rmnet(gr);
atomic_set(&dev->ctrl_online, 0);
if (!atomic_read(&dev->online)) {
pr_debug("%s: nothing to do\n", __func__);
return;
}
usb_ep_fifo_flush(dev->notify);
event = dev->notify_req->buf;
event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
| USB_RECIP_INTERFACE;
event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
event->wValue = cpu_to_le16(0);
event->wIndex = cpu_to_le16(dev->ifc_id);
event->wLength = cpu_to_le16(0);
status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
if (status < 0) {
if (!atomic_read(&dev->online))
return;
pr_err("%s: rmnet notify ep enqueue error %d\n",
__func__, status);
}
frmnet_purge_responses(dev);
}
/*
 * Copy a modem control response into a packet, queue it for the host and
 * arm a RESPONSE_AVAILABLE notification. Returns 0 or a negative errno.
 */
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet *dev;
	struct rmnet_ctrl_pkt *pkt;
	unsigned long flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}

	pkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(pkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(pkt->buf, buf, len);
	pkt->len = len;

	dev = port_to_rmnet(gr);
	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* Silently drop the response unless both USB and ctrl path are up. */
	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(pkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&pkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);
	return 0;
}
/*
 * ep0 completion for SEND_ENCAPSULATED_COMMAND data: forward the received
 * command bytes to the control transport.
 *
 * Fix: dropped the local `cdev`, which was assigned but never used.
 */
static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	unsigned port_num;

	if (!dev) {
		pr_err("%s: rmnet dev is null\n", __func__);
		return;
	}
	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (dev->port.send_encap_cmd) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
	}
}
/*
 * Completion handler for the interrupt-IN notify endpoint.
 *
 * While notify_count is still non-zero after the decrement, another
 * RESPONSE_AVAILABLE notification is owed to the host, so the same
 * request is immediately re-queued.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH: deliberate — unexpected errors are handled
		 * like success so pending notifications still get re-armed. */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* Reached zero: no more notifications owed to the host. */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		/* Still owed — re-queue for the remaining responses. */
		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			/* Re-queue failed: drop the oldest response and the
			 * notification that would have announced it. */
			spin_lock_irqsave(&dev->lock, flags);
			if (!list_empty(&dev->cpkt_resp_q)) {
				atomic_dec(&dev->notify_count);
				cpkt = list_first_entry(&dev->cpkt_resp_q,
						struct rmnet_ctrl_pkt, list);
				list_del(&cpkt->list);
				rmnet_free_ctrl_pkt(cpkt);
			}
			spin_unlock_irqrestore(&dev->lock, flags);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
/*
 * frmnet_setup - ep0 class-request handler for the rmnet interface.
 *
 * Handles the CDC encapsulated-command triple:
 *   SEND_ENCAPSULATED_COMMAND  — arm ep0 to receive the command payload,
 *                                completed in frmnet_cmd_complete();
 *   GET_ENCAPSULATED_RESPONSE  — copy the oldest queued ctrl response;
 *   SET_CONTROL_LINE_STATE     — forward DTR state to the modem.
 *
 * Returns the data-stage length queued on ep0, 0 for a pure status stage,
 * or a negative errno (-EOPNOTSUPP for unknown requests).
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = cdev->req;
	unsigned port_num;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	int ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
	if (!atomic_read(&dev->online)) {
		pr_warning("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		pr_debug("%s: USB_CDC_SEND_ENCAPSULATED_COMMAND\n"
				 , __func__);
		/* Accept the whole data stage; payload arrives via the
		 * completion callback. */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		pr_debug("%s: USB_CDC_GET_ENCAPSULATED_RESPONSE\n", __func__);
		if (w_value) {
			pr_err("%s: invalid w_value = %04x",
				   __func__ , w_value);
			goto invalid;
		} else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			/* Pop the oldest queued response under the lock,
			 * then copy it into the ep0 request. */
			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}
			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* Truncate to what the host asked for. */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;
			rmnet_free_ctrl_pkt(cpkt);
		}
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		pr_debug("%s: USB_CDC_REQ_SET\n", __func__);
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;
		break;

	default:
invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
/*
 * frmnet_bind - allocate the interface id, the three endpoints (bulk IN,
 * bulk OUT, interrupt IN for notifications), the notify request and the
 * per-speed descriptor copies.
 *
 * Errors unwind in reverse order through the labelled cleanup chain.
 * Returns 0 on success or a negative errno.
 */
static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_ep *ep;
	struct usb_composite_dev *cdev = c->cdev;
	int ret = -ENODEV;

	pr_debug("%s: start binding\n", __func__);
	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	/* Bulk IN endpoint. */
	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	/* Update same for u_ether which uses gether port struct */
	dev->gether_port.in_ep = ep;
	ep->driver_data = cdev;

	/* Bulk OUT endpoint. */
	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	/* Update same for u_ether which uses gether port struct */
	dev->gether_port.out_ep = ep;
	ep->driver_data = cdev;

	/* Interrupt IN endpoint for CDC notifications. */
	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}
	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	ret = -ENOMEM;
	f->fs_descriptors = usb_copy_descriptors(rmnet_fs_function);
	if (!f->fs_descriptors) {
		pr_err("%s: no descriptors,usb_copy descriptors(fs)failed\n",
				__func__);
		goto fail;
	}

	if (gadget_is_dualspeed(cdev->gadget)) {
		/* HS descriptors reuse the endpoint addresses picked for FS. */
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
		if (!f->hs_descriptors) {
			pr_err("%s: no hs_descriptors,usb_copy descriptors(hs)failed\n",
					__func__);
			goto fail;
		}
	}

	if (gadget_is_superspeed(cdev->gadget)) {
		/* SS descriptors likewise reuse the FS endpoint addresses. */
		rmnet_ss_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_ss_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_ss_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);
		if (!f->ss_descriptors) {
			pr_err("%s: no ss_descriptors,usb_copy descriptors(ss)failed\n",
					__func__);
			goto fail;
		}
	}

	pr_debug("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	/* Free whichever descriptor sets were already copied. */
	if (f->ss_descriptors)
		usb_free_descriptors(f->ss_descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);
	if (f->fs_descriptors)
		usb_free_descriptors(f->fs_descriptors);
	if (dev->notify_req)
		frmnet_free_req(dev->notify, dev->notify_req);
ep_notify_alloc_fail:
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}
/*
 * frmnet_bind_config - register one rmnet function instance with a USB
 * configuration. For the ETHER data transport a u_ether device is set up
 * first.
 *
 * Fix: the eth_dev obtained from gether_setup_name() used to leak on every
 * subsequent error path (usb_string_id, kasprintf, usb_add_function); the
 * new fail_ether label releases it.
 *
 * Returns 0 on success or a negative errno.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int status;
	struct f_rmnet *dev;
	struct usb_function *f;
	unsigned long flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}
	dev = rmnet_ports[portno].port;

	/* The ETHER data transport needs a backing u_ether device. */
	if (rmnet_ports[portno].data_xport == USB_GADGET_XPORT_ETHER) {
		struct eth_dev *edev = gether_setup_name(c->cdev->gadget, NULL,
						"usb_rmnet");
		if (IS_ERR(edev)) {
			pr_err("%s: gether_setup failed\n", __func__);
			return PTR_ERR(edev);
		}
		dev->gether_port.ioport = edev;
	}

	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			goto fail_ether;
		}
		rmnet_string_defs[0].id = status;
	}

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->gether_port.func;
	/* GFP_ATOMIC: allocated while holding dev->lock. */
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		status = -ENOMEM;
		goto fail_ether;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	f->resume = frmnet_resume;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;
	dev->gether_port.cdc_filter = 0;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		goto fail_ether;
	}

	pr_debug("%s: complete\n", __func__);
	return status;

fail_ether:
	/* Undo gether_setup_name() so a failed bind does not leak ioport. */
	if (dev->gether_port.ioport) {
		gether_cleanup(dev->gether_port.ioport);
		dev->gether_port.ioport = NULL;
	}
	return status;
}
/* Release the u_ether devices of every ETHER-backed rmnet port. */
static void frmnet_unbind_config(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++) {
		if (rmnet_ports[i].data_xport != USB_GADGET_XPORT_ETHER)
			continue;
		gether_cleanup(rmnet_ports[i].port->gether_port.ioport);
		rmnet_ports[i].port->gether_port.ioport = NULL;
	}
}
/* Free every port instance and reset all transport bookkeeping counters. */
static void frmnet_cleanup(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	gbam_cleanup();

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_ctrl_qti_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_ctrl_hsuart_ports = 0;
	no_data_bam_ports = 0;
	no_data_bam2bam_ports = 0;
	no_data_hsic_ports = 0;
	no_data_hsuart_ports = 0;
}
static int frmnet_init_port(const char *ctrl_name, const char *data_name,
const char *port_name)
{
struct f_rmnet *dev;
struct rmnet_ports *rmnet_port;
int ret;
int i;
if (nr_rmnet_ports >= NR_RMNET_PORTS) {
pr_err("%s: Max-%d instances supported\n",
__func__, NR_RMNET_PORTS);
return -EINVAL;
}
pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
__func__, nr_rmnet_ports, ctrl_name, data_name);
dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
if (!dev) {
pr_err("%s: Unable to allocate rmnet device\n", __func__);
return -ENOMEM;
}
dev->port_num = nr_rmnet_ports;
spin_lock_init(&dev->lock);
INIT_LIST_HEAD(&dev->cpkt_resp_q);
rmnet_port = &rmnet_ports[nr_rmnet_ports];
rmnet_port->port = dev;
rmnet_port->port_num = nr_rmnet_ports;
rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
rmnet_port->data_xport = str_to_xport(data_name);
switch (rmnet_port->ctrl_xport) {
case USB_GADGET_XPORT_SMD:
rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
no_ctrl_smd_ports++;
break;
case USB_GADGET_XPORT_QTI:
rmnet_port->ctrl_xport_num = no_ctrl_qti_ports;
no_ctrl_qti_ports++;
break;
case USB_GADGET_XPORT_HSIC:
ghsic_ctrl_set_port_name(port_name, ctrl_name);
rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
no_ctrl_hsic_ports++;
break;
case USB_GADGET_XPORT_HSUART:
rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
no_ctrl_hsuart_ports++;
break;
case USB_GADGET_XPORT_NONE:
break;
default:
pr_err("%s: Un-supported transport: %u\n", __func__,
rmnet_port->ctrl_xport);
ret = -ENODEV;
goto fail_probe;
}
switch (rmnet_port->data_xport) {
case USB_GADGET_XPORT_BAM:
rmnet_port->data_xport_num = no_data_bam_ports;
no_data_bam_ports++;
break;
case USB_GADGET_XPORT_BAM2BAM:
case USB_GADGET_XPORT_BAM2BAM_IPA:
rmnet_port->data_xport_num = no_data_bam2bam_ports;
no_data_bam2bam_ports++;
break;
case USB_GADGET_XPORT_HSIC:
ghsic_data_set_port_name(port_name, data_name);
rmnet_port->data_xport_num = no_data_hsic_ports;
no_data_hsic_ports++;
break;
case USB_GADGET_XPORT_HSUART:
rmnet_port->data_xport_num = no_data_hsuart_ports;
no_data_hsuart_ports++;
break;
case USB_GADGET_XPORT_ETHER:
case USB_GADGET_XPORT_NONE:
break;
default:
pr_err("%s: Un-supported transport: %u\n", __func__,
rmnet_port->data_xport);
ret = -ENODEV;
goto fail_probe;
}
nr_rmnet_ports++;
return 0;
fail_probe:
for (i = 0; i < nr_rmnet_ports; i++)
kfree(rmnet_ports[i].port);
nr_rmnet_ports = 0;
no_ctrl_smd_ports = 0;
no_ctrl_qti_ports = 0;
no_data_bam_ports = 0;
no_ctrl_hsic_ports = 0;
no_data_hsic_ports = 0;
no_ctrl_hsuart_ports = 0;
no_data_hsuart_ports = 0;
return ret;
}
| gpl-2.0 |
Kra1o5/android_kernel_bq_rk3066 | drivers/mtk_wcn_combo/common/linux/osal.c | 89 | 27895 | /*! \file
\brief Declaration of library functions
Any definitions in this file will be shared among GLUE Layer and internal Driver Stack.
*/
/*******************************************************************************
* C O M P I L E R F L A G S
********************************************************************************
*/
/*******************************************************************************
* M A C R O S
********************************************************************************
*/
/*******************************************************************************
* E X T E R N A L R E F E R E N C E S
********************************************************************************
*/
#include <linux/err.h>

#include "osal_typedef.h"
#include "osal.h"
/*******************************************************************************
* C O N S T A N T S
********************************************************************************
*/
/*******************************************************************************
* D A T A T Y P E S
********************************************************************************
*/
/*******************************************************************************
* P U B L I C D A T A
********************************************************************************
*/
/*******************************************************************************
* P R I V A T E D A T A
********************************************************************************
*/
/*
 * CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1).
 * 256-entry lookup table indexed by one message byte XORed with the low
 * byte of the running CRC; consumed by osal_crc16() below.
 */
static UINT16 const crc16_table[256] = {
	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};
/*******************************************************************************
* F U N C T I O N D E C L A R A T I O N S
********************************************************************************
*/
/*******************************************************************************
* F U N C T I O N S
********************************************************************************
*/
/*string operations*/
/* Wrapper: length of NUL-terminated @str, excluding the terminator. */
_osal_inline_ UINT32 osal_strlen(const char *str)
{
	return strlen(str);
}
/* Wrapper: lexicographic compare; <0, 0 or >0 like strcmp(). */
_osal_inline_ INT32 osal_strcmp(const char *dst, const char *src)
{
	return strcmp(dst, src);
}
/* Wrapper: compare at most @len characters, like strncmp(). */
_osal_inline_ INT32 osal_strncmp(const char *dst, const char *src, UINT32 len)
{
	return strncmp(dst, src, len);
}
/* Wrapper: copy @src into @dst; caller guarantees @dst is large enough. */
_osal_inline_ char *osal_strcpy(char *dst, const char *src)
{
	return strcpy(dst, src);
}
/* Wrapper: copy at most @len bytes; like strncpy(), @dst may end up
 * without a NUL terminator if @src is @len or longer. */
_osal_inline_ char *osal_strncpy(char *dst, const char *src, UINT32 len)
{
	return strncpy(dst, src, len);
}
/* Wrapper: append @src to @dst; caller guarantees sufficient space. */
_osal_inline_ char *osal_strcat(char *dst, const char *src)
{
	return strcat(dst, src);
}
/* Wrapper: append at most @len characters of @src to @dst. */
_osal_inline_ char *osal_strncat(char *dst, const char *src, UINT32 len)
{
	return strncat(dst, src, len);
}
/* Wrapper: first occurrence of @c in @str, or NULL if absent. */
_osal_inline_ char *osal_strchr(const char *str, UINT8 c)
{
	return strchr(str, c);
}
/* Wrapper: tokenize *@str at any delimiter in @c; advances *@str. */
_osal_inline_ char *osal_strsep(char **str, const char *c)
{
	return strsep(str, c);
}
/* Wrapper: panic the kernel when @val is non-zero (BUG_ON). */
_osal_inline_ void osal_bug_on(unsigned long val)
{
	BUG_ON(val);
}
/* Wrapper: parse a signed long in base @adecimal; *@c receives the end
 * pointer (simple_strtol semantics — no overflow reporting). */
_osal_inline_ LONG osal_strtol(const char *str, char **c, UINT32 adecimal)
{
	return simple_strtol(str, c, adecimal);
}
/*
 * Format into @buf, writing at most @len bytes including the NUL.
 * Returns the value of vsnprintf() (the length that would have been
 * written, which may exceed @len on truncation).
 */
INT32 osal_snprintf(char *buf, UINT32 len, const char*fmt, ...)
{
	INT32 iRet = 0;
	va_list args;

	va_start(args, fmt);
	iRet = vsnprintf(buf, len, fmt, args);
	va_end(args);

	return iRet;
}
/*
 * printk-backed printf: formats into a bounded local buffer first, so the
 * output is truncated at DBG_LOG_STR_SIZE. Always returns 0.
 */
INT32 osal_print(const char *str, ...)
{
	va_list args;
	char tempString[DBG_LOG_STR_SIZE];

	va_start(args, str);
	vsnprintf(tempString, DBG_LOG_STR_SIZE, str, args);
	va_end(args);

	printk("%s", tempString);
	return 0;
}
/*
 * Like osal_print() but emitted at KERN_DEBUG level. Output is truncated
 * at DBG_LOG_STR_SIZE. Always returns 0.
 */
INT32 osal_dbg_print(const char *str, ...)
{
	va_list args;
	char tempString[DBG_LOG_STR_SIZE];

	va_start(args, str);
	vsnprintf(tempString, DBG_LOG_STR_SIZE, str, args);
	va_end(args);

	printk(KERN_DEBUG "%s", tempString);
	return 0;
}
/*
 * Soft assert: when @expr is false, log the failing @file/@line and return
 * 1 (no BUG). Returns 0 when the assertion holds.
 *
 * With CFG_COMMON_GPIO_DBG_PIN set, GPIO70 is pulsed low for ~10us so an
 * attached logic analyzer can time-stamp the failure.
 */
INT32 osal_dbg_assert(INT32 expr, const char *file, INT32 line)
{
	if (!expr) {
		printk("%s (%d)\n", file, line);
#ifdef CFG_COMMON_GPIO_DBG_PIN
		/* pulse the debug pin to mark the event externally */
		mt_set_gpio_out(GPIO70, GPIO_OUT_ZERO);
		printk("toggle GPIO70\n");
		udelay(10);
		mt_set_gpio_out(GPIO70, GPIO_OUT_ONE);
#endif
		return 1;
	}
	return 0;
}
/*
 * Report an assertion to the log and, on ALPS platforms, raise an AEE
 * kernel warning so the exception is captured by the platform's crash
 * reporting. Always returns 0.
 */
INT32 osal_dbg_assert_aee(const char *module, const char *detail_description){
	osal_err_print("[WMT-ASSERT]""[E][Module]:%s, [INFO]%s\n", module, detail_description);
#if WMT_PLAT_ALPS
	aee_kernel_warning(
		module,
		detail_description);
#endif
	return 0;
}
/*
 * sprintf-style format into @str.
 *
 * NOTE(review): the bound passed to vsnprintf() is DBG_LOG_STR_SIZE, not
 * the size of the caller's buffer — a caller supplying a smaller buffer
 * can still be overrun; verify all call sites provide at least
 * DBG_LOG_STR_SIZE bytes.
 */
INT32 osal_sprintf(char *str, const char *format, ...)
{
	INT32 iRet = 0;
	va_list args;

	va_start(args, format);
	iRet = vsnprintf(str, DBG_LOG_STR_SIZE, format, args);
	va_end(args);

	return iRet;
}
/* Allocate @size bytes of virtually contiguous memory (vmalloc); may
 * sleep, so not callable from atomic context. Returns NULL on failure. */
_osal_inline_ VOID* osal_malloc(UINT32 size)
{
	return vmalloc(size);
}
/* Free memory obtained from osal_malloc() (vfree). */
_osal_inline_ VOID osal_free(const VOID *dst)
{
	vfree(dst);
}
/* Wrapper: fill @len bytes of @buf with the byte value @i. */
_osal_inline_ VOID* osal_memset(VOID *buf, INT32 i, UINT32 len)
{
	return memset(buf, i, len);
}
/* Wrapper: copy @len bytes from @src to @dst (regions must not overlap). */
_osal_inline_ VOID* osal_memcpy(VOID *dst, const VOID *src, UINT32 len)
{
	return memcpy(dst, src, len);
}
/* Wrapper: byte-wise compare of @len bytes; <0, 0 or >0 like memcmp(). */
_osal_inline_ INT32 osal_memcmp(const VOID *buf1, const VOID *buf2, UINT32 len)
{
	return memcmp(buf1, buf2, len);
}
/*
 * Table-driven CRC-16 (poly 0x8005, see crc16_table above) over @length
 * bytes of @buffer, starting from an all-zero CRC register.
 */
_osal_inline_ UINT16 osal_crc16(const UINT8 *buffer, const UINT32 length)
{
	UINT16 crc = 0;
	UINT32 idx;

	for (idx = 0; idx < length; idx++)
		crc = (crc >> 8) ^ crc16_table[(crc ^ buffer[idx]) & 0xff];

	return crc;
}
/*
 * OSAL layer thread operation related APIs
 */
/*
 * Create (but do not start) a kernel thread from the OSAL_THREAD
 * descriptor; the task_struct is stored in pThread->pThread.
 * Returns 0 on success, -1 on failure.
 *
 * Fix: kthread_create() returns ERR_PTR(-errno) on failure, never NULL,
 * so the old "NULL == pThread->pThread" check could not fire and an
 * error pointer leaked to osal_thread_run()/kthread_stop().
 */
_osal_inline_ INT32
osal_thread_create (
    P_OSAL_THREAD pThread
    )
{
    if (!pThread)
        return -1;
    pThread->pThread = kthread_create(pThread->pThreadFunc,
                                      pThread->pThreadData,
                                      pThread->threadName);
    if (IS_ERR(pThread->pThread)) {
        pThread->pThread = NULL; /* never hand an ERR_PTR to later calls */
        return -1;
    }
    return 0;
}
/*
 * Start a thread previously created by osal_thread_create().
 * Returns 0 on success, -1 when no thread has been created.
 */
_osal_inline_ INT32
osal_thread_run (
    P_OSAL_THREAD pThread
    )
{
    if (!pThread->pThread)
        return -1;

    wake_up_process(pThread->pThread);
    return 0;
}
/*
 * Ask the thread to stop and wait for it to exit.
 * Returns kthread_stop()'s result, or -1 when there is no thread.
 */
_osal_inline_ INT32
osal_thread_stop (
    P_OSAL_THREAD pThread
    )
{
    if (!pThread || !pThread->pThread)
        return -1;

    return kthread_stop(pThread->pThread);
}
/*
 * For use inside the thread function: non-zero when the thread should
 * terminate. A missing/destroyed thread also reports "should stop" (1).
 */
_osal_inline_ INT32
osal_thread_should_stop (
    P_OSAL_THREAD pThread
    )
{
    if (!pThread || !pThread->pThread)
        return 1;

    return kthread_should_stop();
}
/*
 * Block the thread on pEvent's waitqueue until either the thread is asked
 * to stop or pChecker(pThread) reports the event condition true.
 * Returns wait_event_interruptible()'s result (0 or -ERESTARTSYS), or -1
 * on bad arguments. The condition is re-evaluated on every wakeup, per
 * wait_event_interruptible() macro semantics.
 */
_osal_inline_ INT32
osal_thread_wait_for_event (
    P_OSAL_THREAD pThread,
    P_OSAL_EVENT pEvent,
    P_OSAL_EVENT_CHECKER pChecker
    )
{
    /* P_DEV_WMT pDevWmt;*/
    if ( (pThread) && (pThread->pThread) && (pEvent) && (pChecker)) {
        /* pDevWmt = (P_DEV_WMT)(pThread->pThreadData);*/
        return wait_event_interruptible(pEvent->waitQueue,
            (/*!RB_EMPTY(&pDevWmt->rActiveOpQ) ||*/ osal_thread_should_stop(pThread) || (*pChecker)(pThread)));
    }
    return -1;
}
/*
 * Stop the thread (if any) and clear the handle so later OSAL thread
 * calls see it as gone. Always returns 0.
 */
_osal_inline_ INT32
osal_thread_destroy (
    P_OSAL_THREAD pThread
    )
{
    if (!pThread || !pThread->pThread)
        return 0;

    kthread_stop(pThread->pThread);
    pThread->pThread = NULL;
    return 0;
}
/*
 * OSAL layer signal (completion) operation related APIs:
 * initialize, wait, wait with timeout, raise, destroy.
 */
/* Signal wrappers over kernel completions. */
_osal_inline_ INT32
osal_signal_init (
    P_OSAL_SIGNAL pSignal
    )
{
    if (pSignal) {
        init_completion(&pSignal->comp);
        return 0;
    }
    else {
        return -1;
    }
}
/* Block (interruptibly) until the signal is raised; -1 on NULL argument. */
_osal_inline_ INT32
osal_wait_for_signal (
    P_OSAL_SIGNAL pSignal
    )
{
    if (pSignal) {
        wait_for_completion_interruptible(&pSignal->comp);
        return 0;
    }
    else {
        return -1;
    }
}
/*
 * Block until the signal is raised or pSignal->timeoutValue ms elapse.
 * Returns wait_for_completion_timeout()'s value: 0 on timeout, >0 if
 * completed. Deliberately NOT interruptible -- see comment below.
 */
_osal_inline_ INT32
osal_wait_for_signal_timeout (
    P_OSAL_SIGNAL pSignal
    )
{
    /* return wait_for_completion_interruptible_timeout(&pSignal->comp, msecs_to_jiffies(pSignal->timeoutValue));*/
    /* [ChangeFeature][George] gps driver may be closed by -ERESTARTSYS.
     * Avoid using *interruptible" version in order to complete our jobs, such
     * as function off gracefully.
     */
    return wait_for_completion_timeout(&pSignal->comp, msecs_to_jiffies(pSignal->timeoutValue));
}
/* Wake one waiter. NOTE(review): no NULL check -- callers must pass a valid signal. */
_osal_inline_ INT32
osal_raise_signal (
    P_OSAL_SIGNAL pSignal
    )
{
    // TODO:[FixMe][GeorgeKuo]: DO sanity check here!!!
    complete(&pSignal->comp);
    return 0;
}
/* Completions need no teardown; only the timeout value is reset. */
_osal_inline_ INT32
osal_signal_deinit (
    P_OSAL_SIGNAL pSignal
    )
{
    // TODO:[FixMe][GeorgeKuo]: DO sanity check here!!!
    pSignal->timeoutValue = 0;
    return 0;
}
/*
 * OSAL layer event (waitqueue) operation related APIs:
 * initialize, wait, wait with timeout, trigger, destroy.
 */
/* Event wrappers over kernel waitqueues. */
INT32 osal_event_init (
    P_OSAL_EVENT pEvent
    )
{
    init_waitqueue_head(&pEvent->waitQueue);
    return 0;
}
/* Sleep until condition(cond_pa) is true; interruptible (may return -ERESTARTSYS). */
INT32 osal_wait_for_event(
    P_OSAL_EVENT pEvent,
    INT32 (*condition)(PVOID),
    void *cond_pa
    )
{
    return wait_event_interruptible(pEvent->waitQueue, condition(cond_pa));
}
/*
 * Same, bounded by pEvent->timeoutValue ms. Returns 0 on timeout, >0 when
 * the condition became true, negative on signal interruption.
 */
INT32 osal_wait_for_event_timeout(
    P_OSAL_EVENT pEvent,
    INT32 (*condition)(PVOID),
    void *cond_pa
    )
{
    return wait_event_interruptible_timeout(pEvent->waitQueue, condition(cond_pa), msecs_to_jiffies(pEvent->timeoutValue));
}
/* Wake all interruptible waiters; waiters re-check their condition themselves. */
INT32 osal_trigger_event(
    P_OSAL_EVENT pEvent
    )
{
    INT32 ret = 0;
    wake_up_interruptible(&pEvent->waitQueue);
    return ret;
}
/* Waitqueues need no teardown. */
INT32
osal_event_deinit (
    P_OSAL_EVENT pEvent
    )
{
    return 0;
}
/*
 * Wait until bit 'bitOffset' of *pState is SET, with optional timeout
 * (pEvent->timeoutValue == 0 means wait forever). Return value follows
 * wait_event_interruptible[_timeout]() semantics.
 */
_osal_inline_ LONG osal_wait_for_event_bit_set(P_OSAL_EVENT pEvent, PULONG pState, UINT32 bitOffset)
{
    UINT32 ms = pEvent->timeoutValue;
    if (ms != 0)
    {
        return wait_event_interruptible_timeout(pEvent->waitQueue, test_bit(bitOffset, pState), msecs_to_jiffies(ms));
    }
    else
    {
        return wait_event_interruptible(pEvent->waitQueue, test_bit(bitOffset, pState));
    }
}
/* Mirror of the above: wait until the bit is CLEAR. */
_osal_inline_ LONG osal_wait_for_event_bit_clr(P_OSAL_EVENT pEvent, PULONG pState, UINT32 bitOffset)
{
    UINT32 ms = pEvent->timeoutValue;
    if (ms != 0)
    {
        return wait_event_interruptible_timeout(pEvent->waitQueue, !test_bit(bitOffset, pState), msecs_to_jiffies(ms));
    }
    else
    {
        return wait_event_interruptible(pEvent->waitQueue, !test_bit(bitOffset, pState));
    }
}
/*
 * Bit test-and-set/clear operation APIs
 */
/*
 * When the OS provides atomic bit ops (OS_BIT_OPS_SUPPORT) the lock/unlock
 * pair compiles away to nothing; otherwise the no-op fallbacks below keep
 * the call sites identical. NOTE(review): the fallbacks do not actually
 * lock -- presumably OS_BIT_OPS_SUPPORT is always set on this platform;
 * verify the build configuration.
 */
#if OS_BIT_OPS_SUPPORT
#define osal_bit_op_lock(x)
#define osal_bit_op_unlock(x)
#else
_osal_inline_ INT32 osal_bit_op_lock(P_OSAL_UNSLEEPABLE_LOCK pLock)
{
    return 0;
}
_osal_inline_ INT32 osal_bit_op_unlock(P_OSAL_UNSLEEPABLE_LOCK pLock)
{
    return 0;
}
#endif
/* Clear bit 'bitOffset' in pData->data. Always returns 0. */
_osal_inline_ INT32 osal_clear_bit(UINT32 bitOffset, P_OSAL_BIT_OP_VAR pData)
{
    osal_bit_op_lock(&(pData->opLock));
    clear_bit(bitOffset, &pData->data);
    osal_bit_op_unlock(&(pData->opLock));
    return 0;
}
/* Set bit 'bitOffset' in pData->data. Always returns 0. */
_osal_inline_ INT32 osal_set_bit(UINT32 bitOffset, P_OSAL_BIT_OP_VAR pData)
{
    osal_bit_op_lock(&(pData->opLock));
    set_bit(bitOffset, &pData->data);
    osal_bit_op_unlock(&(pData->opLock));
    return 0;
}
/* Return the current value (0/1) of the bit. */
_osal_inline_ INT32 osal_test_bit(UINT32 bitOffset, P_OSAL_BIT_OP_VAR pData)
{
    UINT32 iRet = 0;
    osal_bit_op_lock(&(pData->opLock));
    iRet = test_bit(bitOffset, &pData->data);
    osal_bit_op_unlock(&(pData->opLock));
    return iRet;
}
/* Atomically read-then-clear the bit; returns the previous value. */
_osal_inline_ INT32 osal_test_and_clear_bit(UINT32 bitOffset, P_OSAL_BIT_OP_VAR pData)
{
    UINT32 iRet = 0;
    osal_bit_op_lock(&(pData->opLock));
    iRet = test_and_clear_bit(bitOffset, &pData->data);
    osal_bit_op_unlock(&(pData->opLock));
    return iRet;
}
/* Atomically read-then-set the bit; returns the previous value. */
_osal_inline_ INT32 osal_test_and_set_bit(UINT32 bitOffset, P_OSAL_BIT_OP_VAR pData)
{
    UINT32 iRet = 0;
    osal_bit_op_lock(&(pData->opLock));
    iRet = test_and_set_bit(bitOffset, &pData->data);
    osal_bit_op_unlock(&(pData->opLock));
    return iRet;
}
/*
 * Timer operation APIs: create, start, stop, modify, delete.
 */
/*
 * Bind the legacy (pre-4.15) kernel timer API to an OSAL timer: the
 * callback and its 'data' cookie come from the OSAL_TIMER fields.
 * The timer is not armed here -- see osal_timer_start().
 */
INT32 osal_timer_create(P_OSAL_TIMER pTimer)
{
    struct timer_list *timer = &pTimer->timer;
    init_timer(timer);
    timer->function = pTimer->timeoutHandler;
    timer->data = (ULONG)pTimer->timeroutHandlerData;
    return 0;
}
/*
 * Arm the timer to fire 'ms' milliseconds from now. Always returns 0.
 *
 * Fix: the old expression ms/(1000/HZ) truncated for HZ < 1000 (any
 * ms < 10 on HZ=100 became 0 jiffies) and divided by zero for HZ > 1000.
 * msecs_to_jiffies() is correct for every HZ and rounds up.
 */
INT32 osal_timer_start(P_OSAL_TIMER pTimer, UINT32 ms)
{
    struct timer_list *timer = &pTimer->timer;

    timer->expires = jiffies + msecs_to_jiffies(ms);
    add_timer(timer);
    return 0;
}
/* Deactivate the timer; does not wait for a running handler. */
INT32 osal_timer_stop(P_OSAL_TIMER pTimer)
{
    struct timer_list *timer = &pTimer->timer;
    del_timer(timer);
    return 0;
}
/*
 * Deactivate the timer AND wait for a concurrently-running handler to
 * finish. Must not be called from the handler itself (deadlock).
 */
INT32 osal_timer_stop_sync(P_OSAL_TIMER pTimer)
{
    struct timer_list *timer = &pTimer->timer;
    del_timer_sync(timer);
    return 0;
}
/*
 * Re-arm the timer to fire 'ms' milliseconds from now (works whether
 * or not the timer is currently pending). Always returns 0.
 *
 * Fix: (ms)/(1000/HZ) truncated for HZ < 1000 and divided by zero for
 * HZ > 1000; use msecs_to_jiffies(), as osal_timer_start() now does.
 */
INT32 osal_timer_modify(P_OSAL_TIMER pTimer, UINT32 ms)
{
    mod_timer(&pTimer->timer, jiffies + msecs_to_jiffies(ms));
    return 0;
}
/*
 * Back an OSAL_FIFO with a struct kfifo. With a caller-supplied 'buf'
 * the size must be a power of two (kfifo requirement); with buf == NULL
 * kfifo allocates its own storage. Returns 0 on success, -1 on failure.
 *
 * Fixes (>= 2.6.35 path): the kzalloc() result was never checked, and
 * the non-power-of-2 error path called kfifo_free() on a kfifo that was
 * never initialized (dereferencing garbage) while leaking the struct
 * itself; it must simply be kfree()d.
 */
INT32 _osal_fifo_init(OSAL_FIFO *pFifo, UINT8 *buf, UINT32 size)
{
    struct kfifo *fifo = NULL;
    INT32 ret = -1;

    if (!pFifo || pFifo->pFifoBody) {
        printk(KERN_ERR "pFifo must be !NULL, pFifo->pFifoBody must be NULL\n");
        printk(KERN_ERR "pFifo(0x%p), pFifo->pFifoBody(0x%p)\n", pFifo, pFifo->pFifoBody);
        return -1;
    }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
    spin_lock_init(&pFifo->fifoSpinlock);
    fifo = kfifo_alloc(size, /*GFP_KERNEL*/GFP_ATOMIC, &pFifo->fifoSpinlock);
    if (NULL == fifo)
        ret = -2;
    else
        ret = 0;
#else
    fifo = kzalloc(sizeof(struct kfifo), GFP_ATOMIC);
    if (!fifo) {
        pFifo->pFifoBody = NULL;
        return -1;
    }
    if (!buf) {
        /* fifo's buffer is not ready, we allocate automatically */
        ret = kfifo_alloc(fifo, size, /*GFP_KERNEL*/GFP_ATOMIC);
        if (ret < 0) {
            kfree(fifo);
            fifo = NULL;
        }
    } else if (is_power_of_2(size)) {
        kfifo_init(fifo, buf, size);
        ret = 0;
    } else {
        /* caller-supplied buffers must be power-of-2 sized */
        kfree(fifo); /* was kfifo_free() on uninitialized state */
        fifo = NULL;
        ret = -1;
    }
#endif
    pFifo->pFifoBody = fifo;
    return (ret < 0) ? (-1) : (0);
}
/*
 * Release kfifo-internal storage. NOTE: on >= 2.6.35 kernels kfifo_free()
 * frees only the internal buffer; the struct kfifo allocated in
 * _osal_fifo_init() is kfree()d separately by osal_fifo_deinit().
 */
INT32 _osal_fifo_deinit(OSAL_FIFO *pFifo)
{
    struct kfifo *fifo = NULL;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo)
    {
        kfifo_free(fifo);
    }
    return 0;
}
/* Total capacity of the fifo in bytes; -1 on bad arguments. */
INT32 _osal_fifo_size(OSAL_FIFO *pFifo)
{
    struct kfifo *fifo = NULL;
    INT32 ret = 0;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo)
    {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
        /* old kfifo exposed the size field directly */
        ret = fifo->size;
#else
        ret = kfifo_size(fifo);
#endif
    }
    return ret;
}
/*returns unused bytes in fifo*/
/* Free space (unused bytes) remaining in the fifo; -1 on bad arguments. */
INT32 _osal_fifo_avail_size(OSAL_FIFO *pFifo)
{
    struct kfifo *fifo = NULL;
    INT32 ret = 0;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo)
    {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
        /* old API had no kfifo_avail(); derive it from size - used */
        ret = fifo->size - kfifo_len(fifo);
#else
        ret = kfifo_avail(fifo);
#endif
    }
    return ret;
}
/*returns used bytes in fifo*/
/* Number of bytes currently stored in the fifo; -1 on bad arguments. */
INT32 _osal_fifo_len(OSAL_FIFO *pFifo)
{
    struct kfifo *fifo;

    if (!pFifo || !pFifo->pFifoBody) {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }

    fifo = (struct kfifo *)pFifo->pFifoBody;
    return kfifo_len(fifo);
}
/* Non-zero when the fifo holds no data; -1 on bad arguments. */
INT32 _osal_fifo_is_empty(OSAL_FIFO *pFifo)
{
    struct kfifo *fifo = NULL;
    INT32 ret = 0;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo)
    {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
        /* old kfifo: equal read/write cursors means empty */
        ret = (fifo->in == fifo->out);
#else
        ret = kfifo_is_empty(fifo);
#endif
    }
    return ret;
}
/* Non-zero when the fifo is at capacity; -1 on bad arguments. */
INT32 _osal_fifo_is_full(OSAL_FIFO *pFifo)
{
    struct kfifo *fifo = NULL;
    INT32 ret = 0;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo)
    {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
        ret = (fifo->size == _osal_fifo_len(pFifo));
#else
        ret = kfifo_is_full(fifo);
#endif
    }
    return ret;
}
/*
 * Copy up to 'len' bytes from 'buf' into the fifo; returns the number of
 * bytes actually copied, 0 when the request does not fit, -1 on bad
 * arguments. NOTE(review): a full fifo logs the same error message as a
 * bad call -- callers relying on best-effort writes will be noisy.
 */
INT32 _osal_fifo_data_in(OSAL_FIFO *pFifo, const VOID *buf, UINT32 len)
{
    struct kfifo *fifo = NULL;
    INT32 ret = 0;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo && buf && (len <= _osal_fifo_avail_size(pFifo)))
    {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
        /* kfifo_put/get were renamed kfifo_in/out in 2.6.33 */
        ret = kfifo_put(fifo, buf, len);
#else
        ret = kfifo_in(fifo, buf, len);
#endif
    }
    else
    {
        printk("%s: kfifo_in, error, len = %d, _osal_fifo_avail_size = %d, buf=%p\n",
            __func__, len, _osal_fifo_avail_size(pFifo), buf);
        ret = 0;
    }
    return ret;
}
/*
 * Copy up to 'len' bytes out of the fifo into 'buf'; returns the number
 * of bytes actually copied, 0 when fewer than 'len' bytes are queued,
 * -1 on bad arguments.
 */
INT32 _osal_fifo_data_out(OSAL_FIFO *pFifo, void *buf, UINT32 len)
{
    struct kfifo *fifo = NULL;
    INT32 ret = 0;
    if(!pFifo || !pFifo->pFifoBody)
    {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }
    fifo = (struct kfifo *)pFifo->pFifoBody;
    if(fifo && buf && (len <= _osal_fifo_len(pFifo)))
    {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
        ret = kfifo_get(fifo, buf, len);
#else
        ret = kfifo_out(fifo, buf, len);
#endif
    }
    else
    {
        printk("%s: kfifo_out, error, len = %d, osal_fifo_len = %d, buf=%p\n",
            __func__, len, _osal_fifo_len(pFifo), buf);
        ret = 0;
    }
    return ret;
}
/* Discard all queued data (cursors reset, storage kept); -1 on bad arguments. */
INT32 _osal_fifo_reset(OSAL_FIFO *pFifo)
{
    if (!pFifo || !pFifo->pFifoBody) {
        printk("%s:pFifo = NULL or pFifo->pFifoBody = NULL, error\n", __func__);
        return -1;
    }

    kfifo_reset((struct kfifo *)pFifo->pFifoBody);
    return 0;
}
/*
 * Populate the OSAL_FIFO vtable and (re)allocate its kfifo backing store.
 * Returns 0 on success, -1 when pFifo is NULL.
 *
 * Fix: the re-initialization path passed pFifo->pFifoBody (a struct
 * kfifo *) to FifoDeInit, which expects the OSAL_FIFO itself; the callee
 * then read ((OSAL_FIFO *)kfifo)->pFifoBody -- garbage. Pass pFifo.
 */
INT32 osal_fifo_init(P_OSAL_FIFO pFifo, UINT8 *buffer, UINT32 size)
{
    if (!pFifo) {
        printk("%s:pFifo = NULL, error\n", __func__);
        return -1;
    }

    pFifo->FifoInit = _osal_fifo_init;
    pFifo->FifoDeInit = _osal_fifo_deinit;
    pFifo->FifoSz = _osal_fifo_size;
    pFifo->FifoAvailSz = _osal_fifo_avail_size;
    pFifo->FifoLen = _osal_fifo_len;
    pFifo->FifoIsEmpty = _osal_fifo_is_empty;
    pFifo->FifoIsFull = _osal_fifo_is_full;
    pFifo->FifoDataIn = _osal_fifo_data_in;
    pFifo->FifoDataOut = _osal_fifo_data_out;
    pFifo->FifoReset = _osal_fifo_reset;

    if (NULL != pFifo->pFifoBody) {
        printk("%s:Becasue pFifo room is avialable, we clear the room and allocate them again.\n", __func__);
        pFifo->FifoDeInit(pFifo); /* was pFifo->pFifoBody: wrong argument type */
        pFifo->pFifoBody = NULL;
    }
    pFifo->FifoInit(pFifo, buffer, size);
    return 0;
}
/*
 * Tear down a fifo built by osal_fifo_init(): release the kfifo's
 * internal buffer via FifoDeInit, then free the struct kfifo that
 * _osal_fifo_init() kzalloc'ed (>= 2.6.35 only).
 *
 * Fix: when pFifo was NULL the original logged an error but then fell
 * through and dereferenced pFifo->pFifoBody -- NULL pointer dereference.
 */
VOID osal_fifo_deinit(P_OSAL_FIFO pFifo)
{
    if (!pFifo) {
        printk("%s:pFifo = NULL, error\n", __func__);
        return; /* must not touch pFifo below */
    }

    pFifo->FifoDeInit(pFifo);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
#else
    if (pFifo->pFifoBody) {
        kfree(pFifo->pFifoBody);
        pFifo->pFifoBody = NULL; /* avoid a dangling pointer on reuse */
    }
#endif
}
/*
 * Public fifo API: NULL-checked dispatch through the function pointers
 * installed by osal_fifo_init().
 * NOTE(review): the UINT32-returning wrappers forward INT32 results, so
 * an internal -1 error reaches the caller as 0xFFFFFFFF -- verify callers.
 */
INT32 osal_fifo_reset(P_OSAL_FIFO pFifo)
{
    if(pFifo)
    {
        return pFifo->FifoReset(pFifo);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return -1;
    }
}
/* Enqueue up to 'size' bytes from 'buffer'; returns bytes written. */
UINT32 osal_fifo_in(P_OSAL_FIFO pFifo, PUINT8 buffer, UINT32 size)
{
    if(pFifo)
    {
        return pFifo->FifoDataIn(pFifo, buffer, size);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/* Dequeue up to 'size' bytes into 'buffer'; returns bytes read. */
UINT32 osal_fifo_out(P_OSAL_FIFO pFifo, PUINT8 buffer, UINT32 size)
{
    if(pFifo)
    {
        return pFifo->FifoDataOut(pFifo, buffer, size);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/* Bytes currently queued. */
UINT32 osal_fifo_len(P_OSAL_FIFO pFifo)
{
    if(pFifo)
    {
        return pFifo->FifoLen(pFifo);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/* Total capacity in bytes. */
UINT32 osal_fifo_sz(P_OSAL_FIFO pFifo)
{
    if(pFifo)
    {
        return pFifo->FifoSz(pFifo);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/* Remaining free space in bytes. */
UINT32 osal_fifo_avail(P_OSAL_FIFO pFifo)
{
    if(pFifo)
    {
        return pFifo->FifoAvailSz(pFifo);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/* Non-zero when no data is queued. */
UINT32 osal_fifo_is_empty(P_OSAL_FIFO pFifo)
{
    if(pFifo)
    {
        return pFifo->FifoIsEmpty(pFifo);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/* Non-zero when the fifo is at capacity. */
UINT32 osal_fifo_is_full(P_OSAL_FIFO pFifo)
{
    if(pFifo)
    {
        return pFifo->FifoIsFull(pFifo);
    }
    else
    {
        printk("%s:pFifo = NULL, error\n", __func__);
        return 0;
    }
}
/*
 * Android wakelock wrappers. pLock->name must be populated before
 * osal_wake_lock_init() is called. All return -1 on a NULL argument.
 */
INT32 osal_wake_lock_init(P_OSAL_WAKE_LOCK pLock)
{
    if(!pLock)
    {
        return -1;
    }
    else
    {
        wake_lock_init(&pLock->wake_lock, WAKE_LOCK_SUSPEND, pLock->name);
        return 0;
    }
}
/* Take the wakelock: prevents system suspend until released. */
INT32 osal_wake_lock(P_OSAL_WAKE_LOCK pLock)
{
    if(!pLock)
    {
        return -1;
    }
    else
    {
        wake_lock(&pLock->wake_lock);
        return 0;
    }
}
/* Release the wakelock. */
INT32 osal_wake_unlock(P_OSAL_WAKE_LOCK pLock)
{
    if(!pLock)
    {
        return -1;
    }
    else
    {
        wake_unlock(&pLock->wake_lock);
        return 0;
    }
}
/* Non-zero when the wakelock is currently held; -1 on NULL argument. */
INT32 osal_wake_lock_count(P_OSAL_WAKE_LOCK pLock)
{
    INT32 count = 0;
    if(!pLock)
    {
        return -1;
    }
    else
    {
        count = wake_lock_active(&pLock->wake_lock);
        return count;
    }
}
/*
 * Unsleepable (spinlock-based) lock operation APIs:
 * init, lock, unlock, destroy.
 */
/*
 * Spinlock-based ("unsleepable") lock wrappers, IRQ-safe via
 * irqsave/irqrestore. Under CONFIG_PROVE_LOCKING the init function is
 * provided elsewhere (lockdep needs a static key per lock site).
 */
#if !defined(CONFIG_PROVE_LOCKING)
INT32 osal_unsleepable_lock_init (P_OSAL_UNSLEEPABLE_LOCK pUSL)
{
    spin_lock_init(&(pUSL->lock));
    return 0;
}
#endif
/*
 * Acquire with local interrupts disabled. The saved flags live in the
 * lock object itself, so a given lock must not be acquired from two
 * contexts concurrently with different IRQ states.
 */
INT32 osal_lock_unsleepable_lock (P_OSAL_UNSLEEPABLE_LOCK pUSL)
{
    spin_lock_irqsave(&(pUSL->lock), pUSL->flag);
    return 0;
}
INT32 osal_unlock_unsleepable_lock (P_OSAL_UNSLEEPABLE_LOCK pUSL)
{
    spin_unlock_irqrestore(&(pUSL->lock), pUSL->flag);
    return 0;
}
/* Spinlocks need no teardown. ('extern' on a definition is legal but redundant.) */
extern INT32 osal_unsleepable_lock_deinit (P_OSAL_UNSLEEPABLE_LOCK pUSL)
{
    return 0;
}
/*
 * Sleepable (mutex-based) lock operation APIs:
 * init, lock, unlock, destroy.
 */
/*
 * Mutex-based ("sleepable") lock wrappers. Under CONFIG_PROVE_LOCKING
 * the init function is provided elsewhere for lockdep.
 */
#if !defined(CONFIG_PROVE_LOCKING)
INT32 osal_sleepable_lock_init (P_OSAL_SLEEPABLE_LOCK pSL)
{
    mutex_init (&pSL->lock);
    return 0;
}
#endif
/*
 * Acquire the mutex; may sleep. Returns 0 on success or -EINTR when the
 * task is killed while waiting (mutex_lock_killable) -- callers must
 * check the result before assuming the lock is held.
 */
INT32 osal_lock_sleepable_lock (P_OSAL_SLEEPABLE_LOCK pSL)
{
    return mutex_lock_killable(&pSL->lock);
}
INT32 osal_unlock_sleepable_lock (P_OSAL_SLEEPABLE_LOCK pSL)
{
    mutex_unlock(&pSL->lock);
    return 0;
}
INT32 osal_sleepable_lock_deinit (P_OSAL_SLEEPABLE_LOCK pSL)
{
    mutex_destroy (&pSL->lock);
    return 0;
}
/* Sleep for at least 'ms' milliseconds (non-interruptible). Always returns 0. */
INT32 osal_msleep(UINT32 ms)
{
    msleep(ms);
    return 0;
}
/*
 * Fetch the current wall-clock time. Either output pointer may be NULL;
 * the non-NULL ones are still filled, and -1 is returned if any pointer
 * was NULL (0 when both were provided).
 */
INT32 osal_gettimeofday(PINT32 sec, PINT32 usec)
{
    struct timeval tv;
    INT32 status = 0;

    do_gettimeofday(&tv);

    if (sec)
        *sec = tv.tv_sec;
    else
        status = -1;

    if (usec)
        *usec = tv.tv_usec;
    else
        status = -1;

    return status;
}
/*
 * Log the current wall-clock time, prefixed by 'prefix', at debug level.
 * Returns the sum of osal_gettimeofday() and osal_dbg_print() results.
 */
INT32 osal_printtimeofday(const PUINT8 prefix)
{
    INT32 sec;
    INT32 usec;
    INT32 ret = osal_gettimeofday(&sec, &usec);

    ret += osal_dbg_print("%s>sec=%d, usec=%d\n", prefix, sec, usec);
    return ret;
}
/*
 * Hex-dump up to min(len, limit) bytes of 'buf' to the kernel log,
 * 16 bytes per line, under the given title. limit == 0 means no cap.
 *
 * Fix: the loop index was INT32 while dump_len is UINT32, a
 * signed/unsigned comparison (-Wsign-compare); also removed the dead
 * #if 0 block.
 */
VOID
osal_buffer_dump (
    const UINT8 *buf,
    const UINT8 *title,
    const UINT32 len,
    const UINT32 limit
    )
{
    UINT32 k;
    UINT32 dump_len;

    printk("start of dump>[%s] len=%d, limit=%d,", title, len, limit);
    dump_len = ((0 != limit) && (len > limit)) ? limit : len;
    for (k = 0; k < dump_len; k++) {
        if ((k != 0) && (k % 16 == 0))
            printk("\n");
        printk("0x%02x ", buf[k]);
    }
    printk("<end of dump\n");
}
/* Operation id of 'pOp', or 0xFFFFFFFF when pOp is NULL. */
UINT32 osal_op_get_id(P_OSAL_OP pOp)
{
    if (!pOp)
        return 0xFFFFFFFF;
    return pOp->op.opId;
}
/* TRUE when the op carries a non-zero signal timeout (i.e. someone waits on it). */
MTK_WCN_BOOL osal_op_is_wait_for_signal(P_OSAL_OP pOp)
{
    if (pOp && pOp->signal.timeoutValue)
        return MTK_WCN_BOOL_TRUE;
    return MTK_WCN_BOOL_FALSE;
}
/* Record the op's result and wake its waiter; no-op when pOp is NULL. */
VOID osal_op_raise_signal(P_OSAL_OP pOp, INT32 result)
{
    if (!pOp)
        return;

    pOp->result = result;
    osal_raise_signal(&pOp->signal);
}
| gpl-2.0 |
chillwater/linux-3.4 | drivers/staging/comedi/kcomedilib/kcomedilib_main.c | 601 | 5405 | /*
kcomedilib/kcomedilib.c
a comedlib interface for kernel modules
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/io.h>
#include "../comedi.h"
#include "../comedilib.h"
#include "../comedidev.h"
MODULE_AUTHOR("David Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi kernel library");
MODULE_LICENSE("GPL");
struct comedi_device *comedi_open(const char *filename)
{
struct comedi_device *dev, *retval = NULL;
unsigned int minor;
if (strncmp(filename, "/dev/comedi", 11) != 0)
return NULL;
if (kstrtouint(filename + 11, 0, &minor))
return NULL;
if (minor >= COMEDI_NUM_BOARD_MINORS)
return NULL;
dev = comedi_dev_get_from_minor(minor);
if (!dev)
return NULL;
down_read(&dev->attach_lock);
if (dev->attached)
retval = dev;
else
retval = NULL;
up_read(&dev->attach_lock);
if (retval == NULL)
comedi_dev_put(dev);
return retval;
}
EXPORT_SYMBOL_GPL(comedi_open);
/* Release the device reference taken by comedi_open(). Always returns 0. */
int comedi_close(struct comedi_device *dev)
{
    comedi_dev_put(dev);
    return 0;
}
EXPORT_SYMBOL_GPL(comedi_close);
/*
 * Execute a single INSN_BITS or INSN_CONFIG instruction on a subdevice,
 * serialized by dev->mutex. Validates attachment, subdevice index, type,
 * and chanlist, and refuses a busy subdevice. Returns the driver's result
 * or a negative errno.
 */
static int comedi_do_insn(struct comedi_device *dev,
              struct comedi_insn *insn,
              unsigned int *data)
{
    struct comedi_subdevice *s;
    int ret;

    mutex_lock(&dev->mutex);

    if (!dev->attached) {
        ret = -EINVAL;
        goto error;
    }

    /* a subdevice instruction */
    if (insn->subdev >= dev->n_subdevices) {
        ret = -EINVAL;
        goto error;
    }
    s = &dev->subdevices[insn->subdev];

    if (s->type == COMEDI_SUBD_UNUSED) {
        dev_err(dev->class_dev,
            "%d not useable subdevice\n", insn->subdev);
        ret = -EIO;
        goto error;
    }

    /* XXX check lock */

    ret = comedi_check_chanlist(s, 1, &insn->chanspec);
    if (ret < 0) {
        dev_err(dev->class_dev, "bad chanspec\n");
        ret = -EINVAL;
        goto error;
    }

    if (s->busy) {
        ret = -EBUSY;
        goto error;
    }
    /* mark the subdevice busy for the duration of the handler call */
    s->busy = dev;

    switch (insn->insn) {
    case INSN_BITS:
        ret = s->insn_bits(dev, s, insn, data);
        break;
    case INSN_CONFIG:
        /* XXX should check instruction length */
        ret = s->insn_config(dev, s, insn, data);
        break;
    default:
        ret = -EINVAL;
        break;
    }

    s->busy = NULL;
error:
    mutex_unlock(&dev->mutex);

    return ret;
}
int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int *io)
{
struct comedi_insn insn;
unsigned int data[2];
int ret;
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_CONFIG;
insn.n = 2;
insn.subdev = subdev;
insn.chanspec = CR_PACK(chan, 0, 0);
data[0] = INSN_CONFIG_DIO_QUERY;
data[1] = 0;
ret = comedi_do_insn(dev, &insn, data);
if (ret >= 0)
*io = data[1];
return ret;
}
EXPORT_SYMBOL_GPL(comedi_dio_get_config);
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int io)
{
struct comedi_insn insn;
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_CONFIG;
insn.n = 1;
insn.subdev = subdev;
insn.chanspec = CR_PACK(chan, 0, 0);
return comedi_do_insn(dev, &insn, &io);
}
EXPORT_SYMBOL_GPL(comedi_dio_config);
/*
 * Read/write multiple DIO channels at once with an INSN_BITS
 * instruction. 'mask' selects the bits to write from *bits (relative to
 * base_channel); on return *bits holds the channel states. Returns the
 * instruction result or a negative errno.
 */
int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
             unsigned int mask, unsigned int *bits,
             unsigned int base_channel)
{
    struct comedi_insn insn;
    unsigned int data[2];
    unsigned int n_chan;
    unsigned int shift;
    int ret;

    base_channel = CR_CHAN(base_channel);
    n_chan = comedi_get_n_channels(dev, subdev);
    if (base_channel >= n_chan)
        return -EINVAL;

    memset(&insn, 0, sizeof(insn));
    insn.insn = INSN_BITS;
    insn.chanspec = base_channel;
    insn.n = 2;
    insn.subdev = subdev;

    data[0] = mask;
    data[1] = *bits;

    /*
     * Most drivers ignore the base channel in insn->chanspec.
     * Fix this here if the subdevice has <= 32 channels.
     */
    if (n_chan <= 32) {
        shift = base_channel;
        if (shift) {
            /* shift mask/bits into absolute channel positions instead */
            insn.chanspec = 0;
            data[0] <<= shift;
            data[1] <<= shift;
        }
    } else {
        shift = 0;
    }

    ret = comedi_do_insn(dev, &insn, data);
    /* undo the shift so *bits is again relative to base_channel */
    *bits = data[1] >> shift;
    return ret;
}
EXPORT_SYMBOL_GPL(comedi_dio_bitfield2);
/*
 * Find the first subdevice of the given type, starting the search at
 * index 'subd'. Returns the subdevice index, or -ENODEV when the device
 * is unattached or no match exists.
 */
int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
                  unsigned int subd)
{
    int ret = -ENODEV;

    down_read(&dev->attach_lock);
    if (dev->attached) {
        while (subd < dev->n_subdevices) {
            if (dev->subdevices[subd].type == type) {
                ret = subd;
                break;
            }
            subd++;
        }
    }
    up_read(&dev->attach_lock);
    return ret;
}
EXPORT_SYMBOL_GPL(comedi_find_subdevice_by_type);
/*
 * Number of channels on a subdevice; 0 when the device is unattached or
 * the subdevice index is out of range.
 */
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
{
    int n = 0;

    down_read(&dev->attach_lock);
    if (dev->attached && subdevice < dev->n_subdevices)
        n = dev->subdevices[subdevice].n_chan;
    up_read(&dev->attach_lock);

    return n;
}
EXPORT_SYMBOL_GPL(comedi_get_n_channels);
| gpl-2.0 |
lynxluna/linux-ginger | drivers/acpi/acpica/exmisc.c | 601 | 20272 |
/******************************************************************************
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2008, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#include "amlresrc.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmisc")
/*******************************************************************************
*
* FUNCTION: acpi_ex_get_object_reference
*
* PARAMETERS: obj_desc - Create a reference to this object
* return_desc - Where to store the reference
* walk_state - Current state
*
* RETURN: Status
*
* DESCRIPTION: Obtain and return a "reference" to the target object
* Common code for the ref_of_op and the cond_ref_of_op.
*
******************************************************************************/
acpi_status
acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
                 union acpi_operand_object **return_desc,
                 struct acpi_walk_state *walk_state)
{
    union acpi_operand_object *reference_obj;
    union acpi_operand_object *referenced_obj;

    ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);

    *return_desc = NULL;

    /* The target may be an operand object or an already-resolved namespace node */
    switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
    case ACPI_DESC_TYPE_OPERAND:

        if (obj_desc->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
            return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
        }

        /*
         * Must be a reference to a Local or Arg
         */
        switch (obj_desc->reference.class) {
        case ACPI_REFCLASS_LOCAL:
        case ACPI_REFCLASS_ARG:
        case ACPI_REFCLASS_DEBUG:

            /* The referenced object is the pseudo-node for the local/arg */

            referenced_obj = obj_desc->reference.object;
            break;

        default:

            ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
                    obj_desc->reference.class));
            return_ACPI_STATUS(AE_AML_INTERNAL);
        }
        break;

    case ACPI_DESC_TYPE_NAMED:

        /*
         * A named reference that has already been resolved to a Node
         */
        referenced_obj = obj_desc;
        break;

    default:

        ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
                ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
        return_ACPI_STATUS(AE_TYPE);
    }

    /* Create a new reference object */

    reference_obj =
        acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
    if (!reference_obj) {
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    /* Wrap the target as a RefOf-class reference and hand it back */
    reference_obj->reference.class = ACPI_REFCLASS_REFOF;
    reference_obj->reference.object = referenced_obj;
    *return_desc = reference_obj;

    ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
              "Object %p Type [%s], returning Reference %p\n",
              obj_desc, acpi_ut_get_object_type_name(obj_desc),
              *return_desc));

    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_concat_template
*
* PARAMETERS: Operand0 - First source object
* Operand1 - Second source object
* actual_return_desc - Where to place the return object
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Concatenate two resource templates
*
******************************************************************************/
acpi_status
acpi_ex_concat_template(union acpi_operand_object *operand0,
            union acpi_operand_object *operand1,
            union acpi_operand_object **actual_return_desc,
            struct acpi_walk_state *walk_state)
{
    acpi_status status;
    union acpi_operand_object *return_desc;
    u8 *new_buf;
    u8 *end_tag;
    acpi_size length0;
    acpi_size length1;
    acpi_size new_length;

    ACPI_FUNCTION_TRACE(ex_concat_template);

    /*
     * Find the end_tag descriptor in each resource template.
     * Note1: returned pointers point TO the end_tag, not past it.
     * Note2: zero-length buffers are allowed; treated like one end_tag
     */

    /* Get the length of the first resource template */

    status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* length excludes the end_tag itself (pointer points TO it) */
    length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);

    /* Get the length of the second resource template */

    status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);

    /* Combine both lengths, minimum size will be 2 for end_tag */

    new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);

    /* Create a new buffer object for the result (with one end_tag) */

    return_desc = acpi_ut_create_buffer_object(new_length);
    if (!return_desc) {
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    /*
     * Copy the templates to the new buffer, 0 first, then 1 follows. One
     * end_tag descriptor is copied from Operand1.
     */
    new_buf = return_desc->buffer.pointer;
    ACPI_MEMCPY(new_buf, operand0->buffer.pointer, length0);
    ACPI_MEMCPY(new_buf + length0, operand1->buffer.pointer, length1);

    /* Insert end_tag and set the checksum to zero, means "ignore checksum" */

    new_buf[new_length - 1] = 0;
    new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;

    /* Return the completed resource template */

    *actual_return_desc = return_desc;
    return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_concatenate
*
* PARAMETERS: Operand0 - First source object
* Operand1 - Second source object
* actual_return_desc - Where to place the return object
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
*
******************************************************************************/
acpi_status
acpi_ex_do_concatenate(union acpi_operand_object *operand0,
		       union acpi_operand_object *operand1,
		       union acpi_operand_object **actual_return_desc,
		       struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *local_operand1 = operand1;
	union acpi_operand_object *return_desc;
	char *new_buf;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ex_do_concatenate);

	/*
	 * Convert the second operand if necessary. The first operand
	 * determines the type of the second operand, (See the Data Types
	 * section of the ACPI specification.) Both object types are
	 * guaranteed to be either Integer/String/Buffer by the operand
	 * resolution mechanism.
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:
		/* Implicit String -> Integer conversion uses base 16 */
		status =
		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
		break;

	case ACPI_TYPE_STRING:
		status = acpi_ex_convert_to_string(operand1, &local_operand1,
						   ACPI_IMPLICIT_CONVERT_HEX);
		break;

	case ACPI_TYPE_BUFFER:
		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
		break;

	default:
		ACPI_ERROR((AE_INFO, "Invalid object type: %X",
			    operand0->common.type));
		status = AE_AML_INTERNAL;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Both operands are now known to be the same object type
	 * (Both are Integer, String, or Buffer), and we can now perform the
	 * concatenation.
	 */

	/*
	 * There are three cases to handle:
	 *
	 * 1) Two Integers concatenated to produce a new Buffer
	 * 2) Two Strings concatenated to produce a new String
	 * 3) Two Buffers concatenated to produce a new Buffer
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:

		/* Result of two Integers is a Buffer */
		/* Need enough buffer space for two integers */

		return_desc = acpi_ut_create_buffer_object((acpi_size)
							   ACPI_MUL_2
							   (acpi_gbl_integer_byte_width));
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		new_buf = (char *)return_desc->buffer.pointer;

		/* Copy the first integer, LSB first */

		ACPI_MEMCPY(new_buf, &operand0->integer.value,
			    acpi_gbl_integer_byte_width);

		/* Copy the second integer (LSB first) after the first */

		ACPI_MEMCPY(new_buf + acpi_gbl_integer_byte_width,
			    &local_operand1->integer.value,
			    acpi_gbl_integer_byte_width);
		break;

	case ACPI_TYPE_STRING:

		/* Result of two Strings is a String */
		/* Size is both lengths; create_string adds room for the NUL */

		return_desc = acpi_ut_create_string_object(((acpi_size)
							    operand0->string.
							    length +
							    local_operand1->
							    string.length));
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		new_buf = return_desc->string.pointer;

		/* Concatenate the strings */

		ACPI_STRCPY(new_buf, operand0->string.pointer);
		ACPI_STRCPY(new_buf + operand0->string.length,
			    local_operand1->string.pointer);
		break;

	case ACPI_TYPE_BUFFER:

		/* Result of two Buffers is a Buffer */

		return_desc = acpi_ut_create_buffer_object(((acpi_size)
							    operand0->buffer.
							    length +
							    local_operand1->
							    buffer.length));
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		new_buf = (char *)return_desc->buffer.pointer;

		/* Concatenate the buffers */

		ACPI_MEMCPY(new_buf, operand0->buffer.pointer,
			    operand0->buffer.length);
		ACPI_MEMCPY(new_buf + operand0->buffer.length,
			    local_operand1->buffer.pointer,
			    local_operand1->buffer.length);
		break;

	default:

		/* Invalid object type, should not happen here */

		ACPI_ERROR((AE_INFO, "Invalid object type: %X",
			    operand0->common.type));
		status = AE_AML_INTERNAL;
		goto cleanup;
	}

	*actual_return_desc = return_desc;

      cleanup:
	/* If an implicit conversion created a temporary object, delete it */
	if (local_operand1 != operand1) {
		acpi_ut_remove_reference(local_operand1);
	}
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_math_op
*
* PARAMETERS: Opcode - AML opcode
* Integer0 - Integer operand #0
* Integer1 - Integer operand #1
*
* RETURN: Integer result of the operation
*
* DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
* math functions here is to prevent a lot of pointer dereferencing
* to obtain the operands.
*
******************************************************************************/
/*
 * Execute a binary math AML opcode on two already-resolved integer
 * operands and return the raw integer result. Unknown opcodes yield 0.
 */
acpi_integer
acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
{
	ACPI_FUNCTION_ENTRY();

	switch (opcode) {
	case AML_ADD_OP:	/* Add (Integer0, Integer1, Result) */
		return integer0 + integer1;

	case AML_SUBTRACT_OP:	/* Subtract (Integer0, Integer1, Result) */
		return integer0 - integer1;

	case AML_MULTIPLY_OP:	/* Multiply (Integer0, Integer1, Result) */
		return integer0 * integer1;

	case AML_BIT_AND_OP:	/* And (Integer0, Integer1, Result) */
		return integer0 & integer1;

	case AML_BIT_NAND_OP:	/* NAnd (Integer0, Integer1, Result) */
		return ~(integer0 & integer1);

	case AML_BIT_OR_OP:	/* Or (Integer0, Integer1, Result) */
		return integer0 | integer1;

	case AML_BIT_NOR_OP:	/* NOr (Integer0, Integer1, Result) */
		return ~(integer0 | integer1);

	case AML_BIT_XOR_OP:	/* XOr (Integer0, Integer1, Result) */
		return integer0 ^ integer1;

	case AML_SHIFT_LEFT_OP:	/* shift_left (Operand, shift_count, Result) */
		/*
		 * A shift count >= the integer bit width is undefined
		 * behavior in C, so handle that case explicitly: the
		 * result is defined to be zero.
		 */
		if (integer1 >= acpi_gbl_integer_bit_width)
			return 0;
		return integer0 << integer1;

	case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */
		/* Same oversized-shift-count guard as shift_left above */
		if (integer1 >= acpi_gbl_integer_bit_width)
			return 0;
		return integer0 >> integer1;

	default:
		/* Unrecognized opcode: return zero */
		return 0;
	}
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_logical_numeric_op
*
* PARAMETERS: Opcode - AML opcode
* Integer0 - Integer operand #0
* Integer1 - Integer operand #1
* logical_result - TRUE/FALSE result of the operation
*
* RETURN: Status
*
* DESCRIPTION: Execute a logical "Numeric" AML opcode. For these Numeric
* operators (LAnd and LOr), both operands must be integers.
*
* Note: cleanest machine code seems to be produced by the code
* below, rather than using statements of the form:
* Result = (Integer0 && Integer1);
*
******************************************************************************/
/*
 * Execute a numeric logical AML opcode (LAnd/LOr). Both operands must
 * already be integers; the TRUE/FALSE outcome is stored through
 * logical_result and the returned status flags unknown opcodes.
 */
acpi_status
acpi_ex_do_logical_numeric_op(u16 opcode,
			      acpi_integer integer0,
			      acpi_integer integer1, u8 * logical_result)
{
	acpi_status status = AE_OK;
	u8 result = FALSE;

	ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);

	switch (opcode) {
	case AML_LAND_OP:	/* LAnd (Integer0, Integer1) */
		result = (integer0 && integer1) ? TRUE : FALSE;
		break;

	case AML_LOR_OP:	/* LOr (Integer0, Integer1) */
		result = (integer0 || integer1) ? TRUE : FALSE;
		break;

	default:
		/* Caller passed an opcode this helper does not handle */
		status = AE_AML_INTERNAL;
		break;
	}

	/* Hand back the logical result; status reports opcode validity */
	*logical_result = result;
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_do_logical_op
*
* PARAMETERS: Opcode - AML opcode
* Operand0 - operand #0
* Operand1 - operand #1
* logical_result - TRUE/FALSE result of the operation
*
* RETURN: Status
*
* DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
* functions here is to prevent a lot of pointer dereferencing
* to obtain the operands and to simplify the generation of the
* logical value. For the Numeric operators (LAnd and LOr), both
* operands must be integers. For the other logical operators,
* operands can be any combination of Integer/String/Buffer. The
* first operand determines the type to which the second operand
* will be converted.
*
* Note: cleanest machine code seems to be produced by the code
* below, rather than using statements of the form:
* Result = (Operand0 == Operand1);
*
******************************************************************************/
acpi_status
acpi_ex_do_logical_op(u16 opcode,
		      union acpi_operand_object *operand0,
		      union acpi_operand_object *operand1, u8 * logical_result)
{
	union acpi_operand_object *local_operand1 = operand1;
	acpi_integer integer0;
	acpi_integer integer1;
	u32 length0;
	u32 length1;
	acpi_status status = AE_OK;
	u8 local_result = FALSE;
	int compare;

	ACPI_FUNCTION_TRACE(ex_do_logical_op);

	/*
	 * Convert the second operand if necessary. The first operand
	 * determines the type of the second operand, (See the Data Types
	 * section of the ACPI 3.0+ specification.) Both object types are
	 * guaranteed to be either Integer/String/Buffer by the operand
	 * resolution mechanism.
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:
		/* Implicit String -> Integer conversion uses base 16 */
		status =
		    acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
		break;

	case ACPI_TYPE_STRING:
		status = acpi_ex_convert_to_string(operand1, &local_operand1,
						   ACPI_IMPLICIT_CONVERT_HEX);
		break;

	case ACPI_TYPE_BUFFER:
		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
		break;

	default:
		status = AE_AML_INTERNAL;
		break;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Two cases: 1) Both Integers, 2) Both Strings or Buffers
	 */
	if (operand0->common.type == ACPI_TYPE_INTEGER) {
		/*
		 * 1) Both operands are of type integer
		 *    Note: local_operand1 may have changed above
		 */
		integer0 = operand0->integer.value;
		integer1 = local_operand1->integer.value;

		switch (opcode) {
		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */

			if (integer0 == integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (integer0 > integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */

			if (integer0 < integer1) {
				local_result = TRUE;
			}
			break;

		default:
			status = AE_AML_INTERNAL;
			break;
		}
	} else {
		/*
		 * 2) Both operands are Strings or both are Buffers
		 *    Note: Code below takes advantage of common Buffer/String
		 *    object fields. local_operand1 may have changed above. Use
		 *    memcmp to handle nulls in buffers.
		 */
		length0 = operand0->buffer.length;
		length1 = local_operand1->buffer.length;

		/* Lexicographic compare: compare the data bytes
		 * (only up to the shorter of the two lengths) */

		compare = ACPI_MEMCMP(operand0->buffer.pointer,
				      local_operand1->buffer.pointer,
				      (length0 > length1) ? length1 : length0);

		switch (opcode) {
		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */

			/* Length and all bytes must be equal */

			if ((length0 == length1) && (compare == 0)) {

				/* Length and all bytes match ==> TRUE */

				local_result = TRUE;
			}
			break;

		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (compare > 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}
			if (compare < 0) {
				goto cleanup;	/* FALSE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 > length1) {
				local_result = TRUE;
			}
			break;

		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */

			if (compare > 0) {
				goto cleanup;	/* FALSE */
			}
			if (compare < 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 < length1) {
				local_result = TRUE;
			}
			break;

		default:
			status = AE_AML_INTERNAL;
			break;
		}
	}

      cleanup:

	/* New object was created if implicit conversion performed - delete */

	if (local_operand1 != operand1) {
		acpi_ut_remove_reference(local_operand1);
	}

	/* Return the logical result and status */

	*logical_result = local_result;
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
WildfireDEV/s6 | drivers/video/fb-puv3.c | 1369 | 22021 | /*
* Frame Buffer Driver for PKUnity-v3 Unigfx
* Code specific to PKUnity SoC and UniCore ISA
*
* Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
* Copyright (C) 2001-2010 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/mm.h>
#include <asm/sizes.h>
#include <asm/pgtable.h>
#include <mach/hardware.h>
/* Platform_data reserved for unifb registers. */
#define UNIFB_REGS_NUM 10
/* RAM reserved for the frame buffer. */
#define UNIFB_MEMSIZE (SZ_4M) /* 4 MB for 1024*768*32b */
/*
* cause UNIGFX don not have EDID
* all the modes are organized as follow
*/
/*
 * Mode table searched by fb_find_mode() in unifb_probe() and matched
 * again in unifb_set_par().  NOTE(review): the pixclock column appears
 * to hold a clock rate in Hz (unifb_set_par() passes it straight to
 * clk_set_rate()), not the picosecond period fb_videomode normally
 * carries — confirm against the SoC clock framework.
 */
static const struct fb_videomode unifb_modes[] = {
	/* 0 640x480-60 VESA */
	{ "640x480@60", 60, 640, 480, 25175000, 48, 16, 34, 10, 96, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 1 640x480-75 VESA */
	{ "640x480@75", 75, 640, 480, 31500000, 120, 16, 18, 1, 64, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 2 800x600-60 VESA */
	{ "800x600@60", 60, 800, 600, 40000000, 88, 40, 26, 1, 128, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 3 800x600-75 VESA */
	{ "800x600@75", 75, 800, 600, 49500000, 160, 16, 23, 1, 80, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 4 1024x768-60 VESA */
	{ "1024x768@60", 60, 1024, 768, 65000000, 160, 24, 34, 3, 136, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 5 1024x768-75 VESA */
	{ "1024x768@75", 75, 1024, 768, 78750000, 176, 16, 30, 1, 96, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 6 1280x960-60 VESA */
	{ "1280x960@60", 60, 1280, 960, 108000000, 312, 96, 38, 1, 112, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 7 1440x900-60 VESA */
	{ "1440x900@60", 60, 1440, 900, 106500000, 232, 80, 30, 3, 152, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 8 FIXME 9 1024x600-60 VESA UNTESTED */
	{ "1024x600@60", 60, 1024, 600, 50650000, 160, 24, 26, 1, 136, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 9 FIXME 10 1024x600-75 VESA UNTESTED */
	{ "1024x600@75", 75, 1024, 600, 61500000, 176, 16, 23, 1, 96, 1,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
	/* 10 FIXME 11 1366x768-60 VESA UNTESTED */
	{ "1366x768@60", 60, 1366, 768, 85500000, 256, 58, 18, 1, 112, 3,
	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
/*
 * Fallback variable screen info used when no mode matches:
 * 640x480 @ 16bpp, RGB565 bitfield layout (red 11:5, green 5:6, blue 0:5).
 */
static struct fb_var_screeninfo unifb_default = {
	.xres =		640,
	.yres =		480,
	.xres_virtual =	640,
	.yres_virtual =	480,
	.bits_per_pixel = 16,
	.red =		{ 11, 5, 0 },
	.green =	{ 5, 6, 0 },
	.blue =		{ 0, 5, 0 },
	.activate =	FB_ACTIVATE_NOW,
	.height =	-1,	/* physical size unknown */
	.width =	-1,
	.pixclock =	25175000,	/* matches mode 0 (640x480@60) */
	.left_margin =	48,
	.right_margin =	16,
	.upper_margin =	33,
	.lower_margin =	10,
	.hsync_len =	96,
	.vsync_len =	2,
	.vmode =	FB_VMODE_NONINTERLACED,
};
/* Fixed screen info template; smem_* and mmio_start are filled in
 * by unifb_probe() once memory and I/O resources are known. */
static struct fb_fix_screeninfo unifb_fix = {
	.id =		"UNIGFX FB",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_TRUECOLOR,
	.xpanstep =	1,
	.ypanstep =	1,
	.ywrapstep =	1,
	.accel =	FB_ACCEL_NONE,
};
/*
 * Busy-wait until the UniGFX graphics engine reports idle (bit 24 of
 * UGE_COMMAND), giving up after a fixed number of polls.
 * TODO: polling may eventually be replaced by an interrupt.
 */
static void unifb_sync(struct fb_info *info)
{
	int spins = 0;

	while (spins < 0x10000000) {
		if (readl(UGE_COMMAND) & 0x1000000)
			return;
		spins++;
	}

	/* The loop ran to completion without the idle bit being seen */
	if (spins > 0x8000000)
		dev_warn(info->device, "Warning: UniGFX GE time out ...\n");
}
/*
 * Program the UniGFX graphics engine (GE) registers to perform a solid
 * fill of "region".  The caller (unifb_fillrect) has already clipped the
 * rectangle to the virtual resolution.
 */
static void unifb_prim_fillrect(struct fb_info *info,
				const struct fb_fillrect *region)
{
	int awidth = region->width;
	int aheight = region->height;
	int m_iBpp = info->var.bits_per_pixel;
	int screen_width = info->var.xres;
	int src_sel = 1;	/* from fg_color */
	int pat_sel = 1;
	int src_x0 = 0;
	int dst_x0 = region->dx;
	int src_y0 = 0;
	int dst_y0 = region->dy;
	int rop_alpha_sel = 0;
	/* 0xCC presumably selects a plain source-copy raster op —
	 * TODO confirm against UniGFX GE documentation */
	int rop_alpha_code = 0xCC;
	int x_dir = 1;
	int y_dir = 1;
	int alpha_r = 0;
	int alpha_sel = 0;
	/* Pitches/offsets are in bytes: screen width times bytes-per-pixel */
	int dst_pitch = screen_width * (m_iBpp / 8);
	int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
	int src_pitch = screen_width * (m_iBpp / 8);
	int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
	unsigned int command = 0;
	int clip_region = 0;
	int clip_en = 0;
	int tp_en = 0;
	int fg_color = 0;
	int bottom = info->var.yres - 1;
	int right = info->var.xres - 1;
	int top = 0;

	/* Clip rectangle packed as (y << 16) | x */
	bottom = (bottom << 16) | right;
	/* Pack the operation control bits into the command word */
	command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16)
		| (x_dir << 20) | (y_dir << 21) | (command << 24)
		| (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
	/* The pitch register carries both pitches: dst high, src low */
	src_pitch = (dst_pitch << 16) | src_pitch;
	awidth = awidth | (aheight << 16);
	alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff)
		| (alpha_sel << 16);
	/* Coordinates packed as 13-bit x in the low half, y in the high */
	src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
	dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
	fg_color = region->color;

	/* Wait for the engine to go idle before reprogramming it */
	unifb_sync(info);

	/* Fill color comes from the palette entry for region->color */
	writel(((u32 *)(info->pseudo_palette))[fg_color], UGE_FCOLOR);
	writel(0, UGE_BCOLOR);
	writel(src_pitch, UGE_PITCH);
	writel(src_offset, UGE_SRCSTART);
	writel(dst_offset, UGE_DSTSTART);
	writel(awidth, UGE_WIDHEIGHT);
	writel(top, UGE_CLIP0);
	writel(bottom, UGE_CLIP1);
	writel(alpha_r, UGE_ROPALPHA);
	writel(src_x0, UGE_SRCXY);
	writel(dst_x0, UGE_DSTXY);
	/* Writing the command register kicks off the operation */
	writel(command, UGE_COMMAND);
}
static void unifb_fillrect(struct fb_info *info,
const struct fb_fillrect *region)
{
struct fb_fillrect modded;
int vxres, vyres;
if (info->flags & FBINFO_HWACCEL_DISABLED) {
sys_fillrect(info, region);
return;
}
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
memcpy(&modded, region, sizeof(struct fb_fillrect));
if (!modded.width || !modded.height ||
modded.dx >= vxres || modded.dy >= vyres)
return;
if (modded.dx + modded.width > vxres)
modded.width = vxres - modded.dx;
if (modded.dy + modded.height > vyres)
modded.height = vyres - modded.dy;
unifb_prim_fillrect(info, &modded);
}
/*
 * Program the UniGFX graphics engine to copy "area" from one part of
 * the frame buffer to another.  When the source lies at or below the
 * destination, the copy is done bottom-up (y_dir = 0) so that
 * overlapping regions are handled correctly.
 */
static void unifb_prim_copyarea(struct fb_info *info,
				const struct fb_copyarea *area)
{
	int awidth = area->width;
	int aheight = area->height;
	int m_iBpp = info->var.bits_per_pixel;
	int screen_width = info->var.xres;
	int src_sel = 2;	/* from mem */
	int pat_sel = 0;
	int src_x0 = area->sx;
	int dst_x0 = area->dx;
	int src_y0 = area->sy;
	int dst_y0 = area->dy;
	int rop_alpha_sel = 0;
	int rop_alpha_code = 0xCC;
	int x_dir = 1;
	int y_dir = 1;
	int alpha_r = 0;
	int alpha_sel = 0;
	/* Pitches/offsets are in bytes */
	int dst_pitch = screen_width * (m_iBpp / 8);
	int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
	int src_pitch = screen_width * (m_iBpp / 8);
	int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
	unsigned int command = 0;
	int clip_region = 0;
	int clip_en = 1;
	int tp_en = 0;
	int top = 0;
	int bottom = info->var.yres;
	int right = info->var.xres;
	int fg_color = 0;
	int bg_color = 0;

	/* NOTE(review): sx/sy come from __u32 fields, so these "< 0"
	 * guards can only fire for values above INT_MAX — verify intent */
	if (src_x0 < 0)
		src_x0 = 0;
	if (src_y0 < 0)
		src_y0 = 0;

	if (src_y0 - dst_y0 > 0) {
		/* Source is above destination: top-down copy is safe */
		y_dir = 1;
	} else {
		/* Overlap possible: copy bottom-up, starting offsets at
		 * the last row of each rectangle */
		y_dir = 0;

		src_offset = (src_y0 + aheight) * src_pitch +
				src_x0 * (m_iBpp / 8);
		dst_offset = (dst_y0 + aheight) * dst_pitch +
				dst_x0 * (m_iBpp / 8);
		src_y0 += aheight;
		dst_y0 += aheight;
	}

	/* Pack control bits, pitches, sizes and coordinates into the
	 * GE register formats (see unifb_prim_fillrect for the layout) */
	command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16) |
		(x_dir << 20) | (y_dir << 21) | (command << 24) |
		(clip_region << 23) | (clip_en << 22) | (tp_en << 27);
	src_pitch = (dst_pitch << 16) | src_pitch;
	awidth = awidth | (aheight << 16);
	alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff) |
		(alpha_sel << 16);
	src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
	dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
	bottom = (bottom << 16) | right;

	/* Wait for the engine to go idle before reprogramming it */
	unifb_sync(info);

	writel(src_pitch, UGE_PITCH);
	writel(src_offset, UGE_SRCSTART);
	writel(dst_offset, UGE_DSTSTART);
	writel(awidth, UGE_WIDHEIGHT);
	writel(top, UGE_CLIP0);
	writel(bottom, UGE_CLIP1);
	writel(bg_color, UGE_BCOLOR);
	writel(fg_color, UGE_FCOLOR);
	writel(alpha_r, UGE_ROPALPHA);
	writel(src_x0, UGE_SRCXY);
	writel(dst_x0, UGE_DSTXY);
	/* Writing the command register kicks off the copy */
	writel(command, UGE_COMMAND);
}
static void unifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct fb_copyarea modded;
u32 vxres, vyres;
modded.sx = area->sx;
modded.sy = area->sy;
modded.dx = area->dx;
modded.dy = area->dy;
modded.width = area->width;
modded.height = area->height;
if (info->flags & FBINFO_HWACCEL_DISABLED) {
sys_copyarea(info, area);
return;
}
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
if (!modded.width || !modded.height ||
modded.sx >= vxres || modded.sy >= vyres ||
modded.dx >= vxres || modded.dy >= vyres)
return;
if (modded.sx + modded.width > vxres)
modded.width = vxres - modded.sx;
if (modded.dx + modded.width > vxres)
modded.width = vxres - modded.dx;
if (modded.sy + modded.height > vyres)
modded.height = vyres - modded.sy;
if (modded.dy + modded.height > vyres)
modded.height = vyres - modded.dy;
unifb_prim_copyarea(info, &modded);
}
/* Image blits are not hardware-accelerated; always use the generic
 * system-memory helper. */
static void unifb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	sys_imageblit(info, image);
}
/*
 * Compute the length of one scanline in bytes: the line width in bits
 * is rounded up to a 32-bit (4-byte) boundary and then converted to
 * bytes.
 */
static u_long get_line_length(int xres_virtual, int bpp)
{
	u_long bits = xres_virtual * bpp;

	return ((bits + 31) & ~31) >> 3;
}
/*
* Setting the video mode has been split into two parts.
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
* data from it to check this var.
*/
/*
 * Validate and adjust a requested video mode.  Must not touch hardware:
 * only "var" is massaged into the nearest acceptable configuration.
 * Returns 0 on success, -EINVAL for unsupported depths, -ENOMEM when
 * the virtual resolution would not fit in UNIFB_MEMSIZE.
 */
static int unifb_check_var(struct fb_var_screeninfo *var,
			 struct fb_info *info)
{
	u_long line_length;

	/*
	 * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
	 * as FB_VMODE_SMOOTH_XPAN is only used internally
	 */
	if (var->vmode & FB_VMODE_CONUPDATE) {
		var->vmode |= FB_VMODE_YWRAP;
		var->xoffset = info->var.xoffset;
		var->yoffset = info->var.yoffset;
	}

	/*
	 * Some very basic checks
	 */
	if (!var->xres)
		var->xres = 1;
	if (!var->yres)
		var->yres = 1;
	if (var->xres > var->xres_virtual)
		var->xres_virtual = var->xres;
	if (var->yres > var->yres_virtual)
		var->yres_virtual = var->yres;

	/* Round the requested depth up to the next supported one */
	if (var->bits_per_pixel <= 1)
		var->bits_per_pixel = 1;
	else if (var->bits_per_pixel <= 8)
		var->bits_per_pixel = 8;
	else if (var->bits_per_pixel <= 16)
		var->bits_per_pixel = 16;
	else if (var->bits_per_pixel <= 24)
		var->bits_per_pixel = 24;
	else if (var->bits_per_pixel <= 32)
		var->bits_per_pixel = 32;
	else
		return -EINVAL;

	/* The virtual screen must contain the visible screen plus offset */
	if (var->xres_virtual < var->xoffset + var->xres)
		var->xres_virtual = var->xoffset + var->xres;
	if (var->yres_virtual < var->yoffset + var->yres)
		var->yres_virtual = var->yoffset + var->yres;

	/*
	 * Memory limit
	 */
	line_length =
	    get_line_length(var->xres_virtual, var->bits_per_pixel);
	if (line_length * var->yres_virtual > UNIFB_MEMSIZE)
		return -ENOMEM;

	/*
	 * Now that we checked it we alter var. The reason being is that the
	 * video mode passed in might not work but slight changes to it might
	 * make it work. This way we let the user know what is acceptable.
	 */
	switch (var->bits_per_pixel) {
	case 1:
	case 8:
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 0;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 16:		/* RGBA 5551 */
		if (var->transp.length) {
			var->red.offset = 0;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 5;
			var->blue.offset = 10;
			var->blue.length = 5;
			var->transp.offset = 15;
			var->transp.length = 1;
		} else {	/* RGB 565 */
			var->red.offset = 11;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 6;
			var->blue.offset = 0;
			var->blue.length = 5;
			var->transp.offset = 0;
			var->transp.length = 0;
		}
		break;
	case 24:		/* RGB 888 */
		var->red.offset = 0;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 16;
		var->blue.length = 8;
		var->transp.offset = 0;
		var->transp.length = 0;
		break;
	case 32:		/* RGBA 8888 */
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
		break;
	}
	var->red.msb_right = 0;
	var->green.msb_right = 0;
	var->blue.msb_right = 0;
	var->transp.msb_right = 0;

	return 0;
}
/*
* This routine actually sets the video mode. It's in here where we
* the hardware state info->par and fix which can be affected by the
* change in par. For this driver it doesn't do much.
*/
/*
 * Apply the already-validated mode in info->var to the hardware:
 * optionally set the VGA pixel clock (when power management is built
 * in), then program the UniGFX display-engine timing and format
 * registers.  Returns 0 on success or a negative errno.
 */
static int unifb_set_par(struct fb_info *info)
{
	int hTotal, vTotal, hSyncStart, hSyncEnd, vSyncStart, vSyncEnd;
	int format;

#ifdef CONFIG_PUV3_PM
	struct clk *clk_vga;
	u32 pixclk = 0;
	int i;

	/* Find the pixel clock for the current timings by matching all
	 * eleven entries (indices 0..10) of unifb_modes */
	for (i = 0; i <= 10; i++) {
		if (info->var.xres == unifb_modes[i].xres
		    && info->var.yres == unifb_modes[i].yres
		    && info->var.upper_margin == unifb_modes[i].upper_margin
		    && info->var.lower_margin == unifb_modes[i].lower_margin
		    && info->var.left_margin == unifb_modes[i].left_margin
		    && info->var.right_margin == unifb_modes[i].right_margin
		    && info->var.hsync_len == unifb_modes[i].hsync_len
		    && info->var.vsync_len == unifb_modes[i].vsync_len) {
			pixclk = unifb_modes[i].pixclock;
			break;
		}
	}

	/* set clock rate */
	clk_vga = clk_get(info->device, "VGA_CLK");
	if (clk_vga == ERR_PTR(-ENOENT))
		return -ENOENT;

	if (pixclk != 0) {
		if (clk_set_rate(clk_vga, pixclk)) { /* set clock failed */
			/* Requested rate rejected: fall back to the
			 * driver defaults and try that rate instead */
			info->fix = unifb_fix;
			info->var = unifb_default;
			if (clk_set_rate(clk_vga, unifb_default.pixclock))
				return -EINVAL;
		}
	}
#endif

	info->fix.line_length = get_line_length(info->var.xres_virtual,
						info->var.bits_per_pixel);

	/* Derive the sync/total timing points from the margins */
	hSyncStart = info->var.xres + info->var.right_margin;
	hSyncEnd = hSyncStart + info->var.hsync_len;
	hTotal = hSyncEnd + info->var.left_margin;

	vSyncStart = info->var.yres + info->var.lower_margin;
	vSyncEnd = vSyncStart + info->var.vsync_len;
	vTotal = vSyncEnd + info->var.upper_margin;

	switch (info->var.bits_per_pixel) {
	case 8:
		format = UDE_CFG_DST8;
		break;
	case 16:
		format = UDE_CFG_DST16;
		break;
	case 24:
		format = UDE_CFG_DST24;
		break;
	case 32:
		format = UDE_CFG_DST32;
		break;
	default:
		return -EINVAL;
	}

	writel(info->fix.smem_start, UDE_FSA);
	writel(info->var.yres, UDE_LS);
	writel(get_line_length(info->var.xres,
			info->var.bits_per_pixel) >> 3, UDE_PS);
			/* >> 3 for hardware required. */
	writel((hTotal << 16) | (info->var.xres), UDE_HAT);
	writel(((hTotal - 1) << 16) | (info->var.xres - 1), UDE_HBT);
	writel(((hSyncEnd - 1) << 16) | (hSyncStart - 1), UDE_HST);
	writel((vTotal << 16) | (info->var.yres), UDE_VAT);
	writel(((vTotal - 1) << 16) | (info->var.yres - 1), UDE_VBT);
	writel(((vSyncEnd - 1) << 16) | (vSyncStart - 1), UDE_VST);
	writel(UDE_CFG_GDEN_ENABLE | UDE_CFG_TIMEUP_ENABLE
			| format | 0xC0000001, UDE_CFG);

	return 0;
}
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
* entries in the var structure). Return != 0 for invalid regno.
*/
/*
 * Set a single color register.  The values supplied are already
 * rounded down to the hardware's capabilities (according to the
 * entries in the var structure).  Returns non-zero for an invalid
 * regno; only the first 16 entries of the truecolor pseudo-palette
 * are actually stored.
 */
static int unifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			 u_int transp, struct fb_info *info)
{
	if (regno >= 256)	/* no. of hw registers */
		return 1;

	/* grayscale works only partially under directcolor */
	if (info->var.grayscale) {
		/* grayscale = 0.30*R + 0.59*G + 0.11*B */
		red = green = blue =
		    (red * 77 + green * 151 + blue * 28) >> 8;
	}

	/* Scale a 16-bit color component down to "width" bits, rounding */
#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
	switch (info->fix.visual) {
	case FB_VISUAL_TRUECOLOR:
	case FB_VISUAL_PSEUDOCOLOR:
		red = CNVT_TOHW(red, info->var.red.length);
		green = CNVT_TOHW(green, info->var.green.length);
		blue = CNVT_TOHW(blue, info->var.blue.length);
		transp = CNVT_TOHW(transp, info->var.transp.length);
		break;
	case FB_VISUAL_DIRECTCOLOR:
		red = CNVT_TOHW(red, 8);	/* expect 8 bit DAC */
		green = CNVT_TOHW(green, 8);
		blue = CNVT_TOHW(blue, 8);
		/* hey, there is bug in transp handling... */
		transp = CNVT_TOHW(transp, 8);
		break;
	}
#undef CNVT_TOHW

	/* Truecolor has hardware independent palette */
	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
		u32 v;

		/* Only the 16-entry console pseudo-palette is kept */
		if (regno >= 16)
			return 1;

		/* Pack the components using the current bitfield layout */
		v = (red << info->var.red.offset) |
		    (green << info->var.green.offset) |
		    (blue << info->var.blue.offset) |
		    (transp << info->var.transp.offset);
		switch (info->var.bits_per_pixel) {
		case 8:
			break;
		case 16:
		case 24:
		case 32:
			((u32 *) (info->pseudo_palette))[regno] = v;
			break;
		default:
			return 1;
		}
		return 0;
	}
	return 0;
}
/*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
/*
 * Pan or Wrap the Display.
 *
 * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag.
 * Returns 0 on success, -EINVAL when the requested offsets fall outside
 * the virtual screen.
 *
 * Fix: the original also tested "var->yoffset < 0", but yoffset is an
 * unsigned __u32, so that comparison was always false (dead code) and
 * has been removed.
 */
static int unifb_pan_display(struct fb_var_screeninfo *var,
			   struct fb_info *info)
{
	if (var->vmode & FB_VMODE_YWRAP) {
		/* Wrapping: yoffset must be inside the virtual screen and
		 * horizontal panning is not supported */
		if (var->yoffset >= info->var.yres_virtual
		    || var->xoffset)
			return -EINVAL;
	} else {
		/* Panning: the visible window must stay within the
		 * virtual resolution */
		if (var->xoffset + info->var.xres > info->var.xres_virtual ||
		    var->yoffset + info->var.yres > info->var.yres_virtual)
			return -EINVAL;
	}
	info->var.xoffset = var->xoffset;
	info->var.yoffset = var->yoffset;
	if (var->vmode & FB_VMODE_YWRAP)
		info->var.vmode |= FB_VMODE_YWRAP;
	else
		info->var.vmode &= ~FB_VMODE_YWRAP;
	return 0;
}
/*
 * Map the frame buffer into user space as uncached memory.
 *
 * NOTE(review): this function is only referenced through unifb_ops in
 * this file and looks like it could be made static — confirm no
 * external declaration exists before changing its linkage.
 */
int unifb_mmap(struct fb_info *info,
		    struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
}
/* Operations exported to the fbdev core; reads/writes go through the
 * generic system-memory helpers. */
static struct fb_ops unifb_ops = {
	.fb_read        = fb_sys_read,
	.fb_write       = fb_sys_write,
	.fb_check_var	= unifb_check_var,
	.fb_set_par	= unifb_set_par,
	.fb_setcolreg	= unifb_setcolreg,
	.fb_pan_display	= unifb_pan_display,
	.fb_fillrect	= unifb_fillrect,
	.fb_copyarea	= unifb_copyarea,
	.fb_imageblit	= unifb_imageblit,
	.fb_mmap	= unifb_mmap,
};
/*
* Initialisation
*/
static int unifb_probe(struct platform_device *dev)
{
struct fb_info *info;
u32 unifb_regs[UNIFB_REGS_NUM];
int retval = -ENOMEM;
struct resource *iomem;
void *videomemory;
videomemory = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP,
get_order(UNIFB_MEMSIZE));
if (!videomemory)
goto err;
memset(videomemory, 0, UNIFB_MEMSIZE);
unifb_fix.smem_start = virt_to_phys(videomemory);
unifb_fix.smem_len = UNIFB_MEMSIZE;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
unifb_fix.mmio_start = iomem->start;
info = framebuffer_alloc(sizeof(u32)*256, &dev->dev);
if (!info)
goto err;
info->screen_base = (char __iomem *)videomemory;
info->fbops = &unifb_ops;
retval = fb_find_mode(&info->var, info, NULL,
unifb_modes, 10, &unifb_modes[0], 16);
if (!retval || (retval == 4))
info->var = unifb_default;
info->fix = unifb_fix;
info->pseudo_palette = info->par;
info->par = NULL;
info->flags = FBINFO_FLAG_DEFAULT;
#ifdef FB_ACCEL_PUV3_UNIGFX
info->fix.accel = FB_ACCEL_PUV3_UNIGFX;
#endif
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0)
goto err1;
retval = register_framebuffer(info);
if (retval < 0)
goto err2;
platform_set_drvdata(dev, info);
platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
printk(KERN_INFO
"fb%d: Virtual frame buffer device, using %dM of video memory\n",
info->node, UNIFB_MEMSIZE >> 20);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
err1:
framebuffer_release(info);
err:
return retval;
}
static int unifb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
if (info) {
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
return 0;
}
#ifdef CONFIG_PM
/* Restore the ten UniGFX display-engine registers captured by
 * unifb_suspend() and mark the device as powered on. */
static int unifb_resume(struct platform_device *dev)
{
	int rc = 0;
	u32 *unifb_regs = dev->dev.platform_data;

	/* Nothing to do if we never left the ON state */
	if (dev->dev.power.power_state.event == PM_EVENT_ON)
		return 0;

	console_lock();

	if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		/* Re-program the registers in the order they were saved */
		writel(unifb_regs[0], UDE_FSA);
		writel(unifb_regs[1], UDE_LS);
		writel(unifb_regs[2], UDE_PS);
		writel(unifb_regs[3], UDE_HAT);
		writel(unifb_regs[4], UDE_HBT);
		writel(unifb_regs[5], UDE_HST);
		writel(unifb_regs[6], UDE_VAT);
		writel(unifb_regs[7], UDE_VBT);
		writel(unifb_regs[8], UDE_VST);
		writel(unifb_regs[9], UDE_CFG);
	}
	dev->dev.power.power_state = PMSG_ON;

	console_unlock();

	return rc;
}
/* Save the UniGFX display-engine registers into the platform data so
 * unifb_resume() can restore them, then record the new power state. */
static int unifb_suspend(struct platform_device *dev, pm_message_t mesg)
{
	u32 *unifb_regs = dev->dev.platform_data;

	/* Capture all ten UDE registers (indices match unifb_resume) */
	unifb_regs[0] = readl(UDE_FSA);
	unifb_regs[1] = readl(UDE_LS);
	unifb_regs[2] = readl(UDE_PS);
	unifb_regs[3] = readl(UDE_HAT);
	unifb_regs[4] = readl(UDE_HBT);
	unifb_regs[5] = readl(UDE_HST);
	unifb_regs[6] = readl(UDE_VAT);
	unifb_regs[7] = readl(UDE_VBT);
	unifb_regs[8] = readl(UDE_VST);
	unifb_regs[9] = readl(UDE_CFG);

	/* Already in the requested state: nothing further to do */
	if (mesg.event == dev->dev.power.power_state.event)
		return 0;

	switch (mesg.event) {
	case PM_EVENT_FREEZE:	/* about to take snapshot */
	case PM_EVENT_PRETHAW:	/* before restoring snapshot */
		goto done;
	}

	console_lock();

	/* do nothing... */

	console_unlock();

done:
	dev->dev.power.power_state = mesg;

	return 0;
}
#else
#define unifb_resume NULL
#define unifb_suspend NULL
#endif
/* Platform driver glue; unifb_suspend/unifb_resume collapse to NULL
 * when CONFIG_PM is not set (see the #ifdef block above). */
static struct platform_driver unifb_driver = {
	.probe	 = unifb_probe,
	.remove	 = unifb_remove,
	.resume	 = unifb_resume,
	.suspend = unifb_suspend,
	.driver	 = {
		.name	= "PKUnity-v3-UNIGFX",
	},
};
/*
 * Boot/module entry point: honor any "unifb" kernel command-line
 * options when built in, then register the platform driver.
 */
static int __init unifb_init(void)
{
	int ret = 0;

#ifndef MODULE
	/* A non-zero return from fb_get_options() means the driver was
	 * disabled on the command line */
	if (fb_get_options("unifb", NULL))
		ret = -ENODEV;
#endif
	if (!ret)
		ret = platform_driver_register(&unifb_driver);

	return ret;
}
module_init(unifb_init);

/* Module unload: unregister the platform driver. */
static void __exit unifb_exit(void)
{
	platform_driver_unregister(&unifb_driver);
}

module_exit(unifb_exit);

MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.