repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
edoko/android_samsung_galaxy_pop | drivers/bluetooth/hci_ldisc.c | 406 | 16319 | /*
*
* Bluetooth HCI UART driver
*
* Copyright (C) 2000-2001 Qualcomm Incorporated
* Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
* Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/signal.h>
#include <linux/ioctl.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
#define VERSION "2.2"
static int reset = 0;
static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
int hci_uart_register_proto(struct hci_uart_proto *p)
{
if (p->id >= HCI_UART_MAX_PROTO)
return -EINVAL;
if (hup[p->id])
return -EEXIST;
hup[p->id] = p;
return 0;
}
int hci_uart_unregister_proto(struct hci_uart_proto *p)
{
if (p->id >= HCI_UART_MAX_PROTO)
return -EINVAL;
if (!hup[p->id])
return -EINVAL;
hup[p->id] = NULL;
return 0;
}
static struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
{
if (id >= HCI_UART_MAX_PROTO)
return NULL;
return hup[id];
}
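/*
 * Illustrative sketch only (never compiled): a protocol backend such
 * as H4 or BCSP plugs into the hup[] table above roughly like this;
 * the descriptor and function names here are hypothetical placeholders.
 */
#if 0
static struct hci_uart_proto example_proto = {
.id = HCI_UART_H4, /* index into hup[], must be < HCI_UART_MAX_PROTO */
/* .open, .close, .recv, .enqueue, .dequeue, .flush callbacks here */
};

static int __init example_proto_init(void)
{
/* -EINVAL for a bad id, -EEXIST if the slot is already taken */
return hci_uart_register_proto(&example_proto);
}
#endif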
static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
{
struct hci_dev *hdev = hu->hdev;
#if defined(CONFIG_BT_CSR8811)
if (hdev == NULL)
return;
#endif
/* Update HCI stat counters */
switch (pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
}
}
static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
{
struct sk_buff *skb = hu->tx_skb;
if (!skb)
skb = hu->proto->dequeue(hu);
else
hu->tx_skb = NULL;
return skb;
}
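/*
 * hci_uart_tx_wakeup() below uses two atomic bits as a lock-free
 * handshake: HCI_UART_SENDING marks that a sender is already draining
 * the queue, so a concurrent caller only sets HCI_UART_TX_WAKEUP and
 * returns; the active sender keeps looping via "goto restart" until
 * that bit stays clear, so no wakeup is lost.
 */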
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
struct tty_struct *tty = hu->tty;
struct hci_dev *hdev = hu->hdev;
struct sk_buff *skb;
#if defined(CONFIG_BT_CSR8811)
if (hdev == NULL)
return -1;
#endif
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
return 0;
}
BT_DBG("");
restart:
clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
while ((skb = hci_uart_dequeue(hu))) {
int len;
/* Samsung Bluetooth feature, 2012.01.19:
* add a wake_peer uart operation which is called before starting UART TX
*/
#if !defined(CONFIG_BT_CSR8811)
if (hdev->wake_peer)
hdev->wake_peer(hdev);
#endif
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
len = tty->ops->write(tty, skb->data, skb->len);
hdev->stat.byte_tx += len;
skb_pull(skb, len);
if (skb->len) {
hu->tx_skb = skb;
break;
}
hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
kfree_skb(skb);
}
if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
return 0;
}
/* ------- Interface to HCI layer ------ */
/* Initialize device */
static int hci_uart_open(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
/* Nothing to do for UART driver */
set_bit(HCI_RUNNING, &hdev->flags);
return 0;
}
/* Reset device */
static int hci_uart_flush(struct hci_dev *hdev)
{
struct hci_uart *hu = (struct hci_uart *) hdev->driver_data;
struct tty_struct *tty = hu->tty;
BT_DBG("hdev %p tty %p", hdev, tty);
if (hu->tx_skb) {
kfree_skb(hu->tx_skb); hu->tx_skb = NULL;
}
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
hu->proto->flush(hu);
return 0;
}
/* Close device */
static int hci_uart_close(struct hci_dev *hdev)
{
BT_DBG("hdev %p", hdev);
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
hci_uart_flush(hdev);
hdev->flush = NULL;
return 0;
}
/* Send frames from HCI layer */
static int hci_uart_send_frame(struct sk_buff *skb)
{
struct hci_dev* hdev = (struct hci_dev *) skb->dev;
struct hci_uart *hu;
if (!hdev) {
BT_ERR("Frame for unknown device (hdev=NULL)");
return -ENODEV;
}
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
hu = (struct hci_uart *) hdev->driver_data;
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
hu->proto->enqueue(hu, skb);
hci_uart_tx_wakeup(hu);
return 0;
}
static void hci_uart_destruct(struct hci_dev *hdev)
{
if (!hdev)
return;
BT_DBG("%s", hdev->name);
}
/* ------ LDISC part ------ */
/* hci_uart_tty_open
*
* Called when line discipline changed to HCI_UART.
*
* Arguments:
* tty pointer to tty info structure
* Return Value:
* 0 if success, otherwise error code
*/
static int hci_uart_tty_open(struct tty_struct *tty)
{
struct hci_uart *hu = (void *) tty->disc_data;
BT_DBG("tty %p", tty);
/* FIXME: This btw is bogus, nothing requires the old ldisc to clear
the pointer */
if (hu)
return -EEXIST;
/* Error if the tty has no write op instead of leaving an exploitable
hole */
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
tty->disc_data = hu;
hu->tty = tty;
tty->receive_room = 65536;
spin_lock_init(&hu->rx_lock);
/* Flush any pending characters in the driver and line discipline. */
/* FIXME: why is this needed? Note: don't use ldisc_ref here, as the
open path runs before the ldisc is referenceable */
if (tty->ldisc->ops->flush_buffer)
tty->ldisc->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
return 0;
}
/* hci_uart_tty_close()
*
* Called when the line discipline is changed to something
* else, the tty is closed, or the tty detects a hangup.
*/
static void hci_uart_tty_close(struct tty_struct *tty)
{
struct hci_uart *hu = (void *)tty->disc_data;
BT_DBG("tty %p", tty);
/* Detach from the tty */
tty->disc_data = NULL;
if (hu) {
struct hci_dev *hdev = hu->hdev;
if (hdev)
hci_uart_close(hdev);
if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
if (hdev) {
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
hu->proto->close(hu);
}
kfree(hu);
}
}
/* hci_uart_tty_wakeup()
*
* Callback for transmit wakeup. Called when low level
* device driver can accept more send data.
*
* Arguments: tty pointer to associated tty instance data
* Return Value: None
*/
static void hci_uart_tty_wakeup(struct tty_struct *tty)
{
struct hci_uart *hu = (void *)tty->disc_data;
BT_DBG("");
if (!hu)
return;
#if defined(CONFIG_BT_CSR8811)
/* check hu before dereferencing hu->hdev */
if (hu->hdev == NULL)
return;
#endif
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (tty != hu->tty)
return;
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
hci_uart_tx_wakeup(hu);
}
/* hci_uart_tty_receive()
*
* Called by tty low level driver when receive data is
* available.
*
* Arguments: tty pointer to tty instance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes
*
* Return Value: None
*/
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
struct hci_uart *hu = (void *)tty->disc_data;
if (!hu || tty != hu->tty)
return;
#if defined(CONFIG_BT_CSR8811)
/* check hu before dereferencing hu->hdev */
if (hu->hdev == NULL)
return;
#endif
if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
return;
/* CSR8811 Project(Dayton.Kim) 2012.02.23 */
if (hu->proto == NULL || hu->proto->recv == NULL || data == NULL)
return;
/* CSR8811 Project(Dayton.Kim) end */
spin_lock(&hu->rx_lock);
hu->proto->recv(hu, (void *) data, count);
hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
}
static int hci_uart_register_dev(struct hci_uart *hu)
{
struct hci_dev *hdev;
BT_DBG("");
BT_ERR("hci_uart_register_dev");
/* Initialize and register HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
BT_ERR("Can't allocate HCI device");
return -ENOMEM;
}
hu->hdev = hdev;
hdev->bus = HCI_UART;
hdev->driver_data = hu;
hdev->open = hci_uart_open;
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
hdev->destruct = hci_uart_destruct;
hdev->parent = hu->tty->dev;
hdev->owner = THIS_MODULE;
if (!reset)
set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
return -ENODEV;
}
return 0;
}
static int hci_uart_set_proto(struct hci_uart *hu, int id)
{
struct hci_uart_proto *p;
int err;
p = hci_uart_get_proto(id);
if (!p)
return -EPROTONOSUPPORT;
err = p->open(hu);
if (err)
return err;
hu->proto = p;
err = hci_uart_register_dev(hu);
if (err) {
p->close(hu);
return err;
}
return 0;
}
/* hci_uart_tty_ioctl()
*
* Process IOCTL system call for the tty device.
*
* Arguments:
*
* tty pointer to tty instance data
* file pointer to open file object for device
* cmd IOCTL command code
* arg argument for IOCTL call (cmd dependent)
*
* Return Value: Command dependent
*/
static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
struct hci_uart *hu = (void *)tty->disc_data;
int err = 0;
BT_DBG("");
/* Verify the status of the device */
if (!hu)
return -EBADF;
switch (cmd) {
case HCIUARTSETPROTO:
if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
err = hci_uart_set_proto(hu, arg);
if (err) {
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
return err;
}
} else
return -EBUSY;
break;
case HCIUARTGETPROTO:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
return hu->proto->id;
return -EUNATCH;
case HCIUARTGETDEVICE:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
return hu->hdev->id;
return -EUNATCH;
case HCIUARTSETFLAGS:
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
return -EBUSY;
hu->hdev_flags = arg;
break;
case HCIUARTGETFLAGS:
return hu->hdev_flags;
default:
err = n_tty_ioctl_helper(tty, file, cmd, arg);
break;
}
return err;
}
/*
* We don't provide read/write/poll interface for user space.
*/
struct hci_uart_hook {
unsigned int len;
unsigned char *head;
unsigned char data[HCI_MAX_EVENT_SIZE + 1]; /* save packet type at data[0] and then place event packet */
};
static struct hci_uart_hook *hook;
static DECLARE_WAIT_QUEUE_HEAD(read_wait);
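/*
 * Hook lifecycle, as implemented below: a write from the vendor's
 * brcm_poke_helper allocates "hook", the RX path copies the next HCI
 * event into it via hci_uart_tty_read_hook() and wakes read_wait, and
 * the matching read drains the buffer and frees it.
 */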
void hci_uart_tty_read_hook(struct sk_buff *skb)
{
if (!hook) {
BT_DBG("%s: hooking wasn't requested, skip it", __func__);
goto hci_uart_tty_read_hook_exit;
}
if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT) {
BT_DBG("%s: Packet type is %d, skip it", __func__, bt_cb(skb)->pkt_type);
goto hci_uart_tty_read_hook_exit;
}
BT_DBG("%s: Received len = %d", __func__, skb->len);
if (skb->len > sizeof(hook->data)-1) {
BT_DBG("Packet size exceeds max len, skip it");
goto hci_uart_tty_read_hook_exit;
}
memcpy(hook->data, &bt_cb(skb)->pkt_type, 1);
skb_copy_from_linear_data(skb, &hook->data[1], skb->len);
hook->len = skb->len + 1;
hci_uart_tty_read_hook_exit:
wake_up_interruptible(&read_wait);
}
EXPORT_SYMBOL(hci_uart_tty_read_hook);
static int hci_uart_tty_access_allowed(void)
{
char name[TASK_COMM_LEN];
get_task_comm(name, current_thread_info()->task);
BT_DBG("%s: %s", __func__, name);
if (strcmp(name, "brcm_poke_helpe")) {
BT_ERR("%s isn't allowed", name);
return -EACCES;
}
return 0;
}
static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t nr)
{
struct hci_uart *hu = (void *) tty->disc_data;
struct hci_dev *hdev = hu->hdev;
int ret = 0, count;
BT_DBG("%s: hu = 0x%p hci_dev = 0x%p, nr = %d", __func__, hu, hdev, nr);
ret = hci_uart_tty_access_allowed();
if (ret < 0)
return ret;
if (!hook)
return -ENOMEM;
if (!hook->len)
interruptible_sleep_on_timeout(&read_wait, 3 * HZ);
if (!hook->len) {
BT_INFO("No data to read");
} else {
count = nr > hook->len ? hook->len : nr;
ret = copy_to_user(buf, hook->head, count);
hook->len -= (count - ret);
hook->head += (count - ret);
ret = count - ret;
}
if (!hook->len) {
BT_DBG("%s: free hook", __func__);
kfree(hook);
hook = NULL;
}
BT_DBG("%s: ret = %d", __func__, ret);
return ret;
}
static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *data, size_t count)
{
struct hci_uart *hu = (void *) tty->disc_data;
struct hci_dev *hdev = hu->hdev;
int ret;
BT_DBG("%s: hu = 0x%p, hci_dev = 0x%p", __func__, hu, hdev);
ret = hci_uart_tty_access_allowed();
if (ret < 0)
return ret;
if (!hdev)
return -ENODEV;
if (!hook)
hook = kzalloc(sizeof(*hook), GFP_KERNEL);
else {
/* Because brcm_poke_helper's read/write is serialized,
* it's almost safe to init the hook data here
*/
BT_INFO("hook data still remains");
memset(hook, 0, sizeof(*hook));
}
if (!hook)
return -ENOMEM;
hook->head = hook->data;
hci_uart_flush(hdev);
#if 1
if (hdev->wake_peer)
hdev->wake_peer(hdev);
#endif
ret = tty->ops->write(tty, data, count);
BT_DBG("%s: ret = %d", __func__, ret);
return ret;
}
static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
}
static int __init hci_uart_init(void)
{
static struct tty_ldisc_ops hci_uart_ldisc;
int err;
BT_INFO("HCI UART driver ver %s", VERSION);
/* Register the tty discipline */
memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
hci_uart_ldisc.name = "n_hci";
hci_uart_ldisc.open = hci_uart_tty_open;
hci_uart_ldisc.close = hci_uart_tty_close;
hci_uart_ldisc.read = hci_uart_tty_read;
hci_uart_ldisc.write = hci_uart_tty_write;
hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
hci_uart_ldisc.poll = hci_uart_tty_poll;
hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
hci_uart_ldisc.owner = THIS_MODULE;
if ((err = tty_register_ldisc(N_HCI, &hci_uart_ldisc))) {
BT_ERR("HCI line discipline registration failed. (%d)", err);
return err;
}
#ifdef CONFIG_BT_HCIUART_H4
h4_init();
#endif
#ifdef CONFIG_BT_HCIUART_BCSP
bcsp_init();
#endif
#ifdef CONFIG_BT_HCIUART_LL
ll_init();
#endif
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_init();
#endif
return 0;
}
static void __exit hci_uart_exit(void)
{
int err;
#ifdef CONFIG_BT_HCIUART_H4
h4_deinit();
#endif
#ifdef CONFIG_BT_HCIUART_BCSP
bcsp_deinit();
#endif
#ifdef CONFIG_BT_HCIUART_LL
ll_deinit();
#endif
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_deinit();
#endif
/* Release tty registration of line discipline */
if ((err = tty_unregister_ldisc(N_HCI)))
BT_ERR("Can't unregister HCI line discipline (%d)", err);
}
module_init(hci_uart_init);
module_exit(hci_uart_exit);
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_HCI);
| gpl-2.0 |
ender-chen/linux-rt-rpi | drivers/net/wireless/ath/wil6210/fw.c | 662 | 1545 | /*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include "wil6210.h"
#include "fw.h"
MODULE_FIRMWARE(WIL_FW_NAME);
/* target operations */
/* register read */
#define R(a) ioread32(wil->csr + HOSTADDR(a))
/* register write. wmb() to make sure it is completed */
#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
/* register set = read, OR, write */
#define S(a, v) W(a, R(a) | v)
/* register clear = read, AND with inverted, write */
#define C(a, v) W(a, R(a) & ~v)
static
void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
size_t count)
{
volatile u32 __iomem *d = dst;
for (count += 4; count > 4; count -= 4)
__raw_writel(val, d++);
}
#include "fw_inc.c"
| gpl-2.0 |
Razdroid/android_kernel_raspberrypi | arch/powerpc/platforms/512x/mpc512x_shared.c | 1942 | 12075 | /*
* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: John Rigby <jrigby@freescale.com>
*
* Description:
* MPC512x Shared code
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of_platform.h>
#include <linux/fsl-diu-fb.h>
#include <linux/bootmem.h>
#include <sysdev/fsl_soc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/mpc5121.h>
#include <asm/mpc52xx_psc.h>
#include "mpc512x.h"
static struct mpc512x_reset_module __iomem *reset_module_base;
static void __init mpc512x_restart_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
if (!np)
return;
reset_module_base = of_iomap(np, 0);
of_node_put(np);
}
void mpc512x_restart(char *cmd)
{
if (reset_module_base) {
/* Enable software reset "RSTE" */
out_be32(&reset_module_base->rpr, 0x52535445);
/* Set software hard reset */
out_be32(&reset_module_base->rcr, 0x2);
} else {
pr_err("Restart module not mapped.\n");
}
for (;;)
;
}
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
struct fsl_diu_shared_fb {
u8 gamma[0x300]; /* 32-bit aligned! */
struct diu_ad ad0; /* 32-bit aligned! */
phys_addr_t fb_phys;
size_t fb_len;
bool in_use;
};
#define DIU_DIV_MASK 0x000000ff
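/*
 * pixclock below arrives in picoseconds per pixel, so the code converts
 * it to Hz in steps to stay within 32 bits: e.g. for 39722 ps,
 * (1000000000 / 39722) * 1000 ~= 25174000 Hz (~25.17 MHz), and the
 * +/-5% search window is then freq / 20.
 */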
void mpc512x_set_pixel_clock(unsigned int pixclock)
{
unsigned long bestval, bestfreq, speed, busfreq;
unsigned long minpixclock, maxpixclock, pixval;
struct mpc512x_ccm __iomem *ccm;
struct device_node *np;
u32 temp;
long err;
int i;
np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock");
if (!np) {
pr_err("Can't find clock control module.\n");
return;
}
ccm = of_iomap(np, 0);
of_node_put(np);
if (!ccm) {
pr_err("Can't map clock control module reg.\n");
return;
}
np = of_find_node_by_type(NULL, "cpu");
if (np) {
const unsigned int *prop =
of_get_property(np, "bus-frequency", NULL);
of_node_put(np);
if (prop) {
busfreq = *prop;
} else {
pr_err("Can't get bus-frequency property\n");
return;
}
} else {
pr_err("Can't find 'cpu' node.\n");
return;
}
/* Pixel Clock configuration */
pr_debug("DIU: Bus Frequency = %lu\n", busfreq);
speed = busfreq * 4; /* DIU_DIV ratio is 4 * CSB_CLK / DIU_CLK */
/* Calculate the pixel clock with the smallest error */
/* calculate the following in steps to avoid overflow */
pr_debug("DIU pixclock in ps - %d\n", pixclock);
temp = (1000000000 / pixclock) * 1000;
pixclock = temp;
pr_debug("DIU pixclock freq - %u\n", pixclock);
temp = temp / 20; /* pixclock * 0.05 */
pr_debug("deviation = %d\n", temp);
minpixclock = pixclock - temp;
maxpixclock = pixclock + temp;
pr_debug("DIU minpixclock - %lu\n", minpixclock);
pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
pixval = speed/pixclock;
pr_debug("DIU pixval = %lu\n", pixval);
err = LONG_MAX;
bestval = pixval;
pr_debug("DIU bestval = %lu\n", bestval);
bestfreq = 0;
for (i = -1; i <= 1; i++) {
temp = speed / (pixval+i);
pr_debug("DIU test pixval i=%d, pixval=%lu, temp freq. = %u\n",
i, pixval, temp);
if ((temp < minpixclock) || (temp > maxpixclock))
pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
minpixclock, maxpixclock);
else if (abs(temp - pixclock) < err) {
pr_debug("Entered the else if block %d\n", i);
err = abs(temp - pixclock);
bestval = pixval + i;
bestfreq = temp;
}
}
pr_debug("DIU chose = %lx\n", bestval);
pr_debug("DIU error = %ld\n NomPixClk ", err);
pr_debug("DIU: Best Freq = %lx\n", bestfreq);
/* Modify DIU_DIV in CCM SCFR1 */
temp = in_be32(&ccm->scfr1);
pr_debug("DIU: Current value of SCFR1: 0x%08x\n", temp);
temp &= ~DIU_DIV_MASK;
temp |= (bestval & DIU_DIV_MASK);
out_be32(&ccm->scfr1, temp);
pr_debug("DIU: Modified value of SCFR1: 0x%08x\n", temp);
iounmap(ccm);
}
enum fsl_diu_monitor_port
mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port)
{
return FSL_DIU_PORT_DVI;
}
static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;
static inline void mpc512x_free_bootmem(struct page *page)
{
BUG_ON(PageTail(page));
BUG_ON(atomic_read(&page->_count) > 1);
free_reserved_page(page);
}
void mpc512x_release_bootmem(void)
{
unsigned long addr = diu_shared_fb.fb_phys & PAGE_MASK;
unsigned long size = diu_shared_fb.fb_len;
unsigned long start, end;
if (diu_shared_fb.in_use) {
start = PFN_UP(addr);
end = PFN_DOWN(addr + size);
for (; start < end; start++)
mpc512x_free_bootmem(pfn_to_page(start));
diu_shared_fb.in_use = false;
}
diu_ops.release_bootmem = NULL;
}
/*
* Check if DIU was pre-initialized. If so, perform steps
* needed to continue displaying through the whole boot process.
* Move area descriptor and gamma table elsewhere, they are
* destroyed by bootmem allocator otherwise. The frame buffer
* address range will be reserved in setup_arch() after bootmem
* allocator is up.
*/
void __init mpc512x_init_diu(void)
{
struct device_node *np;
struct diu __iomem *diu_reg;
phys_addr_t desc;
void __iomem *vaddr;
unsigned long mode, pix_fmt, res, bpp;
unsigned long dst;
np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-diu");
if (!np) {
pr_err("No DIU node\n");
return;
}
diu_reg = of_iomap(np, 0);
of_node_put(np);
if (!diu_reg) {
pr_err("Can't map DIU\n");
return;
}
mode = in_be32(&diu_reg->diu_mode);
if (mode == MFB_MODE0) {
pr_info("%s: DIU OFF\n", __func__);
goto out;
}
desc = in_be32(&diu_reg->desc[0]);
vaddr = ioremap(desc, sizeof(struct diu_ad));
if (!vaddr) {
pr_err("Can't map DIU area desc.\n");
goto out;
}
memcpy(&diu_shared_fb.ad0, vaddr, sizeof(struct diu_ad));
/* flush fb area descriptor */
dst = (unsigned long)&diu_shared_fb.ad0;
flush_dcache_range(dst, dst + sizeof(struct diu_ad) - 1);
res = in_be32(&diu_reg->disp_size);
pix_fmt = in_le32(vaddr);
bpp = ((pix_fmt >> 16) & 0x3) + 1;
diu_shared_fb.fb_phys = in_le32(vaddr + 4);
diu_shared_fb.fb_len = ((res & 0xfff0000) >> 16) * (res & 0xfff) * bpp;
diu_shared_fb.in_use = true;
iounmap(vaddr);
desc = in_be32(&diu_reg->gamma);
vaddr = ioremap(desc, sizeof(diu_shared_fb.gamma));
if (!vaddr) {
pr_err("Can't map DIU area desc.\n");
diu_shared_fb.in_use = false;
goto out;
}
memcpy(&diu_shared_fb.gamma, vaddr, sizeof(diu_shared_fb.gamma));
/* flush gamma table */
dst = (unsigned long)&diu_shared_fb.gamma;
flush_dcache_range(dst, dst + sizeof(diu_shared_fb.gamma) - 1);
iounmap(vaddr);
out_be32(&diu_reg->gamma, virt_to_phys(&diu_shared_fb.gamma));
out_be32(&diu_reg->desc[1], 0);
out_be32(&diu_reg->desc[2], 0);
out_be32(&diu_reg->desc[0], virt_to_phys(&diu_shared_fb.ad0));
out:
iounmap(diu_reg);
}
void __init mpc512x_setup_diu(void)
{
int ret;
/*
* We do not allocate and configure a new area for the bitmap buffer
* because it would require copying the bitmap data (splash image)
* and so negatively affect boot time. Instead we reserve the
* already configured frame buffer area so that it won't be
* destroyed. The starting address of the area to reserve and
* its length are passed to reserve_bootmem(). The area will be
* freed later, on first open of fbdev, when the splash image is
* no longer needed.
*/
if (diu_shared_fb.in_use) {
ret = reserve_bootmem(diu_shared_fb.fb_phys,
diu_shared_fb.fb_len,
BOOTMEM_EXCLUSIVE);
if (ret) {
pr_err("%s: reserve bootmem failed\n", __func__);
diu_shared_fb.in_use = false;
}
}
diu_ops.set_pixel_clock = mpc512x_set_pixel_clock;
diu_ops.valid_monitor_port = mpc512x_valid_monitor_port;
diu_ops.release_bootmem = mpc512x_release_bootmem;
}
#endif
void __init mpc512x_init_IRQ(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-ipic");
if (!np)
return;
ipic_init(np, 0);
of_node_put(np);
/*
* Initialize the default interrupt mapping priorities,
* in case the boot rom changed something on us.
*/
ipic_set_default_priority();
}
/*
* Nodes to do bus probe on, soc and localbus
*/
static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "fsl,mpc5121-immr", },
{ .compatible = "fsl,mpc5121-localbus", },
{ .compatible = "fsl,mpc5121-mbx", },
{ .compatible = "fsl,mpc5121-nfc", },
{ .compatible = "fsl,mpc5121-sram", },
{ .compatible = "fsl,mpc5121-pci", },
{ .compatible = "gpio-leds", },
{},
};
void __init mpc512x_declare_of_platform_devices(void)
{
if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
printk(KERN_ERR __FILE__ ": "
"Error while probing of_platform bus\n");
}
#define DEFAULT_FIFO_SIZE 16
const char *mpc512x_select_psc_compat(void)
{
if (of_machine_is_compatible("fsl,mpc5121"))
return "fsl,mpc5121-psc";
if (of_machine_is_compatible("fsl,mpc5125"))
return "fsl,mpc5125-psc";
return NULL;
}
static unsigned int __init get_fifo_size(struct device_node *np,
char *prop_name)
{
const unsigned int *fp;
fp = of_get_property(np, prop_name, NULL);
if (fp)
return *fp;
pr_warning("no %s property in %s node, defaulting to %d\n",
prop_name, np->full_name, DEFAULT_FIFO_SIZE);
return DEFAULT_FIFO_SIZE;
}
#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
((u32)(_base) + sizeof(struct mpc52xx_psc)))
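/*
 * In the txsz/rxsz writes below, the upper 16 bits hold the slice's
 * start offset within the shared 4KiB FIFO space and the lower bits
 * its length, both in 32-bit words: e.g. a 16-byte TX FIFO at offset 0
 * is programmed as (0 << 16) | 4.
 */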
/* Init PSC FIFO space for TX and RX slices */
void __init mpc512x_psc_fifo_init(void)
{
struct device_node *np;
void __iomem *psc;
unsigned int tx_fifo_size;
unsigned int rx_fifo_size;
const char *psc_compat;
int fifobase = 0; /* current fifo address in 32 bit words */
psc_compat = mpc512x_select_psc_compat();
if (!psc_compat) {
pr_err("%s: no compatible devices found\n", __func__);
return;
}
for_each_compatible_node(np, NULL, psc_compat) {
tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
/* size in register is in 4 byte units */
tx_fifo_size /= 4;
rx_fifo_size /= 4;
if (!tx_fifo_size)
tx_fifo_size = 1;
if (!rx_fifo_size)
rx_fifo_size = 1;
psc = of_iomap(np, 0);
if (!psc) {
pr_err("%s: Can't map %s device\n",
__func__, np->full_name);
continue;
}
/* FIFO space is 4KiB, check if requested size is available */
if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
pr_err("%s: no fifo space available for %s\n",
__func__, np->full_name);
iounmap(psc);
/*
* chances are that another device requests less
* fifo space, so we continue.
*/
continue;
}
/* set tx and rx fifo size registers */
out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
fifobase += tx_fifo_size;
out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
fifobase += rx_fifo_size;
/* reset and enable the slices */
out_be32(&FIFOC(psc)->txcmd, 0x80);
out_be32(&FIFOC(psc)->txcmd, 0x01);
out_be32(&FIFOC(psc)->rxcmd, 0x80);
out_be32(&FIFOC(psc)->rxcmd, 0x01);
iounmap(psc);
}
}
void __init mpc512x_init(void)
{
mpc5121_clk_init();
mpc512x_declare_of_platform_devices();
mpc512x_restart_init();
mpc512x_psc_fifo_init();
}
/**
* mpc512x_cs_config - Setup chip select configuration
* @cs: chip select number
* @val: chip select configuration value
*
* Perform chip select configuration for devices on LocalPlus Bus.
* Intended to dynamically reconfigure the chip select parameters
* for configurable devices on the bus.
*/
int mpc512x_cs_config(unsigned int cs, u32 val)
{
static struct mpc512x_lpc __iomem *lpc;
struct device_node *np;
if (cs > 7)
return -EINVAL;
if (!lpc) {
np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-lpc");
lpc = of_iomap(np, 0);
of_node_put(np);
if (!lpc)
return -ENOMEM;
}
out_be32(&lpc->cs_cfg[cs], val);
return 0;
}
EXPORT_SYMBOL(mpc512x_cs_config);
| gpl-2.0 |
shskyinfo/SKernel_Yu | drivers/net/wireless/orinoco/orinoco_plx.c | 2454 | 11721 | /* orinoco_plx.c
*
* Driver for Prism II devices which would usually be driven by orinoco_cs,
* but are connected to the PCI bus by a PLX9052.
*
* Current maintainers are:
* Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
* (C) Copyright David Gibson, IBM Corp. 2001-2003.
* Copyright (C) 2001 Daniel Barlow
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License
* at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and
* limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU General Public License version 2 (the "GPL"), in
* which case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice and
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL.
*
* Here's the general details on how the PLX9052 adapter works:
*
* - Two PCI I/O address spaces, one 0x80 long which contains the
* PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA
* slot I/O address space.
*
* - One PCI memory address space, mapped to the PCMCIA attribute space
* (containing the CIS).
*
* Using the latter, you can read through the CIS data to make sure the
* card is compatible with the driver. Keep in mind that the PCMCIA
* spec specifies the CIS as the lower 8 bits of each word read from
* the CIS, so to read the bytes of the CIS, read every other byte
* (0,2,4,...). Passing that test, you need to enable the I/O address
* space on the PCMCIA card via the PCMCIA COR register. This is the
* first byte following the CIS. In my case (which may not have any
* relation to what's on the PRISM2 cards), COR was at offset 0x800
* within the PCI memory space. Write 0x41 to the COR register to
* enable I/O mode and to select level triggered interrupts. To
* confirm you actually succeeded, read the COR register back and make
* sure it actually got set to 0x41, in case you have an unexpected
* card inserted.
*
* Following that, you can treat the second PCI I/O address space (the
* one that's not 0x80 in length) as the PCMCIA I/O space.
*
* Note that in the Eumitcom's source for their drivers, they register
* the interrupt as edge triggered when registering it with the
* Windows kernel. I don't recall how to register edge triggered on
* Linux (if it can be done at all). But in some experimentation, I
* don't see much operational difference between using either
* interrupt mode. Don't mess with the interrupt mode in the COR
* register though, as the PLX9052 wants level triggers with the way
* the serial EEPROM configures it on the WL11000.
*
* There are some other little quirks related to timing that I bumped
* into, but I don't recall them right now. Also, there are two variants of
* the WL11000 I've seen, revision A1 and T2. These seem to differ
* slightly in the timings configured in the wait-state generator in
* the PLX9052. There have also been some comments from Eumitcom that
* cards shouldn't be hot swapped, apparently due to risk of cooking
* the PLX9052. I'm unsure why they believe this, as I can't see
* anything in the design that would really cause a problem, except
* for crashing drivers not written to expect it. And having developed
* drivers for the WL11000, I'd say it's quite tricky to write code
* that will successfully deal with a hot unplug. Very odd things
* happen on the I/O side of things. But anyway, be warned. Despite
* that, I've hot-swapped a number of times during debugging and
* driver development for various reasons (stuck WAIT# line after the
* radio card's firmware locks up).
*/
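/*
 * Illustrative sketch only (never compiled): reading the CIS through
 * the attribute-memory window means taking the low byte of every 16-bit
 * word, i.e. every other byte, as orinoco_plx_hw_init() does below.
 */
#if 0
static void example_dump_cis(void __iomem *attr_io)
{
int i;
for (i = 0; i < 16; i++) /* CIS bytes live at even offsets */
pr_debug("CIS[%d] = %02x\n", i, ioread8(attr_io + (i << 1)));
}
#endif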
#define DRIVER_NAME "orinoco_plx"
#define PFX DRIVER_NAME ": "
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <pcmcia/cisreg.h>
#include "orinoco.h"
#include "orinoco_pci.h"
#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
#define COR_RESET (0x80) /* reset bit in the COR register */
#define PLX_RESET_TIME (500) /* milliseconds */
#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
#define PLX_INTCSR_INTEN (1 << 6) /* Interrupt Enable bit */
/*
* Do a soft reset of the card using the Configuration Option Register
*/
static int orinoco_plx_cor_reset(struct orinoco_private *priv)
{
struct hermes *hw = &priv->hw;
struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET);
mdelay(1);
iowrite8(COR_VALUE, card->attr_io + COR_OFFSET);
mdelay(1);
/* Just in case, wait more until the card is no longer busy */
timeout = jiffies + (PLX_RESET_TIME * HZ / 1000);
reg = hermes_read_regn(hw, CMD);
while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
mdelay(1);
reg = hermes_read_regn(hw, CMD);
}
/* Still busy? */
if (reg & HERMES_CMD_BUSY) {
printk(KERN_ERR PFX "Busy timeout\n");
return -ETIMEDOUT;
}
return 0;
}
static int orinoco_plx_hw_init(struct orinoco_pci_card *card)
{
int i;
u32 csr_reg;
static const u8 cis_magic[] = {
0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
};
printk(KERN_DEBUG PFX "CIS: ");
for (i = 0; i < 16; i++)
printk("%02X:", ioread8(card->attr_io + (i << 1)));
printk("\n");
/* Verify whether a supported PC card is present */
/* FIXME: we probably need to be smarter about this */
for (i = 0; i < sizeof(cis_magic); i++) {
if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) {
printk(KERN_ERR PFX "The CIS value of Prism2 PC "
"card is unexpected\n");
return -ENODEV;
}
}
/* bjoern: We need to tell the card to enable interrupts, in
case the serial EEPROM didn't do this already. See the
PLX9052 data book, p8-1 and 8-24 for reference. */
csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
if (!(csr_reg & PLX_INTCSR_INTEN)) {
csr_reg |= PLX_INTCSR_INTEN;
iowrite32(csr_reg, card->bridge_io + PLX_INTCSR);
csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
if (!(csr_reg & PLX_INTCSR_INTEN)) {
printk(KERN_ERR PFX "Cannot enable interrupts\n");
return -EIO;
}
}
return 0;
}
static int orinoco_plx_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
struct orinoco_private *priv;
struct orinoco_pci_card *card;
void __iomem *hermes_io, *attr_io, *bridge_io;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR PFX "Cannot enable PCI device\n");
return err;
}
err = pci_request_regions(pdev, DRIVER_NAME);
if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
goto fail_resources;
}
bridge_io = pci_iomap(pdev, 1, 0);
if (!bridge_io) {
printk(KERN_ERR PFX "Cannot map bridge registers\n");
err = -EIO;
goto fail_map_bridge;
}
attr_io = pci_iomap(pdev, 2, 0);
if (!attr_io) {
printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
err = -EIO;
goto fail_map_attr;
}
hermes_io = pci_iomap(pdev, 3, 0);
if (!hermes_io) {
printk(KERN_ERR PFX "Cannot map chipset registers\n");
err = -EIO;
goto fail_map_hermes;
}
/* Allocate network device */
priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
orinoco_plx_cor_reset, NULL);
if (!priv) {
printk(KERN_ERR PFX "Cannot allocate network device\n");
err = -ENOMEM;
goto fail_alloc;
}
card = priv->card;
card->bridge_io = bridge_io;
card->attr_io = attr_io;
hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
DRIVER_NAME, priv);
if (err) {
printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
err = -EBUSY;
goto fail_irq;
}
err = orinoco_plx_hw_init(card);
if (err) {
printk(KERN_ERR PFX "Hardware initialization failed\n");
goto fail;
}
err = orinoco_plx_cor_reset(priv);
if (err) {
printk(KERN_ERR PFX "Initial reset failed\n");
goto fail;
}
err = orinoco_init(priv);
if (err) {
printk(KERN_ERR PFX "orinoco_init() failed\n");
goto fail;
}
err = orinoco_if_add(priv, 0, 0, NULL);
if (err) {
printk(KERN_ERR PFX "orinoco_if_add() failed\n");
goto fail;
}
pci_set_drvdata(pdev, priv);
return 0;
fail:
free_irq(pdev->irq, priv);
fail_irq:
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
fail_alloc:
pci_iounmap(pdev, hermes_io);
fail_map_hermes:
pci_iounmap(pdev, attr_io);
fail_map_attr:
pci_iounmap(pdev, bridge_io);
fail_map_bridge:
pci_release_regions(pdev);
fail_resources:
pci_disable_device(pdev);
return err;
}
static void orinoco_plx_remove_one(struct pci_dev *pdev)
{
struct orinoco_private *priv = pci_get_drvdata(pdev);
struct orinoco_pci_card *card = priv->card;
orinoco_if_del(priv);
free_irq(pdev->irq, priv);
pci_set_drvdata(pdev, NULL);
free_orinocodev(priv);
pci_iounmap(pdev, priv->hw.iobase);
pci_iounmap(pdev, card->attr_io);
pci_iounmap(pdev, card->bridge_io);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
{0x1638, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* SMC EZConnect SMC2602W,
Eumitcom PCI WL11000,
Addtron AWA-100 */
{0x16ab, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* Global Sun Tech GL24110P */
{0x16ab, 0x1101, PCI_ANY_ID, PCI_ANY_ID,}, /* Reported working, but unknown */
{0x16ab, 0x1102, PCI_ANY_ID, PCI_ANY_ID,}, /* Linksys WDT11 */
{0x16ec, 0x3685, PCI_ANY_ID, PCI_ANY_ID,}, /* USR 2415 */
{0xec80, 0xec00, PCI_ANY_ID, PCI_ANY_ID,}, /* Belkin F5D6000 tested by
Brendan W. McAdams <rit AT jacked-in.org> */
{0x10b7, 0x7770, PCI_ANY_ID, PCI_ANY_ID,}, /* 3Com AirConnect PCI tested by
Damien Persohn <damien AT persohn.net> */
{0,},
};
MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table);
static struct pci_driver orinoco_plx_driver = {
.name = DRIVER_NAME,
.id_table = orinoco_plx_id_table,
.probe = orinoco_plx_init_one,
.remove = orinoco_plx_remove_one,
.suspend = orinoco_pci_suspend,
.resume = orinoco_pci_resume,
};
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" (Pavel Roskin <proski@gnu.org>,"
" David Gibson <hermes@gibson.dropbear.id.au>,"
" Daniel Barlow <dan@telent.net>)";
MODULE_AUTHOR("Daniel Barlow <dan@telent.net>");
MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge");
MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_plx_init(void)
{
printk(KERN_DEBUG "%s\n", version);
return pci_register_driver(&orinoco_plx_driver);
}
static void __exit orinoco_plx_exit(void)
{
pci_unregister_driver(&orinoco_plx_driver);
}
module_init(orinoco_plx_init);
module_exit(orinoco_plx_exit);
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* tab-width: 8
* End:
*/
| gpl-2.0 |
FireLord1/android_kernel_motorola_msm8916 | arch/microblaze/kernel/exceptions.c | 4246 | 4189 | /*
* HW exception handling
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008 PetaLogix
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
/*
* This file handles the architecture-dependent parts of hardware exceptions
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
#include <asm/exceptions.h>
#include <asm/entry.h> /* For KM CPU var */
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/current.h>
#include <asm/cacheflush.h>
#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
#define MICROBLAZE_IBUS_EXCEPTION 0x03
#define MICROBLAZE_DBUS_EXCEPTION 0x04
#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
#define MICROBLAZE_FPU_EXCEPTION 0x06
#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
static DEFINE_SPINLOCK(die_lock);
void die(const char *str, struct pt_regs *fp, long err)
{
console_verbose();
spin_lock_irq(&die_lock);
pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
/* do_exit() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
do_exit(err);
}
/* for user application debugging */
asmlinkage void sw_exception(struct pt_regs *regs)
{
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
flush_dcache_range(regs->r16, regs->r16 + 0x4);
flush_icache_range(regs->r16, regs->r16 + 0x4);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
if (kernel_mode(regs))
die("Exception in kernel mode", regs, signr);
info.si_signo = signr;
info.si_errno = 0;
info.si_code = code;
info.si_addr = (void __user *) addr;
force_sig_info(signr, &info, current);
}
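/*
 * full_exception() below is entered from the low-level exception
 * vectors; the low five bits of "type" (type & 0x1F) select the
 * hardware cause, matching the MICROBLAZE_*_EXCEPTION values above.
 */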
asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
int fsr, int addr)
{
#ifdef CONFIG_MMU
addr = regs->pc;
#endif
#if 0
pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
type, user_mode(regs) ? "user" : "kernel", fsr,
(unsigned int) regs->pc, (unsigned int) regs->esr);
#endif
switch (type & 0x1F) {
case MICROBLAZE_ILL_OPCODE_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Illegal opcode exception in user mode\n");
_exception(SIGILL, regs, ILL_ILLOPC, addr);
return;
}
pr_warn("Illegal opcode exception in kernel mode.\n");
die("opcode exception", regs, SIGBUS);
break;
case MICROBLAZE_IBUS_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Instruction bus error exception in user mode\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
pr_warn("Instruction bus error exception in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DBUS_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Data bus error exception in user mode\n");
_exception(SIGBUS, regs, BUS_ADRERR, addr);
return;
}
pr_warn("Data bus error exception in kernel mode.\n");
die("bus exception", regs, SIGBUS);
break;
case MICROBLAZE_DIV_ZERO_EXCEPTION:
if (user_mode(regs)) {
pr_debug("Divide by zero exception in user mode\n");
_exception(SIGFPE, regs, FPE_INTDIV, addr);
return;
}
pr_warn("Divide by zero exception in kernel mode.\n");
die("Divide by zero exception", regs, SIGBUS);
break;
case MICROBLAZE_FPU_EXCEPTION:
pr_debug("FPU exception\n");
/* IEEE FP exception */
/* I removed fsr variable and use code var for storing fsr */
if (fsr & FSR_IO)
fsr = FPE_FLTINV;
else if (fsr & FSR_OF)
fsr = FPE_FLTOVF;
else if (fsr & FSR_UF)
fsr = FPE_FLTUND;
else if (fsr & FSR_DZ)
fsr = FPE_FLTDIV;
else if (fsr & FSR_DO)
fsr = FPE_FLTRES;
_exception(SIGFPE, regs, fsr, addr);
break;
#ifdef CONFIG_MMU
case MICROBLAZE_PRIVILEGED_EXCEPTION:
pr_debug("Privileged exception\n");
_exception(SIGILL, regs, ILL_PRVOPC, addr);
break;
#endif
default:
/* FIXME what to do in unexpected exception */
pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
type, (unsigned int) addr,
kernel_mode(regs) ? "kernel" : "user");
}
return;
}
| gpl-2.0 |
itgb/opCloudRouter | qca/src/linux/arch/mips/pci/ops-tx4927.c | 7574 | 15946 | /*
* Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc.
*
* Based on linux/arch/mips/pci/ops-tx4938.c,
* linux/arch/mips/pci/fixup-rbtx4938.c,
* linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* 2003-2005 (c) MontaVista Software, Inc.
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/tx4927pcic.h>
static struct {
struct pci_controller *channel;
struct tx4927_pcic_reg __iomem *pcicptr;
} pcicptrs[2]; /* TX4938 has 2 pcic */
static void __init set_tx4927_pcicptr(struct pci_controller *channel,
struct tx4927_pcic_reg __iomem *pcicptr)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (pcicptrs[i].channel == channel) {
pcicptrs[i].pcicptr = pcicptr;
return;
}
}
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (!pcicptrs[i].channel) {
pcicptrs[i].channel = channel;
pcicptrs[i].pcicptr = pcicptr;
return;
}
}
BUG();
}
struct tx4927_pcic_reg __iomem *get_tx4927_pcicptr(
struct pci_controller *channel)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (pcicptrs[i].channel == channel)
return pcicptrs[i].pcicptr;
}
return NULL;
}
static int mkaddr(struct pci_bus *bus, unsigned int devfn, int where,
struct tx4927_pcic_reg __iomem *pcicptr)
{
if (bus->parent == NULL &&
devfn >= PCI_DEVFN(TX4927_PCIC_MAX_DEVNU, 0))
return -1;
__raw_writel(((bus->number & 0xff) << 0x10)
| ((devfn & 0xff) << 0x08) | (where & 0xfc)
| (bus->parent ? 1 : 0),
&pcicptr->g2pcfgadrs);
/* clear M_ABORT and Disable M_ABORT Int. */
__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
| (PCI_STATUS_REC_MASTER_ABORT << 16),
&pcicptr->pcistatus);
return 0;
}
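/*
 * The g2pcfgadrs value written above encodes a PCI configuration
 * address as bus[23:16] | devfn[15:8] | register[7:2], with bit 0 set
 * for a type-1 cycle on buses behind a bridge: e.g. bus 1,
 * PCI_DEVFN(2, 0), register 0x10 gives
 * (1 << 16) | (PCI_DEVFN(2, 0) << 8) | 0x10 | 1.
 */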
static int check_abort(struct tx4927_pcic_reg __iomem *pcicptr)
{
int code = PCIBIOS_SUCCESSFUL;
/* wait write cycle completion before checking error status */
while (__raw_readl(&pcicptr->pcicstatus) & TX4927_PCIC_PCICSTATUS_IWB)
;
if (__raw_readl(&pcicptr->pcistatus)
& (PCI_STATUS_REC_MASTER_ABORT << 16)) {
__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
| (PCI_STATUS_REC_MASTER_ABORT << 16),
&pcicptr->pcistatus);
/* flush write buffer */
iob();
code = PCIBIOS_DEVICE_NOT_FOUND;
}
return code;
}
static u8 icd_readb(int offset, struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
offset ^= 3;
#endif
return __raw_readb((void __iomem *)&pcicptr->g2pcfgdata + offset);
}
static u16 icd_readw(int offset, struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
offset ^= 2;
#endif
return __raw_readw((void __iomem *)&pcicptr->g2pcfgdata + offset);
}
static u32 icd_readl(struct tx4927_pcic_reg __iomem *pcicptr)
{
return __raw_readl(&pcicptr->g2pcfgdata);
}
static void icd_writeb(u8 val, int offset,
struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
offset ^= 3;
#endif
__raw_writeb(val, (void __iomem *)&pcicptr->g2pcfgdata + offset);
}
static void icd_writew(u16 val, int offset,
struct tx4927_pcic_reg __iomem *pcicptr)
{
#ifdef __BIG_ENDIAN
offset ^= 2;
#endif
__raw_writew(val, (void __iomem *)&pcicptr->g2pcfgdata + offset);
}
static void icd_writel(u32 val, struct tx4927_pcic_reg __iomem *pcicptr)
{
__raw_writel(val, &pcicptr->g2pcfgdata);
}
static struct tx4927_pcic_reg __iomem *pci_bus_to_pcicptr(struct pci_bus *bus)
{
struct pci_controller *channel = bus->sysdata;
return get_tx4927_pcicptr(channel);
}
static int tx4927_pci_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);
if (mkaddr(bus, devfn, where, pcicptr)) {
*val = 0xffffffff;
return -1;
}
switch (size) {
case 1:
*val = icd_readb(where & 3, pcicptr);
break;
case 2:
*val = icd_readw(where & 3, pcicptr);
break;
default:
*val = icd_readl(pcicptr);
}
return check_abort(pcicptr);
}
static int tx4927_pci_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus);
if (mkaddr(bus, devfn, where, pcicptr))
return -1;
switch (size) {
case 1:
icd_writeb(val, where & 3, pcicptr);
break;
case 2:
icd_writew(val, where & 3, pcicptr);
break;
default:
icd_writel(val, pcicptr);
}
return check_abort(pcicptr);
}
static struct pci_ops tx4927_pci_ops = {
.read = tx4927_pci_config_read,
.write = tx4927_pci_config_write,
};
static struct {
u8 trdyto;
u8 retryto;
u16 gbwc;
} tx4927_pci_opts __devinitdata = {
.trdyto = 0,
.retryto = 0,
.gbwc = 0xfe0, /* 4064 GBUSCLK for CCFG.GTOT=0b11 */
};
char *__devinit tx4927_pcibios_setup(char *str)
{
unsigned long val;
if (!strncmp(str, "trdyto=", 7)) {
if (strict_strtoul(str + 7, 0, &val) == 0)
tx4927_pci_opts.trdyto = val;
return NULL;
}
if (!strncmp(str, "retryto=", 8)) {
if (strict_strtoul(str + 8, 0, &val) == 0)
tx4927_pci_opts.retryto = val;
return NULL;
}
if (!strncmp(str, "gbwc=", 5)) {
if (strict_strtoul(str + 5, 0, &val) == 0)
tx4927_pci_opts.gbwc = val;
return NULL;
}
return str;
}
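/*
 * This parser consumes option fragments such as "trdyto=0xff",
 * "retryto=0xff" or "gbwc=0xfe0" (parsed with strict_strtoul base 0,
 * so 0x prefixes work) and returns NULL for them; anything else is
 * handed back to the generic setup code unchanged.
 */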
void __init tx4927_pcic_setup(struct tx4927_pcic_reg __iomem *pcicptr,
struct pci_controller *channel, int extarb)
{
int i;
unsigned long flags;
set_tx4927_pcicptr(channel, pcicptr);
if (!channel->pci_ops)
printk(KERN_INFO
"PCIC -- DID:%04x VID:%04x RID:%02x Arbiter:%s\n",
__raw_readl(&pcicptr->pciid) >> 16,
__raw_readl(&pcicptr->pciid) & 0xffff,
__raw_readl(&pcicptr->pciccrev) & 0xff,
extarb ? "External" : "Internal");
channel->pci_ops = &tx4927_pci_ops;
local_irq_save(flags);
/* Disable All Initiator Space */
__raw_writel(__raw_readl(&pcicptr->pciccfg)
& ~(TX4927_PCIC_PCICCFG_G2PMEN(0)
| TX4927_PCIC_PCICCFG_G2PMEN(1)
| TX4927_PCIC_PCICCFG_G2PMEN(2)
| TX4927_PCIC_PCICCFG_G2PIOEN),
&pcicptr->pciccfg);
/* GB->PCI mappings */
__raw_writel((channel->io_resource->end - channel->io_resource->start)
>> 4,
&pcicptr->g2piomask);
____raw_writeq((channel->io_resource->start +
channel->io_map_base - IO_BASE) |
#ifdef __BIG_ENDIAN
TX4927_PCIC_G2PIOGBASE_ECHG
#else
TX4927_PCIC_G2PIOGBASE_BSDIS
#endif
, &pcicptr->g2piogbase);
____raw_writeq(channel->io_resource->start - channel->io_offset,
&pcicptr->g2piopbase);
for (i = 0; i < 3; i++) {
__raw_writel(0, &pcicptr->g2pmmask[i]);
____raw_writeq(0, &pcicptr->g2pmgbase[i]);
____raw_writeq(0, &pcicptr->g2pmpbase[i]);
}
if (channel->mem_resource->end) {
__raw_writel((channel->mem_resource->end
- channel->mem_resource->start) >> 4,
&pcicptr->g2pmmask[0]);
____raw_writeq(channel->mem_resource->start |
#ifdef __BIG_ENDIAN
TX4927_PCIC_G2PMnGBASE_ECHG
#else
TX4927_PCIC_G2PMnGBASE_BSDIS
#endif
, &pcicptr->g2pmgbase[0]);
____raw_writeq(channel->mem_resource->start -
channel->mem_offset,
&pcicptr->g2pmpbase[0]);
}
/* PCI->GB mappings (I/O 256B) */
__raw_writel(0, &pcicptr->p2giopbase); /* 256B */
____raw_writeq(0, &pcicptr->p2giogbase);
/* PCI->GB mappings (MEM 512MB (64MB on R1.x)) */
__raw_writel(0, &pcicptr->p2gm0plbase);
__raw_writel(0, &pcicptr->p2gm0pubase);
____raw_writeq(TX4927_PCIC_P2GMnGBASE_TMEMEN |
#ifdef __BIG_ENDIAN
TX4927_PCIC_P2GMnGBASE_TECHG
#else
TX4927_PCIC_P2GMnGBASE_TBSDIS
#endif
, &pcicptr->p2gmgbase[0]);
/* PCI->GB mappings (MEM 16MB) */
__raw_writel(0xffffffff, &pcicptr->p2gm1plbase);
__raw_writel(0xffffffff, &pcicptr->p2gm1pubase);
____raw_writeq(0, &pcicptr->p2gmgbase[1]);
/* PCI->GB mappings (MEM 1MB) */
__raw_writel(0xffffffff, &pcicptr->p2gm2pbase); /* 1MB */
____raw_writeq(0, &pcicptr->p2gmgbase[2]);
/* Clear all (including IRBER) except for GBWC */
__raw_writel((tx4927_pci_opts.gbwc << 16)
& TX4927_PCIC_PCICCFG_GBWC_MASK,
&pcicptr->pciccfg);
/* Enable Initiator Memory Space */
if (channel->mem_resource->end)
__raw_writel(__raw_readl(&pcicptr->pciccfg)
| TX4927_PCIC_PCICCFG_G2PMEN(0),
&pcicptr->pciccfg);
/* Enable Initiator I/O Space */
if (channel->io_resource->end)
__raw_writel(__raw_readl(&pcicptr->pciccfg)
| TX4927_PCIC_PCICCFG_G2PIOEN,
&pcicptr->pciccfg);
/* Enable Initiator Config */
__raw_writel(__raw_readl(&pcicptr->pciccfg)
| TX4927_PCIC_PCICCFG_ICAEN | TX4927_PCIC_PCICCFG_TCAR,
&pcicptr->pciccfg);
/* Do not use MEMMUL, MEMINF: YMFPCI card causes M_ABORT. */
__raw_writel(0, &pcicptr->pcicfg1);
__raw_writel((__raw_readl(&pcicptr->g2ptocnt) & ~0xffff)
| (tx4927_pci_opts.trdyto & 0xff)
| ((tx4927_pci_opts.retryto & 0xff) << 8),
&pcicptr->g2ptocnt);
/* Clear All Local Bus Status */
__raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus);
/* Enable All Local Bus Interrupts */
__raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicmask);
/* Clear All Initiator Status */
__raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus);
/* Enable All Initiator Interrupts */
__raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pmask);
/* Clear All PCI Status Error */
__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
| (TX4927_PCIC_PCISTATUS_ALL << 16),
&pcicptr->pcistatus);
/* Enable All PCI Status Error Interrupts */
__raw_writel(TX4927_PCIC_PCISTATUS_ALL, &pcicptr->pcimask);
if (!extarb) {
/* Reset Bus Arbiter */
__raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg);
__raw_writel(0, &pcicptr->pbabm);
/* Enable Bus Arbiter */
__raw_writel(TX4927_PCIC_PBACFG_PBAEN, &pcicptr->pbacfg);
}
__raw_writel(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
| PCI_COMMAND_PARITY | PCI_COMMAND_SERR,
&pcicptr->pcistatus);
local_irq_restore(flags);
printk(KERN_DEBUG
"PCI: COMMAND=%04x,PCIMASK=%04x,"
"TRDYTO=%02x,RETRYTO=%02x,GBWC=%03x\n",
__raw_readl(&pcicptr->pcistatus) & 0xffff,
__raw_readl(&pcicptr->pcimask) & 0xffff,
__raw_readl(&pcicptr->g2ptocnt) & 0xff,
(__raw_readl(&pcicptr->g2ptocnt) & 0xff00) >> 8,
(__raw_readl(&pcicptr->pciccfg) >> 16) & 0xfff);
}
static void tx4927_report_pcic_status1(struct tx4927_pcic_reg __iomem *pcicptr)
{
__u16 pcistatus = (__u16)(__raw_readl(&pcicptr->pcistatus) >> 16);
__u32 g2pstatus = __raw_readl(&pcicptr->g2pstatus);
__u32 pcicstatus = __raw_readl(&pcicptr->pcicstatus);
static struct {
__u32 flag;
const char *str;
} pcistat_tbl[] = {
{ PCI_STATUS_DETECTED_PARITY, "DetectedParityError" },
{ PCI_STATUS_SIG_SYSTEM_ERROR, "SignaledSystemError" },
{ PCI_STATUS_REC_MASTER_ABORT, "ReceivedMasterAbort" },
{ PCI_STATUS_REC_TARGET_ABORT, "ReceivedTargetAbort" },
{ PCI_STATUS_SIG_TARGET_ABORT, "SignaledTargetAbort" },
{ PCI_STATUS_PARITY, "MasterParityError" },
}, g2pstat_tbl[] = {
{ TX4927_PCIC_G2PSTATUS_TTOE, "TIOE" },
{ TX4927_PCIC_G2PSTATUS_RTOE, "RTOE" },
}, pcicstat_tbl[] = {
{ TX4927_PCIC_PCICSTATUS_PME, "PME" },
{ TX4927_PCIC_PCICSTATUS_TLB, "TLB" },
{ TX4927_PCIC_PCICSTATUS_NIB, "NIB" },
{ TX4927_PCIC_PCICSTATUS_ZIB, "ZIB" },
{ TX4927_PCIC_PCICSTATUS_PERR, "PERR" },
{ TX4927_PCIC_PCICSTATUS_SERR, "SERR" },
{ TX4927_PCIC_PCICSTATUS_GBE, "GBE" },
{ TX4927_PCIC_PCICSTATUS_IWB, "IWB" },
};
int i, cont;
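/* Start the report line at KERN_ERR; the pieces below are appended
 * with KERN_CONT so they all end up on one line. */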
printk(KERN_ERR "");
if (pcistatus & TX4927_PCIC_PCISTATUS_ALL) {
printk(KERN_CONT "pcistat:%04x(", pcistatus);
for (i = 0, cont = 0; i < ARRAY_SIZE(pcistat_tbl); i++)
if (pcistatus & pcistat_tbl[i].flag)
printk(KERN_CONT "%s%s",
cont++ ? " " : "", pcistat_tbl[i].str);
printk(KERN_CONT ") ");
}
if (g2pstatus & TX4927_PCIC_G2PSTATUS_ALL) {
printk(KERN_CONT "g2pstatus:%08x(", g2pstatus);
for (i = 0, cont = 0; i < ARRAY_SIZE(g2pstat_tbl); i++)
if (g2pstatus & g2pstat_tbl[i].flag)
printk(KERN_CONT "%s%s",
cont++ ? " " : "", g2pstat_tbl[i].str);
printk(KERN_CONT ") ");
}
if (pcicstatus & TX4927_PCIC_PCICSTATUS_ALL) {
printk(KERN_CONT "pcicstatus:%08x(", pcicstatus);
for (i = 0, cont = 0; i < ARRAY_SIZE(pcicstat_tbl); i++)
if (pcicstatus & pcicstat_tbl[i].flag)
printk(KERN_CONT "%s%s",
cont++ ? " " : "", pcicstat_tbl[i].str);
printk(KERN_CONT ")");
}
printk(KERN_CONT "\n");
}
void tx4927_report_pcic_status(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (pcicptrs[i].pcicptr)
tx4927_report_pcic_status1(pcicptrs[i].pcicptr);
}
}
static void tx4927_dump_pcic_settings1(struct tx4927_pcic_reg __iomem *pcicptr)
{
int i;
__u32 __iomem *preg = (__u32 __iomem *)pcicptr;
printk(KERN_INFO "tx4927 pcic (0x%p) settings:", pcicptr);
for (i = 0; i < sizeof(struct tx4927_pcic_reg); i += 4, preg++) {
if (i % 32 == 0) {
printk(KERN_CONT "\n");
printk(KERN_INFO "%04x:", i);
}
/* skip registers with side-effects */
if (i == offsetof(struct tx4927_pcic_reg, g2pintack)
|| i == offsetof(struct tx4927_pcic_reg, g2pspc)
|| i == offsetof(struct tx4927_pcic_reg, g2pcfgadrs)
|| i == offsetof(struct tx4927_pcic_reg, g2pcfgdata)) {
printk(KERN_CONT " XXXXXXXX");
continue;
}
printk(KERN_CONT " %08x", __raw_readl(preg));
}
printk(KERN_CONT "\n");
}
void tx4927_dump_pcic_settings(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(pcicptrs); i++) {
if (pcicptrs[i].pcicptr)
tx4927_dump_pcic_settings1(pcicptrs[i].pcicptr);
}
}
irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
struct tx4927_pcic_reg __iomem *pcicptr =
(struct tx4927_pcic_reg __iomem *)(unsigned long)dev_id;
if (txx9_pci_err_action != TXX9_PCI_ERR_IGNORE) {
printk(KERN_WARNING "PCIERR interrupt at 0x%0*lx\n",
(int)(2 * sizeof(unsigned long)), regs->cp0_epc);
tx4927_report_pcic_status1(pcicptr);
}
if (txx9_pci_err_action != TXX9_PCI_ERR_PANIC) {
/* clear all pci errors */
__raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff)
| (TX4927_PCIC_PCISTATUS_ALL << 16),
&pcicptr->pcistatus);
__raw_writel(TX4927_PCIC_G2PSTATUS_ALL, &pcicptr->g2pstatus);
__raw_writel(TX4927_PCIC_PBASTATUS_ALL, &pcicptr->pbastatus);
__raw_writel(TX4927_PCIC_PCICSTATUS_ALL, &pcicptr->pcicstatus);
return IRQ_HANDLED;
}
console_verbose();
tx4927_dump_pcic_settings1(pcicptr);
panic("PCI error.");
}
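/*
 * Illustrative sketch (example only, not in the original file): board
 * code would typically hook the handler above roughly like this. The
 * IRQ token PCIERR_IRQ is hypothetical; the real vector number comes
 * from the board's interrupt map.
 */
#if 0
if (request_irq(PCIERR_IRQ, tx4927_pcierr_interrupt, 0,
"PCI error", (void *)(unsigned long)pcicptr))
printk(KERN_WARNING "PCI: failed to request PCIERR irq\n");
#endif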
#ifdef CONFIG_TOSHIBA_FPCIB0
static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
if (!pcicptr)
return;
if (__raw_readl(&pcicptr->pbacfg) & TX4927_PCIC_PBACFG_PBAEN) {
/* Reset Bus Arbiter */
__raw_writel(TX4927_PCIC_PBACFG_RPBA, &pcicptr->pbacfg);
/*
* swap reqBP and reqXP (raise priority of SLC90E66).
* SLC90E66(PCI-ISA bridge) is connected to REQ2 on
* PCI Backplane board.
*/
__raw_writel(0x72543610, &pcicptr->pbareqport);
__raw_writel(0, &pcicptr->pbabm);
/* Use Fixed ParkMaster (required by SLC90E66) */
__raw_writel(TX4927_PCIC_PBACFG_FIXPA, &pcicptr->pbacfg);
/* Enable Bus Arbiter */
__raw_writel(TX4927_PCIC_PBACFG_FIXPA |
TX4927_PCIC_PBACFG_PBAEN,
&pcicptr->pbacfg);
printk(KERN_INFO "PCI: Use Fixed Park Master (REQPORT %08x)\n",
__raw_readl(&pcicptr->pbareqport));
}
}
#define PCI_DEVICE_ID_EFAR_SLC90E66_0 0x9460
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_0,
tx4927_quirk_slc90e66_bridge);
#endif
| gpl-2.0 |
neobuddy89/android_kernel_cyanogen_msm8916 | net/rose/rose_timer.c | 9622 | 4999 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static void rose_heartbeat_expiry(unsigned long);
static void rose_timer_expiry(unsigned long);
static void rose_idletimer_expiry(unsigned long);
void rose_start_heartbeat(struct sock *sk)
{
del_timer(&sk->sk_timer);
sk->sk_timer.data = (unsigned long)sk;
sk->sk_timer.function = &rose_heartbeat_expiry;
sk->sk_timer.expires = jiffies + 5 * HZ;
add_timer(&sk->sk_timer);
}
void rose_start_t1timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.expires = jiffies + rose->t1;
add_timer(&rose->timer);
}
void rose_start_t2timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.expires = jiffies + rose->t2;
add_timer(&rose->timer);
}
void rose_start_t3timer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.expires = jiffies + rose->t3;
add_timer(&rose->timer);
}
void rose_start_hbtimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.expires = jiffies + rose->hb;
add_timer(&rose->timer);
}
void rose_start_idletimer(struct sock *sk)
{
struct rose_sock *rose = rose_sk(sk);
del_timer(&rose->idletimer);
if (rose->idle > 0) {
rose->idletimer.data = (unsigned long)sk;
rose->idletimer.function = &rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
add_timer(&rose->idletimer);
}
}
void rose_stop_heartbeat(struct sock *sk)
{
del_timer(&sk->sk_timer);
}
void rose_stop_timer(struct sock *sk)
{
del_timer(&rose_sk(sk)->timer);
}
void rose_stop_idletimer(struct sock *sk)
{
del_timer(&rose_sk(sk)->idletimer);
}
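/*
 * Illustrative sketch (example only): state-machine code arms exactly
 * one of the timers above when entering a state; e.g. when sending a
 * CALL REQUEST it does roughly the following (locking and error
 * handling omitted).
 */
#if 0
rose->state = ROSE_STATE_1;
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk); /* retransmission timeout for state 1 */
#endif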
static void rose_heartbeat_expiry(unsigned long param)
{
struct sock *sk = (struct sock *)param;
struct rose_sock *rose = rose_sk(sk);
bh_lock_sock(sk);
switch (rose->state) {
case ROSE_STATE_0:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
bh_unlock_sock(sk);
rose_destroy_socket(sk);
return;
}
break;
case ROSE_STATE_3:
/*
* Check for the state of the receive buffer.
*/
if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
(rose->condition & ROSE_COND_OWN_RX_BUSY)) {
rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose->vl = rose->vr;
rose_write_internal(sk, ROSE_RR);
rose_stop_timer(sk); /* HB */
break;
}
break;
}
rose_start_heartbeat(sk);
bh_unlock_sock(sk);
}
static void rose_timer_expiry(unsigned long param)
{
struct sock *sk = (struct sock *)param;
struct rose_sock *rose = rose_sk(sk);
bh_lock_sock(sk);
switch (rose->state) {
case ROSE_STATE_1: /* T1 */
case ROSE_STATE_4: /* T2 */
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose->state = ROSE_STATE_2;
rose_start_t3timer(sk);
break;
case ROSE_STATE_2: /* T3 */
rose->neighbour->use--;
rose_disconnect(sk, ETIMEDOUT, -1, -1);
break;
case ROSE_STATE_3: /* HB */
if (rose->condition & ROSE_COND_ACK_PENDING) {
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose_enquiry_response(sk);
}
break;
}
bh_unlock_sock(sk);
}
static void rose_idletimer_expiry(unsigned long param)
{
struct sock *sk = (struct sock *)param;
bh_lock_sock(sk);
rose_clear_queues(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_sk(sk)->state = ROSE_STATE_2;
rose_start_t3timer(sk);
sk->sk_state = TCP_CLOSE;
sk->sk_err = 0;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
}
| gpl-2.0 |
cnauman/linux | arch/cris/arch-v32/mach-a3/arbiter.c | 9878 | 17688 | /*
* Memory arbiter functions. Allocates bandwidth through the
* arbiter and sets up arbiter breakpoints.
*
 * The algorithm first assigns slots to the clients that have specified
 * bandwidth (e.g. ethernet) and then divides the remaining slots
 * among all the active clients.
*
* Copyright (c) 2004-2007 Axis Communications AB.
*
* The artpec-3 has two arbiters. The memory hierarchy looks like this:
*
*
* CPU DMAs
* | |
* | |
* -------------- ------------------
* | foo arbiter|----| Internal memory|
* -------------- ------------------
* |
* --------------
* | L2 cache |
* --------------
* |
* h264 etc |
* | |
* | |
* --------------
* | bar arbiter|
* --------------
* |
* ---------
* | SDRAM |
* ---------
*
*/
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_foo_defs.h>
#include <hwregs/marb_bar_defs.h>
#include <arbiter.h>
#include <hwregs/intr_vect.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#define D(x)
struct crisv32_watch_entry {
unsigned long instance;
watch_callback *cb;
unsigned long start;
unsigned long end;
int used;
};
#define NUMBER_OF_BP 4
#define SDRAM_BANDWIDTH 400000000
#define INTMEM_BANDWIDTH 400000000
#define NBR_OF_SLOTS 64
#define NBR_OF_REGIONS 2
#define NBR_OF_CLIENTS 15
#define ARBITERS 2
#define UNASSIGNED 100
struct arbiter {
unsigned long instance;
int nbr_regions;
int nbr_clients;
int requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
int active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
};
static struct crisv32_watch_entry watches[ARBITERS][NUMBER_OF_BP] =
{
{
{regi_marb_foo_bp0},
{regi_marb_foo_bp1},
{regi_marb_foo_bp2},
{regi_marb_foo_bp3}
},
{
{regi_marb_bar_bp0},
{regi_marb_bar_bp1},
{regi_marb_bar_bp2},
{regi_marb_bar_bp3}
}
};
struct arbiter arbiters[ARBITERS] =
{
{ /* L2 cache arbiter */
.instance = regi_marb_foo,
.nbr_regions = 2,
.nbr_clients = 15
},
{ /* DDR2 arbiter */
.instance = regi_marb_bar,
.nbr_regions = 1,
.nbr_clients = 9
}
};
static int max_bandwidth[NBR_OF_REGIONS] = {SDRAM_BANDWIDTH, INTMEM_BANDWIDTH};
DEFINE_SPINLOCK(arbiter_lock);
static irqreturn_t
crisv32_foo_arbiter_irq(int irq, void *dev_id);
static irqreturn_t
crisv32_bar_arbiter_irq(int irq, void *dev_id);
/*
* "I'm the arbiter, I know the score.
* From square one I'll be watching all 64."
* (memory arbiter slots, that is)
*
* Or in other words:
* Program the memory arbiter slots for "region" according to what's
* in requested_slots[] and active_clients[], while minimizing
* latency. A caller may pass a non-zero positive amount for
* "unused_slots", which must then be the unallocated, remaining
* number of slots, free to hand out to any client.
*/
static void crisv32_arbiter_config(int arbiter, int region, int unused_slots)
{
int slot;
int client;
int interval = 0;
/*
* This vector corresponds to the hardware arbiter slots (see
* the hardware documentation for semantics). We initialize
* each slot with a suitable sentinel value outside the valid
* range {0 .. NBR_OF_CLIENTS - 1} and replace them with
* client indexes. Then it's fed to the hardware.
*/
s8 val[NBR_OF_SLOTS];
for (slot = 0; slot < NBR_OF_SLOTS; slot++)
val[slot] = -1;
for (client = 0; client < arbiters[arbiter].nbr_clients; client++) {
int pos;
/* Allocate the requested non-zero number of slots, but
* also give clients with zero-requests one slot each
* while stocks last. We do the latter here, in client
* order. This makes sure zero-request clients are the
* first to get to any spare slots, else those slots
* could, when bandwidth is allocated close to the limit,
* all be allocated to low-index non-zero-request clients
* in the default-fill loop below. Another positive but
* secondary effect is a somewhat better spread of the
* zero-bandwidth clients in the vector, avoiding some of
* the latency that could otherwise be caused by the
* partitioning of non-zero-bandwidth clients at low
* indexes and zero-bandwidth clients at high
* indexes. (Note that this spreading can only affect the
* unallocated bandwidth.) All the above only matters for
* memory-intensive situations, of course.
*/
if (!arbiters[arbiter].requested_slots[region][client]) {
/*
* Skip inactive clients. Also skip zero-slot
* allocations in this pass when there are no known
* free slots.
*/
if (!arbiters[arbiter].active_clients[region][client] ||
unused_slots <= 0)
continue;
unused_slots--;
/* Only allocate one slot for this client. */
interval = NBR_OF_SLOTS;
} else
interval = NBR_OF_SLOTS /
arbiters[arbiter].requested_slots[region][client];
pos = 0;
while (pos < NBR_OF_SLOTS) {
if (val[pos] >= 0)
pos++;
else {
val[pos] = client;
pos += interval;
}
}
}
client = 0;
for (slot = 0; slot < NBR_OF_SLOTS; slot++) {
/*
* Allocate remaining slots in round-robin
* client-number order for active clients. For this
* pass, we ignore requested bandwidth and previous
* allocations.
*/
if (val[slot] < 0) {
int first = client;
while (!arbiters[arbiter].active_clients[region][client]) {
client = (client + 1) %
arbiters[arbiter].nbr_clients;
if (client == first)
break;
}
val[slot] = client;
client = (client + 1) % arbiters[arbiter].nbr_clients;
}
if (arbiter == 0) {
if (region == EXT_REGION)
REG_WR_INT_VECT(marb_foo, regi_marb_foo,
rw_l2_slots, slot, val[slot]);
else if (region == INT_REGION)
REG_WR_INT_VECT(marb_foo, regi_marb_foo,
rw_intm_slots, slot, val[slot]);
} else {
REG_WR_INT_VECT(marb_bar, regi_marb_bar,
rw_ddr2_slots, slot, val[slot]);
}
}
}
extern char _stext, _etext;
static void crisv32_arbiter_init(void)
{
static int initialized;
if (initialized)
return;
initialized = 1;
/*
* CPU caches are always set to active, but with zero
* bandwidth allocated. It should be ok to allocate zero
* bandwidth for the caches, because DMA for other channels
* will supposedly finish, once their programmed amount is
* done, and then the caches will get access according to the
* "fixed scheme" for unclaimed slots. Though, if for some
* use-case somewhere, there's a maximum CPU latency for
* e.g. some interrupt, we have to start allocating specific
* bandwidth for the CPU caches too.
*/
arbiters[0].active_clients[EXT_REGION][11] = 1;
arbiters[0].active_clients[EXT_REGION][12] = 1;
crisv32_arbiter_config(0, EXT_REGION, 0);
crisv32_arbiter_config(0, INT_REGION, 0);
crisv32_arbiter_config(1, EXT_REGION, 0);
/* Pass the arbiter index as dev_id; the interrupt handlers use it
 * to index watches[]. */
if (request_irq(MEMARB_FOO_INTR_VECT, crisv32_foo_arbiter_irq,
IRQF_DISABLED, "arbiter", (void *)0))
printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");
if (request_irq(MEMARB_BAR_INTR_VECT, crisv32_bar_arbiter_irq,
IRQF_DISABLED, "arbiter", (void *)1))
printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");
#ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
arbiter_all_write, NULL);
#endif
/* Set up max burst sizes by default */
REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_rd_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_h264_wr_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_ccd_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_wr_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_vin_rd_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_rd_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_vout_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_sclr_fifo_burst, 3);
REG_WR_INT(marb_bar, regi_marb_bar, rw_l2cache_burst, 3);
}
int crisv32_arbiter_allocate_bandwidth(int client, int region,
unsigned long bandwidth)
{
int i;
int total_assigned = 0;
int total_clients = 0;
int req;
int arbiter = 0;
crisv32_arbiter_init();
if (client & 0xffff0000) {
arbiter = 1;
client >>= 16;
}
for (i = 0; i < arbiters[arbiter].nbr_clients; i++) {
total_assigned += arbiters[arbiter].requested_slots[region][i];
total_clients += arbiters[arbiter].active_clients[region][i];
}
/* Avoid division by 0 for 0-bandwidth requests. */
req = bandwidth == 0
? 0 : NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);
/*
* We make sure that there are enough slots only for non-zero
* requests. Requesting 0 bandwidth *may* allocate slots,
* though if all bandwidth is allocated, such a client won't
* get any and will have to rely on getting memory access
* according to the fixed scheme that's the default when one
* of the slot-allocated clients doesn't claim their slot.
*/
if (total_assigned + req > NBR_OF_SLOTS)
return -ENOMEM;
arbiters[arbiter].active_clients[region][client] = 1;
arbiters[arbiter].requested_slots[region][client] = req;
crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
/* Propagate allocation from foo to bar */
if (arbiter == 0)
crisv32_arbiter_allocate_bandwidth(8 << 16,
EXT_REGION, bandwidth);
return 0;
}
/*
* Main entry for bandwidth deallocation.
*
* Strictly speaking, for a somewhat constant set of clients where
* each client gets a constant bandwidth and is just enabled or
* disabled (somewhat dynamically), no action is necessary here to
* avoid starvation for non-zero-allocation clients, as the allocated
* slots will just be unused. However, handing out those unused slots
* to active clients avoids needless latency if the "fixed scheme"
* would give unclaimed slots to an eager low-index client.
*/
void crisv32_arbiter_deallocate_bandwidth(int client, int region)
{
int i;
int total_assigned = 0;
int arbiter = 0;
if (client & 0xffff0000)
arbiter = 1;
arbiters[arbiter].requested_slots[region][client] = 0;
arbiters[arbiter].active_clients[region][client] = 0;
for (i = 0; i < arbiters[arbiter].nbr_clients; i++)
total_assigned += arbiters[arbiter].requested_slots[region][i];
crisv32_arbiter_config(arbiter, region, NBR_OF_SLOTS - total_assigned);
}
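/*
 * Illustrative sketch (example only, not from the original file): a
 * driver asking for 10 MB/s of external (SDRAM) bandwidth for its
 * client id and releasing it again. Client id 3 is made up for the
 * example; real ids come from the arbiter client map.
 */
#if 0
static void bandwidth_example(void)
{
if (crisv32_arbiter_allocate_bandwidth(3, EXT_REGION, 10000000))
printk(KERN_ERR "no arbiter slots left\n");
/* ... DMA-heavy work ... */
crisv32_arbiter_deallocate_bandwidth(3, EXT_REGION);
}
#endif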
int crisv32_arbiter_watch(unsigned long start, unsigned long size,
unsigned long clients, unsigned long accesses,
watch_callback *cb)
{
int i;
int arbiter;
int used[2] = { 0, 0 };
int ret = 0;
crisv32_arbiter_init();
if (start > 0x80000000) {
printk(KERN_ERR "Arbiter: %lX doesn't look like a "
"physical address", start);
return -EFAULT;
}
spin_lock(&arbiter_lock);
if (clients & 0xffff)
used[0] = 1;
if (clients & 0xffff0000)
used[1] = 1;
for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
if (!used[arbiter])
continue;
for (i = 0; i < NUMBER_OF_BP; i++) {
if (!watches[arbiter][i].used) {
unsigned intr_mask;
if (arbiter)
intr_mask = REG_RD_INT(marb_bar,
regi_marb_bar, rw_intr_mask);
else
intr_mask = REG_RD_INT(marb_foo,
regi_marb_foo, rw_intr_mask);
watches[arbiter][i].used = 1;
watches[arbiter][i].start = start;
watches[arbiter][i].end = start + size;
watches[arbiter][i].cb = cb;
ret |= (i + 1) << (arbiter + 8);
if (arbiter) {
REG_WR_INT(marb_bar_bp,
watches[arbiter][i].instance,
rw_first_addr,
watches[arbiter][i].start);
REG_WR_INT(marb_bar_bp,
watches[arbiter][i].instance,
rw_last_addr,
watches[arbiter][i].end);
REG_WR_INT(marb_bar_bp,
watches[arbiter][i].instance,
rw_op, accesses);
REG_WR_INT(marb_bar_bp,
watches[arbiter][i].instance,
rw_clients,
clients & 0xffff);
} else {
REG_WR_INT(marb_foo_bp,
watches[arbiter][i].instance,
rw_first_addr,
watches[arbiter][i].start);
REG_WR_INT(marb_foo_bp,
watches[arbiter][i].instance,
rw_last_addr,
watches[arbiter][i].end);
REG_WR_INT(marb_foo_bp,
watches[arbiter][i].instance,
rw_op, accesses);
REG_WR_INT(marb_foo_bp,
watches[arbiter][i].instance,
rw_clients, clients >> 16);
}
if (i == 0)
intr_mask |= 1;
else if (i == 1)
intr_mask |= 2;
else if (i == 2)
intr_mask |= 4;
else if (i == 3)
intr_mask |= 8;
if (arbiter)
REG_WR_INT(marb_bar, regi_marb_bar,
rw_intr_mask, intr_mask);
else
REG_WR_INT(marb_foo, regi_marb_foo,
rw_intr_mask, intr_mask);
/* Keep holding arbiter_lock; the single unlock after the
 * loops releases it (unlocking here as well would unlock
 * twice whenever a breakpoint is claimed). */
break;
}
}
}
spin_unlock(&arbiter_lock);
if (ret)
return ret;
else
return -ENOMEM;
}
int crisv32_arbiter_unwatch(int id)
{
int arbiter;
int intr_mask;
crisv32_arbiter_init();
spin_lock(&arbiter_lock);
for (arbiter = 0; arbiter < ARBITERS; arbiter++) {
int id2;
if (arbiter)
intr_mask = REG_RD_INT(marb_bar, regi_marb_bar,
rw_intr_mask);
else
intr_mask = REG_RD_INT(marb_foo, regi_marb_foo,
rw_intr_mask);
id2 = (id & (0xff << (arbiter + 8))) >> (arbiter + 8);
if (id2 == 0)
continue;
id2--;
if ((id2 >= NUMBER_OF_BP) || (!watches[arbiter][id2].used)) {
spin_unlock(&arbiter_lock);
return -EINVAL;
}
memset(&watches[arbiter][id2], 0,
sizeof(struct crisv32_watch_entry));
if (id2 == 0)
intr_mask &= ~1;
else if (id2 == 1)
intr_mask &= ~2;
else if (id2 == 2)
intr_mask &= ~4;
else if (id2 == 3)
intr_mask &= ~8;
if (arbiter)
REG_WR_INT(marb_bar, regi_marb_bar, rw_intr_mask,
intr_mask);
else
REG_WR_INT(marb_foo, regi_marb_foo, rw_intr_mask,
intr_mask);
}
spin_unlock(&arbiter_lock);
return 0;
}
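/*
 * Illustrative sketch (example only, not from the original file): set
 * a write watch on a hypothetical buffer and tear it down again.
 * my_cb, buf and buf_size are made up; the client/access masks mirror
 * the kernel-text watch installed in crisv32_arbiter_init() above.
 */
#if 0
static void my_cb(void)
{
printk(KERN_INFO "watched region was written\n");
}
static void watch_example(void)
{
int id = crisv32_arbiter_watch(virt_to_phys(buf), buf_size,
MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
arbiter_all_write, my_cb);
if (id > 0)
crisv32_arbiter_unwatch(id);
}
#endif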
extern void show_registers(struct pt_regs *regs);
static irqreturn_t
crisv32_foo_arbiter_irq(int irq, void *dev_id)
{
reg_marb_foo_r_masked_intr masked_intr =
REG_RD(marb_foo, regi_marb_foo, r_masked_intr);
reg_marb_foo_bp_r_brk_clients r_clients;
reg_marb_foo_bp_r_brk_addr r_addr;
reg_marb_foo_bp_r_brk_op r_op;
reg_marb_foo_bp_r_brk_first_client r_first;
reg_marb_foo_bp_r_brk_size r_size;
reg_marb_foo_bp_rw_ack ack = {0};
reg_marb_foo_rw_ack_intr ack_intr = {
.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
};
struct crisv32_watch_entry *watch;
unsigned arbiter = (unsigned)dev_id;
masked_intr = REG_RD(marb_foo, regi_marb_foo, r_masked_intr);
if (masked_intr.bp0)
watch = &watches[arbiter][0];
else if (masked_intr.bp1)
watch = &watches[arbiter][1];
else if (masked_intr.bp2)
watch = &watches[arbiter][2];
else if (masked_intr.bp3)
watch = &watches[arbiter][3];
else
return IRQ_NONE;
/* Retrieve all useful information and print it. */
r_clients = REG_RD(marb_foo_bp, watch->instance, r_brk_clients);
r_addr = REG_RD(marb_foo_bp, watch->instance, r_brk_addr);
r_op = REG_RD(marb_foo_bp, watch->instance, r_brk_op);
r_first = REG_RD(marb_foo_bp, watch->instance, r_brk_first_client);
r_size = REG_RD(marb_foo_bp, watch->instance, r_brk_size);
printk(KERN_DEBUG "Arbiter IRQ\n");
printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_clients, r_clients),
REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_addr, r_addr),
REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_op, r_op),
REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_first_client, r_first),
REG_TYPE_CONV(int, reg_marb_foo_bp_r_brk_size, r_size));
REG_WR(marb_foo_bp, watch->instance, rw_ack, ack);
REG_WR(marb_foo, regi_marb_foo, rw_ack_intr, ack_intr);
printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs());
if (watch->cb)
watch->cb();
return IRQ_HANDLED;
}
static irqreturn_t
crisv32_bar_arbiter_irq(int irq, void *dev_id)
{
reg_marb_bar_r_masked_intr masked_intr =
REG_RD(marb_bar, regi_marb_bar, r_masked_intr);
reg_marb_bar_bp_r_brk_clients r_clients;
reg_marb_bar_bp_r_brk_addr r_addr;
reg_marb_bar_bp_r_brk_op r_op;
reg_marb_bar_bp_r_brk_first_client r_first;
reg_marb_bar_bp_r_brk_size r_size;
reg_marb_bar_bp_rw_ack ack = {0};
reg_marb_bar_rw_ack_intr ack_intr = {
.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
};
struct crisv32_watch_entry *watch;
unsigned arbiter = (unsigned)dev_id;
masked_intr = REG_RD(marb_bar, regi_marb_bar, r_masked_intr);
if (masked_intr.bp0)
watch = &watches[arbiter][0];
else if (masked_intr.bp1)
watch = &watches[arbiter][1];
else if (masked_intr.bp2)
watch = &watches[arbiter][2];
else if (masked_intr.bp3)
watch = &watches[arbiter][3];
else
return IRQ_NONE;
/* Retrieve all useful information and print it. */
r_clients = REG_RD(marb_bar_bp, watch->instance, r_brk_clients);
r_addr = REG_RD(marb_bar_bp, watch->instance, r_brk_addr);
r_op = REG_RD(marb_bar_bp, watch->instance, r_brk_op);
r_first = REG_RD(marb_bar_bp, watch->instance, r_brk_first_client);
r_size = REG_RD(marb_bar_bp, watch->instance, r_brk_size);
printk(KERN_DEBUG "Arbiter IRQ\n");
printk(KERN_DEBUG "Clients %X addr %X op %X first %X size %X\n",
REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_clients, r_clients),
REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_addr, r_addr),
REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_op, r_op),
REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_first_client, r_first),
REG_TYPE_CONV(int, reg_marb_bar_bp_r_brk_size, r_size));
REG_WR(marb_bar_bp, watch->instance, rw_ack, ack);
REG_WR(marb_bar, regi_marb_bar, rw_ack_intr, ack_intr);
printk(KERN_DEBUG "IRQ occurred at %X\n", (unsigned)get_irq_regs()->erp);
if (watch->cb)
watch->cb();
return IRQ_HANDLED;
}
| gpl-2.0 |
Sony-Kitakami/android_kernel_sony | kernel/power/block_io.c | 10646 | 2428 | /*
* This file provides functions for block I/O operations on swap/file.
*
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*/
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include "power.h"
/**
* submit - submit BIO request.
* @rw: READ or WRITE.
* @bdev: block device to read from or write to.
* @sector: physical sector of the page.
* @page: page we're reading or writing.
* @bio_chain: list of pending bios (for async reading)
*
* Straight from the textbook - allocate and initialize the bio.
* If we're reading, make sure the page is marked as dirty.
* Then submit it and, if @bio_chain == NULL, wait.
*/
static int submit(int rw, struct block_device *bdev, sector_t sector,
struct page *page, struct bio **bio_chain)
{
const int bio_rw = rw | REQ_SYNC;
struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = end_swap_bio_read;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
(unsigned long long)sector);
bio_put(bio);
return -EFAULT;
}
lock_page(page);
bio_get(bio);
if (bio_chain == NULL) {
submit_bio(bio_rw, bio);
wait_on_page_locked(page);
if (rw == READ)
bio_set_pages_dirty(bio);
bio_put(bio);
} else {
if (rw == READ)
get_page(page); /* These pages are freed later */
bio->bi_private = *bio_chain;
*bio_chain = bio;
submit_bio(bio_rw, bio);
}
return 0;
}
int hib_bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
return submit(READ, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
virt_to_page(addr), bio_chain);
}
int hib_bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
{
return submit(WRITE, hib_resume_bdev, page_off * (PAGE_SIZE >> 9),
virt_to_page(addr), bio_chain);
}
int hib_wait_on_bio_chain(struct bio **bio_chain)
{
struct bio *bio;
struct bio *next_bio;
int ret = 0;
if (bio_chain == NULL)
return 0;
bio = *bio_chain;
if (bio == NULL)
return 0;
while (bio) {
struct page *page;
next_bio = bio->bi_private;
page = bio->bi_io_vec[0].bv_page;
wait_on_page_locked(page);
if (!PageUptodate(page) || PageError(page))
ret = -EIO;
put_page(page);
bio_put(bio);
bio = next_bio;
}
*bio_chain = NULL;
return ret;
}
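/*
 * Illustrative sketch (example only, not from the original file):
 * queue two asynchronous page reads and wait for both with a single
 * chain. The page offsets and destination buffers are hypothetical.
 */
#if 0
static int read_two_pages_example(void *buf0, void *buf1)
{
struct bio *bio_chain = NULL;
int err;
err = hib_bio_read_page(0, buf0, &bio_chain);
if (!err)
err = hib_bio_read_page(1, buf1, &bio_chain);
if (!err)
err = hib_wait_on_bio_chain(&bio_chain); /* -EIO on any failed page */
return err;
}
#endif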
| gpl-2.0 |
cmetcalf-tilera/linux-tile | net/mac80211/mesh_pathtbl.c | 151 | 30033 | /*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN 2
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
time_after(jiffies, mpath->exp_time) && \
!(mpath->flags & MESH_PATH_FIXED))
struct mpath_node {
struct hlist_node list;
struct rcu_head rcu;
/* This indirection allows two different tables to point to the same
* mesh_path structure, useful when resizing
*/
struct mesh_path *mpath;
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
* as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires that we take
 * the read lock or we risk operating on an old table. The write lock is only
 * needed when modifying the number of buckets of a table.
*/
static DEFINE_RWLOCK(pathtbl_resize_lock);
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
return rcu_dereference_protected(mesh_paths,
lockdep_is_held(&pathtbl_resize_lock));
}
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
return rcu_dereference_protected(mpp_paths,
lockdep_is_held(&pathtbl_resize_lock));
}
/*
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
* it's used twice. So it is illegal to do
* for_each_mesh_entry(rcu_dereference(...), ...)
*/
#define for_each_mesh_entry(tbl, p, node, i) \
for (i = 0; i <= tbl->hash_mask; i++) \
hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
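/*
 * Illustrative sketch (example only, not from the original file):
 * correct use of the macro above. The table pointer is dereferenced
 * once into a local variable, since the macro evaluates "tbl" twice.
 */
#if 0
static void walk_paths_example(void)
{
struct mesh_table *tbl;
struct mpath_node *node;
struct hlist_node *p;
int i;
rcu_read_lock();
tbl = rcu_dereference(mesh_paths); /* never inline this in the macro */
for_each_mesh_entry(tbl, p, node, i)
; /* inspect node->mpath here */
rcu_read_unlock();
}
#endif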
static struct mesh_table *mesh_table_alloc(int size_order)
{
int i;
struct mesh_table *newtbl;
newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
if (!newtbl)
return NULL;
newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
(1 << size_order), GFP_ATOMIC);
if (!newtbl->hash_buckets) {
kfree(newtbl);
return NULL;
}
newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
(1 << size_order), GFP_ATOMIC);
if (!newtbl->hashwlock) {
kfree(newtbl->hash_buckets);
kfree(newtbl);
return NULL;
}
newtbl->size_order = size_order;
newtbl->hash_mask = (1 << size_order) - 1;
atomic_set(&newtbl->entries, 0);
get_random_bytes(&newtbl->hash_rnd,
sizeof(newtbl->hash_rnd));
for (i = 0; i <= newtbl->hash_mask; i++)
spin_lock_init(&newtbl->hashwlock[i]);
spin_lock_init(&newtbl->gates_lock);
return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
kfree(tbl->hash_buckets);
kfree(tbl->hashwlock);
kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
struct mpath_node *gate;
int i;
mesh_hash = tbl->hash_buckets;
for (i = 0; i <= tbl->hash_mask; i++) {
spin_lock_bh(&tbl->hashwlock[i]);
hlist_for_each_safe(p, q, &mesh_hash[i]) {
tbl->free_node(p, free_leafs);
atomic_dec(&tbl->entries);
}
spin_unlock_bh(&tbl->hashwlock[i]);
}
if (free_leafs) {
spin_lock_bh(&tbl->gates_lock);
hlist_for_each_entry_safe(gate, p, q,
tbl->known_gates, list) {
hlist_del(&gate->list);
kfree(gate);
}
kfree(tbl->known_gates);
spin_unlock_bh(&tbl->gates_lock);
}
__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
struct mesh_table *newtbl)
{
struct hlist_head *oldhash;
struct hlist_node *p, *q;
int i;
if (atomic_read(&oldtbl->entries)
< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
return -EAGAIN;
newtbl->free_node = oldtbl->free_node;
newtbl->mean_chain_len = oldtbl->mean_chain_len;
newtbl->copy_node = oldtbl->copy_node;
newtbl->known_gates = oldtbl->known_gates;
atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
oldhash = oldtbl->hash_buckets;
for (i = 0; i <= oldtbl->hash_mask; i++)
hlist_for_each(p, &oldhash[i])
if (oldtbl->copy_node(p, newtbl) < 0)
goto errcopy;
return 0;
errcopy:
for (i = 0; i <= newtbl->hash_mask; i++) {
hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
oldtbl->free_node(p, 0);
}
return -ENOMEM;
}
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{
/* Use last four bytes of hw addr and interface index as hash index */
return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
& tbl->hash_mask;
}
/**
*
* mesh_path_assign_nexthop - update mesh path next hop
*
* @mpath: mesh path to update
* @sta: next hop to assign
*
* Locking: mpath->state_lock must be held when calling this function
*/
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
struct sk_buff_head tmpq;
unsigned long flags;
rcu_assign_pointer(mpath->next_hop, sta);
__skb_queue_head_init(&tmpq);
spin_lock_irqsave(&mpath->frame_queue.lock, flags);
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
__skb_queue_tail(&tmpq, skb);
}
skb_queue_splice(&tmpq, &mpath->frame_queue);
spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
struct mesh_path *gate_mpath)
{
struct ieee80211_hdr *hdr;
struct ieee80211s_hdr *mshdr;
int mesh_hdrlen, hdrlen;
char *next_hop;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
if (!(mshdr->flags & MESH_FLAGS_AE)) {
/* size of the fixed part of the mesh header */
mesh_hdrlen = 6;
/* make room for the two extended addresses */
skb_push(skb, 2 * ETH_ALEN);
memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
hdr = (struct ieee80211_hdr *) skb->data;
/* we preserve the previous mesh header and only add
* the new addresses */
mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
mshdr->flags = MESH_FLAGS_AE_A5_A6;
memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
}
/* update next hop */
hdr = (struct ieee80211_hdr *) skb->data;
rcu_read_lock();
next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
memcpy(hdr->addr1, next_hop, ETH_ALEN);
rcu_read_unlock();
memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
*
* mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
*
* This function is used to transfer or copy frames from an unresolved mpath to
* a gate mpath. The function also adds the Address Extension field and
* updates the next hop.
*
* If a frame already has an Address Extension field, only the next hop and
* destination addresses are updated.
*
* The gate mpath must be an active mpath with a valid mpath->next_hop.
*
* @mpath: An active mpath the frames will be sent to (i.e. the gate)
* @from_mpath: The failed mpath
* @copy: When true, copy all the frames to the new mpath queue. When false,
* move them.
*/
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
struct mesh_path *from_mpath,
bool copy)
{
struct sk_buff *skb, *cp_skb = NULL;
struct sk_buff_head gateq, failq;
unsigned long flags;
int num_skbs;
BUG_ON(gate_mpath == from_mpath);
BUG_ON(!gate_mpath->next_hop);
__skb_queue_head_init(&gateq);
__skb_queue_head_init(&failq);
spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
skb_queue_splice_init(&from_mpath->frame_queue, &failq);
spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
num_skbs = skb_queue_len(&failq);
while (num_skbs--) {
skb = __skb_dequeue(&failq);
if (copy) {
cp_skb = skb_copy(skb, GFP_ATOMIC);
if (cp_skb)
__skb_queue_tail(&failq, cp_skb);
}
prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
__skb_queue_tail(&gateq, skb);
}
spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
skb_queue_splice(&gateq, &gate_mpath->frame_queue);
mpath_dbg("Mpath queue for gate %pM has %d frames\n",
gate_mpath->dst,
skb_queue_len(&gate_mpath->frame_queue));
spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
if (!copy)
return;
spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
skb_queue_splice(&failq, &from_mpath->frame_queue);
spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
struct hlist_node *n;
struct hlist_head *bucket;
struct mpath_node *node;
bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
}
}
return NULL;
}
/**
* mesh_path_lookup - look up a path in the mesh path table
* @dst: hardware address (ETH_ALEN length) of destination
* @sdata: local subif
*
* Returns: pointer to the mesh path structure, or NULL if not found
*
* Locking: must be called within a read rcu section.
*/
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
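/*
 * Illustrative sketch (example only, not from the original file): a
 * typical lookup under RCU, as the locking notes above require. "da"
 * is a hypothetical destination address owned by the caller.
 */
#if 0
static void lookup_example(u8 *da, struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
rcu_read_lock();
mpath = mesh_path_lookup(da, sdata);
if (mpath && (mpath->flags & MESH_PATH_ACTIVE)) {
struct sta_info *sta = rcu_dereference(mpath->next_hop);
/* use sta here, while still inside the RCU read section */
}
rcu_read_unlock();
}
#endif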
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
* @idx: index
* @sdata: local subif, or NULL for all entries
*
* Returns: pointer to the mesh path structure, or NULL if not found.
*
* Locking: must be called within a read rcu section.
*/
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl = rcu_dereference(mesh_paths);
struct mpath_node *node;
struct hlist_node *p;
int i;
int j = 0;
for_each_mesh_entry(tbl, p, node, i) {
if (sdata && node->mpath->sdata != sdata)
continue;
if (j++ == idx) {
if (MPATH_EXPIRED(node->mpath)) {
spin_lock_bh(&node->mpath->state_lock);
node->mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&node->mpath->state_lock);
}
return node->mpath;
}
}
return NULL;
}
static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
kfree(node);
}
/**
* mesh_path_add_gate - add the given mpath to a mesh gate to our path table
* @mpath: gate path to add to table
*/
int mesh_path_add_gate(struct mesh_path *mpath)
{
struct mesh_table *tbl;
struct mpath_node *gate, *new_gate;
struct hlist_node *n;
int err;
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
if (gate->mpath == mpath) {
err = -EEXIST;
goto err_rcu;
}
new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_gate) {
err = -ENOMEM;
goto err_rcu;
}
mpath->is_gate = true;
mpath->sdata->u.mesh.num_gates++;
new_gate->mpath = mpath;
spin_lock_bh(&tbl->gates_lock);
hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
spin_unlock_bh(&tbl->gates_lock);
rcu_read_unlock();
mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
mpath->sdata->name, mpath->dst,
mpath->sdata->u.mesh.num_gates);
return 0;
err_rcu:
rcu_read_unlock();
return err;
}
/**
* mesh_gate_del - remove a mesh gate from the list of known gates
* @tbl: table which holds our list of known gates
* @mpath: gate mpath
*
* Returns: 0 on success
*
* Locking: must be called inside rcu_read_lock() section
*/
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
struct mpath_node *gate;
struct hlist_node *p, *q;
hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
if (gate->mpath == mpath) {
spin_lock_bh(&tbl->gates_lock);
hlist_del_rcu(&gate->list);
call_rcu(&gate->rcu, mesh_gate_node_reclaim);
spin_unlock_bh(&tbl->gates_lock);
mpath->sdata->u.mesh.num_gates--;
mpath->is_gate = false;
mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
"%d known gates\n", mpath->sdata->name,
mpath->dst, mpath->sdata->u.mesh.num_gates);
break;
}
return 0;
}
/**
* mesh_gate_num - number of gates known to this interface
* @sdata: subif data
*/
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
return sdata->u.mesh.num_gates;
}
/**
* mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
* @sdata: local subif
*
* Returns: 0 on success
*
* State: the initial state of the new path is set to 0
*/
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
struct hlist_node *n;
int grow = 0;
int err = 0;
u32 hash_idx;
if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
if (is_multicast_ether_addr(dst))
return -ENOTSUPP;
if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
return -ENOSPC;
err = -ENOMEM;
new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_node)
goto err_node_alloc;
read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
new_mpath->sdata = sdata;
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
new_node->mpath = new_mpath;
new_mpath->timer.data = (unsigned long) new_mpath;
new_mpath->timer.function = mesh_path_timer;
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
init_timer(&new_mpath->timer);
tbl = resize_dereference_mesh_paths();
hash_idx = mesh_table_hash(dst, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
spin_lock_bh(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
goto err_exists;
}
hlist_add_head_rcu(&new_node->list, bucket);
if (atomic_inc_return(&tbl->entries) >=
tbl->mean_chain_len * (tbl->hash_mask + 1))
grow = 1;
mesh_paths_generation++;
spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
err_exists:
spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
kfree(new_mpath);
err_path_alloc:
atomic_dec(&sdata->u.mesh.mpaths);
return err;
}
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
struct mesh_table *oldtbl, *newtbl;
write_lock_bh(&pathtbl_resize_lock);
oldtbl = resize_dereference_mesh_paths();
newtbl = mesh_table_alloc(oldtbl->size_order + 1);
if (!newtbl)
goto out;
if (mesh_table_grow(oldtbl, newtbl) < 0) {
__mesh_table_free(newtbl);
goto out;
}
rcu_assign_pointer(mesh_paths, newtbl);
call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
out:
write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
struct mesh_table *oldtbl, *newtbl;
write_lock_bh(&pathtbl_resize_lock);
oldtbl = resize_dereference_mpp_paths();
newtbl = mesh_table_alloc(oldtbl->size_order + 1);
if (!newtbl)
goto out;
if (mesh_table_grow(oldtbl, newtbl) < 0) {
__mesh_table_free(newtbl);
goto out;
}
rcu_assign_pointer(mpp_paths, newtbl);
call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
out:
write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
struct hlist_node *n;
int grow = 0;
int err = 0;
u32 hash_idx;
if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
if (is_multicast_ether_addr(dst))
return -ENOTSUPP;
err = -ENOMEM;
new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_node)
goto err_node_alloc;
read_lock_bh(&pathtbl_resize_lock);
memcpy(new_mpath->dst, dst, ETH_ALEN);
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
new_mpath->sdata = sdata;
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
new_node->mpath = new_mpath;
init_timer(&new_mpath->timer);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
tbl = resize_dereference_mpp_paths();
hash_idx = mesh_table_hash(dst, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
spin_lock_bh(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
goto err_exists;
}
hlist_add_head_rcu(&new_node->list, bucket);
if (atomic_inc_return(&tbl->entries) >=
tbl->mean_chain_len * (tbl->hash_mask + 1))
grow = 1;
spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
ieee80211_queue_work(&local->hw, &sdata->work);
}
return 0;
err_exists:
spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
kfree(new_mpath);
err_path_alloc:
return err;
}
/**
* mesh_plink_broken - deactivates paths and sends perr when a link breaks
*
* @sta: broken peer link
*
* This function must be called from the rate control algorithm if enough
* delivery errors suggest that a peer link is no longer usable.
*/
void mesh_plink_broken(struct sta_info *sta)
{
struct mesh_table *tbl;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
struct ieee80211_sub_if_data *sdata = sta->sdata;
int i;
__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
if (rcu_dereference(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
mpath->dst, cpu_to_le32(mpath->sn),
reason, bcast, sdata);
}
}
rcu_read_unlock();
}
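/*
 * Illustrative sketch (example only, not from the original file): a
 * rate-control status hook could call mesh_plink_broken() once
 * consecutive delivery failures cross a threshold. acked, fail_streak
 * and FAIL_THRESHOLD are hypothetical.
 */
#if 0
if (!acked && ++fail_streak >= FAIL_THRESHOLD) {
mesh_plink_broken(sta);
fail_streak = 0;
}
#endif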
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
del_timer_sync(&node->mpath->timer);
atomic_dec(&sdata->u.mesh.mpaths);
kfree(node->mpath);
kfree(node);
}
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
struct mesh_path *mpath;
mpath = node->mpath;
spin_lock(&mpath->state_lock);
mpath->flags |= MESH_PATH_RESOLVING;
if (mpath->is_gate)
mesh_gate_del(tbl, mpath);
hlist_del_rcu(&node->list);
call_rcu(&node->rcu, mesh_path_node_reclaim);
spin_unlock(&mpath->state_lock);
atomic_dec(&tbl->entries);
}
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
 * @sta: mesh peer to match
*
* RCU notes: this function is called when a mesh plink transitions from
* PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
* allows path creation. This will happen before the sta can be freed (because
* sta_info_destroy() calls this) so any reader in a rcu read block will be
* protected against the plink disappearing.
*/
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
rcu_read_lock();
read_lock_bh(&pathtbl_resize_lock);
tbl = resize_dereference_mesh_paths();
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
if (rcu_dereference(mpath->next_hop) == sta) {
spin_lock_bh(&tbl->hashwlock[i]);
__mesh_path_del(tbl, node);
spin_unlock_bh(&tbl->hashwlock[i]);
}
}
read_unlock_bh(&pathtbl_resize_lock);
rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
WARN_ON(!rcu_read_lock_held());
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
if (mpath->sdata != sdata)
continue;
spin_lock_bh(&tbl->hashwlock[i]);
__mesh_path_del(tbl, node);
spin_unlock_bh(&tbl->hashwlock[i]);
}
}
/**
* mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
*
* This function deletes both mesh paths as well as mesh portal paths.
*
 * @sdata: interface data to match
*
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl;
rcu_read_lock();
read_lock_bh(&pathtbl_resize_lock);
tbl = resize_dereference_mesh_paths();
table_flush_by_iface(tbl, sdata);
tbl = resize_dereference_mpp_paths();
table_flush_by_iface(tbl, sdata);
read_unlock_bh(&pathtbl_resize_lock);
rcu_read_unlock();
}
/**
* mesh_path_del - delete a mesh path from the table
*
* @addr: dst address (ETH_ALEN length)
* @sdata: local subif
*
* Returns: 0 if successful
*/
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_head *bucket;
struct hlist_node *n;
int hash_idx;
int err = 0;
read_lock_bh(&pathtbl_resize_lock);
tbl = resize_dereference_mesh_paths();
hash_idx = mesh_table_hash(addr, sdata, tbl);
bucket = &tbl->hash_buckets[hash_idx];
spin_lock_bh(&tbl->hashwlock[hash_idx]);
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
__mesh_path_del(tbl, node);
goto enddel;
}
}
err = -ENXIO;
enddel:
mesh_paths_generation++;
spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
return err;
}
/**
* mesh_path_tx_pending - sends pending frames in a mesh path queue
*
* @mpath: mesh path to activate
*
* Locking: the state_lock of the mpath structure must NOT be held when calling
* this function.
*/
void mesh_path_tx_pending(struct mesh_path *mpath)
{
if (mpath->flags & MESH_PATH_ACTIVE)
ieee80211_add_pending_skbs(mpath->sdata->local,
&mpath->frame_queue);
}
/**
* mesh_path_send_to_gates - sends pending frames to all known mesh gates
*
* @mpath: mesh path whose queue will be emptied
*
* If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
* are copied from each gate to the next. After frames are copied, the
* mpath queues are emptied onto the transmission queue.
*/
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
struct ieee80211_sub_if_data *sdata = mpath->sdata;
struct hlist_node *n;
struct mesh_table *tbl;
struct mesh_path *from_mpath = mpath;
struct mpath_node *gate = NULL;
bool copy = false;
struct hlist_head *known_gates;
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
known_gates = tbl->known_gates;
rcu_read_unlock();
if (!known_gates)
return -EHOSTUNREACH;
hlist_for_each_entry_rcu(gate, n, known_gates, list) {
if (gate->mpath->sdata != sdata)
continue;
if (gate->mpath->flags & MESH_PATH_ACTIVE) {
mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
from_mpath = gate->mpath;
copy = true;
} else {
mpath_dbg("Not forwarding %p\n", gate->mpath);
mpath_dbg("flags %x\n", gate->mpath->flags);
}
}
hlist_for_each_entry_rcu(gate, n, known_gates, list)
if (gate->mpath->sdata == sdata) {
mpath_dbg("Sending to %pM\n", gate->mpath->dst);
mesh_path_tx_pending(gate->mpath);
}
return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
* @skb: frame to discard
* @sdata: network subif the frame was to be sent through
*
 * Locking: the function must be called within an rcu_read_lock region
*/
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata)
{
kfree_skb(skb);
sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
* mesh_path_flush_pending - free the pending queue of a mesh path
*
* @mpath: mesh path whose queue has to be freed
*
 * Locking: the function must be called within an rcu_read_lock region
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(skb, mpath->sdata);
}
/**
* mesh_path_fix_nexthop - force a specific next hop for a mesh path
*
* @mpath: the mesh path to modify
* @next_hop: the next hop to force
*
 * Locking: this function takes mpath->state_lock itself, so the
 * caller must not already hold it
*/
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
spin_lock_bh(&mpath->state_lock);
mesh_path_assign_nexthop(mpath, next_hop);
mpath->sn = 0xffff;
mpath->metric = 0;
mpath->hop_count = 0;
mpath->exp_time = 0;
mpath->flags |= MESH_PATH_FIXED;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
struct mesh_path *mpath;
struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
hlist_del_rcu(p);
if (free_leafs) {
del_timer_sync(&mpath->timer);
kfree(mpath);
}
kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
struct mesh_path *mpath;
struct mpath_node *node, *new_node;
u32 hash_idx;
new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (new_node == NULL)
return -ENOMEM;
node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
new_node->mpath = mpath;
hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
hlist_add_head(&new_node->list,
&newtbl->hash_buckets[hash_idx]);
return 0;
}
int mesh_pathtbl_init(void)
{
struct mesh_table *tbl_path, *tbl_mpp;
int ret;
tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_path)
return -ENOMEM;
tbl_path->free_node = &mesh_path_node_free;
tbl_path->copy_node = &mesh_path_node_copy;
tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
if (!tbl_path->known_gates) {
ret = -ENOMEM;
goto free_path;
}
INIT_HLIST_HEAD(tbl_path->known_gates);
tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_mpp) {
ret = -ENOMEM;
goto free_path;
}
tbl_mpp->free_node = &mesh_path_node_free;
tbl_mpp->copy_node = &mesh_path_node_copy;
tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
if (!tbl_mpp->known_gates) {
ret = -ENOMEM;
goto free_mpp;
}
INIT_HLIST_HEAD(tbl_mpp->known_gates);
/* Need no locking since this is during init */
RCU_INIT_POINTER(mesh_paths, tbl_path);
RCU_INIT_POINTER(mpp_paths, tbl_mpp);
return 0;
free_mpp:
mesh_table_free(tbl_mpp, true);
free_path:
mesh_table_free(tbl_path, true);
return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
for_each_mesh_entry(tbl, p, node, i) {
if (node->mpath->sdata != sdata)
continue;
mpath = node->mpath;
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mesh_path_del(mpath->dst, mpath->sdata);
}
rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
/* no need for locking during exit path */
mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}
| gpl-2.0 |
kozmikkick/tripndroid-endeavoru-3.4 | net/mac80211/wep.c | 151 | 9020 | /*
* Software WEP encryption implementation
* Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright 2003, Instant802 Networks, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/random.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wep.h"
int ieee80211_wep_init(struct ieee80211_local *local)
{
/* start WEP IV from a random value */
get_random_bytes(&local->wep_iv, WEP_IV_LEN);
local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(local->wep_tx_tfm)) {
local->wep_rx_tfm = ERR_PTR(-EINVAL);
return PTR_ERR(local->wep_tx_tfm);
}
local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(local->wep_rx_tfm)) {
crypto_free_cipher(local->wep_tx_tfm);
local->wep_tx_tfm = ERR_PTR(-EINVAL);
return PTR_ERR(local->wep_rx_tfm);
}
return 0;
}
void ieee80211_wep_free(struct ieee80211_local *local)
{
if (!IS_ERR(local->wep_tx_tfm))
crypto_free_cipher(local->wep_tx_tfm);
if (!IS_ERR(local->wep_rx_tfm))
crypto_free_cipher(local->wep_rx_tfm);
}
static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
{
/*
* Fluhrer, Mantin, and Shamir have reported weaknesses in the
* key scheduling algorithm of RC4. At least IVs (KeyByte + 3,
* 0xff, N) can be used to speedup attacks, so avoid using them.
*/
if ((iv & 0xff00) == 0xff00) {
u8 B = (iv >> 16) & 0xff;
if (B >= 3 && B < 3 + keylen)
return true;
}
return false;
}
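/*
* Worked example (illustrative): for a 5-byte WEP-40 key the weak IVs are
* of the form (B, 0xff, N) with B in [3, 8). The first transmitted IV
* byte lives in bits 16..23 here, so iv == 0x04ff17 gives B == 0x04 with
* a 0xff middle byte, and the function returns true.
*/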
static void ieee80211_wep_get_iv(struct ieee80211_local *local,
int keylen, int keyidx, u8 *iv)
{
local->wep_iv++;
if (ieee80211_wep_weak_iv(local->wep_iv, keylen))
local->wep_iv += 0x0100;
if (!iv)
return;
*iv++ = (local->wep_iv >> 16) & 0xff;
*iv++ = (local->wep_iv >> 8) & 0xff;
*iv++ = local->wep_iv & 0xff;
*iv++ = keyidx << 6;
}
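/*
* Illustrative example: once local->wep_iv has settled at 0x0a0b0c (not a
* weak IV, so no extra skip), keyidx == 2 makes the four bytes written
* above 0x0a 0x0b 0x0c 0x80; the key index rides in the top two bits of
* the last byte.
*/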
static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
struct sk_buff *skb,
int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
u8 *newhdr;
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
skb_headroom(skb) < WEP_IV_LEN))
return NULL;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_key *key)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
skb_pull(skb, WEP_IV_LEN);
}
/* Perform WEP encryption using given key. data buffer must have tailroom
* for 4-byte ICV. data_len must not include this ICV. Note: this function
* does _not_ add IV. data = RC4(data | CRC32(data)) */
int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
size_t klen, u8 *data, size_t data_len)
{
__le32 icv;
int i;
if (IS_ERR(tfm))
return -1;
icv = cpu_to_le32(~crc32_le(~0, data, data_len));
put_unaligned(icv, (__le32 *)(data + data_len));
crypto_cipher_setkey(tfm, rc4key, klen);
for (i = 0; i < data_len + WEP_ICV_LEN; i++)
crypto_cipher_encrypt_one(tfm, data + i, data + i);
return 0;
}
/* Perform WEP encryption on given skb. 4 bytes of extra space (IV) at the
* beginning of the buffer and 4 bytes of extra space (ICV) at the end of the
* buffer will be added. Both IV and ICV will be transmitted, so the
* payload length increases by 8 bytes.
*
* WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
*/
int ieee80211_wep_encrypt(struct ieee80211_local *local,
struct sk_buff *skb,
const u8 *key, int keylen, int keyidx)
{
u8 *iv;
size_t len;
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
if (!iv)
return -1;
len = skb->len - (iv + WEP_IV_LEN - skb->data);
/* Prepend 24-bit IV to RC4 key */
memcpy(rc4key, iv, 3);
/* Copy rest of the WEP key (the secret part) */
memcpy(rc4key + 3, key, keylen);
/* Add room for ICV */
skb_put(skb, WEP_ICV_LEN);
return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
iv + WEP_IV_LEN, len);
}
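/*
* Key layout sketch (read off the memcpy calls above): rc4key holds the
* 3 IV bytes followed by the secret key, so for WEP-104 the 16-byte RC4
* key is iv[0] iv[1] iv[2] k[0] ... k[12].
*/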
/* Perform WEP decryption using given key. data buffer includes encrypted
* payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
* Return 0 on success and -1 on ICV mismatch. */
int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
size_t klen, u8 *data, size_t data_len)
{
__le32 crc;
int i;
if (IS_ERR(tfm))
return -1;
crypto_cipher_setkey(tfm, rc4key, klen);
for (i = 0; i < data_len + WEP_ICV_LEN; i++)
crypto_cipher_decrypt_one(tfm, data + i, data + i);
crc = cpu_to_le32(~crc32_le(~0, data, data_len));
if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0)
/* ICV mismatch */
return -1;
return 0;
}
/* Perform WEP decryption on given skb. Buffer includes whole WEP part of
* the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). skb->len includes both IV and ICV.
*
* Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
* failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload
* is moved to the beginning of the skb and skb length will be reduced.
*/
static int ieee80211_wep_decrypt(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_key *key)
{
u32 klen;
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
u8 keyidx;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
size_t len;
int ret = 0;
if (!ieee80211_has_protected(hdr->frame_control))
return -1;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
return -1;
len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
keyidx = skb->data[hdrlen + 3] >> 6;
if (!key || keyidx != key->conf.keyidx)
return -1;
klen = 3 + key->conf.keylen;
/* Prepend 24-bit IV to RC4 key */
memcpy(rc4key, skb->data + hdrlen, 3);
/* Copy rest of the WEP key (the secret part) */
memcpy(rc4key + 3, key->conf.key, key->conf.keylen);
if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
skb->data + hdrlen + WEP_IV_LEN,
len))
ret = -1;
/* Trim ICV */
skb_trim(skb, skb->len - WEP_ICV_LEN);
/* Remove IV */
memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
skb_pull(skb, WEP_IV_LEN);
return ret;
}
bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
u8 *ivpos;
u32 iv;
if (!ieee80211_has_protected(hdr->frame_control))
return false;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
ivpos = skb->data + hdrlen;
iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
return ieee80211_wep_weak_iv(iv, key->conf.keylen);
}
ieee80211_rx_result
ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control))
return RX_CONTINUE;
if (!(status->flag & RX_FLAG_DECRYPTED)) {
if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
return RX_DROP_UNUSABLE;
} else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
/* remove ICV */
skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
}
return RX_CONTINUE;
}
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!info->control.hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
} else if (info->control.hw_key->flags &
IEEE80211_KEY_FLAG_GENERATE_IV) {
if (!ieee80211_wep_add_iv(tx->local, skb,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
}
return 0;
}
ieee80211_tx_result
ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
skb_queue_walk(&tx->skbs, skb) {
if (wep_encrypt_skb(tx, skb) < 0) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
return TX_DROP;
}
}
return TX_CONTINUE;
}
| gpl-2.0 |
toastcfh/CdMa-HeRoC-2.6.29 | arch/arm/mach-pxa/reset.c | 151 | 1820 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
#include <mach/pxa-regs.h>
#include <mach/reset.h>
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);
static void do_hw_reset(void);
static int reset_gpio = -1;
int init_gpio_reset(int gpio, int output)
{
int rc;
rc = gpio_request(gpio, "reset generator");
if (rc) {
printk(KERN_ERR "Can't request reset_gpio\n");
goto out;
}
if (output)
rc = gpio_direction_output(gpio, 0);
else
rc = gpio_direction_input(gpio);
if (rc) {
printk(KERN_ERR "Can't configure reset_gpio\n");
gpio_free(gpio);
goto out;
}
out:
if (!rc)
reset_gpio = gpio;
return rc;
}
/*
* Trigger GPIO reset.
* This covers various types of logic connecting gpio pin
* to RESET pins (nRESET or GPIO_RESET):
*/
static void do_gpio_reset(void)
{
BUG_ON(reset_gpio == -1);
/* drive it low */
gpio_direction_output(reset_gpio, 0);
mdelay(2);
/* rising edge or drive high */
gpio_set_value(reset_gpio, 1);
mdelay(2);
/* falling edge */
gpio_set_value(reset_gpio, 0);
/* give it some time */
mdelay(10);
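/* reaching this point means the GPIO pulse did not reset the SoC */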
WARN_ON(1);
/* fallback */
do_hw_reset();
}
static void do_hw_reset(void)
{
/* Initialize the watchdog and let it fire */
OWER = OWER_WME;
OSSR = OSSR_M3;
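/* assuming the standard 3.6864 MHz PXA OS timer clock, 368640 ticks == 100 ms */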
OSMR3 = OSCR + 368640; /* ... in 100 ms */
}
void arch_reset(char mode)
{
clear_reset_status(RESET_STATUS_ALL);
switch (mode) {
case 's':
/* Jump into ROM at address 0 */
cpu_reset(0);
break;
case 'g':
do_gpio_reset();
break;
case 'h':
default:
do_hw_reset();
break;
}
}
| gpl-2.0 |
MoKee/android_kernel_samsung_smdk4412 | drivers/video/udlfb.c | 919 | 50312 | /*
* udlfb.c -- Framebuffer driver for DisplayLink USB controller
*
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
* usb-skeleton by GregKH.
*
* Device-specific portions based on information from Displaylink, with work
* from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <video/udlfb.h>
#include "edid.h"
static struct fb_fix_screeninfo dlfb_fix = {
.id = "udlfb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 0,
.ypanstep = 0,
.ywrapstep = 0,
.accel = FB_ACCEL_NONE,
};
static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
FBINFO_VIRTFB |
FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
/*
* There are many DisplayLink-based products, all with unique PIDs. We are able
* to support all volume ones (circa 2009) with a single driver, so we match
* globally on VID. TODO: Probe() needs to detect when we might be running
* "future" chips, and bail on those, so a compatible driver can match.
*/
static struct usb_device_id id_table[] = {
{.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
static int console; /* Optionally allow fbcon to consume first framebuffer */
static int fb_defio; /* Optionally enable experimental fb_defio mmap support */
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dev);
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len);
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size);
static void dlfb_free_urb_list(struct dlfb_data *dev);
/*
* All DisplayLink bulk operations start with 0xAF, followed by specific code
* All operations are written to buffers which then later get sent to device
*/
static char *dlfb_set_register(char *buf, u8 reg, u8 val)
{
*buf++ = 0xAF;
*buf++ = 0x20;
*buf++ = reg;
*buf++ = val;
return buf;
}
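/*
* Example (follows directly from the code above): dlfb_set_register(buf,
* 0x1F, 0x07) appends the byte sequence AF 20 1F 07, which is the DPMS
* powerdown command that dlfb_enable_hvsync() below emits.
*/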
static char *dlfb_vidreg_lock(char *buf)
{
return dlfb_set_register(buf, 0xFF, 0x00);
}
static char *dlfb_vidreg_unlock(char *buf)
{
return dlfb_set_register(buf, 0xFF, 0xFF);
}
/*
* On/Off for driving the DisplayLink framebuffer to the display
* 0x00 H and V sync on
* 0x01 H and V sync off (screen blank but powered)
* 0x07 DPMS powerdown (requires modeset to come back)
*/
static char *dlfb_enable_hvsync(char *buf, bool enable)
{
if (enable)
return dlfb_set_register(buf, 0x1F, 0x00);
else
return dlfb_set_register(buf, 0x1F, 0x07);
}
static char *dlfb_set_color_depth(char *buf, u8 selection)
{
return dlfb_set_register(buf, 0x00, selection);
}
static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
/* the base pointer is 16 bits wide, 0x20 is hi byte. */
wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
return dlfb_set_register(wrptr, 0x22, base);
}
/*
* DisplayLink HW has separate 16bpp and 8bpp framebuffers.
* In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
*/
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
return dlfb_set_register(wrptr, 0x28, base);
}
static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
wrptr = dlfb_set_register(wrptr, reg, value >> 8);
return dlfb_set_register(wrptr, reg+1, value);
}
/*
* This is kind of weird because the controller takes some
* register values in a different byte order than other registers.
*/
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
wrptr = dlfb_set_register(wrptr, reg, value);
return dlfb_set_register(wrptr, reg+1, value >> 8);
}
/*
* LFSR is linear feedback shift register. The reason we have this is
* because the display controller needs to minimize the clock depth of
* various counters used in the display path. So this code reverses the
* provided value into the lfsr16 value by counting backwards to get
* the value that needs to be set in the hardware comparator to get the
* same actual count. This makes sense once you read the above a couple of
* times and think about it from a hardware perspective.
*/
static u16 dlfb_lfsr16(u16 actual_count)
{
u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
while (actual_count--) {
lv = ((lv << 1) |
(((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
& 0xFFFF;
}
return (u16) lv;
}
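/*
* A minimal userspace sketch (illustrative only, guarded by #if 0 so it is
* never compiled into the driver). demo_lfsr16() is a local copy of
* dlfb_lfsr16() above; the expected values come from stepping the shift
* register by hand from its 0xFFFF seed.
*/
#if 0
#include <stdio.h>
#include <stdint.h>
static uint16_t demo_lfsr16(uint16_t actual_count)
{
uint32_t lv = 0xFFFF; /* hw starting value, as in dlfb_lfsr16() */
while (actual_count--) {
lv = ((lv << 1) |
(((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
& 0xFFFF;
}
return (uint16_t) lv;
}
int main(void)
{
/* hand-computed: 1 -> FFFE, 2 -> FFFC, 3 -> FFF9 */
printf("%04X %04X %04X\n",
demo_lfsr16(1), demo_lfsr16(2), demo_lfsr16(3));
return 0;
}
#endif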
/*
* This does LFSR conversion on the value that is to be written.
* See LFSR explanation above for more detail.
*/
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
}
/*
* This takes a standard fbdev screeninfo struct and all of its monitor mode
* details and converts them into the DisplayLink equivalent register commands.
*/
static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
{
u16 xds, yds;
u16 xde, yde;
u16 yec;
/* x display start */
xds = var->left_margin + var->hsync_len;
wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
/* x display end */
xde = xds + var->xres;
wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);
/* y display start */
yds = var->upper_margin + var->vsync_len;
wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
/* y display end */
yde = yds + var->yres;
wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);
/* x end count is active + blanking - 1 */
wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
xde + var->right_margin - 1);
/* libdlo hardcodes hsync start to 1 */
wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);
/* hsync end is width of sync pulse + 1 */
wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);
/* hpixels is active pixels */
wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);
/* yendcount is vertical active + vertical blanking */
yec = var->yres + var->upper_margin + var->lower_margin +
var->vsync_len;
wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);
/* libdlo hardcodes vsync start to 0 */
wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);
/* vsync end is width of vsync pulse */
wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);
/* vpixels is active pixels */
wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);
/* convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k */
wrptr = dlfb_set_register_16be(wrptr, 0x1B,
200*1000*1000/var->pixclock);
return wrptr;
}
/*
* This takes a standard fbdev screeninfo struct that was fetched or prepared
* and then generates the appropriate command sequence that then drives the
* display controller.
*/
static int dlfb_set_video_mode(struct dlfb_data *dev,
struct fb_var_screeninfo *var)
{
char *buf;
char *wrptr;
int retval = 0;
int writesize;
struct urb *urb;
if (!atomic_read(&dev->usb_active))
return -EPERM;
urb = dlfb_get_urb(dev);
if (!urb)
return -ENOMEM;
buf = (char *) urb->transfer_buffer;
/*
* This first section has to do with setting the base address on the
* controller associated with the display. There are 2 base
* pointers; currently, we only use the 16 bpp segment.
*/
wrptr = dlfb_vidreg_lock(buf);
wrptr = dlfb_set_color_depth(wrptr, 0x00);
/* set base for 16bpp segment to 0 */
wrptr = dlfb_set_base16bpp(wrptr, 0);
/* set base for 8bpp segment to end of fb */
wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);
wrptr = dlfb_set_vid_cmds(wrptr, var);
wrptr = dlfb_enable_hvsync(wrptr, true);
wrptr = dlfb_vidreg_unlock(wrptr);
writesize = wrptr - buf;
retval = dlfb_submit_urb(dev, urb, writesize);
return retval;
}
static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long page, pos;
if (offset + size > info->fix.smem_len)
return -EINVAL;
pos = (unsigned long)info->fix.smem_start + offset;
pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
return 0;
}
/*
* Trims identical data from front and back of line
* Sets new front buffer address and width
* And returns byte count of identical pixels
* Assumes CPU natural alignment (unsigned long)
* for back and front buffer ptrs and width
*/
static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
int j, k;
const unsigned long *back = (const unsigned long *) bback;
const unsigned long *front = (const unsigned long *) *bfront;
const int width = *width_bytes / sizeof(unsigned long);
int identical = width;
int start = width;
int end = width;
prefetch((void *) front);
prefetch((void *) back);
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
break;
}
}
for (k = width - 1; k > j; k--) {
if (back[k] != front[k]) {
end = k+1;
break;
}
}
identical = start + (width - end);
*bfront = (u8 *) &front[start];
*width_bytes = (end - start) * sizeof(unsigned long);
return identical * sizeof(unsigned long);
}
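/*
* Worked example (illustrative, 64-bit longs assumed): for *width_bytes ==
* 32 (four longs) where only longs 1 and 2 differ between back and front,
* the loops find start == 1 and end == 3. The function then advances
* *bfront by 8 bytes, shrinks *width_bytes to 16, and returns 16 bytes
* of identical pixels (one untouched long at each end).
*/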
/*
* Render a command stream for an encoded horizontal line segment of pixels.
*
* A command buffer holds several commands.
* It always begins with a fresh command header
* (the protocol doesn't require this, but we enforce it to allow
* multiple buffers to be potentially encoded and sent in parallel).
* A single command encodes one contiguous horizontal line of pixels
*
* The function relies on the client to do all allocation, so that
* rendering can be done directly to output buffers (e.g. USB URBs).
* The function fills the supplied command buffer, providing information
* on where it left off, so the client may call in again with additional
* buffers if the line will take several buffers to complete.
*
* A single command can transmit a maximum of 256 pixels,
* regardless of the compression ratio (protocol design limit).
* To the hardware, 0 for a size byte means 256
*
* Rather than 256 pixel commands which are either rl or raw encoded,
* the rlx command simply assumes alternating raw and rl spans within one cmd.
* This has a slightly larger header overhead, but produces more even results.
* It also processes all data (read and write) in a single pass.
* Performance benchmarks of common cases show it having just slightly better
* compression than 256 pixel raw or rle commands, with similar CPU consumption.
* But for very rl-friendly data, it will not compress quite as well.
*/
static void dlfb_compress_hline(
const uint16_t **pixel_start_ptr,
const uint16_t *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
const uint8_t *const cmd_buffer_end)
{
const uint16_t *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
uint8_t *cmd = *command_buffer_ptr;
const int bpp = 2;
while ((pixel_end > pixel) &&
(cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
uint8_t *raw_pixels_count_byte = 0;
uint8_t *cmd_pixels_count_byte = 0;
const uint16_t *raw_pixel_start = 0;
const uint16_t *cmd_pixel_start, *cmd_pixel_end = 0;
prefetchw((void *) cmd); /* pull in one cache line at least */
*cmd++ = 0xAF;
*cmd++ = 0x6B;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr) & 0xFF);
cmd_pixels_count_byte = cmd++; /* we'll know this later */
cmd_pixel_start = pixel;
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
min((int)(pixel_end - pixel),
(int)(cmd_buffer_end - cmd) / bpp));
prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
while (pixel < cmd_pixel_end) {
const uint16_t * const repeating_pixel = pixel;
*(uint16_t *)cmd = cpu_to_be16p(pixel);
cmd += 2;
pixel++;
if (unlikely((pixel < cmd_pixel_end) &&
(*pixel == *repeating_pixel))) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = ((repeating_pixel -
raw_pixel_start) + 1) & 0xFF;
while ((pixel < cmd_pixel_end)
&& (*pixel == *repeating_pixel)) {
pixel++;
}
/* immediately after raw data is repeat byte */
*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
raw_pixels_count_byte = cmd++;
}
}
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
}
*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
dev_addr += (pixel - cmd_pixel_start) * bpp;
}
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
/* Fill leftover bytes with no-ops */
if (cmd_buffer_end > cmd)
memset(cmd, 0xAF, cmd_buffer_end - cmd);
cmd = (uint8_t *) cmd_buffer_end;
}
*command_buffer_ptr = cmd;
*pixel_start_ptr = pixel;
*device_address_ptr = dev_addr;
return;
}
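/*
* Worked example (hand-traced from the loop above, illustrative): a
* 4-pixel line X X X Y with X == 0x1234 and Y == 0x5678 encodes as
*
* AF 6B a2 a1 a0 04 01 12 34 02 01 56 78
*
* i.e. header, 3 address bytes, total pixel count 4, a raw span of 1
* pixel (X), a repeat byte of 2 (two more Xs), then a final raw span of
* 1 pixel (Y). dev_addr advances by 8 (4 pixels at 2 bytes each).
*/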
/*
* There are 3 copies of every pixel: The front buffer that the fbdev
* client renders to, the actual framebuffer across the USB bus in hardware
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
static int dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
u32 dev_addr = dev->base16 + byte_offset;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
line_end = next_pixel + byte_width;
if (dev->backing_buffer) {
int offset;
const u8 *back_start = (u8 *) (dev->backing_buffer
+ byte_offset);
*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
&byte_width);
offset = next_pixel - line_start;
line_end = next_pixel + byte_width;
dev_addr += offset;
back_start += offset;
line_start += offset;
memcpy((char *)back_start, (char *) line_start,
byte_width);
}
while (next_pixel < line_end) {
dlfb_compress_hline((const uint16_t **) &next_pixel,
(const uint16_t *) line_end, &dev_addr,
(u8 **) &cmd, (u8 *) cmd_end);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
if (dlfb_submit_urb(dev, urb, len))
return 1; /* lost pixels is set */
*sent_ptr += len;
urb = dlfb_get_urb(dev);
if (!urb)
return 1; /* lost_pixels is set */
*urb_ptr = urb;
cmd = urb->transfer_buffer;
cmd_end = &cmd[urb->transfer_buffer_length];
}
}
*urb_buf_ptr = cmd;
return 0;
}
int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
int width, int height, char *data)
{
int i, ret;
char *cmd;
cycles_t start_cycles, end_cycles;
int bytes_sent = 0;
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
start_cycles = get_cycles();
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
if ((width <= 0) ||
(x + width > dev->info->var.xres) ||
(y + height > dev->info->var.yres))
return -EINVAL;
if (!atomic_read(&dev->usb_active))
return 0;
urb = dlfb_get_urb(dev);
if (!urb)
return 0;
cmd = urb->transfer_buffer;
for (i = y; i < y + height ; i++) {
const int line_offset = dev->info->fix.line_length * i;
const int byte_offset = line_offset + (x * BPP);
if (dlfb_render_hline(dev, &urb,
(char *) dev->info->fix.smem_start,
&cmd, byte_offset, width * BPP,
&bytes_identical, &bytes_sent))
goto error;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
int len = cmd - (char *) urb->transfer_buffer;
ret = dlfb_submit_urb(dev, urb, len);
bytes_sent += len;
} else
dlfb_urb_completion(urb);
error:
atomic_add(bytes_sent, &dev->bytes_sent);
atomic_add(bytes_identical, &dev->bytes_identical);
atomic_add(width*height*2, &dev->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
&dev->cpu_kcycles_used);
return 0;
}
/*
* Path triggered by usermode clients who write to filesystem
* e.g. cat filename > /dev/fb1
* Not used by X Windows or text-mode console. But useful for testing.
* Slow because of extra copy and we must assume all pixels dirty.
*/
static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t result;
struct dlfb_data *dev = info->par;
u32 offset = (u32) *ppos;
result = fb_sys_write(info, buf, count, ppos);
if (result > 0) {
int start = max((int)(offset / info->fix.line_length), 0);
int lines = min((u32)((result / info->fix.line_length) + 1),
(u32)info->var.yres);
dlfb_handle_damage(dev, 0, start, info->var.xres,
lines, info->screen_base);
}
return result;
}
/* hardware has native COPY command (see libdlo), but not worth it for fbcon */
static void dlfb_ops_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
struct dlfb_data *dev = info->par;
sys_copyarea(info, area);
dlfb_handle_damage(dev, area->dx, area->dy,
area->width, area->height, info->screen_base);
}
static void dlfb_ops_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct dlfb_data *dev = info->par;
sys_imageblit(info, image);
dlfb_handle_damage(dev, image->dx, image->dy,
image->width, image->height, info->screen_base);
}
static void dlfb_ops_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
struct dlfb_data *dev = info->par;
sys_fillrect(info, rect);
dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width,
rect->height, info->screen_base);
}
/*
* NOTE: fb_defio.c is holding info->fbdefio.mutex
* Touching ANY framebuffer memory that triggers a page fault
* in fb_defio will cause a deadlock, when it also tries to
* grab the same mutex.
*/
static void dlfb_dpy_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct page *cur;
struct fb_deferred_io *fbdefio = info->fbdefio;
struct dlfb_data *dev = info->par;
struct urb *urb;
char *cmd;
cycles_t start_cycles, end_cycles;
int bytes_sent = 0;
int bytes_identical = 0;
int bytes_rendered = 0;
if (!fb_defio)
return;
if (!atomic_read(&dev->usb_active))
return;
start_cycles = get_cycles();
urb = dlfb_get_urb(dev);
if (!urb)
return;
cmd = urb->transfer_buffer;
/* walk the written page list and render each to device */
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
if (dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start,
&cmd, cur->index << PAGE_SHIFT,
PAGE_SIZE, &bytes_identical, &bytes_sent))
goto error;
bytes_rendered += PAGE_SIZE;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
int len = cmd - (char *) urb->transfer_buffer;
dlfb_submit_urb(dev, urb, len);
bytes_sent += len;
} else
dlfb_urb_completion(urb);
error:
atomic_add(bytes_sent, &dev->bytes_sent);
atomic_add(bytes_identical, &dev->bytes_identical);
atomic_add(bytes_rendered, &dev->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
&dev->cpu_kcycles_used);
}
static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
{
int i;
int ret;
char *rbuf;
rbuf = kmalloc(2, GFP_KERNEL);
if (!rbuf)
return 0;
for (i = 0; i < len; i++) {
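/* bRequestType 0xC0 (USB_DIR_IN | USB_TYPE_VENDOR), vendor request 0x02;
* the high byte of wValue selects the EDID byte index and the byte
* itself comes back in rbuf[1] */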
ret = usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0), (0x02),
(0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
HZ);
if (ret < 1) {
pr_err("Read EDID byte %d failed err %x\n", i, ret);
i--;
break;
}
edid[i] = rbuf[1];
}
kfree(rbuf);
return i;
}
static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct dlfb_data *dev = info->par;
struct dloarea *area = NULL;
if (!atomic_read(&dev->usb_active))
return 0;
/* TODO: Update X server to get this from sysfs instead */
if (cmd == DLFB_IOCTL_RETURN_EDID) {
char *edid = (char *)arg;
if (copy_to_user(edid, dev->edid, dev->edid_size))
return -EFAULT;
return 0;
}
/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
/*
* If we have a damage-aware client, turn fb_defio "off"
* to avoid the perf impact of unnecessary page fault handling.
* Done by resetting the delay for this fb_info to a very
* long period. Pages will become writable and stay that way.
* Reset to normal value when all clients have closed this fb.
*/
if (info->fbdefio)
info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE;
area = (struct dloarea *)arg;
if (area->x < 0)
area->x = 0;
if (area->x > info->var.xres)
area->x = info->var.xres;
if (area->y < 0)
area->y = 0;
if (area->y > info->var.yres)
area->y = info->var.yres;
dlfb_handle_damage(dev, area->x, area->y, area->w, area->h,
info->screen_base);
}
return 0;
}
/* taken from vesafb */
static int
dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *info)
{
int err = 0;
if (regno >= info->cmap.len)
return 1;
if (regno < 16) {
if (info->var.red.offset == 10) {
/* 1:5:5:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
} else {
/* 0:5:6:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800)) |
((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
}
}
return err;
}
/*
* It's common for several clients to have framebuffer open simultaneously.
* e.g. both fbcon and X. Makes things interesting.
* Assumes caller is holding info->lock (for open and release at least)
*/
static int dlfb_ops_open(struct fb_info *info, int user)
{
struct dlfb_data *dev = info->par;
/*
* fbcon aggressively connects to first framebuffer it finds,
* preventing other clients (X) from working properly. Usually
* not what the user wants. Fail by default with option to enable.
*/
if ((user == 0) && (!console))
return -EBUSY;
/* If the USB device is gone, we don't accept new opens */
if (dev->virtualized)
return -ENODEV;
dev->fb_count++;
kref_get(&dev->kref);
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
struct fb_deferred_io *fbdefio;
fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
fbdefio->deferred_io = dlfb_dpy_deferred_io;
}
info->fbdefio = fbdefio;
fb_deferred_io_init(info);
}
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, dev->fb_count);
return 0;
}
/*
* Called when all client interfaces to start transactions have been disabled,
* and all references to our device instance (dlfb_data) are released.
* Every transaction must have a reference, so we know we are fully spun down
*/
static void dlfb_free(struct kref *kref)
{
struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);
/* this function will wait for all in-flight urbs to complete */
if (dev->urbs.count > 0)
dlfb_free_urb_list(dev);
if (dev->backing_buffer)
vfree(dev->backing_buffer);
kfree(dev->edid);
pr_warn("freeing dlfb_data %p\n", dev);
kfree(dev);
}
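/*
* Reference ledger (derived from this file): the kref_init() in probe is
* dropped by disconnect; the extra kref_get() in probe is dropped by
* dlfb_free_framebuffer_work(); each open takes one more reference that
* release puts back. dlfb_free() runs only once all of these are gone.
*/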
static void dlfb_release_urb_work(struct work_struct *work)
{
struct urb_node *unode = container_of(work, struct urb_node,
release_urb_work.work);
up(&unode->dev->urbs.limit_sem);
}
static void dlfb_free_framebuffer_work(struct work_struct *work)
{
struct dlfb_data *dev = container_of(work, struct dlfb_data,
free_framebuffer_work.work);
struct fb_info *info = dev->info;
int node = info->node;
unregister_framebuffer(info);
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
if (info->screen_base)
vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
dev->info = NULL;
/* Assume info structure is freed after this point */
framebuffer_release(info);
pr_warn("fb_info for /dev/fb%d has been freed\n", node);
/* ref taken in probe() as part of registering framebuffer */
kref_put(&dev->kref, dlfb_free);
}
/*
* Assumes caller is holding info->lock mutex (for open and release at least)
*/
static int dlfb_ops_release(struct fb_info *info, int user)
{
struct dlfb_data *dev = info->par;
dev->fb_count--;
/* We can't free fb_info here - fbmem will touch it when we return */
if (dev->virtualized && (dev->fb_count == 0))
schedule_delayed_work(&dev->free_framebuffer_work, HZ);
if ((dev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
info->fbops->fb_mmap = dlfb_ops_mmap;
}
pr_warn("released /dev/fb%d user=%d count=%d\n",
info->node, user, dev->fb_count);
kref_put(&dev->kref, dlfb_free);
return 0;
}
/*
* Check whether a video mode is supported by the DisplayLink chip
* We start from monitor's modes, so don't need to filter that here
*/
static int dlfb_is_valid_mode(struct fb_videomode *mode,
struct fb_info *info)
{
struct dlfb_data *dev = info->par;
if (mode->xres * mode->yres > dev->sku_pixel_limit) {
pr_warn("%dx%d beyond chip capabilities\n",
mode->xres, mode->yres);
return 0;
}
pr_info("%dx%d valid mode\n", mode->xres, mode->yres);
return 1;
}
static void dlfb_var_color_format(struct fb_var_screeninfo *var)
{
const struct fb_bitfield red = { 11, 5, 0 };
const struct fb_bitfield green = { 5, 6, 0 };
const struct fb_bitfield blue = { 0, 5, 0 };
var->bits_per_pixel = 16;
var->red = red;
var->green = green;
var->blue = blue;
}
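/*
* Illustrative: this is standard RGB565, red in bits 15..11, green in
* bits 10..5, blue in bits 4..0. Pure green is 0x07E0; the 0x37e6
* "greenscreen" painted in dlfb_ops_set_par() is full green with small
* red and blue components.
*/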
static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct fb_videomode mode;
/* TODO: support dynamically changing framebuffer size */
if ((var->xres * var->yres * 2) > info->fix.smem_len)
return -EINVAL;
/* set device-specific elements of var unrelated to mode */
dlfb_var_color_format(var);
fb_var_to_videomode(&mode, var);
if (!dlfb_is_valid_mode(&mode, info))
return -EINVAL;
return 0;
}
static int dlfb_ops_set_par(struct fb_info *info)
{
struct dlfb_data *dev = info->par;
int result;
u16 *pix_framebuffer;
int i;
pr_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres);
result = dlfb_set_video_mode(dev, &info->var);
if ((result == 0) && (dev->fb_count == 0)) {
/* paint greenscreen */
pix_framebuffer = (u16 *) info->screen_base;
for (i = 0; i < info->fix.smem_len / 2; i++)
pix_framebuffer[i] = 0x37e6;
dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres,
info->screen_base);
}
return result;
}
/*
* In order to come back from full DPMS off, we need to set the mode again
*/
static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
{
struct dlfb_data *dev = info->par;
if (blank_mode != FB_BLANK_UNBLANK) {
char *bufptr;
struct urb *urb;
urb = dlfb_get_urb(dev);
if (!urb)
return 0;
bufptr = (char *) urb->transfer_buffer;
bufptr = dlfb_vidreg_lock(bufptr);
bufptr = dlfb_enable_hvsync(bufptr, false);
bufptr = dlfb_vidreg_unlock(bufptr);
dlfb_submit_urb(dev, urb, bufptr -
(char *) urb->transfer_buffer);
} else {
dlfb_set_video_mode(dev, &info->var);
}
return 0;
}
static struct fb_ops dlfb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = dlfb_ops_write,
.fb_setcolreg = dlfb_ops_setcolreg,
.fb_fillrect = dlfb_ops_fillrect,
.fb_copyarea = dlfb_ops_copyarea,
.fb_imageblit = dlfb_ops_imageblit,
.fb_mmap = dlfb_ops_mmap,
.fb_ioctl = dlfb_ops_ioctl,
.fb_open = dlfb_ops_open,
.fb_release = dlfb_ops_release,
.fb_blank = dlfb_ops_blank,
.fb_check_var = dlfb_ops_check_var,
.fb_set_par = dlfb_ops_set_par,
};
/*
* Assumes &info->lock held by caller
* Assumes no active clients have framebuffer open
*/
static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
{
int retval = -ENOMEM;
int old_len = info->fix.smem_len;
int new_len;
unsigned char *old_fb = info->screen_base;
unsigned char *new_fb;
unsigned char *new_back;
pr_warn("Reallocating framebuffer. Addresses will change!\n");
new_len = info->fix.line_length * info->var.yres;
if (PAGE_ALIGN(new_len) > old_len) {
/*
* Alloc system memory for virtual framebuffer
*/
new_fb = vmalloc(new_len);
if (!new_fb) {
pr_err("Virtual framebuffer alloc failed\n");
goto error;
}
if (info->screen_base) {
memcpy(new_fb, old_fb, old_len);
vfree(info->screen_base);
}
info->screen_base = new_fb;
info->fix.smem_len = PAGE_ALIGN(new_len);
info->fix.smem_start = (unsigned long) new_fb;
info->flags = udlfb_info_flags;
/*
* Second framebuffer copy to mirror the framebuffer state
* on the physical USB device. We can function without this.
* But with imperfect damage info we may send pixels over USB
* that were, in fact, unchanged - wasting limited USB bandwidth
*/
new_back = vzalloc(new_len);
if (!new_back)
pr_info("No shadow/backing buffer allocated\n");
else {
if (dev->backing_buffer)
vfree(dev->backing_buffer);
dev->backing_buffer = new_back;
}
}
retval = 0;
error:
return retval;
}
/*
* 1) Get EDID from hw, or use sw default
* 2) Parse into various fb_info structs
* 3) Allocate virtual framebuffer memory to back highest res mode
*
* Parses EDID into three places used by various parts of fbdev:
* fb_var_screeninfo contains the timing of the monitor's preferred mode
* fb_info.monspecs is full parsed EDID info, including monspecs.modedb
* fb_info.modelist is a linked list of all monitor & VESA modes which work
*
* If EDID is not readable/valid, then modelist is all VESA modes,
* monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
* Returns 0 if successful
*/
static int dlfb_setup_modes(struct dlfb_data *dev,
struct fb_info *info,
char *default_edid, size_t default_edid_size)
{
int i;
const struct fb_videomode *default_vmode = NULL;
int result = 0;
char *edid;
int tries = 3;
if (info->dev) /* only use mutex if info has been registered */
mutex_lock(&info->lock);
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid) {
result = -ENOMEM;
goto error;
}
fb_destroy_modelist(&info->modelist);
memset(&info->monspecs, 0, sizeof(info->monspecs));
/*
* Try to (re)read EDID from hardware first
* EDID data may return, but not parse as valid
* Try again a few times, in case of e.g. analog cable noise
*/
while (tries--) {
i = dlfb_get_edid(dev, edid, EDID_LENGTH);
if (i >= EDID_LENGTH)
fb_edid_to_monspecs(edid, &info->monspecs);
if (info->monspecs.modedb_len > 0) {
dev->edid = edid;
dev->edid_size = i;
break;
}
}
/* If that fails, use a previously returned EDID if available */
if (info->monspecs.modedb_len == 0) {
pr_err("Unable to get valid EDID from device/display\n");
if (dev->edid) {
fb_edid_to_monspecs(dev->edid, &info->monspecs);
if (info->monspecs.modedb_len > 0)
pr_err("Using previously queried EDID\n");
}
}
/* If that fails, use the default EDID we were handed */
if (info->monspecs.modedb_len == 0) {
if (default_edid_size >= EDID_LENGTH) {
fb_edid_to_monspecs(default_edid, &info->monspecs);
if (info->monspecs.modedb_len > 0) {
memcpy(edid, default_edid, default_edid_size);
dev->edid = edid;
dev->edid_size = default_edid_size;
pr_err("Using default/backup EDID\n");
}
}
}
/* If we've got modes, let's pick a best default mode */
if (info->monspecs.modedb_len > 0) {
for (i = 0; i < info->monspecs.modedb_len; i++) {
if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
fb_add_videomode(&info->monspecs.modedb[i],
&info->modelist);
else {
if (i == 0)
/* if we've removed top/best mode */
info->monspecs.misc
&= ~FB_MISC_1ST_DETAIL;
}
}
default_vmode = fb_find_best_display(&info->monspecs,
&info->modelist);
}
/* If everything else has failed, fall back to safe default mode */
if (default_vmode == NULL) {
struct fb_videomode fb_vmode = {0};
/*
* Add the standard VESA modes to our modelist
* Since we don't have EDID, there may be modes that
* overspec monitor and/or are incorrect aspect ratio, etc.
* But at least the user has a chance to choose
*/
for (i = 0; i < VESA_MODEDB_SIZE; i++) {
if (dlfb_is_valid_mode((struct fb_videomode *)
&vesa_modes[i], info))
fb_add_videomode(&vesa_modes[i],
&info->modelist);
}
/*
* default to resolution safe for projectors
* (since they are most common case without EDID)
*/
fb_vmode.xres = 800;
fb_vmode.yres = 600;
fb_vmode.refresh = 60;
default_vmode = fb_find_nearest_mode(&fb_vmode,
&info->modelist);
}
/* If we have a good mode and no active clients */
if ((default_vmode != NULL) && (dev->fb_count == 0)) {
fb_videomode_to_var(&info->var, default_vmode);
dlfb_var_color_format(&info->var);
/*
* with mode size info, we can now alloc our framebuffer.
*/
memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix));
info->fix.line_length = info->var.xres *
(info->var.bits_per_pixel / 8);
result = dlfb_realloc_framebuffer(dev, info);
} else
result = -EINVAL;
error:
if (edid && (dev->edid != edid))
kfree(edid);
if (info->dev)
mutex_unlock(&info->lock);
return result;
}
static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
return snprintf(buf, PAGE_SIZE, "%u\n",
atomic_read(&dev->bytes_rendered));
}
static ssize_t metrics_bytes_identical_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
return snprintf(buf, PAGE_SIZE, "%u\n",
atomic_read(&dev->bytes_identical));
}
static ssize_t metrics_bytes_sent_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
return snprintf(buf, PAGE_SIZE, "%u\n",
atomic_read(&dev->bytes_sent));
}
static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
struct device_attribute *a, char *buf) {
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
return snprintf(buf, PAGE_SIZE, "%u\n",
atomic_read(&dev->cpu_kcycles_used));
}
static ssize_t edid_show(
struct file *filp,
struct kobject *kobj, struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
struct device *fbdev = container_of(kobj, struct device, kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
if (dev->edid == NULL)
return 0;
if ((off >= dev->edid_size) || (count > dev->edid_size))
return 0;
if (off + count > dev->edid_size)
count = dev->edid_size - off;
pr_info("sysfs edid copy %p to %p, %d bytes\n",
dev->edid, buf, (int) count);
memcpy(buf, dev->edid, count);
return count;
}
static ssize_t edid_store(
struct file *filp,
struct kobject *kobj, struct bin_attribute *a,
char *src, loff_t src_off, size_t src_size) {
struct device *fbdev = container_of(kobj, struct device, kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
/* We only support write of entire EDID at once, no offset */
if ((src_size != EDID_LENGTH) || (src_off != 0))
return 0;
dlfb_setup_modes(dev, fb_info, src, src_size);
if (dev->edid && (memcmp(src, dev->edid, src_size) == 0)) {
pr_info("sysfs written EDID is new default\n");
dlfb_ops_set_par(fb_info);
return src_size;
} else
return 0;
}
static ssize_t metrics_reset_store(struct device *fbdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dev = fb_info->par;
atomic_set(&dev->bytes_rendered, 0);
atomic_set(&dev->bytes_identical, 0);
atomic_set(&dev->bytes_sent, 0);
atomic_set(&dev->cpu_kcycles_used, 0);
return count;
}
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0666,
.size = EDID_LENGTH,
.read = edid_show,
.write = edid_store
};
static struct device_attribute fb_device_attrs[] = {
__ATTR_RO(metrics_bytes_rendered),
__ATTR_RO(metrics_bytes_identical),
__ATTR_RO(metrics_bytes_sent),
__ATTR_RO(metrics_cpu_kcycles_used),
__ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
};
/*
* This is necessary before we can communicate with the display controller.
*/
static int dlfb_select_std_channel(struct dlfb_data *dev)
{
int ret;
u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
0x60, 0xFE, 0xC6, 0x97,
0x16, 0x3D, 0x47, 0xF2 };
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
return ret;
}
static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev,
struct usb_device *usbdev)
{
char *desc;
char *buf;
char *desc_end;
u8 total_len = 0;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
return false;
desc = buf;
total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
if (total_len > 5) {
pr_info("vendor descriptor length:%x data:%02x %02x %02x %02x" \
"%02x %02x %02x %02x %02x %02x %02x\n",
total_len, desc[0],
desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
desc[7], desc[8], desc[9], desc[10]);
if ((desc[0] != total_len) || /* descriptor length */
(desc[1] != 0x5f) || /* vendor descriptor type */
(desc[2] != 0x01) || /* version (2 bytes) */
(desc[3] != 0x00) ||
(desc[4] != total_len - 2)) /* length after type */
goto unrecognized;
desc_end = desc + total_len;
desc += 5; /* the fixed header we've already parsed */
while (desc < desc_end) {
u8 length;
u16 key;
key = *((u16 *) desc);
desc += sizeof(u16);
length = *desc;
desc++;
switch (key) {
case 0x0200: { /* max_area */
u32 max_area;
max_area = le32_to_cpu(*((u32 *)desc));
pr_warn("DL chip limited to %d pixel modes\n",
max_area);
dev->sku_pixel_limit = max_area;
break;
}
default:
break;
}
desc += length;
}
}
goto success;
unrecognized:
/* allow udlfb to load for now even if firmware unrecognized */
pr_err("Unrecognized vendor firmware descriptor\n");
success:
kfree(buf);
return true;
}
static int dlfb_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *usbdev;
struct dlfb_data *dev = NULL;
struct fb_info *info = NULL;
int retval = -ENOMEM;
int i;
/* usb initialization */
usbdev = interface_to_usbdev(interface);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
err("dlfb_usb_probe: failed alloc of dev struct\n");
goto error;
}
/* we need to wait for both usb and fbdev to spin down on disconnect */
kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */
dev->udev = usbdev;
dev->gdev = &usbdev->dev; /* our generic struct device * */
usb_set_intfdata(interface, dev);
pr_info("%s %s - serial #%s\n",
usbdev->manufacturer, usbdev->product, usbdev->serial);
pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
usbdev->descriptor.bcdDevice, dev);
pr_info("console enable=%d\n", console);
pr_info("fb_defio enable=%d\n", fb_defio);
dev->sku_pixel_limit = 2048 * 1152; /* default to maximum */
if (!dlfb_parse_vendor_descriptor(dev, usbdev)) {
pr_err("firmware not recognized. Assume incompatible device\n");
goto error;
}
if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
retval = -ENOMEM;
pr_err("dlfb_alloc_urb_list failed\n");
goto error;
}
/* We don't register a new USB class. Our client interface is fbdev */
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &usbdev->dev);
if (!info) {
retval = -ENOMEM;
pr_err("framebuffer_alloc failed\n");
goto error;
}
dev->info = info;
info->par = dev;
info->pseudo_palette = dev->pseudo_palette;
info->fbops = &dlfb_ops;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
pr_err("fb_alloc_cmap failed %x\n", retval);
goto error;
}
INIT_DELAYED_WORK(&dev->free_framebuffer_work,
dlfb_free_framebuffer_work);
INIT_LIST_HEAD(&info->modelist);
retval = dlfb_setup_modes(dev, info, NULL, 0);
if (retval != 0) {
pr_err("unable to find common mode for display and adapter\n");
goto error;
}
/* ready to begin using device */
atomic_set(&dev->usb_active, 1);
dlfb_select_std_channel(dev);
dlfb_ops_check_var(&info->var, info);
dlfb_ops_set_par(info);
retval = register_framebuffer(info);
if (retval < 0) {
pr_err("register_framebuffer failed %d\n", retval);
goto error;
}
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
retval = device_create_file(info->dev, &fb_device_attrs[i]);
if (retval) {
pr_err("device_create_file failed %d\n", retval);
goto err_del_attrs;
}
}
retval = device_create_bin_file(info->dev, &edid_attr);
if (retval) {
pr_err("device_create_bin_file failed %d\n", retval);
goto err_del_attrs;
}
pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
" Using %dK framebuffer memory\n", info->node,
info->var.xres, info->var.yres,
((dev->backing_buffer) ?
info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
return 0;
err_del_attrs:
for (i -= 1; i >= 0; i--)
device_remove_file(info->dev, &fb_device_attrs[i]);
error:
if (dev) {
if (info) {
if (info->cmap.len != 0)
fb_dealloc_cmap(&info->cmap);
if (info->monspecs.modedb)
fb_destroy_modedb(info->monspecs.modedb);
if (info->screen_base)
vfree(info->screen_base);
fb_destroy_modelist(&info->modelist);
framebuffer_release(info);
}
if (dev->backing_buffer)
vfree(dev->backing_buffer);
kref_put(&dev->kref, dlfb_free); /* ref for framebuffer */
kref_put(&dev->kref, dlfb_free); /* last ref from kref_init */
/* dev has been deallocated. Do not dereference */
}
return retval;
}
static void dlfb_usb_disconnect(struct usb_interface *interface)
{
struct dlfb_data *dev;
struct fb_info *info;
int i;
dev = usb_get_intfdata(interface);
info = dev->info;
pr_info("USB disconnect starting\n");
/* we virtualize until all fb clients release. Then we free */
dev->virtualized = true;
/* When non-active we'll update virtual framebuffer, but no new urbs */
atomic_set(&dev->usb_active, 0);
/* remove udlfb's sysfs interfaces */
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
device_remove_file(info->dev, &fb_device_attrs[i]);
device_remove_bin_file(info->dev, &edid_attr);
unlink_framebuffer(info);
usb_set_intfdata(interface, NULL);
/* if clients still have us open, will be freed on last close */
if (dev->fb_count == 0)
schedule_delayed_work(&dev->free_framebuffer_work, 0);
/* release reference taken by kref_init in probe() */
kref_put(&dev->kref, dlfb_free);
/* consider dlfb_data freed */
return;
}
static struct usb_driver dlfb_driver = {
.name = "udlfb",
.probe = dlfb_usb_probe,
.disconnect = dlfb_usb_disconnect,
.id_table = id_table,
};
static int __init dlfb_module_init(void)
{
int res;
res = usb_register(&dlfb_driver);
if (res)
err("usb_register failed. Error number %d", res);
return res;
}
static void __exit dlfb_module_exit(void)
{
usb_deregister(&dlfb_driver);
}
module_init(dlfb_module_init);
module_exit(dlfb_module_exit);
static void dlfb_urb_completion(struct urb *urb)
{
struct urb_node *unode = urb->context;
struct dlfb_data *dev = unode->dev;
unsigned long flags;
/* sync/async unlink faults aren't errors */
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)) {
pr_err("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
atomic_set(&dev->lost_pixels, 1);
}
}
urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */
spin_lock_irqsave(&dev->urbs.lock, flags);
list_add_tail(&unode->entry, &dev->urbs.list);
dev->urbs.available++;
spin_unlock_irqrestore(&dev->urbs.lock, flags);
/*
* When using fb_defio, we deadlock if up() is called
* while another thread is waiting, so defer the up() to a workqueue.
*/
if (fb_defio)
schedule_delayed_work(&unode->release_urb_work, 0);
else
up(&dev->urbs.limit_sem);
}
static void dlfb_free_urb_list(struct dlfb_data *dev)
{
int count = dev->urbs.count;
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
int ret;
unsigned long flags;
pr_notice("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
/* Getting interrupted means a leak, but ok at shutdown */
ret = down_interruptible(&dev->urbs.limit_sem);
if (ret)
break;
spin_lock_irqsave(&dev->urbs.lock, flags);
node = dev->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
spin_unlock_irqrestore(&dev->urbs.lock, flags);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
/* Free each separately allocated piece */
usb_free_coherent(urb->dev, dev->urbs.size,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
kfree(node);
}
}
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
{
int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
spin_lock_init(&dev->urbs.lock);
dev->urbs.size = size;
INIT_LIST_HEAD(&dev->urbs.list);
while (i < count) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
unode->dev = dev;
INIT_DELAYED_WORK(&unode->release_urb_work,
dlfb_release_urb_work);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
break;
}
unode->urb = urb;
buf = usb_alloc_coherent(dev->udev, MAX_TRANSFER, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
break;
}
/* urb->transfer_buffer_length set to actual before submit */
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
buf, size, dlfb_urb_completion, unode);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
list_add_tail(&unode->entry, &dev->urbs.list);
i++;
}
sema_init(&dev->urbs.limit_sem, i);
dev->urbs.count = i;
dev->urbs.available = i;
pr_notice("allocated %d %d byte urbs\n", i, (int) size);
return i;
}
static struct urb *dlfb_get_urb(struct dlfb_data *dev)
{
int ret = 0;
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
if (ret) {
atomic_set(&dev->lost_pixels, 1);
pr_warn("wait for urb interrupted: %x available: %d\n",
ret, dev->urbs.available);
goto error;
}
spin_lock_irqsave(&dev->urbs.lock, flags);
BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
entry = dev->urbs.list.next;
list_del_init(entry);
dev->urbs.available--;
spin_unlock_irqrestore(&dev->urbs.lock, flags);
unode = list_entry(entry, struct urb_node, entry);
urb = unode->urb;
error:
return urb;
}
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
{
int ret;
BUG_ON(len > dev->urbs.size);
urb->transfer_buffer_length = len; /* set to actual payload len */
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
dlfb_urb_completion(urb); /* because no one else will */
atomic_set(&dev->lost_pixels, 1);
pr_err("usb_submit_urb error %x\n", ret);
}
return ret;
}
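/*
* Editor's note: a hypothetical caller showing the intended
* get/fill/submit pattern for the URB pool above. The function and its
* command bytes are illustrative only, not driver API; the caller must
* keep len <= dev->urbs.size (enforced by the BUG_ON in submit).
*/
static int dlfb_send_example(struct dlfb_data *dev, const char *cmd,
size_t len)
{
struct urb *urb = dlfb_get_urb(dev);
if (!urb)
return -ETIMEDOUT; /* pool exhausted or device gone */
memcpy(urb->transfer_buffer, cmd, len);
return dlfb_submit_urb(dev, urb, len); /* completion requeues urb */
}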
module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(console, "Allow fbcon to consume first framebuffer found");
module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support. *Experimental*");
MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
"Jaya Kumar <jayakumar.lkml@gmail.com>, "
"Bernie Thompson <bernie@plugable.com>");
MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
moresushant48/android_kernel_cyanogen_msm8916 | arch/s390/kernel/crash_dump.c | 1943 | 10711 | /*
* S390 kdump implementation
*
* Copyright IBM Corp. 2011
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/elf.h>
#include <asm/os_info.h>
#include <asm/elf.h>
#include <asm/ipl.h>
#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
/*
* Copy one page from "oldmem"
*
* For the kdump reserved memory this function performs a swap operation:
* - [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0, OLDMEM_SIZE].
* - [0, OLDMEM_SIZE] is mapped to [OLDMEM_BASE, OLDMEM_BASE + OLDMEM_SIZE]
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
unsigned long src;
if (!csize)
return 0;
src = (pfn << PAGE_SHIFT) + offset;
if (src < OLDMEM_SIZE)
src += OLDMEM_BASE;
else if (src > OLDMEM_BASE &&
src < OLDMEM_BASE + OLDMEM_SIZE)
src -= OLDMEM_BASE;
if (userbuf)
copy_to_user_real((void __force __user *) buf, (void *) src,
csize);
else
memcpy_real(buf, (void *) src, csize);
return csize;
}
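/*
* Editor's note: a worked example of the swap above, assuming
* OLDMEM_BASE == 0x10000000 and OLDMEM_SIZE == 0x10000000:
*   src == 0x00001000 (below OLDMEM_SIZE)       -> reads 0x10001000
*   src == 0x10001000 (inside the kdump window) -> reads 0x00001000
*   src == 0x30000000 (outside both windows)    -> read unchanged
*/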
/*
* Copy memory from old kernel
*/
int copy_from_oldmem(void *dest, void *src, size_t count)
{
unsigned long copied = 0;
int rc;
if ((unsigned long) src < OLDMEM_SIZE) {
copied = min(count, OLDMEM_SIZE - (unsigned long) src);
rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
if (rc)
return rc;
}
return memcpy_real(dest + copied, src + copied, count - copied);
}
/*
* Alloc memory and panic in case of ENOMEM
*/
static void *kzalloc_panic(int len)
{
void *rc;
rc = kzalloc(len, GFP_KERNEL);
if (!rc)
panic("s390 kdump kzalloc (%d) failed", len);
return rc;
}
/*
* Get memory layout and create hole for oldmem
*/
static struct mem_chunk *get_memory_layout(void)
{
struct mem_chunk *chunk_array;
chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
detect_memory_layout(chunk_array, 0);
create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
return chunk_array;
}
/*
* Initialize ELF note
*/
static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len,
const char *name)
{
Elf64_Nhdr *note;
u64 len;
note = (Elf64_Nhdr *)buf;
note->n_namesz = strlen(name) + 1;
note->n_descsz = d_len;
note->n_type = type;
len = sizeof(Elf64_Nhdr);
memcpy(buf + len, name, note->n_namesz);
len = roundup(len + note->n_namesz, 4);
memcpy(buf + len, desc, note->n_descsz);
len = roundup(len + note->n_descsz, 4);
return PTR_ADD(buf, len);
}
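/*
* Editor's note: the resulting in-memory note layout, with each part
* padded to a 4-byte boundary by the roundup() calls above:
*
*   +-------------------+ <- buf
*   | Elf64_Nhdr        |  n_namesz, n_descsz, n_type
*   +-------------------+
*   | name, e.g. "CORE" |  NUL-terminated, padded
*   +-------------------+
*   | desc (d_len)      |  padded
*   +-------------------+ <- returned pointer
*/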
/*
* Initialize prstatus note
*/
static void *nt_prstatus(void *ptr, struct save_area *sa)
{
struct elf_prstatus nt_prstatus;
static int cpu_nr = 1;
memset(&nt_prstatus, 0, sizeof(nt_prstatus));
memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs));
memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw));
memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs));
nt_prstatus.pr_pid = cpu_nr;
cpu_nr++;
return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus),
"CORE");
}
/*
* Initialize fpregset (floating point) note
*/
static void *nt_fpregset(void *ptr, struct save_area *sa)
{
elf_fpregset_t nt_fpregset;
memset(&nt_fpregset, 0, sizeof(nt_fpregset));
memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg));
memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs));
return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset),
"CORE");
}
/*
* Initialize timer note
*/
static void *nt_s390_timer(void *ptr, struct save_area *sa)
{
return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer),
KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize TOD clock comparator note
*/
static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa)
{
return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp,
sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize TOD programmable register note
*/
static void *nt_s390_tod_preg(void *ptr, struct save_area *sa)
{
return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg,
sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize control register note
*/
static void *nt_s390_ctrs(void *ptr, struct save_area *sa)
{
return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs,
sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME);
}
/*
* Initialize prefix register note
*/
static void *nt_s390_prefix(void *ptr, struct save_area *sa)
{
return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg,
sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME);
}
/*
* Fill ELF notes for one CPU with save area registers
*/
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
{
ptr = nt_prstatus(ptr, sa);
ptr = nt_fpregset(ptr, sa);
ptr = nt_s390_timer(ptr, sa);
ptr = nt_s390_tod_cmp(ptr, sa);
ptr = nt_s390_tod_preg(ptr, sa);
ptr = nt_s390_ctrs(ptr, sa);
ptr = nt_s390_prefix(ptr, sa);
return ptr;
}
/*
* Initialize prpsinfo note (new kernel)
*/
static void *nt_prpsinfo(void *ptr)
{
struct elf_prpsinfo prpsinfo;
memset(&prpsinfo, 0, sizeof(prpsinfo));
prpsinfo.pr_sname = 'R';
strcpy(prpsinfo.pr_fname, "vmlinux");
return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo),
KEXEC_CORE_NOTE_NAME);
}
/*
* Get vmcoreinfo using lowcore->vmcore_info (new kernel)
*/
static void *get_vmcoreinfo_old(unsigned long *size)
{
char nt_name[11], *vmcoreinfo;
Elf64_Nhdr note;
void *addr;
if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr)))
return NULL;
memset(nt_name, 0, sizeof(nt_name));
if (copy_from_oldmem(&note, addr, sizeof(note)))
return NULL;
if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1))
return NULL;
if (strcmp(nt_name, "VMCOREINFO") != 0)
return NULL;
vmcoreinfo = kzalloc_panic(note.n_descsz);
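/*
* Editor's note: 24 == roundup(sizeof(Elf64_Nhdr) +
* sizeof("VMCOREINFO"), 4), i.e. the descriptor begins right after
* the padded note header and name (see the layout of nt_init above).
*/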
if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz))
return NULL;
*size = note.n_descsz;
return vmcoreinfo;
}
/*
* Initialize vmcoreinfo note (new kernel)
*/
static void *nt_vmcoreinfo(void *ptr)
{
unsigned long size;
void *vmcoreinfo;
vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
if (!vmcoreinfo)
vmcoreinfo = get_vmcoreinfo_old(&size);
if (!vmcoreinfo)
return ptr;
return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
}
/*
* Initialize ELF header (new kernel)
*/
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
{
memset(ehdr, 0, sizeof(*ehdr));
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
ehdr->e_ident[EI_CLASS] = ELFCLASS64;
ehdr->e_ident[EI_DATA] = ELFDATA2MSB;
ehdr->e_ident[EI_VERSION] = EV_CURRENT;
memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
ehdr->e_type = ET_CORE;
ehdr->e_machine = EM_S390;
ehdr->e_version = EV_CURRENT;
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
ehdr->e_phnum = mem_chunk_cnt + 1;
return ehdr + 1;
}
/*
* Return CPU count for ELF header (new kernel)
*/
static int get_cpu_cnt(void)
{
int i, cpus = 0;
for (i = 0; zfcpdump_save_areas[i]; i++) {
if (zfcpdump_save_areas[i]->pref_reg == 0)
continue;
cpus++;
}
return cpus;
}
/*
* Return memory chunk count for ELF header (new kernel)
*/
static int get_mem_chunk_cnt(void)
{
struct mem_chunk *chunk_array, *mem_chunk;
int i, cnt = 0;
chunk_array = get_memory_layout();
for (i = 0; i < MEMORY_CHUNKS; i++) {
mem_chunk = &chunk_array[i];
if (chunk_array[i].type != CHUNK_READ_WRITE &&
chunk_array[i].type != CHUNK_READ_ONLY)
continue;
if (mem_chunk->size == 0)
continue;
cnt++;
}
kfree(chunk_array);
return cnt;
}
/*
* Relocate pointer in order to allow vmcore code to access the data
*/
static inline unsigned long relocate(unsigned long addr)
{
return OLDMEM_BASE + addr;
}
/*
* Initialize ELF loads (new kernel)
*/
static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
{
struct mem_chunk *chunk_array, *mem_chunk;
int i;
chunk_array = get_memory_layout();
for (i = 0; i < MEMORY_CHUNKS; i++) {
mem_chunk = &chunk_array[i];
if (mem_chunk->size == 0)
continue;
if (chunk_array[i].type != CHUNK_READ_WRITE &&
chunk_array[i].type != CHUNK_READ_ONLY)
continue;
else
phdr->p_filesz = mem_chunk->size;
phdr->p_type = PT_LOAD;
phdr->p_offset = mem_chunk->addr;
phdr->p_vaddr = mem_chunk->addr;
phdr->p_paddr = mem_chunk->addr;
phdr->p_memsz = mem_chunk->size;
phdr->p_flags = PF_R | PF_W | PF_X;
phdr->p_align = PAGE_SIZE;
phdr++;
}
kfree(chunk_array);
return i;
}
/*
* Initialize notes (new kernel)
*/
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
struct save_area *sa;
void *ptr_start = ptr;
int i;
ptr = nt_prpsinfo(ptr);
for (i = 0; zfcpdump_save_areas[i]; i++) {
sa = zfcpdump_save_areas[i];
if (sa->pref_reg == 0)
continue;
ptr = fill_cpu_elf_notes(ptr, sa);
}
ptr = nt_vmcoreinfo(ptr);
memset(phdr, 0, sizeof(*phdr));
phdr->p_type = PT_NOTE;
phdr->p_offset = relocate(notes_offset);
phdr->p_filesz = (unsigned long) PTR_SUB(ptr, ptr_start);
phdr->p_memsz = phdr->p_filesz;
return ptr;
}
/*
* Create ELF core header (new kernel)
*/
static void s390_elf_corehdr_create(char **elfcorebuf, size_t *elfcorebuf_sz)
{
Elf64_Phdr *phdr_notes, *phdr_loads;
int mem_chunk_cnt;
void *ptr, *hdr;
u32 alloc_size;
u64 hdr_off;
mem_chunk_cnt = get_mem_chunk_cnt();
alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
mem_chunk_cnt * sizeof(Elf64_Phdr);
hdr = kzalloc_panic(alloc_size);
/* Init elf header */
ptr = ehdr_init(hdr, mem_chunk_cnt);
/* Init program headers */
phdr_notes = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr));
phdr_loads = ptr;
ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt);
/* Init notes */
hdr_off = PTR_DIFF(ptr, hdr);
ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off);
/* Init loads */
hdr_off = PTR_DIFF(ptr, hdr);
loads_init(phdr_loads, ((unsigned long) hdr) + hdr_off);
*elfcorebuf_sz = hdr_off;
*elfcorebuf = (void *) relocate((unsigned long) hdr);
BUG_ON(*elfcorebuf_sz > alloc_size);
}
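/*
* Editor's note: layout of the buffer assembled above:
*   [Elf64_Ehdr][PT_NOTE phdr][PT_LOAD phdrs x mem_chunk_cnt][note data]
* *elfcorebuf_sz covers the headers plus note data only; the PT_LOAD
* segments reference old memory directly via relocate().
*/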
/*
* Create kdump ELF core header in new kernel, if it has not been passed via
* the "elfcorehdr" kernel parameter
*/
static int setup_kdump_elfcorehdr(void)
{
size_t elfcorebuf_sz;
char *elfcorebuf;
if (!OLDMEM_BASE || is_kdump_kernel())
return -EINVAL;
s390_elf_corehdr_create(&elfcorebuf, &elfcorebuf_sz);
elfcorehdr_addr = (unsigned long long) elfcorebuf;
elfcorehdr_size = elfcorebuf_sz;
return 0;
}
subsys_initcall(setup_kdump_elfcorehdr);
| gpl-2.0 |
Negamann303/kernel-nk1-negalite-lt02ltespr | fs/cifs/connect.c | 2455 | 115352 | /*
* fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/pagevec.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/inet.h>
#include <linux/module.h>
#include <keys/user-type.h>
#include <net/ipv6.h>
#include <linux/parser.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fscache.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
/* SMB echo "timeout" -- FIXME: tunable? */
#define SMB_ECHO_INTERVAL (60 * HZ)
extern mempool_t *cifs_req_poolp;
/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE (1 * HZ)
#define TLINK_IDLE_EXPIRE (600 * HZ)
enum {
/* Mount options that take no arguments */
Opt_user_xattr, Opt_nouser_xattr,
Opt_forceuid, Opt_noforceuid,
Opt_noblocksend, Opt_noautotune,
Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
Opt_nosfu, Opt_nodfs, Opt_posixpaths,
Opt_noposixpaths, Opt_nounix,
Opt_nocase,
Opt_brl, Opt_nobrl,
Opt_forcemandatorylock, Opt_setuids,
Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
Opt_nohard, Opt_nosoft,
Opt_nointr, Opt_intr,
Opt_nostrictsync, Opt_strictsync,
Opt_serverino, Opt_noserverino,
Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl,
Opt_acl, Opt_noacl, Opt_locallease,
Opt_sign, Opt_seal, Opt_direct,
Opt_strictcache, Opt_noac,
Opt_fsc, Opt_mfsymlinks,
Opt_multiuser, Opt_sloppy,
/* Mount options which take numeric value */
Opt_backupuid, Opt_backupgid, Opt_uid,
Opt_cruid, Opt_gid, Opt_file_mode,
Opt_dirmode, Opt_port,
Opt_rsize, Opt_wsize, Opt_actimeo,
/* Mount options which take string value */
Opt_user, Opt_pass, Opt_ip,
Opt_unc, Opt_domain,
Opt_srcaddr, Opt_prefixpath,
Opt_iocharset, Opt_sockopt,
Opt_netbiosname, Opt_servern,
Opt_ver, Opt_sec,
/* Mount options to be ignored */
Opt_ignore,
/* Options which could be blank */
Opt_blank_pass,
Opt_blank_user,
Opt_blank_ip,
Opt_err
};
static const match_table_t cifs_mount_option_tokens = {
{ Opt_user_xattr, "user_xattr" },
{ Opt_nouser_xattr, "nouser_xattr" },
{ Opt_forceuid, "forceuid" },
{ Opt_noforceuid, "noforceuid" },
{ Opt_noblocksend, "noblocksend" },
{ Opt_noautotune, "noautotune" },
{ Opt_hard, "hard" },
{ Opt_soft, "soft" },
{ Opt_perm, "perm" },
{ Opt_noperm, "noperm" },
{ Opt_mapchars, "mapchars" },
{ Opt_nomapchars, "nomapchars" },
{ Opt_sfu, "sfu" },
{ Opt_nosfu, "nosfu" },
{ Opt_nodfs, "nodfs" },
{ Opt_posixpaths, "posixpaths" },
{ Opt_noposixpaths, "noposixpaths" },
{ Opt_nounix, "nounix" },
{ Opt_nounix, "nolinux" },
{ Opt_nocase, "nocase" },
{ Opt_nocase, "ignorecase" },
{ Opt_brl, "brl" },
{ Opt_nobrl, "nobrl" },
{ Opt_nobrl, "nolock" },
{ Opt_forcemandatorylock, "forcemandatorylock" },
{ Opt_forcemandatorylock, "forcemand" },
{ Opt_setuids, "setuids" },
{ Opt_nosetuids, "nosetuids" },
{ Opt_dynperm, "dynperm" },
{ Opt_nodynperm, "nodynperm" },
{ Opt_nohard, "nohard" },
{ Opt_nosoft, "nosoft" },
{ Opt_nointr, "nointr" },
{ Opt_intr, "intr" },
{ Opt_nostrictsync, "nostrictsync" },
{ Opt_strictsync, "strictsync" },
{ Opt_serverino, "serverino" },
{ Opt_noserverino, "noserverino" },
{ Opt_rwpidforward, "rwpidforward" },
{ Opt_cifsacl, "cifsacl" },
{ Opt_nocifsacl, "nocifsacl" },
{ Opt_acl, "acl" },
{ Opt_noacl, "noacl" },
{ Opt_locallease, "locallease" },
{ Opt_sign, "sign" },
{ Opt_seal, "seal" },
{ Opt_direct, "direct" },
{ Opt_direct, "directio" },
{ Opt_direct, "forcedirectio" },
{ Opt_strictcache, "strictcache" },
{ Opt_noac, "noac" },
{ Opt_fsc, "fsc" },
{ Opt_mfsymlinks, "mfsymlinks" },
{ Opt_multiuser, "multiuser" },
{ Opt_sloppy, "sloppy" },
{ Opt_backupuid, "backupuid=%s" },
{ Opt_backupgid, "backupgid=%s" },
{ Opt_uid, "uid=%s" },
{ Opt_cruid, "cruid=%s" },
{ Opt_gid, "gid=%s" },
{ Opt_file_mode, "file_mode=%s" },
{ Opt_dirmode, "dirmode=%s" },
{ Opt_dirmode, "dir_mode=%s" },
{ Opt_port, "port=%s" },
{ Opt_rsize, "rsize=%s" },
{ Opt_wsize, "wsize=%s" },
{ Opt_actimeo, "actimeo=%s" },
{ Opt_blank_user, "user=" },
{ Opt_blank_user, "username=" },
{ Opt_user, "user=%s" },
{ Opt_user, "username=%s" },
{ Opt_blank_pass, "pass=" },
{ Opt_pass, "pass=%s" },
{ Opt_pass, "password=%s" },
{ Opt_blank_ip, "ip=" },
{ Opt_blank_ip, "addr=" },
{ Opt_ip, "ip=%s" },
{ Opt_ip, "addr=%s" },
{ Opt_unc, "unc=%s" },
{ Opt_unc, "target=%s" },
{ Opt_unc, "path=%s" },
{ Opt_domain, "dom=%s" },
{ Opt_domain, "domain=%s" },
{ Opt_domain, "workgroup=%s" },
{ Opt_srcaddr, "srcaddr=%s" },
{ Opt_prefixpath, "prefixpath=%s" },
{ Opt_iocharset, "iocharset=%s" },
{ Opt_sockopt, "sockopt=%s" },
{ Opt_netbiosname, "netbiosname=%s" },
{ Opt_servern, "servern=%s" },
{ Opt_ver, "ver=%s" },
{ Opt_ver, "vers=%s" },
{ Opt_ver, "version=%s" },
{ Opt_sec, "sec=%s" },
{ Opt_ignore, "cred" },
{ Opt_ignore, "credentials" },
{ Opt_ignore, "cred=%s" },
{ Opt_ignore, "credentials=%s" },
{ Opt_ignore, "guest" },
{ Opt_ignore, "rw" },
{ Opt_ignore, "ro" },
{ Opt_ignore, "suid" },
{ Opt_ignore, "nosuid" },
{ Opt_ignore, "exec" },
{ Opt_ignore, "noexec" },
{ Opt_ignore, "nodev" },
{ Opt_ignore, "noauto" },
{ Opt_ignore, "dev" },
{ Opt_ignore, "mand" },
{ Opt_ignore, "nomand" },
{ Opt_ignore, "_netdev" },
{ Opt_err, NULL }
};
enum {
Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
Opt_sec_nontlm, Opt_sec_lanman,
Opt_sec_none,
Opt_sec_err
};
static const match_table_t cifs_secflavor_tokens = {
{ Opt_sec_krb5, "krb5" },
{ Opt_sec_krb5i, "krb5i" },
{ Opt_sec_krb5p, "krb5p" },
{ Opt_sec_ntlmsspi, "ntlmsspi" },
{ Opt_sec_ntlmssp, "ntlmssp" },
{ Opt_ntlm, "ntlm" },
{ Opt_sec_ntlmi, "ntlmi" },
{ Opt_sec_ntlmv2i, "ntlmv2i" },
{ Opt_sec_nontlm, "nontlm" },
{ Opt_sec_lanman, "lanman" },
{ Opt_sec_none, "none" },
{ Opt_sec_err, NULL }
};
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);
static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname);
/*
* cifs tcp session reconnection
*
* mark tcp session as reconnecting so it is temporarily locked
* mark all smb sessions as reconnecting for tcp session
* reconnect tcp session
* wake up waiters on reconnection? - (not needed currently)
*/
static int
cifs_reconnect(struct TCP_Server_Info *server)
{
int rc = 0;
struct list_head *tmp, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct mid_q_entry *mid_entry;
struct list_head retry_list;
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus == CifsExiting) {
/* the demux thread will exit normally
next time through the loop */
spin_unlock(&GlobalMid_Lock);
return rc;
} else
server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
cFYI(1, "Reconnecting tcp session");
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
ses->need_reconnect = true;
ses->ipc_tid = 0;
list_for_each(tmp2, &ses->tcon_list) {
tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
tcon->need_reconnect = true;
}
}
spin_unlock(&cifs_tcp_ses_lock);
/* do not want to be sending data on a socket we are freeing */
cFYI(1, "%s: tearing down socket", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
cFYI(1, "Post shutdown state: 0x%x Flags: 0x%lx",
server->ssocket->state,
server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
server->sequence_number = 0;
server->session_estab = false;
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
server->lstrp = jiffies;
mutex_unlock(&server->srv_mutex);
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
cFYI(1, "%s: moving mids to private list", __func__);
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
mid_entry->mid_state = MID_RETRY_NEEDED;
list_move(&mid_entry->qhead, &retry_list);
}
spin_unlock(&GlobalMid_Lock);
cFYI(1, "%s: issuing mid callbacks", __func__);
list_for_each_safe(tmp, tmp2, &retry_list) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
do {
try_to_freeze();
/* we should try only the port we connected to before */
rc = generic_ip_connect(server);
if (rc) {
cFYI(1, "reconnect error %d", rc);
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&GlobalMid_Lock);
}
} while (server->tcpStatus == CifsNeedReconnect);
return rc;
}
/*
return codes:
0 not a transact2, or all data present
>0 transact2 with that much data missing
-EINVAL = invalid transact2
*/
static int check2ndT2(char *buf)
{
struct smb_hdr *pSMB = (struct smb_hdr *)buf;
struct smb_t2_rsp *pSMBt;
int remaining;
__u16 total_data_size, data_in_this_rsp;
if (pSMB->Command != SMB_COM_TRANSACTION2)
return 0;
/* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
cFYI(1, "invalid transact2 word count");
return -EINVAL;
}
pSMBt = (struct smb_t2_rsp *)pSMB;
total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
if (total_data_size == data_in_this_rsp)
return 0;
else if (total_data_size < data_in_this_rsp) {
cFYI(1, "total data %d smaller than data in frame %d",
total_data_size, data_in_this_rsp);
return -EINVAL;
}
remaining = total_data_size - data_in_this_rsp;
cFYI(1, "missing %d bytes from transact2, check next response",
remaining);
if (total_data_size > CIFSMaxBufSize) {
cERROR(1, "TotalDataSize %d is over maximum buffer %d",
total_data_size, CIFSMaxBufSize);
return -EINVAL;
}
return remaining;
}
static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
{
struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
char *data_area_of_tgt;
char *data_area_of_src;
int remaining;
unsigned int byte_count, total_in_tgt;
__u16 tgt_total_cnt, src_total_cnt, total_in_src;
src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
if (tgt_total_cnt != src_total_cnt)
cFYI(1, "total data count of primary and secondary t2 differ "
"source=%hu target=%hu", src_total_cnt, tgt_total_cnt);
total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
remaining = tgt_total_cnt - total_in_tgt;
if (remaining < 0) {
cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
"total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
return -EPROTO;
}
if (remaining == 0) {
/* nothing to do, ignore */
cFYI(1, "no more data remains");
return 0;
}
total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
if (remaining < total_in_src)
cFYI(1, "transact2 2nd response contains too much data");
/* find end of first SMB data area */
data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
/* validate target area */
data_area_of_src = (char *)&pSMBs->hdr.Protocol +
get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
data_area_of_tgt += total_in_tgt;
total_in_tgt += total_in_src;
/* is the result too big for the field? */
if (total_in_tgt > USHRT_MAX) {
cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
return -EPROTO;
}
put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
/* fix up the BCC */
byte_count = get_bcc(target_hdr);
byte_count += total_in_src;
/* is the result too big for the field? */
if (byte_count > USHRT_MAX) {
cFYI(1, "coalesced BCC too large (%u)", byte_count);
return -EPROTO;
}
put_bcc(byte_count, target_hdr);
byte_count = be32_to_cpu(target_hdr->smb_buf_length);
byte_count += total_in_src;
/* don't allow buffer to overflow */
if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
return -ENOBUFS;
}
target_hdr->smb_buf_length = cpu_to_be32(byte_count);
/* copy second buffer into end of first buffer */
memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
if (remaining != total_in_src) {
/* more responses to go */
cFYI(1, "waiting for more secondary responses");
return 1;
}
/* we are done */
cFYI(1, "found the last secondary response");
return 0;
}
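/*
* Editor's note: how the two helpers above cooperate on a multi-part
* transact2 response (see handle_mid() below): check2ndT2() on the
* first frame returns the number of bytes still missing, and each
* later frame is merged in with coalesce_t2(), which returns 1 while
* more parts are expected, 0 when complete, and < 0 on a malformed
* response.
*/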
static void
cifs_echo_request(struct work_struct *work)
{
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
/*
* We cannot send an echo until the NEGOTIATE_PROTOCOL request is
* done, which is indicated by maxBuf != 0. Also, no need to ping if
* we got a response recently
*/
if (server->maxBuf == 0 ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
rc = CIFSSMBEcho(server);
if (rc)
cFYI(1, "Unable to send echo request to server: %s",
server->hostname);
requeue_echo:
queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL);
}
static bool
allocate_buffers(struct TCP_Server_Info *server)
{
if (!server->bigbuf) {
server->bigbuf = (char *)cifs_buf_get();
if (!server->bigbuf) {
cERROR(1, "No memory for large SMB response");
msleep(3000);
/* retry will check if exiting */
return false;
}
} else if (server->large_buf) {
/* we are reusing a dirty large buf, clear its start */
memset(server->bigbuf, 0, header_size());
}
if (!server->smallbuf) {
server->smallbuf = (char *)cifs_small_buf_get();
if (!server->smallbuf) {
cERROR(1, "No memory for SMB response");
msleep(1000);
/* retry will check if exiting */
return false;
}
/* beginning of smb buffer is cleared in our buf_get */
} else {
/* if existing small buf clear beginning */
memset(server->smallbuf, 0, header_size());
}
return true;
}
static bool
server_unresponsive(struct TCP_Server_Info *server)
{
/*
* We need to wait 2 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
* 2s client gets a response
* 30s echo workqueue job pops, and decides we got a response recently
* and don't need to send another
* ...
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
if (server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) {
cERROR(1, "Server %s has not responded in %d seconds. "
"Reconnecting...", server->hostname,
(2 * SMB_ECHO_INTERVAL) / HZ);
cifs_reconnect(server);
wake_up(&server->response_q);
return true;
}
return false;
}
/*
* kvec_array_init - clone a kvec array, and advance into it
* @new: pointer to memory for cloned array
* @iov: pointer to original array
* @nr_segs: number of members in original array
* @bytes: number of bytes to advance into the cloned array
*
* This function will copy the array provided in iov to a section of memory
* and advance the specified number of bytes into the new array. It returns
* the number of segments in the new array. "new" must be at least as big as
* the original iov array.
*/
static unsigned int
kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
size_t bytes)
{
size_t base = 0;
while (bytes || !iov->iov_len) {
int copy = min(bytes, iov->iov_len);
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
iov++;
nr_segs--;
base = 0;
}
}
memcpy(new, iov, sizeof(*iov) * nr_segs);
new->iov_base += base;
new->iov_len -= base;
return nr_segs;
}
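/*
* Editor's note: hypothetical usage of kvec_array_init(), resuming a
* partially completed receive. With
*   struct kvec orig[2] = { { hdr, 4 }, { body, 100 } };
* and 6 bytes already read, kvec_array_init(clone, orig, 2, 6) skips
* orig[0] entirely, advances 2 bytes into orig[1] (clone[0] becomes
* { body + 2, 98 }), and returns 1.
*/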
static struct kvec *
get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
{
struct kvec *new_iov;
if (server->iov && nr_segs <= server->nr_iov)
return server->iov;
/* not big enough -- allocate a new one and release the old */
new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
if (new_iov) {
kfree(server->iov);
server->iov = new_iov;
server->nr_iov = nr_segs;
}
return new_iov;
}
int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
unsigned int nr_segs, unsigned int to_read)
{
int length = 0;
int total_read;
unsigned int segs;
struct msghdr smb_msg;
struct kvec *iov;
iov = get_server_iovec(server, nr_segs);
if (!iov)
return -ENOMEM;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
try_to_freeze();
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
}
segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
length = kernel_recvmsg(server->ssocket, &smb_msg,
iov, segs, to_read, 0);
if (server->tcpStatus == CifsExiting) {
total_read = -ESHUTDOWN;
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server);
total_read = -EAGAIN;
break;
} else if (length == -ERESTARTSYS ||
length == -EAGAIN ||
length == -EINTR) {
/*
* Minimum sleep to prevent looping, allowing socket
* to clear and app threads to set tcpStatus
* CifsNeedReconnect if server hung.
*/
usleep_range(1000, 2000);
length = 0;
continue;
} else if (length <= 0) {
cFYI(1, "Received no data or error: expecting %d "
"got %d", to_read, length);
cifs_reconnect(server);
total_read = -EAGAIN;
break;
}
}
return total_read;
}
int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read)
{
struct kvec iov;
iov.iov_base = buf;
iov.iov_len = to_read;
return cifs_readv_from_socket(server, &iov, 1, to_read);
}
static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
/*
* The first byte of the length field (big endian) is actually
* not part of the length; it is the frame type, with the most
* common value, zero, indicating regular session data.
*/
switch (type) {
case RFC1002_SESSION_MESSAGE:
/* Regular SMB response */
return true;
case RFC1002_SESSION_KEEP_ALIVE:
cFYI(1, "RFC 1002 session keep alive");
break;
case RFC1002_POSITIVE_SESSION_RESPONSE:
cFYI(1, "RFC 1002 positive session response");
break;
case RFC1002_NEGATIVE_SESSION_RESPONSE:
/*
* We get this from Windows 98 instead of an error on
* SMB negprot response.
*/
cFYI(1, "RFC 1002 negative session response");
/* give server a second to clean up */
msleep(1000);
/*
* Always try 445 first on reconnect since we get NACK
* on some if we ever connected to port 139 (the NACK
* is since we do not begin with RFC1001 session
* initialize frame).
*/
cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
cifs_reconnect(server);
wake_up(&server->response_q);
break;
default:
cERROR(1, "RFC 1002 unknown response type 0x%x", type);
cifs_reconnect(server);
}
return false;
}
static struct mid_q_entry *
find_mid(struct TCP_Server_Info *server, char *buffer)
{
struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct mid_q_entry *mid;
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (mid->mid == buf->Mid &&
mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) {
spin_unlock(&GlobalMid_Lock);
return mid;
}
}
spin_unlock(&GlobalMid_Lock);
return NULL;
}
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
spin_lock(&GlobalMid_Lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
mid->mid_state = MID_RESPONSE_MALFORMED;
list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
char *buf, int malformed)
{
if (malformed == 0 && check2ndT2(buf) > 0) {
mid->multiRsp = true;
if (mid->resp_buf) {
/* merge response - fix up 1st*/
malformed = coalesce_t2(buf, mid->resp_buf);
if (malformed > 0)
return;
/* All parts received or packet is malformed. */
mid->multiEnd = true;
return dequeue_mid(mid, malformed);
}
if (!server->large_buf) {
/*FIXME: switch to already allocated largebuf?*/
cERROR(1, "1st trans2 resp needs bigbuf");
} else {
/* Have first buffer */
mid->resp_buf = buf;
mid->large_buf = true;
server->bigbuf = NULL;
}
return;
}
mid->resp_buf = buf;
mid->large_buf = server->large_buf;
/* Was previous buf put in mpx struct for multi-rsp? */
if (!mid->multiRsp) {
/* smb buffer will be freed by user thread */
if (server->large_buf)
server->bigbuf = NULL;
else
server->smallbuf = NULL;
}
dequeue_mid(mid, malformed);
}
static void clean_demultiplex_info(struct TCP_Server_Info *server)
{
int length;
/* take it off the list, if it's not already */
spin_lock(&cifs_tcp_ses_lock);
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
wake_up_all(&server->response_q);
/* check if we have blocked requests that need to free */
spin_lock(&server->req_lock);
if (server->credits <= 0)
server->credits = 1;
spin_unlock(&server->req_lock);
/*
* Although there should not be any requests blocked on this queue it
* cannot hurt to be paranoid and try to wake up requests that may
* have been blocked when more than 50 at a time were on the wire to the
* same server - they now will see the session is in exit state and get
* out of SendReceive.
*/
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
}
if (!list_empty(&server->pending_mid_q)) {
struct list_head dispose_list;
struct mid_q_entry *mid_entry;
struct list_head *tmp, *tmp2;
INIT_LIST_HEAD(&dispose_list);
spin_lock(&GlobalMid_Lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cFYI(1, "Clearing mid 0x%llx", mid_entry->mid);
mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
}
spin_unlock(&GlobalMid_Lock);
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cFYI(1, "Callback mid 0x%llx", mid_entry->mid);
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
}
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
}
if (!list_empty(&server->pending_mid_q)) {
/*
* mpx threads have not exited yet, so give them at least the smb
* send timeout time for long ops.
*
* Due to delays on oplock break requests, we need to wait at
* least 45 seconds before giving up on a request getting a
* response and going ahead and killing cifsd.
*/
cFYI(1, "Wait for exit from demultiplex thread");
msleep(46000);
/*
* If threads still have not exited they are probably never
* coming home; not much else we can do but free the memory.
*/
}
kfree(server->hostname);
kfree(server->iov);
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
if (length > 0)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
GFP_KERNEL);
}
static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length;
char *buf = server->smallbuf;
unsigned int pdu_length = get_rfc1002_length(buf);
/* make sure this will fit in a large buffer */
if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
cERROR(1, "SMB response too long (%u bytes)",
pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
return -EAGAIN;
}
/* switch to large buffer if too big for a small one */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
server->large_buf = true;
memcpy(server->bigbuf, buf, server->total_read);
buf = server->bigbuf;
}
/* now read the rest */
length = cifs_read_from_socket(server, buf + header_size() - 1,
pdu_length - header_size() + 1 + 4);
if (length < 0)
return length;
server->total_read += length;
dump_smb(buf, server->total_read);
/*
* We know that we received enough to get to the MID as we
* checked the pdu_length earlier. Now check to see
* if the rest of the header is OK. We borrow the length
* var for the rest of the loop to avoid a new stack var.
*
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
length = checkSMB(buf, server->total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
if (!mid)
return length;
handle_mid(mid, server, buf, length);
return 0;
}
static int
cifs_demultiplex_thread(void *p)
{
int length;
struct TCP_Server_Info *server = p;
unsigned int pdu_length;
char *buf = NULL;
struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry;
current->flags |= PF_MEMALLOC;
cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
length = atomic_inc_return(&tcpSesAllocCount);
if (length > 1)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
GFP_KERNEL);
set_freezable();
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
if (!allocate_buffers(server))
continue;
server->large_buf = false;
buf = server->smallbuf;
pdu_length = 4; /* enough to get RFC1001 header */
length = cifs_read_from_socket(server, buf, pdu_length);
if (length < 0)
continue;
server->total_read = length;
/*
* The right amount was read from socket - 4 bytes,
* so we can now interpret the length field.
*/
pdu_length = get_rfc1002_length(buf);
cFYI(1, "RFC1002 header 0x%x", pdu_length);
if (!is_smb_response(server, buf[0]))
continue;
/* make sure we have enough to get to the MID */
if (pdu_length < header_size() - 1 - 4) {
cERROR(1, "SMB response too short (%u bytes)",
pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
continue;
}
/* read down to the MID */
length = cifs_read_from_socket(server, buf + 4,
header_size() - 1 - 4);
if (length < 0)
continue;
server->total_read += length;
mid_entry = find_mid(server, buf);
if (!mid_entry || !mid_entry->receive)
length = standard_receive3(server, mid_entry);
else
length = mid_entry->receive(server, mid_entry);
if (length < 0)
continue;
if (server->large_buf)
buf = server->bigbuf;
server->lstrp = jiffies;
if (mid_entry != NULL) {
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
} else if (!is_valid_oplock_break(buf, server)) {
cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", buf, header_size());
#ifdef CONFIG_CIFS_DEBUG2
cifs_dump_detail(buf);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
}
} /* end while !EXITING */
/* buffer usually freed in free_mid - need to free it here on exit */
cifs_buf_release(server->bigbuf);
if (server->smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(server->smallbuf);
task_to_wake = xchg(&server->tsk, NULL);
clean_demultiplex_info(server);
/* if server->tsk was NULL then wait for a signal before exiting */
if (!task_to_wake) {
set_current_state(TASK_INTERRUPTIBLE);
while (!signal_pending(current)) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
}
module_put_and_exit(0);
}
/* extract the host portion of the UNC string */
static char *
extract_hostname(const char *unc)
{
const char *src;
char *dst, *delim;
unsigned int len;
/* skip double chars at beginning of string */
/* BB: check validity of these bytes? */
src = unc + 2;
/* delimiter between hostname and sharename is always '\\' now */
delim = strchr(src, '\\');
if (!delim)
return ERR_PTR(-EINVAL);
len = delim - src;
dst = kmalloc((len + 1), GFP_KERNEL);
if (dst == NULL)
return ERR_PTR(-ENOMEM);
memcpy(dst, src, len);
dst[len] = '\0';
return dst;
}
static int get_option_ul(substring_t args[], unsigned long *option)
{
int rc;
char *string;
string = match_strdup(args);
if (string == NULL)
return -ENOMEM;
rc = kstrtoul(string, 0, option);
kfree(string);
return rc;
}
static int cifs_parse_security_flavors(char *value,
struct smb_vol *vol)
{
substring_t args[MAX_OPT_ARGS];
switch (match_token(value, cifs_secflavor_tokens, args)) {
case Opt_sec_krb5:
vol->secFlg |= CIFSSEC_MAY_KRB5;
break;
case Opt_sec_krb5i:
vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
break;
case Opt_sec_krb5p:
/* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */
cERROR(1, "Krb5 cifs privacy not supported");
break;
case Opt_sec_ntlmssp:
vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
break;
case Opt_sec_ntlmsspi:
vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN;
break;
case Opt_ntlm:
/* ntlm is default so can be turned off too */
vol->secFlg |= CIFSSEC_MAY_NTLM;
break;
case Opt_sec_ntlmi:
vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
break;
case Opt_sec_nontlm:
vol->secFlg |= CIFSSEC_MAY_NTLMV2;
break;
case Opt_sec_ntlmv2i:
vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN;
break;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
case Opt_sec_lanman:
vol->secFlg |= CIFSSEC_MAY_LANMAN;
break;
#endif
case Opt_sec_none:
vol->nullauth = 1;
break;
default:
cERROR(1, "bad security option: %s", value);
return 1;
}
return 0;
}
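/*
* Editor's note: for example, "sec=krb5i" above yields
* CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN, and "sec=none" requests
* anonymous (null) authentication via vol->nullauth.
*/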
static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
struct smb_vol *vol)
{
char *data, *end;
char *mountdata_copy = NULL, *options;
unsigned int temp_len, i, j;
char separator[2];
short int override_uid = -1;
short int override_gid = -1;
bool uid_specified = false;
bool gid_specified = false;
bool sloppy = false;
char *invalid = NULL;
char *nodename = utsname()->nodename;
char *string = NULL;
char *tmp_end, *value;
char delim;
separator[0] = ',';
separator[1] = 0;
delim = separator[0];
/*
* does not have to be perfect mapping since field is
* informational, only used for servers that do not support
* port 445 and it can be overridden at mount time
*/
memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
vol->source_rfc1001_name[i] = toupper(nodename[i]);
vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
/* null target name indicates to use *SMBSERVER default called name
if we end up sending RFC1001 session initialize */
vol->target_rfc1001_name[0] = 0;
vol->cred_uid = current_uid();
vol->linux_uid = current_uid();
vol->linux_gid = current_gid();
/* default to only allowing write access to owner of the mount */
vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
/* default is always to request posix paths. */
vol->posix_paths = 1;
/* default to using server inode numbers where available */
vol->server_ino = 1;
vol->actimeo = CIFS_DEF_ACTIMEO;
if (!mountdata)
goto cifs_parse_mount_err;
mountdata_copy = kstrndup(mountdata, PAGE_SIZE, GFP_KERNEL);
if (!mountdata_copy)
goto cifs_parse_mount_err;
options = mountdata_copy;
end = options + strlen(options);
if (strncmp(options, "sep=", 4) == 0) {
if (options[4] != 0) {
separator[0] = options[4];
options += 5;
} else {
cFYI(1, "Null separator not allowed");
}
}
vol->backupuid_specified = false; /* no backup intent for a user */
vol->backupgid_specified = false; /* no backup intent for a group */
while ((data = strsep(&options, separator)) != NULL) {
substring_t args[MAX_OPT_ARGS];
unsigned long option;
int token;
if (!*data)
continue;
token = match_token(data, cifs_mount_option_tokens, args);
switch (token) {
/* Ignore the following */
case Opt_ignore:
break;
/* Boolean values */
case Opt_user_xattr:
vol->no_xattr = 0;
break;
case Opt_nouser_xattr:
vol->no_xattr = 1;
break;
case Opt_forceuid:
override_uid = 1;
break;
case Opt_noforceuid:
override_uid = 0;
break;
case Opt_noblocksend:
vol->noblocksnd = 1;
break;
case Opt_noautotune:
vol->noautotune = 1;
break;
case Opt_hard:
vol->retry = 1;
break;
case Opt_soft:
vol->retry = 0;
break;
case Opt_perm:
vol->noperm = 0;
break;
case Opt_noperm:
vol->noperm = 1;
break;
case Opt_mapchars:
vol->remap = 1;
break;
case Opt_nomapchars:
vol->remap = 0;
break;
case Opt_sfu:
vol->sfu_emul = 1;
break;
case Opt_nosfu:
vol->sfu_emul = 0;
break;
case Opt_nodfs:
vol->nodfs = 1;
break;
case Opt_posixpaths:
vol->posix_paths = 1;
break;
case Opt_noposixpaths:
vol->posix_paths = 0;
break;
case Opt_nounix:
vol->no_linux_ext = 1;
break;
case Opt_nocase:
vol->nocase = 1;
break;
case Opt_brl:
vol->nobrl = 0;
break;
case Opt_nobrl:
vol->nobrl = 1;
/*
* turn off mandatory locking in mode
* if remote locking is turned off since the
* local vfs will do advisory
*/
if (vol->file_mode ==
(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
vol->file_mode = S_IALLUGO;
break;
case Opt_forcemandatorylock:
vol->mand_lock = 1;
break;
case Opt_setuids:
vol->setuids = 1;
break;
case Opt_nosetuids:
vol->setuids = 0;
break;
case Opt_dynperm:
vol->dynperm = true;
break;
case Opt_nodynperm:
vol->dynperm = false;
break;
case Opt_nohard:
vol->retry = 0;
break;
case Opt_nosoft:
vol->retry = 1;
break;
case Opt_nointr:
vol->intr = 0;
break;
case Opt_intr:
vol->intr = 1;
break;
case Opt_nostrictsync:
vol->nostrictsync = 1;
break;
case Opt_strictsync:
vol->nostrictsync = 0;
break;
case Opt_serverino:
vol->server_ino = 1;
break;
case Opt_noserverino:
vol->server_ino = 0;
break;
case Opt_rwpidforward:
vol->rwpidforward = 1;
break;
case Opt_cifsacl:
vol->cifs_acl = 1;
break;
case Opt_nocifsacl:
vol->cifs_acl = 0;
break;
case Opt_acl:
vol->no_psx_acl = 0;
break;
case Opt_noacl:
vol->no_psx_acl = 1;
break;
case Opt_locallease:
vol->local_lease = 1;
break;
case Opt_sign:
vol->secFlg |= CIFSSEC_MUST_SIGN;
break;
case Opt_seal:
/* we do not do the following in secFlags because seal
* is a per tree connection (mount) not a per socket
* or per-smb connection option in the protocol
* vol->secFlg |= CIFSSEC_MUST_SEAL;
*/
vol->seal = 1;
break;
case Opt_direct:
vol->direct_io = 1;
break;
case Opt_strictcache:
vol->strict_io = 1;
break;
case Opt_noac:
printk(KERN_WARNING "CIFS: Mount option noac not "
"supported. Instead set "
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
break;
case Opt_fsc:
#ifndef CONFIG_CIFS_FSCACHE
cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
"kernel config option set");
goto cifs_parse_mount_err;
#endif
vol->fsc = true;
break;
case Opt_mfsymlinks:
vol->mfsymlinks = true;
break;
case Opt_multiuser:
vol->multiuser = true;
break;
case Opt_sloppy:
sloppy = true;
break;
/* Numeric Values */
case Opt_backupuid:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid backupuid value",
__func__);
goto cifs_parse_mount_err;
}
vol->backupuid = option;
vol->backupuid_specified = true;
break;
case Opt_backupgid:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid backupgid value",
__func__);
goto cifs_parse_mount_err;
}
vol->backupgid = option;
vol->backupgid_specified = true;
break;
case Opt_uid:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid uid value",
__func__);
goto cifs_parse_mount_err;
}
vol->linux_uid = option;
uid_specified = true;
break;
case Opt_cruid:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid cruid value",
__func__);
goto cifs_parse_mount_err;
}
vol->cred_uid = option;
break;
case Opt_gid:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid gid value",
__func__);
goto cifs_parse_mount_err;
}
vol->linux_gid = option;
gid_specified = true;
break;
case Opt_file_mode:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid file_mode value",
__func__);
goto cifs_parse_mount_err;
}
vol->file_mode = option;
break;
case Opt_dirmode:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid dir_mode value",
__func__);
goto cifs_parse_mount_err;
}
vol->dir_mode = option;
break;
case Opt_port:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid port value",
__func__);
goto cifs_parse_mount_err;
}
vol->port = option;
break;
case Opt_rsize:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid rsize value",
__func__);
goto cifs_parse_mount_err;
}
vol->rsize = option;
break;
case Opt_wsize:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid wsize value",
__func__);
goto cifs_parse_mount_err;
}
vol->wsize = option;
break;
case Opt_actimeo:
if (get_option_ul(args, &option)) {
cERROR(1, "%s: Invalid actimeo value",
__func__);
goto cifs_parse_mount_err;
}
vol->actimeo = HZ * option;
if (vol->actimeo > CIFS_MAX_ACTIMEO) {
cERROR(1, "CIFS: attribute cache"
"timeout too large");
goto cifs_parse_mount_err;
}
break;
/* String Arguments */
case Opt_blank_user:
/* null user, i.e. anonymous authentication */
vol->nullauth = 1;
vol->username = NULL;
break;
case Opt_user:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, MAX_USERNAME_SIZE) >
MAX_USERNAME_SIZE) {
printk(KERN_WARNING "CIFS: username too long\n");
goto cifs_parse_mount_err;
}
vol->username = kstrdup(string, GFP_KERNEL);
if (!vol->username) {
printk(KERN_WARNING "CIFS: no memory "
"for username\n");
goto cifs_parse_mount_err;
}
break;
case Opt_blank_pass:
vol->password = NULL;
break;
case Opt_pass:
/* passwords have to be handled differently
* to allow the character used as the delimiter
* to be passed within them
*/
/* Obtain the value string */
value = strchr(data, '=');
value++;
/* Set tmp_end to end of the string */
tmp_end = (char *) value + strlen(value);
/* Check if the following character is the delimiter.
* If yes, we have encountered a double delimiter;
* reset the null character to the delimiter.
*/
if (tmp_end < end && tmp_end[1] == delim)
tmp_end[0] = delim;
/* Keep iterating until we get to a single delimiter
* OR the end
*/
while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
(tmp_end[1] == delim)) {
tmp_end = (char *) &tmp_end[2];
}
/* Reset var options to point to next element */
if (tmp_end) {
tmp_end[0] = '\0';
options = (char *) &tmp_end[1];
} else
/* Reached the end of the mount option string */
options = end;
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
if (vol->password == NULL) {
printk(KERN_WARNING "CIFS: no memory "
"for password\n");
goto cifs_parse_mount_err;
}
for (i = 0, j = 0; i < temp_len; i++, j++) {
vol->password[j] = value[i];
if ((value[i] == delim) &&
value[i+1] == delim)
/* skip the second delimiter */
i++;
}
vol->password[j] = '\0';
break;
case Opt_blank_ip:
vol->UNCip = NULL;
break;
case Opt_ip:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, INET6_ADDRSTRLEN) >
INET6_ADDRSTRLEN) {
printk(KERN_WARNING "CIFS: ip address "
"too long\n");
goto cifs_parse_mount_err;
}
vol->UNCip = kstrdup(string, GFP_KERNEL);
if (!vol->UNCip) {
printk(KERN_WARNING "CIFS: no memory "
"for UNC IP\n");
goto cifs_parse_mount_err;
}
break;
case Opt_unc:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
temp_len = strnlen(string, 300);
if (temp_len == 300) {
printk(KERN_WARNING "CIFS: UNC name too long\n");
goto cifs_parse_mount_err;
}
vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
if (vol->UNC == NULL) {
printk(KERN_WARNING "CIFS: no memory for UNC\n");
goto cifs_parse_mount_err;
}
strcpy(vol->UNC, string);
if (strncmp(string, "//", 2) == 0) {
vol->UNC[0] = '\\';
vol->UNC[1] = '\\';
} else if (strncmp(string, "\\\\", 2) != 0) {
printk(KERN_WARNING "CIFS: UNC Path does not "
"begin with // or \\\\\n");
goto cifs_parse_mount_err;
}
break;
case Opt_domain:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, 256) == 256) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
}
vol->domainname = kstrdup(string, GFP_KERNEL);
if (!vol->domainname) {
printk(KERN_WARNING "CIFS: no memory "
"for domainname\n");
goto cifs_parse_mount_err;
}
cFYI(1, "Domain name set");
break;
case Opt_srcaddr:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (!cifs_convert_address(
(struct sockaddr *)&vol->srcaddr,
string, strlen(string))) {
printk(KERN_WARNING "CIFS: Could not parse"
" srcaddr: %s\n", string);
goto cifs_parse_mount_err;
}
break;
case Opt_prefixpath:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
temp_len = strnlen(string, 1024);
if (string[0] != '/')
temp_len++; /* missing leading slash */
if (temp_len > 1024) {
printk(KERN_WARNING "CIFS: prefix too long\n");
goto cifs_parse_mount_err;
}
vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
if (vol->prepath == NULL) {
printk(KERN_WARNING "CIFS: no memory "
"for path prefix\n");
goto cifs_parse_mount_err;
}
if (string[0] != '/') {
vol->prepath[0] = '/';
strcpy(vol->prepath+1, string);
} else
strcpy(vol->prepath, string);
break;
case Opt_iocharset:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnlen(string, 1024) >= 65) {
printk(KERN_WARNING "CIFS: iocharset name "
"too long.\n");
goto cifs_parse_mount_err;
}
if (strnicmp(string, "default", 7) != 0) {
vol->iocharset = kstrdup(string,
GFP_KERNEL);
if (!vol->iocharset) {
printk(KERN_WARNING "CIFS: no memory"
"for charset\n");
goto cifs_parse_mount_err;
}
}
/* if iocharset not set then load_nls_default
* is used by caller
*/
cFYI(1, "iocharset set to %s", string);
break;
case Opt_sockopt:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnicmp(string, "TCP_NODELAY", 11) == 0)
vol->sockopt_tcp_nodelay = 1;
break;
case Opt_netbiosname:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
memset(vol->source_rfc1001_name, 0x20,
RFC1001_NAME_LEN);
/*
* FIXME: are there cases in which a comma can
* be valid in workstation netbios name (and
* need special handling)?
*/
for (i = 0; i < RFC1001_NAME_LEN; i++) {
/* don't ucase netbiosname for user */
if (string[i] == 0)
break;
vol->source_rfc1001_name[i] = string[i];
}
/* The 16th byte of the string is still zero from the
* memset at the top of the function
*/
if (i == RFC1001_NAME_LEN && string[i] != 0)
printk(KERN_WARNING "CIFS: netbiosname"
" longer than 15 truncated.\n");
break;
case Opt_servern:
/* servernetbiosname specified override *SMBSERVER */
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
/* last byte, type, is 0x20 for server type */
memset(vol->target_rfc1001_name, 0x20,
RFC1001_NAME_LEN_WITH_NULL);
/* BB are there cases in which a comma can be
valid in this workstation netbios name
(and need special handling)? */
/* user or mount helper must uppercase the
netbios name */
for (i = 0; i < 15; i++) {
if (string[i] == 0)
break;
vol->target_rfc1001_name[i] = string[i];
}
/* The 16th byte of the string is still zero from the
memset at the top of the function */
if (i == RFC1001_NAME_LEN && string[i] != 0)
printk(KERN_WARNING "CIFS: server net"
"biosname longer than 15 truncated.\n");
break;
case Opt_ver:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (strnicmp(string, "cifs", 4) == 0 ||
strnicmp(string, "1", 1) == 0) {
/* This is the default */
break;
}
/* For any other value, error */
printk(KERN_WARNING "CIFS: Invalid version"
" specified\n");
goto cifs_parse_mount_err;
case Opt_sec:
string = match_strdup(args);
if (string == NULL)
goto out_nomem;
if (cifs_parse_security_flavors(string, vol) != 0)
goto cifs_parse_mount_err;
break;
default:
/*
* An option we don't recognize. Save it off for later
* if we haven't already found one
*/
if (!invalid)
invalid = data;
break;
}
/* Free up any allocated string */
kfree(string);
string = NULL;
}
if (!sloppy && invalid) {
printk(KERN_ERR "CIFS: Unknown mount option \"%s\"\n", invalid);
goto cifs_parse_mount_err;
}
#ifndef CONFIG_KEYS
/* Multiuser mounts require CONFIG_KEYS support */
if (vol->multiuser) {
cERROR(1, "Multiuser mounts require kernels with "
"CONFIG_KEYS enabled.");
goto cifs_parse_mount_err;
}
#endif
if (vol->UNCip == NULL)
vol->UNCip = &vol->UNC[2];
if (uid_specified)
vol->override_uid = override_uid;
else if (override_uid == 1)
printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
"specified with no uid= option.\n");
if (gid_specified)
vol->override_gid = override_gid;
else if (override_gid == 1)
printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
"specified with no gid= option.\n");
kfree(mountdata_copy);
return 0;
out_nomem:
printk(KERN_WARNING "Could not allocate temporary buffer\n");
cifs_parse_mount_err:
kfree(string);
kfree(mountdata_copy);
return 1;
}
/* Returns true if srcaddr isn't specified and rhs isn't specified, or
* if srcaddr is specified and matches the IP address of the rhs argument.
*/
static bool
srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
switch (srcaddr->sa_family) {
case AF_UNSPEC:
return (rhs->sa_family == AF_UNSPEC);
case AF_INET: {
struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
}
case AF_INET6: {
struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs;
return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
}
default:
WARN_ON(1);
return false; /* don't expect to be here */
}
}
/*
* If no port is specified in the addr structure, we try to match with port
* 445, and if that fails, with port 139. This should only be called when the
* address families of server and addr are equal.
*/
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
__be16 port, *sport;
switch (addr->sa_family) {
case AF_INET:
sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
port = ((struct sockaddr_in *) addr)->sin_port;
break;
case AF_INET6:
sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
port = ((struct sockaddr_in6 *) addr)->sin6_port;
break;
default:
WARN_ON(1);
return false;
}
if (!port) {
port = htons(CIFS_PORT);
if (port == *sport)
return true;
port = htons(RFC1001_PORT);
}
return port == *sport;
}
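/*
* Returns true if the destination address in addr matches the server's
* dstaddr (including the IPv6 scope id) and the source addresses match
* as well.
*/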
static bool
match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
struct sockaddr *srcaddr)
{
switch (addr->sa_family) {
case AF_INET: {
struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
struct sockaddr_in *srv_addr4 =
(struct sockaddr_in *)&server->dstaddr;
if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
return false;
break;
}
case AF_INET6: {
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
struct sockaddr_in6 *srv_addr6 =
(struct sockaddr_in6 *)&server->dstaddr;
if (!ipv6_addr_equal(&addr6->sin6_addr,
&srv_addr6->sin6_addr))
return false;
if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
return false;
break;
}
default:
WARN_ON(1);
return false; /* don't expect to be here */
}
if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
return false;
return true;
}
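/*
* Check whether the security type negotiated with an existing server is
* acceptable under the security flags requested for this mount, including
* whether the signing settings are compatible.
*/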
static bool
match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
{
unsigned int secFlags;
if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
secFlags = vol->secFlg;
else
secFlags = global_secflags | vol->secFlg;
switch (server->secType) {
case LANMAN:
if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT)))
return false;
break;
case NTLMv2:
if (!(secFlags & CIFSSEC_MAY_NTLMV2))
return false;
break;
case NTLM:
if (!(secFlags & CIFSSEC_MAY_NTLM))
return false;
break;
case Kerberos:
if (!(secFlags & CIFSSEC_MAY_KRB5))
return false;
break;
case RawNTLMSSP:
if (!(secFlags & CIFSSEC_MAY_NTLMSSP))
return false;
break;
default:
/* shouldn't happen */
return false;
}
/* now check if signing mode is acceptable */
if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
(server->sec_mode & SECMODE_SIGN_REQUIRED))
return false;
else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
(server->sec_mode &
(SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
return false;
return true;
}
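/*
* Returns 1 if the existing server matches the requested net namespace,
* destination/source address, port and security settings; 0 otherwise.
*/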
static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
struct smb_vol *vol)
{
if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
return 0;
if (!match_address(server, addr,
(struct sockaddr *)&vol->srcaddr))
return 0;
if (!match_port(server, addr))
return 0;
if (!match_security(server, vol))
return 0;
return 1;
}
static struct TCP_Server_Info *
cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
{
struct TCP_Server_Info *server;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (!match_server(server, addr, vol))
continue;
++server->srv_count;
spin_unlock(&cifs_tcp_ses_lock);
cFYI(1, "Existing tcp session with server found");
return server;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
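/*
* Drop a reference to a TCP session. On the last put, remove it from the
* global list, release its resources and signal cifsd to exit.
*/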
static void
cifs_put_tcp_session(struct TCP_Server_Info *server)
{
struct task_struct *task;
spin_lock(&cifs_tcp_ses_lock);
if (--server->srv_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
put_net(cifs_net_ns(server));
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cancel_delayed_work_sync(&server->echo);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
cifs_crypto_shash_release(server);
cifs_fscache_release_client_cookie(server);
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
task = xchg(&server->tsk, NULL);
if (task)
force_sig(SIGKILL, task);
}
static struct TCP_Server_Info *
cifs_get_tcp_session(struct smb_vol *volume_info)
{
struct TCP_Server_Info *tcp_ses = NULL;
struct sockaddr_storage addr;
struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr;
struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr;
int rc;
memset(&addr, 0, sizeof(struct sockaddr_storage));
cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip);
if (volume_info->UNCip && volume_info->UNC) {
rc = cifs_fill_sockaddr((struct sockaddr *)&addr,
volume_info->UNCip,
strlen(volume_info->UNCip),
volume_info->port);
if (!rc) {
/* we failed translating address */
rc = -EINVAL;
goto out_err;
}
} else if (volume_info->UNCip) {
/* BB using ip addr as tcp_ses name to connect to the
DFS root below */
cERROR(1, "Connecting to DFS root not implemented yet");
rc = -EINVAL;
goto out_err;
} else /* which tcp_ses DFS root would we connect to */ {
cERROR(1, "CIFS mount error: No UNC path (e.g. -o "
"unc=//192.168.1.100/public) specified");
rc = -EINVAL;
goto out_err;
}
/* see if we already have a matching tcp_ses */
tcp_ses = cifs_find_tcp_session((struct sockaddr *)&addr, volume_info);
if (tcp_ses)
return tcp_ses;
tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
if (!tcp_ses) {
rc = -ENOMEM;
goto out_err;
}
rc = cifs_crypto_shash_allocate(tcp_ses);
if (rc) {
cERROR(1, "could not setup hash structures rc %d", rc);
goto out_err;
}
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->hostname = extract_hostname(volume_info->UNC);
if (IS_ERR(tcp_ses->hostname)) {
rc = PTR_ERR(tcp_ses->hostname);
goto out_err_crypto_release;
}
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
tcp_ses->in_flight = 0;
tcp_ses->credits = 1;
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
mutex_init(&tcp_ses->srv_mutex);
memcpy(tcp_ses->workstation_RFC1001_name,
volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
memcpy(tcp_ses->server_RFC1001_name,
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
tcp_ses->lstrp = jiffies;
spin_lock_init(&tcp_ses->req_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
/*
* At this point we are the only ones with a pointer to the struct,
* since the kernel thread has not been created yet; there is no need
* to spinlock this init of tcpStatus or srv_count
*/
tcp_ses->tcpStatus = CifsNew;
memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
sizeof(tcp_ses->srcaddr));
++tcp_ses->srv_count;
if (addr.ss_family == AF_INET6) {
cFYI(1, "attempting ipv6 connect");
/* BB should we allow ipv6 on port 139? */
/* other OSes never observed in the wild doing 139 with v6 */
memcpy(&tcp_ses->dstaddr, sin_server6,
sizeof(struct sockaddr_in6));
} else
memcpy(&tcp_ses->dstaddr, sin_server,
sizeof(struct sockaddr_in));
rc = ip_connect(tcp_ses);
if (rc < 0) {
cERROR(1, "Error connecting to socket. Aborting operation");
goto out_err_crypto_release;
}
/*
* since we're in a cifs function already, we know that
* this will succeed. No need for try_module_get().
*/
__module_get(THIS_MODULE);
tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
tcp_ses, "cifsd");
if (IS_ERR(tcp_ses->tsk)) {
rc = PTR_ERR(tcp_ses->tsk);
cERROR(1, "error %d create cifsd thread", rc);
module_put(THIS_MODULE);
goto out_err_crypto_release;
}
tcp_ses->tcpStatus = CifsNeedNegotiate;
/* thread spawned, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
cifs_fscache_get_client_cookie(tcp_ses);
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
return tcp_ses;
out_err_crypto_release:
cifs_crypto_shash_release(tcp_ses);
put_net(cifs_net_ns(tcp_ses));
out_err:
if (tcp_ses) {
if (!IS_ERR(tcp_ses->hostname))
kfree(tcp_ses->hostname);
if (tcp_ses->ssocket)
sock_release(tcp_ses->ssocket);
kfree(tcp_ses);
}
return ERR_PTR(rc);
}
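/*
* Returns 1 if an existing SMB session matches the credentials requested
* in vol (cred_uid for krb5, username/password otherwise; a NULL username
* means an anonymous session); 0 otherwise.
*/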
static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
{
switch (ses->server->secType) {
case Kerberos:
if (vol->cred_uid != ses->cred_uid)
return 0;
break;
default:
/* NULL username means anonymous session */
if (ses->user_name == NULL) {
if (!vol->nullauth)
return 0;
break;
}
/* anything else takes username/password */
if (strncmp(ses->user_name,
vol->username ? vol->username : "",
MAX_USERNAME_SIZE))
return 0;
if (strlen(vol->username) != 0 &&
ses->password != NULL &&
strncmp(ses->password,
vol->password ? vol->password : "",
MAX_PASSWORD_SIZE))
return 0;
}
return 1;
}
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
struct cifs_ses *ses;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (!match_session(ses, vol))
continue;
++ses->ses_count;
spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
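/*
* Drop a reference to an SMB session. On the last put, log off from the
* server if the session is still good, free the session and drop its
* reference to the TCP session.
*/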
static void
cifs_put_smb_ses(struct cifs_ses *ses)
{
int xid;
struct TCP_Server_Info *server = ses->server;
cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
spin_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
if (ses->status == CifsGood) {
xid = GetXid();
CIFSSMBLogoff(xid, ses);
_FreeXid(xid);
}
sesInfoFree(ses);
cifs_put_tcp_session(server);
}
#ifdef CONFIG_KEYS
/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
/* Populate username and pw fields from keyring if possible */
static int
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
char *desc, *delim, *payload;
ssize_t len;
struct key *key;
struct TCP_Server_Info *server = ses->server;
struct sockaddr_in *sa;
struct sockaddr_in6 *sa6;
struct user_key_payload *upayload;
desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
if (!desc)
return -ENOMEM;
/* try to find an address key first */
switch (server->dstaddr.ss_family) {
case AF_INET:
sa = (struct sockaddr_in *)&server->dstaddr;
sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
break;
case AF_INET6:
sa6 = (struct sockaddr_in6 *)&server->dstaddr;
sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
break;
default:
cFYI(1, "Bad ss_family (%hu)", server->dstaddr.ss_family);
rc = -EINVAL;
goto out_err;
}
cFYI(1, "%s: desc=%s", __func__, desc);
key = request_key(&key_type_logon, desc, "");
if (IS_ERR(key)) {
if (!ses->domainName) {
cFYI(1, "domainName is NULL");
rc = PTR_ERR(key);
goto out_err;
}
/* didn't work, try to find a domain key */
sprintf(desc, "cifs:d:%s", ses->domainName);
cFYI(1, "%s: desc=%s", __func__, desc);
key = request_key(&key_type_logon, desc, "");
if (IS_ERR(key)) {
rc = PTR_ERR(key);
goto out_err;
}
}
down_read(&key->sem);
upayload = key->payload.data;
if (IS_ERR_OR_NULL(upayload)) {
rc = upayload ? PTR_ERR(upayload) : -EINVAL;
goto out_key_put;
}
/* find first : in payload */
payload = (char *)upayload->data;
delim = strnchr(payload, upayload->datalen, ':');
cFYI(1, "payload=%s", payload);
if (!delim) {
cFYI(1, "Unable to find ':' in payload (datalen=%d)",
upayload->datalen);
rc = -EINVAL;
goto out_key_put;
}
len = delim - payload;
if (len > MAX_USERNAME_SIZE || len <= 0) {
cFYI(1, "Bad value from username search (len=%zd)", len);
rc = -EINVAL;
goto out_key_put;
}
vol->username = kstrndup(payload, len, GFP_KERNEL);
if (!vol->username) {
cFYI(1, "Unable to allocate %zd bytes for username", len);
rc = -ENOMEM;
goto out_key_put;
}
cFYI(1, "%s: username=%s", __func__, vol->username);
len = key->datalen - (len + 1);
if (len > MAX_PASSWORD_SIZE || len <= 0) {
cFYI(1, "Bad len for password search (len=%zd)", len);
rc = -EINVAL;
kfree(vol->username);
vol->username = NULL;
goto out_key_put;
}
++delim;
vol->password = kstrndup(delim, len, GFP_KERNEL);
if (!vol->password) {
cFYI(1, "Unable to allocate %zd bytes for password", len);
rc = -ENOMEM;
kfree(vol->username);
vol->username = NULL;
goto out_key_put;
}
out_key_put:
up_read(&key->sem);
key_put(key);
out_err:
kfree(desc);
cFYI(1, "%s: returning %d", __func__, rc);
return rc;
}
#else /* ! CONFIG_KEYS */
static inline int
cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
struct cifs_ses *ses __attribute__((unused)))
{
return -ENOSYS;
}
#endif /* CONFIG_KEYS */
static bool warned_on_ntlm; /* globals init to false automatically */
static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
int rc = -ENOMEM, xid;
struct cifs_ses *ses;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
xid = GetXid();
ses = cifs_find_smb_ses(server, volume_info);
if (ses) {
cFYI(1, "Existing smb sess found (status=%d)", ses->status);
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our ses reference */
cifs_put_smb_ses(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
if (ses->need_reconnect) {
cFYI(1, "Session needs reconnect");
rc = cifs_setup_session(xid, ses,
volume_info->local_nls);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our reference */
cifs_put_smb_ses(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
}
mutex_unlock(&ses->session_mutex);
/* existing SMB ses has a server reference already */
cifs_put_tcp_session(server);
FreeXid(xid);
return ses;
}
cFYI(1, "Existing smb sess not found");
ses = sesInfoAlloc();
if (ses == NULL)
goto get_ses_fail;
/* new SMB session uses our server ref */
ses->server = server;
if (server->dstaddr.ss_family == AF_INET6)
sprintf(ses->serverName, "%pI6", &addr6->sin6_addr);
else
sprintf(ses->serverName, "%pI4", &addr->sin_addr);
if (volume_info->username) {
ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
if (!ses->user_name)
goto get_ses_fail;
}
/* volume_info->password freed at unmount */
if (volume_info->password) {
ses->password = kstrdup(volume_info->password, GFP_KERNEL);
if (!ses->password)
goto get_ses_fail;
}
if (volume_info->domainname) {
ses->domainName = kstrdup(volume_info->domainname, GFP_KERNEL);
if (!ses->domainName)
goto get_ses_fail;
}
ses->cred_uid = volume_info->cred_uid;
ses->linux_uid = volume_info->linux_uid;
/* ntlmv2 is much stronger than ntlm security, and has been broadly
supported for many years; time to update the default security mechanism */
if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
"ntlmv2 in kernel release 3.3");
}
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
/* success, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
FreeXid(xid);
return ses;
get_ses_fail:
sesInfoFree(ses);
FreeXid(xid);
return ERR_PTR(rc);
}
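/* Returns 1 if an existing tcon matches the requested UNC path and is not
being torn down; 0 otherwise. */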
static int match_tcon(struct cifs_tcon *tcon, const char *unc)
{
if (tcon->tidStatus == CifsExiting)
return 0;
if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
return 0;
return 1;
}
static struct cifs_tcon *
cifs_find_tcon(struct cifs_ses *ses, const char *unc)
{
struct list_head *tmp;
struct cifs_tcon *tcon;
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
if (!match_tcon(tcon, unc))
continue;
++tcon->tc_count;
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
}
spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
static void
cifs_put_tcon(struct cifs_tcon *tcon)
{
int xid;
struct cifs_ses *ses = tcon->ses;
cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&tcon->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
xid = GetXid();
CIFSSMBTDis(xid, tcon);
_FreeXid(xid);
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon);
cifs_put_smb_ses(ses);
}
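/*
* Find an existing tcon matching the UNC path in volume_info, or allocate
* a new one and issue a tree connect for it. On success, the returned tcon
* takes over the caller's reference to ses; on failure, the caller keeps
* its ses reference.
*/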
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
int rc, xid;
struct cifs_tcon *tcon;
tcon = cifs_find_tcon(ses, volume_info->UNC);
if (tcon) {
cFYI(1, "Found match on UNC path");
/* existing tcon already has a reference */
cifs_put_smb_ses(ses);
if (tcon->seal != volume_info->seal)
cERROR(1, "transport encryption setting "
"conflicts with existing tid");
return tcon;
}
tcon = tconInfoAlloc();
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
}
tcon->ses = ses;
if (volume_info->password) {
tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
if (!tcon->password) {
rc = -ENOMEM;
goto out_fail;
}
}
if (strchr(volume_info->UNC + 3, '\\') == NULL
&& strchr(volume_info->UNC + 3, '/') == NULL) {
cERROR(1, "Missing share name");
rc = -ENODEV;
goto out_fail;
}
/* BB Do we need to wrap session_mutex around
* this TCon call and Unix SetFS as
* we do on SessSetup and reconnect? */
xid = GetXid();
rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls);
FreeXid(xid);
cFYI(1, "CIFS Tcon rc = %d", rc);
if (rc)
goto out_fail;
if (volume_info->nodfs) {
tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
cFYI(1, "DFS disabled (%d)", tcon->Flags);
}
tcon->seal = volume_info->seal;
/* we can have only one retry value for a connection
to a share so for resources mounted more than once
to the same server share the last value passed in
for the retry flag is used */
tcon->retry = volume_info->retry;
tcon->nocase = volume_info->nocase;
tcon->local_lease = volume_info->local_lease;
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
cifs_fscache_get_super_cookie(tcon);
return tcon;
out_fail:
tconInfoFree(tcon);
return ERR_PTR(rc);
}
void
cifs_put_tlink(struct tcon_link *tlink)
{
if (!tlink || IS_ERR(tlink))
return;
if (!atomic_dec_and_test(&tlink->tl_count) ||
test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
tlink->tl_time = jiffies;
return;
}
if (!IS_ERR(tlink_tcon(tlink)))
cifs_put_tcon(tlink_tcon(tlink));
kfree(tlink);
return;
}
static inline struct tcon_link *
cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
{
return cifs_sb->master_tlink;
}
static int
compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
return 0;
if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
(new->mnt_cifs_flags & CIFS_MOUNT_MASK))
return 0;
/*
* We want to share sb only if we don't specify an r/wsize or
* specified r/wsize is greater than or equal to existing one.
*/
if (new->wsize && new->wsize < old->wsize)
return 0;
if (new->rsize && new->rsize < old->rsize)
return 0;
if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
return 0;
if (old->mnt_file_mode != new->mnt_file_mode ||
old->mnt_dir_mode != new->mnt_dir_mode)
return 0;
if (strcmp(old->local_nls->charset, new->local_nls->charset))
return 0;
if (old->actimeo != new->actimeo)
return 0;
return 1;
}
int
cifs_match_super(struct super_block *sb, void *data)
{
struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
struct smb_vol *volume_info;
struct cifs_sb_info *cifs_sb;
struct TCP_Server_Info *tcp_srv;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
struct tcon_link *tlink;
struct sockaddr_storage addr;
int rc = 0;
memset(&addr, 0, sizeof(struct sockaddr_storage));
spin_lock(&cifs_tcp_ses_lock);
cifs_sb = CIFS_SB(sb);
tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
if (IS_ERR(tlink)) {
spin_unlock(&cifs_tcp_ses_lock);
return rc;
}
tcon = tlink_tcon(tlink);
ses = tcon->ses;
tcp_srv = ses->server;
volume_info = mnt_data->vol;
if (!volume_info->UNCip || !volume_info->UNC)
goto out;
rc = cifs_fill_sockaddr((struct sockaddr *)&addr,
volume_info->UNCip,
strlen(volume_info->UNCip),
volume_info->port);
if (!rc)
goto out;
if (!match_server(tcp_srv, (struct sockaddr *)&addr, volume_info) ||
!match_session(ses, volume_info) ||
!match_tcon(tcon, volume_info->UNC)) {
rc = 0;
goto out;
}
rc = compare_mount_options(sb, mnt_data);
out:
spin_unlock(&cifs_tcp_ses_lock);
cifs_put_tlink(tlink);
return rc;
}
int
get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
struct dfs_info3_param **preferrals, int remap)
{
char *temp_unc;
int rc = 0;
*pnum_referrals = 0;
*preferrals = NULL;
if (pSesInfo->ipc_tid == 0) {
temp_unc = kmalloc(2 /* for slashes */ +
strnlen(pSesInfo->serverName,
SERVER_NAME_LEN_WITH_NULL * 2)
+ 1 + 4 /* slash IPC$ */ + 2,
GFP_KERNEL);
if (temp_unc == NULL)
return -ENOMEM;
temp_unc[0] = '\\';
temp_unc[1] = '\\';
strcpy(temp_unc + 2, pSesInfo->serverName);
strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid);
kfree(temp_unc);
}
if (rc == 0)
rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals,
pnum_referrals, nls_codepage, remap);
/* BB map targetUNCs to dfs_info3 structures, here or
in CIFSGetDFSRefer BB */
return rc;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key cifs_key[2];
static struct lock_class_key cifs_slock_key[2];
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
struct sock *sk = sock->sk;
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}
static inline void
cifs_reclassify_socket6(struct socket *sock)
{
struct sock *sk = sock->sk;
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
#else
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
}
static inline void
cifs_reclassify_socket6(struct socket *sock)
{
}
#endif
/* See RFC1001 section 14 on representation of Netbios names */
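/* Each source byte is split into two nibbles and each nibble is encoded
* as 'A' + nibble, so e.g. the byte 'A' (0x41) is mangled to "EB" */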
static void rfc1002mangle(char *target, char *source, unsigned int length)
{
unsigned int i, j;
for (i = 0, j = 0; i < (length); i++) {
/* mask a nibble at a time and encode */
target[j] = 'A' + (0x0F & (source[i] >> 4));
target[j+1] = 'A' + (0x0F & source[i]);
j += 2;
}
}
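/* Bind the socket to the local source address in server->srcaddr, if one
was specified via the srcaddr= mount option. */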
static int
bind_socket(struct TCP_Server_Info *server)
{
int rc = 0;
if (server->srcaddr.ss_family != AF_UNSPEC) {
/* Bind to the specified local IP address */
struct socket *socket = server->ssocket;
rc = socket->ops->bind(socket,
(struct sockaddr *) &server->srcaddr,
sizeof(server->srcaddr));
if (rc < 0) {
struct sockaddr_in *saddr4;
struct sockaddr_in6 *saddr6;
saddr4 = (struct sockaddr_in *)&server->srcaddr;
saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
if (saddr6->sin6_family == AF_INET6)
cERROR(1, "cifs: "
"Failed to bind to: %pI6c, error: %d\n",
&saddr6->sin6_addr, rc);
else
cERROR(1, "cifs: "
"Failed to bind to: %pI4, error: %d\n",
&saddr4->sin_addr.s_addr, rc);
}
}
return rc;
}
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
int rc = 0;
/*
* some servers require RFC1001 sessinit before sending
* negprot - BB check reconnection in case where second
* sessinit is sent but no second negprot
*/
struct rfc1002_session_packet *ses_init_buf;
struct smb_hdr *smb_buf;
ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
GFP_KERNEL);
if (ses_init_buf) {
ses_init_buf->trailer.session_req.called_len = 32;
if (server->server_RFC1001_name &&
server->server_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.called_name,
server->server_RFC1001_name,
RFC1001_NAME_LEN_WITH_NULL);
else
rfc1002mangle(ses_init_buf->trailer.
session_req.called_name,
DEFAULT_CIFS_CALLED_NAME,
RFC1001_NAME_LEN_WITH_NULL);
ses_init_buf->trailer.session_req.calling_len = 32;
/*
* calling name ends in null (byte 16) from old smb
* convention.
*/
if (server->workstation_RFC1001_name &&
server->workstation_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name,
server->workstation_RFC1001_name,
RFC1001_NAME_LEN_WITH_NULL);
else
rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name,
"LINUX_CIFS_CLNT",
RFC1001_NAME_LEN_WITH_NULL);
ses_init_buf->trailer.session_req.scope1 = 0;
ses_init_buf->trailer.session_req.scope2 = 0;
smb_buf = (struct smb_hdr *)ses_init_buf;
/* sizeof RFC1002_SESSION_REQUEST with no scope */
smb_buf->smb_buf_length = cpu_to_be32(0x81000044);
rc = smb_send(server, smb_buf, 0x44);
kfree(ses_init_buf);
/*
* The RFC1001 layer in at least one server requires a very
* short break before negprot, presumably because it does not
* expect negprot to follow so quickly. This is a simple
* solution that works without complicating the code and
* causes no significant slowdown on mount for everyone else
*/
usleep_range(1000, 2000);
}
/*
* else the negprot may still work without this
* even though malloc failed
*/
return rc;
}
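/*
* Create the TCP socket for this server if needed, bind it to any requested
* source address, set socket options and connect to the destination. When
* connecting to port 139, also send the RFC1001 session request.
*/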
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
int rc = 0;
__be16 sport;
int slen, sfamily;
struct socket *socket = server->ssocket;
struct sockaddr *saddr;
saddr = (struct sockaddr *) &server->dstaddr;
if (server->dstaddr.ss_family == AF_INET6) {
sport = ((struct sockaddr_in6 *) saddr)->sin6_port;
slen = sizeof(struct sockaddr_in6);
sfamily = AF_INET6;
} else {
sport = ((struct sockaddr_in *) saddr)->sin_port;
slen = sizeof(struct sockaddr_in);
sfamily = AF_INET;
}
if (socket == NULL) {
rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
IPPROTO_TCP, &socket, 1);
if (rc < 0) {
cERROR(1, "Error %d creating socket", rc);
server->ssocket = NULL;
return rc;
}
/* BB other socket options to set KEEPALIVE, NODELAY? */
cFYI(1, "Socket created");
server->ssocket = socket;
socket->sk->sk_allocation = GFP_NOFS;
if (sfamily == AF_INET6)
cifs_reclassify_socket6(socket);
else
cifs_reclassify_socket4(socket);
}
rc = bind_socket(server);
if (rc < 0)
return rc;
/*
* Eventually check for other socket options to change from
* the default. sock_setsockopt not used because it expects
* user space buffer
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
socket->sk->sk_sndtimeo = 5 * HZ;
/* make the bufsizes depend on wsize/rsize and max requests */
if (server->noautotune) {
if (socket->sk->sk_sndbuf < (200 * 1024))
socket->sk->sk_sndbuf = 200 * 1024;
if (socket->sk->sk_rcvbuf < (140 * 1024))
socket->sk->sk_rcvbuf = 140 * 1024;
}
if (server->tcp_nodelay) {
int val = 1;
rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
if (rc)
cFYI(1, "set TCP_NODELAY socket option error %d", rc);
}
cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
rc = socket->ops->connect(socket, saddr, slen, 0);
if (rc < 0) {
cFYI(1, "Error %d connecting to server", rc);
sock_release(socket);
server->ssocket = NULL;
return rc;
}
if (sport == htons(RFC1001_PORT))
rc = ip_rfc1001_connect(server);
return rc;
}
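/*
* Connect to the server, trying port 445 first and falling back to port 139
* when no port was specified in the destination address.
*/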
static int
ip_connect(struct TCP_Server_Info *server)
{
__be16 *sport;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
if (server->dstaddr.ss_family == AF_INET6)
sport = &addr6->sin6_port;
else
sport = &addr->sin_port;
if (*sport == 0) {
int rc;
/* try with 445 port at first */
*sport = htons(CIFS_PORT);
rc = generic_ip_connect(server);
if (rc >= 0)
return rc;
/* if it failed, try with 139 port */
*sport = htons(RFC1001_PORT);
}
return generic_ip_connect(server);
}
void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
{
/* If we are reconnecting then we should check whether any
* requested capabilities changed locally, e.g. via remount,
* but we can not do much about it here if they have (even if
* we could detect it by the following). Perhaps we could add
* a backpointer to the array of sbs from the tcon, or if we
* changed to make all sbs for the same share use the same sb,
* as NFS does - then we would only have one backpointer to an
* sb. What if we wanted to mount the server share twice, once
* with and once without posixacls or posix paths? */
__u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
if (vol_info && vol_info->no_linux_ext) {
tcon->fsUnixInfo.Capability = 0;
tcon->unix_ext = 0; /* Unix Extensions disabled */
cFYI(1, "Linux protocol extensions disabled");
return;
} else if (vol_info)
tcon->unix_ext = 1; /* Unix Extensions supported */
if (tcon->unix_ext == 0) {
cFYI(1, "Unix extensions disabled so not set on reconnect");
return;
}
if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
cFYI(1, "unix caps which server supports %lld", cap);
/* check for reconnect case in which we do not
want to change the mount behavior if we can avoid it */
if (vol_info == NULL) {
/* turn off POSIX ACL and PATHNAMES if not set
originally at mount time */
if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
cERROR(1, "POSIXPATH support change");
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
} else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
cERROR(1, "possible reconnect error");
cERROR(1, "server disabled POSIX path support");
}
}
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
cERROR(1, "per-share encryption not supported yet");
cap &= CIFS_UNIX_CAP_MASK;
if (vol_info && vol_info->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
cFYI(1, "negotiated posix acl support");
if (cifs_sb)
cifs_sb->mnt_cifs_flags |=
CIFS_MOUNT_POSIXACL;
}
if (vol_info && vol_info->posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
cFYI(1, "negotiate posix pathnames");
if (cifs_sb)
cifs_sb->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
}
cFYI(1, "Negotiate caps 0x%x", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
if (cap & CIFS_UNIX_FCNTL_CAP)
cFYI(1, "FCNTL cap");
if (cap & CIFS_UNIX_EXTATTR_CAP)
cFYI(1, "EXTATTR cap");
if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
cFYI(1, "POSIX path cap");
if (cap & CIFS_UNIX_XATTR_CAP)
cFYI(1, "XATTR cap");
if (cap & CIFS_UNIX_POSIX_ACL_CAP)
cFYI(1, "POSIX ACL cap");
if (cap & CIFS_UNIX_LARGE_READ_CAP)
cFYI(1, "very large read cap");
if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
cFYI(1, "very large write cap");
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
cFYI(1, "transport encryption cap");
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
cFYI(1, "mandatory transport encryption cap");
#endif /* CONFIG_CIFS_DEBUG2 */
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
if (vol_info == NULL) {
cFYI(1, "resetting capabilities failed");
} else
cERROR(1, "Negotiating Unix capabilities "
"with the server failed. Consider "
"mounting with the Unix Extensions\n"
"disabled, if problems are found, "
"by specifying the nounix mount "
"option.");
}
}
}
void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
spin_lock_init(&cifs_sb->tlink_tree_lock);
cifs_sb->tlink_tree = RB_ROOT;
/*
* Temporarily set r/wsize for matching superblock. If we end up using
* new sb then client will later negotiate it downward if needed.
*/
cifs_sb->rsize = pvolume_info->rsize;
cifs_sb->wsize = pvolume_info->wsize;
cifs_sb->mnt_uid = pvolume_info->linux_uid;
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
cFYI(1, "file mode: 0x%hx dir mode: 0x%hx",
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
cifs_sb->local_nls = pvolume_info->local_nls;
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
if (pvolume_info->setuids)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
if (pvolume_info->server_ino)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
if (pvolume_info->remap)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
if (pvolume_info->no_xattr)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
if (pvolume_info->sfu_emul)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
if (pvolume_info->nobrl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
if (pvolume_info->nostrictsync)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
if (pvolume_info->mand_lock)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
if (pvolume_info->rwpidforward)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
if (pvolume_info->cifs_acl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
if (pvolume_info->backupuid_specified) {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
cifs_sb->mnt_backupuid = pvolume_info->backupuid;
}
if (pvolume_info->backupgid_specified) {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
cifs_sb->mnt_backupgid = pvolume_info->backupgid;
}
if (pvolume_info->override_uid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
if (pvolume_info->override_gid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
if (pvolume_info->dynperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
if (pvolume_info->fsc)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
if (pvolume_info->multiuser)
cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
CIFS_MOUNT_NO_PERM);
if (pvolume_info->strict_io)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
if (pvolume_info->direct_io) {
cFYI(1, "mounting share using direct i/o");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
if (pvolume_info->mfsymlinks) {
if (pvolume_info->sfu_emul) {
cERROR(1, "mount option mfsymlinks ignored if sfu "
"mount option is used");
} else {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
}
}
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
cERROR(1, "mount option dynperm ignored if cifsacl "
"mount option supported");
}
/*
* When the server supports very large reads and writes via POSIX extensions,
* we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
* including the RFC1001 length.
*
* Note that this might make for "interesting" allocation problems during
* writeback, however, as we have to allocate an array of pointers for the
* pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
*
* For reads, there is a similar problem as we need to allocate an array
* of kvecs to handle the receive, though that should only need to be done
* once.
*/
#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
/*
* When the server doesn't allow large posix writes, only allow a rsize/wsize
* of 2^17-1 minus the size of the call header. That allows for a read or
* write up to the maximum size described by RFC1002.
*/
#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
/*
* The default wsize is 1M. find_get_pages seems to return a maximum of 256
* pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
* a single wsize request with a single call.
*/
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
/*
* Windows only supports a max of 60kb reads and 65535 byte writes. Default to
* those values when posix extensions aren't in force. In actuality here, we
* use 65536 to allow for a write that is a multiple of 4k. Most servers seem
* to be ok with the extra byte even though Windows doesn't send writes that
* are that large.
*
* Citation:
*
* http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
*/
#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
static unsigned int
cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int wsize;
/* start with specified wsize, or default */
if (pvolume_info->wsize)
wsize = pvolume_info->wsize;
else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
wsize = CIFS_DEFAULT_IOSIZE;
else
wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
/* can server support 24-bit write sizes? (via UNIX extensions) */
if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
/*
* no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
* Limit it to max buffer offered by the server, minus the size of the
* WRITEX header, not including the 4 byte RFC1001 length.
*/
if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
(!(server->capabilities & CAP_UNIX) &&
(server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
wsize = min_t(unsigned int, wsize,
server->maxBuf - sizeof(WRITE_REQ) + 4);
/* hard limit of CIFS_MAX_WSIZE */
wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
return wsize;
}
static unsigned int
cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int rsize, defsize;
/*
* Set default value...
*
* HACK alert! Ancient servers have very small buffers. Even though
* MS-CIFS indicates that servers are only limited by the client's
* bufsize for reads, testing against win98se shows that it throws
* INVALID_PARAMETER errors if you try to request too large a read.
*
* If the server advertises a MaxBufferSize of less than one page,
* assume that it also can't satisfy reads larger than that either.
*
* FIXME: Is there a better heuristic for this?
*/
if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
defsize = CIFS_DEFAULT_IOSIZE;
else if (server->capabilities & CAP_LARGE_READ_X)
defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
else if (server->maxBuf >= PAGE_CACHE_SIZE)
defsize = CIFSMaxBufSize;
else
defsize = server->maxBuf - sizeof(READ_RSP);
rsize = pvolume_info->rsize ? pvolume_info->rsize : defsize;
/*
* no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
* the client's MaxBufferSize.
*/
if (!(server->capabilities & CAP_LARGE_READ_X))
rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
/* hard limit of CIFS_MAX_RSIZE */
rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
return rsize;
}
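/* Probe whether full_path is accessible on this tcon by querying its file
info, falling back to the legacy QueryInformation call if needed. */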
static int
is_path_accessible(int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path)
{
int rc;
FILE_ALL_INFO *pfile_info;
pfile_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (pfile_info == NULL)
return -ENOMEM;
rc = CIFSSMBQPathInfo(xid, tcon, full_path, pfile_info,
0 /* not legacy */, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc == -EOPNOTSUPP || rc == -EINVAL)
rc = SMBQueryInformation(xid, tcon, full_path, pfile_info,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
kfree(pfile_info);
return rc;
}
static void
cleanup_volume_info_contents(struct smb_vol *volume_info)
{
kfree(volume_info->username);
kzfree(volume_info->password);
if (volume_info->UNCip != volume_info->UNC + 2)
kfree(volume_info->UNCip);
kfree(volume_info->UNC);
kfree(volume_info->domainname);
kfree(volume_info->iocharset);
kfree(volume_info->prepath);
}
void
cifs_cleanup_volume_info(struct smb_vol *volume_info)
{
if (!volume_info)
return;
cleanup_volume_info_contents(volume_info);
kfree(volume_info);
}
#ifdef CONFIG_CIFS_DFS_UPCALL
/* build_unc_path_to_root returns the full path to root when
* we do not have an existing connection (tcon) */
static char *
build_unc_path_to_root(const struct smb_vol *vol,
const struct cifs_sb_info *cifs_sb)
{
char *full_path, *pos;
unsigned int pplen = vol->prepath ? strlen(vol->prepath) : 0;
unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
strncpy(full_path, vol->UNC, unc_len);
pos = full_path + unc_len;
if (pplen) {
strncpy(pos, vol->prepath, pplen);
pos += pplen;
}
*pos = '\0'; /* add trailing null */
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
cFYI(1, "%s: full_path=%s", __func__, full_path);
return full_path;
}
/*
* Perform a dfs referral query for a share and (optionally) prefix
*
* If a referral is found, cifs_sb->mountdata will be (re-)allocated
* to a string containing updated options for the submount. Otherwise it
* will be left untouched.
*
* Returns the rc from get_dfs_path to the caller, which can be used to
* determine whether there were referrals.
*/
static int
expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
int check_prefix)
{
int rc;
unsigned int num_referrals = 0;
struct dfs_info3_param *referrals = NULL;
char *full_path = NULL, *ref_path = NULL, *mdata = NULL;
full_path = build_unc_path_to_root(volume_info, cifs_sb);
if (IS_ERR(full_path))
return PTR_ERR(full_path);
/* For DFS paths, skip the first '\' of the UNC */
ref_path = check_prefix ? full_path + 1 : volume_info->UNC + 1;
rc = get_dfs_path(xid, pSesInfo , ref_path, cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (!rc && num_referrals > 0) {
char *fake_devname = NULL;
mdata = cifs_compose_mount_options(cifs_sb->mountdata,
full_path + 1, referrals,
&fake_devname);
free_dfs_info_array(referrals, num_referrals);
if (IS_ERR(mdata)) {
rc = PTR_ERR(mdata);
mdata = NULL;
} else {
cleanup_volume_info_contents(volume_info);
memset(volume_info, '\0', sizeof(*volume_info));
rc = cifs_setup_volume_info(volume_info, mdata,
fake_devname);
}
kfree(fake_devname);
kfree(cifs_sb->mountdata);
cifs_sb->mountdata = mdata;
}
kfree(full_path);
return rc;
}
#endif
static int
cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname)
{
int rc = 0;
if (cifs_parse_mount_options(mount_data, devname, volume_info))
return -EINVAL;
if (volume_info->nullauth) {
cFYI(1, "Anonymous login");
kfree(volume_info->username);
volume_info->username = NULL;
} else if (volume_info->username) {
/* BB fixme parse for domain name here */
cFYI(1, "Username: %s", volume_info->username);
} else {
cifserror("No username specified");
/* In userspace mount helper we can get user name from alternate
locations such as env variables and files on disk */
return -EINVAL;
}
/* this is needed for ASCII cp to Unicode converts */
if (volume_info->iocharset == NULL) {
/* load_nls_default cannot return null */
volume_info->local_nls = load_nls_default();
} else {
volume_info->local_nls = load_nls(volume_info->iocharset);
if (volume_info->local_nls == NULL) {
cERROR(1, "CIFS mount error: iocharset %s not found",
volume_info->iocharset);
return -ELIBACC;
}
}
return rc;
}
struct smb_vol *
cifs_get_volume_info(char *mount_data, const char *devname)
{
int rc;
struct smb_vol *volume_info;
volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
if (!volume_info)
return ERR_PTR(-ENOMEM);
rc = cifs_setup_volume_info(volume_info, mount_data, devname);
if (rc) {
cifs_cleanup_volume_info(volume_info);
volume_info = ERR_PTR(rc);
}
return volume_info;
}
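/*
* Top-level mount routine: acquires (or reuses) the TCP session, SMB
* session and tcon for this mount, negotiates unix extensions and
* r/wsizes, chases DFS referrals if necessary, and finally hangs the
* master tcon_link off of the superblock.
*/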
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
int rc;
int xid;
struct cifs_ses *pSesInfo;
struct cifs_tcon *tcon;
struct TCP_Server_Info *srvTcp;
char *full_path;
struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
int referral_walks_count = 0;
#endif
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
if (rc)
return rc;
#ifdef CONFIG_CIFS_DFS_UPCALL
try_mount_again:
/* cleanup activities if we're chasing a referral */
if (referral_walks_count) {
if (tcon)
cifs_put_tcon(tcon);
else if (pSesInfo)
cifs_put_smb_ses(pSesInfo);
FreeXid(xid);
}
#endif
rc = 0;
tcon = NULL;
pSesInfo = NULL;
srvTcp = NULL;
full_path = NULL;
tlink = NULL;
xid = GetXid();
/* get a reference to a tcp session */
srvTcp = cifs_get_tcp_session(volume_info);
if (IS_ERR(srvTcp)) {
rc = PTR_ERR(srvTcp);
bdi_destroy(&cifs_sb->bdi);
goto out;
}
/* get a reference to a SMB session */
pSesInfo = cifs_get_smb_ses(srvTcp, volume_info);
if (IS_ERR(pSesInfo)) {
rc = PTR_ERR(pSesInfo);
pSesInfo = NULL;
goto mount_fail_check;
}
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(pSesInfo, volume_info);
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
tcon = NULL;
goto remote_path_check;
}
/* tell server which Unix caps we support */
if (tcon->ses->capabilities & CAP_UNIX) {
/* the caps reset checks the mount options to see if unix
extensions are disabled for just this mount */
reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
rc = -EACCES;
goto mount_fail_check;
}
} else
tcon->unix_ext = 0; /* server does not support them */
/* do not care if following two calls succeed - informational */
if (!tcon->ipc) {
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
}
cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
/* tune readahead according to rsize */
cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* Perform an unconditional check for whether there are DFS
* referrals for this path without prefix, to provide support
* for DFS referrals from w2k8 servers which don't seem to respond
* with PATH_NOT_COVERED to requests that include the prefix.
* Chase the referral if found, otherwise continue normally.
*/
if (referral_walks_count == 0) {
int refrc = expand_dfs_referral(xid, pSesInfo, volume_info,
cifs_sb, false);
if (!refrc) {
referral_walks_count++;
goto try_mount_again;
}
}
#endif
/* check if a whole path is not remote */
if (!rc && tcon) {
/* build_path_to_root works only when we have a valid tcon */
full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon);
if (full_path == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
rc = is_path_accessible(xid, tcon, cifs_sb, full_path);
if (rc != 0 && rc != -EREMOTE) {
kfree(full_path);
goto mount_fail_check;
}
kfree(full_path);
}
/* get referral if needed */
if (rc == -EREMOTE) {
#ifdef CONFIG_CIFS_DFS_UPCALL
if (referral_walks_count > MAX_NESTED_LINKS) {
/*
* BB: when we implement proper loop detection,
* we will remove this check. But now we need it
* to prevent an indefinite loop if 'DFS tree' is
* misconfigured (i.e. has loops).
*/
rc = -ELOOP;
goto mount_fail_check;
}
rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb,
true);
if (!rc) {
referral_walks_count++;
goto try_mount_again;
}
goto mount_fail_check;
#else /* No DFS support, return error on mount */
rc = -EOPNOTSUPP;
#endif
}
if (rc)
goto mount_fail_check;
/* now, hang the tcon off of the superblock */
tlink = kzalloc(sizeof *tlink, GFP_KERNEL);
if (tlink == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
tlink->tl_uid = pSesInfo->linux_uid;
tlink->tl_tcon = tcon;
tlink->tl_time = jiffies;
set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
cifs_sb->master_tlink = tlink;
spin_lock(&cifs_sb->tlink_tree_lock);
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
mount_fail_check:
/* on error free sesinfo and tcon struct if needed */
if (rc) {
/* If find_unc succeeded then rc == 0 so we can not end */
/* up accidentally freeing someone else's tcon struct */
if (tcon)
cifs_put_tcon(tcon);
else if (pSesInfo)
cifs_put_smb_ses(pSesInfo);
else
cifs_put_tcp_session(srvTcp);
bdi_destroy(&cifs_sb->bdi);
}
out:
FreeXid(xid);
return rc;
}
/*
* Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon
* pointer may be NULL.
*/
int
CIFSTCon(unsigned int xid, struct cifs_ses *ses,
const char *tree, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
struct smb_hdr *smb_buffer_response;
TCONX_REQ *pSMB;
TCONX_RSP *pSMBr;
unsigned char *bcc_ptr;
int rc = 0;
int length;
__u16 bytes_left, count;
if (ses == NULL)
return -EIO;
smb_buffer = cifs_buf_get();
if (smb_buffer == NULL)
return -ENOMEM;
smb_buffer_response = smb_buffer;
header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
NULL /*no tid */ , 4 /*wct */ );
smb_buffer->Mid = GetNextMid(ses->server);
smb_buffer->Uid = ses->Suid;
pSMB = (TCONX_REQ *) smb_buffer;
pSMBr = (TCONX_RSP *) smb_buffer_response;
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
/* already aligned so no need to do it below */
} else {
pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
weaker LANMAN (which we do not send by default) is accepted
by Samba (not sure whether other servers allow
NTLMv2 password here) */
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
calc_lanman_hash(tcon->password, ses->server->cryptkey,
ses->server->sec_mode &
SECMODE_PW_ENCRYPT ? true : false,
bcc_ptr);
else
#endif /* CIFS_WEAK_PW_HASH */
rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
bcc_ptr, nls_codepage);
bcc_ptr += CIFS_AUTH_RESP_SIZE;
if (ses->capabilities & CAP_UNICODE) {
/* must align unicode strings */
*bcc_ptr = 0; /* null byte password */
bcc_ptr++;
}
}
if (ses->server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_STATUS32) {
smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
}
if (ses->capabilities & CAP_DFS) {
smb_buffer->Flags2 |= SMBFLG2_DFS;
}
if (ses->capabilities & CAP_UNICODE) {
smb_buffer->Flags2 |= SMBFLG2_UNICODE;
length =
cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
6 /* max utf8 char length in bytes */ *
(/* server len*/ + 256 /* share len */), nls_codepage);
bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
bcc_ptr += 2; /* skip trailing null */
} else { /* ASCII */
strcpy(bcc_ptr, tree);
bcc_ptr += strlen(tree) + 1;
}
strcpy(bcc_ptr, "?????");
bcc_ptr += strlen("?????");
bcc_ptr += 1;
count = bcc_ptr - &pSMB->Password[0];
pSMB->hdr.smb_buf_length = cpu_to_be32(be32_to_cpu(
pSMB->hdr.smb_buf_length) + count);
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
0);
/* above now done in SendReceive */
if ((rc == 0) && (tcon != NULL)) {
bool is_unicode;
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
bytes_left = get_bcc(smb_buffer_response);
length = strnlen(bcc_ptr, bytes_left - 2);
if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
is_unicode = true;
else
is_unicode = false;
/* skip service field (NB: this field is always ASCII) */
if (length == 3) {
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
cFYI(1, "IPC connection");
tcon->ipc = 1;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
/* the most common case */
cFYI(1, "disk share connection");
}
}
bcc_ptr += length + 1;
bytes_left -= (length + 1);
strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
/* mostly informational -- no need to fail on error here */
kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
bytes_left, is_unicode,
nls_codepage);
cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem);
if ((smb_buffer_response->WordCount == 3) ||
(smb_buffer_response->WordCount == 7))
/* field is in same location */
tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
else
tcon->Flags = 0;
cFYI(1, "Tcon flags: 0x%x ", tcon->Flags);
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
ses->ipc_tid = smb_buffer_response->Tid;
}
cifs_buf_release(smb_buffer);
return rc;
}
void
cifs_umount(struct cifs_sb_info *cifs_sb)
{
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct tcon_link *tlink;
cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
spin_lock(&cifs_sb->tlink_tree_lock);
while ((node = rb_first(root))) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(node, root);
spin_unlock(&cifs_sb->tlink_tree_lock);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb->mountdata);
unload_nls(cifs_sb->local_nls);
kfree(cifs_sb);
}
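/* Send the SMB NEGOTIATE request for a new connection (only once per
connect), retrying once on -EAGAIN, and mark the server CifsGood on
success. */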
int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
/* only send once per connect */
if (server->maxBuf != 0)
return 0;
cifs_set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN) {
/* retry only once on 1st time connection */
cifs_set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN)
rc = -EHOSTDOWN;
}
if (rc == 0) {
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus == CifsNeedNegotiate)
server->tcpStatus = CifsGood;
else
rc = -EHOSTDOWN;
spin_unlock(&GlobalMid_Lock);
}
return rc;
}
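/*
* Perform SESSION_SETUP for this session. For the first session on a
* connection, the session key is handed over to the server struct for
* signing of subsequent traffic.
*/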
int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
ses->flags = 0;
ses->capabilities = server->capabilities;
if (linuxExtEnabled == 0)
ses->capabilities &= (~CAP_UNIX);
cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
server->sec_mode, server->capabilities, server->timeAdj);
rc = CIFS_SessSetup(xid, ses, nls_info);
if (rc) {
cERROR(1, "Send error in SessSetup = %d", rc);
} else {
mutex_lock(&ses->server->srv_mutex);
if (!server->session_estab) {
server->session_key.response = ses->auth_key.response;
server->session_key.len = ses->auth_key.len;
server->sequence_number = 0x2;
server->session_estab = true;
ses->auth_key.response = NULL;
}
mutex_unlock(&server->srv_mutex);
cFYI(1, "CIFS Session Established successfully");
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
}
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
ses->auth_key.len = 0;
kfree(ses->ntlmssp);
ses->ntlmssp = NULL;
return rc;
}
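/*
 * Illustrative sketch (not part of the original file): callers such as
 * cifs_get_smb_ses() pair the two functions above roughly like this --
 * negotiate first (a no-op after the first call on a connection), then
 * authenticate. 'volume_info' stands in for the caller's parsed mount
 * options; the function name is hypothetical.
 */
static int example_bring_up_session(unsigned int xid, struct cifs_ses *ses,
                                    struct smb_vol *volume_info)
{
        int rc;

        rc = cifs_negotiate_protocol(xid, ses);
        if (rc == 0)
                rc = cifs_setup_session(xid, ses, volume_info->local_nls);
        return rc;
}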
static int
cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses)
{
switch (ses->server->secType) {
case Kerberos:
vol->secFlg = CIFSSEC_MUST_KRB5;
return 0;
case NTLMv2:
vol->secFlg = CIFSSEC_MUST_NTLMV2;
break;
case NTLM:
vol->secFlg = CIFSSEC_MUST_NTLM;
break;
case RawNTLMSSP:
vol->secFlg = CIFSSEC_MUST_NTLMSSP;
break;
case LANMAN:
vol->secFlg = CIFSSEC_MUST_LANMAN;
break;
}
return cifs_set_cifscreds(vol, ses);
}
static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
struct cifs_ses *ses;
struct cifs_tcon *tcon = NULL;
struct smb_vol *vol_info;
vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
if (vol_info == NULL)
return ERR_PTR(-ENOMEM);
vol_info->local_nls = cifs_sb->local_nls;
vol_info->linux_uid = fsuid;
vol_info->cred_uid = fsuid;
vol_info->UNC = master_tcon->treeName;
vol_info->retry = master_tcon->retry;
vol_info->nocase = master_tcon->nocase;
vol_info->local_lease = master_tcon->local_lease;
vol_info->no_linux_ext = !master_tcon->unix_ext;
rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
if (rc) {
tcon = ERR_PTR(rc);
goto out;
}
/* get a reference for the same TCP session */
spin_lock(&cifs_tcp_ses_lock);
++master_tcon->ses->server->srv_count;
spin_unlock(&cifs_tcp_ses_lock);
ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
if (IS_ERR(ses)) {
tcon = (struct cifs_tcon *)ses;
cifs_put_tcp_session(master_tcon->ses->server);
goto out;
}
tcon = cifs_get_tcon(ses, vol_info);
if (IS_ERR(tcon)) {
cifs_put_smb_ses(ses);
goto out;
}
if (ses->capabilities & CAP_UNIX)
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
out:
kfree(vol_info->username);
kfree(vol_info->password);
kfree(vol_info);
return tcon;
}
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
}
static int
cifs_sb_tcon_pending_wait(void *unused)
{
schedule();
return signal_pending(current) ? -ERESTARTSYS : 0;
}
/* find and return a tlink with given uid */
static struct tcon_link *
tlink_rb_search(struct rb_root *root, uid_t uid)
{
struct rb_node *node = root->rb_node;
struct tcon_link *tlink;
while (node) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
if (tlink->tl_uid > uid)
node = node->rb_left;
else if (tlink->tl_uid < uid)
node = node->rb_right;
else
return tlink;
}
return NULL;
}
/* insert a tcon_link into the tree */
static void
tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct tcon_link *tlink;
while (*new) {
tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
parent = *new;
if (tlink->tl_uid > new_tlink->tl_uid)
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
rb_link_node(&new_tlink->tl_rbnode, parent, new);
rb_insert_color(&new_tlink->tl_rbnode, root);
}
/*
* Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
* current task.
*
* If the superblock doesn't refer to a multiuser mount, then just return
* the master tcon for the mount.
*
* First, search the rbtree for an existing tcon for this fsuid. If one
* exists, then check to see if it's pending construction. If it is then wait
* for construction to complete. Once it's no longer pending, check to see if
* it failed and either return an error or retry construction, depending on
* the timeout.
*
* If one doesn't exist then insert a new tcon_link struct into the tree and
* try to construct a new one.
*/
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
int ret;
uid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
spin_lock(&cifs_sb->tlink_tree_lock);
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink)
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
if (tlink == NULL) {
newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
if (newtlink == NULL)
return ERR_PTR(-ENOMEM);
newtlink->tl_uid = fsuid;
newtlink->tl_tcon = ERR_PTR(-EACCES);
set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
cifs_get_tlink(newtlink);
spin_lock(&cifs_sb->tlink_tree_lock);
/* was one inserted after previous search? */
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink) {
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
kfree(newtlink);
goto wait_for_construction;
}
tlink = newtlink;
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
cifs_sb_tcon_pending_wait,
TASK_INTERRUPTIBLE);
if (ret) {
cifs_put_tlink(tlink);
return ERR_PTR(ret);
}
/* if it's good, return it */
if (!IS_ERR(tlink->tl_tcon))
return tlink;
/* return error if we tried this already recently */
if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
cifs_put_tlink(tlink);
return ERR_PTR(-EACCES);
}
if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
goto wait_for_construction;
}
tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
if (IS_ERR(tlink->tl_tcon)) {
cifs_put_tlink(tlink);
return ERR_PTR(-EACCES);
}
return tlink;
}
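/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern for cifs_sb_tlink() -- take a reference, extract the tcon to
 * issue SMB calls against, and drop the reference when done. The function
 * name is hypothetical.
 */
static int example_use_tlink(struct cifs_sb_info *cifs_sb)
{
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink))
                return PTR_ERR(tlink);
        tcon = tlink_tcon(tlink);
        /* ... issue SMB requests against 'tcon' here ... */
        cifs_put_tlink(tlink);
        return 0;
}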
/*
* periodic workqueue job that scans tcon_tree for a superblock and closes
* out tcons.
*/
static void
cifs_prune_tlinks(struct work_struct *work)
{
struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
prune_tlinks.work);
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct rb_node *tmp;
struct tcon_link *tlink;
/*
* Because we drop the spinlock in the loop in order to put the tlink,
* it's not guarded against removal of links from the tree. The only
* places that remove entries from the tree are this function and
* umounts. Because this function is non-reentrant and is canceled
* before umount can proceed, this is safe.
*/
spin_lock(&cifs_sb->tlink_tree_lock);
node = rb_first(root);
while (node != NULL) {
tmp = node;
node = rb_next(tmp);
tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
atomic_read(&tlink->tl_count) != 0 ||
time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
continue;
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(tmp, root);
spin_unlock(&cifs_sb->tlink_tree_lock);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
}
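/*
 * Illustrative sketch (not part of the original file): the mount path arms
 * this job roughly as below; each run then requeues itself via the
 * queue_delayed_work() call at the end of cifs_prune_tlinks(). The helper
 * name is hypothetical.
 */
static void example_arm_prune_job(struct cifs_sb_info *cifs_sb)
{
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
        queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
                           TLINK_IDLE_EXPIRE);
}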
| gpl-2.0 |
Thunderoar/android_kernel_samsung_goyave3g | drivers/s390/char/sclp_rw.c | 2967 | 12927 | /*
* driver: reading from and writing to system console on S/390 via SCLP
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include "sclp.h"
#include "sclp_rw.h"
/*
* The room for the SCCB (only for writing) is not equal to a page's size
* (as it is specified as the maximum size in the SCLP documentation)
* because of the additional data structure described above.
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
static void sclp_rw_pm_event(struct sclp_register *reg,
enum sclp_pm_event sclp_pm_event)
{
sclp_console_pm_event(sclp_pm_event);
}
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
.send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK,
.pm_event_fn = sclp_rw_pm_event,
};
/*
* Set up an SCLP write buffer. Gets a page as input (4K) and returns
* a pointer to a struct sclp_buffer structure that is located at the
* end of the input page. This reduces the buffer space by a few
* bytes but simplifies things.
*/
struct sclp_buffer *
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
{
struct sclp_buffer *buffer;
struct write_sccb *sccb;
sccb = (struct write_sccb *) page;
/*
* We keep the struct sclp_buffer structure at the end
* of the sccb page.
*/
buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
buffer->sccb = sccb;
buffer->retry_count = 0;
buffer->mto_number = 0;
buffer->mto_char_sum = 0;
buffer->current_line = NULL;
buffer->current_length = 0;
buffer->columns = columns;
buffer->htab = htab;
/* initialize sccb */
memset(sccb, 0, sizeof(struct write_sccb));
sccb->header.length = sizeof(struct write_sccb);
sccb->msg_buf.header.length = sizeof(struct msg_buf);
sccb->msg_buf.header.type = EVTYP_MSG;
sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
sccb->msg_buf.mdb.header.type = 1;
sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
sccb->msg_buf.mdb.header.revision_code = 1;
sccb->msg_buf.mdb.go.length = sizeof(struct go);
sccb->msg_buf.mdb.go.type = 1;
return buffer;
}
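/*
 * Illustrative sketch (not part of the original file): the typical buffer
 * life cycle as driven by the console/tty layers -- allocate a page, wrap
 * it, fill it, emit it. 'done' is the caller's completion callback (see
 * sclp_emit_buffer() below); assumes <linux/gfp.h> for page allocation.
 * The function name is hypothetical.
 */
static int example_emit_line(void (*done)(struct sclp_buffer *, int))
{
        struct sclp_buffer *buffer;
        void *page;
        int rc;

        page = (void *) __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;
        buffer = sclp_make_buffer(page, 80, 8); /* 80 columns, htab = 8 */
        sclp_write(buffer, (const unsigned char *) "hello\n", 6);
        rc = sclp_emit_buffer(buffer, done);
        if (rc)
                free_page((unsigned long) sclp_unmake_buffer(buffer));
        return rc;
}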
/*
* Return a pointer to the original page that has been used to create
* the buffer.
*/
void *
sclp_unmake_buffer(struct sclp_buffer *buffer)
{
return buffer->sccb;
}
/*
* Initialize a new Message Text Object (MTO) at the end of the provided buffer
* with enough room for max_len characters. Return 0 on success.
*/
static int
sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
{
struct write_sccb *sccb;
struct mto *mto;
int mto_size;
/* max size of new Message Text Object including message text */
mto_size = sizeof(struct mto) + max_len;
/* check if current buffer sccb can contain the mto */
sccb = buffer->sccb;
if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
return -ENOMEM;
/* find address of new message text object */
mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
/*
* fill the new Message-Text Object,
* starting behind the former last byte of the SCCB
*/
memset(mto, 0, sizeof(struct mto));
mto->length = sizeof(struct mto);
mto->type = 4; /* message text object */
mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
/* set pointer to first byte after struct mto. */
buffer->current_line = (char *) (mto + 1);
buffer->current_length = 0;
return 0;
}
/*
* Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
* MTO, enclosing MDB, event buffer and SCCB.
*/
static void
sclp_finalize_mto(struct sclp_buffer *buffer)
{
struct write_sccb *sccb;
struct mto *mto;
int str_len, mto_size;
str_len = buffer->current_length;
buffer->current_line = NULL;
buffer->current_length = 0;
/* real size of new Message Text Object including message text */
mto_size = sizeof(struct mto) + str_len;
/* find address of new message text object */
sccb = buffer->sccb;
mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
/* set size of message text object */
mto->length = mto_size;
/*
* update values of sizes
* (SCCB, Event(Message) Buffer, Message Data Block)
*/
sccb->header.length += mto_size;
sccb->msg_buf.header.length += mto_size;
sccb->msg_buf.mdb.header.length += mto_size;
/*
* count the number of buffered messages (= number of Message Text
* Objects) and the number of buffered characters
* for the SCCB currently used for buffering
*/
buffer->mto_number++;
buffer->mto_char_sum += str_len;
}
/*
* processing of a message including escape characters,
* returns number of characters written to the output sccb
* ("processed" means it is not guaranteed that the characters have already
* been sent to the SCLP, only that this will happen at the latest the next
* time the SCLP is not busy)
*/
int
sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
{
int spaces, i_msg;
int rc;
/*
* parse msg for escape sequences (\t,\v ...) and put the formatted
* msg into an mto (created by sclp_initialize_mto).
*
* We have to do this work ourselves because there is no support for
* these characters on the native machine and only partial support
* under VM (why does VM interpret \n but the native machine doesn't?)
*
* Depending on the i/o-control setting, the message is either written
* immediately or we wait for a final new line that may come with the
* next message. Besides that, we avoid a buffer overrun by writing out
* its content.
*
* RESTRICTIONS:
*
* \r and \b work within one line because we are not able to modify
* previous output that has already been accepted by the SCLP.
*
* \t combined with a following \r is not correctly represented because
* \t is expanded to some spaces but \r does not know about a
* previous \t and decreases the current position by one column.
* This is in order to keep the implementation slim and quick.
*/
for (i_msg = 0; i_msg < count; i_msg++) {
switch (msg[i_msg]) {
case '\n': /* new line, line feed (ASCII) */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer, 0);
if (rc)
return i_msg;
}
sclp_finalize_mto(buffer);
break;
case '\a': /* bell, one for several times */
/* set SCLP sound alarm bit in General Object */
buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
GNRLMSGFLGS_SNDALRM;
break;
case '\t': /* horizontal tabulator */
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
/* "go to (next htab-boundary + 1, same line)" */
do {
if (buffer->current_length >= buffer->columns)
break;
/* ok, add a blank */
*buffer->current_line++ = 0x40;
buffer->current_length++;
} while (buffer->current_length % buffer->htab);
break;
case '\f': /* form feed */
case '\v': /* vertical tabulator */
/* "go to (actual column, actual line + 1)" */
/* = new line, leading spaces */
if (buffer->current_line != NULL) {
spaces = buffer->current_length;
sclp_finalize_mto(buffer);
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
memset(buffer->current_line, 0x40, spaces);
buffer->current_line += spaces;
buffer->current_length = spaces;
} else {
/* on an empty line this is the same as \n */
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
sclp_finalize_mto(buffer);
}
break;
case '\b': /* backspace */
/* "go to (actual column - 1, actual line)" */
/* decrement counter indicating position, */
/* do not remove last character */
if (buffer->current_line != NULL &&
buffer->current_length > 0) {
buffer->current_length--;
buffer->current_line--;
}
break;
case 0x00: /* end of string */
/* transfer current line to SCCB */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* skip the rest of the message including the 0 byte */
i_msg = count - 1;
break;
default: /* no escape character */
/* do not output unprintable characters */
if (!isprint(msg[i_msg]))
break;
/* check if new mto needs to be created */
if (buffer->current_line == NULL) {
rc = sclp_initialize_mto(buffer,
buffer->columns);
if (rc)
return i_msg;
}
*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
buffer->current_length++;
break;
}
/* check if current mto is full */
if (buffer->current_line != NULL &&
buffer->current_length >= buffer->columns)
sclp_finalize_mto(buffer);
}
/* return number of processed characters */
return i_msg;
}
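/*
 * Illustrative sketch (not part of the original file): a plain-C model of
 * the '\t' handling above. Blanks (EBCDIC 0x40) are added until the column
 * is a multiple of htab or the line is full; e.g. with htab = 8 and a
 * current position of 3, five blanks move the position to column 8.
 */
static inline int example_next_htab_stop(int pos, int htab, int columns)
{
        do {
                if (pos >= columns)
                        break;
                pos++;          /* one blank emitted */
        } while (pos % htab);
        return pos;
}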
/*
* Return the number of free bytes in the sccb
*/
int
sclp_buffer_space(struct sclp_buffer *buffer)
{
int count;
count = MAX_SCCB_ROOM - buffer->sccb->header.length;
if (buffer->current_line != NULL)
count -= sizeof(struct mto) + buffer->current_length;
return count;
}
/*
* Return number of characters in buffer
*/
int
sclp_chars_in_buffer(struct sclp_buffer *buffer)
{
int count;
count = buffer->mto_char_sum;
if (buffer->current_line != NULL)
count += buffer->current_length;
return count;
}
/*
* sets or provides some values that influence the driver's behaviour
*/
void
sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
{
buffer->columns = columns;
if (buffer->current_line != NULL &&
buffer->current_length > buffer->columns)
sclp_finalize_mto(buffer);
}
void
sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
{
buffer->htab = htab;
}
/*
* called by sclp_console_init and/or sclp_tty_init
*/
int
sclp_rw_init(void)
{
static int init_done = 0;
int rc;
if (init_done)
return 0;
rc = sclp_register(&sclp_rw_event);
if (rc == 0)
init_done = 1;
return rc;
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
* second half of the Write Event Data function; it has to run after the
* interrupt indicating completion of the Service Call.
*/
static void
sclp_writedata_callback(struct sclp_req *request, void *data)
{
int rc;
struct sclp_buffer *buffer;
struct write_sccb *sccb;
buffer = (struct sclp_buffer *) data;
sccb = buffer->sccb;
if (request->status == SCLP_REQ_FAILED) {
if (buffer->callback != NULL)
buffer->callback(buffer, -EIO);
return;
}
/* check SCLP response code and choose suitable action */
switch (sccb->header.response_code) {
case 0x0020 :
/* Normal completion, buffer processed, message(s) sent */
rc = 0;
break;
case 0x0340: /* Contained SCLP equipment check */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* remove processed buffers and requeue rest */
if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
/* not all buffers were processed */
sccb->header.response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
} else
rc = 0;
break;
case 0x0040: /* SCLP equipment check */
case 0x05f0: /* Target resource in improper state */
if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
rc = -EIO;
break;
}
/* retry request */
sccb->header.response_code = 0x0000;
buffer->request.status = SCLP_REQ_FILLED;
rc = sclp_add_request(request);
if (rc == 0)
return;
break;
default:
if (sccb->header.response_code == 0x71f0)
rc = -ENOMEM;
else
rc = -EINVAL;
break;
}
if (buffer->callback != NULL)
buffer->callback(buffer, rc);
}
/*
* Setup the request structure in the struct sclp_buffer to do SCLP Write
* Event Data and pass the request to the core SCLP loop. Return zero on
* success, non-zero otherwise.
*/
int
sclp_emit_buffer(struct sclp_buffer *buffer,
void (*callback)(struct sclp_buffer *, int))
{
struct write_sccb *sccb;
/* add current line if there is one */
if (buffer->current_line != NULL)
sclp_finalize_mto(buffer);
/* Are there messages in the output buffer? */
if (buffer->mto_number == 0)
return -EIO;
sccb = buffer->sccb;
if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK)
/* Use normal write message */
sccb->msg_buf.header.type = EVTYP_MSG;
else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
/* Use write priority message */
sccb->msg_buf.header.type = EVTYP_PMSGCMD;
else
return -EOPNOTSUPP;
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
buffer->request.status = SCLP_REQ_FILLED;
buffer->request.callback = sclp_writedata_callback;
buffer->request.callback_data = buffer;
buffer->request.sccb = sccb;
buffer->callback = callback;
return sclp_add_request(&buffer->request);
}
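/*
 * Illustrative sketch (not part of the original file): a minimal completion
 * callback matching the signature sclp_emit_buffer() expects; it simply
 * returns the backing page once the SCLP has consumed the buffer. The
 * function name is hypothetical.
 */
static void example_writedata_done(struct sclp_buffer *buffer, int rc)
{
        free_page((unsigned long) sclp_unmake_buffer(buffer));
}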
| gpl-2.0 |
MoKee/android_kernel_motorola_msm8960dt-common | arch/hexagon/kernel/ptrace.c | 3223 | 4897 | /*
* Ptrace support for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <generated/compile.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <asm/user.h>
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
unsigned int dummy;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
/* The general idea here is that the copyout must happen in
* exactly the same order in which the userspace expects these
* regs. Now, the sequence in userspace does not match the
* sequence in the kernel, so everything past the 32 gprs
* happens one at a time.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->r00, 0, 32*sizeof(unsigned long));
#define ONEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
ONEXT(&regs->sa0, sa0);
ONEXT(&regs->lc0, lc0);
ONEXT(&regs->sa1, sa1);
ONEXT(&regs->lc1, lc1);
ONEXT(&regs->m0, m0);
ONEXT(&regs->m1, m1);
ONEXT(&regs->usr, usr);
ONEXT(&regs->preds, p3_0);
ONEXT(&regs->gp, gp);
ONEXT(&regs->ugp, ugp);
ONEXT(&pt_elr(regs), pc);
dummy = pt_cause(regs);
ONEXT(&dummy, cause);
ONEXT(&pt_badva(regs), badva);
/* Pad the rest with zeros, if needed */
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
return ret;
}
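/*
 * Illustrative note (not part of the original file): user_regset_copyout()
 * copies only the part of [start_pos, end_pos) that overlaps the window the
 * caller asked for, so the ONEXT() lines above can simply enumerate the
 * members in user_regs_struct order. Expanded, ONEXT(&regs->sa0, sa0) is:
 *
 *      if (!ret)
 *              ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 *                      &regs->sa0,
 *                      offsetof(struct user_regs_struct, sa0),
 *                      offsetof(struct user_regs_struct, sa0) +
 *                              sizeof(unsigned long));
 */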
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long bucket;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->r00, 0, 32*sizeof(unsigned long));
#define INEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
INEXT(&regs->sa0, sa0);
INEXT(&regs->lc0, lc0);
INEXT(&regs->sa1, sa1);
INEXT(&regs->lc1, lc1);
INEXT(&regs->m0, m0);
INEXT(&regs->m1, m1);
INEXT(&regs->usr, usr);
INEXT(&regs->preds, p3_0);
INEXT(&regs->gp, gp);
INEXT(&regs->ugp, ugp);
INEXT(&pt_elr(regs), pc);
/* CAUSE and BADVA aren't writeable. */
INEXT(&bucket, cause);
INEXT(&bucket, badva);
/* Ignore the rest, if needed */
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
if (ret)
return ret;
/*
* This is special; SP is actually restored by the VM via the
* special event record which is set by the special trap.
*/
regs->hvmer.vmpsp = regs->r29;
return 0;
}
enum hexagon_regset {
REGSET_GENERAL,
};
static const struct user_regset hexagon_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = genregs_get,
.set = genregs_set,
},
};
static const struct user_regset_view hexagon_user_view = {
.name = UTS_MACHINE,
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = hexagon_regsets,
.n = ARRAY_SIZE(hexagon_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &hexagon_user_view;
}
void ptrace_disable(struct task_struct *child)
{
/* Boilerplate - resolves to null inline if no HW single-step */
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
return ptrace_request(child, request, addr, data);
}
| gpl-2.0 |
flzyup/gnexus_kernel | drivers/of/of_spi.c | 3223 | 2396 | /*
* SPI OF support routines
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* Support routines for deriving SPI device attachments from the device
* tree.
*/
#include <linux/of.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/of_irq.h>
#include <linux/of_spi.h>
/**
* of_register_spi_devices - Register child devices onto the SPI bus
* @master: Pointer to spi_master device
*
* Registers an spi_device for each child node of master node which has a 'reg'
* property.
*/
void of_register_spi_devices(struct spi_master *master)
{
struct spi_device *spi;
struct device_node *nc;
const __be32 *prop;
int rc;
int len;
if (!master->dev.of_node)
return;
for_each_child_of_node(master->dev.of_node, nc) {
/* Alloc an spi_device */
spi = spi_alloc_device(master);
if (!spi) {
dev_err(&master->dev, "spi_device alloc error for %s\n",
nc->full_name);
continue;
}
/* Select device driver */
if (of_modalias_node(nc, spi->modalias,
sizeof(spi->modalias)) < 0) {
dev_err(&master->dev, "cannot find modalias for %s\n",
nc->full_name);
spi_dev_put(spi);
continue;
}
/* Device address */
prop = of_get_property(nc, "reg", &len);
if (!prop || len < sizeof(*prop)) {
dev_err(&master->dev, "%s has no 'reg' property\n",
nc->full_name);
spi_dev_put(spi);
continue;
}
spi->chip_select = be32_to_cpup(prop);
/* Mode (clock phase/polarity/etc.) */
if (of_find_property(nc, "spi-cpha", NULL))
spi->mode |= SPI_CPHA;
if (of_find_property(nc, "spi-cpol", NULL))
spi->mode |= SPI_CPOL;
if (of_find_property(nc, "spi-cs-high", NULL))
spi->mode |= SPI_CS_HIGH;
/* Device speed */
prop = of_get_property(nc, "spi-max-frequency", &len);
if (!prop || len < sizeof(*prop)) {
dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
nc->full_name);
spi_dev_put(spi);
continue;
}
spi->max_speed_hz = be32_to_cpup(prop);
/* IRQ */
spi->irq = irq_of_parse_and_map(nc, 0);
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
/* Register the new device */
request_module(spi->modalias);
rc = spi_add_device(spi);
if (rc) {
dev_err(&master->dev, "spi_device register error %s\n",
nc->full_name);
spi_dev_put(spi);
}
}
}
EXPORT_SYMBOL(of_register_spi_devices);
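/*
 * Illustrative sketch (not part of the original file): a controller driver
 * calls of_register_spi_devices() right after registering its master.
 * 'example_setup_master' is a hypothetical helper, and the device-tree
 * child node it expects looks roughly like:
 *
 *      flash@0 {
 *              compatible = "spansion,m25p80";
 *              reg = <0>;
 *              spi-max-frequency = <20000000>;
 *      };
 */
static int example_spi_controller_probe(struct platform_device *pdev)
{
        struct spi_master *master = example_setup_master(pdev); /* hypothetical */
        int ret;

        ret = spi_register_master(master);
        if (ret)
                return ret;
        of_register_spi_devices(master);        /* scan DT children */
        return 0;
}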
| gpl-2.0 |
davidmueller13/aospmmkernel2 | arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 3735 | 98135 | /*
* omap_hwmod_3xxx_data.c - hardware modules present on the OMAP3xxx chips
*
* Copyright (C) 2009-2011 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* The data in this file should be completely autogeneratable from
* the TI hardware database or other technical documentation.
*
* XXX these should be marked initdata for multi-OMAP kernels
*/
#include <plat/omap_hwmod.h>
#include <mach/irqs.h>
#include <plat/cpu.h>
#include <plat/dma.h>
#include <plat/serial.h>
#include <plat/l3_3xxx.h>
#include <plat/l4_3xxx.h>
#include <plat/i2c.h>
#include <plat/gpio.h>
#include <plat/mmc.h>
#include <plat/mcbsp.h>
#include <plat/mcspi.h>
#include <plat/dmtimer.h>
#include "omap_hwmod_common_data.h"
#include "smartreflex.h"
#include "prm-regbits-34xx.h"
#include "cm-regbits-34xx.h"
#include "wd_timer.h"
#include <mach/am35xx.h>
/*
* OMAP3xxx hardware module integration data
*
* All of the data in this section should be autogeneratable from the
* TI hardware database or other technical documentation. Data that
* is driver-specific or driver-kernel integration-specific belongs
* elsewhere.
*/
static struct omap_hwmod omap3xxx_mpu_hwmod;
static struct omap_hwmod omap3xxx_iva_hwmod;
static struct omap_hwmod omap3xxx_l3_main_hwmod;
static struct omap_hwmod omap3xxx_l4_core_hwmod;
static struct omap_hwmod omap3xxx_l4_per_hwmod;
static struct omap_hwmod omap3xxx_wd_timer2_hwmod;
static struct omap_hwmod omap3430es1_dss_core_hwmod;
static struct omap_hwmod omap3xxx_dss_core_hwmod;
static struct omap_hwmod omap3xxx_dss_dispc_hwmod;
static struct omap_hwmod omap3xxx_dss_dsi1_hwmod;
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod;
static struct omap_hwmod omap3xxx_dss_venc_hwmod;
static struct omap_hwmod omap3xxx_i2c1_hwmod;
static struct omap_hwmod omap3xxx_i2c2_hwmod;
static struct omap_hwmod omap3xxx_i2c3_hwmod;
static struct omap_hwmod omap3xxx_gpio1_hwmod;
static struct omap_hwmod omap3xxx_gpio2_hwmod;
static struct omap_hwmod omap3xxx_gpio3_hwmod;
static struct omap_hwmod omap3xxx_gpio4_hwmod;
static struct omap_hwmod omap3xxx_gpio5_hwmod;
static struct omap_hwmod omap3xxx_gpio6_hwmod;
static struct omap_hwmod omap34xx_sr1_hwmod;
static struct omap_hwmod omap34xx_sr2_hwmod;
static struct omap_hwmod omap34xx_mcspi1;
static struct omap_hwmod omap34xx_mcspi2;
static struct omap_hwmod omap34xx_mcspi3;
static struct omap_hwmod omap34xx_mcspi4;
static struct omap_hwmod omap3xxx_mmc1_hwmod;
static struct omap_hwmod omap3xxx_mmc2_hwmod;
static struct omap_hwmod omap3xxx_mmc3_hwmod;
static struct omap_hwmod am35xx_usbhsotg_hwmod;
static struct omap_hwmod omap3xxx_dma_system_hwmod;
static struct omap_hwmod omap3xxx_mcbsp1_hwmod;
static struct omap_hwmod omap3xxx_mcbsp2_hwmod;
static struct omap_hwmod omap3xxx_mcbsp3_hwmod;
static struct omap_hwmod omap3xxx_mcbsp4_hwmod;
static struct omap_hwmod omap3xxx_mcbsp5_hwmod;
static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod;
static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod;
static struct omap_hwmod omap3xxx_usb_host_hs_hwmod;
static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod;
/* L3 -> L4_CORE interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
.master = &omap3xxx_l3_main_hwmod,
.slave = &omap3xxx_l4_core_hwmod,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L3 -> L4_PER interface */
static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = {
.master = &omap3xxx_l3_main_hwmod,
.slave = &omap3xxx_l4_per_hwmod,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L3 target configuration and error log registers */
static struct omap_hwmod_irq_info omap3xxx_l3_main_irqs[] = {
{ .irq = INT_34XX_L3_DBG_IRQ },
{ .irq = INT_34XX_L3_APP_IRQ },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = {
{
.pa_start = 0x68000000,
.pa_end = 0x6800ffff,
.flags = ADDR_TYPE_RT,
},
{ }
};
/* MPU -> L3 interface */
static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = {
.master = &omap3xxx_mpu_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.addr = omap3xxx_l3_main_addrs,
.user = OCP_USER_MPU,
};
/* Slave interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l3_main_slaves[] = {
&omap3xxx_mpu__l3_main,
};
/* DSS -> l3 */
static struct omap_hwmod_ocp_if omap3xxx_dss__l3 = {
.master = &omap3xxx_dss_core_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.fw = {
.omap2 = {
.l3_perm_bit = OMAP3_L3_CORE_FW_INIT_ID_DSS,
.flags = OMAP_FIREWALL_L3,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* Master interfaces on the L3 interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l3_main_masters[] = {
&omap3xxx_l3_main__l4_core,
&omap3xxx_l3_main__l4_per,
};
/* L3 */
static struct omap_hwmod omap3xxx_l3_main_hwmod = {
.name = "l3_main",
.class = &l3_hwmod_class,
.mpu_irqs = omap3xxx_l3_main_irqs,
.masters = omap3xxx_l3_main_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_l3_main_masters),
.slaves = omap3xxx_l3_main_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_l3_main_slaves),
.flags = HWMOD_NO_IDLEST,
};
static struct omap_hwmod omap3xxx_l4_wkup_hwmod;
static struct omap_hwmod omap3xxx_uart1_hwmod;
static struct omap_hwmod omap3xxx_uart2_hwmod;
static struct omap_hwmod omap3xxx_uart3_hwmod;
static struct omap_hwmod omap3xxx_uart4_hwmod;
static struct omap_hwmod am35xx_uart4_hwmod;
static struct omap_hwmod omap3xxx_usbhsotg_hwmod;
/* l3_core -> usbhsotg interface */
static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = {
.master = &omap3xxx_usbhsotg_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.clk = "core_l3_ick",
.user = OCP_USER_MPU,
};
/* l3_core -> am35xx_usbhsotg interface */
static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = {
.master = &am35xx_usbhsotg_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.clk = "core_l3_ick",
.user = OCP_USER_MPU,
};
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_l4_wkup_hwmod,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> MMC1 interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mmc1_hwmod,
.clk = "mmchs1_ick",
.addr = omap2430_mmc1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
.flags = OMAP_FIREWALL_L4
};
/* L4 CORE -> MMC2 interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mmc2_hwmod,
.clk = "mmchs2_ick",
.addr = omap2430_mmc2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
.flags = OMAP_FIREWALL_L4
};
/* L4 CORE -> MMC3 interface */
static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = {
{
.pa_start = 0x480ad000,
.pa_end = 0x480ad1ff,
.flags = ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mmc3_hwmod,
.clk = "mmchs3_ick",
.addr = omap3xxx_mmc3_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
.flags = OMAP_FIREWALL_L4
};
/* L4 CORE -> UART1 interface */
static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = {
{
.pa_start = OMAP3_UART1_BASE,
.pa_end = OMAP3_UART1_BASE + SZ_8K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_uart1_hwmod,
.clk = "uart1_ick",
.addr = omap3xxx_uart1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> UART2 interface */
static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = {
{
.pa_start = OMAP3_UART2_BASE,
.pa_end = OMAP3_UART2_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_uart2_hwmod,
.clk = "uart2_ick",
.addr = omap3xxx_uart2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 PER -> UART3 interface */
static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = {
{
.pa_start = OMAP3_UART3_BASE,
.pa_end = OMAP3_UART3_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_uart3_hwmod,
.clk = "uart3_ick",
.addr = omap3xxx_uart3_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 PER -> UART4 interface */
static struct omap_hwmod_addr_space omap3xxx_uart4_addr_space[] = {
{
.pa_start = OMAP3_UART4_BASE,
.pa_end = OMAP3_UART4_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_uart4_hwmod,
.clk = "uart4_ick",
.addr = omap3xxx_uart4_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* AM35xx: L4 CORE -> UART4 interface */
static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = {
{
.pa_start = OMAP3_UART4_AM35XX_BASE,
.pa_end = OMAP3_UART4_AM35XX_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &am35xx_uart4_hwmod,
.clk = "uart4_ick",
.addr = am35xx_uart4_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> I2C1 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_i2c1_hwmod,
.clk = "i2c1_ick",
.addr = omap2_i2c1_addr_space,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_I2C1_REGION,
.l4_prot_group = 7,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> I2C2 interface */
static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_i2c2_hwmod,
.clk = "i2c2_ick",
.addr = omap2_i2c2_addr_space,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_I2C2_REGION,
.l4_prot_group = 7,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> I2C3 interface */
static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = {
{
.pa_start = 0x48060000,
.pa_end = 0x48060000 + SZ_128 - 1,
.flags = ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_i2c3_hwmod,
.clk = "i2c3_ick",
.addr = omap3xxx_i2c3_addr_space,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_I2C3_REGION,
.l4_prot_group = 7,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_irq_info omap3_smartreflex_mpu_irqs[] = {
{ .irq = 18},
{ .irq = -1 }
};
static struct omap_hwmod_irq_info omap3_smartreflex_core_irqs[] = {
{ .irq = 19},
{ .irq = -1 }
};
/* L4 CORE -> SR1 interface */
static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
{
.pa_start = OMAP34XX_SR1_BASE,
.pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
.flags = ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_sr1_hwmod,
.clk = "sr_l4_ick",
.addr = omap3_sr1_addr_space,
.user = OCP_USER_MPU,
};
/* L4 CORE -> SR2 interface */
static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
{
.pa_start = OMAP34XX_SR2_BASE,
.pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
.flags = ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_sr2_hwmod,
.clk = "sr_l4_ick",
.addr = omap3_sr2_addr_space,
.user = OCP_USER_MPU,
};
/*
* usbhsotg interface data
*/
static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = {
{
.pa_start = OMAP34XX_HSUSB_OTG_BASE,
.pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core -> usbhsotg */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_usbhsotg_hwmod,
.clk = "l4_ick",
.addr = omap3xxx_usbhsotg_addrs,
.user = OCP_USER_MPU,
};
static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_masters[] = {
&omap3xxx_usbhsotg__l3,
};
static struct omap_hwmod_ocp_if *omap3xxx_usbhsotg_slaves[] = {
&omap3xxx_l4_core__usbhsotg,
};
static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
{
.pa_start = AM35XX_IPSS_USBOTGSS_BASE,
.pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core -> usbhsotg */
static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &am35xx_usbhsotg_hwmod,
.clk = "l4_ick",
.addr = am35xx_usbhsotg_addrs,
.user = OCP_USER_MPU,
};
static struct omap_hwmod_ocp_if *am35xx_usbhsotg_masters[] = {
&am35xx_usbhsotg__l3,
};
static struct omap_hwmod_ocp_if *am35xx_usbhsotg_slaves[] = {
&am35xx_l4_core__usbhsotg,
};
/* Slave interfaces on the L4_CORE interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = {
&omap3xxx_l3_main__l4_core,
};
/* L4 CORE */
static struct omap_hwmod omap3xxx_l4_core_hwmod = {
.name = "l4_core",
.class = &l4_hwmod_class,
.slaves = omap3xxx_l4_core_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_l4_core_slaves),
.flags = HWMOD_NO_IDLEST,
};
/* Slave interfaces on the L4_PER interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_per_slaves[] = {
&omap3xxx_l3_main__l4_per,
};
/* L4 PER */
static struct omap_hwmod omap3xxx_l4_per_hwmod = {
.name = "l4_per",
.class = &l4_hwmod_class,
.slaves = omap3xxx_l4_per_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_l4_per_slaves),
.flags = HWMOD_NO_IDLEST,
};
/* Slave interfaces on the L4_WKUP interconnect */
static struct omap_hwmod_ocp_if *omap3xxx_l4_wkup_slaves[] = {
&omap3xxx_l4_core__l4_wkup,
};
/* L4 WKUP */
static struct omap_hwmod omap3xxx_l4_wkup_hwmod = {
.name = "l4_wkup",
.class = &l4_hwmod_class,
.slaves = omap3xxx_l4_wkup_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_l4_wkup_slaves),
.flags = HWMOD_NO_IDLEST,
};
/* Master interfaces on the MPU device */
static struct omap_hwmod_ocp_if *omap3xxx_mpu_masters[] = {
&omap3xxx_mpu__l3_main,
};
/* MPU */
static struct omap_hwmod omap3xxx_mpu_hwmod = {
.name = "mpu",
.class = &mpu_hwmod_class,
.main_clk = "arm_fck",
.masters = omap3xxx_mpu_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_mpu_masters),
};
/*
* IVA2_2 interface data
*/
/* IVA2 <- L3 interface */
static struct omap_hwmod_ocp_if omap3xxx_l3__iva = {
.master = &omap3xxx_l3_main_hwmod,
.slave = &omap3xxx_iva_hwmod,
.clk = "iva2_ck",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if *omap3xxx_iva_masters[] = {
&omap3xxx_l3__iva,
};
/*
* IVA2 (IVA2)
*/
static struct omap_hwmod omap3xxx_iva_hwmod = {
.name = "iva",
.class = &iva_hwmod_class,
.masters = omap3xxx_iva_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_iva_masters),
};
/* timer class */
static struct omap_hwmod_class_sysconfig omap3xxx_timer_1ms_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_timer_1ms_hwmod_class = {
.name = "timer",
.sysc = &omap3xxx_timer_1ms_sysc,
.rev = OMAP_TIMER_IP_VERSION_1,
};
static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_timer_hwmod_class = {
.name = "timer",
.sysc = &omap3xxx_timer_sysc,
.rev = OMAP_TIMER_IP_VERSION_1,
};
/* secure timers dev attribute */
static struct omap_timer_capability_dev_attr capability_secure_dev_attr = {
.timer_capability = OMAP_TIMER_SECURE,
};
/* always-on timers dev attribute */
static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = {
.timer_capability = OMAP_TIMER_ALWON,
};
/* pwm timers dev attribute */
static struct omap_timer_capability_dev_attr capability_pwm_dev_attr = {
.timer_capability = OMAP_TIMER_HAS_PWM,
};
/* timer1 */
static struct omap_hwmod omap3xxx_timer1_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
{
.pa_start = 0x48318000,
.pa_end = 0x48318000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_wkup -> timer1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = {
.master = &omap3xxx_l4_wkup_hwmod,
.slave = &omap3xxx_timer1_hwmod,
.clk = "gpt1_ick",
.addr = omap3xxx_timer1_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer1 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer1_slaves[] = {
&omap3xxx_l4_wkup__timer1,
};
/* timer1 hwmod */
static struct omap_hwmod omap3xxx_timer1_hwmod = {
.name = "timer1",
.mpu_irqs = omap2_timer1_mpu_irqs,
.main_clk = "gpt1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT1_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT1_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer1_slaves),
.class = &omap3xxx_timer_1ms_hwmod_class,
};
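/*
 * Illustrative sketch (not part of the original file): hwmod records such
 * as timer1 above are consumed by name through the omap_hwmod API; the
 * function name below is hypothetical.
 */
static int __init example_enable_timer1(void)
{
        struct omap_hwmod *oh = omap_hwmod_lookup("timer1");

        if (!oh)
                return -ENODEV;
        return omap_hwmod_enable(oh);   /* core handles clocks + SYSCONFIG */
}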
/* timer2 */
static struct omap_hwmod omap3xxx_timer2_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
{
.pa_start = 0x49032000,
.pa_end = 0x49032000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer2_hwmod,
.clk = "gpt2_ick",
.addr = omap3xxx_timer2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer2 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer2_slaves[] = {
&omap3xxx_l4_per__timer2,
};
/* timer2 hwmod */
static struct omap_hwmod omap3xxx_timer2_hwmod = {
.name = "timer2",
.mpu_irqs = omap2_timer2_mpu_irqs,
.main_clk = "gpt2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT2_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT2_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer2_slaves),
.class = &omap3xxx_timer_1ms_hwmod_class,
};
/* timer3 */
static struct omap_hwmod omap3xxx_timer3_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
{
.pa_start = 0x49034000,
.pa_end = 0x49034000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer3_hwmod,
.clk = "gpt3_ick",
.addr = omap3xxx_timer3_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer3 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer3_slaves[] = {
&omap3xxx_l4_per__timer3,
};
/* timer3 hwmod */
static struct omap_hwmod omap3xxx_timer3_hwmod = {
.name = "timer3",
.mpu_irqs = omap2_timer3_mpu_irqs,
.main_clk = "gpt3_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT3_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT3_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer3_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer4 */
static struct omap_hwmod omap3xxx_timer4_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
{
.pa_start = 0x49036000,
.pa_end = 0x49036000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer4 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer4_hwmod,
.clk = "gpt4_ick",
.addr = omap3xxx_timer4_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer4 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer4_slaves[] = {
&omap3xxx_l4_per__timer4,
};
/* timer4 hwmod */
static struct omap_hwmod omap3xxx_timer4_hwmod = {
.name = "timer4",
.mpu_irqs = omap2_timer4_mpu_irqs,
.main_clk = "gpt4_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT4_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT4_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer4_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer4_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer5 */
static struct omap_hwmod omap3xxx_timer5_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
{
.pa_start = 0x49038000,
.pa_end = 0x49038000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer5 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer5_hwmod,
.clk = "gpt5_ick",
.addr = omap3xxx_timer5_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer5 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer5_slaves[] = {
&omap3xxx_l4_per__timer5,
};
/* timer5 hwmod */
static struct omap_hwmod omap3xxx_timer5_hwmod = {
.name = "timer5",
.mpu_irqs = omap2_timer5_mpu_irqs,
.main_clk = "gpt5_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT5_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT5_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer5_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer5_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer6 */
static struct omap_hwmod omap3xxx_timer6_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
{
.pa_start = 0x4903A000,
.pa_end = 0x4903A000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer6 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer6_hwmod,
.clk = "gpt6_ick",
.addr = omap3xxx_timer6_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer6 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer6_slaves[] = {
&omap3xxx_l4_per__timer6,
};
/* timer6 hwmod */
static struct omap_hwmod omap3xxx_timer6_hwmod = {
.name = "timer6",
.mpu_irqs = omap2_timer6_mpu_irqs,
.main_clk = "gpt6_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT6_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT6_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer6_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer6_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer7 */
static struct omap_hwmod omap3xxx_timer7_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
{
.pa_start = 0x4903C000,
.pa_end = 0x4903C000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer7 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer7_hwmod,
.clk = "gpt7_ick",
.addr = omap3xxx_timer7_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer7 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer7_slaves[] = {
&omap3xxx_l4_per__timer7,
};
/* timer7 hwmod */
static struct omap_hwmod omap3xxx_timer7_hwmod = {
.name = "timer7",
.mpu_irqs = omap2_timer7_mpu_irqs,
.main_clk = "gpt7_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT7_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT7_SHIFT,
},
},
.dev_attr = &capability_alwon_dev_attr,
.slaves = omap3xxx_timer7_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer7_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer8 */
static struct omap_hwmod omap3xxx_timer8_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
{
.pa_start = 0x4903E000,
.pa_end = 0x4903E000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer8 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer8_hwmod,
.clk = "gpt8_ick",
.addr = omap3xxx_timer8_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer8 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer8_slaves[] = {
&omap3xxx_l4_per__timer8,
};
/* timer8 hwmod */
static struct omap_hwmod omap3xxx_timer8_hwmod = {
.name = "timer8",
.mpu_irqs = omap2_timer8_mpu_irqs,
.main_clk = "gpt8_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT8_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT8_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.slaves = omap3xxx_timer8_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer8_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer9 */
static struct omap_hwmod omap3xxx_timer9_hwmod;
static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
{
.pa_start = 0x49040000,
.pa_end = 0x49040000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> timer9 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_timer9_hwmod,
.clk = "gpt9_ick",
.addr = omap3xxx_timer9_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer9 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer9_slaves[] = {
&omap3xxx_l4_per__timer9,
};
/* timer9 hwmod */
static struct omap_hwmod omap3xxx_timer9_hwmod = {
.name = "timer9",
.mpu_irqs = omap2_timer9_mpu_irqs,
.main_clk = "gpt9_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT9_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT9_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.slaves = omap3xxx_timer9_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer9_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer10 */
static struct omap_hwmod omap3xxx_timer10_hwmod;
/* l4_core -> timer10 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_timer10_hwmod,
.clk = "gpt10_ick",
.addr = omap2_timer10_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer10 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer10_slaves[] = {
&omap3xxx_l4_core__timer10,
};
/* timer10 hwmod */
static struct omap_hwmod omap3xxx_timer10_hwmod = {
.name = "timer10",
.mpu_irqs = omap2_timer10_mpu_irqs,
.main_clk = "gpt10_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT10_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT10_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.slaves = omap3xxx_timer10_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer10_slaves),
.class = &omap3xxx_timer_1ms_hwmod_class,
};
/* timer11 */
static struct omap_hwmod omap3xxx_timer11_hwmod;
/* l4_core -> timer11 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_timer11_hwmod,
.clk = "gpt11_ick",
.addr = omap2_timer11_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer11 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer11_slaves[] = {
&omap3xxx_l4_core__timer11,
};
/* timer11 hwmod */
static struct omap_hwmod omap3xxx_timer11_hwmod = {
.name = "timer11",
.mpu_irqs = omap2_timer11_mpu_irqs,
.main_clk = "gpt11_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT11_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT11_SHIFT,
},
},
.dev_attr = &capability_pwm_dev_attr,
.slaves = omap3xxx_timer11_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer11_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* timer12*/
static struct omap_hwmod omap3xxx_timer12_hwmod;
static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = {
{ .irq = 95, },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
{
.pa_start = 0x48304000,
.pa_end = 0x48304000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core -> timer12 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer12 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_timer12_hwmod,
.clk = "gpt12_ick",
.addr = omap3xxx_timer12_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* timer12 slave port */
static struct omap_hwmod_ocp_if *omap3xxx_timer12_slaves[] = {
&omap3xxx_l4_core__timer12,
};
/* timer12 hwmod */
static struct omap_hwmod omap3xxx_timer12_hwmod = {
.name = "timer12",
.mpu_irqs = omap3xxx_timer12_mpu_irqs,
.main_clk = "gpt12_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPT12_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPT12_SHIFT,
},
},
.dev_attr = &capability_secure_dev_attr,
.slaves = omap3xxx_timer12_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_timer12_slaves),
.class = &omap3xxx_timer_hwmod_class,
};
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = {
{
.pa_start = 0x48314000,
.pa_end = 0x4831407f,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
.master = &omap3xxx_l4_wkup_hwmod,
.slave = &omap3xxx_wd_timer2_hwmod,
.clk = "wdt2_ick",
.addr = omap3xxx_wd_timer2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/*
* 'wd_timer' class
* 32-bit watchdog upward counter that generates a pulse on the reset pin on
* overflow condition
*/
static struct omap_hwmod_class_sysconfig omap3xxx_wd_timer_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
.rev_offs = 0x00,
.sysc_offs = 0x20,
.syss_offs = 0x10,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_wd_timer_hwmod_class = {
.name = "wd_timer",
.sysc = &omap3xxx_wd_timer_sysc,
.pre_shutdown = &omap2_wd_timer_disable
};
/* wd_timer2 */
static struct omap_hwmod_ocp_if *omap3xxx_wd_timer2_slaves[] = {
&omap3xxx_l4_wkup__wd_timer2,
};
static struct omap_hwmod omap3xxx_wd_timer2_hwmod = {
.name = "wd_timer2",
.class = &omap3xxx_wd_timer_hwmod_class,
.main_clk = "wdt2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_WDT2_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_WDT2_SHIFT,
},
},
.slaves = omap3xxx_wd_timer2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_slaves),
/*
* XXX: Use software supervised mode, HW supervised smartidle seems to
* block CORE power domain idle transitions. Maybe a HW bug in wdt2?
*/
.flags = HWMOD_SWSUP_SIDLE,
};
/* UART1 */
static struct omap_hwmod_ocp_if *omap3xxx_uart1_slaves[] = {
&omap3_l4_core__uart1,
};
static struct omap_hwmod omap3xxx_uart1_hwmod = {
.name = "uart1",
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_UART1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_UART1_SHIFT,
},
},
.slaves = omap3xxx_uart1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart1_slaves),
.class = &omap2_uart_class,
};
/* UART2 */
static struct omap_hwmod_ocp_if *omap3xxx_uart2_slaves[] = {
&omap3_l4_core__uart2,
};
static struct omap_hwmod omap3xxx_uart2_hwmod = {
.name = "uart2",
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_UART2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_UART2_SHIFT,
},
},
.slaves = omap3xxx_uart2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart2_slaves),
.class = &omap2_uart_class,
};
/* UART3 */
static struct omap_hwmod_ocp_if *omap3xxx_uart3_slaves[] = {
&omap3_l4_per__uart3,
};
static struct omap_hwmod omap3xxx_uart3_hwmod = {
.name = "uart3",
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_UART3_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_UART3_SHIFT,
},
},
.slaves = omap3xxx_uart3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart3_slaves),
.class = &omap2_uart_class,
};
/* UART4 */
static struct omap_hwmod_irq_info uart4_mpu_irqs[] = {
{ .irq = INT_36XX_UART4_IRQ, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info uart4_sdma_reqs[] = {
{ .name = "rx", .dma_req = OMAP36XX_DMA_UART4_RX, },
{ .name = "tx", .dma_req = OMAP36XX_DMA_UART4_TX, },
{ .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = {
&omap3_l4_per__uart4,
};
static struct omap_hwmod omap3xxx_uart4_hwmod = {
.name = "uart4",
.mpu_irqs = uart4_mpu_irqs,
.sdma_reqs = uart4_sdma_reqs,
.main_clk = "uart4_fck",
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3630_EN_UART4_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3630_EN_UART4_SHIFT,
},
},
.slaves = omap3xxx_uart4_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart4_slaves),
.class = &omap2_uart_class,
};
/* AM35xx UART4 */
static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = {
{ .irq = INT_35XX_UART4_IRQ, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = {
{ .name = "rx", .dma_req = AM35XX_DMA_UART4_RX, },
{ .name = "tx", .dma_req = AM35XX_DMA_UART4_TX, },
{ .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *am35xx_uart4_slaves[] = {
&am35xx_l4_core__uart4,
};
static struct omap_hwmod am35xx_uart4_hwmod = {
.name = "uart4",
.mpu_irqs = am35xx_uart4_mpu_irqs,
.sdma_reqs = am35xx_uart4_sdma_reqs,
.main_clk = "uart4_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_UART4_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_UART4_SHIFT,
},
},
.slaves = am35xx_uart4_slaves,
.slaves_cnt = ARRAY_SIZE(am35xx_uart4_slaves),
.class = &omap2_uart_class,
};
static struct omap_hwmod_class i2c_class = {
.name = "i2c",
.sysc = &i2c_sysc,
.rev = OMAP_I2C_IP_VERSION_1,
.reset = &omap_i2c_reset,
};
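/*
 * Why .reset is set on this class (a sketch, assuming the dispatch in
 * _reset() in arch/arm/mach-omap2/omap_hwmod.c): when a class supplies
 * a custom reset, the hwmod core calls it instead of the generic OCP
 * softreset, roughly:
 *
 * if (oh->class->reset)
 * oh->class->reset(oh);
 * else
 * _ocp_softreset(oh);
 *
 * omap_i2c_reset() can then perform any I2C-specific sequencing that
 * the generic softreset cannot.
 */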
static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = {
{ .name = "dispc", .dma_req = 5 },
{ .name = "dsi1", .dma_req = 74 },
{ .dma_req = -1 }
};
/* dss */
/* dss master ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_masters[] = {
&omap3xxx_dss__l3,
};
/* l4_core -> dss */
static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3430es1_dss_core_hwmod,
.clk = "dss_ick",
.addr = omap2_dss_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_core_hwmod,
.clk = "dss_ick",
.addr = omap2_dss_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dss slave ports */
static struct omap_hwmod_ocp_if *omap3430es1_dss_slaves[] = {
&omap3430es1_l4_core__dss,
};
static struct omap_hwmod_ocp_if *omap3xxx_dss_slaves[] = {
&omap3xxx_l4_core__dss,
};
static struct omap_hwmod_opt_clk dss_opt_clks[] = {
/*
* The DSS HW needs all DSS clocks enabled during reset. The dss_core
* driver does not use these clocks.
*/
{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
{ .role = "tv_clk", .clk = "dss_tv_fck" },
/* required only on OMAP3430 */
{ .role = "tv_dac_clk", .clk = "dss_96m_fck" },
};
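/*
 * Sketch of how HWMOD_CONTROL_OPT_CLKS_IN_RESET (set on the DSS hwmods
 * below) honours the comment above, assuming the helpers in
 * omap_hwmod.c: the core enables every .opt_clks entry around the
 * softreset even though the dss_core driver never claims them:
 *
 * if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
 * _enable_optional_clocks(oh);
 * _ocp_softreset(oh);
 * if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
 * _disable_optional_clocks(oh);
 */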
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
.name = "dss_core",
.class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_DSS1_SHIFT,
.module_offs = OMAP3430_DSS_MOD,
.idlest_reg_id = 1,
.idlest_stdby_bit = OMAP3430ES1_ST_DSS_SHIFT,
},
},
.opt_clks = dss_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
.slaves = omap3430es1_dss_slaves,
.slaves_cnt = ARRAY_SIZE(omap3430es1_dss_slaves),
.masters = omap3xxx_dss_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
.flags = HWMOD_NO_IDLEST | HWMOD_CONTROL_OPT_CLKS_IN_RESET,
};
static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.name = "dss_core",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_DSS1_SHIFT,
.module_offs = OMAP3430_DSS_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT,
.idlest_stdby_bit = OMAP3430ES2_ST_DSS_STDBY_SHIFT,
},
},
.opt_clks = dss_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_opt_clks),
.slaves = omap3xxx_dss_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_slaves),
.masters = omap3xxx_dss_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
};
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSC_HAS_ENAWAKEUP),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap3_dispc_sysc,
};
/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dispc_hwmod,
.clk = "dss_ick",
.addr = omap2_dss_dispc_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dss_dispc slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
&omap3xxx_l4_core__dss_dispc,
};
static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap3_dispc_hwmod_class,
.mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_DSS1_SHIFT,
.module_offs = OMAP3430_DSS_MOD,
},
},
.slaves = omap3xxx_dss_dispc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_slaves),
.flags = HWMOD_NO_IDLEST,
.dev_attr = &omap2_3_dss_dispc_dev_attr
};
/*
* 'dsi' class
* display serial interface controller
*/
static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
.name = "dsi",
};
static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
{ .irq = 25 },
{ .irq = -1 }
};
/* dss_dsi1 */
static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
{
.pa_start = 0x4804FC00,
.pa_end = 0x4804FFFF,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core -> dss_dsi1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dsi1_hwmod,
.clk = "dss_ick",
.addr = omap3xxx_dss_dsi1_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dss_dsi1 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_dsi1_slaves[] = {
&omap3xxx_l4_core__dss_dsi1,
};
static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
};
static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap3xxx_dsi_hwmod_class,
.mpu_irqs = omap3xxx_dsi1_irqs,
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_DSS1_SHIFT,
.module_offs = OMAP3430_DSS_MOD,
},
},
.opt_clks = dss_dsi1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks),
.slaves = omap3xxx_dss_dsi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_slaves),
.flags = HWMOD_NO_IDLEST,
};
/* l4_core -> dss_rfbi */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_rfbi_hwmod,
.clk = "dss_ick",
.addr = omap2_dss_rfbi_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dss_rfbi slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
&omap3xxx_l4_core__dss_rfbi,
};
static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = {
{ .role = "ick", .clk = "dss_ick" },
};
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap2_rfbi_hwmod_class,
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_DSS1_SHIFT,
.module_offs = OMAP3430_DSS_MOD,
},
},
.opt_clks = dss_rfbi_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks),
.slaves = omap3xxx_dss_rfbi_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_slaves),
.flags = HWMOD_NO_IDLEST,
};
/* l4_core -> dss_venc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_venc_hwmod,
.clk = "dss_ick",
.addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
.flags = OMAP_FIREWALL_L4,
}
},
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dss_venc slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
&omap3xxx_l4_core__dss_venc,
};
static struct omap_hwmod_opt_clk dss_venc_opt_clks[] = {
/* required only on OMAP3430 */
{ .role = "tv_dac_clk", .clk = "dss_96m_fck" },
};
static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.name = "dss_venc",
.class = &omap2_venc_hwmod_class,
.main_clk = "dss_tv_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_DSS1_SHIFT,
.module_offs = OMAP3430_DSS_MOD,
},
},
.opt_clks = dss_venc_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(dss_venc_opt_clks),
.slaves = omap3xxx_dss_venc_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dss_venc_slaves),
.flags = HWMOD_NO_IDLEST,
};
/* I2C1 */
static struct omap_i2c_dev_attr i2c1_dev_attr = {
.fifo_depth = 8, /* bytes */
.flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
&omap3_l4_core__i2c1,
};
static struct omap_hwmod omap3xxx_i2c1_hwmod = {
.name = "i2c1",
.flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap2_i2c1_mpu_irqs,
.sdma_reqs = omap2_i2c1_sdma_reqs,
.main_clk = "i2c1_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_I2C1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_I2C1_SHIFT,
},
},
.slaves = omap3xxx_i2c1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_i2c1_slaves),
.class = &i2c_class,
.dev_attr = &i2c1_dev_attr,
};
/* I2C2 */
static struct omap_i2c_dev_attr i2c2_dev_attr = {
.fifo_depth = 8, /* bytes */
.flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
&omap3_l4_core__i2c2,
};
static struct omap_hwmod omap3xxx_i2c2_hwmod = {
.name = "i2c2",
.flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = omap2_i2c2_mpu_irqs,
.sdma_reqs = omap2_i2c2_sdma_reqs,
.main_clk = "i2c2_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_I2C2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_I2C2_SHIFT,
},
},
.slaves = omap3xxx_i2c2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_i2c2_slaves),
.class = &i2c_class,
.dev_attr = &i2c2_dev_attr,
};
/* I2C3 */
static struct omap_i2c_dev_attr i2c3_dev_attr = {
.fifo_depth = 64, /* bytes */
.flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
OMAP_I2C_FLAG_BUS_SHIFT_2,
};
static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
{ .irq = INT_34XX_I2C3_IRQ, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = {
{ .name = "tx", .dma_req = OMAP34XX_DMA_I2C3_TX },
{ .name = "rx", .dma_req = OMAP34XX_DMA_I2C3_RX },
{ .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
&omap3_l4_core__i2c3,
};
static struct omap_hwmod omap3xxx_i2c3_hwmod = {
.name = "i2c3",
.flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
.mpu_irqs = i2c3_mpu_irqs,
.sdma_reqs = i2c3_sdma_reqs,
.main_clk = "i2c3_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_I2C3_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_I2C3_SHIFT,
},
},
.slaves = omap3xxx_i2c3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_i2c3_slaves),
.class = &i2c_class,
.dev_attr = &i2c3_dev_attr,
};
/* l4_wkup -> gpio1 */
static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = {
{
.pa_start = 0x48310000,
.pa_end = 0x483101ff,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
.master = &omap3xxx_l4_wkup_hwmod,
.slave = &omap3xxx_gpio1_hwmod,
.addr = omap3xxx_gpio1_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4_per -> gpio2 */
static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = {
{
.pa_start = 0x49050000,
.pa_end = 0x490501ff,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio2_hwmod,
.addr = omap3xxx_gpio2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4_per -> gpio3 */
static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = {
{
.pa_start = 0x49052000,
.pa_end = 0x490521ff,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio3_hwmod,
.addr = omap3xxx_gpio3_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4_per -> gpio4 */
static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = {
{
.pa_start = 0x49054000,
.pa_end = 0x490541ff,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio4_hwmod,
.addr = omap3xxx_gpio4_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4_per -> gpio5 */
static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = {
{
.pa_start = 0x49056000,
.pa_end = 0x490561ff,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio5_hwmod,
.addr = omap3xxx_gpio5_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4_per -> gpio6 */
static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = {
{
.pa_start = 0x49058000,
.pa_end = 0x490581ff,
.flags = ADDR_TYPE_RT
},
{ }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio6_hwmod,
.addr = omap3xxx_gpio6_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/*
* 'gpio' class
* general-purpose I/O module
*/
static struct omap_hwmod_class_sysconfig omap3xxx_gpio_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_gpio_hwmod_class = {
.name = "gpio",
.sysc = &omap3xxx_gpio_sysc,
.rev = 1,
};
/* gpio_dev_attr */
static struct omap_gpio_dev_attr gpio_dev_attr = {
.bank_width = 32,
.dbck_flag = true,
};
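/*
 * Consumption sketch (assumed; see omap2_gpio_dev_init() in
 * arch/arm/mach-omap2/gpio.c): the GPIO device-build code copies these
 * attributes from each bank's hwmod into its platform data:
 *
 * struct omap_gpio_dev_attr *da = oh->dev_attr;
 *
 * pdata->bank_width = da->bank_width;
 * pdata->dbck_flag = da->dbck_flag;
 */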
/* gpio1 */
static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio1_dbck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = {
&omap3xxx_l4_wkup__gpio1,
};
static struct omap_hwmod omap3xxx_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio1_irqs,
.main_clk = "gpio1_ick",
.opt_clks = gpio1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPIO1_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPIO1_SHIFT,
},
},
.slaves = omap3xxx_gpio1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_gpio1_slaves),
.class = &omap3xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
};
/* gpio2 */
static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio2_dbck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = {
&omap3xxx_l4_per__gpio2,
};
static struct omap_hwmod omap3xxx_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio2_irqs,
.main_clk = "gpio2_ick",
.opt_clks = gpio2_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPIO2_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPIO2_SHIFT,
},
},
.slaves = omap3xxx_gpio2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_gpio2_slaves),
.class = &omap3xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
};
/* gpio3 */
static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio3_dbck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = {
&omap3xxx_l4_per__gpio3,
};
static struct omap_hwmod omap3xxx_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio3_irqs,
.main_clk = "gpio3_ick",
.opt_clks = gpio3_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPIO3_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPIO3_SHIFT,
},
},
.slaves = omap3xxx_gpio3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_gpio3_slaves),
.class = &omap3xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
};
/* gpio4 */
static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio4_dbck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = {
&omap3xxx_l4_per__gpio4,
};
static struct omap_hwmod omap3xxx_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2_gpio4_irqs,
.main_clk = "gpio4_ick",
.opt_clks = gpio4_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPIO4_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPIO4_SHIFT,
},
},
.slaves = omap3xxx_gpio4_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_gpio4_slaves),
.class = &omap3xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
};
/* gpio5 */
static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = {
{ .irq = 33 }, /* INT_34XX_GPIO_BANK5 */
{ .irq = -1 }
};
static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio5_dbck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = {
&omap3xxx_l4_per__gpio5,
};
static struct omap_hwmod omap3xxx_gpio5_hwmod = {
.name = "gpio5",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio5_irqs,
.main_clk = "gpio5_ick",
.opt_clks = gpio5_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPIO5_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPIO5_SHIFT,
},
},
.slaves = omap3xxx_gpio5_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_gpio5_slaves),
.class = &omap3xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
};
/* gpio6 */
static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = {
{ .irq = 34 }, /* INT_34XX_GPIO_BANK6 */
{ .irq = -1 }
};
static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio6_dbck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = {
&omap3xxx_l4_per__gpio6,
};
static struct omap_hwmod omap3xxx_gpio6_hwmod = {
.name = "gpio6",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio6_irqs,
.main_clk = "gpio6_ick",
.opt_clks = gpio6_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks),
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_GPIO6_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_GPIO6_SHIFT,
},
},
.slaves = omap3xxx_gpio6_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_gpio6_slaves),
.class = &omap3xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
};
/* dma_system -> L3 */
static struct omap_hwmod_ocp_if omap3xxx_dma_system__l3 = {
.master = &omap3xxx_dma_system_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.clk = "core_l3_ick",
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dma attributes */
static struct omap_dma_dev_attr dma_dev_attr = {
.dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
.lch_count = 32,
};
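/*
 * Sketch of how these capabilities reach the DMA code (an assumption
 * for this tree): the system-DMA device-build code is expected to pass
 * the hwmod's dev_attr through as platform data, e.g.:
 *
 * struct omap_dma_dev_attr *d = oh->dev_attr;
 *
 * pdata->dma_attr = d; (lch_count and dev_caps are then
 * consumed by the plat-omap DMA core)
 */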
static struct omap_hwmod_class_sysconfig omap3xxx_dma_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x002c,
.syss_offs = 0x0028,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_dma_hwmod_class = {
.name = "dma",
.sysc = &omap3xxx_dma_sysc,
};
/* dma_system */
static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
{
.pa_start = 0x48056000,
.pa_end = 0x48056fff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* dma_system master ports */
static struct omap_hwmod_ocp_if *omap3xxx_dma_system_masters[] = {
&omap3xxx_dma_system__l3,
};
/* l4_cfg -> dma_system */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dma_system_hwmod,
.clk = "core_l4_ick",
.addr = omap3xxx_dma_system_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* dma_system slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_dma_system_slaves[] = {
&omap3xxx_l4_core__dma_system,
};
static struct omap_hwmod omap3xxx_dma_system_hwmod = {
.name = "dma",
.class = &omap3xxx_dma_hwmod_class,
.mpu_irqs = omap2_dma_system_irqs,
.main_clk = "core_l3_ick",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_ST_SDMA_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_SDMA_SHIFT,
},
},
.slaves = omap3xxx_dma_system_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_dma_system_slaves),
.masters = omap3xxx_dma_system_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_dma_system_masters),
.dev_attr = &dma_dev_attr,
.flags = HWMOD_NO_IDLEST,
};
/*
* 'mcbsp' class
* multi channel buffered serial port controller
*/
static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sysc = {
.sysc_offs = 0x008c,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP |
SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
.clockact = 0x2,
};
static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = {
.name = "mcbsp",
.sysc = &omap3xxx_mcbsp_sysc,
.rev = MCBSP_CONFIG_TYPE3,
};
/* mcbsp1 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
{ .name = "irq", .irq = 16 },
{ .name = "tx", .irq = 59 },
{ .name = "rx", .irq = 60 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
{
.name = "mpu",
.pa_start = 0x48074000,
.pa_end = 0x480740ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core -> mcbsp1 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mcbsp1_hwmod,
.clk = "mcbsp1_ick",
.addr = omap3xxx_mcbsp1_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* mcbsp1 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp1_slaves[] = {
&omap3xxx_l4_core__mcbsp1,
};
static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
.name = "mcbsp1",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp1_irqs,
.sdma_reqs = omap2_mcbsp1_sdma_reqs,
.main_clk = "mcbsp1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP1_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP1_SHIFT,
},
},
.slaves = omap3xxx_mcbsp1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_slaves),
};
/* mcbsp2 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = {
{ .name = "irq", .irq = 17 },
{ .name = "tx", .irq = 62 },
{ .name = "rx", .irq = 63 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
{
.name = "mpu",
.pa_start = 0x49022000,
.pa_end = 0x490220ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> mcbsp2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_mcbsp2_hwmod,
.clk = "mcbsp2_ick",
.addr = omap3xxx_mcbsp2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* mcbsp2 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_slaves[] = {
&omap3xxx_l4_per__mcbsp2,
};
static struct omap_mcbsp_dev_attr omap34xx_mcbsp2_dev_attr = {
.sidetone = "mcbsp2_sidetone",
};
static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
.name = "mcbsp2",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp2_irqs,
.sdma_reqs = omap2_mcbsp2_sdma_reqs,
.main_clk = "mcbsp2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP2_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
},
},
.slaves = omap3xxx_mcbsp2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_slaves),
.dev_attr = &omap34xx_mcbsp2_dev_attr,
};
/* mcbsp3 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = {
{ .name = "irq", .irq = 22 },
{ .name = "tx", .irq = 89 },
{ .name = "rx", .irq = 90 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
{
.name = "mpu",
.pa_start = 0x49024000,
.pa_end = 0x490240ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> mcbsp3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_mcbsp3_hwmod,
.clk = "mcbsp3_ick",
.addr = omap3xxx_mcbsp3_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* mcbsp3 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_slaves[] = {
&omap3xxx_l4_per__mcbsp3,
};
static struct omap_mcbsp_dev_attr omap34xx_mcbsp3_dev_attr = {
.sidetone = "mcbsp3_sidetone",
};
static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
.name = "mcbsp3",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp3_irqs,
.sdma_reqs = omap2_mcbsp3_sdma_reqs,
.main_clk = "mcbsp3_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP3_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
},
},
.slaves = omap3xxx_mcbsp3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_slaves),
.dev_attr = &omap34xx_mcbsp3_dev_attr,
};
/* mcbsp4 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = {
{ .name = "irq", .irq = 23 },
{ .name = "tx", .irq = 54 },
{ .name = "rx", .irq = 55 },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = {
{ .name = "rx", .dma_req = 20 },
{ .name = "tx", .dma_req = 19 },
{ .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
{
.name = "mpu",
.pa_start = 0x49026000,
.pa_end = 0x490260ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> mcbsp4 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_mcbsp4_hwmod,
.clk = "mcbsp4_ick",
.addr = omap3xxx_mcbsp4_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* mcbsp4 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp4_slaves[] = {
&omap3xxx_l4_per__mcbsp4,
};
static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
.name = "mcbsp4",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp4_irqs,
.sdma_reqs = omap3xxx_mcbsp4_sdma_chs,
.main_clk = "mcbsp4_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP4_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP4_SHIFT,
},
},
.slaves = omap3xxx_mcbsp4_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_slaves),
};
/* mcbsp5 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = {
{ .name = "irq", .irq = 27 },
{ .name = "tx", .irq = 81 },
{ .name = "rx", .irq = 82 },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = {
{ .name = "rx", .dma_req = 22 },
{ .name = "tx", .dma_req = 21 },
{ .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
{
.name = "mpu",
.pa_start = 0x48096000,
.pa_end = 0x480960ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core -> mcbsp5 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mcbsp5_hwmod,
.clk = "mcbsp5_ick",
.addr = omap3xxx_mcbsp5_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* mcbsp5 slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp5_slaves[] = {
&omap3xxx_l4_core__mcbsp5,
};
static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
.name = "mcbsp5",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp5_irqs,
.sdma_reqs = omap3xxx_mcbsp5_sdma_chs,
.main_clk = "mcbsp5_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP5_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP5_SHIFT,
},
},
.slaves = omap3xxx_mcbsp5_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_slaves),
};
/* 'mcbsp sidetone' class */
static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sidetone_sysc = {
.sysc_offs = 0x0010,
.sysc_flags = SYSC_HAS_AUTOIDLE,
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = {
.name = "mcbsp_sidetone",
.sysc = &omap3xxx_mcbsp_sidetone_sysc,
};
/* mcbsp2_sidetone */
static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = {
{ .name = "irq", .irq = 4 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
{
.name = "sidetone",
.pa_start = 0x49028000,
.pa_end = 0x490280ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> mcbsp2_sidetone */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_mcbsp2_sidetone_hwmod,
.clk = "mcbsp2_ick",
.addr = omap3xxx_mcbsp2_sidetone_addrs,
.user = OCP_USER_MPU,
};
/* mcbsp2_sidetone slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp2_sidetone_slaves[] = {
&omap3xxx_l4_per__mcbsp2_sidetone,
};
static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
.name = "mcbsp2_sidetone",
.class = &omap3xxx_mcbsp_sidetone_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp2_sidetone_irqs,
.main_clk = "mcbsp2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP2_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
},
},
.slaves = omap3xxx_mcbsp2_sidetone_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_slaves),
};
/* mcbsp3_sidetone */
static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = {
{ .name = "irq", .irq = 5 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
{
.name = "sidetone",
.pa_start = 0x4902A000,
.pa_end = 0x4902A0ff,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_per -> mcbsp3_sidetone */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_mcbsp3_sidetone_hwmod,
.clk = "mcbsp3_ick",
.addr = omap3xxx_mcbsp3_sidetone_addrs,
.user = OCP_USER_MPU,
};
/* mcbsp3_sidetone slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mcbsp3_sidetone_slaves[] = {
&omap3xxx_l4_per__mcbsp3_sidetone,
};
static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
.name = "mcbsp3_sidetone",
.class = &omap3xxx_mcbsp_sidetone_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp3_sidetone_irqs,
.main_clk = "mcbsp3_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCBSP3_SHIFT,
.module_offs = OMAP3430_PER_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
},
},
.slaves = omap3xxx_mcbsp3_sidetone_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_slaves),
};
/* SR common */
static struct omap_hwmod_sysc_fields omap34xx_sr_sysc_fields = {
.clkact_shift = 20,
};
static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = {
.sysc_offs = 0x24,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_NO_CACHE),
.clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap34xx_sr_sysc_fields,
};
static struct omap_hwmod_class omap34xx_smartreflex_hwmod_class = {
.name = "smartreflex",
.sysc = &omap34xx_sr_sysc,
.rev = 1,
};
static struct omap_hwmod_sysc_fields omap36xx_sr_sysc_fields = {
.sidle_shift = 24,
.enwkup_shift = 26
};
static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = {
.sysc_offs = 0x38,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
SYSC_NO_CACHE),
.sysc_fields = &omap36xx_sr_sysc_fields,
};
static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = {
.name = "smartreflex",
.sysc = &omap36xx_sr_sysc,
.rev = 2,
};
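/*
 * The two SmartReflex classes above differ only in SYSCONFIG layout;
 * .rev lets the driver select the matching register map at probe time.
 * A consumption sketch (assumed; the field name is illustrative):
 *
 * sr_data->ip_type = oh->class->rev; (1 = 34xx SR, 2 = 36xx SR)
 */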
/* SR1 */
static struct omap_smartreflex_dev_attr sr1_dev_attr = {
.sensor_voltdm_name = "mpu_iva",
};
static struct omap_hwmod_ocp_if *omap3_sr1_slaves[] = {
&omap3_l4_core__sr1,
};
static struct omap_hwmod omap34xx_sr1_hwmod = {
.name = "sr1_hwmod",
.class = &omap34xx_smartreflex_hwmod_class,
.main_clk = "sr1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_SR1_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_SR1_SHIFT,
},
},
.slaves = omap3_sr1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr1_slaves),
.dev_attr = &sr1_dev_attr,
.mpu_irqs = omap3_smartreflex_mpu_irqs,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
static struct omap_hwmod omap36xx_sr1_hwmod = {
.name = "sr1_hwmod",
.class = &omap36xx_smartreflex_hwmod_class,
.main_clk = "sr1_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_SR1_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_SR1_SHIFT,
},
},
.slaves = omap3_sr1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr1_slaves),
.dev_attr = &sr1_dev_attr,
.mpu_irqs = omap3_smartreflex_mpu_irqs,
};
/* SR2 */
static struct omap_smartreflex_dev_attr sr2_dev_attr = {
.sensor_voltdm_name = "core",
};
static struct omap_hwmod_ocp_if *omap3_sr2_slaves[] = {
&omap3_l4_core__sr2,
};
static struct omap_hwmod omap34xx_sr2_hwmod = {
.name = "sr2_hwmod",
.class = &omap34xx_smartreflex_hwmod_class,
.main_clk = "sr2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_SR2_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_SR2_SHIFT,
},
},
.slaves = omap3_sr2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr2_slaves),
.dev_attr = &sr2_dev_attr,
.mpu_irqs = omap3_smartreflex_core_irqs,
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
static struct omap_hwmod omap36xx_sr2_hwmod = {
.name = "sr2_hwmod",
.class = &omap36xx_smartreflex_hwmod_class,
.main_clk = "sr2_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_SR2_SHIFT,
.module_offs = WKUP_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_EN_SR2_SHIFT,
},
},
.slaves = omap3_sr2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3_sr2_slaves),
.dev_attr = &sr2_dev_attr,
.mpu_irqs = omap3_smartreflex_core_irqs,
};
/*
* 'mailbox' class
* mailbox module allowing communication between the on-chip processors
* using a queued mailbox-interrupt mechanism.
*/
static struct omap_hwmod_class_sysconfig omap3xxx_mailbox_sysc = {
.rev_offs = 0x000,
.sysc_offs = 0x010,
.syss_offs = 0x014,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = {
.name = "mailbox",
.sysc = &omap3xxx_mailbox_sysc,
};
static struct omap_hwmod omap3xxx_mailbox_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mailbox_irqs[] = {
{ .irq = 26 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
{
.pa_start = 0x48094000,
.pa_end = 0x480941ff,
.flags = ADDR_TYPE_RT,
},
{ }
};
/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mailbox_hwmod,
.addr = omap3xxx_mailbox_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* mailbox slave ports */
static struct omap_hwmod_ocp_if *omap3xxx_mailbox_slaves[] = {
&omap3xxx_l4_core__mailbox,
};
static struct omap_hwmod omap3xxx_mailbox_hwmod = {
.name = "mailbox",
.class = &omap3xxx_mailbox_hwmod_class,
.mpu_irqs = omap3xxx_mailbox_irqs,
.main_clk = "mailboxes_ick",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MAILBOXES_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MAILBOXES_SHIFT,
},
},
.slaves = omap3xxx_mailbox_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mailbox_slaves),
};
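/*
 * Client-side usage sketch for the mailbox described above (assumed
 * API from arch/arm/plat-omap/mailbox.c; the signature of
 * omap_mbox_get() varies between kernel versions, and the "dsp"
 * mailbox name is for illustration only):
 *
 * struct omap_mbox *mbox = omap_mbox_get("dsp");
 * mbox_msg_t msg = 0xdeadbeef;
 *
 * if (!IS_ERR(mbox)) {
 * omap_mbox_msg_send(mbox, msg);
 * omap_mbox_put(mbox);
 * }
 */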
/* l4 core -> mcspi1 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi1,
.clk = "mcspi1_ick",
.addr = omap2_mcspi1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi2 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi2,
.clk = "mcspi2_ick",
.addr = omap2_mcspi2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi3 interface */
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi3,
.clk = "mcspi3_ick",
.addr = omap2430_mcspi3_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi4 interface */
static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
{
.pa_start = 0x480ba000,
.pa_end = 0x480ba0ff,
.flags = ADDR_TYPE_RT,
},
{ }
};
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi4,
.clk = "mcspi4_ick",
.addr = omap34xx_mcspi4_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/*
* 'mcspi' class
* multichannel serial port interface (mcspi) / master/slave synchronous serial
* bus
*/
static struct omap_hwmod_class_sysconfig omap34xx_mcspi_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap34xx_mcspi_class = {
.name = "mcspi",
.sysc = &omap34xx_mcspi_sysc,
.rev = OMAP3_MCSPI_REV,
};
/* mcspi1 */
static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = {
&omap34xx_l4_core__mcspi1,
};
static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
.num_chipselect = 4,
};
static struct omap_hwmod omap34xx_mcspi1 = {
.name = "mcspi1",
.mpu_irqs = omap2_mcspi1_mpu_irqs,
.sdma_reqs = omap2_mcspi1_sdma_reqs,
.main_clk = "mcspi1_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCSPI1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCSPI1_SHIFT,
},
},
.slaves = omap34xx_mcspi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap34xx_mcspi1_slaves),
.class = &omap34xx_mcspi_class,
.dev_attr = &omap_mcspi1_dev_attr,
};
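/*
 * Sketch of how .dev_attr reaches the McSPI driver (assumed; based on
 * the hwmod-built McSPI devices in arch/arm/mach-omap2/devices.c):
 *
 * struct omap2_mcspi_dev_attr *a = oh->dev_attr;
 * struct omap2_mcspi_platform_config pdata = {
 * .num_cs = a->num_chipselect,
 * };
 *
 * Names other than num_chipselect are assumptions for illustration.
 */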
/* mcspi2 */
static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = {
&omap34xx_l4_core__mcspi2,
};
static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
.num_chipselect = 2,
};
static struct omap_hwmod omap34xx_mcspi2 = {
.name = "mcspi2",
.mpu_irqs = omap2_mcspi2_mpu_irqs,
.sdma_reqs = omap2_mcspi2_sdma_reqs,
.main_clk = "mcspi2_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCSPI2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCSPI2_SHIFT,
},
},
.slaves = omap34xx_mcspi2_slaves,
.slaves_cnt = ARRAY_SIZE(omap34xx_mcspi2_slaves),
.class = &omap34xx_mcspi_class,
.dev_attr = &omap_mcspi2_dev_attr,
};
/* mcspi3 */
static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
{ .name = "irq", .irq = 91 }, /* 91 */
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
{ .name = "tx0", .dma_req = 15 },
{ .name = "rx0", .dma_req = 16 },
{ .name = "tx1", .dma_req = 23 },
{ .name = "rx1", .dma_req = 24 },
{ .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = {
&omap34xx_l4_core__mcspi3,
};
static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
.num_chipselect = 2,
};
static struct omap_hwmod omap34xx_mcspi3 = {
.name = "mcspi3",
.mpu_irqs = omap34xx_mcspi3_mpu_irqs,
.sdma_reqs = omap34xx_mcspi3_sdma_reqs,
.main_clk = "mcspi3_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCSPI3_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCSPI3_SHIFT,
},
},
.slaves = omap34xx_mcspi3_slaves,
.slaves_cnt = ARRAY_SIZE(omap34xx_mcspi3_slaves),
.class = &omap34xx_mcspi_class,
.dev_attr = &omap_mcspi3_dev_attr,
};
/* mcspi4 */
static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
{ .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
{ .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
{ .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
{ .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = {
&omap34xx_l4_core__mcspi4,
};
static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
.num_chipselect = 1,
};
static struct omap_hwmod omap34xx_mcspi4 = {
.name = "mcspi4",
.mpu_irqs = omap34xx_mcspi4_mpu_irqs,
.sdma_reqs = omap34xx_mcspi4_sdma_reqs,
.main_clk = "mcspi4_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MCSPI4_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MCSPI4_SHIFT,
},
},
.slaves = omap34xx_mcspi4_slaves,
.slaves_cnt = ARRAY_SIZE(omap34xx_mcspi4_slaves),
.class = &omap34xx_mcspi_class,
.dev_attr = &omap_mcspi4_dev_attr,
};
/*
* usbhsotg
*/
static struct omap_hwmod_class_sysconfig omap3xxx_usbhsotg_sysc = {
.rev_offs = 0x0400,
.sysc_offs = 0x0404,
.syss_offs = 0x0408,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE|
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class usbotg_class = {
.name = "usbotg",
.sysc = &omap3xxx_usbhsotg_sysc,
};
/* usb_otg_hs */
static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
{ .name = "mc", .irq = 92 },
{ .name = "dma", .irq = 93 },
{ .irq = -1 }
};
static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
.name = "usb_otg_hs",
.mpu_irqs = omap3xxx_usbhsotg_mpu_irqs,
.main_clk = "hsotgusb_ick",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
.module_offs = CORE_MOD,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT,
.idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT
},
},
.masters = omap3xxx_usbhsotg_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_masters),
.slaves = omap3xxx_usbhsotg_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_slaves),
.class = &usbotg_class,
/*
 * Erratum i479: the idle_req / idle_ack mechanism is potentially
 * broken when autoidle is enabled; the workaround is to disable
 * the autoidle bit at the module level.
 */
.flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
| HWMOD_SWSUP_MSTANDBY,
};
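/*
 * Effect of HWMOD_NO_OCP_AUTOIDLE above, sketched from the expected
 * behaviour of _enable_sysc() in omap_hwmod.c: the core keeps the
 * module-level autoidle bit clear whenever the flag is set:
 *
 * if (sf & SYSC_HAS_AUTOIDLE) {
 * idlemode = (oh->flags & HWMOD_NO_OCP_AUTOIDLE) ? 0 : 1;
 * _set_module_autoidle(oh, idlemode, &v);
 * }
 */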
/* usb_otg_hs */
static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
{ .name = "mc", .irq = 71 },
{ .irq = -1 }
};
static struct omap_hwmod_class am35xx_usbotg_class = {
.name = "am35xx_usbotg",
.sysc = NULL,
};
static struct omap_hwmod am35xx_usbhsotg_hwmod = {
.name = "am35x_otg_hs",
.mpu_irqs = am35xx_usbhsotg_mpu_irqs,
.main_clk = NULL,
.prcm = {
.omap2 = {
},
},
.masters = am35xx_usbhsotg_masters,
.masters_cnt = ARRAY_SIZE(am35xx_usbhsotg_masters),
.slaves = am35xx_usbhsotg_slaves,
.slaves_cnt = ARRAY_SIZE(am35xx_usbhsotg_slaves),
.class = &am35xx_usbotg_class,
};
/* MMC/SD/SDIO common */
static struct omap_hwmod_class_sysconfig omap34xx_mmc_sysc = {
.rev_offs = 0x1fc,
.sysc_offs = 0x10,
.syss_offs = 0x14,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap34xx_mmc_class = {
.name = "mmc",
.sysc = &omap34xx_mmc_sysc,
};
/* MMC/SD/SDIO1 */
static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = {
{ .irq = 83, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 61, },
{ .name = "rx", .dma_req = 62, },
{ .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
{ .role = "dbck", .clk = "omap_32k_fck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_mmc1_slaves[] = {
&omap3xxx_l4_core__mmc1,
};
static struct omap_mmc_dev_attr mmc1_dev_attr = {
.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};
/* See 35xx errata 2.1.1.128 in SPRZ278F */
static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = {
.flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT |
OMAP_HSMMC_BROKEN_MULTIBLOCK_READ),
};
static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = {
.name = "mmc1",
.mpu_irqs = omap34xx_mmc1_mpu_irqs,
.sdma_reqs = omap34xx_mmc1_sdma_reqs,
.opt_clks = omap34xx_mmc1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
.main_clk = "mmchs1_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MMC1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
},
},
.dev_attr = &mmc1_pre_es3_dev_attr,
.slaves = omap3xxx_mmc1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves),
.class = &omap34xx_mmc_class,
};
static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = {
.name = "mmc1",
.mpu_irqs = omap34xx_mmc1_mpu_irqs,
.sdma_reqs = omap34xx_mmc1_sdma_reqs,
.opt_clks = omap34xx_mmc1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
.main_clk = "mmchs1_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MMC1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT,
},
},
.dev_attr = &mmc1_dev_attr,
.slaves = omap3xxx_mmc1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves),
.class = &omap34xx_mmc_class,
};
/* MMC/SD/SDIO2 */
static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = {
{ .irq = INT_24XX_MMC2_IRQ, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 47, },
{ .name = "rx", .dma_req = 48, },
{ .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
{ .role = "dbck", .clk = "omap_32k_fck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
&omap3xxx_l4_core__mmc2,
};
/* See 35xx errata 2.1.1.128 in SPRZ278F */
static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = {
.flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};
static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = {
.name = "mmc2",
.mpu_irqs = omap34xx_mmc2_mpu_irqs,
.sdma_reqs = omap34xx_mmc2_sdma_reqs,
.opt_clks = omap34xx_mmc2_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
.main_clk = "mmchs2_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MMC2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
},
},
.dev_attr = &mmc2_pre_es3_dev_attr,
.slaves = omap3xxx_mmc2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves),
.class = &omap34xx_mmc_class,
};
static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = {
.name = "mmc2",
.mpu_irqs = omap34xx_mmc2_mpu_irqs,
.sdma_reqs = omap34xx_mmc2_sdma_reqs,
.opt_clks = omap34xx_mmc2_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
.main_clk = "mmchs2_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MMC2_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT,
},
},
.slaves = omap3xxx_mmc2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves),
.class = &omap34xx_mmc_class,
};
/* MMC/SD/SDIO3 */
static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = {
{ .irq = 94, },
{ .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = {
{ .name = "tx", .dma_req = 77, },
{ .name = "rx", .dma_req = 78, },
{ .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
{ .role = "dbck", .clk = "omap_32k_fck", },
};
static struct omap_hwmod_ocp_if *omap3xxx_mmc3_slaves[] = {
&omap3xxx_l4_core__mmc3,
};
static struct omap_hwmod omap3xxx_mmc3_hwmod = {
.name = "mmc3",
.mpu_irqs = omap34xx_mmc3_mpu_irqs,
.sdma_reqs = omap34xx_mmc3_sdma_reqs,
.opt_clks = omap34xx_mmc3_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc3_opt_clks),
.main_clk = "mmchs3_fck",
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
.module_bit = OMAP3430_EN_MMC3_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430_ST_MMC3_SHIFT,
},
},
.slaves = omap3xxx_mmc3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_mmc3_slaves),
.class = &omap34xx_mmc_class,
};
/*
* 'usb_host_hs' class
* high-speed multi-port usb host controller
*/
static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = {
.master = &omap3xxx_usb_host_hs_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.clk = "core_l3_ick",
.user = OCP_USER_MPU,
};
static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = {
.name = "usb_host_hs",
.sysc = &omap3xxx_usb_host_hs_sysc,
};
static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_masters[] = {
&omap3xxx_usb_host_hs__l3_main_2,
};
static struct omap_hwmod_addr_space omap3xxx_usb_host_hs_addrs[] = {
{
.name = "uhh",
.pa_start = 0x48064000,
.pa_end = 0x480643ff,
.flags = ADDR_TYPE_RT
},
{
.name = "ohci",
.pa_start = 0x48064400,
.pa_end = 0x480647ff,
},
{
.name = "ehci",
.pa_start = 0x48064800,
.pa_end = 0x48064cff,
},
{}
};
static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_usb_host_hs_hwmod,
.clk = "usbhost_ick",
.addr = omap3xxx_usb_host_hs_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_slaves[] = {
&omap3xxx_l4_core__usb_host_hs,
};
static struct omap_hwmod_opt_clk omap3xxx_usb_host_hs_opt_clks[] = {
{ .role = "ehci_logic_fck", .clk = "usbhost_120m_fck", },
};
static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
{ .name = "ohci-irq", .irq = 76 },
{ .name = "ehci-irq", .irq = 77 },
{ .irq = -1 }
};
static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
.name = "usb_host_hs",
.class = &omap3xxx_usb_host_hs_hwmod_class,
.clkdm_name = "l3_init_clkdm",
.mpu_irqs = omap3xxx_usb_host_hs_irqs,
.main_clk = "usbhost_48m_fck",
.prcm = {
.omap2 = {
.module_offs = OMAP3430ES2_USBHOST_MOD,
.prcm_reg_id = 1,
.module_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
.idlest_reg_id = 1,
.idlest_idle_bit = OMAP3430ES2_ST_USBHOST_IDLE_SHIFT,
.idlest_stdby_bit = OMAP3430ES2_ST_USBHOST_STDBY_SHIFT,
},
},
.opt_clks = omap3xxx_usb_host_hs_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_opt_clks),
.slaves = omap3xxx_usb_host_hs_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_slaves),
.masters = omap3xxx_usb_host_hs_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_masters),
/*
* Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
* ID: i660
*
* Description:
* In the following configuration:
* - the USBHOST module is set to smart-idle mode
* - the PRCM asserts idle_req to the USBHOST module (this typically
* happens when the system is going to a low-power mode: all ports
* have been suspended, the master part of the USBHOST module has
* entered the standby state, and SW has cut the functional clocks)
* - a USBHOST interrupt occurs before the module is able to answer
* idle_ack, typically a remote-wakeup IRQ.
* The USBHOST module then enters a deadlock situation where it is
* no longer accessible or functional.
*
* Workaround:
* Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
*/
/*
* Errata: USB host EHCI may stall when entering smart-standby mode
* ID: i571
*
* Description:
* When the USBHOST module is set to smart-standby mode, and when it is
* ready to enter the standby state (i.e. all ports are suspended and
* all attached devices are in suspend mode), then it can wrongly assert
* the Mstandby signal too early while there are still some residual OCP
* transactions ongoing. If this condition occurs, the internal state
* machine may go to an undefined state and the USB link may be stuck
* upon the next resume.
*
* Workaround:
* Don't use smart standby; use only force standby,
* hence HWMOD_SWSUP_MSTANDBY
*/
/*
 * During system boot, if the hwmod framework resets the module,
 * the module is left with smart-idle settings, which can lead to
 * the deadlock described above (erratum i660). So don't reset the
 * module during boot; use HWMOD_INIT_NO_RESET.
 */
.flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
HWMOD_INIT_NO_RESET,
};
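/*
 * What HWMOD_SWSUP_SIDLE / HWMOD_SWSUP_MSTANDBY mean for the errata
 * workarounds above, as a sketch (assumed from _enable_sysc() and
 * _idle_sysc() in omap_hwmod.c): smart-idle is never programmed;
 * the core toggles between no-idle and force-idle under software
 * control instead:
 *
 * on enable:
 * idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
 * HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
 * _set_slave_idlemode(oh, idlemode, &v);
 *
 * on idle:
 * idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
 * HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
 * _set_slave_idlemode(oh, idlemode, &v);
 */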
/*
* 'usb_tll_hs' class
* usb_tll_hs module is the adapter on the usb_host_hs ports
*/
static struct omap_hwmod_class_sysconfig omap3xxx_usb_tll_hs_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = {
.name = "usb_tll_hs",
.sysc = &omap3xxx_usb_tll_hs_sysc,
};
static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
{ .name = "tll-irq", .irq = 78 },
{ .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_usb_tll_hs_addrs[] = {
{
.name = "tll",
.pa_start = 0x48062000,
.pa_end = 0x48062fff,
.flags = ADDR_TYPE_RT
},
{}
};
static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_usb_tll_hs_hwmod,
.clk = "usbtll_ick",
.addr = omap3xxx_usb_tll_hs_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
static struct omap_hwmod_ocp_if *omap3xxx_usb_tll_hs_slaves[] = {
&omap3xxx_l4_core__usb_tll_hs,
};
static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
.name = "usb_tll_hs",
.class = &omap3xxx_usb_tll_hs_hwmod_class,
.clkdm_name = "l3_init_clkdm",
.mpu_irqs = omap3xxx_usb_tll_hs_irqs,
.main_clk = "usbtll_fck",
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 3,
.module_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
.idlest_reg_id = 3,
.idlest_idle_bit = OMAP3430ES2_ST_USBTLL_SHIFT,
},
},
.slaves = omap3xxx_usb_tll_hs_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_usb_tll_hs_slaves),
};
static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_l3_main_hwmod,
&omap3xxx_l4_core_hwmod,
&omap3xxx_l4_per_hwmod,
&omap3xxx_l4_wkup_hwmod,
&omap3xxx_mmc3_hwmod,
&omap3xxx_mpu_hwmod,
&omap3xxx_timer1_hwmod,
&omap3xxx_timer2_hwmod,
&omap3xxx_timer3_hwmod,
&omap3xxx_timer4_hwmod,
&omap3xxx_timer5_hwmod,
&omap3xxx_timer6_hwmod,
&omap3xxx_timer7_hwmod,
&omap3xxx_timer8_hwmod,
&omap3xxx_timer9_hwmod,
&omap3xxx_timer10_hwmod,
&omap3xxx_timer11_hwmod,
&omap3xxx_wd_timer2_hwmod,
&omap3xxx_uart1_hwmod,
&omap3xxx_uart2_hwmod,
&omap3xxx_uart3_hwmod,
/* i2c class */
&omap3xxx_i2c1_hwmod,
&omap3xxx_i2c2_hwmod,
&omap3xxx_i2c3_hwmod,
/* gpio class */
&omap3xxx_gpio1_hwmod,
&omap3xxx_gpio2_hwmod,
&omap3xxx_gpio3_hwmod,
&omap3xxx_gpio4_hwmod,
&omap3xxx_gpio5_hwmod,
&omap3xxx_gpio6_hwmod,
/* dma_system class*/
&omap3xxx_dma_system_hwmod,
/* mcbsp class */
&omap3xxx_mcbsp1_hwmod,
&omap3xxx_mcbsp2_hwmod,
&omap3xxx_mcbsp3_hwmod,
&omap3xxx_mcbsp4_hwmod,
&omap3xxx_mcbsp5_hwmod,
&omap3xxx_mcbsp2_sidetone_hwmod,
&omap3xxx_mcbsp3_sidetone_hwmod,
/* mcspi class */
&omap34xx_mcspi1,
&omap34xx_mcspi2,
&omap34xx_mcspi3,
&omap34xx_mcspi4,
NULL,
};
/* GP-only hwmods */
static __initdata struct omap_hwmod *omap3xxx_gp_hwmods[] = {
&omap3xxx_timer12_hwmod,
NULL
};
/* 3430ES1-only hwmods */
static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
&omap3430es1_dss_core_hwmod,
NULL
};
/* 3430ES2+-only hwmods */
static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
&omap3xxx_dss_core_hwmod,
&omap3xxx_usbhsotg_hwmod,
&omap3xxx_usb_host_hs_hwmod,
&omap3xxx_usb_tll_hs_hwmod,
NULL
};
/* <= 3430ES3-only hwmods */
static struct omap_hwmod *omap3430_pre_es3_hwmods[] __initdata = {
&omap3xxx_pre_es3_mmc1_hwmod,
&omap3xxx_pre_es3_mmc2_hwmod,
NULL
};
/* 3430ES3+-only hwmods */
static struct omap_hwmod *omap3430_es3plus_hwmods[] __initdata = {
&omap3xxx_es3plus_mmc1_hwmod,
&omap3xxx_es3plus_mmc2_hwmod,
NULL
};
/* 34xx-only hwmods (all ES revisions) */
static __initdata struct omap_hwmod *omap34xx_hwmods[] = {
&omap3xxx_iva_hwmod,
&omap34xx_sr1_hwmod,
&omap34xx_sr2_hwmod,
&omap3xxx_mailbox_hwmod,
NULL
};
/* 36xx-only hwmods (all ES revisions) */
static __initdata struct omap_hwmod *omap36xx_hwmods[] = {
&omap3xxx_iva_hwmod,
&omap3xxx_uart4_hwmod,
&omap3xxx_dss_core_hwmod,
&omap36xx_sr1_hwmod,
&omap36xx_sr2_hwmod,
&omap3xxx_usbhsotg_hwmod,
&omap3xxx_mailbox_hwmod,
&omap3xxx_usb_host_hs_hwmod,
&omap3xxx_usb_tll_hs_hwmod,
&omap3xxx_es3plus_mmc1_hwmod,
&omap3xxx_es3plus_mmc2_hwmod,
NULL
};
static __initdata struct omap_hwmod *am35xx_hwmods[] = {
&omap3xxx_dss_core_hwmod, /* XXX ??? */
&am35xx_usbhsotg_hwmod,
&am35xx_uart4_hwmod,
&omap3xxx_usb_host_hs_hwmod,
&omap3xxx_usb_tll_hs_hwmod,
&omap3xxx_es3plus_mmc1_hwmod,
&omap3xxx_es3plus_mmc2_hwmod,
NULL
};
static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
/* dss class */
&omap3xxx_dss_dispc_hwmod,
&omap3xxx_dss_dsi1_hwmod,
&omap3xxx_dss_rfbi_hwmod,
&omap3xxx_dss_venc_hwmod,
NULL
};
int __init omap3xxx_hwmod_init(void)
{
int r;
struct omap_hwmod **h = NULL;
unsigned int rev;
/* Register hwmods common to all OMAP3 */
r = omap_hwmod_register(omap3xxx_hwmods);
if (r < 0)
return r;
/* Register GP-only hwmods. */
if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
r = omap_hwmod_register(omap3xxx_gp_hwmods);
if (r < 0)
return r;
}
rev = omap_rev();
/*
* Register hwmods common to individual OMAP3 families, all
* silicon revisions (e.g., 34xx, or AM3505/3517, or 36xx)
* All possible revisions should be included in this conditional.
*/
if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 ||
rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
h = omap34xx_hwmods;
} else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) {
h = am35xx_hwmods;
} else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
rev == OMAP3630_REV_ES1_2) {
h = omap36xx_hwmods;
} else {
WARN(1, "OMAP3 hwmod family init: unknown chip type\n");
return -EINVAL;
}
r = omap_hwmod_register(h);
if (r < 0)
return r;
/*
* Register hwmods specific to certain ES levels of a
* particular family of silicon (e.g., 34xx ES1.0)
*/
h = NULL;
if (rev == OMAP3430_REV_ES1_0) {
h = omap3430es1_hwmods;
} else if (rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1 ||
rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 ||
rev == OMAP3430_REV_ES3_1_2) {
h = omap3430es2plus_hwmods;
}
if (h) {
r = omap_hwmod_register(h);
if (r < 0)
return r;
}
h = NULL;
if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 ||
rev == OMAP3430_REV_ES2_1) {
h = omap3430_pre_es3_hwmods;
} else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 ||
rev == OMAP3430_REV_ES3_1_2) {
h = omap3430_es3plus_hwmods;
}
if (h) {
	r = omap_hwmod_register(h);
	if (r < 0)
		return r;
}
/*
* DSS code presumes that dss_core hwmod is handled first,
* _before_ any other DSS related hwmods so register common
* DSS hwmods last to ensure that dss_core is already registered.
* Otherwise strange things may happen; for example, if dispc
* is handled before dss_core and DSS was enabled by the bootloader,
* DISPC will be reset with outputs enabled, which sometimes leads
* to an unrecoverable L3 error.
* XXX The long-term fix to this is to ensure modules are set up
* in dependency order in the hwmod core code.
*/
r = omap_hwmod_register(omap3xxx_dss_hwmods);
return r;
}
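/*
 * Usage sketch (editorial addition, not part of the original file):
 * the SoC early-init code is expected to call omap3xxx_hwmod_init()
 * exactly once, before any hwmod lookups or omap_device creation.
 * The caller below is hypothetical.
 */
#if 0
static void __init example_omap3_init_early(void)
{
	/* Register all OMAP3 hwmods; a failure here is fatal for PM */
	if (omap3xxx_hwmod_init())
		pr_err("omap_hwmod: OMAP3 hwmod init failed\n");
}
#endif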
| gpl-2.0 |
agat63/N861_ZTE_kernel | drivers/ide/au1xxx-ide.c | 3991 | 15330 | /*
* BRIEF MODULE DESCRIPTION
* AMD Alchemy Au1xxx IDE interface routines over the Static Bus
*
* Copyright (c) 2003-2005 AMD, Personal Connectivity Solutions
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later
* version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Note: for more information, please refer to the "AMD Alchemy Au1200/Au1550
* IDE Interface and Linux Device Driver" Application Note.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_ide.h>
#define DRV_NAME "au1200-ide"
#define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>"
/* enable the burstmode in the dbdma */
#define IDE_AU1XXX_BURSTMODE 1
static _auide_hwif auide_hwif;
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
static inline void auide_insw(unsigned long port, void *addr, u32 count)
{
_auide_hwif *ahwif = &auide_hwif;
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
if (!au1xxx_dbdma_put_dest(ahwif->rx_chan, virt_to_phys(addr),
count << 1, DDMA_FLAGS_NOIE)) {
printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
return;
}
ctp = *((chan_tab_t **)ahwif->rx_chan);
dp = ctp->cur_ptr;
while (dp->dscr_cmd0 & DSCR_CMD0_V)
;
ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}
static inline void auide_outsw(unsigned long port, void *addr, u32 count)
{
_auide_hwif *ahwif = &auide_hwif;
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
if (!au1xxx_dbdma_put_source(ahwif->tx_chan, virt_to_phys(addr),
count << 1, DDMA_FLAGS_NOIE)) {
printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
return;
}
ctp = *((chan_tab_t **)ahwif->tx_chan);
dp = ctp->cur_ptr;
while (dp->dscr_cmd0 & DSCR_CMD0_V)
;
ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
}
static void au1xxx_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
auide_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}
static void au1xxx_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
void *buf, unsigned int len)
{
auide_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}
#endif
static void au1xxx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
switch (drive->pio_mode - XFER_PIO_0) {
case 0:
mem_sttime = SBC_IDE_TIMING(PIO0);
/* set configuration for RCS2# */
mem_stcfg |= TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS;
break;
case 1:
mem_sttime = SBC_IDE_TIMING(PIO1);
/* set configuration for RCS2# */
mem_stcfg |= TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS;
break;
case 2:
mem_sttime = SBC_IDE_TIMING(PIO2);
/* set configuration for RCS2# */
mem_stcfg &= ~TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS;
break;
case 3:
mem_sttime = SBC_IDE_TIMING(PIO3);
/* set configuration for RCS2# */
mem_stcfg &= ~TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS;
break;
case 4:
mem_sttime = SBC_IDE_TIMING(PIO4);
/* set configuration for RCS2# */
mem_stcfg &= ~TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS;
break;
}
au_writel(mem_sttime, MEM_STTIME2);
au_writel(mem_stcfg, MEM_STCFG2);
}
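/*
 * Illustrative sketch (editorial addition): the per-mode programming in
 * au1xxx_set_pio_mode() above is regular enough to be table-driven.
 * The hypothetical table below merely restates the switch cases; it
 * assumes the SBC_IDE_* macros expand to compile-time constants.
 */
#if 0
struct au1xxx_pio_timing {
	int sttime;	/* value written to MEM_STTIME2 */
	int set_ts;	/* nonzero: set TS_MASK, else clear it */
	int stcfg_bits;	/* TCSOE/TOECS bits to OR into MEM_STCFG2 */
};

static const struct au1xxx_pio_timing au1xxx_pio_timings[] = {
	{ SBC_IDE_TIMING(PIO0), 1, SBC_IDE_PIO0_TCSOE | SBC_IDE_PIO0_TOECS },
	{ SBC_IDE_TIMING(PIO1), 1, SBC_IDE_PIO1_TCSOE | SBC_IDE_PIO1_TOECS },
	{ SBC_IDE_TIMING(PIO2), 0, SBC_IDE_PIO2_TCSOE | SBC_IDE_PIO2_TOECS },
	{ SBC_IDE_TIMING(PIO3), 0, SBC_IDE_PIO3_TCSOE | SBC_IDE_PIO3_TOECS },
	{ SBC_IDE_TIMING(PIO4), 0, SBC_IDE_PIO4_TCSOE | SBC_IDE_PIO4_TOECS },
};
#endif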
static void auide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
switch (drive->dma_mode) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
case XFER_MW_DMA_2:
mem_sttime = SBC_IDE_TIMING(MDMA2);
/* set configuration for RCS2# */
mem_stcfg &= ~TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_MDMA2_TCSOE | SBC_IDE_MDMA2_TOECS;
break;
case XFER_MW_DMA_1:
mem_sttime = SBC_IDE_TIMING(MDMA1);
/* set configuration for RCS2# */
mem_stcfg &= ~TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_MDMA1_TCSOE | SBC_IDE_MDMA1_TOECS;
break;
case XFER_MW_DMA_0:
mem_sttime = SBC_IDE_TIMING(MDMA0);
/* set configuration for RCS2# */
mem_stcfg |= TS_MASK;
mem_stcfg &= ~TCSOE_MASK;
mem_stcfg &= ~TOECS_MASK;
mem_stcfg |= SBC_IDE_MDMA0_TCSOE | SBC_IDE_MDMA0_TOECS;
break;
#endif
}
au_writel(mem_sttime, MEM_STTIME2);
au_writel(mem_stcfg, MEM_STCFG2);
}
/*
* Multi-Word DMA + DbDMA functions
*/
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static int auide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
_auide_hwif *ahwif = &auide_hwif;
struct scatterlist *sg;
int i = cmd->sg_nents, count = 0;
int iswrite = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
/* Save for interrupt context */
ahwif->drive = drive;
/* fill the descriptors */
sg = hwif->sg_table;
while (i && sg_dma_len(sg)) {
u32 cur_addr;
u32 cur_len;
cur_addr = sg_dma_address(sg);
cur_len = sg_dma_len(sg);
while (cur_len) {
u32 flags = DDMA_FLAGS_NOIE;
unsigned int tc = (cur_len < 0xfe00) ? cur_len : 0xfe00;
if (++count >= PRD_ENTRIES) {
printk(KERN_WARNING "%s: DMA table too small\n",
drive->name);
return 0;
}
/* Let's enable the interrupt for the last descriptor only */
if (i == 1)
flags = DDMA_FLAGS_IE;
else
flags = DDMA_FLAGS_NOIE;
if (iswrite) {
if (!au1xxx_dbdma_put_source(ahwif->tx_chan,
sg_phys(sg), tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__func__, __LINE__);
}
} else {
if (!au1xxx_dbdma_put_dest(ahwif->rx_chan,
sg_phys(sg), tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__func__, __LINE__);
}
}
cur_addr += tc;
cur_len -= tc;
}
sg = sg_next(sg);
i--;
}
if (count)
return 1;
return 0; /* revert to PIO for this request */
}
static int auide_dma_end(ide_drive_t *drive)
{
return 0;
}
static void auide_dma_start(ide_drive_t *drive)
{
}
static int auide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
if (auide_build_dmatable(drive, cmd) == 0)
return 1;
return 0;
}
static int auide_dma_test_irq(ide_drive_t *drive)
{
/* If dbdma didn't execute the STOP command yet, the
* active bit is still set
*/
drive->waiting_for_dma++;
if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
drive->name);
return 1;
}
udelay(10);
return 0;
}
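/*
 * Editorial note: the IDE core may call dma_test_irq() repeatedly while
 * a transfer is outstanding, so the waiting_for_dma counter above turns
 * those repeated calls into a coarse timeout; DMA_WAIT_TIMEOUT is
 * provided by the Au1xxx IDE headers included above.
 */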
static void auide_dma_host_set(ide_drive_t *drive, int on)
{
}
static void auide_ddma_tx_callback(int irq, void *param)
{
}
static void auide_ddma_rx_callback(int irq, void *param)
{
}
#endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
{
dev->dev_id = dev_id;
dev->dev_physaddr = (u32)IDE_PHYS_ADDR;
dev->dev_intlevel = 0;
dev->dev_intpolarity = 0;
dev->dev_tsize = tsize;
dev->dev_devwidth = devwidth;
dev->dev_flags = flags;
}
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static const struct ide_dma_ops au1xxx_dma_ops = {
.dma_host_set = auide_dma_host_set,
.dma_setup = auide_dma_setup,
.dma_start = auide_dma_start,
.dma_end = auide_dma_end,
.dma_test_irq = auide_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
};
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
_auide_hwif *auide = &auide_hwif;
dbdev_tab_t source_dev_tab, target_dev_tab;
u32 dev_id, tsize, devwidth, flags;
dev_id = IDE_DDMA_REQ;
tsize = 8; /* 1 */
devwidth = 32; /* 16 */
#ifdef IDE_AU1XXX_BURSTMODE
flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
flags = DEV_FLAGS_SYNC;
#endif
/* setup dev_tab for tx channel */
auide_init_dbdma_dev( &source_dev_tab,
dev_id,
tsize, devwidth, DEV_FLAGS_OUT | flags);
auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
auide_init_dbdma_dev( &source_dev_tab,
dev_id,
tsize, devwidth, DEV_FLAGS_IN | flags);
auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
/* We also need to add a target device for the DMA */
auide_init_dbdma_dev( &target_dev_tab,
(u32)DSCR_CMD0_ALWAYS,
tsize, devwidth, DEV_FLAGS_ANYUSE);
auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);
/* Get a channel for TX */
auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
auide->tx_dev_id,
auide_ddma_tx_callback,
(void*)auide);
/* Get a channel for RX */
auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
auide->target_dev_id,
auide_ddma_rx_callback,
(void*)auide);
auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
NUM_DESCRIPTORS);
auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
NUM_DESCRIPTORS);
/* Bail out if the IDE core could not allocate the DMA engine */
if (ide_allocate_dma_engine(hwif))
	return -ENOMEM;
au1xxx_dbdma_start( auide->tx_chan );
au1xxx_dbdma_start( auide->rx_chan );
return 0;
}
#else
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
_auide_hwif *auide = &auide_hwif;
dbdev_tab_t source_dev_tab;
int flags;
#ifdef IDE_AU1XXX_BURSTMODE
flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
flags = DEV_FLAGS_SYNC;
#endif
/* setup dev_tab for tx channel */
auide_init_dbdma_dev( &source_dev_tab,
(u32)DSCR_CMD0_ALWAYS,
8, 32, DEV_FLAGS_OUT | flags);
auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
auide_init_dbdma_dev( &source_dev_tab,
(u32)DSCR_CMD0_ALWAYS,
8, 32, DEV_FLAGS_IN | flags);
auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
/* Get a channel for TX */
auide->tx_chan = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
auide->tx_dev_id,
NULL,
(void*)auide);
/* Get a channel for RX */
auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
DSCR_CMD0_ALWAYS,
NULL,
(void*)auide);
auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
NUM_DESCRIPTORS);
auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
NUM_DESCRIPTORS);
au1xxx_dbdma_start( auide->tx_chan );
au1xxx_dbdma_start( auide->rx_chan );
return 0;
}
#endif
static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif)
{
int i;
unsigned long *ata_regs = hw->io_ports_array;
/* FIXME? */
for (i = 0; i < 8; i++)
*ata_regs++ = ahwif->regbase + (i << IDE_REG_SHIFT);
/* set the Alternative Status register */
*ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
}
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
static const struct ide_tp_ops au1xxx_tp_ops = {
.exec_command = ide_exec_command,
.read_status = ide_read_status,
.read_altstatus = ide_read_altstatus,
.write_devctl = ide_write_devctl,
.dev_select = ide_dev_select,
.tf_load = ide_tf_load,
.tf_read = ide_tf_read,
.input_data = au1xxx_input_data,
.output_data = au1xxx_output_data,
};
#endif
static const struct ide_port_ops au1xxx_port_ops = {
.set_pio_mode = au1xxx_set_pio_mode,
.set_dma_mode = auide_set_dma_mode,
};
static const struct ide_port_info au1xxx_port_info = {
.init_dma = auide_ddma_init,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
.tp_ops = &au1xxx_tp_ops,
#endif
.port_ops = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
.dma_ops = &au1xxx_dma_ops,
#endif
.host_flags = IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_NO_IO_32BIT |
IDE_HFLAG_UNMASK_IRQS,
.pio_mask = ATA_PIO4,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
.mwdma_mask = ATA_MWDMA2,
#endif
.chipset = ide_au1xxx,
};
static int au_ide_probe(struct platform_device *dev)
{
_auide_hwif *ahwif = &auide_hwif;
struct resource *res;
struct ide_host *host;
int ret = 0;
struct ide_hw hw, *hws[] = { &hw };
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
char *mode = "MWDMA2";
#elif defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
char *mode = "PIO+DDMA(offload)";
#endif
memset(&auide_hwif, 0, sizeof(_auide_hwif));
ahwif->irq = platform_get_irq(dev, 0);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res == NULL) {
pr_debug("%s %d: no base address\n", DRV_NAME, dev->id);
ret = -ENODEV;
goto out;
}
if (ahwif->irq < 0) {
pr_debug("%s %d: no IRQ\n", DRV_NAME, dev->id);
ret = -ENODEV;
goto out;
}
if (!request_mem_region(res->start, resource_size(res), dev->name)) {
pr_debug("%s: request_mem_region failed\n", DRV_NAME);
ret = -EBUSY;
goto out;
}
ahwif->regbase = (u32)ioremap(res->start, resource_size(res));
if (ahwif->regbase == 0) {
ret = -ENOMEM;
goto out;
}
memset(&hw, 0, sizeof(hw));
auide_setup_ports(&hw, ahwif);
hw.irq = ahwif->irq;
hw.dev = &dev->dev;
ret = ide_host_add(&au1xxx_port_info, hws, 1, &host);
if (ret)
goto out;
auide_hwif.hwif = host->ports[0];
platform_set_drvdata(dev, host);
printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode);
out:
return ret;
}
static int au_ide_remove(struct platform_device *dev)
{
struct resource *res;
struct ide_host *host = platform_get_drvdata(dev);
_auide_hwif *ahwif = &auide_hwif;
ide_host_remove(host);
iounmap((void *)ahwif->regbase);
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
return 0;
}
static struct platform_driver au1200_ide_driver = {
.driver = {
.name = "au1200-ide",
.owner = THIS_MODULE,
},
.probe = au_ide_probe,
.remove = au_ide_remove,
};
static int __init au_ide_init(void)
{
return platform_driver_register(&au1200_ide_driver);
}
static void __exit au_ide_exit(void)
{
platform_driver_unregister(&au1200_ide_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AU1200 IDE driver");
module_init(au_ide_init);
module_exit(au_ide_exit);
| gpl-2.0 |
Team-Blackout/AOZP_Blackout_edition | drivers/ide/tc86c001.c | 3991 | 7733 | /*
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2005-2006 MontaVista Software, Inc. <source@mvista.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ide.h>
#define DRV_NAME "tc86c001"
static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
u16 mode, scr = inw(scr_port);
const u8 speed = drive->dma_mode;
switch (speed) {
case XFER_UDMA_4: mode = 0x00c0; break;
case XFER_UDMA_3: mode = 0x00b0; break;
case XFER_UDMA_2: mode = 0x00a0; break;
case XFER_UDMA_1: mode = 0x0090; break;
case XFER_UDMA_0: mode = 0x0080; break;
case XFER_MW_DMA_2: mode = 0x0070; break;
case XFER_MW_DMA_1: mode = 0x0060; break;
case XFER_MW_DMA_0: mode = 0x0050; break;
case XFER_PIO_4: mode = 0x0400; break;
case XFER_PIO_3: mode = 0x0300; break;
case XFER_PIO_2: mode = 0x0200; break;
case XFER_PIO_1: mode = 0x0100; break;
case XFER_PIO_0:
default: mode = 0x0000; break;
}
scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
scr |= mode;
outw(scr, scr_port);
}
static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
drive->dma_mode = drive->pio_mode;
tc86c001_set_mode(hwif, drive);
}
/*
* HACKITY HACK
*
* This is a workaround for the limitation 5 of the TC86C001 IDE controller:
* if a DMA transfer terminates prematurely, the controller leaves the device's
* interrupt request (INTRQ) pending and does not generate a PCI interrupt (or
* set the interrupt bit in the DMA status register), thus no PCI interrupt
* will occur until a DMA transfer has been successfully completed.
*
* We work around this by initiating a dummy, zero-length DMA transfer
* on DMA timeout expiration. I found no better way to do this with the
* current IDE core than to temporarily replace a higher level driver's
* timer expiry handler with our own, backing up to that handler in case
* our recovery fails.
*/
static int tc86c001_timer_expiry(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
ide_expiry_t *expiry = ide_get_hwifdata(hwif);
u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
/* Restore a higher level driver's expiry handler first. */
hwif->expiry = expiry;
if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
"attempting recovery...\n", drive->name);
/* Stop DMA */
outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
/* Setup the dummy DMA transfer */
outw(0, sc_base + 0x0a); /* Sector Count */
outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
/* Start the dummy DMA transfer */
/* clear R_OR_WCTR for write */
outb(0x00, hwif->dma_base + ATA_DMA_CMD);
/* set START_STOPBM */
outb(0x01, hwif->dma_base + ATA_DMA_CMD);
/*
* If an interrupt was pending, it should come thru shortly.
* If not, a higher level driver's expiry handler should
* eventually cause some kind of recovery from the DMA stall.
*/
return WAIT_MIN_SLEEP;
}
/* Chain to the restored expiry handler if DMA wasn't active. */
if (likely(expiry != NULL))
return expiry(drive);
/* If there was no handler, "emulate" that for ide_timer_expiry()... */
return -1;
}
static void tc86c001_dma_start(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
unsigned long nsectors = blk_rq_sectors(hwif->rq);
/*
* We have to manually load the sector count and size into
* the appropriate system control registers for DMA to work
* with LBA48 and ATAPI devices...
*/
outw(nsectors, sc_base + 0x0a); /* Sector Count */
outw(SECTOR_SIZE / 2, twcr_port); /* Transfer Word Count 1/2 */
/* Install our timeout expiry hook, saving the current handler... */
ide_set_hwifdata(hwif, hwif->expiry);
hwif->expiry = &tc86c001_timer_expiry;
ide_dma_start(drive);
}
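/*
 * Editorial note: the expiry hook installed above pairs with
 * tc86c001_timer_expiry(), which restores the saved handler as its very
 * first action, so the original handler is never leaked whether or not
 * the dummy-transfer recovery succeeds (see the HACKITY HACK comment).
 */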
static u8 tc86c001_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long sc_base = pci_resource_start(dev, 5);
u16 scr1 = inw(sc_base + 0x00);
/*
* System Control 1 Register bit 13 (PDIAGN):
* 0=80-pin cable, 1=40-pin cable
*/
return (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long sc_base = pci_resource_start(dev, 5);
u16 scr1 = inw(sc_base + 0x00);
/* System Control 1 Register bit 15 (Soft Reset) set */
outw(scr1 | 0x8000, sc_base + 0x00);
/* System Control 1 Register bit 14 (FIFO Reset) set */
outw(scr1 | 0x4000, sc_base + 0x00);
/* System Control 1 Register: reset clear */
outw(scr1 & ~0xc000, sc_base + 0x00);
/* Store the system control register base for convenience... */
hwif->config_data = sc_base;
if (!hwif->dma_base)
return;
/*
* Sector Count Control Register bits 0 and 1 set:
* software sets Sector Count Register for master and slave device
*/
outw(0x0003, sc_base + 0x0c);
/* Sector Count Register limit */
hwif->rqsize = 0xffff;
}
static const struct ide_port_ops tc86c001_port_ops = {
.set_pio_mode = tc86c001_set_pio_mode,
.set_dma_mode = tc86c001_set_mode,
.cable_detect = tc86c001_cable_detect,
};
static const struct ide_dma_ops tc86c001_dma_ops = {
.dma_host_set = ide_dma_host_set,
.dma_setup = ide_dma_setup,
.dma_start = tc86c001_dma_start,
.dma_end = ide_dma_end,
.dma_test_irq = ide_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
.dma_timer_expiry = ide_dma_sff_timer_expiry,
.dma_sff_read_status = ide_dma_sff_read_status,
};
static const struct ide_port_info tc86c001_chipset __devinitdata = {
.name = DRV_NAME,
.init_hwif = init_hwif_tc86c001,
.port_ops = &tc86c001_port_ops,
.dma_ops = &tc86c001_dma_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
};
static int __devinit tc86c001_init_one(struct pci_dev *dev,
const struct pci_device_id *id)
{
int rc;
rc = pci_enable_device(dev);
if (rc)
goto out;
rc = pci_request_region(dev, 5, DRV_NAME);
if (rc) {
printk(KERN_ERR DRV_NAME ": system control regs already in use\n");
goto out_disable;
}
rc = ide_pci_init_one(dev, &tc86c001_chipset, NULL);
if (rc)
goto out_release;
goto out;
out_release:
pci_release_region(dev, 5);
out_disable:
pci_disable_device(dev);
out:
return rc;
}
static void __devexit tc86c001_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_release_region(dev, 5);
pci_disable_device(dev);
}
static const struct pci_device_id tc86c001_pci_tbl[] = {
{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE), 0 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl);
static struct pci_driver tc86c001_pci_driver = {
.name = "TC86C001",
.id_table = tc86c001_pci_tbl,
.probe = tc86c001_init_one,
.remove = __devexit_p(tc86c001_remove),
};
static int __init tc86c001_ide_init(void)
{
return ide_pci_register_driver(&tc86c001_pci_driver);
}
static void __exit tc86c001_ide_exit(void)
{
pci_unregister_driver(&tc86c001_pci_driver);
}
module_init(tc86c001_ide_init);
module_exit(tc86c001_ide_exit);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
LeroViten/LerNex-Bacon | arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 4503 | 16020 | /*
* SH7780 Setup
*
* Copyright (C) 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <cpu/dma-register.h>
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xffe00000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 40, 40, 40, 40 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.mapbase = 0xffe10000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
.scbrr_algo_id = SCBRR_ALGO_1,
.type = PORT_SCIF,
.irqs = { 76, 76, 76, 76 },
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channel_offset = 0x04,
.timer_bit = 0,
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 28,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu0_device = {
.name = "sh_tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channel_offset = 0x10,
.timer_bit = 1,
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 29,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu1_device = {
.name = "sh_tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.channel_offset = 0x1c,
.timer_bit = 2,
};
static struct resource tmu2_resources[] = {
[0] = {
.start = 0xffd80020,
.end = 0xffd8002f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 30,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu2_device = {
.name = "sh_tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct sh_timer_config tmu3_platform_data = {
.channel_offset = 0x04,
.timer_bit = 0,
};
static struct resource tmu3_resources[] = {
[0] = {
.start = 0xffdc0008,
.end = 0xffdc0013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 96,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu3_device = {
.name = "sh_tmu",
.id = 3,
.dev = {
.platform_data = &tmu3_platform_data,
},
.resource = tmu3_resources,
.num_resources = ARRAY_SIZE(tmu3_resources),
};
static struct sh_timer_config tmu4_platform_data = {
.channel_offset = 0x10,
.timer_bit = 1,
};
static struct resource tmu4_resources[] = {
[0] = {
.start = 0xffdc0014,
.end = 0xffdc001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 97,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu4_device = {
.name = "sh_tmu",
.id = 4,
.dev = {
.platform_data = &tmu4_platform_data,
},
.resource = tmu4_resources,
.num_resources = ARRAY_SIZE(tmu4_resources),
};
static struct sh_timer_config tmu5_platform_data = {
.channel_offset = 0x1c,
.timer_bit = 2,
};
static struct resource tmu5_resources[] = {
[0] = {
.start = 0xffdc0020,
.end = 0xffdc002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 98,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu5_device = {
.name = "sh_tmu",
.id = 5,
.dev = {
.platform_data = &tmu5_platform_data,
},
.resource = tmu5_resources,
.num_resources = ARRAY_SIZE(tmu5_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffe80000,
.end = 0xffe80000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = 20,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
/* DMA */
static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
{
.offset = 0,
.dmars = 0,
.dmars_bit = 0,
}, {
.offset = 0x10,
.dmars = 0,
.dmars_bit = 8,
}, {
.offset = 0x20,
.dmars = 4,
.dmars_bit = 0,
}, {
.offset = 0x30,
.dmars = 4,
.dmars_bit = 8,
}, {
.offset = 0x50,
.dmars = 8,
.dmars_bit = 0,
}, {
.offset = 0x60,
.dmars = 8,
.dmars_bit = 8,
}
};
static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
{
.offset = 0,
}, {
.offset = 0x10,
}, {
.offset = 0x20,
}, {
.offset = 0x30,
}, {
.offset = 0x50,
}, {
.offset = 0x60,
}
};
static const unsigned int ts_shift[] = TS_SHIFT;
static struct sh_dmae_pdata dma0_platform_data = {
.channel = sh7780_dmae0_channels,
.channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct sh_dmae_pdata dma1_platform_data = {
.channel = sh7780_dmae1_channels,
.channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
.ts_low_shift = CHCR_TS_LOW_SHIFT,
.ts_low_mask = CHCR_TS_LOW_MASK,
.ts_high_shift = CHCR_TS_HIGH_SHIFT,
.ts_high_mask = CHCR_TS_HIGH_MASK,
.ts_shift = ts_shift,
.ts_shift_num = ARRAY_SIZE(ts_shift),
.dmaor_init = DMAOR_INIT,
};
static struct resource sh7780_dmae0_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfc808020,
.end = 0xfc80808f,
.flags = IORESOURCE_MEM,
},
[1] = {
/* DMARSx */
.start = 0xfc809000,
.end = 0xfc80900b,
.flags = IORESOURCE_MEM,
},
{
/* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */
.name = "error_irq",
.start = 34,
.end = 34,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
static struct resource sh7780_dmae1_resources[] = {
[0] = {
/* Channel registers and DMAOR */
.start = 0xfc818020,
.end = 0xfc81808f,
.flags = IORESOURCE_MEM,
},
/* DMAC1 has no DMARS */
{
/* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */
.name = "error_irq",
.start = 46,
.end = 46,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
},
};
static struct platform_device dma0_device = {
.name = "sh-dma-engine",
.id = 0,
.resource = sh7780_dmae0_resources,
.num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
.dev = {
.platform_data = &dma0_platform_data,
},
};
static struct platform_device dma1_device = {
.name = "sh-dma-engine",
.id = 1,
.resource = sh7780_dmae1_resources,
.num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
.dev = {
.platform_data = &dma1_platform_data,
},
};
static struct platform_device *sh7780_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&tmu3_device,
&tmu4_device,
&tmu5_device,
&rtc_device,
&dma0_device,
&dma1_device,
};
static int __init sh7780_devices_setup(void)
{
return platform_add_devices(sh7780_devices,
ARRAY_SIZE(sh7780_devices));
}
arch_initcall(sh7780_devices_setup);
static struct platform_device *sh7780_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&tmu3_device,
&tmu4_device,
&tmu5_device,
};
void __init plat_early_device_setup(void)
{
if (mach_is_sh2007()) {
scif0_platform_data.scscr &= ~SCSCR_CKE1;
scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
scif1_platform_data.scscr &= ~SCSCR_CKE1;
scif1_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
}
early_platform_add_devices(sh7780_early_devices,
ARRAY_SIZE(sh7780_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
HUDI, DMAC0, SCIF0, DMAC1, CMT, HAC,
PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
SCIF1, SIOF, HSPI, MMCIF, TMU3, TMU4, TMU5, SSI, FLCTL, GPIO,
/* interrupt groups */
TMU012, TMU345,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660),
INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0),
INTC_VECT(DMAC0, 0x6c0),
INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0),
INTC_VECT(DMAC1, 0x7c0), INTC_VECT(DMAC1, 0x7e0),
INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80),
INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
INTC_VECT(DMAC1, 0xd80), INTC_VECT(DMAC1, 0xda0),
INTC_VECT(DMAC1, 0xdc0), INTC_VECT(DMAC1, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40),
INTC_VECT(SSI, 0xe80),
INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
{ 0, 0, 0, 0, 0, 0, GPIO, FLCTL,
SSI, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB,
PCIINTA, PCISERR, HAC, CMT, 0, 0, DMAC1, DMAC0,
HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1,
TMU2, TMU2_TICPI } },
{ 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } },
{ 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } },
{ 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC0, DMAC1 } },
{ 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC,
PCISERR, PCIINTA, } },
{ 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC,
PCIINTD, PCIC5 } },
{ 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF, HSPI, MMCIF, SSI } },
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
static struct intc_vect irq_vectors[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200),
};
static struct intc_mask_reg irq_mask_registers[] __initdata = {
{ 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg irq_prio_registers[] __initdata = {
{ 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg irq_sense_registers[] __initdata = {
{ 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg irq_ack_registers[] __initdata = {
{ 0xffd00024, 0, 32, /* INTREQ */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7780-irq", irq_vectors,
NULL, irq_mask_registers, irq_prio_registers,
irq_sense_registers, irq_ack_registers);
/* External interrupt pins in IRL mode */
static struct intc_vect irl_vectors[] __initdata = {
INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
INTC_VECT(IRL_HHHL, 0x3c0),
};
static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
{ 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
#define INTC_INTMSK1 0xffd00048
#define INTC_INTMSK2 0xffd40080
#define INTC_INTMSKCLR1 0xffd00068
#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
__raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
__raw_writel(0xc0000000, INTC_INTMSK1);
__raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, i.e. enable "SH-4 Mode" */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
__raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
__raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
__raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
__raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
__raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
BUG();
}
}
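/*
 * Usage sketch (editorial addition): the generic SH init code invokes
 * plat_irq_setup() itself; a board wiring the external pins as
 * individual IRQs would additionally select that mode from its own
 * init hook.  The function below is hypothetical.
 */
#if 0
static void __init example_board_init_irq(void)
{
	plat_irq_setup_pins(IRQ_MODE_IRQ);
}
#endif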
| gpl-2.0 |
davidevinavil/kernel_s500_cm10 | arch/sparc/kernel/auxio_64.c | 7575 | 3195 | /* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/auxio.h>
void __iomem *auxio_register = NULL;
EXPORT_SYMBOL(auxio_register);
enum auxio_type {
AUXIO_TYPE_NODEV,
AUXIO_TYPE_SBUS,
AUXIO_TYPE_EBUS
};
static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
static DEFINE_SPINLOCK(auxio_lock);
static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
{
if (auxio_register) {
unsigned long flags;
u8 regval, newval;
spin_lock_irqsave(&auxio_lock, flags);
regval = (ebus ?
(u8) readl(auxio_register) :
sbus_readb(auxio_register));
newval = regval | bits_on;
newval &= ~bits_off;
if (!ebus)
newval &= ~AUXIO_AUX1_MASK;
if (ebus)
writel((u32) newval, auxio_register);
else
sbus_writeb(newval, auxio_register);
spin_unlock_irqrestore(&auxio_lock, flags);
}
}
static void __auxio_set_bit(u8 bit, int on, int ebus)
{
u8 bits_on = bit;
u8 bits_off = 0;
if (!on) {
u8 tmp = bits_off;
bits_off = bits_on;
bits_on = tmp;
}
__auxio_rmw(bits_on, bits_off, ebus);
}
void auxio_set_led(int on)
{
int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
u8 bit;
bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
__auxio_set_bit(bit, on, ebus);
}
EXPORT_SYMBOL(auxio_set_led);
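/*
 * Usage sketch (editorial addition; hypothetical caller): drivers such
 * as the floppy driver toggle the activity LED around I/O.  AUXIO_LED_ON
 * is used later in this file; passing 0 simply clears the LED bit.
 */
#if 0
static void example_io_with_led(void)
{
	auxio_set_led(AUXIO_LED_ON);
	/* ... perform I/O ... */
	auxio_set_led(0);
}
#endif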
static void __auxio_sbus_set_lte(int on)
{
__auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
}
void auxio_set_lte(int on)
{
switch(auxio_devtype) {
case AUXIO_TYPE_SBUS:
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
/* FALL-THROUGH */
default:
break;
}
}
EXPORT_SYMBOL(auxio_set_lte);
static const struct of_device_id auxio_match[] = {
{
.name = "auxio",
},
{},
};
MODULE_DEVICE_TABLE(of, auxio_match);
static int __devinit auxio_probe(struct platform_device *dev)
{
struct device_node *dp = dev->dev.of_node;
unsigned long size;
if (!strcmp(dp->parent->name, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
} else if (!strcmp(dp->parent->name, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
printk(KERN_ERR "auxio: Unknown parent bus type [%s]\n",
       dp->parent->name);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
printk(KERN_INFO "AUXIO: Found device at %s\n",
dp->full_name);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
return 0;
}
static struct platform_driver auxio_driver = {
.probe = auxio_probe,
.driver = {
.name = "auxio",
.owner = THIS_MODULE,
.of_match_table = auxio_match,
},
};
static int __init auxio_init(void)
{
return platform_driver_register(&auxio_driver);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the floppy driver
* need to use the AUXIO register.
*/
fs_initcall(auxio_init);
| gpl-2.0 |
basr/Hammerhead | net/mac80211/spectmgmt.c | 7831 | 2891 | /*
* spectrum management
*
* Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2008, Intel Corporation
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "wme.h"
static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
struct ieee80211_msrment_ie *request_ie,
const u8 *da, const u8 *bssid,
u8 dialog_token)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *msr_report;
skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
sizeof(struct ieee80211_msrment_ie));
if (!skb)
return;
skb_reserve(skb, local->hw.extra_tx_headroom);
msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
memset(msr_report, 0, 24);
memcpy(msr_report->da, da, ETH_ALEN);
memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
memcpy(msr_report->bssid, bssid, ETH_ALEN);
msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
msr_report->u.action.u.measurement.action_code =
WLAN_ACTION_SPCT_MSR_RPRT;
msr_report->u.action.u.measurement.dialog_token = dialog_token;
msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
msr_report->u.action.u.measurement.length =
sizeof(struct ieee80211_msrment_ie);
memset(&msr_report->u.action.u.measurement.msr_elem, 0,
sizeof(struct ieee80211_msrment_ie));
msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
msr_report->u.action.u.measurement.msr_elem.mode |=
IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
size_t len)
{
/*
* Ignoring a measurement request is a spec violation.
* Mandatory measurements must be reported; optional
* measurements might be refused or reported incapable.
* For now, just refuse.
* TODO: Answer basic measurement as unmeasured.
*/
ieee80211_send_refuse_measurement_request(sdata,
&mgmt->u.action.u.measurement.msr_elem,
mgmt->sa, mgmt->bssid,
mgmt->u.action.u.measurement.dialog_token);
}
| gpl-2.0 |
Tesla-Redux-Devices/android_kernel_samsung_trlte | drivers/isdn/mISDN/dsp_tones.c | 9623 | 17374 | /*
* Audio support data for ISDN4Linux.
*
* Copyright Andreas Eversberg (jolly@eversberg.eu)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/gfp.h>
#include <linux/mISDNif.h>
#include <linux/mISDNdsp.h>
#include "core.h"
#include "dsp.h"
#define DATA_S sample_silence
#define SIZE_S (&sizeof_silence)
#define DATA_GA sample_german_all
#define SIZE_GA (&sizeof_german_all)
#define DATA_GO sample_german_old
#define SIZE_GO (&sizeof_german_old)
#define DATA_DT sample_american_dialtone
#define SIZE_DT (&sizeof_american_dialtone)
#define DATA_RI sample_american_ringing
#define SIZE_RI (&sizeof_american_ringing)
#define DATA_BU sample_american_busy
#define SIZE_BU (&sizeof_american_busy)
#define DATA_S1 sample_special1
#define SIZE_S1 (&sizeof_special1)
#define DATA_S2 sample_special2
#define SIZE_S2 (&sizeof_special2)
#define DATA_S3 sample_special3
#define SIZE_S3 (&sizeof_special3)
/***************/
/* tones loops */
/***************/
/* all tones are alaw encoded */
/* the last sample+1 is in phase with the first sample. the error is low */
static u8 sample_german_all[] = {
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
0xdc, 0xfc, 0x6c,
};
static u32 sizeof_german_all = sizeof(sample_german_all);
static u8 sample_german_old[] = {
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
0x8c,
};
static u32 sizeof_german_old = sizeof(sample_german_old);
static u8 sample_american_dialtone[] = {
0x2a, 0x18, 0x90, 0x6c, 0x4c, 0xbc, 0x4c, 0x6c,
0x10, 0x58, 0x32, 0xb9, 0x31, 0x2d, 0x8d, 0x0d,
0x8d, 0x2d, 0x31, 0x99, 0x0f, 0x28, 0x60, 0xf0,
0xd0, 0x50, 0xd0, 0x30, 0x60, 0x08, 0x8e, 0x67,
0x09, 0x19, 0x21, 0xe1, 0xd9, 0xb9, 0x29, 0x67,
0x83, 0x02, 0xce, 0xbe, 0xee, 0x1a, 0x1b, 0xef,
0xbf, 0xcf, 0x03, 0x82, 0x66, 0x28, 0xb8, 0xd8,
0xe0, 0x20, 0x18, 0x08, 0x66, 0x8f, 0x09, 0x61,
0x31, 0xd1, 0x51, 0xd1, 0xf1, 0x61, 0x29, 0x0e,
0x98, 0x30, 0x2c, 0x8c, 0x0c, 0x8c, 0x2c, 0x30,
0xb8, 0x33, 0x59, 0x11, 0x6d, 0x4d, 0xbd, 0x4d,
0x6d, 0x91, 0x19,
};
static u32 sizeof_american_dialtone = sizeof(sample_american_dialtone);
static u8 sample_american_ringing[] = {
0x2a, 0xe0, 0xac, 0x0c, 0xbc, 0x4c, 0x8c, 0x90,
0x48, 0xc7, 0xc1, 0xed, 0xcd, 0x4d, 0xcd, 0xed,
0xc1, 0xb7, 0x08, 0x30, 0xec, 0xcc, 0xcc, 0x8c,
0x10, 0x58, 0x1a, 0x99, 0x71, 0xed, 0x8d, 0x8d,
0x2d, 0x41, 0x89, 0x9e, 0x20, 0x70, 0x2c, 0xec,
0x2c, 0x70, 0x20, 0x86, 0x77, 0xe1, 0x31, 0x11,
0xd1, 0xf1, 0x81, 0x09, 0xa3, 0x56, 0x58, 0x00,
0x40, 0xc0, 0x60, 0x38, 0x46, 0x43, 0x57, 0x39,
0xd9, 0x59, 0x99, 0xc9, 0x77, 0x2f, 0x2e, 0xc6,
0xd6, 0x28, 0xd6, 0x36, 0x26, 0x2e, 0x8a, 0xa3,
0x43, 0x63, 0x4b, 0x4a, 0x62, 0x42, 0xa2, 0x8b,
0x2f, 0x27, 0x37, 0xd7, 0x29, 0xd7, 0xc7, 0x2f,
0x2e, 0x76, 0xc8, 0x98, 0x58, 0xd8, 0x38, 0x56,
0x42, 0x47, 0x39, 0x61, 0xc1, 0x41, 0x01, 0x59,
0x57, 0xa2, 0x08, 0x80, 0xf0, 0xd0, 0x10, 0x30,
0xe0, 0x76, 0x87, 0x21, 0x71, 0x2d, 0xed, 0x2d,
0x71, 0x21, 0x9f, 0x88, 0x40, 0x2c, 0x8c, 0x8c,
0xec, 0x70, 0x98, 0x1b, 0x59, 0x11, 0x8d, 0xcd,
0xcd, 0xed, 0x31, 0x09, 0xb6, 0xc0, 0xec, 0xcc,
0x4c, 0xcc, 0xec, 0xc0, 0xc6, 0x49, 0x91, 0x8d,
0x4d, 0xbd, 0x0d, 0xad, 0xe1,
};
static u32 sizeof_american_ringing = sizeof(sample_american_ringing);
static u8 sample_american_busy[] = {
0x2a, 0x00, 0x6c, 0x4c, 0x4c, 0x6c, 0xb0, 0x66,
0x99, 0x11, 0x6d, 0x8d, 0x2d, 0x41, 0xd7, 0x96,
0x60, 0xf0, 0x70, 0x40, 0x58, 0xf6, 0x53, 0x57,
0x09, 0x89, 0xd7, 0x5f, 0xe3, 0x2a, 0xe3, 0x5f,
0xd7, 0x89, 0x09, 0x57, 0x53, 0xf6, 0x58, 0x40,
0x70, 0xf0, 0x60, 0x96, 0xd7, 0x41, 0x2d, 0x8d,
0x6d, 0x11, 0x99, 0x66, 0xb0, 0x6c, 0x4c, 0x4c,
0x6c, 0x00, 0x2a, 0x01, 0x6d, 0x4d, 0x4d, 0x6d,
0xb1, 0x67, 0x98, 0x10, 0x6c, 0x8c, 0x2c, 0x40,
0xd6, 0x97, 0x61, 0xf1, 0x71, 0x41, 0x59, 0xf7,
0x52, 0x56, 0x08, 0x88, 0xd6, 0x5e, 0xe2, 0x2a,
0xe2, 0x5e, 0xd6, 0x88, 0x08, 0x56, 0x52, 0xf7,
0x59, 0x41, 0x71, 0xf1, 0x61, 0x97, 0xd6, 0x40,
0x2c, 0x8c, 0x6c, 0x10, 0x98, 0x67, 0xb1, 0x6d,
0x4d, 0x4d, 0x6d, 0x01,
};
static u32 sizeof_american_busy = sizeof(sample_american_busy);
static u8 sample_special1[] = {
0x2a, 0x2c, 0xbc, 0x6c, 0xd6, 0x71, 0xbd, 0x0d,
0xd9, 0x80, 0xcc, 0x4c, 0x40, 0x39, 0x0d, 0xbd,
0x11, 0x86, 0xec, 0xbc, 0xec, 0x0e, 0x51, 0xbd,
0x8d, 0x89, 0x30, 0x4c, 0xcc, 0xe0, 0xe1, 0xcd,
0x4d, 0x31, 0x88, 0x8c, 0xbc, 0x50, 0x0f, 0xed,
0xbd, 0xed, 0x87, 0x10, 0xbc, 0x0c, 0x38, 0x41,
0x4d, 0xcd, 0x81, 0xd8, 0x0c, 0xbc, 0x70, 0xd7,
0x6d, 0xbd, 0x2d,
};
static u32 sizeof_special1 = sizeof(sample_special1);
static u8 sample_special2[] = {
0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
};
static u32 sizeof_special2 = sizeof(sample_special2);
static u8 sample_special3[] = {
0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
};
static u32 sizeof_special3 = sizeof(sample_special3);
static u8 sample_silence[] = {
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
};
static u32 sizeof_silence = sizeof(sample_silence);
struct tones_samples {
u32 *len;
u8 *data;
};
static struct tones_samples samples[] = {
{&sizeof_german_all, sample_german_all},
{&sizeof_german_old, sample_german_old},
{&sizeof_american_dialtone, sample_american_dialtone},
{&sizeof_american_ringing, sample_american_ringing},
{&sizeof_american_busy, sample_american_busy},
{&sizeof_special1, sample_special1},
{&sizeof_special2, sample_special2},
{&sizeof_special3, sample_special3},
{NULL, NULL},
};
/***********************************
* generate ulaw from alaw samples *
***********************************/
void
dsp_audio_generate_ulaw_samples(void)
{
int i, j;
i = 0;
while (samples[i].len) {
j = 0;
while (j < (*samples[i].len)) {
samples[i].data[j] =
dsp_audio_alaw_to_ulaw[samples[i].data[j]];
j++;
}
i++;
}
}
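/*
 * Editorial note (sketch): the conversion above rewrites the sample
 * arrays in place, so it must run at most once; a second call would
 * re-map already-converted ulaw bytes.  A hypothetical one-shot guard:
 */
#if 0
void dsp_audio_generate_ulaw_samples_once(void)
{
	static bool done;

	if (done)
		return;
	done = true;
	dsp_audio_generate_ulaw_samples();
}
#endif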
/****************************
* tone sequence definition *
****************************/
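/* seq[] entries are durations in samples at 8000 Hz (dsp_tone_timeout
* below converts them with jiffies + seq * HZ / 8000), so 8000 means one
* second and 4000 means 500 ms of the corresponding data[] sample.
*/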
static struct pattern {
int tone;
u8 *data[10];
u32 *siz[10];
u32 seq[10];
} pattern[] = {
{TONE_GERMAN_DIALTONE,
{DATA_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1900, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALTONE,
{DATA_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1998, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALTONE,
{DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_DIALPBX,
{DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_OLDDIALPBX,
{DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_AMERICAN_DIALPBX,
{DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, NULL, NULL, NULL,
NULL},
{SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, NULL, NULL, NULL,
NULL},
{2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
{TONE_GERMAN_RINGING,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDRINGING,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 40000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_RINGING,
{DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_RINGPBX,
{DATA_GA, DATA_S, DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDRINGPBX,
{DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_RINGPBX,
{DATA_RI, DATA_S, DATA_RI, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_RI, SIZE_S, SIZE_RI, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_BUSY,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDBUSY,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_BUSY,
{DATA_BU, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_BU, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_HANGUP,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_OLDHANGUP,
{DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_AMERICAN_HANGUP,
{DATA_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_DT, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_SPECIAL_INFO,
{DATA_S1, DATA_S2, DATA_S3, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_S1, SIZE_S2, SIZE_S3, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{2666, 2666, 2666, 8002, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_GASSENBESETZT,
{DATA_GA, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GA, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0} },
{TONE_GERMAN_AUFSCHALTTON,
{DATA_GO, DATA_S, DATA_GO, DATA_S, NULL, NULL, NULL, NULL, NULL, NULL},
{SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, NULL, NULL, NULL, NULL, NULL, NULL},
{1000, 5000, 1000, 17000, 0, 0, 0, 0, 0, 0} },
{0,
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
/******************
* copy tone data *
******************/
/* an sk_buff is generated from the number of samples needed.
* the count is advanced and may wrap to 0 at each pattern period.
* the trick is to precalculate the pointers and lengths so that only one
* memcpy is needed per function call, or two if the tone sequence changes.
*
* pattern - the type of the pattern
* count - the sample from the beginning of the pattern (phase)
* len - the number of bytes
*
* the samples are written into the given buffer; there is no return value.
*
* if the tone has finished (e.g. knocking tone), dsp->tones is turned off
*/
void dsp_tone_copy(struct dsp *dsp, u8 *data, int len)
{
int index, count, start, num;
struct pattern *pat;
struct dsp_tone *tone = &dsp->tone;
/* if we have no tone, we copy silence */
if (!tone->tone) {
memset(data, dsp_silence, len);
return;
}
/* process pattern */
pat = (struct pattern *)tone->pattern;
/* points to the current pattern */
index = tone->index; /* gives current sequence index */
count = tone->count; /* gives current sample */
/* copy sample */
while (len) {
/* find sample to start with */
while (42) {
/* wrap around */
if (!pat->seq[index]) {
count = 0;
index = 0;
}
/* check if we are currently playing this tone */
if (count < pat->seq[index])
break;
if (dsp_debug & DEBUG_DSP_TONE)
printk(KERN_DEBUG "%s: reaching next sequence "
"(index=%d)\n", __func__, index);
count -= pat->seq[index];
index++;
}
/* calculate start and number of samples */
start = count % (*(pat->siz[index]));
num = len;
if (num + count > pat->seq[index])
num = pat->seq[index] - count;
if (num + start > (*(pat->siz[index])))
num = (*(pat->siz[index])) - start;
/* copy memory */
memcpy(data, pat->data[index] + start, num);
/* reduce length */
data += num;
count += num;
len -= num;
}
tone->index = index;
tone->count = count;
/* samples were written in place, nothing to return */
return;
}
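/* Illustrative usage sketch (not built): a transmit path would call
* dsp_tone_copy() once per frame to fill the payload with tone samples;
* the phase (index/count) is kept in dsp->tone between calls. The helper
* below and its skb handling are hypothetical.
*/
#if 0
static void example_fill_tx_frame(struct dsp *dsp, struct sk_buff *skb,
int len)
{
dsp_tone_copy(dsp, skb_put(skb, len), len);
}
#endif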
/*******************************
* send HW message to hfc card *
*******************************/
static void
dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
{
struct sk_buff *nskb;
/* unlocking is not required, because we don't expect a response */
nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
(len) ? HFC_SPL_LOOP_ON : HFC_SPL_LOOP_OFF, len, sample,
GFP_ATOMIC);
if (nskb) {
if (dsp->ch.peer) {
if (dsp->ch.recv(dsp->ch.peer, nskb))
dev_kfree_skb(nskb);
} else
dev_kfree_skb(nskb);
}
}
/*****************
* timer expires *
*****************/
void
dsp_tone_timeout(void *arg)
{
struct dsp *dsp = arg;
struct dsp_tone *tone = &dsp->tone;
struct pattern *pat = (struct pattern *)tone->pattern;
int index = tone->index;
if (!tone->tone)
return;
index++;
if (!pat->seq[index])
index = 0;
tone->index = index;
/* set next tone */
if (pat->data[index] == DATA_S)
dsp_tone_hw_message(dsp, NULL, 0);
else
dsp_tone_hw_message(dsp, pat->data[index], *(pat->siz[index]));
/* set timer */
init_timer(&tone->tl);
tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
add_timer(&tone->tl);
}
/********************
* set/release tone *
********************/
/*
* tones are realized by streaming or by special loop commands if supported
* by hardware. when hardware is used, the patterns will be controlled by
* timers.
*/
int
dsp_tone(struct dsp *dsp, int tone)
{
struct pattern *pat;
int i;
struct dsp_tone *tonet = &dsp->tone;
tonet->software = 0;
tonet->hardware = 0;
/* we turn off the tone */
if (!tone) {
if (dsp->features.hfc_loops && timer_pending(&tonet->tl))
del_timer(&tonet->tl);
if (dsp->features.hfc_loops)
dsp_tone_hw_message(dsp, NULL, 0);
tonet->tone = 0;
return 0;
}
pat = NULL;
i = 0;
while (pattern[i].tone) {
if (pattern[i].tone == tone) {
pat = &pattern[i];
break;
}
i++;
}
if (!pat) {
printk(KERN_WARNING "dsp: given tone 0x%x is invalid\n", tone);
return -EINVAL;
}
if (dsp_debug & DEBUG_DSP_TONE)
printk(KERN_DEBUG "%s: now starting tone %d (index=%d)\n",
__func__, tone, 0);
tonet->tone = tone;
tonet->pattern = pat;
tonet->index = 0;
tonet->count = 0;
if (dsp->features.hfc_loops) {
tonet->hardware = 1;
/* set first tone */
dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0]));
/* set timer */
if (timer_pending(&tonet->tl))
del_timer(&tonet->tl);
init_timer(&tonet->tl);
tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
add_timer(&tonet->tl);
} else {
tonet->software = 1;
}
return 0;
}
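/* Illustrative usage sketch (not built): switching a tone on and off.
* TONE_GERMAN_DIALTONE is one of the ids matched against pattern[] above;
* the example function itself is hypothetical.
*/
#if 0
static void example_dialtone(struct dsp *dsp)
{
dsp_tone(dsp, TONE_GERMAN_DIALTONE);	/* hw loop if supported,
* else software streaming */
dsp_tone(dsp, 0);			/* tone 0 turns it off again */
}
#endif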
| gpl-2.0 |
jpoirier/linux | drivers/regulator/max8925-regulator.c | 152 | 9488 | /*
* Regulators driver for Maxim max8925
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/max8925.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
#define SD1_DVM_VMIN 850000
#define SD1_DVM_VMAX 1000000
#define SD1_DVM_STEP 50000
#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
#define SD1_DVM_EN 6 /* SDV1 bit 6 */
/* bit definitions in LDO control registers */
#define LDO_SEQ_I2C 0x7 /* Power U/D by i2c */
#define LDO_SEQ_MASK 0x7 /* Power U/D sequence mask */
#define LDO_SEQ_SHIFT 2 /* Power U/D sequence offset */
#define LDO_I2C_EN 0x1 /* Enable by i2c */
#define LDO_I2C_EN_MASK 0x1 /* Enable mask by i2c */
#define LDO_I2C_EN_SHIFT 0 /* Enable offset by i2c */
struct max8925_regulator_info {
struct regulator_desc desc;
struct i2c_client *i2c;
int vol_reg;
int enable_reg;
};
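/* The selector<->voltage mapping is linear and is computed by the
* regulator core helpers (uV = min_uV + selector * uV_step). On SDV2,
* for instance (650 mV min, 25 mV step, 64 steps per the table below),
* selector 5 reads back as 650000 + 5 * 25000 = 775000 uV, and the
* (n_voltages - 1) mask keeps writes inside the 6-bit selector field.
*/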
static int max8925_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char mask = rdev->desc->n_voltages - 1;
return max8925_set_bits(info->i2c, info->vol_reg, mask, selector);
}
static int max8925_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char data, mask;
int ret;
ret = max8925_reg_read(info->i2c, info->vol_reg);
if (ret < 0)
return ret;
mask = rdev->desc->n_voltages - 1;
data = ret & mask;
return data;
}
static int max8925_enable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
LDO_SEQ_MASK << LDO_SEQ_SHIFT |
LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
LDO_SEQ_I2C << LDO_SEQ_SHIFT |
LDO_I2C_EN << LDO_I2C_EN_SHIFT);
}
static int max8925_disable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
LDO_SEQ_MASK << LDO_SEQ_SHIFT |
LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
LDO_SEQ_I2C << LDO_SEQ_SHIFT);
}
static int max8925_is_enabled(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
int ldo_seq, ret;
ret = max8925_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK;
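/* a rail whose power up/down sequence is not under I2C control is
* hardware-sequenced, so it is reported as always enabled */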
if (ldo_seq != LDO_SEQ_I2C)
return 1;
else
return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT);
}
static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
unsigned char data, mask;
if (uV < SD1_DVM_VMIN || uV > SD1_DVM_VMAX)
return -EINVAL;
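/* 2-bit DVM field at bit 5: e.g. 900000 uV -> (900000 - 850000) / 50000 = 1 */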
data = DIV_ROUND_UP(uV - SD1_DVM_VMIN, SD1_DVM_STEP);
data <<= SD1_DVM_SHIFT;
mask = 3 << SD1_DVM_SHIFT;
return max8925_set_bits(info->i2c, info->enable_reg, mask, data);
}
static int max8925_set_dvm_enable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN,
1 << SD1_DVM_EN);
}
static int max8925_set_dvm_disable(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 0);
}
static struct regulator_ops max8925_regulator_sdv_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = max8925_set_voltage_sel,
.get_voltage_sel = max8925_get_voltage_sel,
.enable = max8925_enable,
.disable = max8925_disable,
.is_enabled = max8925_is_enabled,
.set_suspend_voltage = max8925_set_dvm_voltage,
.set_suspend_enable = max8925_set_dvm_enable,
.set_suspend_disable = max8925_set_dvm_disable,
};
static struct regulator_ops max8925_regulator_ldo_ops = {
.map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = max8925_set_voltage_sel,
.get_voltage_sel = max8925_get_voltage_sel,
.enable = max8925_enable,
.disable = max8925_disable,
.is_enabled = max8925_is_enabled,
};
#define MAX8925_SDV(_id, min, max, step) \
{ \
.desc = { \
.name = "SDV" #_id, \
.ops = &max8925_regulator_sdv_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_SD##_id, \
.owner = THIS_MODULE, \
.n_voltages = 64, \
.min_uV = min * 1000, \
.uV_step = step * 1000, \
}, \
.vol_reg = MAX8925_SDV##_id, \
.enable_reg = MAX8925_SDCTL##_id, \
}
#define MAX8925_LDO(_id, min, max, step) \
{ \
.desc = { \
.name = "LDO" #_id, \
.ops = &max8925_regulator_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MAX8925_ID_LDO##_id, \
.owner = THIS_MODULE, \
.n_voltages = 64, \
.min_uV = min * 1000, \
.uV_step = step * 1000, \
}, \
.vol_reg = MAX8925_LDOVOUT##_id, \
.enable_reg = MAX8925_LDOCTL##_id, \
}
#ifdef CONFIG_OF
static struct of_regulator_match max8925_regulator_matches[] = {
{ .name = "SDV1",},
{ .name = "SDV2",},
{ .name = "SDV3",},
{ .name = "LDO1",},
{ .name = "LDO2",},
{ .name = "LDO3",},
{ .name = "LDO4",},
{ .name = "LDO5",},
{ .name = "LDO6",},
{ .name = "LDO7",},
{ .name = "LDO8",},
{ .name = "LDO9",},
{ .name = "LDO10",},
{ .name = "LDO11",},
{ .name = "LDO12",},
{ .name = "LDO13",},
{ .name = "LDO14",},
{ .name = "LDO15",},
{ .name = "LDO16",},
{ .name = "LDO17",},
{ .name = "LDO18",},
{ .name = "LDO19",},
{ .name = "LDO20",},
};
#endif
static struct max8925_regulator_info max8925_regulator_info[] = {
MAX8925_SDV(1, 637.5, 1425, 12.5),
MAX8925_SDV(2, 650, 2225, 25),
MAX8925_SDV(3, 750, 3900, 50),
MAX8925_LDO(1, 750, 3900, 50),
MAX8925_LDO(2, 650, 2250, 25),
MAX8925_LDO(3, 650, 2250, 25),
MAX8925_LDO(4, 750, 3900, 50),
MAX8925_LDO(5, 750, 3900, 50),
MAX8925_LDO(6, 750, 3900, 50),
MAX8925_LDO(7, 750, 3900, 50),
MAX8925_LDO(8, 750, 3900, 50),
MAX8925_LDO(9, 750, 3900, 50),
MAX8925_LDO(10, 750, 3900, 50),
MAX8925_LDO(11, 750, 3900, 50),
MAX8925_LDO(12, 750, 3900, 50),
MAX8925_LDO(13, 750, 3900, 50),
MAX8925_LDO(14, 750, 3900, 50),
MAX8925_LDO(15, 750, 3900, 50),
MAX8925_LDO(16, 750, 3900, 50),
MAX8925_LDO(17, 650, 2250, 25),
MAX8925_LDO(18, 650, 2250, 25),
MAX8925_LDO(19, 750, 3900, 50),
MAX8925_LDO(20, 750, 3900, 50),
};
#ifdef CONFIG_OF
static int max8925_regulator_dt_init(struct platform_device *pdev,
struct regulator_config *config,
int ridx)
{
struct device_node *nproot, *np;
int rcount;
nproot = of_node_get(pdev->dev.parent->of_node);
if (!nproot)
return -ENODEV;
np = of_get_child_by_name(nproot, "regulators");
if (!np) {
dev_err(&pdev->dev, "failed to find regulators node\n");
return -ENODEV;
}
rcount = of_regulator_match(&pdev->dev, np,
&max8925_regulator_matches[ridx], 1);
of_node_put(np);
if (rcount < 0)
return rcount;
config->init_data = max8925_regulator_matches[ridx].init_data;
config->of_node = max8925_regulator_matches[ridx].of_node;
return 0;
}
#else
#define max8925_regulator_dt_init(x, y, z) (-1)
#endif
static int max8925_regulator_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
struct max8925_regulator_info *ri;
struct resource *res;
struct regulator_dev *rdev;
int i, regulator_idx;
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (!res) {
dev_err(&pdev->dev, "No REG resource!\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) {
ri = &max8925_regulator_info[i];
if (ri->vol_reg == res->start) {
regulator_idx = i;
break;
}
}
if (i == ARRAY_SIZE(max8925_regulator_info)) {
dev_err(&pdev->dev, "Failed to find regulator %llu\n",
(unsigned long long)res->start);
return -EINVAL;
}
ri->i2c = chip->i2c;
config.dev = &pdev->dev;
config.driver_data = ri;
if (max8925_regulator_dt_init(pdev, &config, regulator_idx))
if (pdata)
config.init_data = pdata;
rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
return 0;
}
static struct platform_driver max8925_regulator_driver = {
.driver = {
.name = "max8925-regulator",
.owner = THIS_MODULE,
},
.probe = max8925_regulator_probe,
};
static int __init max8925_regulator_init(void)
{
return platform_driver_register(&max8925_regulator_driver);
}
subsys_initcall(max8925_regulator_init);
static void __exit max8925_regulator_exit(void)
{
platform_driver_unregister(&max8925_regulator_driver);
}
module_exit(max8925_regulator_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_DESCRIPTION("Regulator Driver for Maxim 8925 PMIC");
MODULE_ALIAS("platform:max8925-regulator");
| gpl-2.0 |
Chadder43/Asus-T200TA-Linux | drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 408 | 44998 | /*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include <linux/if_ether.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include <uapi/linux/dcbnl.h>
#include "i40e.h"
#include "i40e_fcoe.h"
/**
* i40e_rx_is_fip - returns true if the rx packet type is FIP
* @ptype: the packet type field from rx descriptor write-back
**/
static inline bool i40e_rx_is_fip(u16 ptype)
{
return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
}
/**
* i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
* @ptype: the packet type field from rx descriptor write-back
**/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
/**
* i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
* @sof: the FCoE start of frame delimiter
**/
static inline bool i40e_fcoe_sof_is_class2(u8 sof)
{
return (sof == FC_SOF_I2) || (sof == FC_SOF_N2);
}
/**
* i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF
* @sof: the FCoE start of frame delimiter
**/
static inline bool i40e_fcoe_sof_is_class3(u8 sof)
{
return (sof == FC_SOF_I3) || (sof == FC_SOF_N3);
}
/**
* i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW
* @sof: the input SOF value from the frame
**/
static inline bool i40e_fcoe_sof_is_supported(u8 sof)
{
return i40e_fcoe_sof_is_class2(sof) ||
i40e_fcoe_sof_is_class3(sof);
}
/**
* i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame
* @skb: the frame whose SOF is to be pulled from
**/
static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof)
{
*sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
if (!i40e_fcoe_sof_is_supported(*sof))
return -EINVAL;
return 0;
}
/**
* i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW
* @eof: the input EOF value from the frame
**/
static inline bool i40e_fcoe_eof_is_supported(u8 eof)
{
return (eof == FC_EOF_N) || (eof == FC_EOF_T) ||
(eof == FC_EOF_NI) || (eof == FC_EOF_A);
}
/**
* i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame
* @skb: the frame whose EOF is to be pulled from
**/
static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
{
/* the first byte of the last dword is EOF */
skb_copy_bits(skb, skb->len - 4, eof, 1);
if (!i40e_fcoe_eof_is_supported(*eof))
return -EINVAL;
return 0;
}
/**
* i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming
* @eof: the input eof value from the frame
*
* The FC EOF is converted to the value understood by HW for descriptor
* programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
* first.
**/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
switch (eof) {
case FC_EOF_N:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N;
case FC_EOF_T:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T;
case FC_EOF_NI:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI;
case FC_EOF_A:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
default:
/* FIXME: still returns 0 */
pr_err("Unrecognized EOF %x\n", eof);
return 0;
}
}
/**
* i40e_fcoe_xid_is_valid - returns true if the exchange id is valid
* @xid: the exchange id
**/
static inline bool i40e_fcoe_xid_is_valid(u16 xid)
{
return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX);
}
/**
* i40e_fcoe_ddp_unmap - unmap the mapped sglist associated with a DDP context
* @pf: pointer to pf
* @ddp: sw DDP context
*
* Unmap the scatter-gather list associated with the given SW DDP context
*
**/
static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf,
struct i40e_fcoe_ddp *ddp)
{
if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags))
return;
if (ddp->sgl) {
dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
ddp->sgl = NULL;
ddp->sgc = 0;
}
if (ddp->pool) {
dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ddp->pool = NULL;
}
}
/**
* i40e_fcoe_ddp_clear - clear the given SW DDP context
* @ddp - SW DDP context
**/
static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp)
{
memset(ddp, 0, sizeof(struct i40e_fcoe_ddp));
ddp->xid = FC_XID_UNKNOWN;
ddp->flags = __I40E_FCOE_DDP_NONE;
}
/**
* i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE
* @id: the prog id for the programming status Rx descriptor write-back
**/
static inline bool i40e_fcoe_progid_is_fcoe(u8 id)
{
return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
(id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS);
}
/**
* i40e_fcoe_fc_get_xid - get xid from the frame header
* @fh: the fc frame header
*
* In case the incoming frame's exchange is originated from
* the initiator, then received frame's exchange id is ANDed
* with fc_cpu_mask bits to get the same cpu on which exchange
* was originated, otherwise just use the current cpu.
*
* Returns ox_id if exchange originator, rx_id if responder
**/
static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh)
{
u32 f_ctl = ntoh24(fh->fh_f_ctl);
return (f_ctl & FC_FC_EX_CTX) ?
be16_to_cpu(fh->fh_ox_id) :
be16_to_cpu(fh->fh_rx_id);
}
/**
* i40e_fcoe_fc_frame_header - get fc frame header from skb
* @skb: packet
*
* This checks if there is a VLAN header and returns the data
* pointer to the start of the fc_frame_header.
*
* Returns pointer to the fc_frame_header
**/
static inline struct fc_frame_header *i40e_fcoe_fc_frame_header(
struct sk_buff *skb)
{
void *fh = skb->data + sizeof(struct fcoe_hdr);
if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
fh += sizeof(struct vlan_hdr);
return (struct fc_frame_header *)fh;
}
/**
* i40e_fcoe_ddp_put - release the DDP context for a given exchange id
* @netdev: the corresponding net_device
* @xid: the exchange id that corresponding DDP context will be released
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_done
* and it is expected to be called by ULD, i.e., FCP layer of libfc
* to release the corresponding ddp context when the I/O is done.
*
* Returns : data length already ddp-ed in bytes
**/
static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
int len = 0;
struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid];
if (!fcoe || !ddp)
goto out;
if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags))
len = ddp->len;
i40e_fcoe_ddp_unmap(pf, ddp);
out:
return len;
}
/**
* i40e_init_pf_fcoe - sets up the HW for FCoE
* @pf: pointer to pf
*
* Returns 0 if FCoE is supported otherwise the error code
**/
int i40e_init_pf_fcoe(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
pf->num_fcoe_qps = 0;
pf->fcoe_hmc_cntx_num = 0;
pf->fcoe_hmc_filt_num = 0;
if (!pf->hw.func_caps.fcoe) {
dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
return 0;
}
if (!pf->hw.func_caps.dcb) {
dev_warn(&pf->pdev->dev,
"Hardware is not DCB capable not enabling FCoE.\n");
return 0;
}
/* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1));
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val);
/* enable flag */
pf->flags |= I40E_FLAG_FCOE_ENABLED;
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
/* Reserve 4K DDP contexts and 20K filter size for FCoE */
pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
I40E_DMA_CNTX_BASE_SIZE;
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
(1 << I40E_HASH_FILTER_SIZE_16K) *
I40E_HASH_FILTER_BASE_SIZE;
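/* i.e. the 4K DDP contexts reserved above plus a 16K hash filter
* table, matching the 20K filter size noted in the comment above */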
/* FCoE object: max 16K filter buckets and 4K DMA contexts */
pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K;
pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
/* Setup max frame with FCoE_MTU plus L2 overheads */
val = rd32(hw, I40E_GLFCOE_RCTL);
val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
<< I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
wr32(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
return 0;
}
/**
* i40e_get_fcoe_tc_map - Return TC map for FCoE APP
* @pf: pointer to pf
*
**/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
{
struct i40e_ieee_app_priority_table app;
struct i40e_hw *hw = &pf->hw;
u8 enabled_tc = 0;
u8 tc, i;
/* Get the FCoE APP TLV */
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
for (i = 0; i < dcbcfg->numapps; i++) {
app = dcbcfg->app[i];
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app.protocolid == ETH_P_FCOE) {
tc = dcbcfg->etscfg.prioritytable[app.priority];
enabled_tc |= (1 << tc);
break;
}
}
/* TC0 if there is no TC defined for FCoE APP TLV */
enabled_tc = enabled_tc ? enabled_tc : 0x1;
return enabled_tc;
}
/**
* i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI
* @vsi: pointer to the associated VSI struct
* @ctxt: pointer to the associated VSI context to be passed to HW
*
* Returns 0 on success or < 0 on error
**/
int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
{
struct i40e_aqc_vsi_properties_data *info = &ctxt->info;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 enabled_tc = 0;
if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
dev_err(&pf->pdev->dev,
"FCoE is not enabled for this device\n");
return -EPERM;
}
/* initialize the hardware for FCoE */
ctxt->pf_num = hw->pf_id;
ctxt->vf_num = 0;
ctxt->uplink_seid = vsi->uplink_seid;
ctxt->connection_type = 0x1;
ctxt->flags = I40E_AQ_VSI_TYPE_PF;
/* FCoE VSI would need the following sections */
info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
/* FCoE VSI does not need these sections */
info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
I40E_AQ_VSI_PROP_VLAN_VALID |
I40E_AQ_VSI_PROP_CAS_PV_VALID |
I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
enabled_tc = i40e_get_fcoe_tc_map(pf);
i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
/* set up queue option section: only enable FCoE */
info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA;
return 0;
}
/**
* i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable,
* indicating the upper FCoE protocol stack is ready to use FCoE
* offload features.
*
* @netdev: pointer to the netdev that FCoE is created on
*
* Returns 0 on success
*
* in RTNL
*
**/
int i40e_fcoe_enable(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
netdev_err(netdev, "HW does not support FCoE.\n");
return -ENODEV;
}
if (vsi->type != I40E_VSI_FCOE) {
netdev_err(netdev, "interface does not support FCoE.\n");
return -EBUSY;
}
atomic_inc(&fcoe->refcnt);
return 0;
}
/**
* i40e_fcoe_disable - disables FCoE for the upper FCoE protocol stack.
* @netdev: pointer to the netdev that FCoE is created on
*
* Returns 0 on success
*
**/
int i40e_fcoe_disable(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
netdev_err(netdev, "device does not support FCoE\n");
return -ENODEV;
}
if (vsi->type != I40E_VSI_FCOE)
return -EBUSY;
if (!atomic_dec_and_test(&fcoe->refcnt))
return -EINVAL;
netdev_info(netdev, "FCoE disabled\n");
return 0;
}
/**
* i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
* @fcoe: the FCoE sw object
* @dev: the device that the pool is associated with
* @cpu: the cpu for this pool
*
**/
static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe,
struct device *dev,
unsigned int cpu)
{
struct i40e_fcoe_ddp_pool *ddp_pool;
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
if (!ddp_pool->pool) {
dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu);
return;
}
dma_pool_destroy(ddp_pool->pool);
ddp_pool->pool = NULL;
}
/**
* i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
* @fcoe: the FCoE sw object
* @dev: the device that the pool is associated with
* @cpu: the cpu for this pool
*
* Returns 0 on success or non-zero on failure
*
**/
static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe,
struct device *dev,
unsigned int cpu)
{
struct i40e_fcoe_ddp_pool *ddp_pool;
struct dma_pool *pool;
char pool_name[32];
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
if (ddp_pool && ddp_pool->pool) {
dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu);
return 0;
}
snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu);
pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX,
I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE);
if (!pool) {
dev_err(dev, "dma_pool_create %s failed\n", pool_name);
return -ENOMEM;
}
ddp_pool->pool = pool;
return 0;
}
/**
* i40e_fcoe_free_ddp_resources - release FCoE DDP resources
* @vsi: the vsi FCoE is associated with
*
**/
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
int cpu, i;
/* do nothing if not FCoE VSI */
if (vsi->type != I40E_VSI_FCOE)
return;
/* do nothing if no DDP pools were allocated */
if (!fcoe->ddp_pool)
return;
for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
i40e_fcoe_ddp_put(vsi->netdev, i);
for_each_possible_cpu(cpu)
i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu);
free_percpu(fcoe->ddp_pool);
fcoe->ddp_pool = NULL;
netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n",
vsi->id, vsi->seid);
}
/**
* i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources
* @vsi: the VSI FCoE is associated with
*
* Returns 0 on success or non-zero on failure
*
**/
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct device *dev = &pf->pdev->dev;
struct i40e_fcoe *fcoe = &pf->fcoe;
unsigned int cpu;
int i;
if (vsi->type != I40E_VSI_FCOE)
return -ENODEV;
/* do nothing if no DDP pools were allocated */
if (fcoe->ddp_pool)
return -EEXIST;
/* allocate per CPU memory to track DDP pools */
fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool);
if (!fcoe->ddp_pool) {
dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n");
return -ENOMEM;
}
/* allocate pci pool for each cpu */
for_each_possible_cpu(cpu) {
if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu))
continue;
dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu);
i40e_fcoe_free_ddp_resources(vsi);
return -ENOMEM;
}
/* initialize the sw context */
for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
i40e_fcoe_ddp_clear(&fcoe->ddp[i]);
netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n",
vsi->id, vsi->seid);
return 0;
}
/**
* i40e_fcoe_handle_status - check the Programming Status for FCoE
* @rx_ring: the Rx ring for this descriptor
* @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor.
*
* Check if this is the Rx Programming Status descriptor write-back for FCoE.
* This is used to verify if the context/filter programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, u8 prog_id)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
struct i40e_fcoe_ddp *ddp;
u32 error;
u16 xid;
u64 qw;
/* we only care for FCoE here */
if (!i40e_fcoe_progid_is_fcoe(prog_id))
return;
xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
(I40E_FCOE_DDP_MAX - 1);
if (!i40e_fcoe_xid_is_valid(xid))
return;
ddp = &fcoe->ddp[xid];
WARN_ON(xid != ddp->xid);
qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
/* DDP context programming status: failure or success */
if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) {
if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) {
dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n",
xid, ddp->xid);
ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT;
}
if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) {
dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n",
xid, ddp->xid);
ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT;
}
}
/* DDP context invalidation status: failure or success */
if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) {
if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) {
dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n",
xid, ddp->xid);
ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT;
}
/* clear the flag so we can retry invalidation */
clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags);
}
/* unmap DMA */
i40e_fcoe_ddp_unmap(pf, ddp);
i40e_fcoe_ddp_clear(ddp);
}
/**
* i40e_fcoe_handle_offload - check ddp status and mark it done
* @rx_ring: the Rx ring for this descriptor
* @rx_desc: advanced rx descriptor
* @skb: the skb holding the received data
*
* This checks ddp status.
*
* Returns : < 0 indicates an error or not a FCOE ddp, 0 indicates
* not passing the skb to ULD, > 0 indicates the length of data
* being ddped.
*
**/
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
struct fc_frame_header *fh = NULL;
struct i40e_fcoe_ddp *ddp = NULL;
u32 status, fltstat;
u32 error, fcerr;
int rc = -EINVAL;
u16 ptype;
u16 xid;
u64 qw;
/* check this rxd is for programming status */
qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* packet descriptor, check packet type */
ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
if (!i40e_rx_is_fcoe(ptype))
goto out_no_ddp;
error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) &
I40E_RX_DESC_FCOE_ERROR_MASK;
/* check stateless offload error */
if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) {
dev_err(&pf->pdev->dev, "Protocol Error\n");
skb->ip_summed = CHECKSUM_NONE;
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* check hw status on ddp */
status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_FCMASK;
/* now we are ready to check DDP */
fh = i40e_fcoe_fc_frame_header(skb);
xid = i40e_fcoe_fc_get_xid(fh);
if (!i40e_fcoe_xid_is_valid(xid))
goto out_no_ddp;
/* non DDP normal receive, return to the protocol stack */
if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH)
goto out_no_ddp;
/* do we have a sw ddp context setup ? */
ddp = &fcoe->ddp[xid];
if (!ddp->sgl)
goto out_no_ddp;
/* fetch xid from hw rxd wb, which should match up the sw ctxt */
xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id);
if (ddp->xid != xid) {
dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n",
ddp->xid, xid);
goto out_put_ddp;
}
/* the same exchange has already errored out */
if (ddp->fcerr) {
dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n",
xid, ddp->fcerr, fcerr);
goto out_put_ddp;
}
/* fcoe param is valid by now with correct DDPed length */
ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param);
ddp->fcerr = fcerr;
/* header posting only, useful only for target mode and debugging */
if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) {
/* For target mode, we get header of the last packet but it
* does not have the FCoE trailer field, i.e., CRC and EOF
* Ordered Set since they are offloaded by the HW, so fill
* it up correspondingly to allow the packet to pass through
* to the upper protocol stack.
*/
u32 f_ctl = ntoh24(fh->fh_f_ctl);
if ((f_ctl & FC_FC_END_SEQ) &&
(fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
struct fcoe_crc_eof *crc = NULL;
crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
} else {
/* otherwise, drop the header only frame */
rc = 0;
goto out_no_ddp;
}
}
out_put_ddp:
/* either we got RSP or we have an error, unmap DMA in both cases */
i40e_fcoe_ddp_unmap(pf, ddp);
if (ddp->len && !ddp->fcerr) {
int pkts;
rc = ddp->len;
i40e_fcoe_ddp_clear(ddp);
ddp->len = rc;
pkts = DIV_ROUND_UP(rc, 2048);
rx_ring->stats.bytes += rc;
rx_ring->stats.packets += pkts;
rx_ring->q_vector->rx.total_bytes += rc;
rx_ring->q_vector->rx.total_packets += pkts;
set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags);
}
out_no_ddp:
return rc;
}
/**
* i40e_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
* @target_mode: indicates this is a DDP request for target
*
* Returns : 1 for success and 0 for no DDP on this I/O
**/
static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc,
int target_mode)
{
static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_fcoe_ddp_pool *ddp_pool;
struct i40e_pf *pf = np->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
unsigned int i, j, dmacount;
struct i40e_fcoe_ddp *ddp;
unsigned int firstoff = 0;
unsigned int thisoff = 0;
unsigned int thislen = 0;
struct scatterlist *sg;
dma_addr_t addr = 0;
unsigned int len;
if (xid >= I40E_FCOE_DDP_MAX) {
dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid);
return 0;
}
/* no DDP if we are already down or resetting */
if (test_bit(__I40E_DOWN, &pf->state) ||
test_bit(__I40E_NEEDS_RESTART, &pf->state)) {
dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n",
xid);
return 0;
}
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
xid, ddp->sgl, ddp->sgc);
return 0;
}
i40e_fcoe_ddp_clear(ddp);
if (!fcoe->ddp_pool) {
dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid);
return 0;
}
ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
if (!ddp_pool->pool) {
dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid);
goto out_noddp;
}
/* setup dma from scsi command sgl */
dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n",
sgl, sgc);
goto out_noddp_unmap;
}
/* alloc the udl from our ddp pool */
ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
dev_info(&pf->pdev->dev,
"Failed allocated ddp context, xid 0x%x\n", xid);
goto out_noddp_unmap;
}
j = 0;
ddp->len = 0;
for_each_sg(sgl, sg, dmacount, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
ddp->len += len;
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) {
dev_info(&pf->pdev->dev,
"xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n",
xid, i, j, dmacount, (u64)addr);
goto out_noddp_free;
}
/* get the offset of length of current buffer */
thisoff = addr & ((dma_addr_t)bufflen - 1);
thislen = min_t(unsigned int, (bufflen - thisoff), len);
/* all but the 1st buffer (j == 0)
* must be aligned on bufflen
*/
if ((j != 0) && (thisoff))
goto out_noddp_free;
/* all but the last buffer
* ((i == (dmacount - 1)) && (thislen == len))
* must end at bufflen
*/
if (((i != (dmacount - 1)) || (thislen != len)) &&
((thislen + thisoff) != bufflen))
goto out_noddp_free;
ddp->udl[j] = (u64)(addr - thisoff);
/* only the first buffer may have a non-zero offset */
if (j == 0)
firstoff = thisoff;
len -= thislen;
addr += thislen;
j++;
}
}
/* only the last buffer may have non-full bufflen */
ddp->lastsize = thisoff + thislen;
ddp->firstoff = firstoff;
ddp->list_len = j;
ddp->pool = ddp_pool->pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
ddp->xid = xid;
if (target_mode)
set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags);
put_cpu();
return 1; /* Success */
out_noddp_free:
dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
i40e_fcoe_ddp_clear(ddp);
out_noddp_unmap:
dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
put_cpu();
return 0;
}
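/* Worked example of the alignment rules above, assuming bufflen is 4096:
* an sgl of {addr 0x10200 len 0x1e00, addr 0x12000 len 0x2000} yields
* udl[] = {0x10000, 0x11000, 0x12000, 0x13000}, firstoff = 0x200 and
* lastsize = 0x1000; only the first buffer may start at a non-zero
* offset and only the last one may end short of bufflen.
*/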
/**
* i40e_fcoe_ddp_get - called to set up ddp context in initiator mode
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
* and is expected to be called from ULD, e.g., FCP layer of libfc
* to set up ddp for the corresponding xid of the given sglist for
* the corresponding I/O.
*
* Returns : 1 for success and 0 for no ddp
**/
static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
/**
* i40e_fcoe_ddp_target - called to set up ddp context in target mode
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_target
* and is expected to be called from ULD, e.g., FCP layer of libfc
* to set up ddp for the corresponding xid of the given sglist for
* the corresponding I/O. The DDP in target mode is a write I/O request
* from the initiator.
*
* Returns : 1 for success and 0 for no ddp
**/
static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
* i40e_fcoe_program_ddp - programs the HW DDP related descriptors
* @tx_ring: transmit ring for this packet
* @skb: the packet to be sent out
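* @ddp: the SW DDP context for this exchange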
* @sof: the SOF to indicate class of service
*
* Determines if it is a READ/WRITE command and finds out if there is
* a matching SW DDP context for this command. DDP is applicable
* only in case of READ if initiator or WRITE in case of
* responder (via checking XFER_RDY).
*
* Note: caller checks sof and ddp sw context
*
* Returns : none
*
**/
static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,
struct sk_buff *skb,
struct i40e_fcoe_ddp *ddp, u8 sof)
{
struct i40e_fcoe_filter_context_desc *filter_desc = NULL;
struct i40e_fcoe_queue_context_desc *queue_desc = NULL;
struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL;
struct i40e_pf *pf = tx_ring->vsi->back;
u16 i = tx_ring->next_to_use;
struct fc_frame_header *fh;
u64 flags_rsvd_lanq = 0;
bool target_mode;
/* check if abort is still pending */
if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) {
dev_warn(&pf->pdev->dev,
"DDP abort is still pending xid:%hx and ddp->flags:%lx:\n",
ddp->xid, ddp->flags);
return;
}
/* set the flag to indicate this is programmed */
if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) {
dev_warn(&pf->pdev->dev,
"DDP is already programmed for xid:%hx and ddp->flags:%lx:\n",
ddp->xid, ddp->flags);
return;
}
/* Prepare the DDP context descriptor */
ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
i++;
if (i == tx_ring->count)
i = 0;
ddp_desc->type_cmd_foff_lsize =
cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX |
((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K <<
I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) |
((u64)ddp->firstoff <<
I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) |
((u64)ddp->lastsize <<
I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT));
ddp_desc->rsvd = cpu_to_le64(0);
/* target mode needs last packet in the sequence */
target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
if (target_mode)
ddp_desc->type_cmd_foff_lsize |=
cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH);
/* Prepare queue_context descriptor */
queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
if (i == tx_ring->count)
i = 0;
queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp));
queue_desc->flen_tph = cpu_to_le64(ddp->list_len |
((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC |
I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) <<
I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT));
/* Prepare filter_context_desc */
filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
i++;
if (i == tx_ring->count)
i = 0;
fh = (struct fc_frame_header *)skb_transport_header(skb);
filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset));
filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt));
filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid <<
I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT);
flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP;
flags_rsvd_lanq |= (u64)(target_mode ?
I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP :
I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT);
flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ?
I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 :
I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3);
flags_rsvd_lanq |= ((u64)skb->queue_mapping <<
I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT);
filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq);
/* By this time, all offload related descriptors have been programmed */
tx_ring->next_to_use = i;
}
/**
* i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort
* @tx_ring: transmit ring for this packet
* @skb: the packet associated w/ this DDP invalidation, i.e., ABTS
* @ddp: the SW DDP context for this DDP
*
* Programs the Tx context descriptor to do DDP invalidation.
**/
static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring,
struct sk_buff *skb,
struct i40e_fcoe_ddp *ddp)
{
struct i40e_tx_context_desc *context_desc;
int i;
if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags))
return;
i = tx_ring->next_to_use;
context_desc = I40E_TX_CTXTDESC(tx_ring, i);
i++;
if (i == tx_ring->count)
i = 0;
context_desc->tunneling_params = cpu_to_le32(0);
context_desc->l2tag2 = cpu_to_le16(0);
context_desc->rsvd = cpu_to_le16(0);
context_desc->type_cmd_tso_mss = cpu_to_le64(
I40E_TX_DESC_DTYPE_FCOE_CTX |
(I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL <<
I40E_TXD_CTX_QW1_CMD_SHIFT) |
(I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND <<
I40E_TXD_CTX_QW1_CMD_SHIFT));
tx_ring->next_to_use = i;
}
/**
* i40e_fcoe_handle_ddp - check we should setup or invalidate DDP
* @tx_ring: transmit ring for this packet
* @skb: the packet to be sent out
* @sof: the SOF to indicate class of service
*
* Determines if it is ABTS/READ/XFER_RDY and finds out if there is
* a matching SW DDP context for this command. DDP is applicable
* only in case of READ if initiator or WRITE in case of
* responder (via checking XFER_RDY). In case this is an ABTS, just
* invalidate the context.
**/
static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring,
struct sk_buff *skb, u8 sof)
{
struct i40e_pf *pf = tx_ring->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
struct fc_frame_header *fh;
struct i40e_fcoe_ddp *ddp;
u32 f_ctl;
u8 r_ctl;
u16 xid;
fh = (struct fc_frame_header *)skb_transport_header(skb);
f_ctl = ntoh24(fh->fh_f_ctl);
r_ctl = fh->fh_r_ctl;
ddp = NULL;
if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) {
/* exchange responder? if so, XFER_RDY for write */
xid = ntohs(fh->fh_rx_id);
if (i40e_fcoe_xid_is_valid(xid)) {
ddp = &fcoe->ddp[xid];
if ((ddp->xid == xid) &&
(test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
}
} else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) {
/* exchange originator, check READ cmd */
xid = ntohs(fh->fh_ox_id);
if (i40e_fcoe_xid_is_valid(xid)) {
ddp = &fcoe->ddp[xid];
if ((ddp->xid == xid) &&
(!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
}
} else if (r_ctl == FC_RCTL_BA_ABTS) {
/* exchange originator, check ABTS */
xid = ntohs(fh->fh_ox_id);
if (i40e_fcoe_xid_is_valid(xid)) {
ddp = &fcoe->ddp[xid];
if ((ddp->xid == xid) &&
(!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp);
}
}
}
/**
* i40e_fcoe_tso - set up FCoE TSO
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @tx_flags: collected send information
* @hdr_len: the tso header length
* @sof: the SOF to indicate class of service
*
* Note must already have sof checked to be either class 2 or class 3 before
* calling this function.
*
* Returns 1 to indicate sequence segmentation offload is properly setup
* or returns 0 to indicate no tso is needed, otherwise returns error
* code to drop the frame.
**/
static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len, u8 sof)
{
struct i40e_tx_context_desc *context_desc;
u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
struct fc_frame_header *fh;
u64 cd_type_cmd_tso_mss;
/* must match gso type as FCoE */
if (!skb_is_gso(skb))
return 0;
/* is it the expected gso type for FCoE? */
if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
netdev_err(skb->dev,
"wrong gso type %d:expecting SKB_GSO_FCOE\n",
skb_shinfo(skb)->gso_type);
return -EINVAL;
}
/* header and trailer are inserted by hw */
*hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
sizeof(struct fcoe_crc_eof);
/* check sof to decide a class 2 or 3 TSO */
if (likely(i40e_fcoe_sof_is_class3(sof)))
cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
else
cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2;
/* param field valid? */
fh = (struct fc_frame_header *)skb_transport_header(skb);
if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF;
/* fill the field values */
cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
cd_type_cmd_tso_mss =
((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
/* grab the next descriptor */
context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
tx_ring->next_to_use++;
if (tx_ring->next_to_use == tx_ring->count)
tx_ring->next_to_use = 0;
context_desc->tunneling_params = 0;
context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK)
>> I40E_TX_FLAGS_VLAN_SHIFT);
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
return 1;
}
/**
* i40e_fcoe_tx_map - build the tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
* @tx_flags: collected send information
* @hdr_len: ptr to the size of the packet header
* @eof: the frame eof value
*
* Note, for FCoE, sof and eof are already checked
**/
static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
struct sk_buff *skb,
struct i40e_tx_buffer *first,
u32 tx_flags, u8 hdr_len, u8 eof)
{
u32 td_offset = 0;
u32 td_cmd = 0;
u32 maclen;
/* insert CRC */
td_cmd = I40E_TX_DESC_CMD_ICRC;
/* setup MACLEN */
maclen = skb_network_offset(skb);
if (tx_flags & I40E_TX_FLAGS_SW_VLAN)
maclen += sizeof(struct vlan_hdr);
if (skb->protocol == htons(ETH_P_FCOE)) {
/* for FCoE, maclen should exclude ether type */
maclen -= 2;
/* setup type as FCoE and EOF insertion */
td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof));
/* setup FCoELEN and FCLEN */
td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
((sizeof(struct fc_frame_header) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT));
/* trim to exclude trailer */
pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof));
}
/* MACLEN is ether header length in words not bytes */
td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
td_cmd, td_offset);
}
/**
* i40e_fcoe_set_skb_header - adjust skb header point for FIP/FCoE/FC
* @skb: the skb to be adjusted
*
* Returns true if this skb is a FCoE/FIP or VLAN carried FCoE/FIP and then
* adjusts the skb header pointers correspondingly. Otherwise, returns false.
**/
static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
skb_reset_mac_header(skb);
skb->mac_len = sizeof(struct ethhdr);
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb);
protocol = veth->h_vlan_encapsulated_proto;
skb->mac_len += sizeof(struct vlan_hdr);
}
/* FCoE or FIP only */
if ((protocol != htons(ETH_P_FIP)) &&
(protocol != htons(ETH_P_FCOE)))
return -EINVAL;
/* set header to L2 of FCoE/FIP */
skb_set_network_header(skb, skb->mac_len);
if (protocol == htons(ETH_P_FIP))
return 0;
/* set header to L3 of FC */
skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
return 0;
}
/**
* i40e_fcoe_xmit_frame - transmit buffer
* @skb: send buffer
* @netdev: the fcoe netdev
*
* Returns 0 if sent, else an error code
**/
static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(skb->dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
struct i40e_tx_buffer *first;
u32 tx_flags = 0;
u8 hdr_len = 0;
u8 sof = 0;
u8 eof = 0;
int fso;
if (i40e_fcoe_set_skb_header(skb))
goto out_drop;
if (!i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* FIP is a regular L2 traffic w/o offload */
if (skb->protocol == htons(ETH_P_FIP))
goto out_send;
/* check sof and eof, only supports FC Class 2 or 3 */
if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) {
netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof);
goto out_drop;
}
/* always do FCCRC for FCoE */
tx_flags |= I40E_TX_FLAGS_FCCRC;
/* check we should do sequence offload */
fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
if (fso < 0)
goto out_drop;
else if (fso)
tx_flags |= I40E_TX_FLAGS_FSO;
else
i40e_fcoe_handle_ddp(tx_ring, skb, sof);
out_send:
/* send out the packet */
i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/**
* i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns error as operation not permitted
*
**/
static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
{
netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n");
return -EPERM;
}
/**
* i40e_fcoe_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*
**/
static int i40e_fcoe_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
i40e_vlan_stripping_enable(vsi);
else
i40e_vlan_stripping_disable(vsi);
return 0;
}
static const struct net_device_ops i40e_fcoe_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
.ndo_get_stats64 = i40e_get_netdev_stats_struct,
.ndo_set_rx_mode = i40e_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_fcoe_change_mtu,
.ndo_do_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
.ndo_setup_tc = i40e_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_start_xmit = i40e_fcoe_xmit_frame,
.ndo_fcoe_enable = i40e_fcoe_enable,
.ndo_fcoe_disable = i40e_fcoe_disable,
.ndo_fcoe_ddp_setup = i40e_fcoe_ddp_get,
.ndo_fcoe_ddp_done = i40e_fcoe_ddp_put,
.ndo_fcoe_ddp_target = i40e_fcoe_ddp_target,
.ndo_set_features = i40e_fcoe_set_features,
};
/**
* i40e_fcoe_config_netdev - prepares the netdev for an FCoE VSI
* @netdev: pointer to the netdev being configured
* @vsi: pointer to the associated VSI struct
*
* Sets up the netdev features, MAC filters and ops for an FCoE VSI.
**/
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
{
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_pf *pf = vsi->back;
if (vsi->type != I40E_VSI_FCOE)
return;
netdev->features = (NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER);
netdev->vlan_features = netdev->features;
netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER);
netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1;
netdev->features |= NETIF_F_ALL_FCOE;
netdev->vlan_features |= NETIF_F_ALL_FCOE;
netdev->hw_features |= netdev->features;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
netdev->mtu = FCOE_MTU;
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
/* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.san_addr);
/* fcoe netdev ops */
netdev->netdev_ops = &i40e_fcoe_netdev_ops;
}
/**
* i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
* @pf: the pf that VSI is associated with
*
**/
void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
u16 seid;
int i;
if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
return;
BUG_ON(!pf->vsi[pf->lan_vsi]);
for (i = 0; i < pf->num_alloc_vsi; i++) {
vsi = pf->vsi[i];
if (vsi && vsi->type == I40E_VSI_FCOE) {
dev_warn(&pf->pdev->dev,
"FCoE VSI already created\n");
return;
}
}
seid = pf->vsi[pf->lan_vsi]->seid;
vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
if (vsi) {
dev_dbg(&pf->pdev->dev,
"Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
vsi->seid, vsi->id, vsi->uplink_seid, seid);
} else {
dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
}
}
| gpl-2.0 |
suesai/linux | drivers/mtd/ubi/fastmap-wl.c | 664 | 8696 | /*
* Copyright (c) 2012 Linutronix GmbH
* Copyright (c) 2014 sigma star gmbh
* Author: Richard Weinberger <richard@nod.at>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
*/
/**
* update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
* @wrk: the work description object
*/
static void update_fastmap_work_fn(struct work_struct *wrk)
{
struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
ubi_update_fastmap(ubi);
spin_lock(&ubi->wl_lock);
ubi->fm_work_scheduled = 0;
spin_unlock(&ubi->wl_lock);
}
/**
* find_anchor_wl_entry - find the wear-leveling entry to be used as the
* anchor PEB.
* @root: the RB-tree to look in
*/
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e, *victim = NULL;
int max_ec = UBI_MAX_ERASECOUNTER;
ubi_rb_for_each_entry(p, e, root, u.rb) {
if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
victim = e;
max_ec = e->ec;
}
}
return victim;
}
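/*
* Example (illustrative values): with free entries {pnum 2, ec 10},
* {pnum 5, ec 3} and {pnum 80, ec 1}, and assuming UBI_FM_MAX_START is
* 64, the walk skips pnum 80 (outside the anchor area) and returns
* pnum 5 - the lowest-erase-count PEB that can still hold the fastmap
* anchor.
*/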
/**
* return_unused_pool_pebs - returns unused PEBs to the free tree.
* @ubi: UBI device description object
* @pool: fastmap pool description object
*/
static void return_unused_pool_pebs(struct ubi_device *ubi,
struct ubi_fm_pool *pool)
{
int i;
struct ubi_wl_entry *e;
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
wl_tree_add(e, &ubi->free);
ubi->free_count++;
}
}
static int anchor_pebs_avalible(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
ubi_rb_for_each_entry(p, e, root, u.rb)
if (e->pnum < UBI_FM_MAX_START)
return 1;
return 0;
}
/**
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
* @ubi: UBI device description object
* @anchor: This PEB will be used as anchor PEB by fastmap
*
* The function returns a physical erase block with a given maximal number
* and removes it from the wl subsystem.
* Must be called with wl_lock held!
*/
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
goto out;
if (anchor)
e = find_anchor_wl_entry(&ubi->free);
else
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e)
goto out;
self_check_in_wl_tree(ubi, e, &ubi->free);
/* remove it from the free list,
* the wl subsystem no longer knows this erase block */
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
out:
return e;
}
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
*/
void ubi_refill_pools(struct ubi_device *ubi)
{
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_wl_entry *e;
int enough;
spin_lock(&ubi->wl_lock);
return_unused_pool_pebs(ubi, wl_pool);
return_unused_pool_pebs(ubi, pool);
wl_pool->size = 0;
pool->size = 0;
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
if (!ubi->free.rb_node)
break;
e = wl_get_wle(ubi);
if (!e)
break;
pool->pebs[pool->size] = e->pnum;
pool->size++;
} else
enough++;
if (wl_pool->size < wl_pool->max_size) {
if (!ubi->free.rb_node ||
(ubi->free_count - ubi->beb_rsvd_pebs < 5))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
self_check_in_wl_tree(ubi, e, &ubi->free);
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
wl_pool->pebs[wl_pool->size] = e->pnum;
wl_pool->size++;
} else
enough++;
if (enough == 2)
break;
}
wl_pool->used = 0;
pool->used = 0;
spin_unlock(&ubi->wl_lock);
}
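/*
* Behaviour sketch for the loop above: each iteration tries to add one
* PEB to the user pool and one to the WL pool. The user pool accepts
* any free PEB from wl_get_wle(); the WL pool only takes one while
* ubi->free_count - ubi->beb_rsvd_pebs >= 5, so the bad-block reserve
* is never consumed for wear-leveling. "enough" reaches 2 only once
* both pools sit at max_size, which terminates the loop.
*/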
/**
* ubi_wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure.
* Returns with ubi->fm_eba_sem held in read mode!
*/
int ubi_wl_get_peb(struct ubi_device *ubi)
{
int ret, retried = 0;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
again:
down_read(&ubi->fm_eba_sem);
spin_lock(&ubi->wl_lock);
/* We check here also for the WL pool because at this point we can
* refill the WL pool synchronously. */
if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_eba_sem);
ret = ubi_update_fastmap(ubi);
if (ret) {
ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
down_read(&ubi->fm_eba_sem);
return -ENOSPC;
}
down_read(&ubi->fm_eba_sem);
spin_lock(&ubi->wl_lock);
}
if (pool->used == pool->size) {
spin_unlock(&ubi->wl_lock);
if (retried) {
ubi_err(ubi, "Unable to get a free PEB from user WL pool");
ret = -ENOSPC;
goto out;
}
retried = 1;
up_read(&ubi->fm_eba_sem);
goto again;
}
ubi_assert(pool->used < pool->size);
ret = pool->pebs[pool->used++];
prot_queue_add(ubi, ubi->lookuptbl[ret]);
spin_unlock(&ubi->wl_lock);
out:
return ret;
}
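/*
* Control-flow sketch: callers normally get a PEB straight out of
* fm_pool. If either pool is exhausted, the fastmap is rewritten
* (refilling both pools) and the lookup is retried exactly once; a
* second miss returns -ENOSPC. On every exit path fm_eba_sem is still
* held in read mode, matching the locking contract stated above.
*/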
/**
* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
* @ubi: UBI device description object
*/
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
if (pool->used == pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
* Let's fail here and refill/update it as soon as possible. */
if (!ubi->fm_work_scheduled) {
ubi->fm_work_scheduled = 1;
schedule_work(&ubi->fm_work);
}
return NULL;
}
pnum = pool->pebs[pool->used++];
return ubi->lookuptbl[pnum];
}
/**
* ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
* @ubi: UBI device description object
*/
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
}
ubi->wl_scheduled = 1;
spin_unlock(&ubi->wl_lock);
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
spin_lock(&ubi->wl_lock);
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
return -ENOMEM;
}
wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
schedule_ubi_work(ubi, wrk);
return 0;
}
/**
* ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
* sub-system.
* see: ubi_wl_put_peb()
*
* @ubi: UBI device description object
* @fm_e: physical eraseblock to return
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if this physical eraseblock has to be tortured
*/
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
int lnum, int torture)
{
struct ubi_wl_entry *e;
int vol_id, pnum = fm_e->pnum;
dbg_wl("PEB %d", pnum);
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
/* This can happen if we recovered from a fastmap the very
* first time and are now writing a new one. In this case the wl system
* has never seen any PEB used by the original fastmap.
*/
if (!e) {
e = fm_e;
ubi_assert(e->ec >= 0);
ubi->lookuptbl[pnum] = e;
}
spin_unlock(&ubi->wl_lock);
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
return schedule_erase(ubi, e, vol_id, lnum, torture);
}
/**
* ubi_is_erase_work - checks whether a work is erase work.
* @wrk: The work object to be checked
*/
int ubi_is_erase_work(struct ubi_work *wrk)
{
return wrk->func == erase_worker;
}
static void ubi_fastmap_close(struct ubi_device *ubi)
{
int i;
flush_work(&ubi->fm_work);
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
}
kfree(ubi->fm);
}
/**
* may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
* See find_mean_wl_entry()
*
* @ubi: UBI device description object
* @e: physical eraseblock to return
* @root: RB tree to test against.
*/
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
struct ubi_wl_entry *e,
struct rb_root *root)
{
if (e && !ubi->fm_disabled && !ubi->fm &&
e->pnum < UBI_FM_MAX_START)
e = rb_entry(rb_next(root->rb_node),
struct ubi_wl_entry, u.rb);
return e;
}
| gpl-2.0 |
xhjcehust/virtio_net_driver | net/tipc/sysctl.c | 664 | 2361 | /*
* net/tipc/sysctl.c: sysctl interface to TIPC subsystem
*
* Copyright (c) 2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include <linux/sysctl.h>
static struct ctl_table_header *tipc_ctl_hdr;
static struct ctl_table tipc_table[] = {
{
.procname = "tipc_rmem",
.data = &sysctl_tipc_rmem,
.maxlen = sizeof(sysctl_tipc_rmem),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{}
};
int tipc_register_sysctl(void)
{
tipc_ctl_hdr = register_net_sysctl(&init_net, "net/tipc", tipc_table);
if (tipc_ctl_hdr == NULL)
return -ENOMEM;
return 0;
}
void tipc_unregister_sysctl(void)
{
unregister_net_sysctl_table(tipc_ctl_hdr);
}
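/*
* Usage sketch (userspace, not part of this file): register_net_sysctl()
* above exposes sysctl_tipc_rmem - assumed here to be the usual
* three-element min/default/max array - as
* /proc/sys/net/tipc/tipc_rmem, so
*
* cat /proc/sys/net/tipc/tipc_rmem
* echo "<min> <default> <max>" > /proc/sys/net/tipc/tipc_rmem
*
* read and tune the TIPC receive buffer limits at run time.
*/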
| gpl-2.0 |
TheTypoMaster/linux | security/selinux/ss/conditional.c | 920 | 14159 | /* Authors: Karl MacMillan <kmacmillan@tresys.com>
* Frank Mayer <mayerf@tresys.com>
*
* Copyright (C) 2003 - 2004 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include "security.h"
#include "conditional.h"
/*
* cond_evaluate_expr evaluates a conditional expr
* in reverse polish notation. It returns true (1), false (0),
* or undefined (-1). Undefined occurs when the expression
* exceeds the stack depth of COND_EXPR_MAXDEPTH.
*/
static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
{
struct cond_expr *cur;
int s[COND_EXPR_MAXDEPTH];
int sp = -1;
for (cur = expr; cur; cur = cur->next) {
switch (cur->expr_type) {
case COND_BOOL:
if (sp == (COND_EXPR_MAXDEPTH - 1))
return -1;
sp++;
s[sp] = p->bool_val_to_struct[cur->bool - 1]->state;
break;
case COND_NOT:
if (sp < 0)
return -1;
s[sp] = !s[sp];
break;
case COND_OR:
if (sp < 1)
return -1;
sp--;
s[sp] |= s[sp + 1];
break;
case COND_AND:
if (sp < 1)
return -1;
sp--;
s[sp] &= s[sp + 1];
break;
case COND_XOR:
if (sp < 1)
return -1;
sp--;
s[sp] ^= s[sp + 1];
break;
case COND_EQ:
if (sp < 1)
return -1;
sp--;
s[sp] = (s[sp] == s[sp + 1]);
break;
case COND_NEQ:
if (sp < 1)
return -1;
sp--;
s[sp] = (s[sp] != s[sp + 1]);
break;
default:
return -1;
}
}
return s[0];
}
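/*
* Worked example (not kernel code): the policy expression "a || !b" is
* stored in RPN as
*
* COND_BOOL(a) COND_BOOL(b) COND_NOT COND_OR
*
* With a = 0 and b = 0 the stack evolves as [0] -> [0,0] -> [0,1] ->
* [1], so the expression is true. An expression that tries to push
* more than COND_EXPR_MAXDEPTH operands evaluates to -1 (undefined)
* instead.
*/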
/*
* evaluate_cond_node evaluates the conditional stored in
* a struct cond_node and if the result is different than the
* current state of the node it sets the rules in the true/false
* list appropriately. If the result of the expression is undefined
* all of the rules are disabled for safety.
*/
int evaluate_cond_node(struct policydb *p, struct cond_node *node)
{
int new_state;
struct cond_av_list *cur;
new_state = cond_evaluate_expr(p, node->expr);
if (new_state != node->cur_state) {
node->cur_state = new_state;
if (new_state == -1)
printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
/* turn the rules on or off */
for (cur = node->true_list; cur; cur = cur->next) {
if (new_state <= 0)
cur->node->key.specified &= ~AVTAB_ENABLED;
else
cur->node->key.specified |= AVTAB_ENABLED;
}
for (cur = node->false_list; cur; cur = cur->next) {
/* -1 or 1 */
if (new_state)
cur->node->key.specified &= ~AVTAB_ENABLED;
else
cur->node->key.specified |= AVTAB_ENABLED;
}
}
return 0;
}
int cond_policydb_init(struct policydb *p)
{
int rc;
p->bool_val_to_struct = NULL;
p->cond_list = NULL;
rc = avtab_init(&p->te_cond_avtab);
if (rc)
return rc;
return 0;
}
static void cond_av_list_destroy(struct cond_av_list *list)
{
struct cond_av_list *cur, *next;
for (cur = list; cur; cur = next) {
next = cur->next;
/* the avtab_ptr_t node is destroyed by the avtab */
kfree(cur);
}
}
static void cond_node_destroy(struct cond_node *node)
{
struct cond_expr *cur_expr, *next_expr;
for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) {
next_expr = cur_expr->next;
kfree(cur_expr);
}
cond_av_list_destroy(node->true_list);
cond_av_list_destroy(node->false_list);
kfree(node);
}
static void cond_list_destroy(struct cond_node *list)
{
struct cond_node *next, *cur;
if (list == NULL)
return;
for (cur = list; cur; cur = next) {
next = cur->next;
cond_node_destroy(cur);
}
}
void cond_policydb_destroy(struct policydb *p)
{
kfree(p->bool_val_to_struct);
avtab_destroy(&p->te_cond_avtab);
cond_list_destroy(p->cond_list);
}
int cond_init_bool_indexes(struct policydb *p)
{
kfree(p->bool_val_to_struct);
p->bool_val_to_struct =
kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
if (!p->bool_val_to_struct)
return -ENOMEM;
return 0;
}
int cond_destroy_bool(void *key, void *datum, void *p)
{
kfree(key);
kfree(datum);
return 0;
}
int cond_index_bool(void *key, void *datum, void *datap)
{
struct policydb *p;
struct cond_bool_datum *booldatum;
struct flex_array *fa;
booldatum = datum;
p = datap;
if (!booldatum->value || booldatum->value > p->p_bools.nprim)
return -EINVAL;
fa = p->sym_val_to_name[SYM_BOOLS];
if (flex_array_put_ptr(fa, booldatum->value - 1, key,
GFP_KERNEL | __GFP_ZERO))
BUG();
p->bool_val_to_struct[booldatum->value - 1] = booldatum;
return 0;
}
static int bool_isvalid(struct cond_bool_datum *b)
{
if (!(b->state == 0 || b->state == 1))
return 0;
return 1;
}
int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
{
char *key = NULL;
struct cond_bool_datum *booldatum;
__le32 buf[3];
u32 len;
int rc;
booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL);
if (!booldatum)
return -ENOMEM;
rc = next_entry(buf, fp, sizeof buf);
if (rc)
goto err;
booldatum->value = le32_to_cpu(buf[0]);
booldatum->state = le32_to_cpu(buf[1]);
rc = -EINVAL;
if (!bool_isvalid(booldatum))
goto err;
len = le32_to_cpu(buf[2]);
rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
if (!key)
goto err;
rc = next_entry(key, fp, len);
if (rc)
goto err;
key[len] = '\0';
rc = hashtab_insert(h, key, booldatum);
if (rc)
goto err;
return 0;
err:
cond_destroy_bool(key, booldatum, NULL);
return rc;
}
struct cond_insertf_data {
struct policydb *p;
struct cond_av_list *other;
struct cond_av_list *head;
struct cond_av_list *tail;
};
static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr)
{
struct cond_insertf_data *data = ptr;
struct policydb *p = data->p;
struct cond_av_list *other = data->other, *list, *cur;
struct avtab_node *node_ptr;
u8 found;
int rc = -EINVAL;
/*
* For type rules we have to make certain there aren't any
* conflicting rules by searching the te_avtab and the
* cond_te_avtab.
*/
if (k->specified & AVTAB_TYPE) {
if (avtab_search(&p->te_avtab, k)) {
printk(KERN_ERR "SELinux: type rule already exists outside of a conditional.\n");
goto err;
}
/*
* If we are reading the false list other will be a pointer to
* the true list. We can have duplicate entries if there is only
* 1 other entry and it is in our true list.
*
* If we are reading the true list (other == NULL) there shouldn't
* be any other entries.
*/
if (other) {
node_ptr = avtab_search_node(&p->te_cond_avtab, k);
if (node_ptr) {
if (avtab_search_node_next(node_ptr, k->specified)) {
printk(KERN_ERR "SELinux: too many conflicting type rules.\n");
goto err;
}
found = 0;
for (cur = other; cur; cur = cur->next) {
if (cur->node == node_ptr) {
found = 1;
break;
}
}
if (!found) {
printk(KERN_ERR "SELinux: conflicting type rules.\n");
goto err;
}
}
} else {
if (avtab_search(&p->te_cond_avtab, k)) {
printk(KERN_ERR "SELinux: conflicting type rules when adding type rule for true.\n");
goto err;
}
}
}
node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
if (!node_ptr) {
printk(KERN_ERR "SELinux: could not insert rule.\n");
rc = -ENOMEM;
goto err;
}
list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL);
if (!list) {
rc = -ENOMEM;
goto err;
}
list->node = node_ptr;
if (!data->head)
data->head = list;
else
data->tail->next = list;
data->tail = list;
return 0;
err:
cond_av_list_destroy(data->head);
data->head = NULL;
return rc;
}
static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other)
{
int i, rc;
__le32 buf[1];
u32 len;
struct cond_insertf_data data;
*ret_list = NULL;
len = 0;
rc = next_entry(buf, fp, sizeof(u32));
if (rc)
return rc;
len = le32_to_cpu(buf[0]);
if (len == 0)
return 0;
data.p = p;
data.other = other;
data.head = NULL;
data.tail = NULL;
for (i = 0; i < len; i++) {
rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf,
&data);
if (rc)
return rc;
}
*ret_list = data.head;
return 0;
}
static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
{
if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
printk(KERN_ERR "SELinux: conditional expressions uses unknown operator.\n");
return 0;
}
if (expr->bool > p->p_bools.nprim) {
printk(KERN_ERR "SELinux: conditional expressions uses unknown bool.\n");
return 0;
}
return 1;
}
static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
{
__le32 buf[2];
u32 len, i;
int rc;
struct cond_expr *expr = NULL, *last = NULL;
rc = next_entry(buf, fp, sizeof(u32) * 2);
if (rc)
goto err;
node->cur_state = le32_to_cpu(buf[0]);
/* expr */
len = le32_to_cpu(buf[1]);
for (i = 0; i < len; i++) {
rc = next_entry(buf, fp, sizeof(u32) * 2);
if (rc)
goto err;
rc = -ENOMEM;
expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL);
if (!expr)
goto err;
expr->expr_type = le32_to_cpu(buf[0]);
expr->bool = le32_to_cpu(buf[1]);
if (!expr_isvalid(p, expr)) {
rc = -EINVAL;
kfree(expr);
goto err;
}
if (i == 0)
node->expr = expr;
else
last->next = expr;
last = expr;
}
rc = cond_read_av_list(p, fp, &node->true_list, NULL);
if (rc)
goto err;
rc = cond_read_av_list(p, fp, &node->false_list, node->true_list);
if (rc)
goto err;
return 0;
err:
cond_node_destroy(node);
return rc;
}
int cond_read_list(struct policydb *p, void *fp)
{
struct cond_node *node, *last = NULL;
__le32 buf[1];
u32 i, len;
int rc;
rc = next_entry(buf, fp, sizeof buf);
if (rc)
return rc;
len = le32_to_cpu(buf[0]);
rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
if (rc)
goto err;
for (i = 0; i < len; i++) {
rc = -ENOMEM;
node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
if (!node)
goto err;
rc = cond_read_node(p, node, fp);
if (rc)
goto err;
if (i == 0)
p->cond_list = node;
else
last->next = node;
last = node;
}
return 0;
err:
cond_list_destroy(p->cond_list);
p->cond_list = NULL;
return rc;
}
int cond_write_bool(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct cond_bool_datum *booldatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
__le32 buf[3];
u32 len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(booldatum->value);
buf[1] = cpu_to_le32(booldatum->state);
buf[2] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
return 0;
}
/*
* cond_write_cond_av_list doesn't write out the av_list nodes.
* Instead it writes out the key/value pairs from the avtab. This
* is necessary because there is no way to uniquely identify rules
* in the avtab so it is not possible to associate individual rules
* in the avtab with a conditional without saving them as part of
* the conditional. This means that the avtab with the conditional
* rules will not be saved but will be rebuilt on policy load.
*/
static int cond_write_av_list(struct policydb *p,
struct cond_av_list *list, struct policy_file *fp)
{
__le32 buf[1];
struct cond_av_list *cur_list;
u32 len;
int rc;
len = 0;
for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
if (len == 0)
return 0;
for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
rc = avtab_write_item(p, cur_list->node, fp);
if (rc)
return rc;
}
return 0;
}
static int cond_write_node(struct policydb *p, struct cond_node *node,
struct policy_file *fp)
{
struct cond_expr *cur_expr;
__le32 buf[2];
int rc;
u32 len = 0;
buf[0] = cpu_to_le32(node->cur_state);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
buf[0] = cpu_to_le32(cur_expr->expr_type);
buf[1] = cpu_to_le32(cur_expr->bool);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
}
rc = cond_write_av_list(p, node->true_list, fp);
if (rc)
return rc;
rc = cond_write_av_list(p, node->false_list, fp);
if (rc)
return rc;
return 0;
}
int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
{
struct cond_node *cur;
u32 len;
__le32 buf[1];
int rc;
len = 0;
for (cur = list; cur != NULL; cur = cur->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur = list; cur != NULL; cur = cur->next) {
rc = cond_write_node(p, cur, fp);
if (rc)
return rc;
}
return 0;
}
/* Determine whether additional permissions are granted by the conditional
* av table, and if so, add them to the result
*/
void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd)
{
struct avtab_node *node;
if (!ctab || !key || !avd)
return;
for (node = avtab_search_node(ctab, key); node;
node = avtab_search_node_next(node, key->specified)) {
if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
avd->allowed |= node->datum.data;
if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
/* Since a '0' in an auditdeny mask represents a
* permission we do NOT want to audit (dontaudit), we use
* the '&' operand to ensure that all '0's in the mask
* are retained (much unlike the allow and auditallow cases).
*/
avd->auditdeny &= node->datum.data;
if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
avd->auditallow |= node->datum.data;
}
return;
}
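/*
* Bit-level sketch of the auditdeny case above: with a base auditdeny
* mask of 0xffff, an enabled conditional dontaudit rule whose datum is
* 0xfffe (bit 0 cleared for the permission not to be audited) yields
* 0xffff & 0xfffe = 0xfffe, preserving every other audit-on-deny bit.
* allowed and auditallow accumulate with '|' instead, since additional
* rules can only grant more.
*/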
| gpl-2.0 |
ARMP/bproj-black | drivers/acpi/acpica/utresrc.c | 2968 | 16620 | /*******************************************************************************
*
* Module Name: utresrc - Resource management utilities
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "amlresrc.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
* Used by both the disassembler and the debugger resource dump routines
*/
const char *acpi_gbl_bm_decode[] = {
"NotBusMaster",
"BusMaster"
};
const char *acpi_gbl_config_decode[] = {
"0 - Good Configuration",
"1 - Acceptable Configuration",
"2 - Suboptimal Configuration",
"3 - ***Invalid Configuration***",
};
const char *acpi_gbl_consume_decode[] = {
"ResourceProducer",
"ResourceConsumer"
};
const char *acpi_gbl_dec_decode[] = {
"PosDecode",
"SubDecode"
};
const char *acpi_gbl_he_decode[] = {
"Level",
"Edge"
};
const char *acpi_gbl_io_decode[] = {
"Decode10",
"Decode16"
};
const char *acpi_gbl_ll_decode[] = {
"ActiveHigh",
"ActiveLow"
};
const char *acpi_gbl_max_decode[] = {
"MaxNotFixed",
"MaxFixed"
};
const char *acpi_gbl_mem_decode[] = {
"NonCacheable",
"Cacheable",
"WriteCombining",
"Prefetchable"
};
const char *acpi_gbl_min_decode[] = {
"MinNotFixed",
"MinFixed"
};
const char *acpi_gbl_mtp_decode[] = {
"AddressRangeMemory",
"AddressRangeReserved",
"AddressRangeACPI",
"AddressRangeNVS"
};
const char *acpi_gbl_rng_decode[] = {
"InvalidRanges",
"NonISAOnlyRanges",
"ISAOnlyRanges",
"EntireRange"
};
const char *acpi_gbl_rw_decode[] = {
"ReadOnly",
"ReadWrite"
};
const char *acpi_gbl_shr_decode[] = {
"Exclusive",
"Shared"
};
const char *acpi_gbl_siz_decode[] = {
"Transfer8",
"Transfer8_16",
"Transfer16",
"InvalidSize"
};
const char *acpi_gbl_trs_decode[] = {
"DenseTranslation",
"SparseTranslation"
};
const char *acpi_gbl_ttp_decode[] = {
"TypeStatic",
"TypeTranslation"
};
const char *acpi_gbl_typ_decode[] = {
"Compatibility",
"TypeA",
"TypeB",
"TypeF"
};
#endif
/*
* Base sizes of the raw AML resource descriptors, indexed by resource type.
* Zero indicates a reserved (and therefore invalid) resource type.
*/
const u8 acpi_gbl_resource_aml_sizes[] = {
/* Small descriptors */
0,
0,
0,
0,
ACPI_AML_SIZE_SMALL(struct aml_resource_irq),
ACPI_AML_SIZE_SMALL(struct aml_resource_dma),
ACPI_AML_SIZE_SMALL(struct aml_resource_start_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
0,
0,
0,
0,
ACPI_AML_SIZE_SMALL(struct aml_resource_vendor_small),
ACPI_AML_SIZE_SMALL(struct aml_resource_end_tag),
/* Large descriptors */
0,
ACPI_AML_SIZE_LARGE(struct aml_resource_memory24),
ACPI_AML_SIZE_LARGE(struct aml_resource_generic_register),
0,
ACPI_AML_SIZE_LARGE(struct aml_resource_vendor_large),
ACPI_AML_SIZE_LARGE(struct aml_resource_memory32),
ACPI_AML_SIZE_LARGE(struct aml_resource_fixed_memory32),
ACPI_AML_SIZE_LARGE(struct aml_resource_address32),
ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
};
/*
* Resource types, used to validate the resource length field.
* The length of fixed-length types must match exactly, variable
* lengths must meet the minimum required length, etc.
* Zero indicates a reserved (and therefore invalid) resource type.
*/
static const u8 acpi_gbl_resource_types[] = {
/* Small descriptors */
0,
0,
0,
0,
ACPI_SMALL_VARIABLE_LENGTH,
ACPI_FIXED_LENGTH,
ACPI_SMALL_VARIABLE_LENGTH,
ACPI_FIXED_LENGTH,
ACPI_FIXED_LENGTH,
ACPI_FIXED_LENGTH,
0,
0,
0,
0,
ACPI_VARIABLE_LENGTH,
ACPI_FIXED_LENGTH,
/* Large descriptors */
0,
ACPI_FIXED_LENGTH,
ACPI_FIXED_LENGTH,
0,
ACPI_VARIABLE_LENGTH,
ACPI_FIXED_LENGTH,
ACPI_FIXED_LENGTH,
ACPI_VARIABLE_LENGTH,
ACPI_VARIABLE_LENGTH,
ACPI_VARIABLE_LENGTH,
ACPI_VARIABLE_LENGTH,
ACPI_FIXED_LENGTH
};
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
*
* PARAMETERS: Aml - Pointer to the raw AML resource template
* aml_length - Length of the entire template
* user_function - Called once for each descriptor found. If
* NULL, a pointer to the end_tag is returned
* Context - Passed to user_function
*
* RETURN: Status
*
* DESCRIPTION: Walk a raw AML resource list(buffer). User function called
* once for each resource found.
*
******************************************************************************/
acpi_status
acpi_ut_walk_aml_resources(u8 * aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function, void **context)
{
acpi_status status;
u8 *end_aml;
u8 resource_index;
u32 length;
u32 offset = 0;
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
/* The absolute minimum resource template is one end_tag descriptor */
if (aml_length < sizeof(struct aml_resource_end_tag)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Point to the end of the resource template buffer */
end_aml = aml + aml_length;
/* Walk the byte list, abort on any invalid descriptor type or length */
while (aml < end_aml) {
/* Validate the Resource Type and Resource Length */
status = acpi_ut_validate_resource(aml, &resource_index);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the length of this descriptor */
length = acpi_ut_get_descriptor_length(aml);
/* Invoke the user function */
if (user_function) {
status =
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
return (status);
}
}
/* An end_tag descriptor terminates this resource template */
if (acpi_ut_get_resource_type(aml) ==
ACPI_RESOURCE_NAME_END_TAG) {
/*
* There must be at least one more byte in the buffer for
* the 2nd byte of the end_tag
*/
if ((aml + 1) >= end_aml) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Return the pointer to the end_tag if requested */
if (!user_function) {
*context = aml;
}
/* Normal exit */
return_ACPI_STATUS(AE_OK);
}
aml += length;
offset += length;
}
/* Did not find an end_tag descriptor */
return (AE_AML_NO_RESOURCE_END_TAG);
}
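/*
* Minimal usage sketch (hypothetical helper, not ACPICA code): a
* user_function that merely counts descriptors while the walker
* validates a template. The names count_descriptor and count are
* illustrative only; the caller would pass (void **)&counter as the
* context argument.
*/
static acpi_status
count_descriptor(u8 *aml, u32 length, u32 offset, u8 resource_index,
void **context)
{
u32 *count = (u32 *)context; /* caller-supplied counter */
(*count)++; /* one valid descriptor seen */
return (AE_OK); /* AE_OK keeps the walk going */
}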
/*******************************************************************************
*
* FUNCTION: acpi_ut_validate_resource
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
* return_index - Where the resource index is returned. NULL
* if the index is not required.
*
* RETURN: Status, and optionally the Index into the global resource tables
*
* DESCRIPTION: Validate an AML resource descriptor by checking the Resource
* Type and Resource Length. Returns an index into the global
* resource information/dispatch tables for later use.
*
******************************************************************************/
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
{
u8 resource_type;
u8 resource_index;
acpi_rs_length resource_length;
acpi_rs_length minimum_resource_length;
ACPI_FUNCTION_ENTRY();
/*
* 1) Validate the resource_type field (Byte 0)
*/
resource_type = ACPI_GET8(aml);
/*
* Byte 0 contains the descriptor name (Resource Type)
* Examine the large/small bit in the resource header
*/
if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
return (AE_AML_INVALID_RESOURCE_TYPE);
}
/*
* Large Resource Type -- bits 6:0 contain the name
* Translate range 0x80-0x8B to index range 0x10-0x1B
*/
resource_index = (u8) (resource_type - 0x70);
} else {
/*
* Small Resource Type -- bits 6:3 contain the name
* Shift range to index range 0x00-0x0F
*/
resource_index = (u8)
((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
}
/* Check validity of the resource type, zero indicates name is invalid */
if (!acpi_gbl_resource_types[resource_index]) {
return (AE_AML_INVALID_RESOURCE_TYPE);
}
/*
* 2) Validate the resource_length field. This ensures that the length
* is at least reasonable, and guarantees that it is non-zero.
*/
resource_length = acpi_ut_get_resource_length(aml);
minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
/* Validate based upon the type of resource - fixed length or variable */
switch (acpi_gbl_resource_types[resource_index]) {
case ACPI_FIXED_LENGTH:
/* Fixed length resource, length must match exactly */
if (resource_length != minimum_resource_length) {
return (AE_AML_BAD_RESOURCE_LENGTH);
}
break;
case ACPI_VARIABLE_LENGTH:
/* Variable length resource, length must be at least the minimum */
if (resource_length < minimum_resource_length) {
return (AE_AML_BAD_RESOURCE_LENGTH);
}
break;
case ACPI_SMALL_VARIABLE_LENGTH:
/* Small variable length resource, length can be (Min) or (Min-1) */
if ((resource_length > minimum_resource_length) ||
(resource_length < (minimum_resource_length - 1))) {
return (AE_AML_BAD_RESOURCE_LENGTH);
}
break;
default:
/* Shouldn't happen (because of validation earlier), but be sure */
return (AE_AML_INVALID_RESOURCE_TYPE);
}
/* Optionally return the resource table index */
if (return_index) {
*return_index = resource_index;
}
return (AE_OK);
}
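/*
* Decode example (illustrative): a small IRQ descriptor starting with
* byte 0x22 has the large bit clear, so resource_index = (0x22 & 0x78)
* >> 3 = 4 (struct aml_resource_irq) and resource_length = 0x22 & 0x07
* = 2. Since that type is ACPI_SMALL_VARIABLE_LENGTH with a minimum of
* 3, length 2 is accepted as (minimum - 1): an IRQ descriptor without
* the optional flags byte.
*/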
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_type
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
*
* RETURN: The Resource Type with no extraneous bits (except the
* Large/Small descriptor bit -- this is left alone)
*
* DESCRIPTION: Extract the Resource Type/Name from the first byte of
* a resource descriptor.
*
******************************************************************************/
u8 acpi_ut_get_resource_type(void *aml)
{
ACPI_FUNCTION_ENTRY();
/*
* Byte 0 contains the descriptor name (Resource Type)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
/* Large Resource Type -- bits 6:0 contain the name */
return (ACPI_GET8(aml));
} else {
/* Small Resource Type -- bits 6:3 contain the name */
return ((u8) (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_SMALL_MASK));
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_length
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
*
* RETURN: Byte Length
*
* DESCRIPTION: Get the "Resource Length" of a raw AML descriptor. By
* definition, this does not include the size of the descriptor
* header or the length field itself.
*
******************************************************************************/
u16 acpi_ut_get_resource_length(void *aml)
{
acpi_rs_length resource_length;
ACPI_FUNCTION_ENTRY();
/*
* Byte 0 contains the descriptor name (Resource Type)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
/* Large Resource type -- bytes 1-2 contain the 16-bit length */
ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
} else {
/* Small Resource type -- bits 2:0 of byte 0 contain the length */
resource_length = (u16) (ACPI_GET8(aml) &
ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK);
}
return (resource_length);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_header_length
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
*
* RETURN: Length of the AML header (depends on large/small descriptor)
*
* DESCRIPTION: Get the length of the header for this resource.
*
******************************************************************************/
u8 acpi_ut_get_resource_header_length(void *aml)
{
ACPI_FUNCTION_ENTRY();
/* Examine the large/small bit in the resource header */
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
return (sizeof(struct aml_resource_large_header));
} else {
return (sizeof(struct aml_resource_small_header));
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_descriptor_length
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
*
* RETURN: Byte length
*
* DESCRIPTION: Get the total byte length of a raw AML descriptor, including the
* length of the descriptor header and the length field itself.
* Used to walk descriptor lists.
*
******************************************************************************/
u32 acpi_ut_get_descriptor_length(void *aml)
{
ACPI_FUNCTION_ENTRY();
/*
* Get the Resource Length (does not include header length) and add
* the header length (depends on if this is a small or large resource)
*/
return (acpi_ut_get_resource_length(aml) +
acpi_ut_get_resource_header_length(aml));
}
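/*
* Example: a 32-bit memory range descriptor begins 0x85 0x11 0x00. The
* large bit is set, so bytes 1-2 hold the 16-bit resource length
* (0x0011 = 17), and the total descriptor length comes out as 17 + 3
* bytes of large-descriptor header = 20.
*/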
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_end_tag
*
* PARAMETERS: obj_desc - The resource template buffer object
* end_tag - Where the pointer to the end_tag is returned
*
* RETURN: Status, pointer to the end tag
*
* DESCRIPTION: Find the end_tag resource descriptor in an AML resource template
* Note: allows a buffer length of zero.
*
******************************************************************************/
acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
u8 ** end_tag)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
/* Allow a buffer length of zero */
if (!obj_desc->buffer.length) {
*end_tag = obj_desc->buffer.pointer;
return_ACPI_STATUS(AE_OK);
}
/* Validate the template and get a pointer to the end_tag */
status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
obj_desc->buffer.length, NULL,
(void **)end_tag);
return_ACPI_STATUS(status);
}
| gpl-2.0 |
Redmi-dev/android_kernel_xiaomi_msm8226 | arch/powerpc/kvm/book3s_64_mmu_hv.c | 3480 | 27022 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
#define NR_LPIDS (LPID_RSVD + 1)
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
long kvmppc_alloc_hpt(struct kvm *kvm)
{
unsigned long hpt;
unsigned long lpid;
struct revmap_entry *rev;
struct kvmppc_linear_info *li;
/* Allocate guest's hashed page table */
li = kvm_alloc_hpt();
if (li) {
/* using preallocated memory */
hpt = (ulong)li->base_virt;
kvm->arch.hpt_li = li;
} else {
/* using dynamic memory */
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
__GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
}
if (!hpt) {
pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
return -ENOMEM;
}
kvm->arch.hpt_virt = hpt;
/* Allocate reverse map array */
rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
if (!rev) {
pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
goto out_freehpt;
}
kvm->arch.revmap = rev;
/* Allocate the guest's logical partition ID */
do {
lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
if (lpid >= NR_LPIDS) {
pr_err("kvm_alloc_hpt: No LPIDs free\n");
goto out_freeboth;
}
} while (test_and_set_bit(lpid, lpid_inuse));
kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
kvm->arch.lpid = lpid;
pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
return 0;
out_freeboth:
vfree(rev);
out_freehpt:
free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
return -ENOMEM;
}
void kvmppc_free_hpt(struct kvm *kvm)
{
clear_bit(kvm->arch.lpid, lpid_inuse);
vfree(kvm->arch.revmap);
if (kvm->arch.hpt_li)
kvm_release_hpt(kvm->arch.hpt_li);
else
free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}
/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
return (pgsize == 0x10000) ? 0x1000 : 0;
}
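/*
* Resulting encodings (sketch): 4k pages set no extra bits; 64k pages
* set HPTE_V_LARGE in the first dword plus 0x1000 in the second; 16M
* pages set HPTE_V_LARGE alone. The second-dword bit is what lets the
* MMU tell the two large page sizes apart.
*/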
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
unsigned long porder)
{
unsigned long i;
unsigned long npages;
unsigned long hp_v, hp_r;
unsigned long addr, hash;
unsigned long psize;
unsigned long hp0, hp1;
long ret;
psize = 1ul << porder;
npages = memslot->npages >> (porder - PAGE_SHIFT);
/* VRMA can't be > 1TB */
if (npages > 1ul << (40 - porder))
npages = 1ul << (40 - porder);
/* Can't use more than 1 HPTE per HPTEG */
if (npages > HPT_NPTEG)
npages = HPT_NPTEG;
hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
hp1 = hpte1_pgsize_encoding(psize) |
HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
for (i = 0; i < npages; ++i) {
addr = i << porder;
/* can't use hpt_hash since va > 64 bits */
hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
/*
* We assume that the hash table is empty and no
* vcpus are using it at this stage. Since we create
* at most one HPTE per HPTEG, we just assume entry 7
* is available and use it.
*/
hash = (hash << 3) + 7;
hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
hp_r = hp1 | addr;
ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
if (ret != H_SUCCESS) {
pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
addr, ret);
break;
}
}
}
int kvmppc_mmu_hv_init(void)
{
unsigned long host_lpid, rsvd_lpid;
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
memset(lpid_inuse, 0, sizeof(lpid_inuse));
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
host_lpid = mfspr(SPRN_LPID); /* POWER7 */
rsvd_lpid = LPID_RSVD;
} else {
host_lpid = 0; /* PPC970 */
rsvd_lpid = MAX_LPID_970;
}
set_bit(host_lpid, lpid_inuse);
/* rsvd_lpid is reserved for use in partition switching */
set_bit(rsvd_lpid, lpid_inuse);
return 0;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}
/*
* This is called to get a reference to a guest page if there isn't
* one already in the kvm->arch.slot_phys[][] arrays.
*/
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
struct kvm_memory_slot *memslot,
unsigned long psize)
{
unsigned long start;
long np, err;
struct page *page, *hpage, *pages[1];
unsigned long s, pgsize;
unsigned long *physp;
unsigned int is_io, got, pgorder;
struct vm_area_struct *vma;
unsigned long pfn, i, npages;
physp = kvm->arch.slot_phys[memslot->id];
if (!physp)
return -EINVAL;
if (physp[gfn - memslot->base_gfn])
return 0;
is_io = 0;
got = 0;
page = NULL;
pgsize = psize;
err = -EINVAL;
start = gfn_to_hva_memslot(memslot, gfn);
/* Instantiate and get the page we want access to */
np = get_user_pages_fast(start, 1, 1, pages);
if (np != 1) {
/* Look up the vma for the page */
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, start);
if (!vma || vma->vm_start > start ||
start + psize > vma->vm_end ||
!(vma->vm_flags & VM_PFNMAP))
goto up_err;
is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
/* check alignment of pfn vs. requested page size */
if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
goto up_err;
up_read(&current->mm->mmap_sem);
} else {
page = pages[0];
got = KVMPPC_GOT_PAGE;
/* See if this is a large page */
s = PAGE_SIZE;
if (PageHuge(page)) {
hpage = compound_head(page);
s <<= compound_order(hpage);
/* Get the whole large page if slot alignment is ok */
if (s > psize && slot_is_aligned(memslot, s) &&
!(memslot->userspace_addr & (s - 1))) {
start &= ~(s - 1);
pgsize = s;
get_page(hpage);
put_page(page);
page = hpage;
}
}
if (s < psize)
goto out;
pfn = page_to_pfn(page);
}
npages = pgsize >> PAGE_SHIFT;
pgorder = __ilog2(npages);
physp += (gfn - memslot->base_gfn) & ~(npages - 1);
spin_lock(&kvm->arch.slot_phys_lock);
for (i = 0; i < npages; ++i) {
if (!physp[i]) {
physp[i] = ((pfn + i) << PAGE_SHIFT) +
got + is_io + pgorder;
got = 0;
}
}
spin_unlock(&kvm->arch.slot_phys_lock);
err = 0;
out:
if (got)
put_page(page);
return err;
up_err:
up_read(&current->mm->mmap_sem);
return err;
}
/*
* We come here on a H_ENTER call from the guest when we are not
* using mmu notifiers and we don't have the requested page pinned
* already.
*/
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel)
{
struct kvm *kvm = vcpu->kvm;
unsigned long psize, gpa, gfn;
struct kvm_memory_slot *memslot;
long ret;
if (kvm->arch.using_mmu_notifiers)
goto do_insert;
psize = hpte_page_size(pteh, ptel);
if (!psize)
return H_PARAMETER;
pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
/* Find the memslot (if any) for this address */
gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
if (!slot_is_aligned(memslot, psize))
return H_PARAMETER;
if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
return H_PARAMETER;
}
do_insert:
/* Protect linux PTE lookup from page table destruction */
rcu_read_lock_sched(); /* this disables preemption too */
vcpu->arch.pgdir = current->mm->pgd;
ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
rcu_read_unlock_sched();
if (ret == H_TOO_HARD) {
/* this can't happen */
pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
ret = H_RESOURCE; /* or something */
}
return ret;
}
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
u64 mask;
int i;
for (i = 0; i < vcpu->arch.slb_nr; i++) {
if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
continue;
if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
mask = ESID_MASK_1T;
else
mask = ESID_MASK;
if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
return &vcpu->arch.slb[i];
}
return NULL;
}
static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
unsigned long ea)
{
unsigned long ra_mask;
ra_mask = hpte_page_size(v, r) - 1;
return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_slb *slbe;
unsigned long slb_v;
unsigned long pp, key;
unsigned long v, gr;
unsigned long *hptep;
int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
/* Get SLB entry */
if (virtmode) {
slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
if (!slbe)
return -EINVAL;
slb_v = slbe->origv;
} else {
/* real mode access */
slb_v = vcpu->kvm->arch.vrma_slb_v;
}
/* Find the HPTE in the hash table */
index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
HPTE_V_VALID | HPTE_V_ABSENT);
if (index < 0)
return -ENOENT;
hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
v = hptep[0] & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
/* Unlock the HPTE */
asm volatile("lwsync" : : : "memory");
hptep[0] = v;
gpte->eaddr = eaddr;
gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
/* Get PP bits and key for permission check */
pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
key &= slb_v;
/* Calculate permissions */
gpte->may_read = hpte_read_permission(pp, key);
gpte->may_write = hpte_write_permission(pp, key);
gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
/* Storage key permission check for POWER7 */
if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
if (amrfield & 1)
gpte->may_read = 0;
if (amrfield & 2)
gpte->may_write = 0;
}
/* Get the guest physical address */
gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
return 0;
}
/*
* Quick test for whether an instruction is a load or a store.
* If the instruction is a load or a store, then this will indicate
* which it is, at least on server processors. (Embedded processors
* have some external PID instructions that don't follow the rule
* embodied here.) If the instruction isn't a load or store, then
* this doesn't return anything useful.
*/
static int instruction_is_store(unsigned int instr)
{
unsigned int mask;
mask = 0x10000000;
if ((instr & 0xfc000000) == 0x7c000000)
mask = 0x100; /* major opcode 31 */
return (instr & mask) != 0;
}
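/*
* Worked example (encodings hand-assembled for illustration):
* lwz r3,0(r4) is 0x80640000 - major opcode 32, bit 0x10000000 clear,
* so it reports a load. stw r3,0(r4) is 0x90640000 - opcode 36, bit
* 0x10000000 set, a store. For major opcode 31 (X-form), store XO
* values are load XO + 128, and that extra bit lands at 0x100 in the
* instruction word, which is why the mask changes.
*/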
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned long gpa, int is_store)
{
int ret;
u32 last_inst;
unsigned long srr0 = kvmppc_get_pc(vcpu);
/* We try to load the last instruction. We don't let
* emulate_instruction do it as it doesn't check what
* kvmppc_ld returns.
* If we fail, we just return to the guest and try executing it again.
*/
if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
return RESUME_GUEST;
vcpu->arch.last_inst = last_inst;
}
/*
* WARNING: We do not know for sure whether the instruction we just
* read from memory is the same that caused the fault in the first
* place. If the instruction we read is neither a load nor a store,
* then it can't access memory, so we don't need to worry about
* enforcing access permissions. So, assuming it is a load or
* store, we just check that its direction (load or store) is
* consistent with the original fault, since that's what we
* checked the access permissions against. If there is a mismatch
* we just return and retry the instruction.
*/
if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
return RESUME_GUEST;
/*
* Emulated accesses are handled by looking at the hash for
* translation once, then performing the access later. The
* translation could be invalidated in the meantime, at which
* point performing the subsequent memory access on the old
* physical address could possibly be a security hole for the
* guest (but not the host).
*
* This is less of an issue for MMIO stores since they aren't
* globally visible. It could be an issue for MMIO loads to
* a certain extent but we'll ignore it for now.
*/
vcpu->arch.paddr_accessed = gpa;
return kvmppc_emulate_mmio(run, vcpu);
}
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
unsigned long *hptep, hpte[3], r;
unsigned long mmu_seq, psize, pte_size;
unsigned long gfn, hva, pfn;
struct kvm_memory_slot *memslot;
unsigned long *rmap;
struct revmap_entry *rev;
struct page *page, *pages[1];
long index, ret, npages;
unsigned long is_io;
unsigned int writing, write_ok;
struct vm_area_struct *vma;
unsigned long rcbits;
/*
* Real-mode code has already searched the HPT and found the
* entry we're interested in. Lock the entry and check that
* it hasn't changed. If it has, just return and re-execute the
* instruction.
*/
if (ea != vcpu->arch.pgfault_addr)
return RESUME_GUEST;
index = vcpu->arch.pgfault_index;
hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
rev = &kvm->arch.revmap[index];
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
hpte[1] = hptep[1];
hpte[2] = r = rev->guest_rpte;
asm volatile("lwsync" : : : "memory");
hptep[0] = hpte[0];
preempt_enable();
if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
hpte[1] != vcpu->arch.pgfault_hpte[1])
return RESUME_GUEST;
/* Translate the logical address and get the page */
psize = hpte_page_size(hpte[0], r);
gfn = hpte_rpn(r, psize);
memslot = gfn_to_memslot(kvm, gfn);
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
dsisr & DSISR_ISSTORE);
}
if (!kvm->arch.using_mmu_notifiers)
return -EFAULT; /* should never get here */
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
is_io = 0;
pfn = 0;
page = NULL;
pte_size = PAGE_SIZE;
writing = (dsisr & DSISR_ISSTORE) != 0;
/* If writing != 0, then the HPTE must allow writing, if we get here */
write_ok = writing;
hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, writing, pages);
if (npages < 1) {
/* Check if it's an I/O mapping */
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, hva);
if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
(vma->vm_flags & VM_PFNMAP)) {
pfn = vma->vm_pgoff +
((hva - vma->vm_start) >> PAGE_SHIFT);
pte_size = psize;
is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
write_ok = vma->vm_flags & VM_WRITE;
}
up_read(&current->mm->mmap_sem);
if (!pfn)
return -EFAULT;
} else {
page = pages[0];
if (PageHuge(page)) {
page = compound_head(page);
pte_size <<= compound_order(page);
}
/* if the guest wants write access, see if that is OK */
if (!writing && hpte_is_writable(r)) {
pte_t *ptep, pte;
/*
* We need to protect against page table destruction
* while looking up and updating the pte.
*/
rcu_read_lock_sched();
ptep = find_linux_pte_or_hugepte(current->mm->pgd,
hva, NULL);
if (ptep && pte_present(*ptep)) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (pte_write(pte))
write_ok = 1;
}
rcu_read_unlock_sched();
}
pfn = page_to_pfn(page);
}
ret = -EFAULT;
if (psize > pte_size)
goto out_put;
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_io)) {
if (is_io)
return -EFAULT;
/*
* Allow guest to map emulated device memory as
* uncacheable, but actually make it cacheable.
*/
r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
}
/* Set the HPTE to point to pfn */
r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
goto out_unlock;
hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
rmap = &memslot->rmap[gfn - memslot->base_gfn];
lock_rmap(rmap);
/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
if (mmu_notifier_retry(vcpu, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
if (hptep[0] & HPTE_V_VALID) {
/* HPTE was previously valid, so we need to invalidate it */
unlock_rmap(rmap);
hptep[0] |= HPTE_V_ABSENT;
kvmppc_invalidate_hpte(kvm, hptep, index);
/* don't lose previous R and C bits */
r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
} else {
kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
}
hptep[1] = r;
eieio();
hptep[0] = hpte[0];
asm volatile("ptesync" : : : "memory");
preempt_enable();
if (page && hpte_is_writable(r))
SetPageDirty(page);
out_put:
if (page) {
/*
* We drop pages[0] here, not page because page might
* have been set to the head page of a compound, but
* we have to drop the reference on the correct tail
* page to match the get inside gup()
*/
put_page(pages[0]);
}
return ret;
out_unlock:
hptep[0] &= ~HPTE_V_HVLOCK;
preempt_enable();
goto out_put;
}
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
int (*handler)(struct kvm *kvm, unsigned long *rmapp,
unsigned long gfn))
{
int ret;
int retval = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
unsigned long start = memslot->userspace_addr;
unsigned long end;
end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
ret = handler(kvm, &memslot->rmap[gfn_offset],
memslot->base_gfn + gfn_offset);
retval |= ret;
}
}
return retval;
}
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long h, i, j;
unsigned long *hptep;
unsigned long ptel, psize, rcbits;
for (;;) {
lock_rmap(rmapp);
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
break;
}
/*
* To avoid an ABBA deadlock with the HPTE lock bit,
* we can't spin on the HPTE lock while holding the
* rmap chain lock.
*/
i = *rmapp & KVMPPC_RMAP_INDEX;
hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
while (hptep[0] & HPTE_V_HVLOCK)
cpu_relax();
continue;
}
j = rev[i].forw;
if (j == i) {
/* chain is now empty */
*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
} else {
/* remove i from chain */
h = rev[i].back;
rev[h].forw = j;
rev[j].back = h;
rev[i].forw = rev[i].back = i;
*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
}
/* Now check and modify the HPTE */
ptel = rev[i].guest_rpte;
psize = hpte_page_size(hptep[0], ptel);
if ((hptep[0] & HPTE_V_VALID) &&
hpte_rpn(ptel, psize) == gfn) {
hptep[0] |= HPTE_V_ABSENT;
kvmppc_invalidate_hpte(kvm, hptep, i);
/* Harvest R and C */
rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
rev[i].guest_rpte = ptel | rcbits;
}
unlock_rmap(rmapp);
hptep[0] &= ~HPTE_V_HVLOCK;
}
return 0;
}
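/*
 * A minimal sketch of the lock-ordering pattern used above (names taken
 * from this file; illustration only): the HPTE lock is only ever
 * trylocked while the rmap chain lock is held, and on failure the rmap
 * lock is dropped before spinning, which avoids the ABBA deadlock:
 *
 *	lock_rmap(rmapp);
 *	if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
 *		unlock_rmap(rmapp);
 *		while (hptep[0] & HPTE_V_HVLOCK)
 *			cpu_relax();
 *		goto retry;			// retake the rmap lock
 *	}
 *	// ... check and modify the HPTE and the rmap chain ...
 *	unlock_rmap(rmapp);
 *	hptep[0] &= ~HPTE_V_HVLOCK;
 */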
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
if (kvm->arch.using_mmu_notifiers)
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
return 0;
}
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long head, i, j;
unsigned long *hptep;
int ret = 0;
retry:
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED) {
*rmapp &= ~KVMPPC_RMAP_REFERENCED;
ret = 1;
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
return ret;
}
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
/* If this HPTE isn't referenced, ignore it */
if (!(hptep[1] & HPTE_R_R))
continue;
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
while (hptep[0] & HPTE_V_HVLOCK)
cpu_relax();
goto retry;
}
/* Now check and modify the HPTE */
if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
kvmppc_clear_ref_hpte(kvm, hptep, i);
rev[i].guest_rpte |= HPTE_R_R;
ret = 1;
}
hptep[0] &= ~HPTE_V_HVLOCK;
} while ((i = j) != head);
unlock_rmap(rmapp);
return ret;
}
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long head, i, j;
unsigned long *hp;
int ret = 1;
if (*rmapp & KVMPPC_RMAP_REFERENCED)
return 1;
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED)
goto out;
if (*rmapp & KVMPPC_RMAP_PRESENT) {
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
if (hp[1] & HPTE_R_R)
goto out;
} while ((i = j) != head);
}
ret = 0;
out:
unlock_rmap(rmapp);
return ret;
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
if (!kvm->arch.using_mmu_notifiers)
return;
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long head, i, j;
unsigned long *hptep;
int ret = 0;
retry:
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_CHANGED) {
*rmapp &= ~KVMPPC_RMAP_CHANGED;
ret = 1;
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
return ret;
}
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
if (!(hptep[1] & HPTE_R_C))
continue;
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
while (hptep[0] & HPTE_V_HVLOCK)
cpu_relax();
goto retry;
}
/* Now check and modify the HPTE */
if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
/* need to make it temporarily absent to clear C */
hptep[0] |= HPTE_V_ABSENT;
kvmppc_invalidate_hpte(kvm, hptep, i);
hptep[1] &= ~HPTE_R_C;
eieio();
hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
rev[i].guest_rpte |= HPTE_R_C;
ret = 1;
}
hptep[0] &= ~HPTE_V_HVLOCK;
} while ((i = j) != head);
unlock_rmap(rmapp);
return ret;
}
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
unsigned long i;
unsigned long *rmapp, *map;
preempt_disable();
rmapp = memslot->rmap;
map = memslot->dirty_bitmap;
for (i = 0; i < memslot->npages; ++i) {
if (kvm_test_clear_dirty(kvm, rmapp))
__set_bit_le(i, map);
++rmapp;
}
preempt_enable();
return 0;
}
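/*
 * Hedged usage sketch (assumed caller shape, for illustration only):
 * a dirty-log ioctl handler would typically clear the bitmap, harvest
 * the hardware C bits via the helper above, then copy the result out:
 *
 *	n = kvm_dirty_bitmap_bytes(memslot);
 *	memset(memslot->dirty_bitmap, 0, n);
 *	kvmppc_hv_get_dirty_log(kvm, memslot);
 *	copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n);
 */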
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
unsigned long *nb_ret)
{
struct kvm_memory_slot *memslot;
unsigned long gfn = gpa >> PAGE_SHIFT;
struct page *page, *pages[1];
int npages;
unsigned long hva, psize, offset;
unsigned long pa;
unsigned long *physp;
memslot = gfn_to_memslot(kvm, gfn);
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
return NULL;
if (!kvm->arch.using_mmu_notifiers) {
physp = kvm->arch.slot_phys[memslot->id];
if (!physp)
return NULL;
physp += gfn - memslot->base_gfn;
pa = *physp;
if (!pa) {
if (kvmppc_get_guest_page(kvm, gfn, memslot,
PAGE_SIZE) < 0)
return NULL;
pa = *physp;
}
page = pfn_to_page(pa >> PAGE_SHIFT);
get_page(page);
} else {
hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, 1, pages);
if (npages < 1)
return NULL;
page = pages[0];
}
psize = PAGE_SIZE;
if (PageHuge(page)) {
page = compound_head(page);
psize <<= compound_order(page);
}
offset = gpa & (psize - 1);
if (nb_ret)
*nb_ret = psize - offset;
return page_address(page) + offset;
}
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
struct page *page = virt_to_page(va);
put_page(page);
}
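/*
 * Typical pairing of the two helpers above (illustrative sketch, not a
 * call site from this file):
 *
 *	unsigned long nb;
 *	void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 *	if (va) {
 *		// ... access up to nb bytes starting at va ...
 *		kvmppc_unpin_guest_page(kvm, va);
 *	}
 */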
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
if (cpu_has_feature(CPU_FTR_ARCH_206))
vcpu->arch.slb_nr = 32; /* POWER7 */
else
vcpu->arch.slb_nr = 64;
mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}
| gpl-2.0 |
profglavcho/sdwq | arch/frv/kernel/frv_ksyms.c | 3736 | 2999 | #include <linux/module.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/checksum.h>
#include <asm/hardirq.h>
#include <asm/cacheflush.h>
extern long __memcpy_user(void *dst, const void *src, size_t count);
extern long __memset_user(void *dst, const void *src, size_t count);
/* platform dependent support */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(ip_fast_csum);
#if 0
EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(local_bh_count);
#endif
EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
EXPORT_SYMBOL(__page_offset);
EXPORT_SYMBOL(__memcpy_user);
EXPORT_SYMBOL(__memset_user);
EXPORT_SYMBOL(frv_dcache_writeback);
EXPORT_SYMBOL(frv_cache_invalidate);
EXPORT_SYMBOL(frv_icache_invalidate);
EXPORT_SYMBOL(frv_cache_wback_inv);
#ifndef CONFIG_MMU
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
#endif
EXPORT_SYMBOL(__debug_bug_trap);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
EXPORT_SYMBOL(atomic_test_and_OR_mask);
EXPORT_SYMBOL(atomic_test_and_XOR_mask);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(__xchg_32);
EXPORT_SYMBOL(__cmpxchg_32);
#endif
EXPORT_SYMBOL(atomic64_add_return);
EXPORT_SYMBOL(atomic64_sub_return);
EXPORT_SYMBOL(__xchg_64);
EXPORT_SYMBOL(__cmpxchg_64);
EXPORT_SYMBOL(__debug_bug_printk);
EXPORT_SYMBOL(__delay_loops_MHz);
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
* doesn't really matter since they're not versioned).
*/
extern void __gcc_bcmp(void);
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __cmpdi2(void);
extern void __divdi3(void);
extern void __lshrdi3(void);
extern void __moddi3(void);
extern void __muldi3(void);
extern void __mulll(void);
extern void __umulll(void);
extern void __negdi2(void);
extern void __ucmpdi2(void);
extern void __udivdi3(void);
extern void __udivmoddi4(void);
extern void __umoddi3(void);
/* gcc lib functions */
//EXPORT_SYMBOL(__gcc_bcmp);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
//EXPORT_SYMBOL(__cmpdi2);
//EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__lshrdi3);
//EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulll);
EXPORT_SYMBOL(__umulll);
EXPORT_SYMBOL(__negdi2);
EXPORT_SYMBOL(__ucmpdi2);
//EXPORT_SYMBOL(__udivdi3);
//EXPORT_SYMBOL(__udivmoddi4);
//EXPORT_SYMBOL(__umoddi3);
| gpl-2.0 |
SonsOfAndroid/android_kernel_samsung_d2_SOA | drivers/media/common/tuners/mt2131.c | 4248 | 8357 | /*
* Driver for Microtune MT2131 "QAM/8VSB single chip tuner"
*
* Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "mt2131.h"
#include "mt2131_priv.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
#define dprintk(level,fmt, arg...) if (debug >= level) \
printk(KERN_INFO "%s: " fmt, "mt2131", ## arg)
static u8 mt2131_config1[] = {
0x01,
0x50, 0x00, 0x50, 0x80, 0x00, 0x49, 0xfa, 0x88,
0x08, 0x77, 0x41, 0x04, 0x00, 0x00, 0x00, 0x32,
0x7f, 0xda, 0x4c, 0x00, 0x10, 0xaa, 0x78, 0x80,
0xff, 0x68, 0xa0, 0xff, 0xdd, 0x00, 0x00
};
static u8 mt2131_config2[] = {
0x10,
0x7f, 0xc8, 0x0a, 0x5f, 0x00, 0x04
};
static int mt2131_readreg(struct mt2131_priv *priv, u8 reg, u8 *val)
{
struct i2c_msg msg[2] = {
{ .addr = priv->cfg->i2c_address, .flags = 0,
.buf = &reg, .len = 1 },
{ .addr = priv->cfg->i2c_address, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};
if (i2c_transfer(priv->i2c, msg, 2) != 2) {
printk(KERN_WARNING "mt2131 I2C read failed\n");
return -EREMOTEIO;
}
return 0;
}
static int mt2131_writereg(struct mt2131_priv *priv, u8 reg, u8 val)
{
u8 buf[2] = { reg, val };
struct i2c_msg msg = { .addr = priv->cfg->i2c_address, .flags = 0,
.buf = buf, .len = 2 };
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "mt2131 I2C write failed\n");
return -EREMOTEIO;
}
return 0;
}
static int mt2131_writeregs(struct mt2131_priv *priv,u8 *buf, u8 len)
{
struct i2c_msg msg = { .addr = priv->cfg->i2c_address,
.flags = 0, .buf = buf, .len = len };
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "mt2131 I2C write failed (len=%i)\n",
(int)len);
return -EREMOTEIO;
}
return 0;
}
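/* Illustrative helper usage (hypothetical sequence, not part of the
 * MT2131 programming flow): a read-modify-write built from the two
 * single-register accessors above.
 *
 *	u8 v;
 *	if (mt2131_readreg(priv, 0x0b, &v) == 0)
 *		mt2131_writereg(priv, 0x0b, v | 0x01);
 */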
static int mt2131_set_params(struct dvb_frontend *fe,
struct dvb_frontend_parameters *params)
{
struct mt2131_priv *priv;
int ret=0, i;
u32 freq;
u8 if_band_center;
u32 f_lo1, f_lo2;
u32 div1, num1, div2, num2;
u8 b[8];
u8 lockval = 0;
priv = fe->tuner_priv;
if (fe->ops.info.type == FE_OFDM)
priv->bandwidth = params->u.ofdm.bandwidth;
else
priv->bandwidth = 0;
freq = params->frequency / 1000; // Hz -> kHz
dprintk(1, "%s() freq=%d\n", __func__, freq);
f_lo1 = freq + MT2131_IF1 * 1000;
f_lo1 = (f_lo1 / 250) * 250;
f_lo2 = f_lo1 - freq - MT2131_IF2;
priv->frequency = (f_lo1 - f_lo2 - MT2131_IF2) * 1000;
/* Frequency LO1 = 16MHz * (DIV1 + NUM1/8192 ) */
num1 = f_lo1 * 64 / (MT2131_FREF / 128);
div1 = num1 / 8192;
num1 &= 0x1fff;
/* Frequency LO2 = 16MHz * (DIV2 + NUM2/8192 ) */
num2 = f_lo2 * 64 / (MT2131_FREF / 128);
div2 = num2 / 8192;
num2 &= 0x1fff;
if (freq <= 82500) if_band_center = 0x00; else
if (freq <= 137500) if_band_center = 0x01; else
if (freq <= 192500) if_band_center = 0x02; else
if (freq <= 247500) if_band_center = 0x03; else
if (freq <= 302500) if_band_center = 0x04; else
if (freq <= 357500) if_band_center = 0x05; else
if (freq <= 412500) if_band_center = 0x06; else
if (freq <= 467500) if_band_center = 0x07; else
if (freq <= 522500) if_band_center = 0x08; else
if (freq <= 577500) if_band_center = 0x09; else
if (freq <= 632500) if_band_center = 0x0A; else
if (freq <= 687500) if_band_center = 0x0B; else
if (freq <= 742500) if_band_center = 0x0C; else
if (freq <= 797500) if_band_center = 0x0D; else
if (freq <= 852500) if_band_center = 0x0E; else
if (freq <= 907500) if_band_center = 0x0F; else
if (freq <= 962500) if_band_center = 0x10; else
if (freq <= 1017500) if_band_center = 0x11; else
if (freq <= 1072500) if_band_center = 0x12; else if_band_center = 0x13;
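/* Worked example (illustration only; assumes the mt2131_priv.h values
 * MT2131_IF1 = 1220 MHz, MT2131_IF2 = 44000 kHz, MT2131_FREF = 16000 kHz).
 * For freq = 600000 kHz:
 *	f_lo1 = 600000 + 1220000         = 1820000 kHz (already on a 250 kHz step)
 *	f_lo2 = 1820000 - 600000 - 44000 = 1176000 kHz
 *	num1  = 1820000 * 8192 / 16000   = 931840 -> div1 = 113, num1 = 6144
 *	num2  = 1176000 * 8192 / 16000   = 602112 -> div2 =  73, num2 = 4096
 * giving LO1 = 16 MHz * (113 + 6144/8192) = 1820 MHz and
 *        LO2 = 16 MHz * ( 73 + 4096/8192) = 1176 MHz, with if_band_center
 * landing in the 577500..632500 bucket above, i.e. 0x0A.
 */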
b[0] = 1;
b[1] = (num1 >> 5) & 0xFF;
b[2] = (num1 & 0x1F);
b[3] = div1;
b[4] = (num2 >> 5) & 0xFF;
b[5] = num2 & 0x1F;
b[6] = div2;
dprintk(1, "IF1: %dMHz IF2: %dMHz\n", MT2131_IF1, MT2131_IF2);
dprintk(1, "PLL freq=%dkHz band=%d\n", (int)freq, (int)if_band_center);
dprintk(1, "PLL f_lo1=%dkHz f_lo2=%dkHz\n", (int)f_lo1, (int)f_lo2);
dprintk(1, "PLL div1=%d num1=%d div2=%d num2=%d\n",
(int)div1, (int)num1, (int)div2, (int)num2);
dprintk(1, "PLL [1..6]: %2x %2x %2x %2x %2x %2x\n",
(int)b[1], (int)b[2], (int)b[3], (int)b[4], (int)b[5],
(int)b[6]);
ret = mt2131_writeregs(priv,b,7);
if (ret < 0)
return ret;
mt2131_writereg(priv, 0x0b, if_band_center);
/* Wait for lock */
i = 0;
do {
mt2131_readreg(priv, 0x08, &lockval);
if ((lockval & 0x88) == 0x88)
break;
msleep(4);
i++;
} while (i < 10);
return ret;
}
static int mt2131_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct mt2131_priv *priv = fe->tuner_priv;
dprintk(1, "%s()\n", __func__);
*frequency = priv->frequency;
return 0;
}
static int mt2131_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
struct mt2131_priv *priv = fe->tuner_priv;
dprintk(1, "%s()\n", __func__);
*bandwidth = priv->bandwidth;
return 0;
}
static int mt2131_get_status(struct dvb_frontend *fe, u32 *status)
{
struct mt2131_priv *priv = fe->tuner_priv;
u8 lock_status = 0;
u8 afc_status = 0;
*status = 0;
mt2131_readreg(priv, 0x08, &lock_status);
if ((lock_status & 0x88) == 0x88)
*status = TUNER_STATUS_LOCKED;
mt2131_readreg(priv, 0x09, &afc_status);
dprintk(1, "%s() - LO Status = 0x%x, AFC Status = 0x%x\n",
__func__, lock_status, afc_status);
return 0;
}
static int mt2131_init(struct dvb_frontend *fe)
{
struct mt2131_priv *priv = fe->tuner_priv;
int ret;
dprintk(1, "%s()\n", __func__);
if ((ret = mt2131_writeregs(priv, mt2131_config1,
sizeof(mt2131_config1))) < 0)
return ret;
mt2131_writereg(priv, 0x0b, 0x09);
mt2131_writereg(priv, 0x15, 0x47);
mt2131_writereg(priv, 0x07, 0xf2);
mt2131_writereg(priv, 0x0b, 0x01);
if ((ret = mt2131_writeregs(priv, mt2131_config2,
sizeof(mt2131_config2))) < 0)
return ret;
return ret;
}
static int mt2131_release(struct dvb_frontend *fe)
{
dprintk(1, "%s()\n", __func__);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
return 0;
}
static const struct dvb_tuner_ops mt2131_tuner_ops = {
.info = {
.name = "Microtune MT2131",
.frequency_min = 48000000,
.frequency_max = 860000000,
.frequency_step = 50000,
},
.release = mt2131_release,
.init = mt2131_init,
.set_params = mt2131_set_params,
.get_frequency = mt2131_get_frequency,
.get_bandwidth = mt2131_get_bandwidth,
.get_status = mt2131_get_status
};
struct dvb_frontend * mt2131_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct mt2131_config *cfg, u16 if1)
{
struct mt2131_priv *priv = NULL;
u8 id = 0;
dprintk(1, "%s()\n", __func__);
priv = kzalloc(sizeof(struct mt2131_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
priv->cfg = cfg;
priv->bandwidth = 6000000; /* 6MHz */
priv->i2c = i2c;
if (mt2131_readreg(priv, 0, &id) != 0) {
kfree(priv);
return NULL;
}
if ( (id != 0x3E) && (id != 0x3F) ) {
printk(KERN_ERR "MT2131: Device not found at addr 0x%02x\n",
cfg->i2c_address);
kfree(priv);
return NULL;
}
printk(KERN_INFO "MT2131: successfully identified at address 0x%02x\n",
cfg->i2c_address);
memcpy(&fe->ops.tuner_ops, &mt2131_tuner_ops,
sizeof(struct dvb_tuner_ops));
fe->tuner_priv = priv;
return fe;
}
EXPORT_SYMBOL(mt2131_attach);
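/*
 * Hedged usage sketch (hypothetical bridge-driver call site; the 0x60
 * address is an assumed example value):
 *
 *	static struct mt2131_config cfg = { .i2c_address = 0x60 };
 *
 *	if (mt2131_attach(fe, i2c_adap, &cfg, 0) == NULL)
 *		goto tuner_attach_failed;
 *
 * where fe and i2c_adap come from the demodulator setup.
 */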
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Microtune MT2131 silicon tuner driver");
MODULE_LICENSE("GPL");
/*
* Local variables:
* c-basic-offset: 8
*/
| gpl-2.0 |
spezi77/hellspawn-N4 | drivers/net/tokenring/olympic.c | 4760 | 62700 | /*
* olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
* 1999/2000 Mike Phillips (mikep@linuxtr.net)
*
* Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
* chipset.
*
* Base Driver Skeleton:
* Written 1993-94 by Donald Becker.
*
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency.
*
* Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
* assistance and perseverance with the testing of this driver.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* 4/27/99 - Alpha Release 0.1.0
* First release to the public
*
* 6/8/99 - Official Release 0.2.0
* Merged into the kernel code
* 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
* resource. Driver also reports the card name returned by
* the pci resource.
* 1/11/00 - Added spinlocks for smp
* 2/23/00 - Updated to dev_kfree_irq
* 3/10/00 - Fixed FDX enable which triggered other bugs also
* squashed.
* 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
* The odd thing about the changes is that the fix for
* endian issues with the big-endian data in the arb, asb...
* was to always swab() the bytes, no matter what CPU.
* That's because the read[wl]() functions always swap the
* bytes on the way in on PPC.
* Fixing the hardware descriptors was another matter,
* because they weren't going through read[wl](), there all
* the results had to be in memory in le32 values. kdaaker
*
* 12/23/00 - Added minimal Cardbus support (Thanks Donald).
*
* 03/09/01 - Add new pci api, dev_base_lock, general clean up.
*
* 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
* Change proc_fs behaviour, now one entry per adapter.
*
* 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
* adapter when live does not take the system down with it.
*
* 06/02/01 - Clean up, copy skb for small packets
*
* 06/22/01 - Add EISR error handling routines
*
* 07/19/01 - Improve bad LAA reporting, strip out freemem
* into a separate function, its called from 3
* different places now.
* 02/09/02 - Replaced sleep_on.
* 03/01/02 - Replace access to several registers from 32 bit to
* 16 bit. Fixes alignment errors on PPC 64 bit machines.
* Thanks to Al Trautman for this one.
* 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
* silently ignored until the error checking code
* went into version 1.0.0
* 06/04/02 - Add correct start up sequence for the cardbus adapters.
* Required for strict compliance with pci power mgmt specs.
* To Do:
*
* Wake on lan
*
* If Problems do Occur
* Most problems can be rectified by either closing and opening the interface
* (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
* if compiled into the kernel).
*/
/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
#define OLYMPIC_DEBUG 0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <net/checksum.h>
#include <net/net_namespace.h>
#include <asm/io.h>
#include "olympic.h"
/* I've got to put some intelligence into the version number so that Peter and I know
* which version of the code somebody has got.
* Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
* So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
*
* Official releases will only have an a.b.c version number format.
*/
static char version[] =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
"Address Verification", "Neighbor Notification (Ring Poll)",
"Request Parameters","FDX Registration Request",
"FDX Duplicate Address Check", "Station registration Query Wait",
"Unknown stage"};
static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
"Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
"Duplicate Node Address","Request Parameters","Remove Received",
"Reserved", "Reserved", "No Monitor Detected for RPL",
"Monitor Contention failer for RPL", "FDX Protocol Error"};
/* Module parameters */
MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
/* Ring Speed 0,4,16,100
* 0 = Autosense
* 4,16 = Selected speed only, no autosense
* This allows the card to be the first on the ring
* and become the active monitor.
* 100 = Nothing at present, 100mbps is autodetected
* if FDX is turned on. May be implemented in the future to
* fail if 100mpbs is not detected.
*
* WARNING: Some hubs will allow you to insert
* at the wrong speed
*/
static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(ringspeed, int, NULL, 0);
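/* Example (illustrative): force 16 Mbps on the first adapter and leave
 * the second in autosense:
 *
 *	modprobe olympic ringspeed=16,0
 */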
/* Packet buffer size */
static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(pkt_buf_sz, int, NULL, 0) ;
/* Message Level */
static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
module_param_array(message_level, int, NULL, 0) ;
/* Change network_monitor to receive mac frames through the arb channel.
* Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
* device, i.e. tr0, tr1 etc.
* Intended to be used to create a ring-error reporting network module
* i.e. it will give you the source address of beaconers on the ring
*/
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static netdev_tx_t olympic_xmit(struct sk_buff *skb,
struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
static const struct file_operations olympic_proc_ops;
static const struct net_device_ops olympic_netdev_ops = {
.ndo_open = olympic_open,
.ndo_stop = olympic_close,
.ndo_start_xmit = olympic_xmit,
.ndo_change_mtu = olympic_change_mtu,
.ndo_set_rx_mode = olympic_set_rx_mode,
.ndo_set_mac_address = olympic_set_mac_address,
};
static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev ;
struct olympic_private *olympic_priv;
static int card_no = -1 ;
int i ;
card_no++ ;
if ((i = pci_enable_device(pdev))) {
return i ;
}
pci_set_master(pdev);
if ((i = pci_request_regions(pdev,"olympic"))) {
goto op_disable_dev;
}
dev = alloc_trdev(sizeof(struct olympic_private)) ;
if (!dev) {
i = -ENOMEM;
goto op_release_dev;
}
olympic_priv = netdev_priv(dev) ;
spin_lock_init(&olympic_priv->olympic_lock) ;
init_waitqueue_head(&olympic_priv->srb_wait);
init_waitqueue_head(&olympic_priv->trb_wait);
#if OLYMPIC_DEBUG
printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
#endif
dev->irq=pdev->irq;
dev->base_addr=pci_resource_start(pdev, 0);
olympic_priv->olympic_card_name = pci_name(pdev);
olympic_priv->pdev = pdev;
olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
goto op_free_iomap;
}
if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
else
olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
olympic_priv->olympic_message_level = message_level[card_no] ;
olympic_priv->olympic_network_monitor = network_monitor[card_no];
if ((i = olympic_init(dev))) {
goto op_free_iomap;
}
dev->netdev_ops = &olympic_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
pci_set_drvdata(pdev,dev) ;
register_netdev(dev) ;
printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
char proc_name[20] ;
strcpy(proc_name,"olympic_") ;
strcat(proc_name,dev->name) ;
proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
}
return 0 ;
op_free_iomap:
if (olympic_priv->olympic_mmio)
iounmap(olympic_priv->olympic_mmio);
if (olympic_priv->olympic_lap)
iounmap(olympic_priv->olympic_lap);
free_netdev(dev);
op_release_dev:
pci_release_regions(pdev);
op_disable_dev:
pci_disable_device(pdev);
return i;
}
static int olympic_init(struct net_device *dev)
{
struct olympic_private *olympic_priv;
u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
unsigned long t;
unsigned int uaa_addr;
olympic_priv=netdev_priv(dev);
olympic_mmio=olympic_priv->olympic_mmio;
printk("%s\n", version);
printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
t=jiffies;
while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
schedule();
if(time_after(jiffies, t + 40*HZ)) {
printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
return -ENODEV;
}
}
/* Needed for cardbus */
if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
}
#if OLYMPIC_DEBUG
printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
printk("GPR: %x\n",readw(olympic_mmio+GPR));
printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif
/* Aaaahhh, You have got to be real careful setting GPR, the card
holds the previous values from flash memory, including autosense
and ring speed */
writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
} else if (olympic_priv->olympic_ring_speed == 16) {
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
writew(GPR_16MBPS, olympic_mmio+GPR);
} else if (olympic_priv->olympic_ring_speed == 4) {
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
writew(0, olympic_mmio+GPR);
}
writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
#if OLYMPIC_DEBUG
printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
#endif
/* Solo has been paused to meet the Cardbus power
* specs if the adapter is cardbus. Check to
* see its been paused and then restart solo. The
* adapter should set the pause bit within 1 second.
*/
if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
t=jiffies;
while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
schedule() ;
if(time_after(jiffies, t + 2*HZ)) {
printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
return -ENODEV;
}
}
writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
}
/* start solo init */
writel((1<<15),olympic_mmio+SISR_MASK_SUM);
t=jiffies;
while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
schedule();
if(time_after(jiffies, t + 15*HZ)) {
printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
return -ENODEV;
}
}
writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
#if OLYMPIC_DEBUG
printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
#endif
init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
#if OLYMPIC_DEBUG
{
int i;
printk("init_srb(%p): ",init_srb);
for(i=0;i<20;i++)
printk("%x ",readb(init_srb+i));
printk("\n");
}
#endif
if(readw(init_srb+6)) {
printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
return -ENODEV;
}
if (olympic_priv->olympic_message_level) {
if ( readb(init_srb +2) & 0x40) {
printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
} else {
printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
}
}
uaa_addr=swab16(readw(init_srb+8));
#if OLYMPIC_DEBUG
printk("UAA resides at %x\n",uaa_addr);
#endif
writel(uaa_addr,olympic_mmio+LAPA);
adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
#if OLYMPIC_DEBUG
printk("adapter address: %pM\n", dev->dev_addr);
#endif
olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
return 0;
}
static int olympic_open(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
unsigned long flags, t;
int i, open_finished = 1 ;
u8 resp, err;
DECLARE_WAITQUEUE(wait,current) ;
olympic_init(dev);
if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
dev))
return -EAGAIN;
#if OLYMPIC_DEBUG
printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
#endif
writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
/* adapter is closed, so SRB is pointed to by LAPWWO */
writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
#if OLYMPIC_DEBUG
printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
printk("Before the open command\n");
#endif
do {
memset_io(init_srb,0,SRB_COMMAND_SIZE);
writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
/* If Network Monitor, instruct card to copy MAC frames through the ARB */
if (olympic_priv->olympic_network_monitor)
writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
else
writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
/* Test OR of first 3 bytes as it's totally possible for
* someone to set the first 2 bytes to be zero, although this
* is an error, the first byte must have bit 6 set to 1 */
if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
writeb(olympic_priv->olympic_laa[0],init_srb+12);
writeb(olympic_priv->olympic_laa[1],init_srb+13);
writeb(olympic_priv->olympic_laa[2],init_srb+14);
writeb(olympic_priv->olympic_laa[3],init_srb+15);
writeb(olympic_priv->olympic_laa[4],init_srb+16);
writeb(olympic_priv->olympic_laa[5],init_srb+17);
memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
}
writeb(1,init_srb+30);
spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
olympic_priv->srb_queued=1;
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
t = jiffies ;
add_wait_queue(&olympic_priv->srb_wait,&wait) ;
set_current_state(TASK_INTERRUPTIBLE) ;
while(olympic_priv->srb_queued) {
schedule() ;
if(signal_pending(current)) {
printk(KERN_WARNING "%s: Signal received in open.\n",
dev->name);
printk(KERN_WARNING "SISR=%x LISR=%x\n",
readl(olympic_mmio+SISR),
readl(olympic_mmio+LISR));
olympic_priv->srb_queued=0;
break;
}
if (time_after(jiffies, t + 10*HZ)) {
printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
olympic_priv->srb_queued=0;
break ;
}
set_current_state(TASK_INTERRUPTIBLE) ;
}
remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
set_current_state(TASK_RUNNING) ;
olympic_priv->srb_queued = 0 ;
#if OLYMPIC_DEBUG
printk("init_srb(%p): ",init_srb);
for(i=0;i<20;i++)
printk("%02x ",readb(init_srb+i));
printk("\n");
#endif
/* If we get the same return response as we set, the interrupt wasn't raised and the open
* timed out.
*/
switch (resp = readb(init_srb+2)) {
case OLYMPIC_CLEAR_RET_CODE:
printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
goto out;
case 0:
open_finished = 1;
break;
case 0x07:
if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense, first time around */
printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
open_finished = 0 ;
continue;
}
err = readb(init_srb+7);
if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
} else {
printk(KERN_WARNING "%s: %s - %s\n", dev->name,
open_maj_error[(err & 0xf0) >> 4],
open_min_error[(err & 0x0f)]);
}
goto out;
case 0x32:
printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
dev->name, olympic_priv->olympic_laa);
goto out;
default:
printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
goto out;
}
} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
if (readb(init_srb+18) & (1<<3))
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
if (readb(init_srb+18) & (1<<1))
olympic_priv->olympic_ring_speed = 100 ;
else if (readb(init_srb+18) & 1)
olympic_priv->olympic_ring_speed = 16 ;
else
olympic_priv->olympic_ring_speed = 4 ;
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
olympic_priv->asb = swab16(readw(init_srb+8));
olympic_priv->srb = swab16(readw(init_srb+10));
olympic_priv->arb = swab16(readw(init_srb+12));
olympic_priv->trb = swab16(readw(init_srb+16));
olympic_priv->olympic_receive_options = 0x01 ;
olympic_priv->olympic_copy_all_options = 0 ;
/* setup rx ring */
writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */
for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
struct sk_buff *skb;
skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
if(skb == NULL)
break;
skb->dev = dev;
olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
olympic_priv->rx_ring_skb[i]=skb;
}
if (i==0) {
printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
goto out;
}
olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
writew(i, olympic_mmio+RXDESCQCNT);
olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
writew(i, olympic_mmio+RXSTATQCNT);
#if OLYMPIC_DEBUG
printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
#endif
writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
#if OLYMPIC_DEBUG
printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
#endif
writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
/* setup tx ring */
writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);
olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
writel(0,olympic_mmio+EISR) ;
writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
#if OLYMPIC_DEBUG
printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
#endif
if (olympic_priv->olympic_network_monitor) {
u8 __iomem *oat;
u8 __iomem *opt;
u8 addr[6];
oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
for (i = 0; i < 6; i++)
addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
printk("%s: Node Address: %pM\n", dev->name, addr);
printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
for (i = 0; i < 6; i++)
addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
printk("%s: NAUN Address: %pM\n", dev->name, addr);
}
netif_start_queue(dev);
return 0;
out:
free_irq(dev->irq, dev);
return -EIO;
}
/*
* When we enter the rx routine we do not know how many frames have been
* queued on the rx channel. Therefore we start at the next rx status
* position and travel around the receive ring until we have completed
* all the frames.
*
* This means that we may process the frame before we receive the end
* of frame interrupt. This is why we always test the status instead
* of blindly processing the next frame.
*
* We also remove the last 4 bytes from the packet as well, these are
* just token ring trailer info and upset protocols that don't check
* their own length, i.e. SNA.
*
*/
static void olympic_rx(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
struct olympic_rx_status *rx_status;
struct olympic_rx_desc *rx_desc ;
int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
struct sk_buff *skb, *skb2;
int i;
rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
while (rx_status->status_buffercnt) {
u32 l_status_buffercnt;
olympic_priv->rx_status_last_received++ ;
olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
#if OLYMPIC_DEBUG
printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
#if OLYMPIC_DEBUG
printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
if(l_status_buffercnt & 0xC0000000) {
if (l_status_buffercnt & 0x3B000000) {
if (olympic_priv->olympic_message_level) {
if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
if (l_status_buffercnt & (1<<27)) /* No receive buffers */
printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
}
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
dev->stats.rx_errors++;
} else {
if (buffer_cnt == 1) {
skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
} else {
skb = dev_alloc_skb(length) ;
}
if (skb == NULL) {
printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
dev->stats.rx_dropped++;
/* Update counters even though we don't transfer the frame */
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
} else {
/* Optimise based upon number of buffers used.
If only one buffer is used we can simply swap the buffers around.
If more than one then we must use the new buffer and copy the information
first. Ideally all frames would be in a single buffer, this can be tuned by
altering the buffer size. If the length of the packet is less than
1500 bytes we're going to copy it over anyway to stop packets getting
dropped from sockets with buffers smaller than our pkt_buf_sz. */
if (buffer_cnt==1) {
olympic_priv->rx_ring_last_received++ ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
rx_ring_last_received = olympic_priv->rx_ring_last_received ;
if (length > 1500) {
skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
/* unmap buffer */
pci_unmap_single(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
skb_put(skb2,length-4);
skb2->protocol = tr_type_trans(skb2,dev);
olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
cpu_to_le32(olympic_priv->pkt_buf_sz);
olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
netif_rx(skb2) ;
} else {
pci_dma_sync_single_for_cpu(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
skb_put(skb,length - 4),
length - 4);
pci_dma_sync_single_for_device(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
skb->protocol = tr_type_trans(skb,dev) ;
netif_rx(skb) ;
}
} else {
do { /* Walk the buffers */
olympic_priv->rx_ring_last_received++ ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
rx_ring_last_received = olympic_priv->rx_ring_last_received ;
pci_dma_sync_single_for_cpu(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
skb_put(skb, cpy_length),
cpy_length);
pci_dma_sync_single_for_device(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
} while (--i) ;
skb_trim(skb,skb->len-4) ;
skb->protocol = tr_type_trans(skb,dev);
netif_rx(skb) ;
}
dev->stats.rx_packets++ ;
dev->stats.rx_bytes += length ;
} /* if skb == null */
} /* If status & 0x3b */
} else { /*if buffercnt & 0xC */
olympic_priv->rx_ring_last_received += i ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
}
rx_status->fragmentcnt_framelen = 0 ;
rx_status->status_buffercnt = 0 ;
rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
} /* while */
}
static void olympic_freemem(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
int i;
for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
}
if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
pci_unmap_single(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
}
olympic_priv->rx_status_last_received++;
olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
}
/* unmap rings */
pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
return ;
}
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
struct net_device *dev= (struct net_device *)dev_id;
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
u32 sisr;
u8 __iomem *adapter_check_area ;
/*
* Read sisr but don't reset it yet.
* The indication bit may have been set but the interrupt latch
* bit may not be set, so we'd lose the interrupt later.
*/
sisr=readl(olympic_mmio+SISR) ;
if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
return IRQ_NONE;
sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */
spin_lock(&olympic_priv->olympic_lock);
/* Hotswap gives us this on removal */
if (sisr == 0xffffffff) {
printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_NONE;
}
if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
/* If we ever get this the adapter is seriously dead. Only a reset is going to
* bring it back to life. We're talking pci bus errors and such like :( */
if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
printk(KERN_ERR "or the linux-tr mailing list.\n") ;
wake_up_interruptible(&olympic_priv->srb_wait);
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_HANDLED;
} /* SISR_ERR */
if(sisr & SISR_SRB_REPLY) {
if(olympic_priv->srb_queued==1) {
wake_up_interruptible(&olympic_priv->srb_wait);
} else if (olympic_priv->srb_queued==2) {
olympic_srb_bh(dev) ;
}
olympic_priv->srb_queued=0;
} /* SISR_SRB_REPLY */
/* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
we get all tx completions. */
if (sisr & SISR_TX1_EOF) {
while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
olympic_priv->tx_ring_last_status++;
olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
olympic_priv->free_tx_ring_entries++;
dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
dev->stats.tx_packets++ ;
pci_unmap_single(olympic_priv->pdev,
le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
}
netif_wake_queue(dev);
} /* SISR_TX1_EOF */
if (sisr & SISR_RX_STATUS) {
olympic_rx(dev);
} /* SISR_RX_STATUS */
if (sisr & SISR_ADAPTER_CHECK) {
netif_stop_queue(dev);
printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_HANDLED;
} /* SISR_ADAPTER_CHECK */
if (sisr & SISR_ASB_FREE) {
/* Wake up anything that is waiting for the asb response */
if (olympic_priv->asb_queued) {
olympic_asb_bh(dev) ;
}
} /* SISR_ASB_FREE */
if (sisr & SISR_ARB_CMD) {
olympic_arb_cmd(dev) ;
} /* SISR_ARB_CMD */
if (sisr & SISR_TRB_REPLY) {
/* Wake up anything that is waiting for the trb response */
if (olympic_priv->trb_queued) {
wake_up_interruptible(&olympic_priv->trb_wait);
}
olympic_priv->trb_queued = 0 ;
} /* SISR_TRB_REPLY */
if (sisr & SISR_RX_NOBUF) {
/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
/var/log/messages. */
} /* SISR_RX_NOBUF */
} else {
printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
} /* One of the interrupts we want */
writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
spin_unlock(&olympic_priv->olympic_lock) ;
return IRQ_HANDLED;
}
static netdev_tx_t olympic_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
unsigned long flags ;
spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
netif_stop_queue(dev);
if(olympic_priv->free_tx_ring_entries) {
olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
olympic_priv->free_tx_ring_entries--;
olympic_priv->tx_ring_free++;
olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
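/* The mask above wraps the ring index: the ring sizes are powers of two,
so masking with (size - 1) avoids a modulo. */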
writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
netif_wake_queue(dev);
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
return NETDEV_TX_OK;
} else {
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
return NETDEV_TX_BUSY;
}
}
static int olympic_close(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
unsigned long t,flags;
DECLARE_WAITQUEUE(wait,current) ;
netif_stop_queue(dev);
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
writeb(SRB_CLOSE_ADAPTER,srb+0);
writeb(0,srb+1);
writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
add_wait_queue(&olympic_priv->srb_wait,&wait) ;
set_current_state(TASK_INTERRUPTIBLE) ;
spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
olympic_priv->srb_queued=1;
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
while(olympic_priv->srb_queued) {
t = schedule_timeout_interruptible(60*HZ);
if(signal_pending(current)) {
printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
olympic_priv->srb_queued=0;
break;
}
if (t == 0) {
printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
}
olympic_priv->srb_queued=0;
}
remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
olympic_priv->rx_status_last_received++;
olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
olympic_freemem(dev) ;
/* reset tx/rx fifo's and busmaster logic */
writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
udelay(1);
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
#if OLYMPIC_DEBUG
{
int i ;
printk("srb(%p): ",srb);
for(i=0;i<4;i++)
printk("%x ",readb(srb+i));
printk("\n");
}
#endif
free_irq(dev->irq,dev);
return 0;
}
static void olympic_set_rx_mode(struct net_device *dev)
{
struct olympic_private *olympic_priv = netdev_priv(dev);
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
struct netdev_hw_addr *ha;
unsigned char dev_mc_address[4] ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
options = olympic_priv->olympic_copy_all_options;
if (dev->flags&IFF_PROMISC)
options |= 0x61 ;
else
options &= ~0x61 ;
/* Only issue the srb if there is a change in options */
if ((options ^ olympic_priv->olympic_copy_all_options)) {
/* Now to issue the srb command to alter the copy.all.options */
writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
writeb(0,srb+1);
writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
writeb(0,srb+3);
writeb(olympic_priv->olympic_receive_options,srb+4);
writeb(options,srb+5);
olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
olympic_priv->olympic_copy_all_options = options ;
return ;
}
/* Set the functional addresses we need for multicast */
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
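/* Token Ring multicast uses a single functional-address mask rather than a
per-address filter: OR bytes 2-5 of every multicast address into the mask. */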
netdev_for_each_mc_addr(ha, dev) {
dev_mc_address[0] |= ha->addr[2];
dev_mc_address[1] |= ha->addr[3];
dev_mc_address[2] |= ha->addr[4];
dev_mc_address[3] |= ha->addr[5];
}
writeb(SRB_SET_FUNC_ADDRESS,srb+0);
writeb(0,srb+1);
writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
writeb(0,srb+3);
writeb(0,srb+4);
writeb(0,srb+5);
writeb(dev_mc_address[0],srb+6);
writeb(dev_mc_address[1],srb+7);
writeb(dev_mc_address[2],srb+8);
writeb(dev_mc_address[3],srb+9);
olympic_priv->srb_queued = 2 ;
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
}
static void olympic_srb_bh(struct net_device *dev)
{
struct olympic_private *olympic_priv = netdev_priv(dev);
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 __iomem *srb;
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
switch (readb(srb)) {
/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
* At some point we should do something if we get an error, such as
* resetting the IFF_PROMISC flag in dev
*/
case SRB_MODIFY_RECEIVE_OPTIONS:
switch (readb(srb+2)) {
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
break ;
default:
if (olympic_priv->olympic_message_level)
printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
break ;
} /* switch srb[2] */
break ;
/* SRB_SET_GROUP_ADDRESS - Multicast group setting
*/
case SRB_SET_GROUP_ADDRESS:
switch (readb(srb+2)) {
case 0x00:
break ;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
break ;
case 0x3c:
printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
break ;
case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
break ;
case 0x55:
printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
break ;
default:
break ;
} /* switch srb[2] */
break ;
/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
*/
case SRB_RESET_GROUP_ADDRESS:
switch (readb(srb+2)) {
case 0x00:
break ;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
case 0x39: /* Must deal with this if individual multicast addresses are used */
printk(KERN_INFO "%s: Group address not found\n",dev->name);
break ;
default:
break ;
} /* switch srb[2] */
break ;
/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
*/
case SRB_SET_FUNC_ADDRESS:
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
break ;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
default:
break ;
} /* switch srb[2] */
break ;
/* SRB_READ_LOG - Read and reset the adapter error counters
*/
case SRB_READ_LOG:
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
break ;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
} /* switch srb[2] */
break ;
/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
case SRB_READ_SR_COUNTERS:
switch (readb(srb+2)) {
case 0x00:
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
break ;
case 0x01:
printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
break ;
case 0x04:
printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
break ;
default:
break ;
} /* switch srb[2] */
break ;
default:
printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
break ;
} /* switch srb[0] */
}
static int olympic_set_mac_address (struct net_device *dev, void *addr)
{
struct sockaddr *saddr = addr ;
struct olympic_private *olympic_priv = netdev_priv(dev);
if (netif_running(dev)) {
printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
return -EIO ;
}
memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
if (olympic_priv->olympic_message_level) {
printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
olympic_priv->olympic_laa[5]);
}
return 0 ;
}
static void olympic_arb_cmd(struct net_device *dev)
{
struct olympic_private *olympic_priv = netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
u8 __iomem *arb_block, *asb_block, *srb ;
u8 header_len ;
u16 frame_len, buffer_len ;
struct sk_buff *mac_frame ;
u8 __iomem *buf_ptr ;
u8 __iomem *frame_data ;
u16 buff_off ;
u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
u8 fdx_prot_error ;
u16 next_ptr;
arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
frame_len = swab16(readw(arb_block + 10)) ;
buff_off = swab16(readw(arb_block + 6)) ;
buf_ptr = olympic_priv->olympic_lap + buff_off ;
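/* The adapter's shared RAM stores multi-byte fields big-endian, hence the
swab16() conversions on the length and offset reads above. */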
#if OLYMPIC_DEBUG
{
int i;
frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
for (i=0 ; i < 14 ; i++) {
printk("Loc %d = %02x\n",i,readb(frame_data + i));
}
printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
mac_frame = dev_alloc_skb(frame_len) ;
if (!mac_frame) {
printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
goto drop_frame;
}
/* Walk the buffer chain, creating the frame */
do {
frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));
mac_frame->protocol = tr_type_trans(mac_frame, dev);
if (olympic_priv->olympic_network_monitor) {
struct trh_hdr *mac_hdr;
printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
mac_hdr = tr_hdr(mac_frame);
printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
dev->name, mac_hdr->daddr);
printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
dev->name, mac_hdr->saddr);
}
netif_rx(mac_frame);
drop_frame:
/* Now tell the card we have dealt with the received frame */
/* Set LISR Bit 1 */
writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
/* Is the ASB free ? */
if (readb(asb_block + 2) != 0xff) {
olympic_priv->asb_queued = 1 ;
writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
return ;
/* Drop out and wait for the bottom half to be run */
}
writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
olympic_priv->asb_queued = 2 ;
return ;
} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
lan_status = swab16(readw(arb_block+6));
fdx_prot_error = readb(arb_block+8) ;
/* Issue ARB Free */
writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
if (lan_status_diff & LSC_LWF)
printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
if (lan_status_diff & LSC_ARW)
printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
if (lan_status_diff & LSC_FPE)
printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
if (lan_status_diff & LSC_RR)
printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
/* Adapter has been closed by the hardware */
/* reset tx/rx fifo's and busmaster logic */
writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
udelay(1);
writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
netif_stop_queue(dev);
olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
} /* If serious error */
if (olympic_priv->olympic_message_level) {
if (lan_status_diff & LSC_SIG_LOSS)
printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
if (lan_status_diff & LSC_HARD_ERR)
printk(KERN_INFO "%s: Beaconing\n",dev->name);
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
if (lan_status_diff & LSC_FDX_MODE)
printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
}
if (lan_status_diff & LSC_CO) {
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
/* Issue READ.LOG command */
writeb(SRB_READ_LOG, srb);
writeb(0,srb+1);
writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
writeb(0,srb+3);
writeb(0,srb+4);
writeb(0,srb+5);
olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
}
if (lan_status_diff & LSC_SR_CO) {
if (olympic_priv->olympic_message_level)
printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
/* Issue a READ.SR.COUNTERS */
writeb(SRB_READ_SR_COUNTERS,srb);
writeb(0,srb+1);
writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
writeb(0,srb+3);
olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
}
olympic_priv->olympic_lan_status = lan_status ;
} /* Lan.change.status */
else
printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void olympic_asb_bh(struct net_device *dev)
{
struct olympic_private *olympic_priv = netdev_priv(dev);
u8 __iomem *arb_block, *asb_block ;
arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
olympic_priv->asb_queued = 2 ;
return ;
}
if (olympic_priv->asb_queued == 2) {
switch (readb(asb_block+2)) {
case 0x01:
printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
break ;
case 0x26:
printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
break ;
case 0xFF:
/* Valid response, everything should be ok again */
break ;
default:
printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
break ;
}
}
olympic_priv->asb_queued = 0 ;
}
static int olympic_change_mtu(struct net_device *dev, int mtu)
{
struct olympic_private *olympic_priv = netdev_priv(dev);
u16 max_mtu ;
if (olympic_priv->olympic_ring_speed == 4)
max_mtu = 4500 ;
else
max_mtu = 18000 ;
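/* Classical Token Ring limits: 4 Mbps rings carry at most ~4.5 KB frames,
16 Mbps rings up to ~18 KB. */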
if (mtu > max_mtu)
return -EINVAL ;
if (mtu < 100)
return -EINVAL ;
dev->mtu = mtu ;
olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
return 0 ;
}
static int olympic_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
u8 addr[6];
u8 addr2[6];
int i;
seq_printf(m,
"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
dev->name);
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
dev->name,
dev->dev_addr, addr,
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);
seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
dev->name) ;
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
for (i = 0 ; i < 6 ; i++)
addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
dev->name,
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
addr, addr2,
swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
dev->name) ;
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
dev->name, addr,
swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
dev->name) ;
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
dev->name,
swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
addr,
readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
return 0;
}
static int olympic_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, olympic_proc_show, PDE(inode)->data);
}
static const struct file_operations olympic_proc_ops = {
.open = olympic_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void __devexit olympic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev) ;
struct olympic_private *olympic_priv=netdev_priv(dev);
if (olympic_priv->olympic_network_monitor) {
char proc_name[20] ;
strcpy(proc_name,"olympic_") ;
strcat(proc_name,dev->name) ;
remove_proc_entry(proc_name,init_net.proc_net);
}
unregister_netdev(dev) ;
iounmap(olympic_priv->olympic_mmio) ;
iounmap(olympic_priv->olympic_lap) ;
pci_release_regions(pdev) ;
pci_set_drvdata(pdev,NULL) ;
free_netdev(dev) ;
}
static struct pci_driver olympic_driver = {
.name = "olympic",
.id_table = olympic_pci_tbl,
.probe = olympic_probe,
.remove = __devexit_p(olympic_remove_one),
};
static int __init olympic_pci_init(void)
{
return pci_register_driver(&olympic_driver) ;
}
static void __exit olympic_pci_cleanup(void)
{
pci_unregister_driver(&olympic_driver) ;
}
module_init(olympic_pci_init) ;
module_exit(olympic_pci_cleanup) ;
MODULE_LICENSE("GPL");
| gpl-2.0 |
jfdsmabalot/kernel_moto-x | drivers/usb/gadget/amd5536udc.c | 4760 | 86145 | /*
* amd5536.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/*
* The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
* It is a high-speed, DMA-capable USB device controller. Besides ep0, it
* provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
*
* Make sure that UDC is assigned to port 4 by BIOS settings (port can also
* be used as host port) and UOC bits PAD_EN and APU are set (should be done
* by BIOS init).
*
* UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
* work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
* can be used with gadget ether.
*/
/* debug control */
/* #define UDC_VERBOSE */
/* Driver strings */
#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING "01.00.0206"
/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
/* udc specific */
#include "amd5536udc.h"
static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);
/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";
/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;
/* received setup data */
static union udc_setup_data setup_data;
/* pointer to device object */
static struct udc *udc;
/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);
/*
* slave mode: pending bytes in rx fifo after nyet,
* used if EPIN irq came but no req was available
*/
static unsigned int udc_rxfifo_pending;
/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;
/* timer */
static struct timer_list udc_timer;
static int stop_timer;
/* set_rde -- Is used to control enabling of RX DMA. Problem is
* that UDC has only one bit (RDE) to enable/disable RX DMA for
* all OUT endpoints. So we have to handle race conditions like
* when OUT data reaches the fifo but no request was queued yet.
* This cannot be solved by letting the RX DMA disabled until a
* request gets queued because there may be other OUT packets
* in the FIFO (important for not blocking control traffic).
* The value of set_rde controls the corresponding timer.
*
* set_rde -1 == not used, means it is allowed to be set to 0 or 1
* set_rde 0 == do not touch RDE, do no start the RDE timer
* set_rde 1 == timer function will look whether FIFO has data
* set_rde 2 == set by timer function to enable RX DMA on next call
*/
static int set_rde = -1;
static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);
/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
(unsigned long) &udc);
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *const ep_string[] = {
ep0_string,
"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};
/* DMA usage flag */
static bool use_dma = 1;
/* packet per buffer dma */
static bool use_dma_ppb = 1;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
DBG(dev, "------- Device registers -------\n");
DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
DBG(dev, "\n");
DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
DBG(dev, "\n");
DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
DBG(dev, "\n");
DBG(dev, "USE DMA = %d\n", use_dma);
if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
DBG(dev, "DMA mode = PPBNDU (packet per buffer "
"WITHOUT desc. update)\n");
dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
DBG(dev, "DMA mode = PPBDU (packet per buffer "
"WITH desc. update)\n");
dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
}
if (use_dma && use_dma_bufferfill_mode) {
DBG(dev, "DMA mode = BF (buffer fill mode)\n");
dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
}
if (!use_dma)
dev_info(&dev->pdev->dev, "FIFO mode\n");
DBG(dev, "-------------------------------------------------------\n");
}
/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
u32 tmp;
/* mask all dev interrupts */
tmp = AMD_BIT(UDC_DEVINT_SVC) |
AMD_BIT(UDC_DEVINT_ENUM) |
AMD_BIT(UDC_DEVINT_US) |
AMD_BIT(UDC_DEVINT_UR) |
AMD_BIT(UDC_DEVINT_ES) |
AMD_BIT(UDC_DEVINT_SI) |
AMD_BIT(UDC_DEVINT_SOF)|
AMD_BIT(UDC_DEVINT_SC);
writel(tmp, &dev->regs->irqmsk);
/* mask all ep interrupts */
writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
return 0;
}
/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
u32 tmp;
DBG(dev, "udc_enable_ep0_interrupts()\n");
/* read irq mask */
tmp = readl(&dev->regs->ep_irqmsk);
/* enable ep0 irq's */
tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
writel(tmp, &dev->regs->ep_irqmsk);
return 0;
}
/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
u32 tmp;
DBG(dev, "enable device interrupts for setup data\n");
/* read irq mask */
tmp = readl(&dev->regs->irqmsk);
/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
& AMD_UNMASK_BIT(UDC_DEVINT_SC)
& AMD_UNMASK_BIT(UDC_DEVINT_UR)
& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
writel(tmp, &dev->regs->irqmsk);
return 0;
}
/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
struct udc *dev;
u32 tmp;
int i;
if (!ep || !(ep->in))
return -EINVAL;
dev = ep->dev;
ep->txfifo = dev->txfifo;
/* traverse ep's */
for (i = 0; i < ep->num; i++) {
if (dev->ep[i].regs) {
/* read fifo size */
tmp = readl(&dev->ep[i].regs->bufin_framenum);
tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
ep->txfifo += tmp;
}
}
return 0;
}
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;
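/*
 * UDC_QUEUE_CNAK below records endpoints whose NAK could not be cleared
 * yet; the set bits are assumed to be retried from the endpoint interrupt
 * paths once the controller has actually cleared the NAK.
 */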
static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
cnak_pending |= 1 << (num);
ep->naking = 1;
} else
cnak_pending = cnak_pending & (~(1 << (num)));
}
/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
struct udc_ep *ep;
struct udc *dev;
u32 tmp;
unsigned long iflags;
u8 udc_csr_epix;
unsigned maxpacket;
if (!usbep
|| usbep->name == ep0_string
|| !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
ep = container_of(usbep, struct udc_ep, ep);
dev = ep->dev;
DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
spin_lock_irqsave(&dev->lock, iflags);
ep->desc = desc;
ep->halted = 0;
/* set traffic type */
tmp = readl(&dev->ep[ep->num].regs->ctl);
tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
writel(tmp, &dev->ep[ep->num].regs->ctl);
/* set max packet size */
maxpacket = usb_endpoint_maxp(desc);
tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
ep->ep.maxpacket = maxpacket;
writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
/* IN ep */
if (ep->in) {
/* ep ix in UDC CSR register space */
udc_csr_epix = ep->num;
/* set buffer size (tx fifo entries) */
tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
/* double buffering: fifo size = 2 x max packet size */
tmp = AMD_ADDBITS(
tmp,
maxpacket * UDC_EPIN_BUFF_SIZE_MULT
/ UDC_DWORD_BYTES,
UDC_EPIN_BUFF_SIZE);
writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
/* calc. tx fifo base addr */
udc_set_txfifo_addr(ep);
/* flush fifo */
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_F);
writel(tmp, &ep->regs->ctl);
/* OUT ep */
} else {
/* ep ix in UDC CSR register space */
udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
/* set max packet size UDC CSR */
tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
tmp = AMD_ADDBITS(tmp, maxpacket,
UDC_CSR_NE_MAX_PKT);
writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
if (use_dma && !ep->in) {
/* alloc and init BNA dummy request */
ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
ep->bna_occurred = 0;
}
if (ep->num != UDC_EP0OUT_IX)
dev->data_ep_enabled = 1;
}
/* set ep values */
tmp = readl(&dev->csr->ne[udc_csr_epix]);
/* max packet */
tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
/* ep number */
tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
/* ep direction */
tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
/* ep type */
tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
/* ep config */
tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
/* ep interface */
tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
/* ep alt */
tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
/* write reg */
writel(tmp, &dev->csr->ne[udc_csr_epix]);
/* enable ep irq */
tmp = readl(&dev->regs->ep_irqmsk);
tmp &= AMD_UNMASK_BIT(ep->num);
writel(tmp, &dev->regs->ep_irqmsk);
/*
* clear NAK by writing CNAK
* avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
*/
if (!use_dma || ep->in) {
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &ep->regs->ctl);
ep->naking = 0;
UDC_QUEUE_CNAK(ep, ep->num);
}
tmp = desc->bEndpointAddress;
DBG(dev, "%s enabled\n", usbep->name);
spin_unlock_irqrestore(&dev->lock, iflags);
return 0;
}
/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
u32 tmp;
VDBG(ep->dev, "ep-%d reset\n", ep->num);
ep->desc = NULL;
ep->ep.desc = NULL;
ep->ep.ops = &udc_ep_ops;
INIT_LIST_HEAD(&ep->queue);
ep->ep.maxpacket = (u16) ~0;
/* set NAK */
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_SNAK);
writel(tmp, &ep->regs->ctl);
ep->naking = 1;
/* disable interrupt */
tmp = readl(&regs->ep_irqmsk);
tmp |= AMD_BIT(ep->num);
writel(tmp, &regs->ep_irqmsk);
if (ep->in) {
/* unset P and IN bit of potential former DMA */
tmp = readl(&ep->regs->ctl);
tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
writel(tmp, &ep->regs->ctl);
tmp = readl(&ep->regs->sts);
tmp |= AMD_BIT(UDC_EPSTS_IN);
writel(tmp, &ep->regs->sts);
/* flush the fifo */
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_F);
writel(tmp, &ep->regs->ctl);
}
/* reset desc pointer */
writel(0, &ep->regs->desptr);
}
/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
struct udc_ep *ep = NULL;
unsigned long iflags;
if (!usbep)
return -EINVAL;
ep = container_of(usbep, struct udc_ep, ep);
if (usbep->name == ep0_string || !ep->desc)
return -EINVAL;
DBG(ep->dev, "Disable ep-%d\n", ep->num);
spin_lock_irqsave(&ep->dev->lock, iflags);
udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
empty_req_queue(ep);
ep_init(ep->dev->regs, ep);
spin_unlock_irqrestore(&ep->dev->lock, iflags);
return 0;
}
/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
struct udc_request *req;
struct udc_data_dma *dma_desc;
struct udc_ep *ep;
if (!usbep)
return NULL;
ep = container_of(usbep, struct udc_ep, ep);
VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
req = kzalloc(sizeof(struct udc_request), gfp);
if (!req)
return NULL;
req->req.dma = DMA_DONT_USE;
INIT_LIST_HEAD(&req->queue);
if (ep->dma) {
/* ep0 in requests are allocated from data pool here */
dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
&req->td_phys);
if (!dma_desc) {
kfree(req);
return NULL;
}
VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
"td_phys = %lx\n",
req, dma_desc,
(unsigned long)req->td_phys);
/* prevent from using desc. - set HOST BUSY */
dma_desc->status = AMD_ADDBITS(dma_desc->status,
UDC_DMA_STP_STS_BS_HOST_BUSY,
UDC_DMA_STP_STS_BS);
dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
req->td_data = dma_desc;
req->td_data_last = NULL;
req->chain_len = 1;
}
return &req->req;
}
/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
struct udc_ep *ep;
struct udc_request *req;
if (!usbep || !usbreq)
return;
ep = container_of(usbep, struct udc_ep, ep);
req = container_of(usbreq, struct udc_request, req);
VDBG(ep->dev, "free_req req=%p\n", req);
BUG_ON(!list_empty(&req->queue));
if (req->td_data) {
VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
/* free dma chain if created */
if (req->chain_len > 1)
udc_free_dma_chain(ep->dev, req);
pci_pool_free(ep->dev->data_requests, req->td_data,
req->td_phys);
}
kfree(req);
}
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
if (req) {
/* set last bit */
req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
/* set next pointer to itself */
req->td_data->next = req->td_phys;
/* set HOST BUSY */
req->td_data->status
= AMD_ADDBITS(req->td_data->status,
UDC_DMA_STP_STS_BS_DMA_DONE,
UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
pr_debug("bna desc = %p, sts = %08x\n",
req->td_data, req->td_data->status);
#endif
}
}
/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
struct udc_request *req = NULL;
struct usb_request *_req = NULL;
/* alloc the dummy request */
_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
if (_req) {
req = container_of(_req, struct udc_request, req);
ep->bna_dummy_req = req;
udc_init_bna_dummy(req);
}
return req;
}
/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
u8 *req_buf;
u32 *buf;
int i, j;
unsigned bytes = 0;
unsigned remaining = 0;
if (!req || !ep)
return;
req_buf = req->buf + req->actual;
prefetch(req_buf);
remaining = req->length - req->actual;
buf = (u32 *) req_buf;
bytes = ep->ep.maxpacket;
if (bytes > remaining)
bytes = remaining;
/* dwords first */
for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
writel(*(buf + i), ep->txfifo);
/* remaining bytes must be written by byte access */
for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
ep->txfifo);
}
/* dummy write confirm */
writel(0, &ep->regs->confirm);
}
/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
int i;
VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
for (i = 0; i < dwords; i++)
*(buf + i) = readl(dev->rxfifo);
return 0;
}
/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
int i, j;
u32 tmp;
VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
/* dwords first */
for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
/* remaining bytes must be read by byte access */
if (bytes % UDC_DWORD_BYTES) {
tmp = readl(dev->rxfifo);
for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
tmp = tmp >> UDC_BITS_PER_BYTE;
}
}
return 0;
}
/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
u8 *buf;
unsigned buf_space;
unsigned bytes = 0;
unsigned finished = 0;
/* received number bytes */
bytes = readl(&ep->regs->sts);
bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
buf_space = req->req.length - req->req.actual;
buf = req->req.buf + req->req.actual;
if (bytes > buf_space) {
if ((buf_space % ep->ep.maxpacket) != 0) {
DBG(ep->dev,
"%s: rx %d bytes, rx-buf space = %d bytesn\n",
ep->ep.name, bytes, buf_space);
req->req.status = -EOVERFLOW;
}
bytes = buf_space;
}
req->req.actual += bytes;
/* last packet ? */
if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
|| ((req->req.actual == req->req.length) && !req->req.zero))
finished = 1;
/* read rx fifo bytes */
VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
udc_rxfifo_read_bytes(ep->dev, buf, bytes);
return finished;
}
/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
int retval = 0;
u32 tmp;
VDBG(ep->dev, "prep_dma\n");
VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
ep->num, req->td_data);
/* set buffer pointer */
req->td_data->bufptr = req->req.dma;
/* set last bit */
req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
if (use_dma_ppb) {
retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
if (retval != 0) {
if (retval == -ENOMEM)
DBG(ep->dev, "Out of DMA memory\n");
return retval;
}
if (ep->in) {
if (req->req.length == ep->ep.maxpacket) {
/* write tx bytes */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
ep->ep.maxpacket,
UDC_DMA_IN_STS_TXBYTES);
}
}
}
if (ep->in) {
VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
"maxpacket=%d ep%d\n",
use_dma_ppb, req->req.length,
ep->ep.maxpacket, ep->num);
/*
* if bytes < max packet then tx bytes must
* be written in packet per buffer mode
*/
if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
|| ep->num == UDC_EP0OUT_IX
|| ep->num == UDC_EP0IN_IX) {
/* write tx bytes */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
req->req.length,
UDC_DMA_IN_STS_TXBYTES);
/* reset frame num */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
0,
UDC_DMA_IN_STS_FRAMENUM);
}
/* set HOST BUSY */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
UDC_DMA_STP_STS_BS_HOST_BUSY,
UDC_DMA_STP_STS_BS);
} else {
VDBG(ep->dev, "OUT set host ready\n");
/* set HOST READY */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
UDC_DMA_STP_STS_BS_HOST_READY,
UDC_DMA_STP_STS_BS);
/* clear NAK by writing CNAK */
if (ep->naking) {
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &ep->regs->ctl);
ep->naking = 0;
UDC_QUEUE_CNAK(ep, ep->num);
}
}
return retval;
}
/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
struct udc *dev;
unsigned halted;
VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
dev = ep->dev;
/* unmap DMA */
if (ep->dma)
usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
halted = ep->halted;
ep->halted = 1;
/* set new status if pending */
if (req->req.status == -EINPROGRESS)
req->req.status = sts;
/* remove from ep queue */
list_del_init(&req->queue);
VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
&req->req, req->req.length, ep->ep.name, sts);
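/*
 * Drop the lock around the completion callback: gadget drivers may
 * legitimately re-queue requests from inside their ->complete() handler
 * (see the __releases/__acquires annotations above).
 */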
spin_unlock(&dev->lock);
req->req.complete(&ep->ep, &req->req);
spin_lock(&dev->lock);
ep->halted = halted;
}
/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
int ret_val = 0;
struct udc_data_dma *td;
struct udc_data_dma *td_last = NULL;
unsigned int i;
DBG(dev, "free chain req = %p\n", req);
/* do not free first desc., will be done by free for request */
td_last = req->td_data;
td = phys_to_virt(td_last->next);
for (i = 1; i < req->chain_len; i++) {
pci_pool_free(dev->data_requests, td,
(dma_addr_t) td_last->next);
td_last = td;
td = phys_to_virt(td_last->next);
}
return ret_val;
}
/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
struct udc_data_dma *td;
td = req->td_data;
while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
td = phys_to_virt(td->next);
return td;
}
/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
struct udc_data_dma *td;
u32 count;
td = req->td_data;
/* received number bytes */
count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
td = phys_to_virt(td->next);
/* received number bytes */
if (td) {
count += AMD_GETBITS(td->status,
UDC_DMA_OUT_STS_RXBYTES);
}
}
return count;
}
/* Creates or re-inits a DMA chain */
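/*
 * Sketch of the chain built below for a request of 3 * maxpacket bytes
 * (assumed packet-per-buffer layout):
 *
 *   req->td_data --next--> td1 --next--> td2 (L bit set)
 *   bufptr = dma+0         dma+maxpkt    dma+2*maxpkt
 *
 * Each descriptor covers at most ep.maxpacket bytes; the last descriptor
 * gets the L (last) bit and is remembered in req->td_data_last.
 */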
static int udc_create_dma_chain(
struct udc_ep *ep,
struct udc_request *req,
unsigned long buf_len, gfp_t gfp_flags
)
{
unsigned long bytes = req->req.length;
unsigned int i;
dma_addr_t dma_addr;
struct udc_data_dma *td = NULL;
struct udc_data_dma *last = NULL;
unsigned long txbytes;
unsigned create_new_chain = 0;
unsigned len;
VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
bytes, buf_len);
dma_addr = DMA_DONT_USE;
/* unset L bit in first desc for OUT */
if (!ep->in)
req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
/* alloc only new desc's if not already available */
len = req->req.length / ep->ep.maxpacket;
if (req->req.length % ep->ep.maxpacket)
len++;
if (len > req->chain_len) {
/* shorter chain already allocated before */
if (req->chain_len > 1)
udc_free_dma_chain(ep->dev, req);
req->chain_len = len;
create_new_chain = 1;
}
td = req->td_data;
/* gen. required number of descriptors and buffers */
for (i = buf_len; i < bytes; i += buf_len) {
/* create or determine next desc. */
if (create_new_chain) {
td = pci_pool_alloc(ep->dev->data_requests,
gfp_flags, &dma_addr);
if (!td)
return -ENOMEM;
td->status = 0;
} else if (i == buf_len) {
/* first td */
td = (struct udc_data_dma *) phys_to_virt(
req->td_data->next);
td->status = 0;
} else {
td = (struct udc_data_dma *) phys_to_virt(last->next);
td->status = 0;
}
if (td)
td->bufptr = req->req.dma + i; /* assign buffer */
else
break;
/* short packet ? */
if ((bytes - i) >= buf_len) {
txbytes = buf_len;
} else {
/* short packet */
txbytes = bytes - i;
}
/* link td and assign tx bytes */
if (i == buf_len) {
if (create_new_chain)
req->td_data->next = dma_addr;
/*
else
req->td_data->next = virt_to_phys(td);
*/
/* write tx bytes */
if (ep->in) {
/* first desc */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
ep->ep.maxpacket,
UDC_DMA_IN_STS_TXBYTES);
/* second desc */
td->status = AMD_ADDBITS(td->status,
txbytes,
UDC_DMA_IN_STS_TXBYTES);
}
} else {
if (create_new_chain)
last->next = dma_addr;
/*
else
last->next = virt_to_phys(td);
*/
if (ep->in) {
/* write tx bytes */
td->status = AMD_ADDBITS(td->status,
txbytes,
UDC_DMA_IN_STS_TXBYTES);
}
}
last = td;
}
/* set last bit */
if (td) {
td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
/* last desc. points to itself */
req->td_data_last = td;
}
return 0;
}
/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
u32 tmp;
VDBG(dev, "udc_set_rde()\n");
/* stop RDE timer */
if (timer_pending(&udc_timer)) {
set_rde = 0;
mod_timer(&udc_timer, jiffies - 1);
}
/* set RDE */
tmp = readl(&dev->regs->ctl);
tmp |= AMD_BIT(UDC_DEVCTL_RDE);
writel(tmp, &dev->regs->ctl);
}
/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
int retval = 0;
u8 open_rxfifo = 0;
unsigned long iflags;
struct udc_ep *ep;
struct udc_request *req;
struct udc *dev;
u32 tmp;
/* check the inputs */
req = container_of(usbreq, struct udc_request, req);
if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
|| !list_empty(&req->queue))
return -EINVAL;
ep = container_of(usbep, struct udc_ep, ep);
if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
return -EINVAL;
VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
dev = ep->dev;
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
/* map dma (usually done before) */
if (ep->dma) {
VDBG(dev, "DMA map req %p\n", req);
retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
if (retval)
return retval;
}
VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
usbep->name, usbreq, usbreq->length,
req->td_data, usbreq->buf);
spin_lock_irqsave(&dev->lock, iflags);
usbreq->actual = 0;
usbreq->status = -EINPROGRESS;
req->dma_done = 0;
/* on empty queue just do first transfer */
if (list_empty(&ep->queue)) {
/* zlp */
if (usbreq->length == 0) {
/* IN zlp's are handled by hardware */
complete_req(ep, req, 0);
VDBG(dev, "%s: zlp\n", ep->ep.name);
/*
* if set_config or set_intf is waiting for ack by zlp
* then set CSR_DONE
*/
if (dev->set_cfg_not_acked) {
tmp = readl(&dev->regs->ctl);
tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
writel(tmp, &dev->regs->ctl);
dev->set_cfg_not_acked = 0;
}
/* setup command is ACK'ed now by zlp */
if (dev->waiting_zlp_ack_ep0in) {
/* clear NAK by writing CNAK in EP0_IN */
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
dev->ep[UDC_EP0IN_IX].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
UDC_EP0IN_IX);
dev->waiting_zlp_ack_ep0in = 0;
}
goto finished;
}
if (ep->dma) {
retval = prep_dma(ep, req, gfp);
if (retval != 0)
goto finished;
/* write desc pointer to enable DMA */
if (ep->in) {
/* set HOST READY */
req->td_data->status =
AMD_ADDBITS(req->td_data->status,
UDC_DMA_IN_STS_BS_HOST_READY,
UDC_DMA_IN_STS_BS);
}
/* disabled rx dma while descriptor update */
if (!ep->in) {
/* stop RDE timer */
if (timer_pending(&udc_timer)) {
set_rde = 0;
mod_timer(&udc_timer, jiffies - 1);
}
/* clear RDE */
tmp = readl(&dev->regs->ctl);
tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
writel(tmp, &dev->regs->ctl);
open_rxfifo = 1;
/*
* if BNA occurred then let BNA dummy desc.
* point to current desc.
*/
if (ep->bna_occurred) {
VDBG(dev, "copy to BNA dummy desc.\n");
memcpy(ep->bna_dummy_req->td_data,
req->td_data,
sizeof(struct udc_data_dma));
}
}
/* write desc pointer */
writel(req->td_phys, &ep->regs->desptr);
/* clear NAK by writing CNAK */
if (ep->naking) {
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &ep->regs->ctl);
ep->naking = 0;
UDC_QUEUE_CNAK(ep, ep->num);
}
if (ep->in) {
/* enable ep irq */
tmp = readl(&dev->regs->ep_irqmsk);
tmp &= AMD_UNMASK_BIT(ep->num);
writel(tmp, &dev->regs->ep_irqmsk);
}
} else if (ep->in) {
/* enable ep irq */
tmp = readl(&dev->regs->ep_irqmsk);
tmp &= AMD_UNMASK_BIT(ep->num);
writel(tmp, &dev->regs->ep_irqmsk);
}
} else if (ep->dma) {
/*
* prep_dma is not used for OUT ep's here; this is not possible
* in PPB modes because of how the DMA chain is created
*/
if (ep->in) {
retval = prep_dma(ep, req, gfp);
if (retval != 0)
goto finished;
}
}
VDBG(dev, "list_add\n");
/* add request to ep queue */
if (req) {
list_add_tail(&req->queue, &ep->queue);
/* open rxfifo if out data queued */
if (open_rxfifo) {
/* enable DMA */
req->dma_going = 1;
udc_set_rde(dev);
if (ep->num != UDC_EP0OUT_IX)
dev->data_ep_queued = 1;
}
/* stop OUT naking */
if (!ep->in) {
if (!use_dma && udc_rxfifo_pending) {
DBG(dev, "udc_queue(): pending bytes in "
"rxfifo after nyet\n");
/*
* read pending bytes after nyet:
* referring to isr
*/
if (udc_rxfifo_read(ep, req)) {
/* finish */
complete_req(ep, req, 0);
}
udc_rxfifo_pending = 0;
}
}
}
finished:
spin_unlock_irqrestore(&dev->lock, iflags);
return retval;
}
/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
struct udc_request *req;
ep->halted = 1;
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct udc_request,
queue);
complete_req(ep, req, -ESHUTDOWN);
}
}
/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
struct udc_ep *ep;
struct udc_request *req;
unsigned halted;
unsigned long iflags;
ep = container_of(usbep, struct udc_ep, ep);
if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
&& ep->num != UDC_EP0OUT_IX)))
return -EINVAL;
req = container_of(usbreq, struct udc_request, req);
spin_lock_irqsave(&ep->dev->lock, iflags);
halted = ep->halted;
ep->halted = 1;
/* request in processing or next one */
if (ep->queue.next == &req->queue) {
if (ep->dma && req->dma_going) {
if (ep->in)
ep->cancel_transfer = 1;
else {
u32 tmp;
u32 dma_sts;
/* stop potential receive DMA */
tmp = readl(&udc->regs->ctl);
writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
&udc->regs->ctl);
/*
* Cancel transfer later in ISR
* if descriptor was touched.
*/
dma_sts = AMD_GETBITS(req->td_data->status,
UDC_DMA_OUT_STS_BS);
if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
ep->cancel_transfer = 1;
else {
udc_init_bna_dummy(ep->req);
writel(ep->bna_dummy_req->td_phys,
&ep->regs->desptr);
}
writel(tmp, &udc->regs->ctl);
}
}
}
complete_req(ep, req, -ECONNRESET);
ep->halted = halted;
spin_unlock_irqrestore(&ep->dev->lock, iflags);
return 0;
}
/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
struct udc_ep *ep;
u32 tmp;
unsigned long iflags;
int retval = 0;
if (!usbep)
return -EINVAL;
pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
ep = container_of(usbep, struct udc_ep, ep);
if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
return -EINVAL;
if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
spin_lock_irqsave(&udc_stall_spinlock, iflags);
/* halt or clear halt */
if (halt) {
if (ep->num == 0)
ep->dev->stall_ep0in = 1;
else {
/*
* set STALL
* rxfifo empty not taken into account
*/
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_S);
writel(tmp, &ep->regs->ctl);
ep->halted = 1;
/* setup poll timer */
if (!timer_pending(&udc_pollstall_timer)) {
udc_pollstall_timer.expires = jiffies +
HZ * UDC_POLLSTALL_TIMER_USECONDS
/ (1000 * 1000);
if (!stop_pollstall_timer) {
DBG(ep->dev, "start polltimer\n");
add_timer(&udc_pollstall_timer);
}
}
}
} else {
/* ep is halted by set_halt() before */
if (ep->halted) {
tmp = readl(&ep->regs->ctl);
/* clear stall bit */
tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
/* clear NAK by writing CNAK */
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &ep->regs->ctl);
ep->halted = 0;
UDC_QUEUE_CNAK(ep, ep->num);
}
}
spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
return retval;
}
/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
.enable = udc_ep_enable,
.disable = udc_ep_disable,
.alloc_request = udc_alloc_request,
.free_request = udc_free_request,
.queue = udc_queue,
.dequeue = udc_dequeue,
.set_halt = udc_set_halt,
/* fifo ops not implemented */
};
/*-------------------------------------------------------------------------*/
/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
return -EOPNOTSUPP;
}
/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
struct udc *dev;
if (!gadget)
return -EINVAL;
dev = container_of(gadget, struct udc, gadget);
udc_remote_wakeup(dev);
return 0;
}
static int amd5536_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *));
static int amd5536_stop(struct usb_gadget_driver *driver);
/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
.wakeup = udc_wakeup,
.get_frame = udc_get_frame,
.start = amd5536_start,
.stop = amd5536_stop,
};
/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
/* make gadget ep lists */
INIT_LIST_HEAD(&dev->gadget.ep_list);
list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
&dev->gadget.ep_list);
list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
&dev->gadget.ep_list);
list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
&dev->gadget.ep_list);
/* fifo config */
dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
if (dev->gadget.speed == USB_SPEED_FULL)
dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
else if (dev->gadget.speed == USB_SPEED_HIGH)
dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}
/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
u32 tmp;
/* init controller by soft reset */
udc_soft_reset(dev);
/* mask not needed interrupts */
udc_mask_unused_interrupts(dev);
/* put into initial config */
udc_basic_init(dev);
/* link up all endpoints */
udc_setup_endpoints(dev);
/* program speed */
tmp = readl(&dev->regs->cfg);
if (use_fullspeed)
tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
else
tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
writel(tmp, &dev->regs->cfg);
return 0;
}
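/*
 * Register access idiom used throughout this driver: multi-bit fields
 * are updated with a read-modify-write sequence built on the helper
 * macros from the header. Illustrative sketch of the pattern as used
 * above (assuming AMD_ADDBITS() masks the named field and ORs in the
 * new value, which is how the code reads):
 *
 *	tmp = readl(&dev->regs->cfg);
 *	tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
 *	writel(tmp, &dev->regs->cfg);
 *
 * Single bits are handled the same way through AMD_BIT(),
 * AMD_CLEAR_BIT() and AMD_UNMASK_BIT().
 */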
/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
u32 tmp;
DBG(dev, "udc_basic_init()\n");
dev->gadget.speed = USB_SPEED_UNKNOWN;
/* stop RDE timer */
if (timer_pending(&udc_timer)) {
set_rde = 0;
mod_timer(&udc_timer, jiffies - 1);
}
/* stop poll stall timer */
if (timer_pending(&udc_pollstall_timer))
mod_timer(&udc_pollstall_timer, jiffies - 1);
/* disable DMA */
tmp = readl(&dev->regs->ctl);
tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
writel(tmp, &dev->regs->ctl);
/* enable dynamic CSR programming */
tmp = readl(&dev->regs->cfg);
tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
/* set self powered */
tmp |= AMD_BIT(UDC_DEVCFG_SP);
/* set remote wakeupable */
tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
writel(tmp, &dev->regs->cfg);
make_ep_lists(dev);
dev->data_ep_enabled = 0;
dev->data_ep_queued = 0;
}
/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
struct udc_ep *ep;
u32 tmp;
u32 reg;
DBG(dev, "udc_setup_endpoints()\n");
/* read enum speed */
tmp = readl(&dev->regs->sts);
tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
dev->gadget.speed = USB_SPEED_HIGH;
else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
dev->gadget.speed = USB_SPEED_FULL;
/* set basic ep parameters */
for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
ep = &dev->ep[tmp];
ep->dev = dev;
ep->ep.name = ep_string[tmp];
ep->num = tmp;
/* txfifo size is calculated at enable time */
ep->txfifo = dev->txfifo;
/* fifo size */
if (tmp < UDC_EPIN_NUM) {
ep->fifo_depth = UDC_TXFIFO_SIZE;
ep->in = 1;
} else {
ep->fifo_depth = UDC_RXFIFO_SIZE;
ep->in = 0;
}
ep->regs = &dev->ep_regs[tmp];
/*
* the ep is reset only if it was not enabled before, to avoid
* disabling ep interrupts when the ENUM interrupt occurs but the
* ep has not been enabled by the gadget driver
*/
if (!ep->desc)
ep_init(dev->regs, ep);
if (use_dma) {
/*
* ep->dma is not really used, just to indicate that
* DMA is active: remove this
* dma regs = dev control regs
*/
ep->dma = &dev->regs->ctl;
/* nak OUT endpoints until enable - not for ep0 */
if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
&& tmp > UDC_EPIN_NUM) {
/* set NAK */
reg = readl(&dev->ep[tmp].regs->ctl);
reg |= AMD_BIT(UDC_EPCTL_SNAK);
writel(reg, &dev->ep[tmp].regs->ctl);
dev->ep[tmp].naking = 1;
}
}
}
/* EP0 max packet */
if (dev->gadget.speed == USB_SPEED_FULL) {
dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
UDC_FS_EP0OUT_MAX_PKT_SIZE;
} else if (dev->gadget.speed == USB_SPEED_HIGH) {
dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
}
/*
* with suspend bug workaround, ep0 params for gadget driver
* are set at gadget driver bind() call
*/
dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
dev->ep[UDC_EP0IN_IX].halted = 0;
INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
/* init cfg/alt/int */
dev->cur_config = 0;
dev->cur_intf = 0;
dev->cur_alt = 0;
}
/* Bring-up after a Connect event; initial setup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
dev_info(&dev->pdev->dev, "USB Connect\n");
dev->connected = 1;
/* put into initial config */
udc_basic_init(dev);
/* enable device setup interrupts */
udc_enable_dev_setup_interrupts(dev);
}
/*
* Calls the gadget with a disconnect event, resets the UDC and does
* the initial bring-up to be ready for ep0 events
*/
static void usb_disconnect(struct udc *dev)
{
dev_info(&dev->pdev->dev, "USB Disconnect\n");
dev->connected = 0;
/* mask interrupts */
udc_mask_unused_interrupts(dev);
/* REVISIT there doesn't seem to be a point to having this
* talk to a tasklet ... do it directly, we already hold
* the spinlock needed to process the disconnect.
*/
tasklet_schedule(&disconnect_tasklet);
}
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
struct udc *dev = (struct udc *)(*((struct udc **) par));
u32 tmp;
DBG(dev, "Tasklet disconnect\n");
spin_lock_irq(&dev->lock);
if (dev->driver) {
spin_unlock(&dev->lock);
dev->driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
/* empty queues */
for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
empty_req_queue(&dev->ep[tmp]);
}
/* disable ep0 */
ep_init(dev->regs,
&dev->ep[UDC_EP0IN_IX]);
if (!soft_reset_occured) {
/* init controller by soft reset */
udc_soft_reset(dev);
soft_reset_occured++;
}
/* re-enable dev interrupts */
udc_enable_dev_setup_interrupts(dev);
/* back to full speed ? */
if (use_fullspeed) {
tmp = readl(&dev->regs->cfg);
tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
writel(tmp, &dev->regs->cfg);
}
spin_unlock_irq(&dev->lock);
}
/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
unsigned long flags;
DBG(dev, "Soft reset\n");
/*
* reset possible waiting interrupts, because int.
* status is lost after soft reset,
* ep int. status reset
*/
writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
/* device int. status reset */
writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
spin_lock_irqsave(&udc_irq_spinlock, flags);
writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
readl(&dev->regs->cfg);
spin_unlock_irqrestore(&udc_irq_spinlock, flags);
}
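/*
 * The readl() of ->cfg right after the soft-reset writel() above acts
 * as a read-back that flushes the posted MMIO write, so the reset has
 * reached the device before the spinlock is released. Minimal sketch
 * of the idiom (assuming cfg reads are free of side effects on this
 * device):
 *
 *	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 *	readl(&dev->regs->cfg);		(flushes the posted write)
 */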
/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
u32 tmp;
spin_lock_irq(&udc_irq_spinlock);
if (set_rde > 0) {
/*
* conditionally open the fifo if it was already
* filled on the last timer call
*/
if (set_rde > 1) {
/* set RDE to receive setup data */
tmp = readl(&udc->regs->ctl);
tmp |= AMD_BIT(UDC_DEVCTL_RDE);
writel(tmp, &udc->regs->ctl);
set_rde = -1;
} else if (readl(&udc->regs->sts)
& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
/*
* if fifo empty setup polling, do not just
* open the fifo
*/
udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
if (!stop_timer)
add_timer(&udc_timer);
} else {
/*
* the fifo contains data now: set up the timer so the
* fifo is opened when it expires and setup packets can
* be received. When data packets get queued by the
* gadget layer, the timer is forced to expire with
* set_rde=0 (RDE is then set in udc_queue())
*/
set_rde++;
/* debug: lhadmot_timer_start = 221070 */
udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
if (!stop_timer)
add_timer(&udc_timer);
}
} else
set_rde = -1; /* RDE was set by udc_queue() */
spin_unlock_irq(&udc_irq_spinlock);
if (stop_timer)
complete(&on_exit);
}
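/*
 * Interpretation of the set_rde state variable as used above and in
 * udc_queue()/udc_ep0_set_rde() (derived from this file, not from the
 * data book):
 *	set_rde < 0	RDE handling finished, timer idle
 *	set_rde == 0	timer forced to expire, RDE set by udc_queue()
 *	set_rde == 1	polling armed, fifo was empty on the last check
 *	set_rde > 1	fifo held data on the last check, open it now
 */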
/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
u32 tmp;
/* handle the stall state as long as the ep is halted */
if (ep->halted == 1) {
tmp = readl(&ep->regs->ctl);
/* STALL cleared ? */
if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
/*
* FIXME: MSC spec requires that stall remains
* even on receipt of CLEAR_FEATURE HALT. So
* we would set STALL again here to be compliant.
* But with current mass storage drivers this does
* not work (would produce endless host retries).
* So we clear halt on CLEAR_FEATURE.
*
DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
tmp |= AMD_BIT(UDC_EPCTL_S);
writel(tmp, &ep->regs->ctl);*/
/* clear NAK by writing CNAK */
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &ep->regs->ctl);
ep->halted = 0;
UDC_QUEUE_CNAK(ep, ep->num);
}
}
}
/* Stall timer callback to poll the S bit and set it again afterwards */
static void udc_pollstall_timer_function(unsigned long v)
{
struct udc_ep *ep;
int halted = 0;
spin_lock_irq(&udc_stall_spinlock);
/*
* only one IN and one OUT endpoint are handled
* IN poll stall
*/
ep = &udc->ep[UDC_EPIN_IX];
udc_handle_halt_state(ep);
if (ep->halted)
halted = 1;
/* OUT poll stall */
ep = &udc->ep[UDC_EPOUT_IX];
udc_handle_halt_state(ep);
if (ep->halted)
halted = 1;
/* setup timer again when still halted */
if (!stop_pollstall_timer && halted) {
udc_pollstall_timer.expires = jiffies +
HZ * UDC_POLLSTALL_TIMER_USECONDS
/ (1000 * 1000);
add_timer(&udc_pollstall_timer);
}
spin_unlock_irq(&udc_stall_spinlock);
if (stop_pollstall_timer)
complete(&on_pollstall_exit);
}
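/*
 * The expiry arithmetic above converts microseconds to jiffies by
 * hand: HZ * UDC_POLLSTALL_TIMER_USECONDS / (1000 * 1000). A sketch
 * of the equivalent form with the standard helper (same rounding
 * caveats as any microsecond conversion):
 *
 *	udc_pollstall_timer.expires = jiffies +
 *		usecs_to_jiffies(UDC_POLLSTALL_TIMER_USECONDS);
 */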
/* Inits endpoint 0 so that SETUP packets are processed */
static void activate_control_endpoints(struct udc *dev)
{
u32 tmp;
DBG(dev, "activate_control_endpoints\n");
/* flush fifo */
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_F);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
/* set ep0 directions */
dev->ep[UDC_EP0IN_IX].in = 1;
dev->ep[UDC_EP0OUT_IX].in = 0;
/* set buffer size (tx fifo entries) of EP0_IN */
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
if (dev->gadget.speed == USB_SPEED_FULL)
tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
UDC_EPIN_BUFF_SIZE);
else if (dev->gadget.speed == USB_SPEED_HIGH)
tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
UDC_EPIN_BUFF_SIZE);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
/* set max packet size of EP0_IN */
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
if (dev->gadget.speed == USB_SPEED_FULL)
tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
UDC_EP_MAX_PKT_SIZE);
else if (dev->gadget.speed == USB_SPEED_HIGH)
tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
UDC_EP_MAX_PKT_SIZE);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
/* set max packet size of EP0_OUT */
tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
if (dev->gadget.speed == USB_SPEED_FULL)
tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
UDC_EP_MAX_PKT_SIZE);
else if (dev->gadget.speed == USB_SPEED_HIGH)
tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
UDC_EP_MAX_PKT_SIZE);
writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
/* set max packet size of EP0 in UDC CSR */
tmp = readl(&dev->csr->ne[0]);
if (dev->gadget.speed == USB_SPEED_FULL)
tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
UDC_CSR_NE_MAX_PKT);
else if (dev->gadget.speed == USB_SPEED_HIGH)
tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
UDC_CSR_NE_MAX_PKT);
writel(tmp, &dev->csr->ne[0]);
if (use_dma) {
dev->ep[UDC_EP0OUT_IX].td->status |=
AMD_BIT(UDC_DMA_OUT_STS_L);
/* write dma desc address */
writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
&dev->ep[UDC_EP0OUT_IX].regs->subptr);
writel(dev->ep[UDC_EP0OUT_IX].td_phys,
&dev->ep[UDC_EP0OUT_IX].regs->desptr);
/* stop RDE timer */
if (timer_pending(&udc_timer)) {
set_rde = 0;
mod_timer(&udc_timer, jiffies - 1);
}
/* stop pollstall timer */
if (timer_pending(&udc_pollstall_timer))
mod_timer(&udc_pollstall_timer, jiffies - 1);
/* enable DMA */
tmp = readl(&dev->regs->ctl);
tmp |= AMD_BIT(UDC_DEVCTL_MODE)
| AMD_BIT(UDC_DEVCTL_RDE)
| AMD_BIT(UDC_DEVCTL_TDE);
if (use_dma_bufferfill_mode)
tmp |= AMD_BIT(UDC_DEVCTL_BF);
else if (use_dma_ppb_du)
tmp |= AMD_BIT(UDC_DEVCTL_DU);
writel(tmp, &dev->regs->ctl);
}
/* clear NAK by writing CNAK for EP0IN */
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
dev->ep[UDC_EP0IN_IX].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
/* clear NAK by writing CNAK for EP0OUT */
tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
dev->ep[UDC_EP0OUT_IX].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
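/*
 * NAK convention visible above: the core NAKs an endpoint until CNAK
 * is written (SNAK arms the NAK again), ep->naking mirrors that state
 * in software, and UDC_QUEUE_CNAK() defers the CNAK when it cannot
 * take effect immediately, for example while the rxfifo is not yet
 * empty. This is an interpretation based on how the fields are used
 * in this file.
 */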
/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
activate_control_endpoints(dev);
/* enable ep0 interrupts */
udc_enable_ep0_interrupts(dev);
/* enable device setup interrupts */
udc_enable_dev_setup_interrupts(dev);
return 0;
}
/* Called by gadget driver to register itself */
static int amd5536_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct udc *dev = udc;
int retval;
u32 tmp;
if (!driver || !bind || !driver->setup
|| driver->max_speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
if (dev->driver)
return -EBUSY;
driver->driver.bus = NULL;
dev->driver = driver;
dev->gadget.dev.driver = &driver->driver;
retval = bind(&dev->gadget);
/* Some gadget drivers use both ep0 directions.
* NOTE: to gadget driver, ep0 is just one endpoint...
*/
dev->ep[UDC_EP0OUT_IX].ep.driver_data =
dev->ep[UDC_EP0IN_IX].ep.driver_data;
if (retval) {
DBG(dev, "binding to %s returning %d\n",
driver->driver.name, retval);
dev->driver = NULL;
dev->gadget.dev.driver = NULL;
return retval;
}
/* get ready for ep0 traffic */
setup_ep0(dev);
/* clear SD */
tmp = readl(&dev->regs->ctl);
tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
writel(tmp, &dev->regs->ctl);
usb_connect(dev);
return 0;
}
/* shutdown requests and disconnect from gadget */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
int tmp;
if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
}
/* empty queues and init hardware */
udc_basic_init(dev);
for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
empty_req_queue(&dev->ep[tmp]);
udc_setup_endpoints(dev);
}
/* Called by gadget driver to unregister itself */
static int amd5536_stop(struct usb_gadget_driver *driver)
{
struct udc *dev = udc;
unsigned long flags;
u32 tmp;
if (!dev)
return -ENODEV;
if (!driver || driver != dev->driver || !driver->unbind)
return -EINVAL;
spin_lock_irqsave(&dev->lock, flags);
udc_mask_unused_interrupts(dev);
shutdown(dev, driver);
spin_unlock_irqrestore(&dev->lock, flags);
driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
/* set SD */
tmp = readl(&dev->regs->ctl);
tmp |= AMD_BIT(UDC_DEVCTL_SD);
writel(tmp, &dev->regs->ctl);
DBG(dev, "%s: unregistered\n", driver->driver.name);
return 0;
}
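/*
 * Setting UDC_DEVCTL_SD above, and clearing it in amd5536_start(),
 * reads as the soft-disconnect control: with SD set the device stops
 * answering on the bus, so the host sees an unplug while the driver
 * stays loaded. This is an interpretation of the bit name; the data
 * book would confirm.
 */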
/* Clear pending NAK bits */
static void udc_process_cnak_queue(struct udc *dev)
{
u32 tmp;
u32 reg;
/* check IN endpoints */
DBG(dev, "CNAK pending queue processing\n");
for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
if (cnak_pending & (1 << tmp)) {
DBG(dev, "CNAK pending for ep%d\n", tmp);
/* clear NAK by writing CNAK */
reg = readl(&dev->ep[tmp].regs->ctl);
reg |= AMD_BIT(UDC_EPCTL_CNAK);
writel(reg, &dev->ep[tmp].regs->ctl);
dev->ep[tmp].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
}
}
/* ... and ep0out */
if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
/* clear NAK by writing CNAK */
reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
reg |= AMD_BIT(UDC_EPCTL_CNAK);
writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
dev->ep[UDC_EP0OUT_IX].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
dev->ep[UDC_EP0OUT_IX].num);
}
}
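/*
 * cnak_pending is a bitmask with one bit per endpoint index:
 * UDC_QUEUE_CNAK() queues a deferred CNAK when it cannot be written
 * immediately, and this function drains the queue once the rxfifo is
 * empty. Illustrative test, matching the bit layout in the loops
 * above:
 *
 *	if (cnak_pending & (1 << ep->num))
 *		(CNAK still outstanding for this endpoint)
 */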
/* Enable RX DMA after a setup packet */
static void udc_ep0_set_rde(struct udc *dev)
{
if (use_dma) {
/*
* only enable RX DMA when no data endpoint is enabled
* or data is queued
*/
if (!dev->data_ep_enabled || dev->data_ep_queued) {
udc_set_rde(dev);
} else {
/*
* set up the timer for enabling RDE (to not enable
* RXFIFO DMA for data endpoints too early)
*/
if (set_rde != 0 && !timer_pending(&udc_timer)) {
udc_timer.expires =
jiffies + HZ/UDC_RDE_TIMER_DIV;
set_rde = 1;
if (!stop_timer)
add_timer(&udc_timer);
}
}
}
}
/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
irqreturn_t ret_val = IRQ_NONE;
u32 tmp;
struct udc_ep *ep;
struct udc_request *req;
unsigned int count;
struct udc_data_dma *td = NULL;
unsigned dma_done;
VDBG(dev, "ep%d irq\n", ep_ix);
ep = &dev->ep[ep_ix];
tmp = readl(&ep->regs->sts);
if (use_dma) {
/* BNA event ? */
if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
ep->num, readl(&ep->regs->desptr));
/* clear BNA */
writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
if (!ep->cancel_transfer)
ep->bna_occurred = 1;
else
ep->cancel_transfer = 0;
ret_val = IRQ_HANDLED;
goto finished;
}
}
/* HE event ? */
if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
/* clear HE */
writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
ret_val = IRQ_HANDLED;
goto finished;
}
if (!list_empty(&ep->queue)) {
/* next request */
req = list_entry(ep->queue.next,
struct udc_request, queue);
} else {
req = NULL;
udc_rxfifo_pending = 1;
}
VDBG(dev, "req = %p\n", req);
/* fifo mode */
if (!use_dma) {
/* read fifo */
if (req && udc_rxfifo_read(ep, req)) {
ret_val = IRQ_HANDLED;
/* finish */
complete_req(ep, req, 0);
/* next request */
if (!list_empty(&ep->queue) && !ep->halted) {
req = list_entry(ep->queue.next,
struct udc_request, queue);
} else
req = NULL;
}
/* DMA */
} else if (!ep->cancel_transfer && req != NULL) {
ret_val = IRQ_HANDLED;
/* check for DMA done */
if (!use_dma_ppb) {
dma_done = AMD_GETBITS(req->td_data->status,
UDC_DMA_OUT_STS_BS);
/* packet per buffer mode - rx bytes */
} else {
/*
* if BNA occurred then recover desc. from
* BNA dummy desc.
*/
if (ep->bna_occurred) {
VDBG(dev, "Recover desc. from BNA dummy\n");
memcpy(req->td_data, ep->bna_dummy_req->td_data,
sizeof(struct udc_data_dma));
ep->bna_occurred = 0;
udc_init_bna_dummy(ep->req);
}
td = udc_get_last_dma_desc(req);
dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
}
if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
/* buffer fill mode - rx bytes */
if (!use_dma_ppb) {
/* number of bytes received */
count = AMD_GETBITS(req->td_data->status,
UDC_DMA_OUT_STS_RXBYTES);
VDBG(dev, "rx bytes=%u\n", count);
/* packet per buffer mode - rx bytes */
} else {
VDBG(dev, "req->td_data=%p\n", req->td_data);
VDBG(dev, "last desc = %p\n", td);
/* number of bytes received */
if (use_dma_ppb_du) {
/* every desc. counts bytes */
count = udc_get_ppbdu_rxbytes(req);
} else {
/* last desc. counts bytes */
count = AMD_GETBITS(td->status,
UDC_DMA_OUT_STS_RXBYTES);
if (!count && req->req.length
== UDC_DMA_MAXPACKET) {
/*
* on 64k packets the RXBYTES
* field is zero
*/
count = UDC_DMA_MAXPACKET;
}
}
VDBG(dev, "last desc rx bytes=%u\n", count);
}
tmp = req->req.length - req->req.actual;
if (count > tmp) {
if ((tmp % ep->ep.maxpacket) != 0) {
DBG(dev, "%s: rx %db, space=%db\n",
ep->ep.name, count, tmp);
req->req.status = -EOVERFLOW;
}
count = tmp;
}
req->req.actual += count;
req->dma_going = 0;
/* complete request */
complete_req(ep, req, 0);
/* next request */
if (!list_empty(&ep->queue) && !ep->halted) {
req = list_entry(ep->queue.next,
struct udc_request,
queue);
/*
* DMA may already be started by udc_queue(),
* called from the gadget driver's completion
* routine. This happens when the queue
* holds only one request.
*/
if (req->dma_going == 0) {
/* next dma */
if (prep_dma(ep, req, GFP_ATOMIC) != 0)
goto finished;
/* write desc pointer */
writel(req->td_phys,
&ep->regs->desptr);
req->dma_going = 1;
/* enable DMA */
udc_set_rde(dev);
}
} else {
/*
* implant BNA dummy descriptor to allow
* RXFIFO opening by RDE
*/
if (ep->bna_dummy_req) {
/* write desc pointer */
writel(ep->bna_dummy_req->td_phys,
&ep->regs->desptr);
ep->bna_occurred = 0;
}
/*
* schedule timer for setting RDE if queue
* remains empty, to allow ep0 packets to pass
* through
*/
if (set_rde != 0
&& !timer_pending(&udc_timer)) {
udc_timer.expires =
jiffies
+ HZ*UDC_RDE_TIMER_SECONDS;
set_rde = 1;
if (!stop_timer)
add_timer(&udc_timer);
}
if (ep->num != UDC_EP0OUT_IX)
dev->data_ep_queued = 0;
}
} else {
/*
* RX DMA must be reenabled for each desc in PPBDU mode
* and must be enabled for PPBNDU mode in case of BNA
*/
udc_set_rde(dev);
}
} else if (ep->cancel_transfer) {
ret_val = IRQ_HANDLED;
ep->cancel_transfer = 0;
}
/* check pending CNAKS */
if (cnak_pending) {
/* CNAK processing only when the rxfifo is empty */
if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
udc_process_cnak_queue(dev);
}
/* clear OUT bits in ep status */
writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
return ret_val;
}
/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
irqreturn_t ret_val = IRQ_NONE;
u32 tmp;
u32 epsts;
struct udc_ep *ep;
struct udc_request *req;
struct udc_data_dma *td;
unsigned dma_done;
unsigned len;
ep = &dev->ep[ep_ix];
epsts = readl(&ep->regs->sts);
if (use_dma) {
/* BNA ? */
if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
dev_err(&dev->pdev->dev,
"BNA ep%din occurred - DESPTR = %08lx\n",
ep->num,
(unsigned long) readl(&ep->regs->desptr));
/* clear BNA */
writel(epsts, &ep->regs->sts);
ret_val = IRQ_HANDLED;
goto finished;
}
}
/* HE event ? */
if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
dev_err(&dev->pdev->dev,
"HE ep%dn occurred - DESPTR = %08lx\n",
ep->num, (unsigned long) readl(&ep->regs->desptr));
/* clear HE */
writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
ret_val = IRQ_HANDLED;
goto finished;
}
/* DMA completion */
if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
VDBG(dev, "TDC set- completion\n");
ret_val = IRQ_HANDLED;
if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct udc_request, queue);
/*
* length bytes transferred
* check dma done of last desc. in PPBDU mode
*/
if (use_dma_ppb_du) {
td = udc_get_last_dma_desc(req);
if (td) {
dma_done =
AMD_GETBITS(td->status,
UDC_DMA_IN_STS_BS);
/* don't care about DMA done */
req->req.actual = req->req.length;
}
} else {
/* assume all bytes transferred */
req->req.actual = req->req.length;
}
if (req->req.actual == req->req.length) {
/* complete req */
complete_req(ep, req, 0);
req->dma_going = 0;
/* further request available ? */
if (list_empty(&ep->queue)) {
/* disable interrupt */
tmp = readl(&dev->regs->ep_irqmsk);
tmp |= AMD_BIT(ep->num);
writel(tmp, &dev->regs->ep_irqmsk);
}
}
}
ep->cancel_transfer = 0;
}
/*
* status reg has IN bit set and TDC not set; if TDC was handled,
* IN must not be handled (UDC defect?)
*/
if ((epsts & AMD_BIT(UDC_EPSTS_IN))
&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
ret_val = IRQ_HANDLED;
if (!list_empty(&ep->queue)) {
/* next request */
req = list_entry(ep->queue.next,
struct udc_request, queue);
/* FIFO mode */
if (!use_dma) {
/* write fifo */
udc_txfifo_write(ep, &req->req);
len = req->req.length - req->req.actual;
if (len > ep->ep.maxpacket)
len = ep->ep.maxpacket;
req->req.actual += len;
if (req->req.actual == req->req.length
|| (len != ep->ep.maxpacket)) {
/* complete req */
complete_req(ep, req, 0);
}
/* DMA */
} else if (req && !req->dma_going) {
VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
req, req->td_data);
if (req->td_data) {
req->dma_going = 1;
/*
* unset L bit of first desc.
* for chain
*/
if (use_dma_ppb && req->req.length >
ep->ep.maxpacket) {
req->td_data->status &=
AMD_CLEAR_BIT(
UDC_DMA_IN_STS_L);
}
/* write desc pointer */
writel(req->td_phys, &ep->regs->desptr);
/* set HOST READY */
req->td_data->status =
AMD_ADDBITS(
req->td_data->status,
UDC_DMA_IN_STS_BS_HOST_READY,
UDC_DMA_IN_STS_BS);
/* set poll demand bit */
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_P);
writel(tmp, &ep->regs->ctl);
}
}
} else if (!use_dma && ep->in) {
/* disable interrupt */
tmp = readl(
&dev->regs->ep_irqmsk);
tmp |= AMD_BIT(ep->num);
writel(tmp,
&dev->regs->ep_irqmsk);
}
}
/* clear status bits */
writel(epsts, &ep->regs->sts);
finished:
return ret_val;
}
/* Interrupt handler for Control OUT traffic */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
irqreturn_t ret_val = IRQ_NONE;
u32 tmp;
int setup_supported;
u32 count;
int set = 0;
struct udc_ep *ep;
struct udc_ep *ep_tmp;
ep = &dev->ep[UDC_EP0OUT_IX];
/* clear irq */
writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
/* check BNA and clear if set */
if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
VDBG(dev, "ep0: BNA set\n");
writel(AMD_BIT(UDC_EPSTS_BNA),
&dev->ep[UDC_EP0OUT_IX].regs->sts);
ep->bna_occurred = 1;
ret_val = IRQ_HANDLED;
goto finished;
}
/* type of data: SETUP or DATA 0 bytes */
tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
VDBG(dev, "data_typ = %x\n", tmp);
/* setup data */
if (tmp == UDC_EPSTS_OUT_SETUP) {
ret_val = IRQ_HANDLED;
ep->dev->stall_ep0in = 0;
dev->waiting_zlp_ack_ep0in = 0;
/* set NAK for EP0_IN */
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_SNAK);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
dev->ep[UDC_EP0IN_IX].naking = 1;
/* get setup data */
if (use_dma) {
/* clear OUT bits in ep status */
writel(UDC_EPSTS_OUT_CLEAR,
&dev->ep[UDC_EP0OUT_IX].regs->sts);
setup_data.data[0] =
dev->ep[UDC_EP0OUT_IX].td_stp->data12;
setup_data.data[1] =
dev->ep[UDC_EP0OUT_IX].td_stp->data34;
/* set HOST READY */
dev->ep[UDC_EP0OUT_IX].td_stp->status =
UDC_DMA_STP_STS_BS_HOST_READY;
} else {
/* read fifo */
udc_rxfifo_read_dwords(dev, setup_data.data, 2);
}
/* determine direction of control data */
if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
/* enable RDE */
udc_ep0_set_rde(dev);
set = 0;
} else {
dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
/*
* implant BNA dummy descriptor to allow RXFIFO opening
* by RDE
*/
if (ep->bna_dummy_req) {
/* write desc pointer */
writel(ep->bna_dummy_req->td_phys,
&dev->ep[UDC_EP0OUT_IX].regs->desptr);
ep->bna_occurred = 0;
}
set = 1;
dev->ep[UDC_EP0OUT_IX].naking = 1;
/*
* set up the timer for enabling RDE (to not enable
* RXFIFO DMA for data too early)
*/
set_rde = 1;
if (!timer_pending(&udc_timer)) {
udc_timer.expires = jiffies +
HZ/UDC_RDE_TIMER_DIV;
if (!stop_timer)
add_timer(&udc_timer);
}
}
/*
* mass storage reset must be processed here because
* next packet may be a CLEAR_FEATURE HALT which would not
* clear the stall bit when no STALL handshake was received
* before (autostall can cause this)
*/
if (setup_data.data[0] == UDC_MSCRES_DWORD0
&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
DBG(dev, "MSC Reset\n");
/*
* clear stall bits
* only one IN and one OUT endpoint are handled
*/
ep_tmp = &udc->ep[UDC_EPIN_IX];
udc_set_halt(&ep_tmp->ep, 0);
ep_tmp = &udc->ep[UDC_EPOUT_IX];
udc_set_halt(&ep_tmp->ep, 0);
}
/* call gadget with setup data received */
spin_unlock(&dev->lock);
setup_supported = dev->driver->setup(&dev->gadget,
&setup_data.request);
spin_lock(&dev->lock);
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
/* ep0 in returns data (not zlp) on IN phase */
if (setup_supported >= 0 && setup_supported <
UDC_EP0IN_MAXPACKET) {
/* clear NAK by writing CNAK in EP0_IN */
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
dev->ep[UDC_EP0IN_IX].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
/* if unsupported request then stall */
} else if (setup_supported < 0) {
tmp |= AMD_BIT(UDC_EPCTL_S);
writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
} else
dev->waiting_zlp_ack_ep0in = 1;
/* clear NAK by writing CNAK in EP0_OUT */
if (!set) {
tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_CNAK);
writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
dev->ep[UDC_EP0OUT_IX].naking = 0;
UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
if (!use_dma) {
/* clear OUT bits in ep status */
writel(UDC_EPSTS_OUT_CLEAR,
&dev->ep[UDC_EP0OUT_IX].regs->sts);
}
/* data packet 0 bytes */
} else if (tmp == UDC_EPSTS_OUT_DATA) {
/* clear OUT bits in ep status */
writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
/* get setup data: only 0 packet */
if (use_dma) {
/* no req if 0 packet, just reactivate */
if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
VDBG(dev, "ZLP\n");
/* set HOST READY */
dev->ep[UDC_EP0OUT_IX].td->status =
AMD_ADDBITS(
dev->ep[UDC_EP0OUT_IX].td->status,
UDC_DMA_OUT_STS_BS_HOST_READY,
UDC_DMA_OUT_STS_BS);
/* enable RDE */
udc_ep0_set_rde(dev);
ret_val = IRQ_HANDLED;
} else {
/* control write */
ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
/* re-program desc. pointer for possible ZLPs */
writel(dev->ep[UDC_EP0OUT_IX].td_phys,
&dev->ep[UDC_EP0OUT_IX].regs->desptr);
/* enable RDE */
udc_ep0_set_rde(dev);
}
} else {
/* number of bytes received */
count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
/* out data for fifo mode not working */
count = 0;
/* 0 packet or real data ? */
if (count != 0) {
ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
} else {
/* dummy read confirm */
readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
ret_val = IRQ_HANDLED;
}
}
}
/* check pending CNAKS */
if (cnak_pending) {
/* CNAK processing only when the rxfifo is empty */
if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
udc_process_cnak_queue(dev);
}
finished:
return ret_val;
}
/* Interrupt handler for Control IN traffic */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
irqreturn_t ret_val = IRQ_NONE;
u32 tmp;
struct udc_ep *ep;
struct udc_request *req;
unsigned len;
ep = &dev->ep[UDC_EP0IN_IX];
/* clear irq */
writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
/* DMA completion */
if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
VDBG(dev, "isr: TDC clear\n");
ret_val = IRQ_HANDLED;
/* clear TDC bit */
writel(AMD_BIT(UDC_EPSTS_TDC),
&dev->ep[UDC_EP0IN_IX].regs->sts);
/* status reg has IN bit set ? */
} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
ret_val = IRQ_HANDLED;
if (ep->dma) {
/* clear IN bit */
writel(AMD_BIT(UDC_EPSTS_IN),
&dev->ep[UDC_EP0IN_IX].regs->sts);
}
if (dev->stall_ep0in) {
DBG(dev, "stall ep0in\n");
/* halt ep0in */
tmp = readl(&ep->regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_S);
writel(tmp, &ep->regs->ctl);
} else {
if (!list_empty(&ep->queue)) {
/* next request */
req = list_entry(ep->queue.next,
struct udc_request, queue);
if (ep->dma) {
/* write desc pointer */
writel(req->td_phys, &ep->regs->desptr);
/* set HOST READY */
req->td_data->status =
AMD_ADDBITS(
req->td_data->status,
UDC_DMA_STP_STS_BS_HOST_READY,
UDC_DMA_STP_STS_BS);
/* set poll demand bit */
tmp =
readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
tmp |= AMD_BIT(UDC_EPCTL_P);
writel(tmp,
&dev->ep[UDC_EP0IN_IX].regs->ctl);
/* all bytes will be transferred */
req->req.actual = req->req.length;
/* complete req */
complete_req(ep, req, 0);
} else {
/* write fifo */
udc_txfifo_write(ep, &req->req);
/* length bytes transferred */
len = req->req.length - req->req.actual;
if (len > ep->ep.maxpacket)
len = ep->ep.maxpacket;
req->req.actual += len;
if (req->req.actual == req->req.length
|| (len != ep->ep.maxpacket)) {
/* complete req */
complete_req(ep, req, 0);
}
}
}
}
ep->halted = 0;
dev->stall_ep0in = 0;
if (!ep->dma) {
/* clear IN bit */
writel(AMD_BIT(UDC_EPSTS_IN),
&dev->ep[UDC_EP0IN_IX].regs->sts);
}
}
return ret_val;
}
/* Interrupt handler for global device events */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
irqreturn_t ret_val = IRQ_NONE;
u32 tmp;
u32 cfg;
struct udc_ep *ep;
u16 i;
u8 udc_csr_epix;
/* SET_CONFIG irq ? */
if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
ret_val = IRQ_HANDLED;
/* read config value */
tmp = readl(&dev->regs->sts);
cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
dev->cur_config = cfg;
dev->set_cfg_not_acked = 1;
/* make usb request for gadget driver */
memset(&setup_data, 0 , sizeof(union udc_setup_data));
setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
setup_data.request.wValue = cpu_to_le16(dev->cur_config);
/* program the NE registers */
for (i = 0; i < UDC_EP_NUM; i++) {
ep = &dev->ep[i];
if (ep->in) {
/* ep ix in UDC CSR register space */
udc_csr_epix = ep->num;
/* OUT ep */
} else {
/* ep ix in UDC CSR register space */
udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
}
tmp = readl(&dev->csr->ne[udc_csr_epix]);
/* ep cfg */
tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
UDC_CSR_NE_CFG);
/* write reg */
writel(tmp, &dev->csr->ne[udc_csr_epix]);
/* clear stall bits */
ep->halted = 0;
tmp = readl(&ep->regs->ctl);
tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
writel(tmp, &ep->regs->ctl);
}
/* call gadget zero with setup data received */
spin_unlock(&dev->lock);
tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
spin_lock(&dev->lock);
} /* SET_INTERFACE ? */
if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
ret_val = IRQ_HANDLED;
dev->set_cfg_not_acked = 1;
/* read interface and alt setting values */
tmp = readl(&dev->regs->sts);
dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
/* make usb request for gadget driver */
memset(&setup_data, 0 , sizeof(union udc_setup_data));
setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
setup_data.request.bRequestType = USB_RECIP_INTERFACE;
setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
dev->cur_alt, dev->cur_intf);
/* program the NE registers */
for (i = 0; i < UDC_EP_NUM; i++) {
ep = &dev->ep[i];
if (ep->in) {
/* ep ix in UDC CSR register space */
udc_csr_epix = ep->num;
/* OUT ep */
} else {
/* ep ix in UDC CSR register space */
udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
}
/* UDC CSR reg */
/* set ep values */
tmp = readl(&dev->csr->ne[udc_csr_epix]);
/* ep interface */
tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
UDC_CSR_NE_INTF);
/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
/* ep alt */
tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
UDC_CSR_NE_ALT);
/* write reg */
writel(tmp, &dev->csr->ne[udc_csr_epix]);
/* clear stall bits */
ep->halted = 0;
tmp = readl(&ep->regs->ctl);
tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
writel(tmp, &ep->regs->ctl);
}
/* call gadget zero with setup data received */
spin_unlock(&dev->lock);
tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
spin_lock(&dev->lock);
} /* USB reset */
if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
DBG(dev, "USB Reset interrupt\n");
ret_val = IRQ_HANDLED;
/* allow soft reset when suspend occurs */
soft_reset_occured = 0;
dev->waiting_zlp_ack_ep0in = 0;
dev->set_cfg_not_acked = 0;
/* mask not needed interrupts */
udc_mask_unused_interrupts(dev);
/* call gadget to resume and reset configs etc. */
spin_unlock(&dev->lock);
if (dev->sys_suspended && dev->driver->resume) {
dev->driver->resume(&dev->gadget);
dev->sys_suspended = 0;
}
dev->driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
/* disable ep0 to empty req queue */
empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
/* soft reset when rxfifo not empty */
tmp = readl(&dev->regs->sts);
if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
&& !soft_reset_after_usbreset_occured) {
udc_soft_reset(dev);
soft_reset_after_usbreset_occured++;
}
/*
* DMA reset to kill potential old DMA hw hang,
* POLL bit is already reset by ep_init() through
* disconnect()
*/
DBG(dev, "DMA machine reset\n");
tmp = readl(&dev->regs->cfg);
writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
writel(tmp, &dev->regs->cfg);
/* put into initial config */
udc_basic_init(dev);
/* enable device setup interrupts */
udc_enable_dev_setup_interrupts(dev);
/* enable suspend interrupt */
tmp = readl(&dev->regs->irqmsk);
tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
writel(tmp, &dev->regs->irqmsk);
} /* USB suspend */
if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
DBG(dev, "USB Suspend interrupt\n");
ret_val = IRQ_HANDLED;
if (dev->driver->suspend) {
spin_unlock(&dev->lock);
dev->sys_suspended = 1;
dev->driver->suspend(&dev->gadget);
spin_lock(&dev->lock);
}
} /* new speed ? */
if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
DBG(dev, "ENUM interrupt\n");
ret_val = IRQ_HANDLED;
soft_reset_after_usbreset_occured = 0;
/* disable ep0 to empty req queue */
empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
/* link up all endpoints */
udc_setup_endpoints(dev);
dev_info(&dev->pdev->dev, "Connect: %s\n",
usb_speed_string(dev->gadget.speed));
/* init ep 0 */
activate_control_endpoints(dev);
/* enable ep0 interrupts */
udc_enable_ep0_interrupts(dev);
}
/* session valid change interrupt */
if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
DBG(dev, "USB SVC interrupt\n");
ret_val = IRQ_HANDLED;
/* check that session is not valid to detect disconnect */
tmp = readl(&dev->regs->sts);
if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
/* disable suspend interrupt */
tmp = readl(&dev->regs->irqmsk);
tmp |= AMD_BIT(UDC_DEVINT_US);
writel(tmp, &dev->regs->irqmsk);
DBG(dev, "USB Disconnect (session valid low)\n");
/* cleanup on disconnect */
usb_disconnect(udc);
}
}
return ret_val;
}
/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
static irqreturn_t udc_irq(int irq, void *pdev)
{
struct udc *dev = pdev;
u32 reg;
u16 i;
u32 ep_irq;
irqreturn_t ret_val = IRQ_NONE;
spin_lock(&dev->lock);
/* check for ep irq */
reg = readl(&dev->regs->ep_irqsts);
if (reg) {
if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
ret_val |= udc_control_out_isr(dev);
if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
ret_val |= udc_control_in_isr(dev);
/*
* data endpoint
* iterate ep's
*/
for (i = 1; i < UDC_EP_NUM; i++) {
ep_irq = 1 << i;
if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
continue;
/* clear irq status */
writel(ep_irq, &dev->regs->ep_irqsts);
/* irq for out ep ? */
if (i > UDC_EPIN_NUM)
ret_val |= udc_data_out_isr(dev, i);
else
ret_val |= udc_data_in_isr(dev, i);
}
}
/* check for dev irq */
reg = readl(&dev->regs->irqsts);
if (reg) {
/* clear irq */
writel(reg, &dev->regs->irqsts);
ret_val |= udc_dev_isr(dev, reg);
}
spin_unlock(&dev->lock);
return ret_val;
}
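/*
 * The dispatch above relies on the ep_irqsts bit layout: bit i
 * belongs to endpoint index i, IN endpoints occupy the low indices
 * and OUT endpoints the ones above, hence the "i > UDC_EPIN_NUM"
 * test. Sketch of the mapping, inferred from the loop rather than
 * the data book:
 *
 *	ep_irq = 1 << i;
 *	i <= UDC_EPIN_NUM:  udc_data_in_isr(dev, i)
 *	i >  UDC_EPIN_NUM:  udc_data_out_isr(dev, i)
 */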
/* Tears down device */
static void gadget_release(struct device *pdev)
{
struct amd5536udc *dev = dev_get_drvdata(pdev);
kfree(dev);
}
/* Cleanup on device remove */
static void udc_remove(struct udc *dev)
{
/* remove timer */
stop_timer++;
if (timer_pending(&udc_timer))
wait_for_completion(&on_exit);
if (udc_timer.data)
del_timer_sync(&udc_timer);
/* remove pollstall timer */
stop_pollstall_timer++;
if (timer_pending(&udc_pollstall_timer))
wait_for_completion(&on_pollstall_exit);
if (udc_pollstall_timer.data)
del_timer_sync(&udc_pollstall_timer);
udc = NULL;
}
/* Reset all pci context */
static void udc_pci_remove(struct pci_dev *pdev)
{
struct udc *dev;
dev = pci_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
/* gadget driver must not be registered */
BUG_ON(dev->driver != NULL);
/* dma pool cleanup */
if (dev->data_requests)
pci_pool_destroy(dev->data_requests);
if (dev->stp_requests) {
/* cleanup DMA desc's for ep0in */
pci_pool_free(dev->stp_requests,
dev->ep[UDC_EP0OUT_IX].td_stp,
dev->ep[UDC_EP0OUT_IX].td_stp_dma);
pci_pool_free(dev->stp_requests,
dev->ep[UDC_EP0OUT_IX].td,
dev->ep[UDC_EP0OUT_IX].td_phys);
pci_pool_destroy(dev->stp_requests);
}
/* reset controller */
writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
if (dev->irq_registered)
free_irq(pdev->irq, dev);
if (dev->regs)
iounmap(dev->regs);
if (dev->mem_region)
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (dev->active)
pci_disable_device(pdev);
device_unregister(&dev->gadget.dev);
pci_set_drvdata(pdev, NULL);
udc_remove(dev);
}
/* create dma pools on init */
static int init_dma_pools(struct udc *dev)
{
struct udc_stp_dma *td_stp;
struct udc_data_dma *td_data;
int retval;
/* consistent DMA mode setting ? */
if (use_dma_ppb) {
use_dma_bufferfill_mode = 0;
} else {
use_dma_ppb_du = 0;
use_dma_bufferfill_mode = 1;
}
/* DMA setup */
dev->data_requests = dma_pool_create("data_requests", NULL,
sizeof(struct udc_data_dma), 0, 0);
if (!dev->data_requests) {
DBG(dev, "can't get request data pool\n");
retval = -ENOMEM;
goto finished;
}
/* EP0 in dma regs = dev control regs */
dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
/* dma desc for setup data */
dev->stp_requests = dma_pool_create("setup requests", NULL,
sizeof(struct udc_stp_dma), 0, 0);
if (!dev->stp_requests) {
DBG(dev, "can't get stp request pool\n");
retval = -ENOMEM;
goto finished;
}
/* setup */
td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
if (td_stp == NULL) {
retval = -ENOMEM;
goto finished;
}
dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
/* data: 0 packets !? */
td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
&dev->ep[UDC_EP0OUT_IX].td_phys);
if (td_data == NULL) {
retval = -ENOMEM;
goto finished;
}
dev->ep[UDC_EP0OUT_IX].td = td_data;
return 0;
finished:
return retval;
}
/* Called by pci bus driver to init pci context */
static int udc_pci_probe(
struct pci_dev *pdev,
const struct pci_device_id *id
)
{
struct udc *dev;
unsigned long resource;
unsigned long len;
int retval = 0;
/* one udc only */
if (udc) {
dev_dbg(&pdev->dev, "already probed\n");
return -EBUSY;
}
/* init */
dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
if (!dev) {
retval = -ENOMEM;
goto finished;
}
/* pci setup */
if (pci_enable_device(pdev) < 0) {
kfree(dev);
dev = NULL;
retval = -ENODEV;
goto finished;
}
dev->active = 1;
/* PCI resource allocation */
resource = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
if (!request_mem_region(resource, len, name)) {
dev_dbg(&pdev->dev, "pci device used already\n");
kfree(dev);
dev = NULL;
retval = -EBUSY;
goto finished;
}
dev->mem_region = 1;
dev->virt_addr = ioremap_nocache(resource, len);
if (dev->virt_addr == NULL) {
dev_dbg(&pdev->dev, "start address cannot be mapped\n");
kfree(dev);
dev = NULL;
retval = -EFAULT;
goto finished;
}
if (!pdev->irq) {
dev_err(&dev->pdev->dev, "irq not set\n");
kfree(dev);
dev = NULL;
retval = -ENODEV;
goto finished;
}
spin_lock_init(&dev->lock);
/* udc csr registers base */
dev->csr = dev->virt_addr + UDC_CSR_ADDR;
/* dev registers base */
dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
/* ep registers base */
dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
/* fifo's base */
dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
kfree(dev);
dev = NULL;
retval = -EBUSY;
goto finished;
}
dev->irq_registered = 1;
pci_set_drvdata(pdev, dev);
/* chip revision for HS AMD5536 */
dev->chiprev = pdev->revision;
pci_set_master(pdev);
pci_try_set_mwi(pdev);
/* init dma pools */
if (use_dma) {
retval = init_dma_pools(dev);
if (retval != 0)
goto finished;
}
dev->phys_addr = resource;
dev->irq = pdev->irq;
dev->pdev = pdev;
dev->gadget.dev.parent = &pdev->dev;
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
/* general probing */
if (udc_probe(dev) == 0)
return 0;
finished:
if (dev)
udc_pci_remove(pdev);
return retval;
}
/* general probe */
static int udc_probe(struct udc *dev)
{
char tmp[128];
u32 reg;
int retval;
/* mark timer as not initialized */
udc_timer.data = 0;
udc_pollstall_timer.data = 0;
/* device struct setup */
dev->gadget.ops = &udc_ops;
dev_set_name(&dev->gadget.dev, "gadget");
dev->gadget.dev.release = gadget_release;
dev->gadget.name = name;
dev->gadget.max_speed = USB_SPEED_HIGH;
/* init registers, interrupts, ... */
startup_registers(dev);
dev_info(&dev->pdev->dev, "%s\n", mod_desc);
snprintf(tmp, sizeof tmp, "%d", dev->irq);
dev_info(&dev->pdev->dev,
"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
tmp, dev->phys_addr, dev->chiprev,
(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
strcpy(tmp, UDC_DRIVER_VERSION_STRING);
if (dev->chiprev == UDC_HSA0_REV) {
dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
retval = -ENODEV;
goto finished;
}
dev_info(&dev->pdev->dev,
"driver version: %s(for Geode5536 B1)\n", tmp);
udc = dev;
retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
if (retval)
goto finished;
retval = device_register(&dev->gadget.dev);
if (retval) {
usb_del_gadget_udc(&dev->gadget);
put_device(&dev->gadget.dev);
goto finished;
}
/* timer init */
init_timer(&udc_timer);
udc_timer.function = udc_timer_function;
udc_timer.data = 1;
/* timer pollstall init */
init_timer(&udc_pollstall_timer);
udc_pollstall_timer.function = udc_pollstall_timer_function;
udc_pollstall_timer.data = 1;
/* set SD */
reg = readl(&dev->regs->ctl);
reg |= AMD_BIT(UDC_DEVCTL_SD);
writel(reg, &dev->regs->ctl);
/* print dev register info */
print_regs(dev);
return 0;
finished:
return retval;
}
/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
unsigned long flags;
u32 tmp;
DBG(dev, "UDC initiates remote wakeup\n");
spin_lock_irqsave(&dev->lock, flags);
tmp = readl(&dev->regs->ctl);
tmp |= AMD_BIT(UDC_DEVCTL_RES);
writel(tmp, &dev->regs->ctl);
tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
writel(tmp, &dev->regs->ctl);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
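/*
 * Remote wakeup above pulses the RES (resume) bit, setting and
 * clearing it back to back so the core drives resume signalling on
 * the bus. Whether the core times the resume pulse itself is assumed
 * here; if a minimum signalling time were required, a delay between
 * the two writel() calls would belong in this function.
 */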
/* PCI device parameters */
static DEFINE_PCI_DEVICE_TABLE(pci_id) = {
{
PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
.class_mask = 0xffffffff,
},
{},
};
MODULE_DEVICE_TABLE(pci, pci_id);
/* PCI functions */
static struct pci_driver udc_pci_driver = {
.name = (char *) name,
.id_table = pci_id,
.probe = udc_pci_probe,
.remove = udc_pci_remove,
};
/* Inits driver */
static int __init init(void)
{
return pci_register_driver(&udc_pci_driver);
}
module_init(init);
/* Cleans driver */
static void __exit cleanup(void)
{
pci_unregister_driver(&udc_pci_driver);
}
module_exit(cleanup);
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Thecrazyskull/kernel_msm | fs/ecryptfs/read_write.c | 5016 | 8866 | /**
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include "ecryptfs_kernel.h"
/**
* ecryptfs_write_lower
* @ecryptfs_inode: The eCryptfs inode
* @data: Data to write
* @offset: Byte offset in the lower file to which to write the data
* @size: Number of bytes from @data to write at @offset in the lower
* file
*
* Write data to the lower file.
*
* Returns bytes written on success; less than zero on error
*/
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size)
{
struct file *lower_file;
mm_segment_t fs_save;
ssize_t rc;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
fs_save = get_fs();
set_fs(get_ds());
rc = vfs_write(lower_file, data, size, &offset);
set_fs(fs_save);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
}
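/*
 * The get_fs()/set_fs(get_ds()) bracket above is the classic idiom
 * for calling vfs_write() with a kernel buffer: it temporarily widens
 * the address-limit check so the __user-facing API accepts a kernel
 * pointer, then restores the old limit. Minimal sketch of the pattern
 * (kern_buf is a hypothetical kernel-space buffer):
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(get_ds());
 *	rc = vfs_write(file, kern_buf, len, &pos);
 *	set_fs(old_fs);
 */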
/**
* ecryptfs_write_lower_page_segment
* @ecryptfs_inode: The eCryptfs inode
* @page_for_lower: The page containing the data to be written to the
* lower file
* @offset_in_page: The offset in the @page_for_lower from which to
* start writing the data
* @size: The amount of data from @page_for_lower to write to the
* lower file
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to write
* the contents of @page_for_lower to the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
struct page *page_for_lower,
size_t offset_in_page, size_t size)
{
char *virt;
loff_t offset;
int rc;
offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT)
+ offset_in_page);
virt = kmap(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
if (rc > 0)
rc = 0;
kunmap(page_for_lower);
return rc;
}
/**
* ecryptfs_write
* @ecryptfs_inode: The eCryptfs file into which to write
* @data: Virtual address where data to write is located
* @offset: Offset in the eCryptfs file at which to begin writing the
* data from @data
* @size: The number of bytes to write from @data
*
* Write an arbitrary amount of data to an arbitrary location in the
* eCryptfs inode page cache. This is done on a page-by-page, and then
* by an extent-by-extent, basis; individual extents are encrypted and
* written to the lower page cache (via VFS writes). This function
* takes care of all the address translation to locations in the lower
* filesystem; it also handles truncate events, writing out zeros
* where necessary.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
size_t size)
{
struct page *ecryptfs_page;
struct ecryptfs_crypt_stat *crypt_stat;
char *ecryptfs_page_virt;
loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
loff_t data_offset = 0;
loff_t pos;
int rc = 0;
crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
/*
* if we are writing beyond current size, then start pos
* at the current size - we'll fill in zeros from there.
*/
if (offset > ecryptfs_file_size)
pos = ecryptfs_file_size;
else
pos = offset;
while (pos < (offset + size)) {
pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
loff_t total_remaining_bytes = ((offset + size) - pos);
if (fatal_signal_pending(current)) {
rc = -EINTR;
break;
}
if (num_bytes > total_remaining_bytes)
num_bytes = total_remaining_bytes;
if (pos < offset) {
/* remaining zeros to write, up to destination offset */
loff_t total_remaining_zeros = (offset - pos);
if (num_bytes > total_remaining_zeros)
num_bytes = total_remaining_zeros;
}
ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
ecryptfs_page_idx);
if (IS_ERR(ecryptfs_page)) {
rc = PTR_ERR(ecryptfs_page);
printk(KERN_ERR "%s: Error getting page at "
"index [%ld] from eCryptfs inode "
"mapping; rc = [%d]\n", __func__,
ecryptfs_page_idx, rc);
goto out;
}
ecryptfs_page_virt = kmap_atomic(ecryptfs_page);
/*
* pos: where we're now writing, offset: where the request was
* If current pos is before request, we are filling zeros
* If we are at or beyond request, we are writing the *data*
* If we're in a fresh page beyond eof, zero it in either case
*/
if (pos < offset || !start_offset_in_page) {
/* We are extending past the previous end of the file.
* Fill in zero values to the end of the page */
memset(((char *)ecryptfs_page_virt
+ start_offset_in_page), 0,
PAGE_CACHE_SIZE - start_offset_in_page);
}
/* pos >= offset, we are now writing the data request */
if (pos >= offset) {
memcpy(((char *)ecryptfs_page_virt
+ start_offset_in_page),
(data + data_offset), num_bytes);
data_offset += num_bytes;
}
kunmap_atomic(ecryptfs_page_virt);
flush_dcache_page(ecryptfs_page);
SetPageUptodate(ecryptfs_page);
unlock_page(ecryptfs_page);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED)
rc = ecryptfs_encrypt_page(ecryptfs_page);
else
rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
ecryptfs_page,
start_offset_in_page,
data_offset);
page_cache_release(ecryptfs_page);
if (rc) {
printk(KERN_ERR "%s: Error encrypting "
"page; rc = [%d]\n", __func__, rc);
goto out;
}
pos += num_bytes;
}
if (pos > ecryptfs_file_size) {
i_size_write(ecryptfs_inode, pos);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
int rc2;
rc2 = ecryptfs_write_inode_size_to_metadata(
ecryptfs_inode);
if (rc2) {
printk(KERN_ERR "Problem with "
"ecryptfs_write_inode_size_to_metadata; "
"rc = [%d]\n", rc2);
if (!rc)
rc = rc2;
goto out;
}
}
}
out:
return rc;
}
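/*
 * Worked example of the loop above (hypothetical numbers, assuming
 * PAGE_CACHE_SIZE == 4096): writing size = 100 at offset = 5000 into
 * a file whose current size is 4096. pos starts at 4096; the first
 * iteration zero-fills page index 1 and advances pos by 904 bytes,
 * up to the request offset; the second iteration copies the 100 data
 * bytes at offset 904 within the same page; afterwards i_size is
 * raised to 5100.
 */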
/**
* ecryptfs_read_lower
* @data: The read data is stored here by this function
* @offset: Byte offset in the lower file from which to read the data
* @size: Number of bytes to read from @offset of the lower file and
* store into @data
* @ecryptfs_inode: The eCryptfs inode
*
* Read @size bytes of data at byte offset @offset from the lower
* inode into memory location @data.
*
* Returns bytes read on success; 0 on EOF; less than zero on error
*/
int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
struct inode *ecryptfs_inode)
{
struct file *lower_file;
mm_segment_t fs_save;
ssize_t rc;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
fs_save = get_fs();
set_fs(get_ds());
rc = vfs_read(lower_file, data, size, &offset);
set_fs(fs_save);
return rc;
}
/**
* ecryptfs_read_lower_page_segment
* @page_for_ecryptfs: The page into which data for eCryptfs will be
* written
* @offset_in_page: Offset in @page_for_ecryptfs from which to start
* writing
* @size: The number of bytes to write into @page_for_ecryptfs
* @ecryptfs_inode: The eCryptfs inode
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to read
* the contents of @page_for_ecryptfs from the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode)
{
char *virt;
loff_t offset;
int rc;
offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page);
virt = kmap(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
rc = 0;
kunmap(page_for_ecryptfs);
flush_dcache_page(page_for_ecryptfs);
return rc;
}
| gpl-2.0 |
luckasfb/android_kernel_iocean_x7 | drivers/net/wireless/b43/sdio.c | 5528 | 4746 | /*
* Broadcom B43 wireless driver
*
* SDIO over Sonics Silicon Backplane bus glue for b43.
*
* Copyright (C) 2009 Albert Herranz
* Copyright (C) 2009 Michael Buesch <m@bues.ch>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/slab.h>
#include <linux/ssb/ssb.h>
#include "sdio.h"
#include "b43.h"
#define HNBU_CHIPID 0x01 /* vendor & device id */
#define B43_SDIO_BLOCK_SIZE 64 /* rx fifo max size in bytes */
static const struct b43_sdio_quirk {
u16 vendor;
u16 device;
unsigned int quirks;
} b43_sdio_quirks[] = {
{ 0x14E4, 0x4318, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, },
{ },
};
static unsigned int b43_sdio_get_quirks(u16 vendor, u16 device)
{
const struct b43_sdio_quirk *q;
for (q = b43_sdio_quirks; q->quirks; q++) {
if (vendor == q->vendor && device == q->device)
return q->quirks;
}
return 0;
}
static void b43_sdio_interrupt_dispatcher(struct sdio_func *func)
{
struct b43_sdio *sdio = sdio_get_drvdata(func);
struct b43_wldev *dev = sdio->irq_handler_opaque;
if (unlikely(b43_status(dev) < B43_STAT_STARTED))
return;
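/* The SDIO core invokes this dispatcher with the host claimed; release
 * it around the driver handler so the handler may claim it itself. */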
sdio_release_host(func);
sdio->irq_handler(dev);
sdio_claim_host(func);
}
int b43_sdio_request_irq(struct b43_wldev *dev,
void (*handler)(struct b43_wldev *dev))
{
struct ssb_bus *bus = dev->dev->sdev->bus;
struct sdio_func *func = bus->host_sdio;
struct b43_sdio *sdio = sdio_get_drvdata(func);
int err;
sdio->irq_handler_opaque = dev;
sdio->irq_handler = handler;
sdio_claim_host(func);
err = sdio_claim_irq(func, b43_sdio_interrupt_dispatcher);
sdio_release_host(func);
return err;
}
void b43_sdio_free_irq(struct b43_wldev *dev)
{
struct ssb_bus *bus = dev->dev->sdev->bus;
struct sdio_func *func = bus->host_sdio;
struct b43_sdio *sdio = sdio_get_drvdata(func);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_release_host(func);
sdio->irq_handler_opaque = NULL;
sdio->irq_handler = NULL;
}
static int __devinit b43_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct b43_sdio *sdio;
struct sdio_func_tuple *tuple;
u16 vendor = 0, device = 0;
int error;
/* Look for the card chip identifier. */
tuple = func->tuples;
while (tuple) {
switch (tuple->code) {
case 0x80:
switch (tuple->data[0]) {
case HNBU_CHIPID:
if (tuple->size != 5)
break;
vendor = tuple->data[1] | (tuple->data[2]<<8);
device = tuple->data[3] | (tuple->data[4]<<8);
dev_info(&func->dev, "Chip ID %04x:%04x\n",
vendor, device);
break;
default:
break;
}
break;
default:
break;
}
tuple = tuple->next;
}
if (!vendor || !device) {
error = -ENODEV;
goto out;
}
sdio_claim_host(func);
error = sdio_set_block_size(func, B43_SDIO_BLOCK_SIZE);
if (error) {
dev_err(&func->dev, "failed to set block size to %u bytes,"
" error %d\n", B43_SDIO_BLOCK_SIZE, error);
goto err_release_host;
}
error = sdio_enable_func(func);
if (error) {
dev_err(&func->dev, "failed to enable func, error %d\n", error);
goto err_release_host;
}
sdio_release_host(func);
sdio = kzalloc(sizeof(*sdio), GFP_KERNEL);
if (!sdio) {
error = -ENOMEM;
dev_err(&func->dev, "failed to allocate ssb bus\n");
goto err_disable_func;
}
error = ssb_bus_sdiobus_register(&sdio->ssb, func,
b43_sdio_get_quirks(vendor, device));
if (error) {
dev_err(&func->dev, "failed to register ssb sdio bus,"
" error %d\n", error);
goto err_free_ssb;
}
sdio_set_drvdata(func, sdio);
return 0;
err_free_ssb:
kfree(sdio);
err_disable_func:
sdio_claim_host(func);
sdio_disable_func(func);
err_release_host:
sdio_release_host(func);
out:
return error;
}
static void __devexit b43_sdio_remove(struct sdio_func *func)
{
struct b43_sdio *sdio = sdio_get_drvdata(func);
ssb_bus_unregister(&sdio->ssb);
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
kfree(sdio);
sdio_set_drvdata(func, NULL);
}
static const struct sdio_device_id b43_sdio_ids[] = {
{ SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
{ SDIO_DEVICE(0x0092, 0x0004) }, /* C-guys, Inc. EW-CG1102GC */
{ },
};
static struct sdio_driver b43_sdio_driver = {
.name = "b43-sdio",
.id_table = b43_sdio_ids,
.probe = b43_sdio_probe,
.remove = b43_sdio_remove,
};
int b43_sdio_init(void)
{
return sdio_register_driver(&b43_sdio_driver);
}
void b43_sdio_exit(void)
{
sdio_unregister_driver(&b43_sdio_driver);
}
| gpl-2.0 |
Inventor1938/boeffla-kernel-cm-oneplus3 | drivers/dma/ipu/ipu_idmac.c | 409 | 46457 | /*
* Copyright (C) 2008
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
*
* Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma/ipu-dma.h>
#include "../dmaengine.h"
#include "ipu_intern.h"
#define FS_VF_IN_VALID 0x00000002
#define FS_ENC_IN_VALID 0x00000001
static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
bool wait_for_stop);
/*
* There can be only one; we could allocate it dynamically, but then we'd
* have to add an extra parameter to some functions and use something as
* ugly as
* struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
* in the ISR.
*/
static struct ipu ipu_data;
#define to_ipu(id) container_of(id, struct ipu, idmac)
static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
{
return __raw_readl(ipu->reg_ic + reg);
}
#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
{
__raw_writel(value, ipu->reg_ic + reg);
}
#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
{
return __raw_readl(ipu->reg_ipu + reg);
}
static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
{
__raw_writel(value, ipu->reg_ipu + reg);
}
/*****************************************************************************
* IPU / IC common functions
*/
static void dump_idmac_reg(struct ipu *ipu)
{
dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
"IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
idmac_read_icreg(ipu, IDMAC_CONF),
idmac_read_icreg(ipu, IC_CONF),
idmac_read_icreg(ipu, IDMAC_CHA_EN),
idmac_read_icreg(ipu, IDMAC_CHA_PRI),
idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
"DB_MODE 0x%x, TASKS_STAT 0x%x\n",
idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
idmac_read_ipureg(ipu, IPU_TASKS_STAT));
}
static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
{
switch (fmt) {
case IPU_PIX_FMT_GENERIC: /* generic data */
case IPU_PIX_FMT_RGB332:
case IPU_PIX_FMT_YUV420P:
case IPU_PIX_FMT_YUV422P:
default:
return 1;
case IPU_PIX_FMT_RGB565:
case IPU_PIX_FMT_YUYV:
case IPU_PIX_FMT_UYVY:
return 2;
case IPU_PIX_FMT_BGR24:
case IPU_PIX_FMT_RGB24:
return 3;
case IPU_PIX_FMT_GENERIC_32: /* generic data */
case IPU_PIX_FMT_BGR32:
case IPU_PIX_FMT_RGB32:
case IPU_PIX_FMT_ABGR32:
return 4;
}
}
/* Enable direct write to memory by the Camera Sensor Interface */
static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
{
uint32_t ic_conf, mask;
switch (channel) {
case IDMAC_IC_0:
mask = IC_CONF_PRPENC_EN;
break;
case IDMAC_IC_7:
mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
break;
default:
return;
}
ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
idmac_write_icreg(ipu, ic_conf, IC_CONF);
}
/* Called under spin_lock_irqsave(&ipu_data.lock) */
static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
{
uint32_t ic_conf, mask;
switch (channel) {
case IDMAC_IC_0:
mask = IC_CONF_PRPENC_EN;
break;
case IDMAC_IC_7:
mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
break;
default:
return;
}
ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
idmac_write_icreg(ipu, ic_conf, IC_CONF);
}
static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
{
uint32_t stat = TASK_STAT_IDLE;
uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
switch (channel) {
case IDMAC_IC_7:
stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
TSTAT_CSI2MEM_OFFSET;
break;
case IDMAC_IC_0:
case IDMAC_SDC_0:
case IDMAC_SDC_1:
default:
break;
}
return stat;
}
struct chan_param_mem_planar {
/* Word 0 */
u32 xv:10;
u32 yv:10;
u32 xb:12;
u32 yb:12;
u32 res1:2;
u32 nsb:1;
u32 lnpb:6;
u32 ubo_l:11;
u32 ubo_h:15;
u32 vbo_l:17;
u32 vbo_h:9;
u32 res2:3;
u32 fw:12;
u32 fh_l:8;
u32 fh_h:4;
u32 res3:28;
/* Word 1 */
u32 eba0;
u32 eba1;
u32 bpp:3;
u32 sl:14;
u32 pfs:3;
u32 bam:3;
u32 res4:2;
u32 npb:6;
u32 res5:1;
u32 sat:2;
u32 res6:30;
} __attribute__ ((packed));
struct chan_param_mem_interleaved {
/* Word 0 */
u32 xv:10;
u32 yv:10;
u32 xb:12;
u32 yb:12;
u32 sce:1;
u32 res1:1;
u32 nsb:1;
u32 lnpb:6;
u32 sx:10;
u32 sy_l:1;
u32 sy_h:9;
u32 ns:10;
u32 sm:10;
u32 sdx_l:3;
u32 sdx_h:2;
u32 sdy:5;
u32 sdrx:1;
u32 sdry:1;
u32 sdr1:1;
u32 res2:2;
u32 fw:12;
u32 fh_l:8;
u32 fh_h:4;
u32 res3:28;
/* Word 1 */
u32 eba0;
u32 eba1;
u32 bpp:3;
u32 sl:14;
u32 pfs:3;
u32 bam:3;
u32 res4:2;
u32 npb:6;
u32 res5:1;
u32 sat:2;
u32 scc:1;
u32 ofs0:5;
u32 ofs1:5;
u32 ofs2:5;
u32 ofs3:5;
u32 wid0:3;
u32 wid1:3;
u32 wid2:3;
u32 wid3:3;
u32 dec_sel:1;
u32 res6:28;
} __attribute__ ((packed));
union chan_param_mem {
struct chan_param_mem_planar pp;
struct chan_param_mem_interleaved ip;
};
static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
u32 u_offset, u32 v_offset)
{
params->pp.ubo_l = u_offset & 0x7ff;
params->pp.ubo_h = u_offset >> 11;
params->pp.vbo_l = v_offset & 0x1ffff;
params->pp.vbo_h = v_offset >> 17;
}
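/*
 * Worked example (added for clarity): u_offset == 0x12345 is split into
 * ubo_l == 0x12345 & 0x7ff == 0x345 and ubo_h == 0x12345 >> 11 == 0x24,
 * so the 26-bit offset spans the two bitfields declared above.
 */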
static void ipu_ch_param_set_size(union chan_param_mem *params,
uint32_t pixel_fmt, uint16_t width,
uint16_t height, uint16_t stride)
{
u32 u_offset;
u32 v_offset;
params->pp.fw = width - 1;
params->pp.fh_l = height - 1;
params->pp.fh_h = (height - 1) >> 8;
params->pp.sl = stride - 1;
switch (pixel_fmt) {
case IPU_PIX_FMT_GENERIC:
/* Represents 8-bit Generic data */
params->pp.bpp = 3;
params->pp.pfs = 7;
params->pp.npb = 31;
params->pp.sat = 2; /* SAT = use 32-bit access */
break;
case IPU_PIX_FMT_GENERIC_32:
/* Represents 32-bit Generic data */
params->pp.bpp = 0;
params->pp.pfs = 7;
params->pp.npb = 7;
params->pp.sat = 2; /* SAT = use 32-bit access */
break;
case IPU_PIX_FMT_RGB565:
params->ip.bpp = 2;
params->ip.pfs = 4;
params->ip.npb = 15;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 5; /* Green bit offset */
params->ip.ofs2 = 11; /* Blue bit offset */
params->ip.ofs3 = 16; /* Alpha bit offset */
params->ip.wid0 = 4; /* Red bit width - 1 */
params->ip.wid1 = 5; /* Green bit width - 1 */
params->ip.wid2 = 4; /* Blue bit width - 1 */
break;
case IPU_PIX_FMT_BGR24:
params->ip.bpp = 1; /* 24 BPP & RGB PFS */
params->ip.pfs = 4;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 8; /* Green bit offset */
params->ip.ofs2 = 16; /* Blue bit offset */
params->ip.ofs3 = 24; /* Alpha bit offset */
params->ip.wid0 = 7; /* Red bit width - 1 */
params->ip.wid1 = 7; /* Green bit width - 1 */
params->ip.wid2 = 7; /* Blue bit width - 1 */
break;
case IPU_PIX_FMT_RGB24:
params->ip.bpp = 1; /* 24 BPP & RGB PFS */
params->ip.pfs = 4;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 16; /* Red bit offset */
params->ip.ofs1 = 8; /* Green bit offset */
params->ip.ofs2 = 0; /* Blue bit offset */
params->ip.ofs3 = 24; /* Alpha bit offset */
params->ip.wid0 = 7; /* Red bit width - 1 */
params->ip.wid1 = 7; /* Green bit width - 1 */
params->ip.wid2 = 7; /* Blue bit width - 1 */
break;
case IPU_PIX_FMT_BGRA32:
case IPU_PIX_FMT_BGR32:
case IPU_PIX_FMT_ABGR32:
params->ip.bpp = 0;
params->ip.pfs = 4;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 8; /* Red bit offset */
params->ip.ofs1 = 16; /* Green bit offset */
params->ip.ofs2 = 24; /* Blue bit offset */
params->ip.ofs3 = 0; /* Alpha bit offset */
params->ip.wid0 = 7; /* Red bit width - 1 */
params->ip.wid1 = 7; /* Green bit width - 1 */
params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */
break;
case IPU_PIX_FMT_RGBA32:
case IPU_PIX_FMT_RGB32:
params->ip.bpp = 0;
params->ip.pfs = 4;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 24; /* Red bit offset */
params->ip.ofs1 = 16; /* Green bit offset */
params->ip.ofs2 = 8; /* Blue bit offset */
params->ip.ofs3 = 0; /* Alpha bit offset */
params->ip.wid0 = 7; /* Red bit width - 1 */
params->ip.wid1 = 7; /* Green bit width - 1 */
params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */
break;
case IPU_PIX_FMT_UYVY:
params->ip.bpp = 2;
params->ip.pfs = 6;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
break;
case IPU_PIX_FMT_YUV420P2:
case IPU_PIX_FMT_YUV420P:
params->ip.bpp = 3;
params->ip.pfs = 3;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
u_offset = stride * height;
v_offset = u_offset + u_offset / 4;
ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
break;
case IPU_PIX_FMT_YVU422P:
params->ip.bpp = 3;
params->ip.pfs = 2;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
v_offset = stride * height;
u_offset = v_offset + v_offset / 2;
ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
break;
case IPU_PIX_FMT_YUV422P:
params->ip.bpp = 3;
params->ip.pfs = 2;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
u_offset = stride * height;
v_offset = u_offset + u_offset / 2;
ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
break;
default:
dev_err(ipu_data.dev,
"mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
break;
}
params->pp.nsb = 1;
}
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
params->pp.eba0 = buf0;
params->pp.eba1 = buf1;
}
static void ipu_ch_param_set_rotation(union chan_param_mem *params,
enum ipu_rotate_mode rotate)
{
params->pp.bam = rotate;
}
static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
uint32_t num_words)
{
for (; num_words > 0; num_words--) {
dev_dbg(ipu_data.dev,
"write param mem - addr = 0x%08X, data = 0x%08X\n",
addr, *data);
idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
addr++;
if ((addr & 0x7) == 5) {
addr &= ~0x7; /* set to word 0 */
addr += 8; /* increment to next row */
}
}
}
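/*
 * Worked example (added for clarity): starting at addr == 0x10000, words
 * 0x10000..0x10004 are written; after the write to 0x10004 the increment
 * makes (addr & 0x7) == 5, so addr is realigned to 0x10008, the first
 * word of the next parameter-memory row.
 */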
static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
uint32_t *resize_coeff,
uint32_t *downsize_coeff)
{
uint32_t temp_size;
uint32_t temp_downsize;
*resize_coeff = 1 << 13;
*downsize_coeff = 1 << 13;
/* Cannot downsize more than 8:1 */
if (out_size << 3 < in_size)
return -EINVAL;
/* compute downsizing coefficient */
temp_downsize = 0;
temp_size = in_size;
while (temp_size >= out_size * 2 && temp_downsize < 2) {
temp_size >>= 1;
temp_downsize++;
}
*downsize_coeff = temp_downsize;
/*
* compute resizing coefficient using the following formula:
* resize_coeff = M*(SI -1)/(SO - 1)
* where M = 2^13, SI - input size, SO - output size
*/
*resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
if (*resize_coeff >= 16384L) {
dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
*resize_coeff = 0x3FFF;
}
dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
"downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
*downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
return 0;
}
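/*
 * Worked example (added for clarity): in_size == 640, out_size == 320.
 * The loop halves the input once (temp_size == 320, downsize_coeff == 1),
 * then resize_coeff == 8192 * (320 - 1) / (320 - 1) == 8192, which is
 * exactly 1.0 since 8192 == 1 << 13 represents unity.
 */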
static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
{
switch (fmt) {
case IPU_PIX_FMT_RGB565:
case IPU_PIX_FMT_BGR24:
case IPU_PIX_FMT_RGB24:
case IPU_PIX_FMT_BGR32:
case IPU_PIX_FMT_RGB32:
return IPU_COLORSPACE_RGB;
default:
return IPU_COLORSPACE_YCBCR;
}
}
static int ipu_ic_init_prpenc(struct ipu *ipu,
union ipu_channel_param *params, bool src_is_csi)
{
uint32_t reg, ic_conf;
uint32_t downsize_coeff, resize_coeff;
enum ipu_color_space in_fmt, out_fmt;
/* Setup vertical resizing */
calc_resize_coeffs(params->video.in_height,
params->video.out_height,
&resize_coeff, &downsize_coeff);
reg = (downsize_coeff << 30) | (resize_coeff << 16);
/* Setup horizontal resizing */
calc_resize_coeffs(params->video.in_width,
params->video.out_width,
&resize_coeff, &downsize_coeff);
reg |= (downsize_coeff << 14) | resize_coeff;
/* Setup color space conversion */
in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
/*
* Colourspace conversion unsupported yet - see _init_csc() in
* Freescale sources
*/
if (in_fmt != out_fmt) {
dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
return -EOPNOTSUPP;
}
idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
ic_conf = idmac_read_icreg(ipu, IC_CONF);
if (src_is_csi)
ic_conf &= ~IC_CONF_RWS_EN;
else
ic_conf |= IC_CONF_RWS_EN;
idmac_write_icreg(ipu, ic_conf, IC_CONF);
return 0;
}
static uint32_t dma_param_addr(uint32_t dma_ch)
{
/* Channel Parameter Memory */
return 0x10000 | (dma_ch << 4);
}
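/*
 * Worked example (added for clarity): dma_ch == 7 gives
 * 0x10000 | (7 << 4) == 0x10070, the parameter-memory address of
 * channel 7.
 */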
static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
bool prio)
{
u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
if (prio)
reg |= 1UL << channel;
else
reg &= ~(1UL << channel);
idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
dump_idmac_reg(ipu);
}
static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
{
uint32_t mask;
switch (channel) {
case IDMAC_IC_0:
case IDMAC_IC_7:
mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
break;
case IDMAC_SDC_0:
case IDMAC_SDC_1:
mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
break;
default:
mask = 0;
break;
}
return mask;
}
/**
* ipu_enable_channel() - enable an IPU channel.
* @idmac: IPU DMAC context.
* @ichan: IDMAC channel.
* @return: 0 on success or negative error code on failure.
*/
static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
struct ipu *ipu = to_ipu(idmac);
enum ipu_channel channel = ichan->dma_chan.chan_id;
uint32_t reg;
unsigned long flags;
spin_lock_irqsave(&ipu->lock, flags);
/* Reset to buffer 0 */
idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
ichan->active_buffer = 0;
ichan->status = IPU_CHANNEL_ENABLED;
switch (channel) {
case IDMAC_SDC_0:
case IDMAC_SDC_1:
case IDMAC_IC_7:
ipu_channel_set_priority(ipu, channel, true);
default:
break;
}
reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
ipu_ic_enable_task(ipu, channel);
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
/**
* ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
* @ichan: IDMAC channel.
* @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
* @width: width of buffer in pixels.
* @height: height of buffer in pixels.
* @stride: stride length of buffer in pixels.
* @rot_mode: rotation mode of buffer. A rotation setting other than
* IPU_ROTATE_VERT_FLIP should only be used for input buffers of
* rotation channels.
* @phyaddr_0: buffer 0 physical address.
* @phyaddr_1: buffer 1 physical address. Setting this to a value other than
* NULL enables double buffering mode.
* @return: 0 on success or negative error code on failure.
*/
static int ipu_init_channel_buffer(struct idmac_channel *ichan,
enum pixel_fmt pixel_fmt,
uint16_t width, uint16_t height,
uint32_t stride,
enum ipu_rotate_mode rot_mode,
dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
struct idmac *idmac = to_idmac(ichan->dma_chan.device);
struct ipu *ipu = to_ipu(idmac);
union chan_param_mem params = {};
unsigned long flags;
uint32_t reg;
uint32_t stride_bytes;
stride_bytes = stride * bytes_per_pixel(pixel_fmt);
if (stride_bytes % 4) {
dev_err(ipu->dev,
"Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
stride, stride_bytes);
return -EINVAL;
}
/* IC channel's stride must be a multiple of 8 pixels */
if ((channel <= IDMAC_IC_13) && (stride % 8)) {
dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
return -EINVAL;
}
/* Build parameter memory data for DMA channel */
ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
ipu_ch_param_set_rotation(&params, rot_mode);
spin_lock_irqsave(&ipu->lock, flags);
ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
if (phyaddr_1)
reg |= 1UL << channel;
else
reg &= ~(1UL << channel);
idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
ichan->status = IPU_CHANNEL_READY;
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
/**
* ipu_select_buffer() - mark a channel's buffer as ready.
* @channel: channel ID.
* @buffer_n: buffer number to mark ready.
*/
static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
{
/* No locking - this is a write-one-to-set register, cleared by IPU */
if (buffer_n == 0)
/* Mark buffer 0 as ready. */
idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
else
/* Mark buffer 1 as ready. */
idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
}
/**
* ipu_update_channel_buffer() - update physical address of a channel buffer.
* @ichan: IDMAC channel.
* @buffer_n: buffer number to update.
* 0 or 1 are the only valid values.
* @phyaddr: buffer physical address.
*/
/* Called under spin_lock(_irqsave)(&ichan->lock) */
static void ipu_update_channel_buffer(struct idmac_channel *ichan,
int buffer_n, dma_addr_t phyaddr)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
uint32_t reg;
unsigned long flags;
spin_lock_irqsave(&ipu_data.lock, flags);
if (buffer_n == 0) {
reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
if (reg & (1UL << channel)) {
ipu_ic_disable_task(&ipu_data, channel);
ichan->status = IPU_CHANNEL_READY;
}
/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
0x0008UL, IPU_IMA_ADDR);
idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
} else {
reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
if (reg & (1UL << channel)) {
ipu_ic_disable_task(&ipu_data, channel);
ichan->status = IPU_CHANNEL_READY;
}
/* Check if double-buffering is already enabled */
reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
if (!(reg & (1UL << channel)))
idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
IPU_CHA_DB_MODE_SEL);
/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
0x0009UL, IPU_IMA_ADDR);
idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
}
spin_unlock_irqrestore(&ipu_data.lock, flags);
}
/* Called under spin_lock_irqsave(&ichan->lock) */
static int ipu_submit_buffer(struct idmac_channel *ichan,
struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
{
unsigned int chan_id = ichan->dma_chan.chan_id;
struct device *dev = &ichan->dma_chan.dev->device;
if (async_tx_test_ack(&desc->txd))
return -EINTR;
/*
* On first invocation this shouldn't be necessary, the call to
* ipu_init_channel_buffer() above will set addresses for us, so we
* could make it conditional on status >= IPU_CHANNEL_ENABLED, but
* doing it again shouldn't hurt either.
*/
ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
ipu_select_buffer(chan_id, buf_idx);
dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
sg, chan_id, buf_idx);
return 0;
}
/* Called under spin_lock_irqsave(&ichan->lock) */
static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
struct idmac_tx_desc *desc)
{
struct scatterlist *sg;
int i, ret = 0;
for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
if (!ichan->sg[i]) {
ichan->sg[i] = sg;
ret = ipu_submit_buffer(ichan, desc, sg, i);
if (ret < 0)
return ret;
sg = sg_next(sg);
}
}
return ret;
}
static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct idmac_tx_desc *desc = to_tx_desc(tx);
struct idmac_channel *ichan = to_idmac_chan(tx->chan);
struct idmac *idmac = to_idmac(tx->chan->device);
struct ipu *ipu = to_ipu(idmac);
struct device *dev = &ichan->dma_chan.dev->device;
dma_cookie_t cookie;
unsigned long flags;
int ret;
/* Sanity check */
if (!list_empty(&desc->list)) {
/* The descriptor doesn't belong to the client */
dev_err(dev, "Descriptor %p not prepared!\n", tx);
return -EBUSY;
}
mutex_lock(&ichan->chan_mutex);
async_tx_clear_ack(tx);
if (ichan->status < IPU_CHANNEL_READY) {
struct idmac_video_param *video = &ichan->params.video;
/*
* Initial buffer assignment - the first two sg-entries from
* the descriptor will end up in the IDMAC buffers
*/
dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
sg_dma_address(&desc->sg[1]);
WARN_ON(ichan->sg[0] || ichan->sg[1]);
cookie = ipu_init_channel_buffer(ichan,
video->out_pixel_fmt,
video->out_width,
video->out_height,
video->out_stride,
IPU_ROTATE_NONE,
sg_dma_address(&desc->sg[0]),
dma_1);
if (cookie < 0)
goto out;
}
dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
cookie = dma_cookie_assign(tx);
/* ipu->lock can be taken under ichan->lock, but not v.v. */
spin_lock_irqsave(&ichan->lock, flags);
list_add_tail(&desc->list, &ichan->queue);
/* submit_buffers() atomically verifies and fills empty sg slots */
ret = ipu_submit_channel_buffers(ichan, desc);
spin_unlock_irqrestore(&ichan->lock, flags);
if (ret < 0) {
cookie = ret;
goto dequeue;
}
if (ichan->status < IPU_CHANNEL_ENABLED) {
ret = ipu_enable_channel(idmac, ichan);
if (ret < 0) {
cookie = ret;
goto dequeue;
}
}
dump_idmac_reg(ipu);
dequeue:
if (cookie < 0) {
spin_lock_irqsave(&ichan->lock, flags);
list_del_init(&desc->list);
spin_unlock_irqrestore(&ichan->lock, flags);
tx->cookie = cookie;
ichan->dma_chan.cookie = cookie;
}
out:
mutex_unlock(&ichan->chan_mutex);
return cookie;
}
/* Called with ichan->chan_mutex held */
static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
{
struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
struct idmac *idmac = to_idmac(ichan->dma_chan.device);
if (!desc)
return -ENOMEM;
/* No interrupts, just disable the tasklet for a moment */
tasklet_disable(&to_ipu(idmac)->tasklet);
ichan->n_tx_desc = n;
ichan->desc = desc;
INIT_LIST_HEAD(&ichan->queue);
INIT_LIST_HEAD(&ichan->free_list);
while (n--) {
struct dma_async_tx_descriptor *txd = &desc->txd;
memset(txd, 0, sizeof(*txd));
dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
txd->tx_submit = idmac_tx_submit;
list_add(&desc->list, &ichan->free_list);
desc++;
}
tasklet_enable(&to_ipu(idmac)->tasklet);
return 0;
}
/**
* ipu_init_channel() - initialize an IPU channel.
* @idmac: IPU DMAC context.
* @ichan: pointer to the channel object.
* @return 0 on success or negative error code on failure.
*/
static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
union ipu_channel_param *params = &ichan->params;
uint32_t ipu_conf;
enum ipu_channel channel = ichan->dma_chan.chan_id;
unsigned long flags;
uint32_t reg;
struct ipu *ipu = to_ipu(idmac);
int ret = 0, n_desc = 0;
dev_dbg(ipu->dev, "init channel = %d\n", channel);
if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
channel != IDMAC_IC_7)
return -EINVAL;
spin_lock_irqsave(&ipu->lock, flags);
switch (channel) {
case IDMAC_IC_7:
n_desc = 16;
reg = idmac_read_icreg(ipu, IC_CONF);
idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
break;
case IDMAC_IC_0:
n_desc = 16;
reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
ret = ipu_ic_init_prpenc(ipu, params, true);
break;
case IDMAC_SDC_0:
case IDMAC_SDC_1:
n_desc = 4;
default:
break;
}
ipu->channel_init_mask |= 1L << channel;
/* Enable IPU sub module */
ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
ipu_channel_conf_mask(channel);
idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
spin_unlock_irqrestore(&ipu->lock, flags);
if (n_desc && !ichan->desc)
ret = idmac_desc_alloc(ichan, n_desc);
dump_idmac_reg(ipu);
return ret;
}
/**
* ipu_uninit_channel() - uninitialize an IPU channel.
* @idmac: IPU DMAC context.
* @ichan: pointer to the channel object.
*/
static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
unsigned long flags;
uint32_t reg;
unsigned long chan_mask = 1UL << channel;
uint32_t ipu_conf;
struct ipu *ipu = to_ipu(idmac);
spin_lock_irqsave(&ipu->lock, flags);
if (!(ipu->channel_init_mask & chan_mask)) {
dev_err(ipu->dev, "Channel already uninitialized %d\n",
channel);
spin_unlock_irqrestore(&ipu->lock, flags);
return;
}
/* Reset the double buffer */
reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
ichan->sec_chan_en = false;
switch (channel) {
case IDMAC_IC_7:
reg = idmac_read_icreg(ipu, IC_CONF);
idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
IC_CONF);
break;
case IDMAC_IC_0:
reg = idmac_read_icreg(ipu, IC_CONF);
idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
IC_CONF);
break;
case IDMAC_SDC_0:
case IDMAC_SDC_1:
default:
break;
}
ipu->channel_init_mask &= ~(1L << channel);
ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
~ipu_channel_conf_mask(channel);
idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
spin_unlock_irqrestore(&ipu->lock, flags);
ichan->n_tx_desc = 0;
vfree(ichan->desc);
ichan->desc = NULL;
}
/**
* ipu_disable_channel() - disable an IPU channel.
* @idmac: IPU DMAC context.
* @ichan: channel object pointer.
* @wait_for_stop: flag to set whether to wait for channel end of frame or
* return immediately.
* @return: 0 on success or negative error code on failure.
*/
static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
bool wait_for_stop)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
struct ipu *ipu = to_ipu(idmac);
uint32_t reg;
unsigned long flags;
unsigned long chan_mask = 1UL << channel;
unsigned int timeout;
if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
timeout = 40;
/* This wait always times out, apparently due to the spurious IRQ problem */
while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
(ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
timeout--;
msleep(10);
if (!timeout) {
dev_dbg(ipu->dev,
"Warning: timeout waiting for channel %u to "
"stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
"busy = 0x%08X, tstat = 0x%08X\n", channel,
idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
idmac_read_ipureg(ipu, IPU_TASKS_STAT));
break;
}
}
dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
}
/* SDC BG and FG must be disabled before DMA is disabled */
if (wait_for_stop && (channel == IDMAC_SDC_0 ||
channel == IDMAC_SDC_1)) {
for (timeout = 5;
timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
msleep(5);
}
spin_lock_irqsave(&ipu->lock, flags);
/* Disable IC task */
ipu_ic_disable_task(ipu, channel);
/* Disable DMA channel(s) */
reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
spin_unlock_irqrestore(&ipu->lock, flags);
return 0;
}
static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
struct idmac_tx_desc **desc, struct scatterlist *sg)
{
struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
if (sgnew)
/* next sg-element in this list */
return sgnew;
if ((*desc)->list.next == &ichan->queue)
/* No more descriptors on the queue */
return NULL;
/* Fetch next descriptor */
*desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
return (*desc)->sg;
}
/*
* We have several possibilities here:
* current BUF next BUF
*
* not last sg next not last sg
* not last sg next last sg
* last sg first sg from next descriptor
* last sg NULL
*
* Besides, the descriptor queue might be empty or not. We process all these
* cases carefully.
*/
static irqreturn_t idmac_interrupt(int irq, void *dev_id)
{
struct idmac_channel *ichan = dev_id;
struct device *dev = &ichan->dma_chan.dev->device;
unsigned int chan_id = ichan->dma_chan.chan_id;
struct scatterlist **sg, *sgnext, *sgnew = NULL;
/* Next transfer descriptor */
struct idmac_tx_desc *desc, *descnew;
dma_async_tx_callback callback;
void *callback_param;
bool done = false;
u32 ready0, ready1, curbuf, err;
unsigned long flags;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
spin_lock_irqsave(&ipu_data.lock, flags);
ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
if (err & (1 << chan_id)) {
idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
spin_unlock_irqrestore(&ipu_data.lock, flags);
/*
* Setting
* ichan->sg[0] = ichan->sg[1] = NULL;
* here would force channel re-enabling on the next tx_submit(), but
* that is dirty - think about descriptors with multiple sg elements.
*/
dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
chan_id, ready0, ready1, curbuf);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&ipu_data.lock, flags);
/* Other interrupts do not interfere with this channel */
spin_lock(&ichan->lock);
if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
(!ichan->active_buffer && (ready0 >> chan_id) & 1)
)) {
spin_unlock(&ichan->lock);
dev_dbg(dev,
"IRQ with active buffer still ready on channel %x, "
"active %d, ready %x, %x!\n", chan_id,
ichan->active_buffer, ready0, ready1);
return IRQ_NONE;
}
if (unlikely(list_empty(&ichan->queue))) {
ichan->sg[ichan->active_buffer] = NULL;
spin_unlock(&ichan->lock);
dev_err(dev,
"IRQ without queued buffers on channel %x, active %d, "
"ready %x, %x!\n", chan_id,
ichan->active_buffer, ready0, ready1);
return IRQ_NONE;
}
/*
* active_buffer is a software flag, it shows which buffer we are
* currently expecting back from the hardware, IDMAC should be
* processing the other buffer already
*/
sg = &ichan->sg[ichan->active_buffer];
sgnext = ichan->sg[!ichan->active_buffer];
if (!*sg) {
spin_unlock(&ichan->lock);
return IRQ_HANDLED;
}
desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
descnew = desc;
dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
irq, (u64)sg_dma_address(*sg),
sgnext ? (u64)sg_dma_address(sgnext) : 0,
ichan->active_buffer, curbuf);
/* Find the descriptor of sgnext */
sgnew = idmac_sg_next(ichan, &descnew, *sg);
if (sgnext != sgnew)
dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
/*
* if sgnext == NULL sg must be the last element in a scatterlist and
* queue must be empty
*/
if (unlikely(!sgnext)) {
if (!WARN_ON(sg_next(*sg)))
dev_dbg(dev, "Underrun on channel %x\n", chan_id);
ichan->sg[!ichan->active_buffer] = sgnew;
if (unlikely(sgnew)) {
ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
} else {
spin_lock_irqsave(&ipu_data.lock, flags);
ipu_ic_disable_task(&ipu_data, chan_id);
spin_unlock_irqrestore(&ipu_data.lock, flags);
ichan->status = IPU_CHANNEL_READY;
/* Continue to check for complete descriptor */
}
}
/* Calculate and submit the next sg element */
sgnew = idmac_sg_next(ichan, &descnew, sgnew);
if (unlikely(!sg_next(*sg)) || !sgnext) {
/*
* Last element in scatterlist done, remove from the queue,
* _init for debugging
*/
list_del_init(&desc->list);
done = true;
}
*sg = sgnew;
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
callback = descnew->txd.callback;
callback_param = descnew->txd.callback_param;
list_del_init(&descnew->list);
spin_unlock(&ichan->lock);
if (callback)
callback(callback_param);
spin_lock(&ichan->lock);
}
/* Flip the active buffer - even if update above failed */
ichan->active_buffer = !ichan->active_buffer;
if (done)
dma_cookie_complete(&desc->txd);
callback = desc->txd.callback;
callback_param = desc->txd.callback_param;
spin_unlock(&ichan->lock);
if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
callback(callback_param);
return IRQ_HANDLED;
}
static void ipu_gc_tasklet(unsigned long arg)
{
struct ipu *ipu = (struct ipu *)arg;
int i;
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
struct idmac_tx_desc *desc;
unsigned long flags;
struct scatterlist *sg;
int j, k;
for (j = 0; j < ichan->n_tx_desc; j++) {
desc = ichan->desc + j;
spin_lock_irqsave(&ichan->lock, flags);
if (async_tx_test_ack(&desc->txd)) {
list_move(&desc->list, &ichan->free_list);
for_each_sg(desc->sg, sg, desc->sg_len, k) {
if (ichan->sg[0] == sg)
ichan->sg[0] = NULL;
else if (ichan->sg[1] == sg)
ichan->sg[1] = NULL;
}
async_tx_clear_ack(&desc->txd);
}
spin_unlock_irqrestore(&ichan->lock, flags);
}
}
}
/* Take a transfer descriptor from the channel's free list and initialise it. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long tx_flags,
void *context)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
struct dma_async_tx_descriptor *txd = NULL;
unsigned long flags;
/* We only can handle these three channels so far */
if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
chan->chan_id != IDMAC_IC_7)
return NULL;
if (!is_slave_direction(direction)) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
mutex_lock(&ichan->chan_mutex);
spin_lock_irqsave(&ichan->lock, flags);
if (!list_empty(&ichan->free_list)) {
desc = list_entry(ichan->free_list.next,
struct idmac_tx_desc, list);
list_del_init(&desc->list);
desc->sg_len = sg_len;
desc->sg = sgl;
txd = &desc->txd;
txd->flags = tx_flags;
}
spin_unlock_irqrestore(&ichan->lock, flags);
mutex_unlock(&ichan->chan_mutex);
tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
return txd;
}
/* Re-select the current buffer and re-activate the channel */
static void idmac_issue_pending(struct dma_chan *chan)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
struct ipu *ipu = to_ipu(idmac);
unsigned long flags;
/* This is not always needed, but doesn't hurt either */
spin_lock_irqsave(&ipu->lock, flags);
ipu_select_buffer(chan->chan_id, ichan->active_buffer);
spin_unlock_irqrestore(&ipu->lock, flags);
/*
* Might need to perform some parts of initialisation from
* ipu_enable_channel(), but not all, we do not want to reset to buffer
* 0, don't need to set priority again either, but re-enabling the task
* and the channel might be a good idea.
*/
}
static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
struct ipu *ipu = to_ipu(idmac);
struct list_head *list, *tmp;
unsigned long flags;
int i;
switch (cmd) {
case DMA_PAUSE:
spin_lock_irqsave(&ipu->lock, flags);
ipu_ic_disable_task(ipu, chan->chan_id);
/* Return all descriptors into "prepared" state */
list_for_each_safe(list, tmp, &ichan->queue)
list_del_init(list);
ichan->sg[0] = NULL;
ichan->sg[1] = NULL;
spin_unlock_irqrestore(&ipu->lock, flags);
ichan->status = IPU_CHANNEL_INITIALIZED;
break;
case DMA_TERMINATE_ALL:
ipu_disable_channel(idmac, ichan,
ichan->status >= IPU_CHANNEL_ENABLED);
tasklet_disable(&ipu->tasklet);
/* ichan->queue is modified in ISR, have to spinlock */
spin_lock_irqsave(&ichan->lock, flags);
list_splice_init(&ichan->queue, &ichan->free_list);
if (ichan->desc)
for (i = 0; i < ichan->n_tx_desc; i++) {
struct idmac_tx_desc *desc = ichan->desc + i;
if (list_empty(&desc->list))
/* Descriptor was prepared, but not submitted */
list_add(&desc->list, &ichan->free_list);
async_tx_clear_ack(&desc->txd);
}
ichan->sg[0] = NULL;
ichan->sg[1] = NULL;
spin_unlock_irqrestore(&ichan->lock, flags);
tasklet_enable(&ipu->tasklet);
ichan->status = IPU_CHANNEL_INITIALIZED;
break;
default:
return -ENOSYS;
}
return 0;
}
static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
int ret;
mutex_lock(&ichan->chan_mutex);
ret = __idmac_control(chan, cmd, arg);
mutex_unlock(&ichan->chan_mutex);
return ret;
}
#ifdef DEBUG
static irqreturn_t ic_sof_irq(int irq, void *dev_id)
{
struct idmac_channel *ichan = dev_id;
printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
irq, ichan->dma_chan.chan_id);
disable_irq_nosync(irq);
return IRQ_HANDLED;
}
static irqreturn_t ic_eof_irq(int irq, void *dev_id)
{
struct idmac_channel *ichan = dev_id;
printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
irq, ichan->dma_chan.chan_id);
disable_irq_nosync(irq);
return IRQ_HANDLED;
}
static int ic_sof = -EINVAL, ic_eof = -EINVAL;
#endif
static int idmac_alloc_chan_resources(struct dma_chan *chan)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
int ret;
/* dmaengine.c now guarantees to only offer free channels */
BUG_ON(chan->client_count > 1);
WARN_ON(ichan->status != IPU_CHANNEL_FREE);
dma_cookie_init(chan);
ret = ipu_irq_map(chan->chan_id);
if (ret < 0)
goto eimap;
ichan->eof_irq = ret;
/*
* Important to first disable the channel, because maybe someone
* used it before us, e.g., the bootloader
*/
ipu_disable_channel(idmac, ichan, true);
ret = ipu_init_channel(idmac, ichan);
if (ret < 0)
goto eichan;
ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
ichan->eof_name, ichan);
if (ret < 0)
goto erirq;
#ifdef DEBUG
if (chan->chan_id == IDMAC_IC_7) {
ic_sof = ipu_irq_map(69);
if (ic_sof > 0) {
ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
if (ret)
dev_err(&chan->dev->device, "request irq failed for IC SOF\n");
}
ic_eof = ipu_irq_map(70);
if (ic_eof > 0) {
ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
if (ret)
dev_err(&chan->dev->device, "request irq failed for IC EOF\n");
}
}
#endif
ichan->status = IPU_CHANNEL_INITIALIZED;
dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
chan->chan_id, ichan->eof_irq);
return ret;
erirq:
ipu_uninit_channel(idmac, ichan);
eichan:
ipu_irq_unmap(chan->chan_id);
eimap:
return ret;
}
static void idmac_free_chan_resources(struct dma_chan *chan)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
mutex_lock(&ichan->chan_mutex);
__idmac_control(chan, DMA_TERMINATE_ALL, 0);
if (ichan->status > IPU_CHANNEL_FREE) {
#ifdef DEBUG
if (chan->chan_id == IDMAC_IC_7) {
if (ic_sof > 0) {
free_irq(ic_sof, ichan);
ipu_irq_unmap(69);
ic_sof = -EINVAL;
}
if (ic_eof > 0) {
free_irq(ic_eof, ichan);
ipu_irq_unmap(70);
ic_eof = -EINVAL;
}
}
#endif
free_irq(ichan->eof_irq, ichan);
ipu_irq_unmap(chan->chan_id);
}
ichan->status = IPU_CHANNEL_FREE;
ipu_uninit_channel(idmac, ichan);
mutex_unlock(&ichan->chan_mutex);
tasklet_schedule(&to_ipu(idmac)->tasklet);
}
static enum dma_status idmac_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
return dma_cookie_status(chan, cookie, txstate);
}
static int __init ipu_idmac_init(struct ipu *ipu)
{
struct idmac *idmac = &ipu->idmac;
struct dma_device *dma = &idmac->dma;
int i;
dma_cap_set(DMA_SLAVE, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
/* Compulsory common fields */
dma->dev = ipu->dev;
dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
dma->device_free_chan_resources = idmac_free_chan_resources;
dma->device_tx_status = idmac_tx_status;
dma->device_issue_pending = idmac_issue_pending;
/* Compulsory for DMA_SLAVE fields */
dma->device_prep_slave_sg = idmac_prep_slave_sg;
dma->device_control = idmac_control;
INIT_LIST_HEAD(&dma->channels);
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
struct dma_chan *dma_chan = &ichan->dma_chan;
spin_lock_init(&ichan->lock);
mutex_init(&ichan->chan_mutex);
ichan->status = IPU_CHANNEL_FREE;
ichan->sec_chan_en = false;
snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
dma_chan->device = &idmac->dma;
dma_cookie_init(dma_chan);
dma_chan->chan_id = i;
list_add_tail(&dma_chan->device_node, &dma->channels);
}
idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
return dma_async_device_register(&idmac->dma);
}
static void ipu_idmac_exit(struct ipu *ipu)
{
int i;
struct idmac *idmac = &ipu->idmac;
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
struct idmac_channel *ichan = ipu->channel + i;
idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
}
dma_async_device_unregister(&idmac->dma);
}
/*****************************************************************************
* IPU common probe / remove
*/
static int __init ipu_probe(struct platform_device *pdev)
{
struct resource *mem_ipu, *mem_ic;
int ret;
spin_lock_init(&ipu_data.lock);
mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!mem_ipu || !mem_ic)
return -EINVAL;
ipu_data.dev = &pdev->dev;
platform_set_drvdata(pdev, &ipu_data);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_noirq;
ipu_data.irq_fn = ret;
ret = platform_get_irq(pdev, 1);
if (ret < 0)
goto err_noirq;
ipu_data.irq_err = ret;
dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
ipu_data.irq_fn, ipu_data.irq_err);
/* Remap IPU common registers */
ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
if (!ipu_data.reg_ipu) {
ret = -ENOMEM;
goto err_ioremap_ipu;
}
/* Remap Image Converter and Image DMA Controller registers */
ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
if (!ipu_data.reg_ic) {
ret = -ENOMEM;
goto err_ioremap_ic;
}
/* Get IPU clock */
ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(ipu_data.ipu_clk)) {
ret = PTR_ERR(ipu_data.ipu_clk);
goto err_clk_get;
}
/* Make sure IPU HSP clock is running */
clk_prepare_enable(ipu_data.ipu_clk);
/* Disable all interrupts */
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
(unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
ret = ipu_irq_attach_irq(&ipu_data, pdev);
if (ret < 0)
goto err_attach_irq;
/* Initialize DMA engine */
ret = ipu_idmac_init(&ipu_data);
if (ret < 0)
goto err_idmac_init;
tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data);
ipu_data.dev = &pdev->dev;
dev_dbg(ipu_data.dev, "IPU initialized\n");
return 0;
err_idmac_init:
err_attach_irq:
ipu_irq_detach_irq(&ipu_data, pdev);
clk_disable_unprepare(ipu_data.ipu_clk);
clk_put(ipu_data.ipu_clk);
err_clk_get:
iounmap(ipu_data.reg_ic);
err_ioremap_ic:
iounmap(ipu_data.reg_ipu);
err_ioremap_ipu:
err_noirq:
dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
return ret;
}
static int ipu_remove(struct platform_device *pdev)
{
struct ipu *ipu = platform_get_drvdata(pdev);
ipu_idmac_exit(ipu);
ipu_irq_detach_irq(ipu, pdev);
clk_disable_unprepare(ipu->ipu_clk);
clk_put(ipu->ipu_clk);
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
tasklet_kill(&ipu->tasklet);
return 0;
}
/*
* We need two MEM resources - with IPU-common and Image Converter registers,
* including PF_CONF and IDMAC_* registers, and two IRQs - function and error
*/
static struct platform_driver ipu_platform_driver = {
.driver = {
.name = "ipu-core",
.owner = THIS_MODULE,
},
.remove = ipu_remove,
};
static int __init ipu_init(void)
{
return platform_driver_probe(&ipu_platform_driver, ipu_probe);
}
subsys_initcall(ipu_init);
MODULE_DESCRIPTION("IPU core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_ALIAS("platform:ipu-core");
| gpl-2.0 |
OwnROM-Devices/OwnKernel-sprout | net/wireless/ibss.c | 665 | 12964 | /*
* Some IBSS support code for cfg80211.
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
*/
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/cfg80211.h>
#include "wext-compat.h"
#include "nl80211.h"
#include "rdev-ops.h"
void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_bss *bss;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return;
if (!wdev->ssid_len)
return;
bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
wdev->ssid, wdev->ssid_len,
WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
if (WARN_ON(!bss))
return;
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
}
cfg80211_hold_bss(bss_from_pub(bss));
wdev->current_bss = bss_from_pub(bss);
wdev->sme_state = CFG80211_SME_CONNECTED;
cfg80211_upload_connect_keys(wdev);
nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
GFP_KERNEL);
#ifdef CONFIG_CFG80211_WEXT
memset(&wrqu, 0, sizeof(wrqu));
memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
#endif
}
void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
trace_cfg80211_ibss_joined(dev, bssid);
CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
ev = kzalloc(sizeof(*ev), gfp);
if (!ev)
return;
ev->type = EVENT_IBSS_JOINED;
memcpy(ev->cr.bssid, bssid, ETH_ALEN);
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
spin_unlock_irqrestore(&wdev->event_lock, flags);
queue_work(cfg80211_wq, &rdev->event_work);
}
EXPORT_SYMBOL(cfg80211_ibss_joined);
int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_ibss_params *params,
struct cfg80211_cached_keys *connkeys)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
ASSERT_WDEV_LOCK(wdev);
if (wdev->ssid_len)
return -EALREADY;
if (!params->basic_rates) {
/*
* If no rates were explicitly configured,
* use the mandatory rate set for 11b or
* 11a for maximum compatibility.
*/
struct ieee80211_supported_band *sband =
rdev->wiphy.bands[params->chandef.chan->band];
int j;
u32 flag = params->chandef.chan->band == IEEE80211_BAND_5GHZ ?
IEEE80211_RATE_MANDATORY_A :
IEEE80211_RATE_MANDATORY_B;
for (j = 0; j < sband->n_bitrates; j++) {
if (sband->bitrates[j].flags & flag)
params->basic_rates |= BIT(j);
}
}
if (WARN_ON(wdev->connect_keys))
kfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
wdev->ibss_fixed = params->channel_fixed;
#ifdef CONFIG_CFG80211_WEXT
wdev->wext.ibss.chandef = params->chandef;
#endif
wdev->sme_state = CFG80211_SME_CONNECTING;
err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan,
params->channel_fixed
? CHAN_MODE_SHARED
: CHAN_MODE_EXCLUSIVE);
if (err) {
wdev->connect_keys = NULL;
return err;
}
err = rdev_join_ibss(rdev, dev, params);
if (err) {
wdev->connect_keys = NULL;
wdev->sme_state = CFG80211_SME_IDLE;
return err;
}
memcpy(wdev->ssid, params->ssid, params->ssid_len);
wdev->ssid_len = params->ssid_len;
return 0;
}
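/*
 * Example (illustrative): if params->basic_rates is left at 0 on a
 * non-5 GHz channel, the loop above ORs in the 11b mandatory rates
 * (nominally 1, 2, 5.5 and 11 Mb/s), maximising compatibility with
 * legacy stations.
 */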
int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_ibss_params *params,
struct cfg80211_cached_keys *connkeys)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = __cfg80211_join_ibss(rdev, dev, params, connkeys);
wdev_unlock(wdev);
mutex_unlock(&rdev->devlist_mtx);
return err;
}
static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
int i;
ASSERT_WDEV_LOCK(wdev);
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
/*
* Delete all the keys ... pairwise keys can't really
* exist any more anyway, but default keys might.
*/
if (rdev->ops->del_key)
for (i = 0; i < 6; i++)
rdev_del_key(rdev, dev, i, false, NULL);
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
}
wdev->current_bss = NULL;
wdev->sme_state = CFG80211_SME_IDLE;
wdev->ssid_len = 0;
#ifdef CONFIG_CFG80211_WEXT
if (!nowext)
wdev->wext.ibss.ssid_len = 0;
#endif
}
void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
wdev_lock(wdev);
__cfg80211_clear_ibss(dev, nowext);
wdev_unlock(wdev);
}
int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
ASSERT_WDEV_LOCK(wdev);
if (!wdev->ssid_len)
return -ENOLINK;
err = rdev_leave_ibss(rdev, dev);
if (err)
return err;
__cfg80211_clear_ibss(dev, nowext);
return 0;
}
int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
wdev_lock(wdev);
err = __cfg80211_leave_ibss(rdev, dev, nowext);
wdev_unlock(wdev);
return err;
}
#ifdef CONFIG_CFG80211_WEXT
int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev)
{
struct cfg80211_cached_keys *ck = NULL;
enum ieee80211_band band;
int i, err;
ASSERT_WDEV_LOCK(wdev);
if (!wdev->wext.ibss.beacon_interval)
wdev->wext.ibss.beacon_interval = 100;
/* try to find an IBSS channel if none requested ... */
if (!wdev->wext.ibss.chandef.chan) {
wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
sband = rdev->wiphy.bands[band];
if (!sband)
continue;
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
if (chan->flags & IEEE80211_CHAN_NO_IBSS)
continue;
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
wdev->wext.ibss.chandef.chan = chan;
wdev->wext.ibss.chandef.center_freq1 =
chan->center_freq;
break;
}
if (wdev->wext.ibss.chandef.chan)
break;
}
if (!wdev->wext.ibss.chandef.chan)
return -EINVAL;
}
/* don't join -- SSID is not there */
if (!wdev->wext.ibss.ssid_len)
return 0;
if (!netif_running(wdev->netdev))
return 0;
if (wdev->wext.keys) {
wdev->wext.keys->def = wdev->wext.default_key;
wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
}
wdev->wext.ibss.privacy = wdev->wext.default_key != -1;
if (wdev->wext.keys) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
for (i = 0; i < 6; i++)
ck->params[i].key = ck->data[i];
}
err = __cfg80211_join_ibss(rdev, wdev->netdev,
&wdev->wext.ibss, ck);
if (err)
kfree(ck);
return err;
}
int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *wextfreq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct ieee80211_channel *chan = NULL;
int err, freq;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
if (freq < 0)
return freq;
if (freq) {
chan = ieee80211_get_channel(wdev->wiphy, freq);
if (!chan)
return -EINVAL;
if (chan->flags & IEEE80211_CHAN_NO_IBSS ||
chan->flags & IEEE80211_CHAN_DISABLED)
return -EINVAL;
}
if (wdev->wext.ibss.chandef.chan == chan)
return 0;
wdev_lock(wdev);
err = 0;
if (wdev->ssid_len)
err = __cfg80211_leave_ibss(rdev, dev, true);
wdev_unlock(wdev);
if (err)
return err;
if (chan) {
wdev->wext.ibss.chandef.chan = chan;
wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
wdev->wext.ibss.chandef.center_freq1 = freq;
wdev->wext.ibss.channel_fixed = true;
} else {
/* cfg80211_ibss_wext_join will pick one if needed */
wdev->wext.ibss.channel_fixed = false;
}
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = cfg80211_ibss_wext_join(rdev, wdev);
wdev_unlock(wdev);
mutex_unlock(&rdev->devlist_mtx);
return err;
}
int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_channel *chan = NULL;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
wdev_lock(wdev);
if (wdev->current_bss)
chan = wdev->current_bss->pub.channel;
else if (wdev->wext.ibss.chandef.chan)
chan = wdev->wext.ibss.chandef.chan;
wdev_unlock(wdev);
if (chan) {
freq->m = chan->center_freq;
freq->e = 6;
return 0;
}
/* no channel if not joining */
return -EINVAL;
}
int cfg80211_ibss_wext_siwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
size_t len = data->length;
int err;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
wdev_lock(wdev);
err = 0;
if (wdev->ssid_len)
err = __cfg80211_leave_ibss(rdev, dev, true);
wdev_unlock(wdev);
if (err)
return err;
/* iwconfig uses nul termination in the SSID... */
if (len > 0 && ssid[len - 1] == '\0')
len--;
wdev->wext.ibss.ssid = wdev->ssid;
memcpy(wdev->wext.ibss.ssid, ssid, len);
wdev->wext.ibss.ssid_len = len;
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = cfg80211_ibss_wext_join(rdev, wdev);
wdev_unlock(wdev);
mutex_unlock(&rdev->devlist_mtx);
return err;
}
int cfg80211_ibss_wext_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
data->flags = 0;
wdev_lock(wdev);
if (wdev->ssid_len) {
data->flags = 1;
data->length = wdev->ssid_len;
memcpy(ssid, wdev->ssid, data->length);
} else if (wdev->wext.ibss.ssid && wdev->wext.ibss.ssid_len) {
data->flags = 1;
data->length = wdev->wext.ibss.ssid_len;
memcpy(ssid, wdev->wext.ibss.ssid, data->length);
}
wdev_unlock(wdev);
return 0;
}
int cfg80211_ibss_wext_siwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
u8 *bssid = ap_addr->sa_data;
int err;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
if (ap_addr->sa_family != ARPHRD_ETHER)
return -EINVAL;
/* automatic mode */
if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
bssid = NULL;
/* both automatic */
if (!bssid && !wdev->wext.ibss.bssid)
return 0;
/* fixed already - and no change */
if (wdev->wext.ibss.bssid && bssid &&
ether_addr_equal(bssid, wdev->wext.ibss.bssid))
return 0;
wdev_lock(wdev);
err = 0;
if (wdev->ssid_len)
err = __cfg80211_leave_ibss(rdev, dev, true);
wdev_unlock(wdev);
if (err)
return err;
if (bssid) {
memcpy(wdev->wext.bssid, bssid, ETH_ALEN);
wdev->wext.ibss.bssid = wdev->wext.bssid;
} else
wdev->wext.ibss.bssid = NULL;
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = cfg80211_ibss_wext_join(rdev, wdev);
wdev_unlock(wdev);
mutex_unlock(&rdev->devlist_mtx);
return err;
}
int cfg80211_ibss_wext_giwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
ap_addr->sa_family = ARPHRD_ETHER;
wdev_lock(wdev);
if (wdev->current_bss)
memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
else if (wdev->wext.ibss.bssid)
memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
else
memset(ap_addr->sa_data, 0, ETH_ALEN);
wdev_unlock(wdev);
return 0;
}
#endif
| gpl-2.0 |
thicklizard/AwesomeSauce1_1 | drivers/power/pda_power.c | 1945 | 10610 | /*
* Common power driver for PDAs and phones with one or two external
* power supplies (AC/USB) connected to main and backup batteries,
* and optional builtin charger.
*
* Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/pda_power.h>
#include <linux/regulator/consumer.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/usb/otg.h>
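/* Illustrative board wiring only, assuming hypothetical board helpers
 * board_ac_online()/board_usb_online() that return int; a minimal
 * sketch, not part of this driver:
 *
 *	static struct pda_power_pdata board_power_pdata = {
 *		.is_ac_online	= board_ac_online,
 *		.is_usb_online	= board_usb_online,
 *	};
 *
 *	static struct platform_device board_power_dev = {
 *		.name	= "pda-power",
 *		.id	= -1,
 *		.dev	= { .platform_data = &board_power_pdata, },
 *	};
 *
 *	platform_device_register(&board_power_dev);
 */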
static inline unsigned int get_irq_flags(struct resource *res)
{
unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
flags |= res->flags & IRQF_TRIGGER_MASK;
return flags;
}
static struct device *dev;
static struct pda_power_pdata *pdata;
static struct resource *ac_irq, *usb_irq;
static struct timer_list charger_timer;
static struct timer_list supply_timer;
static struct timer_list polling_timer;
static int polling;
#ifdef CONFIG_USB_OTG_UTILS
static struct otg_transceiver *transceiver;
#endif
static struct regulator *ac_draw;
enum {
PDA_PSY_OFFLINE = 0,
PDA_PSY_ONLINE = 1,
PDA_PSY_TO_CHANGE,
};
static int new_ac_status = -1;
static int new_usb_status = -1;
static int ac_status = -1;
static int usb_status = -1;
static int pda_power_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
if (psy->type == POWER_SUPPLY_TYPE_MAINS)
val->intval = pdata->is_ac_online ?
pdata->is_ac_online() : 0;
else
val->intval = pdata->is_usb_online ?
pdata->is_usb_online() : 0;
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property pda_power_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
static char *pda_power_supplied_to[] = {
"main-battery",
"backup-battery",
};
static struct power_supply pda_psy_ac = {
.name = "ac",
.type = POWER_SUPPLY_TYPE_MAINS,
.supplied_to = pda_power_supplied_to,
.num_supplicants = ARRAY_SIZE(pda_power_supplied_to),
.properties = pda_power_props,
.num_properties = ARRAY_SIZE(pda_power_props),
.get_property = pda_power_get_property,
};
static struct power_supply pda_psy_usb = {
.name = "usb",
.type = POWER_SUPPLY_TYPE_USB,
.supplied_to = pda_power_supplied_to,
.num_supplicants = ARRAY_SIZE(pda_power_supplied_to),
.properties = pda_power_props,
.num_properties = ARRAY_SIZE(pda_power_props),
.get_property = pda_power_get_property,
};
static void update_status(void)
{
if (pdata->is_ac_online)
new_ac_status = !!pdata->is_ac_online();
if (pdata->is_usb_online)
new_usb_status = !!pdata->is_usb_online();
}
static void update_charger(void)
{
static int regulator_enabled;
int max_uA = pdata->ac_max_uA;
if (pdata->set_charge) {
if (new_ac_status > 0) {
dev_dbg(dev, "charger on (AC)\n");
pdata->set_charge(PDA_POWER_CHARGE_AC);
} else if (new_usb_status > 0) {
dev_dbg(dev, "charger on (USB)\n");
pdata->set_charge(PDA_POWER_CHARGE_USB);
} else {
dev_dbg(dev, "charger off\n");
pdata->set_charge(0);
}
} else if (ac_draw) {
if (new_ac_status > 0) {
regulator_set_current_limit(ac_draw, max_uA, max_uA);
if (!regulator_enabled) {
dev_dbg(dev, "charger on (AC)\n");
regulator_enable(ac_draw);
regulator_enabled = 1;
}
} else {
if (regulator_enabled) {
dev_dbg(dev, "charger off\n");
regulator_disable(ac_draw);
regulator_enabled = 0;
}
}
}
}
static void supply_timer_func(unsigned long unused)
{
if (ac_status == PDA_PSY_TO_CHANGE) {
ac_status = new_ac_status;
power_supply_changed(&pda_psy_ac);
}
if (usb_status == PDA_PSY_TO_CHANGE) {
usb_status = new_usb_status;
power_supply_changed(&pda_psy_usb);
}
}
static void psy_changed(void)
{
update_charger();
/*
* Okay, charger set. Now wait a bit before notifying supplicants,
* charge power should stabilize.
*/
mod_timer(&supply_timer,
jiffies + msecs_to_jiffies(pdata->wait_for_charger));
}
static void charger_timer_func(unsigned long unused)
{
update_status();
psy_changed();
}
static irqreturn_t power_changed_isr(int irq, void *power_supply)
{
if (power_supply == &pda_psy_ac)
ac_status = PDA_PSY_TO_CHANGE;
else if (power_supply == &pda_psy_usb)
usb_status = PDA_PSY_TO_CHANGE;
else
return IRQ_NONE;
/*
* Wait a bit before reading ac/usb line status and setting charger,
* because ac/usb status readings may lag from irq.
*/
mod_timer(&charger_timer,
jiffies + msecs_to_jiffies(pdata->wait_for_status));
return IRQ_HANDLED;
}
static void polling_timer_func(unsigned long unused)
{
int changed = 0;
dev_dbg(dev, "polling...\n");
update_status();
if (!ac_irq && new_ac_status != ac_status) {
ac_status = PDA_PSY_TO_CHANGE;
changed = 1;
}
if (!usb_irq && new_usb_status != usb_status) {
usb_status = PDA_PSY_TO_CHANGE;
changed = 1;
}
if (changed)
psy_changed();
mod_timer(&polling_timer,
jiffies + msecs_to_jiffies(pdata->polling_interval));
}
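/* Polling is only armed (see probe below) when a supply has no IRQ
 * resource; the timer then re-arms itself every pdata->polling_interval
 * milliseconds.
 */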
#ifdef CONFIG_USB_OTG_UTILS
static int otg_is_usb_online(void)
{
return (transceiver->state == OTG_STATE_B_PERIPHERAL);
}
#endif
static int pda_power_probe(struct platform_device *pdev)
{
int ret = 0;
dev = &pdev->dev;
if (pdev->id != -1) {
dev_err(dev, "it's meaningless to register several "
"pda_powers; use id = -1\n");
ret = -EINVAL;
goto wrongid;
}
pdata = pdev->dev.platform_data;
if (pdata->init) {
ret = pdata->init(dev);
if (ret < 0)
goto init_failed;
}
update_status();
update_charger();
if (!pdata->wait_for_status)
pdata->wait_for_status = 500;
if (!pdata->wait_for_charger)
pdata->wait_for_charger = 500;
if (!pdata->polling_interval)
pdata->polling_interval = 2000;
if (!pdata->ac_max_uA)
pdata->ac_max_uA = 500000;
setup_timer(&charger_timer, charger_timer_func, 0);
setup_timer(&supply_timer, supply_timer_func, 0);
ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");
if (pdata->supplied_to) {
pda_psy_ac.supplied_to = pdata->supplied_to;
pda_psy_ac.num_supplicants = pdata->num_supplicants;
pda_psy_usb.supplied_to = pdata->supplied_to;
pda_psy_usb.num_supplicants = pdata->num_supplicants;
}
ac_draw = regulator_get(dev, "ac_draw");
if (IS_ERR(ac_draw)) {
dev_dbg(dev, "couldn't get ac_draw regulator\n");
ret = PTR_ERR(ac_draw);
ac_draw = NULL;
}
if (pdata->is_ac_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_ac);
if (ret) {
dev_err(dev, "failed to register %s power supply\n",
pda_psy_ac.name);
goto ac_supply_failed;
}
if (ac_irq) {
ret = request_irq(ac_irq->start, power_changed_isr,
get_irq_flags(ac_irq), ac_irq->name,
&pda_psy_ac);
if (ret) {
dev_err(dev, "request ac irq failed\n");
goto ac_irq_failed;
}
} else {
polling = 1;
}
}
#ifdef CONFIG_USB_OTG_UTILS
transceiver = otg_get_transceiver();
if (transceiver && !pdata->is_usb_online) {
pdata->is_usb_online = otg_is_usb_online;
}
#endif
if (pdata->is_usb_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_usb);
if (ret) {
dev_err(dev, "failed to register %s power supply\n",
pda_psy_usb.name);
goto usb_supply_failed;
}
if (usb_irq) {
ret = request_irq(usb_irq->start, power_changed_isr,
get_irq_flags(usb_irq),
usb_irq->name, &pda_psy_usb);
if (ret) {
dev_err(dev, "request usb irq failed\n");
goto usb_irq_failed;
}
} else {
polling = 1;
}
}
if (polling) {
dev_dbg(dev, "will poll for status\n");
setup_timer(&polling_timer, polling_timer_func, 0);
mod_timer(&polling_timer,
jiffies + msecs_to_jiffies(pdata->polling_interval));
}
if (ac_irq || usb_irq)
device_init_wakeup(&pdev->dev, 1);
return 0;
usb_irq_failed:
if (pdata->is_usb_online)
power_supply_unregister(&pda_psy_usb);
usb_supply_failed:
if (pdata->is_ac_online && ac_irq)
free_irq(ac_irq->start, &pda_psy_ac);
#ifdef CONFIG_USB_OTG_UTILS
if (transceiver)
otg_put_transceiver(transceiver);
#endif
ac_irq_failed:
if (pdata->is_ac_online)
power_supply_unregister(&pda_psy_ac);
ac_supply_failed:
if (ac_draw) {
regulator_put(ac_draw);
ac_draw = NULL;
}
if (pdata->exit)
pdata->exit(dev);
init_failed:
wrongid:
return ret;
}
static int pda_power_remove(struct platform_device *pdev)
{
if (pdata->is_usb_online && usb_irq)
free_irq(usb_irq->start, &pda_psy_usb);
if (pdata->is_ac_online && ac_irq)
free_irq(ac_irq->start, &pda_psy_ac);
if (polling)
del_timer_sync(&polling_timer);
del_timer_sync(&charger_timer);
del_timer_sync(&supply_timer);
if (pdata->is_usb_online)
power_supply_unregister(&pda_psy_usb);
if (pdata->is_ac_online)
power_supply_unregister(&pda_psy_ac);
#ifdef CONFIG_USB_OTG_UTILS
if (transceiver)
otg_put_transceiver(transceiver);
#endif
if (ac_draw) {
regulator_put(ac_draw);
ac_draw = NULL;
}
if (pdata->exit)
pdata->exit(dev);
return 0;
}
#ifdef CONFIG_PM
static int ac_wakeup_enabled;
static int usb_wakeup_enabled;
static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
{
if (pdata->suspend) {
int ret = pdata->suspend(state);
if (ret)
return ret;
}
if (device_may_wakeup(&pdev->dev)) {
if (ac_irq)
ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
if (usb_irq)
usb_wakeup_enabled = !enable_irq_wake(usb_irq->start);
}
return 0;
}
static int pda_power_resume(struct platform_device *pdev)
{
if (device_may_wakeup(&pdev->dev)) {
if (usb_irq && usb_wakeup_enabled)
disable_irq_wake(usb_irq->start);
if (ac_irq && ac_wakeup_enabled)
disable_irq_wake(ac_irq->start);
}
if (pdata->resume)
return pdata->resume();
return 0;
}
#else
#define pda_power_suspend NULL
#define pda_power_resume NULL
#endif /* CONFIG_PM */
MODULE_ALIAS("platform:pda-power");
static struct platform_driver pda_power_pdrv = {
.driver = {
.name = "pda-power",
},
.probe = pda_power_probe,
.remove = pda_power_remove,
.suspend = pda_power_suspend,
.resume = pda_power_resume,
};
static int __init pda_power_init(void)
{
return platform_driver_register(&pda_power_pdrv);
}
static void __exit pda_power_exit(void)
{
platform_driver_unregister(&pda_power_pdrv);
}
module_init(pda_power_init);
module_exit(pda_power_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
| gpl-2.0 |
Jackeagle/android_kernel_samsung_a7lte | arch/mips/kvm/kvm_trap_emul.c | 2457 | 12751 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
gpa_t gpa;
uint32_t kseg = KSEGX(gva);
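/* KSEG0/KSEG1 are unmapped windows onto low physical memory, so the
 * guest physical address is simply the offset into the segment. */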
if ((kseg == CKSEG0) || (kseg == CKSEG1))
gpa = CPHYSADDR(gva);
else {
printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
kvm_mips_dump_host_tlbs();
gpa = KVM_INVALID_ADDR;
}
#ifdef DEBUG
kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
#endif
return gpa;
}
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
} else
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
switch (er) {
case EMULATE_DONE:
ret = RESUME_GUEST;
break;
case EMULATE_FAIL:
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
case EMULATE_WAIT:
run->exit_reason = KVM_EXIT_INTR;
ret = RESUME_HOST;
break;
default:
BUG();
}
return ret;
}
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
kvm_debug
("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
#endif
er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
/* XXXKYMA: The guest kernel does not expect to get this fault when we are not
* using HIGHMEM. Need to address this in a HIGHMEM kernel
*/
printk
("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} else {
printk
("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
&& KVM_GUEST_KERNEL_MODE(vcpu)) {
if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
kvm_debug
("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
#endif
er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
/* All KSEG0 faults are handled by KVM, as the guest kernel does not
* expect to ever get them
*/
if (kvm_mips_handle_kseg0_tlb_fault
(vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else {
kvm_err
("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
&& KVM_GUEST_KERNEL_MODE(vcpu)) {
if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
vcpu->arch.pc, badvaddr);
#endif
/* User Address (UA) fault, this could happen if
* (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
* case we pass on the fault to the guest kernel and let it handle it.
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this
* case we inject the TLB from the Guest TLB into the shadow host TLB
*/
er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
if (kvm_mips_handle_kseg0_tlb_fault
(vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else {
printk
("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (KVM_GUEST_KERNEL_MODE(vcpu)
&& (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
#ifdef DEBUG
kvm_debug("Emulate Store to MMIO space\n");
#endif
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
if (er == EMULATE_FAIL) {
printk("Emulate Store to MMIO space failed\n");
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
}
} else {
printk
("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
#ifdef DEBUG
kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
#endif
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
if (er == EMULATE_FAIL) {
printk("Emulate Load from MMIO space failed\n");
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
}
} else {
printk
("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
er = EMULATE_FAIL;
}
return ret;
}
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
er = kvm_mips_handle_ri(cause, opc, run, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
unsigned long cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
if (er == EMULATE_DONE)
ret = RESUME_GUEST;
else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
}
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
return 0;
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
uint32_t config1;
int vcpu_id = vcpu->vcpu_id;
/* Arch specific stuff: set up the config registers properly so that the
* guest will come up as expected; for now we simulate a
* MIPS 24kc
*/
kvm_write_c0_guest_prid(cop0, 0x00019300);
kvm_write_c0_guest_config(cop0,
MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
(MMU_TYPE_R4000 << CP0C0_MT));
/* Read the cache characteristics from the host Config1 Register */
config1 = (read_c0_config1() & ~0x7f);
/* Set up MMU size */
config1 &= ~(0x3f << 25);
config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
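/* The MMU Size field of Config1 lives in bits 30:25 and holds the
 * number of TLB entries minus one, hence the "- 1" above. */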
/* We unset some bits that we aren't emulating */
config1 &=
~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
(1 << CP0C1_WR) | (1 << CP0C1_CA));
kvm_write_c0_guest_config1(cop0, config1);
kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
kvm_write_c0_guest_config3(cop0,
MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
CP0C3_ULRI));
/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
/* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */
kvm_write_c0_guest_intctl(cop0, 0xFC000000);
/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
return 0;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
/* exit handlers */
.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
.handle_syscall = kvm_trap_emul_handle_syscall,
.handle_res_inst = kvm_trap_emul_handle_res_inst,
.handle_break = kvm_trap_emul_handle_break,
.vm_init = kvm_trap_emul_vm_init,
.vcpu_init = kvm_trap_emul_vcpu_init,
.vcpu_setup = kvm_trap_emul_vcpu_setup,
.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
.queue_timer_int = kvm_mips_queue_timer_int_cb,
.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
.queue_io_int = kvm_mips_queue_io_int_cb,
.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
.irq_deliver = kvm_mips_irq_deliver_cb,
.irq_clear = kvm_mips_irq_clear_cb,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
*install_callbacks = &kvm_trap_emul_callbacks;
return 0;
}
| gpl-2.0 |
rachitrawat/Vengeance-Kernel-MSM7x27-JLO | drivers/md/md.c | 2457 | 218979 | /*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
Changes:
- RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
- RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
- boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
- kerneld support by Boris Tobotras <boris@xtalk.msk.su>
- kmod support by: Cyrus Durgin
- RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
- Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
- lots of fixes and improvements to the RAID1/RAID5 and generic
RAID code (such as request based resynchronization):
Neil Brown <neilb@cse.unsw.edu.au>.
- persistent bitmap code
Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
#ifndef MODULE
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock does extra service to protect accesses to
* mddev->thread when the mutex cannot be held.
*/
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
static void md_print_devices(void);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
* count by 2 for every hour elapsed between read errors.
*/
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwidth if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction
* speed limit - in case reconstruction slows down your system despite
* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
* or /sys/block/mdX/md/sync_speed_{min,max}
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
static inline int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;
static ctl_table raid_table[] = {
{
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.proc_handler = proc_dointvec,
},
{ }
};
static ctl_table raid_dir_table[] = {
{
.procname = "raid",
.maxlen = 0,
.mode = S_IRUGO|S_IXUGO,
.child = raid_table,
},
{ }
};
static ctl_table raid_root_table[] = {
{
.procname = "dev",
.maxlen = 0,
.mode = 0555,
.child = raid_dir_table,
},
{ }
};
static const struct block_device_operations md_fops;
static int start_readonly;
/* bio_clone_mddev
* like bio_clone, but with a local bio set
*/
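/* This assumes the bio_set was created with a front pad big enough for
 * one struct mddev pointer, which bio_alloc_mddev() stashes immediately
 * in front of the bio so that mddevp[-1] below can recover it. */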
static void mddev_bio_destructor(struct bio *bio)
{
struct mddev *mddev, **mddevp;
mddevp = (void*)bio;
mddev = mddevp[-1];
bio_free(bio, mddev->bio_set);
}
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
struct bio *b;
struct mddev **mddevp;
if (!mddev || !mddev->bio_set)
return bio_alloc(gfp_mask, nr_iovecs);
b = bio_alloc_bioset(gfp_mask, nr_iovecs,
mddev->bio_set);
if (!b)
return NULL;
mddevp = (void*)b;
mddevp[-1] = mddev;
b->bi_destructor = mddev_bio_destructor;
return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev)
{
struct bio *b;
struct mddev **mddevp;
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
mddev->bio_set);
if (!b)
return NULL;
mddevp = (void*)b;
mddevp[-1] = mddev;
b->bi_destructor = mddev_bio_destructor;
__bio_clone(b, bio);
if (bio_integrity(bio)) {
int ret;
ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
if (ret < 0) {
bio_put(b);
return NULL;
}
}
return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
void md_trim_bio(struct bio *bio, int offset, int size)
{
/* 'bio' is a cloned bio which we need to trim to match
* the given offset and size.
* This requires adjusting bi_sector, bi_size, and bi_io_vec
*/
int i;
struct bio_vec *bvec;
int sofar = 0;
size <<= 9;
if (offset == 0 && size == bio->bi_size)
return;
bio->bi_sector += offset;
bio->bi_size = size;
offset <<= 9;
clear_bit(BIO_SEG_VALID, &bio->bi_flags);
while (bio->bi_idx < bio->bi_vcnt &&
bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
/* remove this whole bio_vec */
offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
bio->bi_idx++;
}
if (bio->bi_idx < bio->bi_vcnt) {
bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
}
/* avoid any complications with bi_idx being non-zero*/
if (bio->bi_idx) {
memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
bio->bi_vcnt -= bio->bi_idx;
bio->bi_idx = 0;
}
/* Make sure vcnt and last bv are not too big */
bio_for_each_segment(bvec, bio, i) {
if (sofar + bvec->bv_len > size)
bvec->bv_len = size - sofar;
if (bvec->bv_len == 0) {
bio->bi_vcnt = i;
break;
}
sofar += bvec->bv_len;
}
}
EXPORT_SYMBOL_GPL(md_trim_bio);
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
* can use 'poll' or 'select' to find out when the event
* count increases.
*
* Events are:
* start array, stop array, error, add device, remove device,
* start build, activate spare
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
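/* A minimal userspace sketch of consuming these events via /proc/mdstat
 * (error handling omitted, illustrative only). The event count is
 * snapshotted when the file is opened, so re-open to re-arm:
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .events = POLLPRI };
 *
 *	for (;;) {
 *		pfd.fd = open("/proc/mdstat", O_RDONLY);
 *		read(pfd.fd, buf, sizeof(buf));    (consume current state)
 *		poll(&pfd, 1, -1);                 (blocks until md_new_event())
 *		close(pfd.fd);                     (re-open re-arms the poll)
 *	}
 */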
/* Alternate version that can be called from interrupts
* when calling sysfs_notify isn't needed.
*/
static void md_new_event_inintr(struct mddev *mddev)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
}
/*
* Enables to iterate over all existing md arrays
* all_mddevs_lock protects this list.
*/
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
* the list, and to always hold a refcount when unlocked.
* Any code which breaks out of this loop still owns a reference
* to the current mddev and must mddev_put it.
*/
#define for_each_mddev(_mddev,_tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
_tmp = all_mddevs.next; \
_mddev = NULL;}); \
({ if (_tmp != &all_mddevs) \
mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
spin_unlock(&all_mddevs_lock); \
if (_mddev) mddev_put(_mddev); \
_mddev = list_entry(_tmp, struct mddev, all_mddevs); \
_tmp != &all_mddevs;}); \
({ spin_lock(&all_mddevs_lock); \
_tmp = _tmp->next;}) \
)
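/* A sketch of typical use, matching the callers elsewhere in this file:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		...
 *	}
 */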
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
* We hold a refcount over the call to ->make_request. By the time that
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
static void md_make_request(struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
int cpu;
unsigned int sectors;
if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
bio_io_error(bio);
return;
}
smp_rmb(); /* Ensure implications of 'active' are visible */
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
if (!mddev->suspended)
break;
rcu_read_unlock();
schedule();
rcu_read_lock();
}
finish_wait(&mddev->sb_wait, &__wait);
}
atomic_inc(&mddev->active_io);
rcu_read_unlock();
/*
* save the sectors now since our bio can
* go away inside make_request
*/
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
part_stat_unlock();
if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
}
/* mddev_suspend makes sure no new requests are submitted
* to the device, and that any requests that have been submitted
* are completely handled.
* Once ->stop is called and completes, the module will be completely
* unused.
*/
void mddev_suspend(struct mddev *mddev)
{
BUG_ON(mddev->suspended);
mddev->suspended = 1;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
mddev->suspended = 0;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
int mddev_congested(struct mddev *mddev, int bits)
{
return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
/*
* Generic flush handling for md
*/
static void md_end_flush(struct bio *bio, int err)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&mddev->flush_pending)) {
/* The pre-request flush has finished */
queue_work(md_wq, &mddev->flush_work);
}
bio_put(bio);
}
static void md_submit_flush_data(struct work_struct *ws);
static void submit_flushes(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
INIT_WORK(&mddev->flush_work, md_submit_flush_data);
atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags)) {
/* Take two references, one is dropped
* when request finishes, one after
* we reclaim rcu_read_lock
*/
struct bio *bi;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
atomic_inc(&mddev->flush_pending);
submit_bio(WRITE_FLUSH, bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
if (atomic_dec_and_test(&mddev->flush_pending))
queue_work(md_wq, &mddev->flush_work);
}
static void md_submit_flush_data(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct bio *bio = mddev->flush_bio;
if (bio->bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio, 0);
else {
bio->bi_rw &= ~REQ_FLUSH;
mddev->pers->make_request(mddev, bio);
}
mddev->flush_bio = NULL;
wake_up(&mddev->sb_wait);
}
void md_flush_request(struct mddev *mddev, struct bio *bio)
{
spin_lock_irq(&mddev->write_lock);
wait_event_lock_irq(mddev->sb_wait,
!mddev->flush_bio,
mddev->write_lock, /*nothing*/);
mddev->flush_bio = bio;
spin_unlock_irq(&mddev->write_lock);
INIT_WORK(&mddev->flush_work, submit_flushes);
queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
/* Support for plugging.
* This mirrors the plugging support in request_queue, but does not
* require having a whole queue or request structures.
* We allocate an md_plug_cb for each md device and each thread it gets
plugged on. This links to the private plug_handle structure in the
* personality data where we keep a count of the number of outstanding
* plugs so other code can see if a plug is active.
*/
struct md_plug_cb {
struct blk_plug_cb cb;
struct mddev *mddev;
};
static void plugger_unplug(struct blk_plug_cb *cb)
{
struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
md_wakeup_thread(mdcb->mddev->thread);
kfree(mdcb);
}
/* Check that an unplug wakeup will come shortly.
* If not, wake up the md thread immediately
*/
int mddev_check_plugged(struct mddev *mddev)
{
struct blk_plug *plug = current->plug;
struct md_plug_cb *mdcb;
if (!plug)
return 0;
list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
if (mdcb->cb.callback == plugger_unplug &&
mdcb->mddev == mddev) {
/* Already on the list, move to top */
if (mdcb != list_first_entry(&plug->cb_list,
struct md_plug_cb,
cb.list))
list_move(&mdcb->cb.list, &plug->cb_list);
return 1;
}
}
/* Not currently on the callback list */
mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
if (!mdcb)
return 0;
mdcb->mddev = mddev;
mdcb->cb.callback = plugger_unplug;
atomic_inc(&mddev->plug_cnt);
list_add(&mdcb->cb.list, &plug->cb_list);
return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
return mddev;
}
static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
struct bio_set *bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
* flush_workqueue() after mddev_find will
* succeed in waiting for the work to be done.
*/
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
queue_work(md_misc_wq, &mddev->del_work);
} else
kfree(mddev);
}
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
}
void mddev_init(struct mddev *mddev)
{
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
init_timer(&mddev->safemode_timer);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev * mddev_find(dev_t unit)
{
struct mddev *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
unit &= ~((1<<MdpMinorShift)-1);
retry:
spin_lock(&all_mddevs_lock);
if (unit) {
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == unit) {
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
kfree(new);
return mddev;
}
if (new) {
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
new->hold_active = UNTIL_IOCTL;
return new;
}
} else if (new) {
/* find an unused unit number */
static int next_minor = 512;
int start = next_minor;
int is_free = 0;
int dev = 0;
while (!is_free) {
dev = MKDEV(MD_MAJOR, next_minor);
next_minor++;
if (next_minor > MINORMASK)
next_minor = 0;
if (next_minor == start) {
/* Oh dear, all in use. */
spin_unlock(&all_mddevs_lock);
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
is_free = 0;
break;
}
}
new->unit = dev;
new->md_minor = MINOR(dev);
new->hold_active = UNTIL_STOP;
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
return new;
}
spin_unlock(&all_mddevs_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
mddev_init(new);
goto retry;
}
static inline int mddev_lock(struct mddev * mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
static inline int mddev_is_locked(struct mddev *mddev)
{
return mutex_is_locked(&mddev->reconfig_mutex);
}
static inline int mddev_trylock(struct mddev * mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
}
static struct attribute_group md_redundancy_group;
static void mddev_unlock(struct mddev * mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
* an access to the files will try to take reconfig_mutex
* while holding the file unremovable, which leads to
* a deadlock.
* So we set sysfs_active while the remove is happening,
* and anything else which might set ->to_remove or may
* otherwise change the sysfs namespace will fail with
* -EBUSY if sysfs_active is still set.
* We set sysfs_active under reconfig_mutex and elsewhere
* test it under the same mutex to ensure its correct value
* is seen.
*/
struct attribute_group *to_remove = mddev->to_remove;
mddev->to_remove = NULL;
mddev->sysfs_active = 1;
mutex_unlock(&mddev->reconfig_mutex);
if (mddev->kobj.sd) {
if (to_remove != &md_redundancy_group)
sysfs_remove_group(&mddev->kobj, to_remove);
if (mddev->pers == NULL ||
mddev->pers->sync_request == NULL) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
}
}
mddev->sysfs_active = 0;
} else
mutex_unlock(&mddev->reconfig_mutex);
/* As we've dropped the mutex we need a spinlock to
* make sure the thread doesn't disappear
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->desc_nr == nr)
return rdev;
return NULL;
}
static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
{
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->bdev->bd_dev == dev)
return rdev;
return NULL;
}
static struct md_personality *find_pers(int level, char *clevel)
{
struct md_personality *pers;
list_for_each_entry(pers, &pers_list, list) {
if (level != LEVEL_NONE && pers->level == level)
return pers;
if (strcmp(pers->name, clevel)==0)
return pers;
}
return NULL;
}
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
return MD_NEW_SIZE_SECTORS(num_sectors);
}
static int alloc_disk_sb(struct md_rdev * rdev)
{
if (rdev->sb_page)
MD_BUG();
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
return -ENOMEM;
}
return 0;
}
static void free_disk_sb(struct md_rdev * rdev)
{
if (rdev->sb_page) {
put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
rdev->sb_start = 0;
rdev->sectors = 0;
}
if (rdev->bb_page) {
put_page(rdev->bb_page);
rdev->bb_page = NULL;
}
}
static void super_written(struct bio *bio, int error)
{
struct md_rdev *rdev = bio->bi_private;
struct mddev *mddev = rdev->mddev;
if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk("md: super_written gets error=%d, uptodate=%d\n",
error, test_bit(BIO_UPTODATE, &bio->bi_flags));
WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
md_error(mddev, rdev);
}
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
bio_put(bio);
}
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page)
{
/* write first size bytes of page to sector of rdev
* Increment mddev->pending_writes before returning
* and decrement it on completion, waking up sb_wait
* if zero is reached.
* If an error occurred, call md_error
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
submit_bio(WRITE_FLUSH_FUA, bio);
}
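/* Callers typically pair this with md_super_wait(), as
 * super_90_rdev_size_change() below does:
 *
 *	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(rdev->mddev);
 */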
void md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
DEFINE_WAIT(wq);
for(;;) {
prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
if (atomic_read(&mddev->pending_writes)==0)
break;
schedule();
}
finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
complete((struct completion*)bio->bi_private);
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
struct completion event;
int ret;
rw |= REQ_SYNC;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
bio->bi_sector = sector + rdev->sb_start;
else
bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
init_completion(&event);
bio->bi_private = &event;
bio->bi_end_io = bi_complete;
submit_bio(rw, bio);
wait_for_completion(&event);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio);
return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(struct md_rdev * rdev, int size)
{
char b[BDEVNAME_SIZE];
if (!rdev->sb_page) {
MD_BUG();
return -EINVAL;
}
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
fail:
printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
int ret;
mdp_super_t *tmp1, *tmp2;
tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
if (!tmp1 || !tmp2) {
ret = 0;
printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
goto abort;
}
*tmp1 = *sb1;
*tmp2 = *sb2;
/*
* nr_disks is not constant
*/
tmp1->nr_disks = 0;
tmp2->nr_disks = 0;
ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
kfree(tmp1);
kfree(tmp2);
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
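/* Worked example: md_csum_fold(0x1234ffff)
 *	pass 1: 0xffff + 0x1234  = 0x11233
 *	pass 2: 0x1233 + 0x1     = 0x1234
 */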
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
int i;
unsigned int disk_csum, csum;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
for (i = 0; i < MD_SB_BYTES/4 ; i++)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
* different architectures. It isn't critical that we get exactly
* the same return value as before (we always csum_fold before
* testing, and that removes any differences). However as we
* know that csum_partial always returned a 16bit value on
* alphas, do a fold to maximise conformity to previous behaviour.
*/
sb->sb_csum = md_csum_fold(disk_csum);
#else
sb->sb_csum = disk_csum;
#endif
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
* so we have a common interface to them all, and an array of
* different handlers.
* We rely on user-space to write the initial superblock, and support
* reading and updating of superblocks.
* Interface methods are:
* int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
* loads and validates a superblock on dev.
* if refdev != NULL, compare superblocks on both devices
* Return:
* 0 - dev has a superblock that is compatible with refdev
* 1 - dev has a superblock that is compatible and newer than refdev
* so dev should be used as the refdev in future
* -EINVAL superblock incompatible or invalid
* -othererror e.g. -EIO
*
* int validate_super(struct mddev *mddev, struct md_rdev *dev)
* Verify that dev is acceptable into mddev.
* The first time, mddev->raid_disks will be 0, and data from
* dev should be merged in. Subsequent calls check that dev
* is new enough. Return 0 or -EINVAL
*
* void sync_super(struct mddev *mddev, struct md_rdev *dev)
* Update the superblock for rdev with data in mddev
* This does not write to disc.
*
*/
struct super_type {
char *name;
struct module *owner;
int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev,
int minor_version);
int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev);
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
sector_t num_sectors);
};
/*
* Check that the given mddev has no bitmap.
*
* This function is called from the run method of all personalities that do not
* support bitmaps. It prints an error message and returns non-zero if mddev
* has a bitmap. Otherwise, it returns 0.
*
*/
int md_check_no_bitmap(struct mddev *mddev)
{
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
mdp_super_t *sb;
int ret;
/*
* Calculate the position of the superblock (512byte sectors),
* it's at the end of the disk.
*
* It also happens to be a multiple of 4Kb.
*/
rdev->sb_start = calc_dev_sboffset(rdev);
ret = read_disk_sb(rdev, MD_SB_BYTES);
if (ret) return ret;
ret = -EINVAL;
bdevname(rdev->bdev, b);
sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
b);
goto abort;
}
if (sb->major_version != 0 ||
sb->minor_version < 90 ||
sb->minor_version > 91) {
printk(KERN_WARNING "Bad version number %d.%d on %s\n",
sb->major_version, sb->minor_version,
b);
goto abort;
}
if (sb->raid_disks <= 0)
goto abort;
if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
b);
goto abort;
}
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
rdev->badblocks.shift = -1;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
rdev->desc_nr = sb->this_disk.number;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
goto abort;
}
if (!sb_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has same UUID"
" but different superblock to %s\n",
b, bdevname(refdev->bdev, b2));
goto abort;
}
ev1 = md_event(sb);
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that */
if (rdev->sectors >= (2ULL << 32))
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
abort:
return ret;
}
/*
* validate_super for 0.90.0
*/
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_disk_t *desc;
mdp_super_t *sb = page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
mddev->external = 0;
mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
mddev->new_chunk_sectors = sb->new_chunk >> 9;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
mddev->recovery_cp = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
mddev->bitmap_info.file == NULL)
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling, except
* for spares (which don't need an event count) */
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
* older device ... but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
desc = sb->disks + rdev->desc_nr;
if (desc->state & (1<<MD_DISK_FAULTY))
set_bit(Faulty, &rdev->flags);
else if (desc->state & (1<<MD_DISK_SYNC) /* &&
desc->raid_disk < mddev->raid_disks */) {
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = desc->raid_disk;
} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
/* active but not in sync implies recovery up to
* reshape position. We don't know exactly where
* that is, so set to zero for now */
if (mddev->minor_version >= 91) {
rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
}
}
if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
return 0;
}
/*
* sync_super for 0.90.0
*/
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
mdp_super_t *sb;
struct md_rdev *rdev2;
int next_spare = mddev->raid_disks;
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
* 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
* 3/ any empty disks < next_spare become removed
*
* disks[0] gets initialised to REMOVED because
* we cannot be sure from other fields if it has
* been initialised or not.
*/
int i;
int active=0, working=0,failed=0,spare=0,nr_disks=0;
rdev->sb_size = MD_SB_BYTES;
sb = page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
sb->md_magic = MD_SB_MAGIC;
sb->major_version = mddev->major_version;
sb->patch_version = mddev->patch_version;
sb->gvalid_words = 0; /* ignored */
memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
memcpy(&sb->set_uuid3, mddev->uuid+12,4);
sb->ctime = mddev->ctime;
sb->level = mddev->level;
sb->size = mddev->dev_sectors / 2;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
sb->events_lo = (u32)mddev->events;
if (mddev->reshape_position == MaxSector)
sb->minor_version = 90;
else {
sb->minor_version = 91;
sb->reshape_position = mddev->reshape_position;
sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout;
sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
sb->recovery_cp = mddev->recovery_cp;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
if (mddev->recovery_cp == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
rdev_for_each(rdev2, mddev) {
mdp_disk_t *d;
int desc_nr;
int is_active = test_bit(In_sync, &rdev2->flags);
if (rdev2->raid_disk >= 0 &&
sb->minor_version >= 91)
/* we have nowhere to store the recovery_offset,
* but if it is not below the reshape_position,
* we can piggy-back on that.
*/
is_active = 1;
if (rdev2->raid_disk < 0 ||
test_bit(Faulty, &rdev2->flags))
is_active = 0;
if (is_active)
desc_nr = rdev2->raid_disk;
else
desc_nr = next_spare++;
rdev2->desc_nr = desc_nr;
d = &sb->disks[rdev2->desc_nr];
nr_disks++;
d->number = rdev2->desc_nr;
d->major = MAJOR(rdev2->bdev->bd_dev);
d->minor = MINOR(rdev2->bdev->bd_dev);
if (is_active)
d->raid_disk = rdev2->raid_disk;
else
d->raid_disk = rdev2->desc_nr; /* compatibility */
if (test_bit(Faulty, &rdev2->flags))
d->state = (1<<MD_DISK_FAULTY);
else if (is_active) {
d->state = (1<<MD_DISK_ACTIVE);
if (test_bit(In_sync, &rdev2->flags))
d->state |= (1<<MD_DISK_SYNC);
active++;
working++;
} else {
d->state = 0;
spare++;
working++;
}
if (test_bit(WriteMostly, &rdev2->flags))
d->state |= (1<<MD_DISK_WRITEMOSTLY);
}
/* now set the "removed" and "faulty" bits on any missing devices */
for (i=0 ; i < mddev->raid_disks ; i++) {
mdp_disk_t *d = &sb->disks[i];
if (d->state == 0 && d->number == 0) {
d->number = i;
d->raid_disk = i;
d->state = (1<<MD_DISK_REMOVED);
d->state |= (1<<MD_DISK_FAULTY);
failed++;
}
}
sb->nr_disks = nr_disks;
sb->active_disks = active;
sb->working_disks = working;
sb->failed_disks = failed;
sb->spare_disks = spare;
sb->this_disk = sb->disks[rdev->desc_nr];
sb->sb_csum = calc_sb_csum(sb);
}
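/*
 * Illustrative sketch only: the d->state words built above are bitmasks.
 * The EX_* values are local stand-ins chosen to match the MD_DISK_* bit
 * numbers in md_p.h (FAULTY=0, ACTIVE=1, SYNC=2, REMOVED=3); the helper
 * itself is invented for the example.
 */
#if 0 /* example only */
enum {
	EX_FAULTY  = 1 << 0,	/* 1<<MD_DISK_FAULTY */
	EX_ACTIVE  = 1 << 1,	/* 1<<MD_DISK_ACTIVE */
	EX_SYNC    = 1 << 2,	/* 1<<MD_DISK_SYNC */
	EX_REMOVED = 1 << 3,	/* 1<<MD_DISK_REMOVED */
};

static int ex_member_state(int faulty, int in_sync)
{
	if (faulty)
		return EX_FAULTY;
	if (in_sync)
		return EX_ACTIVE | EX_SYNC;	/* healthy member */
	return 0;				/* spare */
}
#endif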
/*
* rdev_size_change for 0.90.0
*/
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
if (num_sectors >= (2ULL << 32))
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
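/*
 * Illustrative sketch only: the 0.90 superblock stores the component
 * size as a 32-bit count of 1K blocks, hence the 4TB (2^32 KB ==
 * 2*2^32 sectors) clamp above.  A hypothetical userspace restatement:
 */
#if 0 /* example only */
#include <stdio.h>

static unsigned long long ex_clamp_90_sectors(unsigned long long num_sectors)
{
	if (num_sectors >= (2ULL << 32))	/* 2^33 sectors == 4TB */
		num_sectors = (2ULL << 32) - 2;	/* keep a whole KB count */
	return num_sectors;
}

int main(void)
{
	/* a 5TB request collapses to just under the metadata limit */
	printf("%llu\n", ex_clamp_90_sectors(5ULL << 31)); /* 8589934590 */
	return 0;
}
#endif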
/*
* version 1 superblock
*/
static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
__le32 disk_csum;
u32 csum;
unsigned long long newcsum;
int size = 256 + le32_to_cpu(sb->max_dev)*2;
__le32 *isuper = (__le32*)sb;
int i;
disk_csum = sb->sb_csum;
sb->sb_csum = 0;
newcsum = 0;
for (i=0; size>=4; size -= 4 )
newcsum += le32_to_cpu(*isuper++);
if (size == 2)
newcsum += le16_to_cpu(*(__le16*) isuper);
csum = (newcsum & 0xffffffff) + (newcsum >> 32);
sb->sb_csum = disk_csum;
return cpu_to_le32(csum);
}
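/*
 * Illustrative sketch only: calc_sb_1_csum() accumulates little-endian
 * 32-bit words into a 64-bit sum, then folds the carries back into 32
 * bits in one step.  Hypothetical userspace restatement (assumes a
 * little-endian host so the le32 loads are plain reads):
 */
#if 0 /* example only */
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_fold_csum(const uint32_t *words, int n)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += words[i];	/* may grow past 32 bits */
	return (uint32_t)((sum & 0xffffffff) + (sum >> 32));
}

int main(void)
{
	uint32_t w[2] = { 0xffffffff, 0x00000002 };

	/* 0xffffffff + 2 = 0x100000001; fold: 1 + 1 = 2 */
	printf("%08x\n", ex_fold_csum(w, 2));
	return 0;
}
#endif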
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
int ret;
sector_t sb_start;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
/*
* Calculate the position of the superblock in 512byte sectors.
* It is always aligned to a 4K boundary and
* depending on minor_version, it can be:
* 0: At least 8K, but less than 12K, from end of device
* 1: At start of device
* 2: 4K from start of device.
*/
switch(minor_version) {
case 0:
sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
sb_start -= 8*2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
sb_start = 0;
break;
case 2:
sb_start = 8;
break;
default:
return -EINVAL;
}
rdev->sb_start = sb_start;
/* superblock is rarely larger than 1K, but it can be larger,
* and it is safe to read 4k, so we do that
*/
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
le64_to_cpu(sb->super_offset) != rdev->sb_start ||
(le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
return -EINVAL;
if (calc_sb_1_csum(sb) != sb->sb_csum) {
printk("md: invalid superblock checksum on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
if (le64_to_cpu(sb->data_size) < 10) {
printk("md: data_size too small on %s\n",
bdevname(rdev->bdev,b));
return -EINVAL;
}
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
rdev->desc_nr = -1;
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
if (!rdev->bb_page) {
rdev->bb_page = alloc_page(GFP_KERNEL);
if (!rdev->bb_page)
return -ENOMEM;
}
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
rdev->badblocks.count == 0) {
/* need to load the bad block list.
* Currently we limit it to one page.
*/
s32 offset;
sector_t bb_sector;
u64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
return -EINVAL;
offset = le32_to_cpu(sb->bblog_offset);
if (offset == 0)
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, READ, true))
return -EIO;
bbp = (u64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
int count = bb & (0x3ff);
u64 sector = bb >> 10;
sector <<= sb->bblog_shift;
count <<= sb->bblog_shift;
if (bb + 1 == 0)
break;
if (md_set_badblocks(&rdev->badblocks,
sector, count, 1) == 0)
return -EINVAL;
}
} else if (sb->bblog_offset == 0)
rdev->badblocks.shift = -1;
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
sb->layout != refsb->layout ||
sb->chunksize != refsb->chunksize) {
printk(KERN_WARNING "md: %s has strangely different"
" superblock to %s\n",
bdevname(rdev->bdev,b),
bdevname(refdev->bdev,b2));
return -EINVAL;
}
ev1 = le64_to_cpu(sb->events);
ev2 = le64_to_cpu(refsb->events);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
if (minor_version)
rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
le64_to_cpu(sb->data_offset);
else
rdev->sectors = rdev->sb_start;
if (rdev->sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
if (le64_to_cpu(sb->size) > rdev->sectors)
return -EINVAL;
return ret;
}
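/*
 * Illustrative sketch only: superblock placement for the three v1 minor
 * versions, mirroring the switch above.  The helper and its use of a
 * plain sector count instead of an inode are assumptions for the example.
 */
#if 0 /* example only */
#include <stdio.h>

static long long ex_sb_start(int minor_version,
			     unsigned long long dev_sectors)
{
	unsigned long long sb_start;

	switch (minor_version) {
	case 0:				/* 8K-12K from end, 4K aligned */
		sb_start = dev_sectors - 8 * 2;
		sb_start &= ~(unsigned long long)(4 * 2 - 1);
		return sb_start;
	case 1:				/* at the start of the device */
		return 0;
	case 2:				/* 4K (8 sectors) in */
		return 8;
	default:
		return -1;
	}
}

int main(void)
{
	/* 1003-sector device: 1003-16 = 987, aligned down to 984 */
	printf("%lld\n", ex_sb_start(0, 1003));
	return 0;
}
#endif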
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
mddev->external = 0;
mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_info.file == NULL )
mddev->bitmap_info.offset =
(__s32)le32_to_cpu(sb->bitmap_offset);
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling, except for
* spares (which don't need an event count) */
++ev1;
if (rdev->desc_nr >= 0 &&
rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* If adding to array with a bitmap, then we can accept an
* older device, but not too old.
*/
if (ev1 < mddev->bitmap->events_cleared)
return 0;
} else {
if (ev1 < mddev->events)
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
if (mddev->level != LEVEL_MULTIPATH) {
int role;
if (rdev->desc_nr < 0 ||
rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
role = 0xffff;
rdev->desc_nr = -1;
} else
role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
switch(role) {
case 0xffff: /* spare */
break;
case 0xfffe: /* faulty */
set_bit(Faulty, &rdev->flags);
break;
default:
if ((le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_OFFSET))
rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
else
set_bit(In_sync, &rdev->flags);
rdev->raid_disk = role;
break;
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH devices are always in sync */
set_bit(In_sync, &rdev->flags);
return 0;
}
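/*
 * Illustrative sketch only: how a 16-bit dev_roles[] entry decodes, per
 * the switch above.  The helper is invented for the example; the two
 * reserved values are the ones the driver tests for.
 */
#if 0 /* example only */
static const char *ex_role_name(unsigned int role)
{
	if (role == 0xffff)
		return "spare";
	if (role == 0xfffe)
		return "faulty";
	return "active";	/* role is the raid_disk slot number */
}
#endif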
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb;
struct md_rdev *rdev2;
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad1, 0, sizeof(sb->pad1));
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
else
sb->resync_offset = cpu_to_le64(0);
sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
else
sb->devflags &= ~WriteMostly1;
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
}
if (rdev->badblocks.count == 0)
/* Nothing to do for bad blocks*/ ;
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
u64 *bbp = (u64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
u64 internal_bb = *p++;
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
*bbp++ = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
bb->sector = (rdev->sb_start +
(int)le32_to_cpu(sb->bblog_offset));
bb->size = le16_to_cpu(sb->bblog_size);
}
}
max_dev = 0;
rdev_for_each(rdev2, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
} else
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
sb->sb_csum = calc_sb_1_csum(sb);
}
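/*
 * Illustrative sketch only: a bad-block record packs the start sector
 * into the top 54 bits and the length into the low 10 bits, matching
 * the (BB_OFFSET << 10) | BB_LEN encode above and the decode in
 * super_1_load().  The EX_* names are stand-ins for the kernel macros,
 * and the example ignores bblog_shift.
 */
#if 0 /* example only */
#include <stdint.h>
#include <stdio.h>

#define EX_BB_LEN(x)	((unsigned int)((x) & 0x3ff))
#define EX_BB_OFFSET(x)	((x) >> 10)

static uint64_t ex_bb_make(uint64_t sector, unsigned int len)
{
	return (sector << 10) | (len & 0x3ff);
}

int main(void)
{
	uint64_t bb = ex_bb_make(123456, 17);

	/* prints: sector=123456 len=17 */
	printf("sector=%llu len=%u\n",
	       (unsigned long long)EX_BB_OFFSET(bb), EX_BB_LEN(bb));
	return 0;
}
#endif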
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
struct mdp_superblock_1 *sb;
sector_t max_sectors;
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
/* minor version 0; superblock after data */
sector_t sb_start;
sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
sb_start &= ~(sector_t)(4*2 - 1);
max_sectors = rdev->sectors + sb_start - rdev->sb_start;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = cpu_to_le64(rdev->sb_start); /* on-disk field is __le64 */
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
}
static struct super_type super_types[] = {
[0] = {
.name = "0.90.0",
.owner = THIS_MODULE,
.load_super = super_90_load,
.validate_super = super_90_validate,
.sync_super = super_90_sync,
.rdev_size_change = super_90_rdev_size_change,
},
[1] = {
.name = "md-1",
.owner = THIS_MODULE,
.load_super = super_1_load,
.validate_super = super_1_validate,
.sync_super = super_1_sync,
.rdev_size_change = super_1_rdev_size_change,
},
};
static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
if (mddev->sync_super) {
mddev->sync_super(mddev, rdev);
return;
}
BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
super_types[mddev->major_version].sync_super(mddev, rdev);
}
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
struct md_rdev *rdev, *rdev2;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev1)
rdev_for_each_rcu(rdev2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
rcu_read_unlock();
return 1;
}
rcu_read_unlock();
return 0;
}
static LIST_HEAD(pending_raid_disks);
/*
* Try to register data integrity profile for an mddev
*
* This is called when an array is started and after a disk has been kicked
* from the array. It only succeeds if all working and active component devices
* are integrity capable with matching profiles.
*/
int md_integrity_register(struct mddev *mddev)
{
struct md_rdev *rdev, *reference = NULL;
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->raid_disk < 0)
continue;
if (!reference) {
/* Use the first rdev as the reference */
reference = rdev;
continue;
}
/* does this rdev's profile match the reference profile? */
if (blk_integrity_compare(reference->bdev->bd_disk,
rdev->bdev->bd_disk) < 0)
return -EINVAL;
}
if (!reference || !bdev_get_integrity(reference->bdev))
return 0;
/*
* All component devices are integrity capable and have matching
* profiles, register the common profile for the md device.
*/
if (blk_integrity_register(mddev->gendisk,
bdev_get_integrity(reference->bdev)) != 0) {
printk(KERN_ERR "md: failed to register integrity for %s\n",
mdname(mddev));
return -EINVAL;
}
printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
printk(KERN_ERR "md: failed to create integrity pool for %s\n",
mdname(mddev));
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
return;
if (rdev->raid_disk < 0) /* skip spares */
return;
if (bi_rdev && blk_integrity_compare(mddev->gendisk,
rdev->bdev->bd_disk) >= 0)
return;
printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
char *s;
int err;
if (rdev->mddev) {
MD_BUG();
return -EINVAL;
}
/* prevent duplicates */
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
/* make sure rdev->sectors exceeds mddev->dev_sectors */
if (rdev->sectors && (mddev->dev_sectors == 0 ||
rdev->sectors < mddev->dev_sectors)) {
if (mddev->pers) {
/* Cannot change size, so fail
* If mddev->level <= 0, then we don't care
* about aligning sizes (e.g. linear)
*/
if (mddev->level > 0)
return -ENOSPC;
} else
mddev->dev_sectors = rdev->sectors;
}
/* Verify rdev->desc_nr is unique.
* If it is -1, assign a free number, else
* check number is not in use
*/
if (rdev->desc_nr < 0) {
int choice = 0;
if (mddev->pers) choice = mddev->raid_disks;
while (find_rdev_nr(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
if (find_rdev_nr(mddev, rdev->desc_nr))
return -EBUSY;
}
if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
mdname(mddev), mddev->max_disks);
return -EBUSY;
}
bdevname(rdev->bdev,b);
while ( (s=strchr(b, '/')) != NULL)
*s = '!';
rdev->mddev = mddev;
printk(KERN_INFO "md: bind<%s>\n", b);
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if (sysfs_create_link(&rdev->kobj, ko, "block"))
/* failure here is OK */;
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
list_add_rcu(&rdev->same_set, &mddev->disks);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
mddev->recovery_disabled++;
return 0;
fail:
printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
b, mdname(mddev));
return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
kobject_del(&rdev->kobj);
kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(struct md_rdev * rdev)
{
char b[BDEVNAME_SIZE];
if (!rdev->mddev) {
MD_BUG();
return;
}
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
kfree(rdev->badblocks.page);
rdev->badblocks.count = 0;
rdev->badblocks.page = NULL;
/* We need to delay this, otherwise we can deadlock when
* writing 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
*/
synchronize_rcu();
INIT_WORK(&rdev->del_work, md_delayed_delete);
kobject_get(&rdev->kobj);
queue_work(md_misc_wq, &rdev->del_work);
}
/*
* prevent the device from being mounted, repartitioned or
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
char b[BDEVNAME_SIZE];
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (struct md_rdev *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
rdev->bdev = bdev;
return err;
}
static void unlock_rdev(struct md_rdev *rdev)
{
struct block_device *bdev = rdev->bdev;
rdev->bdev = NULL;
if (!bdev)
MD_BUG();
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
static void export_rdev(struct md_rdev * rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
bdevname(rdev->bdev,b));
if (rdev->mddev)
MD_BUG();
free_disk_sb(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
unlock_rdev(rdev);
kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(struct md_rdev * rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
static void export_array(struct mddev *mddev)
{
struct md_rdev *rdev, *tmp;
rdev_for_each_safe(rdev, tmp, mddev) {
if (!rdev->mddev) {
MD_BUG();
continue;
}
kick_rdev_from_array(rdev);
}
if (!list_empty(&mddev->disks))
MD_BUG();
mddev->raid_disks = 0;
mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb_90(mdp_super_t *sb)
{
int i;
printk(KERN_INFO
"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
sb->major_version, sb->minor_version, sb->patch_version,
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
sb->ctime);
printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
sb->level, sb->size, sb->nr_disks, sb->raid_disks,
sb->md_minor, sb->layout, sb->chunk_size);
printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
" FD:%d SD:%d CSUM:%08x E:%08lx\n",
sb->utime, sb->state, sb->active_disks, sb->working_disks,
sb->failed_disks, sb->spare_disks,
sb->sb_csum, (unsigned long)sb->events_lo);
printk(KERN_INFO);
for (i = 0; i < MD_SB_DISKS; i++) {
mdp_disk_t *desc;
desc = sb->disks + i;
if (desc->number || desc->major || desc->minor ||
desc->raid_disk || (desc->state && (desc->state != 4))) {
printk(" D %2d: ", i);
print_desc(desc);
}
}
printk(KERN_INFO "md: THIS: ");
print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
__u8 *uuid;
uuid = sb->set_uuid;
printk(KERN_INFO
"md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
"md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
uuid,
sb->set_name,
(unsigned long long)le64_to_cpu(sb->ctime)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
uuid = sb->device_uuid;
printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
"md: Dev:%08x UUID: %pU\n"
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
"md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
(unsigned long long)le64_to_cpu(sb->size),
le32_to_cpu(sb->raid_disks),
le32_to_cpu(sb->layout),
le32_to_cpu(sb->chunksize),
(unsigned long long)le64_to_cpu(sb->data_offset),
(unsigned long long)le64_to_cpu(sb->data_size),
(unsigned long long)le64_to_cpu(sb->super_offset),
(unsigned long long)le64_to_cpu(sb->recovery_offset),
le32_to_cpu(sb->dev_number),
uuid,
sb->devflags,
(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
(unsigned long long)le64_to_cpu(sb->events),
(unsigned long long)le64_to_cpu(sb->resync_offset),
le32_to_cpu(sb->sb_csum),
le32_to_cpu(sb->max_dev)
);
}
static void print_rdev(struct md_rdev *rdev, int major_version)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
rdev->desc_nr);
if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
case 0:
print_sb_90(page_address(rdev->sb_page));
break;
case 1:
print_sb_1(page_address(rdev->sb_page));
break;
}
} else
printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
struct list_head *tmp;
struct md_rdev *rdev;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk("\n");
printk("md: **********************************\n");
printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
printk("md: **********************************\n");
for_each_mddev(mddev, tmp) {
if (mddev->bitmap)
bitmap_print_sb(mddev->bitmap);
else
printk("%s: ", mdname(mddev));
rdev_for_each(rdev, mddev)
printk("<%s>", bdevname(rdev->bdev,b));
printk("\n");
rdev_for_each(rdev, mddev)
print_rdev(rdev, mddev->major_version);
}
printk("md: **********************************\n");
printk("\n");
}
static void sync_sbs(struct mddev * mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
* have the right event counter, or have one earlier
* (which would mean they aren't being marked as dirty
* with the rest of the array)
*/
struct md_rdev *rdev;
rdev_for_each(rdev, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
rdev->sb_events+1 == mddev->events)) {
/* Don't update this superblock */
rdev->sb_loaded = 2;
} else {
sync_super(mddev, rdev);
rdev->sb_loaded = 1;
}
}
}
static void md_update_sb(struct mddev * mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
int nospares = 0;
int any_badblocks_changed = 0;
repeat:
/* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!mddev->external) {
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed) {
rdev->badblocks.changed = 0;
md_ack_all_badblocks(&rdev->badblocks);
md_error(mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
wake_up(&mddev->sb_wait);
return;
}
spin_lock_irq(&mddev->write_lock);
mddev->utime = get_seconds();
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
/* just a clean <-> dirty transition, possibly leave spares alone,
* though if 'events' doesn't end up with the right even/odd parity,
* we will have to update the spares after all
*/
nospares = 1;
if (force_change)
nospares = 0;
if (mddev->degraded)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
* might have an event_count that still looks up-to-date,
* so it can be re-added without a resync.
* Pointless because if there are any spares to skip,
* then a recovery will happen and soon that array won't
* be degraded any more and the spare can go back to sleep then.
*/
nospares = 0;
sync_req = mddev->in_sync;
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
&& (mddev->in_sync && mddev->recovery_cp == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
mddev->can_decrease_events = 0;
} else {
/* otherwise we have to go forward and ... */
mddev->events ++;
mddev->can_decrease_events = nospares;
}
if (!mddev->events) {
/*
* oops, this 64-bit counter should never wrap.
* Either we are in around ~1 trillion A.C., assuming
* 1 reboot per second, or we have a bug:
*/
MD_BUG();
mddev->events --;
}
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed)
any_badblocks_changed++;
if (test_bit(Faulty, &rdev->flags))
set_bit(FaultRecorded, &rdev->flags);
}
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags) &&
rdev->saved_raid_disk == -1) {
md_super_write(mddev,rdev,
rdev->sb_start, rdev->sb_size,
rdev->sb_page);
pr_debug("md: (write) %s's sb offset: %llu\n",
bdevname(rdev->bdev, b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
md_super_write(mddev, rdev,
rdev->badblocks.sector,
rdev->badblocks.size << 9,
rdev->bb_page);
rdev->badblocks.size = 0;
}
} else if (test_bit(Faulty, &rdev->flags))
pr_debug("md: %s (skipping faulty)\n",
bdevname(rdev->bdev, b));
else
pr_debug("(skipping incremental s/r ");
if (mddev->level == LEVEL_MULTIPATH)
/* only need to write one superblock... */
break;
}
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync != sync_req ||
test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
/* have to write it out again */
spin_unlock_irq(&mddev->write_lock);
goto repeat;
}
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
rdev_for_each(rdev, mddev) {
if (test_and_clear_bit(FaultRecorded, &rdev->flags))
clear_bit(Blocked, &rdev->flags);
if (any_badblocks_changed)
md_ack_all_badblocks(&rdev->badblocks);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
}
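/*
 * Illustrative sketch only: the event-counter step chosen above.  A pure
 * clean<->dirty flip may roll the counter back instead of forward so
 * that spare superblocks (left one event behind) stay valid.  The helper
 * is invented for the example; the driver additionally requires the
 * array to be clean before decreasing.
 */
#if 0 /* example only */
static unsigned long long ex_next_events(unsigned long long events,
					 int nospares, int can_decrease)
{
	if (nospares && can_decrease && events != 1)
		return events - 1;	/* roll back to the clean state */
	return events + 1;		/* normal forward step */
}
#endif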
/* words written to sysfs files may, or may not, be \n terminated.
* We want to accept either case. For this we use cmd_match.
*/
static int cmd_match(const char *cmd, const char *str)
{
/* See if cmd, written into a sysfs file, matches
* str. They must either be the same, or cmd can
* have a trailing newline
*/
while (*cmd && *str && *cmd == *str) {
cmd++;
str++;
}
if (*cmd == '\n')
cmd++;
if (*str || *cmd)
return 0;
return 1;
}
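/*
 * Illustrative sketch only: cmd_match() accepts an exact match or the
 * same word with one trailing newline, which is what echo(1) writes
 * into a sysfs attribute.  Expected results shown in the comments.
 */
#if 0 /* example only */
static void ex_cmd_match_usage(void)
{
	cmd_match("faulty", "faulty");		/* 1: exact match */
	cmd_match("faulty\n", "faulty");	/* 1: trailing '\n' ignored */
	cmd_match("faulty!", "faulty");		/* 0: trailing junk */
	cmd_match("fault", "faulty");		/* 0: prefix only */
}
#endif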
struct rdev_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct md_rdev *, char *);
ssize_t (*store)(struct md_rdev *, const char *, size_t);
};
static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
char *sep = "";
size_t len = 0;
if (test_bit(Faulty, &rdev->flags) ||
rdev->badblocks.unacked_exist) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
if (test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sin_sync",sep);
sep = ",";
}
if (test_bit(WriteMostly, &rdev->flags)) {
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
(rdev->badblocks.unacked_exist
&& !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
if (!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags)) {
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
if (test_bit(WriteErrorSeen, &rdev->flags)) {
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
if (test_bit(WantReplacement, &rdev->flags)) {
len += sprintf(page+len, "%swant_replacement", sep);
sep = ",";
}
if (test_bit(Replacement, &rdev->flags)) {
len += sprintf(page+len, "%sreplacement", sep);
sep = ",";
}
return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
/* can write
* faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
* blocked - sets the Blocked flags
* -blocked - clears the Blocked and possibly simulates an error
* insync - sets In_sync provided the device isn't active
* write_error - sets WriteErrorSeen
* -write_error - clears WriteErrorSeen
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
err = 0;
else
err = -EBUSY;
} else if (cmd_match(buf, "remove")) {
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
kick_rdev_from_array(rdev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
clear_bit(WriteMostly, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
if (!test_bit(Faulty, &rdev->flags) &&
rdev->badblocks.unacked_exist) {
/* metadata handler doesn't understand badblocks,
* so we need to fail the device
*/
md_error(rdev->mddev, rdev);
}
clear_bit(Blocked, &rdev->flags);
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "want_replacement")) {
/* Any non-spare device that is not a replacement can
* become want_replacement at any time, but we then need to
* check if recovery is needed.
*/
if (rdev->raid_disk >= 0 &&
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
* Once replacement starts it is too late, though.
*/
err = 0;
clear_bit(WantReplacement, &rdev->flags);
} else if (cmd_match(buf, "replacement")) {
/* Can only set a device as a replacement when array has not
* yet been started. Once running, replacement is automatic
* from spares, or by assigning 'slot'.
*/
if (rdev->mddev->pers)
err = -EBUSY;
else {
set_bit(Replacement, &rdev->flags);
err = 0;
}
} else if (cmd_match(buf, "-replacement")) {
/* Similarly, can only clear Replacement before start */
if (rdev->mddev->pers)
err = -EBUSY;
else {
clear_bit(Replacement, &rdev->flags);
err = 0;
}
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}
static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
atomic_set(&rdev->corrected_errors, n);
return len;
}
return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
if (rdev->raid_disk < 0)
return sprintf(page, "none\n");
else
return sprintf(page, "%d\n", rdev->raid_disk);
}
static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
char *e;
int err;
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
else if (e==buf || (*e && *e!= '\n'))
return -EINVAL;
if (rdev->mddev->pers && slot == -1) {
/* Setting 'slot' on an active array requires also
* updating the 'rd%d' link, and communicating
* with the personality with ->hot_*_disk.
* For now we only support removing
* failed/spare devices. This normally happens automatically,
* but not when the metadata is externally managed.
*/
if (rdev->raid_disk == -1)
return -EEXIST;
/* personality does all needed checks */
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev);
if (err)
return err;
sysfs_unlink_rdev(rdev->mddev, rdev);
rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
if (rdev->raid_disk != -1)
return -EBUSY;
if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
return -EBUSY;
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = slot;
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
err = rdev->mddev->pers->
hot_add_disk(rdev->mddev, rdev);
if (err) {
rdev->raid_disk = -1;
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
if (sysfs_link_rdev(rdev->mddev, rdev))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
rdev->raid_disk = slot;
/* assume it is working */
clear_bit(Faulty, &rdev->flags);
clear_bit(WriteMostly, &rdev->flags);
set_bit(In_sync, &rdev->flags);
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}
static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
char *e;
unsigned long long offset = simple_strtoull(buf, &e, 10);
if (e==buf || (*e && *e != '\n'))
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
if (rdev->sectors && rdev->mddev->external)
/* Must set offset before size, so overlap checks
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
return len;
}
static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}
static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
/* check if two start/length pairs overlap */
if (s1+l1 <= s2)
return 0;
if (s2+l2 <= s1)
return 0;
return 1;
}
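/*
 * Illustrative sketch only: overlaps() treats each extent as the
 * half-open range [start, start+len), so two extents miss each other
 * exactly when one ends at or before the other begins.
 */
#if 0 /* example only */
static void ex_overlaps_usage(void)
{
	overlaps(0, 100, 100, 50);	/* 0: [0,100) and [100,150) only touch */
	overlaps(0, 100, 99, 50);	/* 1: [0,100) and [99,149) share 99 */
}
#endif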
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
unsigned long long blocks;
sector_t new;
if (strict_strtoull(buf, 10, &blocks) < 0)
return -EINVAL;
if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
return -EINVAL; /* sector conversion overflow */
new = blocks * 2;
if (new != blocks * 2)
return -EINVAL; /* unsigned long long to sector_t overflow */
*sectors = new;
return 0;
}
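/*
 * Illustrative sketch only: sizes arrive as 1K blocks and are doubled
 * into 512-byte sectors; the two checks above reject any value whose
 * doubling would overflow unsigned long long or sector_t.
 */
#if 0 /* example only */
static void ex_blocks_to_sectors_usage(void)
{
	sector_t s;

	strict_blocks_to_sectors("1024", &s);	/* 0: s = 2048 (1MB) */
	/* 2^63 blocks: top bit set, doubling would overflow -> -EINVAL */
	strict_blocks_to_sectors("9223372036854775808", &s);
}
#endif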
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
struct mddev *my_mddev = rdev->mddev;
sector_t oldsectors = rdev->sectors;
sector_t sectors;
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
sectors = super_types[my_mddev->major_version].
rdev_size_change(rdev, sectors);
if (!sectors)
return -EBUSY;
} else if (!sectors)
sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
rdev->data_offset;
}
if (sectors < my_mddev->dev_sectors)
return -EINVAL; /* component must fit device */
rdev->sectors = sectors;
if (sectors > oldsectors && my_mddev->external) {
/* need to check that all other rdevs with the same ->bdev
* do not overlap. We need to unlock the mddev to avoid
* a deadlock. We have already changed rdev->sectors, and if
* we have to change it back, we will have the lock again.
*/
struct mddev *mddev;
int overlap = 0;
struct list_head *tmp;
mddev_unlock(my_mddev);
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
mddev_lock(mddev);
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
overlaps(rdev->data_offset, rdev->sectors,
rdev2->data_offset,
rdev2->sectors)) {
overlap = 1;
break;
}
mddev_unlock(mddev);
if (overlap) {
mddev_put(mddev);
break;
}
}
mddev_lock(my_mddev);
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
* We put oldsectors back because we *know* it is
* safe, and trust userspace not to race with
* itself
*/
rdev->sectors = oldsectors;
return -EBUSY;
}
}
return len;
}
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
if (test_bit(In_sync, &rdev->flags) ||
recovery_start == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", recovery_start);
}
static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
unsigned long long recovery_start;
if (cmd_match(buf, "none"))
recovery_start = MaxSector;
else if (strict_strtoull(buf, 10, &recovery_start))
return -EINVAL;
if (rdev->mddev->pers &&
rdev->raid_disk >= 0)
return -EBUSY;
rdev->recovery_offset = recovery_start;
if (recovery_start == MaxSector)
set_bit(In_sync, &rdev->flags);
else
clear_bit(In_sync, &rdev->flags);
return len;
}
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
int rv = badblocks_store(&rdev->badblocks, page, len, 0);
/* Maybe that ack was all we needed */
if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
wake_up(&rdev->blocked_wait);
return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
&rdev_bad_blocks.attr,
&rdev_unack_bad_blocks.attr,
NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
struct mddev *mddev = rdev->mddev;
ssize_t rv;
if (!entry->show)
return -EIO;
rv = mddev ? mddev_lock(mddev) : -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->show(rdev, page);
mddev_unlock(mddev);
}
return rv;
}
static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
ssize_t rv;
struct mddev *mddev = rdev->mddev;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
rv = mddev ? mddev_lock(mddev): -EBUSY;
if (!rv) {
if (rdev->mddev == NULL)
rv = -EBUSY;
else
rv = entry->store(rdev, page, length);
mddev_unlock(mddev);
}
return rv;
}
static void rdev_free(struct kobject *ko)
{
struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
.show = rdev_attr_show,
.store = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
.default_attrs = rdev_default_attrs,
};
int md_rdev_init(struct md_rdev *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
rdev->raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
rdev->sb_loaded = 0;
rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
/* Add space to store bad block list.
* This reserves the space even on arrays where it cannot
* be used - I wonder if that matters
*/
rdev->badblocks.count = 0;
rdev->badblocks.shift = 0;
rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
seqlock_init(&rdev->badblocks.lock);
if (rdev->badblocks.page == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
* Import a device. If 'super_format' >= 0, then sanity check the superblock
*
* mark the device faulty if:
*
* - the device is nonexistent (zero size)
* - the device has no valid superblock
*
* a faulty rdev _never_ has rdev->sb set.
*/
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
sector_t size;
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for new device!\n");
return ERR_PTR(-ENOMEM);
}
err = md_rdev_init(rdev);
if (err)
goto abort_free;
err = alloc_disk_sb(rdev);
if (err)
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
kobject_init(&rdev->kobj, &rdev_ktype);
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
goto abort_free;
}
if (super_format >= 0) {
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
printk(KERN_WARNING
"md: %s does not have a valid v%d.%d "
"superblock, not importing!\n",
bdevname(rdev->bdev,b),
super_format, super_minor);
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
}
}
if (super_format == -1)
/* hot-add for 0.90, or non-persistent: so no badblocks */
rdev->badblocks.shift = -1;
return rdev;
abort_free:
if (rdev->bdev)
unlock_rdev(rdev);
free_disk_sb(rdev);
kfree(rdev->badblocks.page);
kfree(rdev);
return ERR_PTR(err);
}
/*
* Check a full RAID array for plausibility
*/
static void analyze_sbs(struct mddev * mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
char b[BDEVNAME_SIZE];
freshest = NULL;
rdev_for_each_safe(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
freshest = rdev;
break;
case 0:
break;
default:
printk(KERN_ERR
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
i = 0;
rdev_for_each_safe(rdev, tmp, mddev) {
if (mddev->max_disks &&
(rdev->desc_nr >= mddev->max_disks ||
i > mddev->max_disks)) {
printk(KERN_WARNING
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
kick_rdev_from_array(rdev);
continue;
}
if (rdev != freshest)
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
continue;
}
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
}
}
/* Read a fixed-point number.
* Numbers in sysfs attributes should be in "standard" units where
* possible, so time should be in seconds.
* However we internally use a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
* multiplying that number by 10^'scale'.
* all without any floating-point arithmetic.
*/
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
unsigned long result = 0;
long decimals = -1;
while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
if (*cp == '.')
decimals = 0;
else if (decimals < scale) {
unsigned int value;
value = *cp - '0';
result = result * 10 + value;
if (decimals >= 0)
decimals++;
}
cp++;
}
if (*cp == '\n')
cp++;
if (*cp)
return -EINVAL;
if (decimals < 0)
decimals = 0;
while (decimals < scale) {
result *= 10;
decimals ++;
}
*res = result;
return 0;
}
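/*
 * Illustrative sketch only: fixed-point parsing without floating point.
 * With scale 3 the result is in thousandths, so "0.105" seconds parses
 * to 105 milliseconds; expected results shown in the comments.
 */
#if 0 /* example only */
static void ex_strtoul_scaled_usage(void)
{
	unsigned long res;

	strict_strtoul_scaled("0.105", &res, 3);	/* res = 105 */
	strict_strtoul_scaled("2", &res, 3);		/* res = 2000 */
	strict_strtoul_scaled("1.2345", &res, 3);	/* res = 1234: extra digit dropped */
}
#endif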
static void md_safemode_timeout(unsigned long data);
static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
int msec = (mddev->safemode_delay*1000)/HZ;
return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
unsigned long msec;
if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
if (msec == 0)
mddev->safemode_delay = 0;
else {
unsigned long old_delay = mddev->safemode_delay;
mddev->safemode_delay = (msec*HZ)/1000;
if (mddev->safemode_delay == 0)
mddev->safemode_delay = 1;
if (mddev->safemode_delay < old_delay)
md_safemode_timeout((unsigned long)mddev);
}
return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(struct mddev *mddev, char *page)
{
struct md_personality *p = mddev->pers;
if (p)
return sprintf(page, "%s\n", p->name);
else if (mddev->clevel[0])
return sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
return sprintf(page, "%d\n", mddev->level);
else
return 0;
}
static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
char clevel[16];
ssize_t rv = len;
struct md_personality *pers;
long level;
void *priv;
struct md_rdev *rdev;
if (mddev->pers == NULL) {
if (len == 0)
return 0;
if (len >= sizeof(mddev->clevel))
return -ENOSPC;
strncpy(mddev->clevel, buf, len);
if (mddev->clevel[len-1] == '\n')
len--;
mddev->clevel[len] = 0;
mddev->level = LEVEL_NONE;
return rv;
}
/* request to change the personality. Need to ensure:
* - array is not engaged in resync/recovery/reshape
* - old personality can be suspended
* - new personality can take over the array.
*/
if (mddev->sync_thread ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
return -EBUSY;
if (!mddev->pers->quiesce) {
printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
mdname(mddev), mddev->pers->name);
return -EINVAL;
}
/* Now find the new personality */
if (len == 0 || len >= sizeof(clevel))
return -EINVAL;
strncpy(clevel, buf, len);
if (clevel[len-1] == '\n')
len--;
clevel[len] = 0;
if (strict_strtol(clevel, 10, &level))
level = LEVEL_NONE;
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
spin_lock(&pers_lock);
pers = find_pers(level, clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
return -EINVAL;
}
spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
module_put(pers->owner);
return rv;
}
if (!pers->takeover) {
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
return -EINVAL;
}
rdev_for_each(rdev, mddev)
rdev->new_raid_disk = rdev->raid_disk;
/* ->takeover must set new_* and/or delta_disks
* if it succeeds, and may set them when it fails.
*/
priv = pers->takeover(mddev);
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
return PTR_ERR(priv);
}
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
if (mddev->pers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
if (mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
}
if (mddev->pers->sync_request == NULL &&
mddev->external) {
/* We are converting from a no-redundancy array
* to a redundancy array and metadata is managed
* externally so we need to be sure that writes
* won't block due to a need to transition
* clean->dirty
* until external management is started.
*/
mddev->in_sync = 0;
mddev->safemode_delay = 0;
mddev->safemode = 0;
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
sysfs_unlink_rdev(mddev, rdev);
}
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
rdev->raid_disk = rdev->new_raid_disk;
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING "md: cannot register rd%d"
" for %s after level change\n",
rdev->raid_disk, mdname(mddev));
}
}
module_put(mddev->pers->owner);
mddev->pers = pers;
mddev->private = priv;
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
*/
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
pers->run(mddev);
mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
return rv;
}
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
if (mddev->reshape_position != MaxSector &&
mddev->layout != mddev->new_layout)
return sprintf(page, "%d (%d)\n",
mddev->new_layout, mddev->layout);
return sprintf(page, "%d\n", mddev->layout);
}
static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers) {
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
mddev->new_layout = n;
err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_layout = mddev->layout;
return err;
}
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
mddev->layout = n;
}
return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
if (mddev->raid_disks == 0)
return 0;
if (mddev->reshape_position != MaxSector &&
mddev->delta_disks != 0)
return sprintf(page, "%d (%d)\n", mddev->raid_disks,
mddev->raid_disks - mddev->delta_disks);
return sprintf(page, "%d\n", mddev->raid_disks);
}
static int update_raid_disks(struct mddev *mddev, int raid_disks);
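/* Change the number of component devices. On a running array this is
* handed to update_raid_disks() to start a reshape; on an inactive
* array raid_disks (and delta_disks, if a reshape position has been
* set) is updated directly.
*/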
static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
int rv = 0;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers)
rv = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
int olddisks = mddev->raid_disks - mddev->delta_disks;
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
} else
mddev->raid_disks = n;
return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
mddev->chunk_sectors != mddev->new_chunk_sectors)
return sprintf(page, "%d (%d)\n",
mddev->new_chunk_sectors << 9,
mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}
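/* The chunk size is exposed in bytes but stored internally in
* 512-byte sectors, hence the "<< 9" on show and ">> 9" on store.
* As with layout, a running array must pass ->check_reshape().
*/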
static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (!*buf || (*e && *e != '\n'))
return -EINVAL;
if (mddev->pers) {
int err;
if (mddev->pers->check_reshape == NULL)
return -EBUSY;
mddev->new_chunk_sectors = n >> 9;
err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_chunk_sectors = mddev->chunk_sectors;
return err;
}
} else {
mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
mddev->chunk_sectors = n >> 9;
}
return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
if (mddev->recovery_cp == MaxSector)
return sprintf(page, "none\n");
return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}
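/* recovery_cp records how far resync has progressed; writing "none"
* (MaxSector) marks the array as fully recovered. On an active array
* this is only permitted while recovery is frozen.
*/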
static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long n = simple_strtoull(buf, &e, 10);
if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
return -EBUSY;
if (cmd_match(buf, "none"))
n = MaxSector;
else if (!*buf || (*e && *e != '\n'))
return -EINVAL;
mddev->recovery_cp = n;
return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
* The array state can be:
*
* clear
* No devices, no size, no level
* Equivalent to STOP_ARRAY ioctl
* inactive
* May have some settings, but array is not active
* all IO results in error
* When written, doesn't tear down array, but just stops it
* suspended (not supported yet)
* All IO requests will block. The array can be reconfigured.
* Writing this, if accepted, will block until array is quiescent
* readonly
* no resync can happen. no superblocks get written.
* write requests fail
* read-auto
* like readonly, but behaves like 'clean' on a write request.
*
* clean
* no pending writes, but otherwise active.
* When written to inactive array, starts without resync
* If a write request arrives then
* if metadata is known, mark 'dirty' and switch to 'active'.
* if not known, block and switch to write-pending
* If written to an active array that has pending writes, then fails.
* active
* fully active: IO and resync can be happening.
* When written to inactive array, starts with resync
*
* write-pending
* clean, but writes are blocked waiting for 'active' to be written.
*
* active-idle
* like active, but no writes have been seen for a while (100msec).
*
*/
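/* Example (hypothetical shell session; the device name is illustrative
* only, not taken from this file):
*
* $ cat /sys/block/md0/md/array_state
* active-idle
* $ echo readonly > /sys/block/md0/md/array_state
*
* Writes are matched against array_states[] by match_word() below.
*/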
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
write_pending, active_idle, bad_word};
static char *array_states[] = {
"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
int n;
for (n=0; list[n]; n++)
if (cmd_match(word, list[n]))
break;
return n;
}
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
enum array_state st = inactive;
if (mddev->pers)
switch(mddev->ro) {
case 1:
st = readonly;
break;
case 2:
st = read_auto;
break;
case 0:
if (mddev->in_sync)
st = clean;
else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
st = active;
}
else {
if (list_empty(&mddev->disks) &&
mddev->raid_disks == 0 &&
mddev->dev_sectors == 0)
st = clear;
else
st = inactive;
}
return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(struct mddev * mddev, int ro, int is_open);
static int md_set_readonly(struct mddev * mddev, int is_open);
static int do_md_run(struct mddev * mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
int err = -EINVAL;
enum array_state st = match_word(buf, array_states);
switch(st) {
case bad_word:
break;
case clear:
/* stopping an active array */
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
err = do_md_stop(mddev, 0, 0);
break;
case inactive:
/* stopping an active array */
if (mddev->pers) {
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
err = do_md_stop(mddev, 2, 0);
} else
err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
case readonly:
if (mddev->pers)
err = md_set_readonly(mddev, 0);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
err = do_md_run(mddev);
}
break;
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
err = md_set_readonly(mddev, 0);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
} else {
mddev->ro = 2;
err = do_md_run(mddev);
}
break;
case clean:
if (mddev->pers) {
restart_array(mddev);
spin_lock_irq(&mddev->write_lock);
if (atomic_read(&mddev->writes_pending) == 0) {
if (mddev->in_sync == 0) {
mddev->in_sync = 1;
if (mddev->safemode == 1)
mddev->safemode = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
err = 0;
} else
err = -EBUSY;
spin_unlock_irq(&mddev->write_lock);
} else
err = -EINVAL;
break;
case active:
if (mddev->pers) {
restart_array(mddev);
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
mddev->ro = 0;
set_disk_ro(mddev->gendisk, 0);
err = do_md_run(mddev);
}
break;
case write_pending:
case active_idle:
/* these cannot be set */
break;
}
if (err)
return err;
else {
if (mddev->hold_active == UNTIL_IOCTL)
mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n",
atomic_read(&mddev->max_corr_read_errors));
}
static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long n = simple_strtoul(buf, &e, 10);
if (*buf && (*e == 0 || *e == '\n')) {
atomic_set(&mddev->max_corr_read_errors, n);
return len;
}
return -EINVAL;
}
static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
max_corrected_read_errors_store);
static ssize_t
null_show(struct mddev *mddev, char *page)
{
return -EINVAL;
}
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
/* buf must be %d:%d\n? giving major and minor numbers */
/* The new device is added to the array.
* If the array has a persistent superblock, we read the
* superblock to initialise info and check validity.
* Otherwise, only checking done is that in bind_rdev_to_array,
* which mainly checks size.
*/
char *e;
int major = simple_strtoul(buf, &e, 10);
int minor;
dev_t dev;
struct md_rdev *rdev;
int err;
if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
return -EINVAL;
minor = simple_strtoul(e+1, &e, 10);
if (*e && *e != '\n')
return -EINVAL;
dev = MKDEV(major, minor);
if (major != MAJOR(dev) ||
minor != MINOR(dev))
return -EOVERFLOW;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0)
goto out;
}
} else if (mddev->external)
rdev = md_import_device(dev, -2, -1);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
export_rdev(rdev);
return err ? err : len;
}
static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
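/* Mark bitmap chunks dirty by hand. The input is a whitespace-
* separated list of chunk numbers or <first>-<last> ranges; the bits
* are set via bitmap_dirty_bits() and flushed to disk at the end.
*/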
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
char *end;
unsigned long chunk, end_chunk;
if (!mddev->bitmap)
goto out;
/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
while (*buf) {
chunk = end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
if (*end == '-') { /* range */
buf = end + 1;
end_chunk = simple_strtoul(buf, &end, 0);
if (buf == end) break;
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
return len;
}
static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->dev_sectors / 2);
}
static int update_size(struct mddev *mddev, sector_t num_sectors);
static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
/* If array is inactive, we can reduce the component size, but
* not increase it (except from 0).
* If array is active, we can try an on-line resize
*/
sector_t sectors;
int err = strict_blocks_to_sectors(buf, &sectors);
if (err < 0)
return err;
if (mddev->pers) {
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
mddev->dev_sectors = sectors;
else
err = -ENOSPC;
}
return err ? err : len;
}
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
* 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
else if (mddev->external)
return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
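/* Accepted writes are "none", "external:<type>", or "<major>.<minor>"
* for an internally known format, e.g. (hypothetical):
*
* $ echo 0.90 > /sys/block/md0/md/metadata_version
*/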
static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
int major, minor;
char *e;
/* Changing the details of 'external' metadata is
* always permitted. Otherwise there must be
* no devices attached to the array.
*/
if (mddev->external && strncmp(buf, "external:", 9) == 0)
;
else if (!list_empty(&mddev->disks))
return -EBUSY;
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
mddev->external = 0;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
}
if (strncmp(buf, "external:", 9) == 0) {
size_t namelen = len-9;
if (namelen >= sizeof(mddev->metadata_type))
namelen = sizeof(mddev->metadata_type)-1;
strncpy(mddev->metadata_type, buf+9, namelen);
mddev->metadata_type[namelen] = 0;
if (namelen && mddev->metadata_type[namelen-1] == '\n')
mddev->metadata_type[--namelen] = 0;
mddev->persistent = 0;
mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
}
major = simple_strtoul(buf, &e, 10);
if (e==buf || *e != '.')
return -EINVAL;
buf = e+1;
minor = simple_strtoul(buf, &e, 10);
if (e==buf || (*e && *e != '\n') )
return -EINVAL;
if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
return -ENOENT;
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
mddev->external = 0;
return len;
}
static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(struct mddev *mddev, char *page)
{
char *type = "idle";
if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
type = "frozen";
else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
type = "reshape";
else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
type = "resync";
else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
type = "check";
else
type = "repair";
} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
type = "recover";
}
return sprintf(page, "%s\n", type);
}
static void reap_sync_thread(struct mddev *mddev);
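/* Accepted words, matching the code below: "idle" and "frozen" stop
* any running sync thread; "resync", "recover", "reshape", "check"
* and "repair" request the corresponding action by setting recovery
* flags and waking the md thread.
*/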
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
if (cmd_match(page, "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
reap_sync_thread(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
else if (cmd_match(page, "recover")) {
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
} else if (cmd_match(page, "reshape")) {
int err;
if (mddev->pers->start_reshape == NULL)
return -EINVAL;
err = mddev->pers->start_reshape(mddev);
if (err)
return err;
sysfs_notify(&mddev->kobj, NULL, "degraded");
} else {
if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!cmd_match(page, "repair"))
return -EINVAL;
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
return len;
}
static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long) mddev->resync_mismatches);
}
static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev->sync_speed_min ? "local": "system");
}
static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
int min;
char *e;
if (strncmp(buf, "system", 6)==0) {
mddev->sync_speed_min = 0;
return len;
}
min = simple_strtoul(buf, &e, 10);
if (buf == e || (*e && *e != '\n') || min <= 0)
return -EINVAL;
mddev->sync_speed_min = min;
return len;
}
static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev->sync_speed_max ? "local": "system");
}
static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
int max;
char *e;
if (strncmp(buf, "system", 6)==0) {
mddev->sync_speed_max = 0;
return len;
}
max = simple_strtoul(buf, &e, 10);
if (buf == e || (*e && *e != '\n') || max <= 0)
return -EINVAL;
mddev->sync_speed_max = max;
return len;
}
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d\n", mddev->parallel_resync);
}
static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
long n;
if (strict_strtol(buf, 10, &n))
return -EINVAL;
if (n != 0 && n != 1)
return -EINVAL;
mddev->parallel_resync = n;
if (mddev->sync_thread)
wake_up(&resync_wait);
return len;
}
/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
sync_force_parallel_show, sync_force_parallel_store);
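/* Current rate: sectors completed since the last mark (db) over the
* elapsed seconds (dt), halved to convert 512-byte sectors to KiB/s.
*/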
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
unsigned long resync, dt, db;
if (mddev->curr_resync == 0)
return sprintf(page, "none\n");
resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
dt = (jiffies - mddev->resync_mark) / HZ;
if (!dt) dt++;
db = resync - mddev->resync_mark_cnt;
return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}
static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
unsigned long long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
resync = mddev->curr_resync_completed;
return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
if (strict_strtoull(buf, 10, &min))
return -EINVAL;
if (min > mddev->resync_max)
return -EINVAL;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
/* Must be a multiple of chunk_size */
if (mddev->chunk_sectors) {
sector_t temp = min;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_min = min;
return len;
}
static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
if (mddev->resync_max == MaxSector)
return sprintf(page, "max\n");
else
return sprintf(page, "%llu\n",
(unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
if (strncmp(buf, "max", 3) == 0)
mddev->resync_max = MaxSector;
else {
unsigned long long max;
if (strict_strtoull(buf, 10, &max))
return -EINVAL;
if (max < mddev->resync_min)
return -EINVAL;
if (max < mddev->resync_max &&
mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
/* Must be a multiple of chunk_size */
if (mddev->chunk_sectors) {
sector_t temp = max;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_max = max;
}
wake_up(&mddev->recovery_wait);
return len;
}
static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}
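/* When the suspended region shrinks, quiesce(mddev, 2) just nudges
* the personality to re-check the bounds; when it grows, a full
* quiesce(1)/quiesce(0) cycle is needed so that IO already in flight
* to the newly suspended range drains first.
*/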
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_lo;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->suspend_lo = new;
if (new >= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_hi;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->suspend_hi = new;
if (new <= old)
/* Shrinking suspended region */
mddev->pers->quiesce(mddev, 2);
else {
/* Expanding suspended region - need to wait */
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
if (mddev->reshape_position != MaxSector)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->reshape_position);
strcpy(page, "none\n");
return 5;
}
static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
if (mddev->pers)
return -EBUSY;
if (buf == e || (*e && *e != '\n'))
return -EINVAL;
mddev->reshape_position = new;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
return len;
}
static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
reshape_position_store);
static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
if (mddev->external_size)
return sprintf(page, "%llu\n",
(unsigned long long)mddev->array_sectors/2);
else
return sprintf(page, "default\n");
}
static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
sector_t sectors;
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
sectors = mddev->pers->size(mddev, 0, 0);
else
sectors = mddev->array_sectors;
mddev->external_size = 0;
} else {
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
return -E2BIG;
mddev->external_size = 1;
}
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
}
return len;
}
static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
array_size_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_chunk_size.attr,
&md_size.attr,
&md_resync_start.attr,
&md_metadata.attr,
&md_new_device.attr,
&md_safe_delay.attr,
&md_array_state.attr,
&md_reshape_position.attr,
&md_array_size.attr,
&max_corr_read_errors.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
&md_min_sync.attr,
&md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
&md_degraded.attr,
NULL,
};
static struct attribute_group md_redundancy_group = {
.name = NULL,
.attrs = md_redundancy_attrs,
};
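/* show/store wrappers for the attributes above. The mddev is looked
* up under all_mddevs_lock and a reference is taken so it cannot be
* freed while the handler runs; an mddev already unlinked from
* all_mddevs is being deleted, so -EBUSY is returned instead.
*/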
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->show)
return -EIO;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
mddev_put(mddev);
return rv;
}
static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock(&all_mddevs_lock);
if (list_empty(&mddev->all_mddevs)) {
spin_unlock(&all_mddevs_lock);
return -EBUSY;
}
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
mddev_put(mddev);
return rv;
}
static void md_free(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
if (mddev->queue)
blk_cleanup_queue(mddev->queue);
kfree(mddev);
}
static const struct sysfs_ops md_sysfs_ops = {
.show = md_attr_show,
.store = md_attr_store,
};
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
.default_attrs = md_default_attrs,
};
int mdp_major = 0;
static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
struct mddev *mddev = mddev_find(dev);
struct gendisk *disk;
int partitioned;
int shift;
int unit;
int error;
if (!mddev)
return -ENODEV;
partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
shift = partitioned ? MdpMinorShift : 0;
unit = MINOR(mddev->unit) >> shift;
/* wait for any previous instance of this device to be
* completely removed (mddev_delayed_delete).
*/
flush_workqueue(md_misc_wq);
mutex_lock(&disks_mutex);
error = -EEXIST;
if (mddev->gendisk)
goto abort;
if (name) {
/* Need to ensure that 'name' is not a duplicate.
*/
struct mddev *mddev2;
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
if (mddev2->gendisk &&
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
goto abort;
}
spin_unlock(&all_mddevs_lock);
}
error = -ENOMEM;
mddev->queue = blk_alloc_queue(GFP_KERNEL);
if (!mddev->queue)
goto abort;
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
if (!disk) {
blk_cleanup_queue(mddev->queue);
mddev->queue = NULL;
goto abort;
}
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
if (name)
strcpy(disk->disk_name, name);
else if (partitioned)
sprintf(disk->disk_name, "md_d%d", unit);
else
sprintf(disk->disk_name, "md%d", unit);
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
*/
disk->flags |= GENHD_FL_EXT_DEVT;
mddev->gendisk = disk;
/* As soon as we call add_disk(), another thread could get
* through to md_open, so make sure it doesn't get too far
*/
mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_init_and_add(&mddev->kobj, &md_ktype,
&disk_to_dev(disk)->kobj, "%s", "md");
if (error) {
/* This isn't possible, but as kobject_init_and_add is marked
* __must_check, we must do something with the result
*/
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
disk->disk_name);
error = 0;
}
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
printk(KERN_DEBUG "pointless warning\n");
mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
}
mddev_put(mddev);
return error;
}
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
md_alloc(dev, NULL);
return NULL;
}
static int add_named_array(const char *val, struct kernel_param *kp)
{
/* val must be "md_*" where * is not all digits.
* We allocate an array with a large free minor number, and
* set the name to val. val must not already be an active name.
*/
int len = strlen(val);
char buf[DISK_NAME_LEN];
while (len && val[len-1] == '\n')
len--;
if (len >= DISK_NAME_LEN)
return -E2BIG;
strlcpy(buf, val, len+1);
if (strncmp(buf, "md_", 3) != 0)
return -EINVAL;
return md_alloc(0, buf);
}
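/* Safemode timer: once no writes have been pending for the configured
* delay, set safemode so the array can be marked clean (the md thread
* is woken to do the actual superblock update).
*/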
static void md_safemode_timeout(unsigned long data)
{
struct mddev *mddev = (struct mddev *) data;
if (!atomic_read(&mddev->writes_pending)) {
mddev->safemode = 1;
if (mddev->external)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;
int md_run(struct mddev *mddev)
{
int err;
struct md_rdev *rdev;
struct md_personality *pers;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
return -EINVAL;
if (mddev->pers)
return -EBUSY;
/* Cannot run until previous stop completes properly */
if (mddev->sysfs_active)
return -EBUSY;
/*
* Analyze all RAID superblock(s)
*/
if (!mddev->raid_disks) {
if (!mddev->persistent)
return -EINVAL;
analyze_sbs(mddev);
}
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
request_module("md-%s", mddev->clevel);
/*
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
*/
rdev_for_each(rdev, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
/* perform some consistency tests on the device.
* We don't want the data to overlap the metadata,
* Internal Bitmap issues have been handled elsewhere.
*/
if (rdev->meta_bdev) {
/* Nothing to check */;
} else if (rdev->data_offset < rdev->sb_start) {
if (mddev->dev_sectors &&
rdev->data_offset + mddev->dev_sectors
> rdev->sb_start) {
printk("md: %s: data overlaps metadata\n",
mdname(mddev));
return -EINVAL;
}
} else {
if (rdev->sb_start + rdev->sb_size/512
> rdev->data_offset) {
printk("md: %s: metadata overlaps data\n",
mdname(mddev));
return -EINVAL;
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
if (mddev->bio_set == NULL)
mddev->bio_set = bioset_create(BIO_POOL_SIZE,
sizeof(struct mddev *));
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
if (mddev->level != LEVEL_NONE)
printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
mddev->level);
else
printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
mddev->clevel);
return -EINVAL;
}
mddev->pers = pers;
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
mddev->level = pers->level;
mddev->new_level = pers->level;
}
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
mddev->pers = NULL;
module_put(pers->owner);
return -EINVAL;
}
if (pers->sync_request) {
/* Warn if this is a potentially silly
* configuration.
*/
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev2;
int warned = 0;
rdev_for_each(rdev, mddev)
rdev_for_each(rdev2, mddev) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
printk(KERN_WARNING
"%s: WARNING: %s appears to be"
" on the same physical disk as"
" %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
bdevname(rdev2->bdev,b2));
warned = 1;
}
}
if (warned)
printk(KERN_WARNING
"True protection against single-disk"
" failure might be compromised.\n");
}
mddev->recovery = 0;
/* may be over-ridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
mddev->ok_start_degraded = start_dirty_degraded;
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
if (err)
printk(KERN_ERR "md: pers->run() failed ...\n");
else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
WARN_ONCE(!mddev->external_size, "%s: default size too small,"
" but 'external_size' not in effect?\n", __func__);
printk(KERN_ERR
"md: invalid array_size %llu > default size %llu\n",
(unsigned long long)mddev->array_sectors / 2,
(unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
err = -EINVAL;
mddev->pers->stop(mddev);
}
if (err == 0 && mddev->pers->sync_request) {
err = bitmap_create(mddev);
if (err) {
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
mddev->pers->stop(mddev);
}
}
if (err) {
module_put(mddev->pers->owner);
mddev->pers = NULL;
bitmap_destroy(mddev);
return err;
}
if (mddev->pers->sync_request) {
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_redundancy_group))
printk(KERN_WARNING
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
smp_wmb();
mddev->ready = 1;
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags)
md_update_sb(mddev, 0);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action);
sysfs_notify(&mddev->kobj, NULL, "degraded");
return 0;
}
EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(struct mddev *mddev)
{
int err;
err = md_run(mddev);
if (err)
goto out;
err = bitmap_load(mddev);
if (err) {
bitmap_destroy(mddev);
goto out;
}
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
}
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
/* Complain if it has no devices */
if (list_empty(&mddev->disks))
return -ENXIO;
if (!mddev->pers)
return -EINVAL;
if (!mddev->ro)
return -EBUSY;
mddev->safemode = 0;
mddev->ro = 0;
set_disk_ro(disk, 0);
printk(KERN_INFO "md: %s switched to read-write mode.\n",
mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
/* similar to deny_write_access, but accounts for our holding a reference
* to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
struct inode *inode = file->f_mapping->host;
spin_lock(&inode->i_lock);
if (atomic_read(&inode->i_writecount) > 1) {
spin_unlock(&inode->i_lock);
return -ETXTBSY;
}
atomic_set(&inode->i_writecount, -1);
spin_unlock(&inode->i_lock);
return 0;
}
void restore_bitmap_write_access(struct file *file)
{
struct inode *inode = file->f_mapping->host;
spin_lock(&inode->i_lock);
atomic_set(&inode->i_writecount, 1);
spin_unlock(&inode->i_lock);
}
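/* Reset every configuration field to its default so the mddev can be
* reused after a final stop.
*/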
static void md_clean(struct mddev *mddev)
{
mddev->array_sectors = 0;
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
mddev->external = 0;
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
mddev->flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
mddev->delta_disks = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0;
mddev->resync_mismatches = 0;
mddev->suspend_lo = mddev->suspend_hi = 0;
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
}
static void __md_stop_writes(struct mddev *mddev)
{
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
reap_sync_thread(mddev);
}
del_timer_sync(&mddev->safemode_timer);
bitmap_flush(mddev);
md_super_wait(mddev);
if (!mddev->in_sync || mddev->flags) {
/* mark array as shutdown cleanly */
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
}
void md_stop_writes(struct mddev *mddev)
{
mddev_lock(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);
void md_stop(struct mddev *mddev)
{
mddev->ready = 0;
mddev->pers->stop(mddev);
if (mddev->pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
module_put(mddev->pers->owner);
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, int is_open)
{
int err = 0;
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > is_open) {
printk("md: %s still in use.\n",mdname(mddev));
err = -EBUSY;
goto out;
}
if (mddev->pers) {
__md_stop_writes(mddev);
err = -ENXIO;
if (mddev->ro==1)
goto out;
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
return err;
}
/* mode:
* 0 - completely stop and disassemble array
* 2 - stop but do not disassemble array
*/
static int do_md_stop(struct mddev * mddev, int mode, int is_open)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > is_open ||
mddev->sysfs_active) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
if (mddev->pers) {
if (mddev->ro)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0)
sysfs_unlink_rdev(mddev, rdev);
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
mddev->ro = 0;
} else
mutex_unlock(&mddev->open_mutex);
/*
* Free resources if final stop
*/
if (mode == 0) {
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
if (mddev->bitmap_info.file) {
restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
}
mddev->bitmap_info.offset = 0;
export_array(mddev);
md_clean(mddev);
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
#ifndef MODULE
static void autorun_array(struct mddev *mddev)
{
struct md_rdev *rdev;
int err;
if (list_empty(&mddev->disks))
return;
printk(KERN_INFO "md: running: ");
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
printk("\n");
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
do_md_stop(mddev, 0, 0);
}
}
/*
* let's try to run arrays based on all disks that have arrived
* until now. (those are in pending_raid_disks)
*
* the method: pick the first pending disk, collect all disks with
* the same UUID, remove all from the pending list and put them into
* the 'same_array' list. Then order this list based on superblock
* update time (freshest comes first), kick out 'old' disks and
* compare superblocks. If everything's fine then run it.
*
* If "unit" is allocated, then bump its reference count
*/
static void autorun_devices(int part)
{
struct md_rdev *rdev0, *rdev, *tmp;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: autorun ...\n");
while (!list_empty(&pending_raid_disks)) {
int unit;
dev_t dev;
LIST_HEAD(candidates);
rdev0 = list_entry(pending_raid_disks.next,
struct md_rdev, same_set);
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
rdev_for_each_list(rdev, tmp, &pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
list_move(&rdev->same_set, &candidates);
}
/*
* now we have a set of devices, with all of them having
* mostly sane superblocks. It's time to allocate the
* mddev.
*/
if (part) {
dev = MKDEV(mdp_major,
rdev0->preferred_minor << MdpMinorShift);
unit = MINOR(dev) >> MdpMinorShift;
} else {
dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
unit = MINOR(dev);
}
if (rdev0->preferred_minor != unit) {
printk(KERN_INFO "md: unit number in %s is bad: %d\n",
bdevname(rdev0->bdev, b), rdev0->preferred_minor);
break;
}
md_probe(dev, NULL, NULL);
mddev = mddev_find(dev);
if (!mddev || !mddev->gendisk) {
if (mddev)
mddev_put(mddev);
printk(KERN_ERR
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
mddev->persistent = 1;
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
}
autorun_array(mddev);
mddev_unlock(mddev);
}
/* on success, candidates will be empty, on error
* it won't...
*/
rdev_for_each_list(rdev, tmp, &candidates) {
list_del_init(&rdev->same_set);
export_rdev(rdev);
}
mddev_put(mddev);
}
printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
mdu_version_t ver;
ver.major = MD_MAJOR_VERSION;
ver.minor = MD_MINOR_VERSION;
ver.patchlevel = MD_PATCHLEVEL_VERSION;
if (copy_to_user(arg, &ver, sizeof(ver)))
return -EFAULT;
return 0;
}
static int get_array_info(struct mddev * mddev, void __user * arg)
{
mdu_array_info_t info;
int nr,working,insync,failed,spare;
struct md_rdev *rdev;
nr=working=insync=failed=spare=0;
rdev_for_each(rdev, mddev) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
else
spare++;
}
}
info.major_version = mddev->major_version;
info.minor_version = mddev->minor_version;
info.patch_version = MD_PATCHLEVEL_VERSION;
info.ctime = mddev->ctime;
info.level = mddev->level;
info.size = mddev->dev_sectors / 2;
if (info.size != mddev->dev_sectors / 2) /* overflow */
info.size = -1;
info.nr_disks = nr;
info.raid_disks = mddev->raid_disks;
info.md_minor = mddev->md_minor;
info.not_persistent= !mddev->persistent;
info.utime = mddev->utime;
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state = (1<<MD_SB_BITMAP_PRESENT);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
info.spare_disks = spare;
info.layout = mddev->layout;
info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int get_bitmap_file(struct mddev * mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr, *buf = NULL;
int err = -ENOMEM;
if (md_allow_write(mddev))
file = kmalloc(sizeof(*file), GFP_NOIO);
else
file = kmalloc(sizeof(*file), GFP_KERNEL);
if (!file)
goto out;
/* bitmap disabled, zero the first byte and copy out */
if (!mddev->bitmap || !mddev->bitmap->file) {
file->pathname[0] = '\0';
goto copy_out;
}
buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
if (!buf)
goto out;
ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
if (IS_ERR(ptr))
goto out;
strcpy(file->pathname, ptr);
copy_out:
err = 0;
if (copy_to_user(arg, file, sizeof(*file)))
err = -EFAULT;
out:
kfree(buf);
kfree(file);
return err;
}
static int get_disk_info(struct mddev * mddev, void __user * arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
rdev = find_rdev_nr(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
info.raid_disk = rdev->raid_disk;
info.state = 0;
if (test_bit(Faulty, &rdev->flags))
info.state |= (1<<MD_DISK_FAULTY);
else if (test_bit(In_sync, &rdev->flags)) {
info.state |= (1<<MD_DISK_ACTIVE);
info.state |= (1<<MD_DISK_SYNC);
}
if (test_bit(WriteMostly, &rdev->flags))
info.state |= (1<<MD_DISK_WRITEMOSTLY);
} else {
info.major = info.minor = 0;
info.raid_disk = -1;
info.state = (1<<MD_DISK_REMOVED);
}
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
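/* ADD_NEW_DISK handling. Three cases, in the order they appear below:
* adding a device to a not-yet-assembled array (its superblock is
* loaded and checked against the first device), hot-adding a spare to
* a running array, and adding a device while building a new
* version-0.90 array.
*/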
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
if (!mddev->raid_disks) {
int err;
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
if (!list_empty(&mddev->disks)) {
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
}
}
err = bind_rdev_to_array(rdev, mddev);
if (err)
export_rdev(rdev);
return err;
}
/*
* add_new_disk can be used once the array is assembled
* to add "hot spares". They must already have a superblock
* written
*/
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
if (mddev->persistent)
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
/* set saved_raid_disk if appropriate */
if (!mddev->persistent) {
if (info->state & (1<<MD_DISK_SYNC) &&
info->raid_disk < mddev->raid_disks) {
rdev->raid_disk = info->raid_disk;
set_bit(In_sync, &rdev->flags);
} else
rdev->raid_disk = -1;
} else
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
(!test_bit(In_sync, &rdev->flags) ||
rdev->raid_disk != info->raid_disk)) {
/* This was a hot-add request, but the events don't
* match, so reject it.
*/
export_rdev(rdev);
return -EINVAL;
}
if (test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = rdev->raid_disk;
else
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags); /* just to be sure */
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
else
clear_bit(WriteMostly, &rdev->flags);
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (!err && !mddev->pers->hot_remove_disk) {
/* If there is hot_add_disk but no hot_remove_disk
* then disks are only ever added for geometry changes,
* and so should be made active immediately.
*/
super_types[mddev->major_version].
validate_super(mddev, rdev);
err = mddev->pers->hot_add_disk(mddev, rdev);
if (err)
unbind_rdev_from_array(rdev);
}
if (err)
export_rdev(rdev);
else
sysfs_notify_dirent_safe(rdev->sysfs_state);
md_update_sb(mddev, 1);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (!err)
md_new_event(mddev);
md_wakeup_thread(mddev->thread);
return err;
}
/* otherwise, add_new_disk is only allowed
* for major_version==0 superblocks
*/
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
mdname(mddev));
return -EINVAL;
}
if (!(info->state & (1<<MD_DISK_FAULTY))) {
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
}
rdev->desc_nr = info->number;
if (info->raid_disk < mddev->raid_disks)
rdev->raid_disk = info->raid_disk;
else
rdev->raid_disk = -1;
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
export_rdev(rdev);
return err;
}
}
return 0;
}
static int hot_remove_disk(struct mddev * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
if (rdev->raid_disk >= 0)
goto busy;
kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
return 0;
busy:
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
}
static int hot_add_disk(struct mddev * mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
struct md_rdev *rdev;
if (!mddev->pers)
return -ENODEV;
if (mddev->major_version != 0) {
printk(KERN_WARNING "%s: HOT_ADD may only be used with"
" version-0 superblocks.\n",
mdname(mddev));
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
}
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
}
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
goto abort_export;
}
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
goto abort_export;
/*
* The rest should better be atomic, we can have disk failures
* noticed in interrupt contexts ...
*/
rdev->raid_disk = -1;
md_update_sb(mddev, 1);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_new_event(mddev);
return 0;
abort_export:
export_rdev(rdev);
return err;
}
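/* SET_BITMAP_FILE ioctl: fd >= 0 attaches a file-backed write-intent
* bitmap (the file must not be open for write elsewhere), fd < 0
* removes the current one. The personality is quiesced around the
* change.
*/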
static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err;
if (mddev->pers) {
if (!mddev->pers->quiesce)
return -EBUSY;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
/* we should be able to change the bitmap.. */
}
if (fd >= 0) {
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
mddev->bitmap_info.file = fget(fd);
if (mddev->bitmap_info.file == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
err = deny_bitmap_write_access(mddev->bitmap_info.file);
if (err) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
fput(mddev->bitmap_info.file);
mddev->bitmap_info.file = NULL;
return err;
}
mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
err = bitmap_create(mddev);
if (!err)
err = bitmap_load(mddev);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
fd = -1; /* make sure to put the file */
}
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
if (mddev->bitmap_info.file) {
restore_bitmap_write_access(mddev->bitmap_info.file);
fput(mddev->bitmap_info.file);
}
mddev->bitmap_info.file = NULL;
}
return err;
}
/*
* set_array_info is used in two different ways.
* The original usage is when creating a new array.
* In this usage, raid_disks is > 0 and it together with
* level, size, not_persistent, layout and chunksize determine the
* shape of the array.
* This will always create an array with a type-0.90.0 superblock.
* The newer usage is when assembling an array.
* In this case raid_disks will be 0, and the major_version field is
* used to determine which style super-blocks are to be found on the devices.
* The minor and patch _version numbers are also kept in case the
* super_block handler wishes to interpret them.
*/
static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
/* just setting version number for superblock loading */
if (info->major_version < 0 ||
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
}
mddev->major_version = info->major_version;
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
/* ensure mddev_put doesn't delete this now that there
* is some minimal configuration.
*/
mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
mddev->minor_version = MD_MINOR_VERSION;
mddev->patch_version = MD_PATCHLEVEL_VERSION;
mddev->ctime = get_seconds();
mddev->level = info->level;
mddev->clevel[0] = 0;
mddev->dev_sectors = 2 * (sector_t)info->size;
mddev->raid_disks = info->raid_disks;
/* don't set md_minor, it is determined by which /dev/md* was
* opened
*/
if (info->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else
mddev->recovery_cp = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS;
if (mddev->persistent)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
/*
* Generate a 128 bit UUID
*/
get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
return 0;
}
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
if (mddev->external_size)
return;
mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
struct md_rdev *rdev;
int rv;
int fit = (num_sectors == 0);
if (mddev->pers->resize == NULL)
return -EINVAL;
/* The "num_sectors" is the number of sectors of each device that
* is used. This can only make sense for arrays with redundancy.
* linear and raid0 always use whatever space is available. We can only
* consider changing this number if no resync or reconstruction is
* happening, and if the new size is acceptable. It must fit before the
* sb_start or, if that is <data_offset, it must fit before the size
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
if (mddev->sync_thread)
return -EBUSY;
if (mddev->bitmap)
/* Sorry, cannot grow a bitmap yet, just remove it,
* grow, and re-add.
*/
return -EBUSY;
rdev_for_each(rdev, mddev) {
sector_t avail = rdev->sectors;
if (fit && (num_sectors == 0 || num_sectors > avail))
num_sectors = avail;
if (avail < num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv)
revalidate_disk(mddev->gendisk);
return rv;
}
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
int rv;
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
mddev->delta_disks = raid_disks - mddev->raid_disks;
rv = mddev->pers->check_reshape(mddev);
if (rv < 0)
mddev->delta_disks = 0;
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
* The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
* fields in the info are checked against the array.
* Any differences that cannot be handled will cause an error.
* Normally, only one change can be managed at a time.
*/
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
int rv = 0;
int cnt = 0;
int state = 0;
/* calculate expected state, ignoring low bits */
if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
mddev->ctime != info->ctime ||
mddev->level != info->level ||
/* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent||
mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)
)
return -EINVAL;
/* Check there is only one change */
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
cnt++;
if (mddev->raid_disks != info->raid_disks)
cnt++;
if (mddev->layout != info->layout)
cnt++;
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
cnt++;
if (cnt == 0)
return 0;
if (cnt > 1)
return -EINVAL;
if (mddev->layout != info->layout) {
/* Change layout
* we don't need to do anything at the md level, the
* personality will take care of it all.
*/
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
else {
mddev->new_layout = info->layout;
rv = mddev->pers->check_reshape(mddev);
if (rv)
mddev->new_layout = mddev->layout;
return rv;
}
}
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
if (mddev->raid_disks != info->raid_disks)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL)
return -EINVAL;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
/* add the bitmap */
if (mddev->bitmap)
return -EEXIST;
if (mddev->bitmap_info.default_offset == 0)
return -EINVAL;
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
if (!rv)
rv = bitmap_load(mddev);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
if (!mddev->bitmap)
return -ENOENT;
if (mddev->bitmap->file)
return -EINVAL;
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
return rv;
}
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
if (mddev->pers == NULL)
return -ENODEV;
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENODEV;
md_error(mddev, rdev);
if (!test_bit(Faulty, &rdev->flags))
return -EBUSY;
return 0;
}
/*
* We have a problem here: there is no easy way to give a CHS
* virtual geometry. We currently pretend that we have a 2 heads
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
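/*
 * Worked example (illustrative): a 1 TiB array has 2^31 512-byte
 * sectors, so the fake geometry below reports 2 heads, 4 sectors
 * and 2^31 / 8 = 268435456 cylinders.
 */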
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mddev *mddev = bdev->bd_disk->private_data;
geo->heads = 2;
geo->sectors = 4;
geo->cylinders = mddev->array_sectors / 8;
return 0;
}
static int md_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
int ro;
switch (cmd) {
case RAID_VERSION:
case GET_ARRAY_INFO:
case GET_DISK_INFO:
break;
default:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
}
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
switch (cmd)
{
case RAID_VERSION:
err = get_version(argp);
goto done;
case PRINT_RAID_DEBUG:
err = 0;
md_print_devices();
goto done;
#ifndef MODULE
case RAID_AUTORUN:
err = 0;
autostart_arrays(arg);
goto done;
#endif
default:;
}
/*
* Commands creating/starting a new array:
*/
mddev = bdev->bd_disk->private_data;
if (!mddev) {
BUG();
goto abort;
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
goto abort;
}
switch (cmd)
{
case SET_ARRAY_INFO:
{
mdu_array_info_t info;
if (!arg)
memset(&info, 0, sizeof(info));
else if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
goto abort_unlock;
}
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't update"
" array info. %d\n", err);
goto abort_unlock;
}
goto done_unlock;
}
if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: array %s already has disks!\n",
mdname(mddev));
err = -EBUSY;
goto abort_unlock;
}
if (mddev->raid_disks) {
printk(KERN_WARNING
"md: array %s already initialised!\n",
mdname(mddev));
err = -EBUSY;
goto abort_unlock;
}
err = set_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
goto abort_unlock;
}
}
goto done_unlock;
default:;
}
/*
* Commands querying/configuring an existing array:
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
if ((!mddev->raid_disks && !mddev->external)
&& cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
&& cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto abort_unlock;
}
/*
* Commands even a read-only array can execute:
*/
switch (cmd)
{
case GET_ARRAY_INFO:
err = get_array_info(mddev, argp);
goto done_unlock;
case GET_BITMAP_FILE:
err = get_bitmap_file(mddev, argp);
goto done_unlock;
case GET_DISK_INFO:
err = get_disk_info(mddev, argp);
goto done_unlock;
case RESTART_ARRAY_RW:
err = restart_array(mddev);
goto done_unlock;
case STOP_ARRAY:
err = do_md_stop(mddev, 0, 1);
goto done_unlock;
case STOP_ARRAY_RO:
err = md_set_readonly(mddev, 1);
goto done_unlock;
case BLKROSET:
if (get_user(ro, (int __user *)(arg))) {
err = -EFAULT;
goto done_unlock;
}
err = -EINVAL;
/* if the bdev is going readonly the value of mddev->ro
* does not matter, no writes are coming
*/
if (ro)
goto done_unlock;
/* are we already prepared for writes? */
if (mddev->ro != 1)
goto done_unlock;
/* transitioning to readauto need only happen for
* arrays that call md_write_start
*/
if (mddev->pers) {
err = restart_array(mddev);
if (err == 0) {
mddev->ro = 2;
set_disk_ro(mddev->gendisk, 0);
}
}
goto done_unlock;
}
/*
* The remaining ioctls are changing the state of the
* superblock, so we do not allow them on read-only arrays.
* However non-MD ioctls (e.g. get-size) will still come through
* here and hit the 'default' below, so only disallow
* 'md' ioctls, and switch to rw mode if started auto-readonly.
*/
if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
} else {
err = -EROFS;
goto abort_unlock;
}
}
switch (cmd)
{
case ADD_NEW_DISK:
{
mdu_disk_info_t info;
if (copy_from_user(&info, argp, sizeof(info)))
err = -EFAULT;
else
err = add_new_disk(mddev, &info);
goto done_unlock;
}
case HOT_REMOVE_DISK:
err = hot_remove_disk(mddev, new_decode_dev(arg));
goto done_unlock;
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto done_unlock;
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
goto done_unlock;
case RUN_ARRAY:
err = do_md_run(mddev);
goto done_unlock;
case SET_BITMAP_FILE:
err = set_bitmap_file(mddev, (int)arg);
goto done_unlock;
default:
err = -EINVAL;
goto abort_unlock;
}
done_unlock:
abort_unlock:
if (mddev->hold_active == UNTIL_IOCTL &&
err != -EINVAL)
mddev->hold_active = 0;
mddev_unlock(mddev);
return err;
done:
if (err)
MD_BUG();
abort:
return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
/* These take in integer arg, do not convert */
break;
default:
arg = (unsigned long)compat_ptr(arg);
break;
}
return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static int md_open(struct block_device *bdev, fmode_t mode)
{
/*
* Succeed if we can lock the mddev, which confirms that
* it isn't being stopped right now.
*/
struct mddev *mddev = mddev_find(bdev->bd_dev);
int err;
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
*/
mddev_put(mddev);
/* Wait until bdev->bd_disk is definitely gone */
flush_workqueue(md_misc_wq);
/* Then retry the open from the top */
return -ERESTARTSYS;
}
BUG_ON(mddev != bdev->bd_disk->private_data);
if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
err = 0;
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
out:
return err;
}
static int md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
mddev_put(mddev);
return 0;
}
static int md_media_changed(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
struct mddev *mddev = disk->private_data;
mddev->changed = 0;
return 0;
}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
struct md_thread *thread = arg;
/*
* md_thread is a 'system-thread', its priority should be very
* high. We avoid resource deadlocks individually in each
* raid personality. (RAID5 does preallocation) We also use RR and
* the very same RT priority as kswapd, thus we will never get
* into a priority inversion deadlock.
*
* we definitely have to have equal or higher priority than
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
/* We need to wait INTERRUPTIBLE so that
* we don't add to the load-average.
* That means we need to be sure no signals are
* pending
*/
if (signal_pending(current))
flush_signals(current);
wait_event_interruptible_timeout
(thread->wqueue,
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
clear_bit(THREAD_WAKEUP, &thread->flags);
if (!kthread_should_stop())
thread->run(thread->mddev);
}
return 0;
}
void md_wakeup_thread(struct md_thread *thread)
{
if (thread) {
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
set_bit(THREAD_WAKEUP, &thread->flags);
wake_up(&thread->wqueue);
}
}
struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev,
const char *name)
{
struct md_thread *thread;
thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
if (!thread)
return NULL;
init_waitqueue_head(&thread->wqueue);
thread->run = run;
thread->mddev = mddev;
thread->timeout = MAX_SCHEDULE_TIMEOUT;
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
name ?: mddev->pers->name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
}
return thread;
}
void md_unregister_thread(struct md_thread **threadp)
{
struct md_thread *thread = *threadp;
if (!thread)
return;
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
*threadp = NULL;
spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
}
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
if (!mddev) {
MD_BUG();
return;
}
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
if (!mddev->pers || !mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
/* seq_file implementation /proc/mdstat */
static void status_unused(struct seq_file *seq)
{
int i = 0;
struct md_rdev *rdev;
seq_printf(seq, "unused devices: ");
list_for_each_entry(rdev, &pending_raid_disks, same_set) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
bdevname(rdev->bdev,b));
}
if (!i)
seq_printf(seq, "<none>");
seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, struct mddev * mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
sector_t rt;
int scale;
unsigned int per_milli;
resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
/*
* Should not happen.
*/
if (!max_sectors) {
MD_BUG();
return;
}
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
* u32, as those are the requirements for sector_div.
* Thus 'scale' must be at least 10
*/
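/* Worked example (illustrative): with max_sectors = 2^40 (a 512 TiB
 * resync target) the loop below leaves scale at 10, so the divisor
 * (max_sectors>>10)+1 = 2^30+1 fits in a u32.  Half way through,
 * resync = 2^39 and res = (2^39>>10)*1000 / (2^30+1) ~= 500,
 * i.e. 50.0%.
 */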
scale = 10;
if (sizeof(sector_t) > sizeof(unsigned long)) {
while ( max_sectors/2 > (1ULL<<(scale+32)))
scale++;
}
res = (resync>>scale)*1000;
sector_div(res, (u32)((max_sectors>>scale)+1));
per_milli = res;
{
int i, x = per_milli/50, y = 20-x;
seq_printf(seq, "[");
for (i = 0; i < x; i++)
seq_printf(seq, "=");
seq_printf(seq, ">");
for (i = 0; i < y; i++)
seq_printf(seq, ".");
seq_printf(seq, "] ");
}
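/* Illustrative: per_milli == 350 gives x = 7 and y = 13, so the
 * /proc/mdstat progress bar reads "[=======>.............]".
 */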
seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
(test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
"reshape" :
(test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
"check" :
(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
"resync" : "recovery"))),
per_milli/10, per_milli % 10,
(unsigned long long) resync/2,
(unsigned long long) max_sectors/2);
/*
* dt: time from mark until now
* db: blocks written from mark until now
* rt: remaining time
*
* rt is a sector_t, so could be 32bit or 64bit.
* So we divide before multiply in case it is 32bit and close
* to the limit.
* We scale the divisor (db) by 32 to avoid losing precision
* near the end of resync when the number of remaining sectors
* is close to 'db'.
* We then divide rt by 32 after multiplying by db to compensate.
* The '+1' avoids division by zero if db is very small.
*/
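/* Worked example (illustrative): with dt = 100s, db = 102400 sectors
 * since the mark and rt = 2097152 sectors remaining:
 * sector_div(rt, 102400/32+1) -> 2097152/3201 = 655, then
 * 655 * 100 >> 5 = 2046 seconds, printed as "finish=34.1min",
 * with "speed=512K/sec" from db/2/dt.
 */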
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- mddev->resync_mark_cnt;
rt = max_sectors - resync; /* number of remaining sectors */
sector_div(rt, db/32+1);
rt *= dt;
rt >>= 5;
seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
((unsigned long)rt % 60)/6);
seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
struct list_head *tmp;
loff_t l = *pos;
struct mddev *mddev;
if (l >= 0x10000)
return NULL;
if (!l--)
/* header */
return (void*)1;
spin_lock(&all_mddevs_lock);
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
mddev_get(mddev);
spin_unlock(&all_mddevs_lock);
return mddev;
}
spin_unlock(&all_mddevs_lock);
if (!l--)
return (void*)2;/* tail */
return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
spin_lock(&all_mddevs_lock);
if (v == (void*)1)
tmp = all_mddevs.next;
else
tmp = mddev->all_mddevs.next;
if (tmp != &all_mddevs)
next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
mddev_put(mddev);
return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
if (mddev && v != (void*)1 && v != (void*)2)
mddev_put(mddev);
}
static int md_seq_show(struct seq_file *seq, void *v)
{
struct mddev *mddev = v;
sector_t sectors;
struct md_rdev *rdev;
if (v == (void*)1) {
struct md_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
list_for_each_entry(pers, &pers_list, list)
seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
seq->poll_event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
status_unused(seq);
return 0;
}
if (mddev_lock(mddev) < 0)
return -EINTR;
if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
seq_printf(seq, "%s : %sactive", mdname(mddev),
mddev->pers ? "" : "in");
if (mddev->pers) {
if (mddev->ro==1)
seq_printf(seq, " (read-only)");
if (mddev->ro==2)
seq_printf(seq, " (auto-read-only)");
seq_printf(seq, " %s", mddev->pers->name);
}
sectors = 0;
rdev_for_each(rdev, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
if (test_bit(WriteMostly, &rdev->flags))
seq_printf(seq, "(W)");
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
}
if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
if (test_bit(Replacement, &rdev->flags))
seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
if (!list_empty(&mddev->disks)) {
if (mddev->pers)
seq_printf(seq, "\n %llu blocks",
(unsigned long long)
mddev->array_sectors / 2);
else
seq_printf(seq, "\n %llu blocks",
(unsigned long long)sectors / 2);
}
if (mddev->persistent) {
if (mddev->major_version != 0 ||
mddev->minor_version != 90) {
seq_printf(seq," super %d.%d",
mddev->major_version,
mddev->minor_version);
}
} else if (mddev->external)
seq_printf(seq, " super external:%s",
mddev->metadata_type);
else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
seq_printf(seq, "\n ");
} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
seq_printf(seq, "\tresync=DELAYED\n ");
else if (mddev->recovery_cp < MaxSector)
seq_printf(seq, "\tresync=PENDING\n ");
}
} else
seq_printf(seq, "\n ");
bitmap_status(seq, mddev->bitmap);
seq_printf(seq, "\n");
}
mddev_unlock(mddev);
return 0;
}
static const struct seq_operations md_seq_ops = {
.start = md_seq_start,
.next = md_seq_next,
.stop = md_seq_stop,
.show = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int error;
error = seq_open(file, &md_seq_ops);
if (error)
return error;
seq = file->private_data;
seq->poll_event = atomic_read(&md_event_count);
return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
int mask;
poll_wait(filp, &md_event_waiters, wait);
/* always allow read */
mask = POLLIN | POLLRDNORM;
if (seq->poll_event != atomic_read(&md_event_count))
mask |= POLLERR | POLLPRI;
return mask;
}
static const struct file_operations md_seq_fops = {
.owner = THIS_MODULE,
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
.poll = mdstat_poll,
};
int register_md_personality(struct md_personality *p)
{
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
spin_unlock(&pers_lock);
return 0;
}
int unregister_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
list_del_init(&p->list);
spin_unlock(&pers_lock);
return 0;
}
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev * rdev;
int idle;
int curr_events;
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
(int)part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
* So resync activity will cause curr_events to be smaller than
* when there was no such activity.
* non-sync IO will cause disk_stat to increase without
* increasing sync_io so curr_events will (eventually)
* be larger than it was before. Once it becomes
* substantially larger, the test below will cause
* the array to appear non-idle, and resync will slow
* down.
* If there is a lot of outstanding resync activity when
* we set last_event to curr_events, then all that activity
* completing might cause the array to appear non-idle
* and resync will be slowed down even though there might
* not have been non-resync activity. This will only
* happen once though. 'last_events' will soon reflect
* the state where there is little or no outstanding
* resync requests, and further resync activity will
* always make curr_events less than last_events.
*
*/
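/* Illustrative: if this disk saw 1000 sectors of I/O since the
 * last check, of which 950 were resync, curr_events only moves
 * by ~50 and stays within the 64-sector slack below; 1000
 * sectors of ordinary I/O would move it by 1000 and mark the
 * array non-idle.
 */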
if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events;
idle = 0;
}
}
rcu_read_unlock();
return idle;
}
void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
/* another "blocks" (512byte) blocks have been synced */
atomic_sub(blocks, &mddev->recovery_active);
wake_up(&mddev->recovery_wait);
if (!ok) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_wakeup_thread(mddev->thread);
// stop recovery, signal do_sync ....
}
}
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
* and wait for it to complete.
*/
void md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
return;
BUG_ON(mddev->ro == 1);
if (mddev->ro == 2) {
/* need to switch to read/write */
mddev->ro = 0;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
did_change = 1;
}
atomic_inc(&mddev->writes_pending);
if (mddev->safemode == 1)
mddev->safemode = 0;
if (mddev->in_sync) {
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
md_wakeup_thread(mddev->thread);
did_change = 1;
}
spin_unlock_irq(&mddev->write_lock);
}
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(struct mddev *mddev)
{
if (atomic_dec_and_test(&mddev->writes_pending)) {
if (mddev->safemode == 2)
md_wakeup_thread(mddev->thread);
else if (mddev->safemode_delay)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
* attempting a GFP_KERNEL allocation while holding the mddev lock.
* Must be called with mddev_lock held.
*
* In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
* is dropped, so return -EAGAIN after notifying userspace.
*/
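/*
 * Typical caller pattern (sketch only, not verbatim from any
 * personality):
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;	(-EAGAIN: caller retries later)
 *	ptr = kmalloc(size, GFP_KERNEL);
 */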
int md_allow_write(struct mddev *mddev)
{
if (!mddev->pers)
return 0;
if (mddev->ro)
return 0;
if (!mddev->pers->sync_request)
return 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
set_bit(MD_CHANGE_PENDING, &mddev->flags);
if (mddev->safemode_delay &&
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock_irq(&mddev->write_lock);
md_update_sb(mddev, 0);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock_irq(&mddev->write_lock);
if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
return -EAGAIN;
else
return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
void md_do_sync(struct mddev *mddev)
{
struct mddev *mddev2;
unsigned int currspeed = 0,
window;
sector_t max_sectors,j, io_sectors;
unsigned long mark[SYNC_MARKS];
sector_t mark_cnt[SYNC_MARKS];
int last_mark,m;
struct list_head *tmp;
sector_t last_check;
int skipped = 0;
struct md_rdev *rdev;
char *desc;
/* just in case the thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if (mddev->ro) /* never try to sync a read-only array */
return;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
desc = "data-check";
else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
desc = "requested-resync";
else
desc = "resync";
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
desc = "reshape";
else
desc = "recovery";
/* we overload curr_resync somewhat here.
* 0 == not engaged in resync at all
* 2 == checking that there is no conflict with another sync
* 1 == like 2, but have yielded to allow conflicting resync to
* commence
* other == active in resync - this many blocks
*
* Before starting a resync we must have set curr_resync to
* 2, and then checked that every "conflicting" array has curr_resync
* less than ours. When we find one that is the same or higher
* we wait on resync_wait. To avoid deadlock, we reduce curr_resync
* to 1 if we choose to yield (based arbitrarily on address of mddev structure).
* This will mean we have to start checking from the beginning again.
*
*/
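/* Illustrative: if two arrays sharing physical units both reach
 * curr_resync == 2, the one with the lower struct mddev address
 * yields (drops to 1) and waits, so exactly one of them proceeds.
 */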
do {
mddev->curr_resync = 2;
try_again:
if (kthread_should_stop())
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (!mddev->parallel_resync
&& mddev2->curr_resync
&& match_mddev_units(mddev, mddev2)) {
DEFINE_WAIT(wq);
if (mddev < mddev2 && mddev->curr_resync == 2) {
/* arbitrarily yield */
mddev->curr_resync = 1;
wake_up(&resync_wait);
}
if (mddev > mddev2 && mddev->curr_resync == 1)
/* no need to wait here, we can wait the next
* time 'round when curr_resync == 2
*/
continue;
/* We need to wait 'interruptible' so as not to
* contribute to the load average, and not to
* be caught by 'softlockup'
*/
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
" until %s has finished (they"
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
}
finish_wait(&resync_wait, &wq);
}
}
} while (mddev->curr_resync < 2);
j = 0;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* resync follows the size requested by the personality,
* which defaults to physical size, but can be virtual size
*/
max_sectors = mddev->resync_max_sectors;
mddev->resync_mismatches = 0;
/* we don't use the checkpoint if there's a bitmap */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
j = mddev->resync_min;
else if (!mddev->bitmap)
j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->dev_sectors;
else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
rcu_read_unlock();
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ speed:"
" %d KB/sec/disk.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for %s.\n",
speed_max(mddev), desc);
is_mddev_idle(mddev, 1); /* this initializes IO event counters */
io_sectors = 0;
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
mark_cnt[m] = io_sectors;
}
last_mark = 0;
mddev->resync_mark = mark[last_mark];
mddev->resync_mark_cnt = mark_cnt[last_mark];
/*
* Tune reconstruction:
*/
window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
if (j>2) {
printk(KERN_INFO
"md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
}
mddev->curr_resync_completed = j;
while (j < max_sectors) {
sector_t sectors;
skipped = 0;
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
while (j >= mddev->resync_max && !kthread_should_stop()) {
/* As this condition is controlled by user-space,
* we can block indefinitely, so use '_interruptible'
* to avoid triggering warnings.
*/
flush_signals(current); /* just in case */
wait_event_interruptible(mddev->recovery_wait,
mddev->resync_max > j
|| kthread_should_stop());
}
if (kthread_should_stop())
goto interrupted;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto out;
}
if (!skipped) { /* actual IO requested */
io_sectors += sectors;
atomic_add(sectors, &mddev->recovery_active);
}
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
j += sectors;
if (j>1) mddev->curr_resync = j;
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
last_check = io_sectors;
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
int next = (last_mark+1) % SYNC_MARKS;
mddev->resync_mark = mark[next];
mddev->resync_mark_cnt = mark_cnt[next];
mark[next] = jiffies;
mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
last_mark = next;
}
if (kthread_should_stop())
goto interrupted;
/*
* this loop exits only when we are slower than
* the 'hard' speed limit, or the system was IO-idle for
* a jiffy.
* the system might be non-idle CPU-wise, but we only care
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
if ((currspeed > speed_max(mddev)) ||
!is_mddev_idle(mddev, 0)) {
msleep(500);
goto repeat;
}
}
}
printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
/*
* this also signals 'finished resyncing' to md_stop
*/
out:
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
mddev->recovery_cp =
mddev->curr_resync_completed;
}
} else
mddev->recovery_cp = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
}
skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return;
interrupted:
/*
* got a signal, exit.
*/
printk(KERN_INFO
"md: md_do_sync() got signal ... exiting\n");
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto out;
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(struct mddev *mddev)
{
struct md_rdev *rdev;
int spares = 0;
int removed = 0;
mddev->curr_resync_completed = 0;
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
removed++;
}
}
if (removed)
sysfs_notify(&mddev->kobj, NULL,
"degraded");
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
}
return spares;
}
static void reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev))
sysfs_notify(&mddev->kobj, NULL,
"degraded");
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
/* If the array is no longer degraded, then any saved_raid_disk
 * information must be scrapped.  Also if any device is now
 * In_sync we must scrape the saved_raid_disk for that device
 * so that the superblock for an incrementally recovered device
 * is written out.
 */
rdev_for_each(rdev, mddev)
if (!mddev->degraded ||
test_bit(In_sync, &rdev->flags))
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
* Raid personalities that don't have a thread (linear/raid0) do not
* need this as they never do any recovery or update the superblock.
*
* It does not do any resync itself, but rather "forks" off other threads
* to do that as needed.
* When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
* "->recovery" and create a thread at ->sync_thread.
* When the thread finishes it sets MD_RECOVERY_DONE
* and wakes up this thread, which will reap the thread and finish up.
* This thread also removes any faulty devices (with nr_pending == 0).
*
* The overall approach is:
* 1/ if the superblock needs updating, update it.
* 2/ If a recovery thread is running, don't do anything else.
* 3/ If recovery has finished, clean up, possibly marking spares active.
* 4/ If there are any faulty devices, remove them.
* 5/ If array is degraded, try to add spare devices
* 6/ If array has spares or is not in-sync, start a resync thread.
*/
void md_check_recovery(struct mddev *mddev)
{
if (mddev->suspended)
return;
if (mddev->bitmap)
bitmap_daemon_work(mddev);
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
mdname(mddev));
mddev->safemode = 2;
}
flush_signals(current);
}
if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return;
if ( ! (
(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
&& !mddev->in_sync && mddev->recovery_cp == MaxSector)
))
return;
if (mddev_trylock(mddev)) {
int spares = 0;
if (mddev->ro) {
/* Only thing we do on a ro array is remove
* failed devices.
*/
struct md_rdev *rdev;
rdev_for_each(rdev, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
test_bit(Faulty, &rdev->flags) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (!mddev->external) {
int did_change = 0;
spin_lock_irq(&mddev->write_lock);
if (mddev->safemode &&
!atomic_read(&mddev->writes_pending) &&
!mddev->in_sync &&
mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
did_change = 1;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
spin_unlock_irq(&mddev->write_lock);
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
if (mddev->flags)
md_update_sb(mddev, 0);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
/* resync/recovery still happening */
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
goto unlock;
}
if (mddev->sync_thread) {
reap_sync_thread(mddev);
goto unlock;
}
/* Set RUNNING before clearing NEEDED to avoid
* any transients in the value of "sync_action".
*/
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
/* Clear some bits that don't mean anything, but
* might be left set
*/
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
goto unlock;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
* Spares are also removed and re-added, to allow
* the personality to fail the re-add.
*/
if (mddev->reshape_position != MaxSector) {
if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
goto unlock;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev))) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
goto unlock;
if (mddev->pers->sync_request) {
if (spares && mddev->bitmap && ! mddev->bitmap->file) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
*/
bitmap_write_all(mddev->bitmap);
}
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
printk(KERN_ERR "%s: could not start resync"
" thread...\n",
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
}
unlock:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
&mddev->recovery))
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
mddev_unlock(mddev);
}
}
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
!test_bit(Blocked, &rdev->flags) &&
!test_bit(BlockedBadBlocks, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
/* Bad block management.
* We can record which blocks on each device are 'bad' and so just
* fail those blocks, or that stripe, rather than the whole device.
* Entries in the bad-block table are 64 bits wide. This comprises:
* Length of bad-range, in sectors: 0-511 for lengths 1-512
* Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
* A 'shift' can be set so that larger blocks are tracked and
* consequently larger devices can be covered.
* 'Acknowledged' flag - 1 bit. - the most significant bit.
*
* Locking of the bad-block table uses a seqlock so md_is_badblock
* might need to retry if it is very unlucky.
* We will sometimes want to check for bad blocks in a bi_end_io function,
* so we use the write_seqlock_irq variant.
*
* When looking for a bad block we specify a range and want to
* know if any block in the range is bad. So we binary-search
* to the last range that starts at-or-before the given endpoint,
* (or "before the sector after the target range")
* then see if it ends after the given start.
* We return
* 0 if there are no known bad blocks in the range
* 1 if there are known bad blocks which are all acknowledged
* -1 if there are bad blocks which have not yet been acknowledged in metadata.
* plus the start/length of the first bad section we overlap.
*/
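/*
 * Layout sketch, assuming the BB_MAKE/BB_OFFSET/BB_LEN/BB_ACK macros
 * from md.h pack each u64 as ack:1 | start:54 | (length-1):9.  For
 * example:
 *
 *	u64 e = BB_MAKE(1000, 8, 1);
 *	BB_OFFSET(e) == 1000	(start sector)
 *	BB_LEN(e) == 8		(sectors 1000..1007 are bad)
 *	BB_ACK(e) == 1		(already recorded in the metadata)
 */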
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors)
{
int hi;
int lo = 0;
u64 *p = bb->page;
int rv = 0;
sector_t target = s + sectors;
unsigned seq;
if (bb->shift > 0) {
/* round the start down, and the end up */
s >>= bb->shift;
target += (1<<bb->shift) - 1;
target >>= bb->shift;
sectors = target - s;
}
/* 'target' is now the first block after the bad range */
retry:
seq = read_seqbegin(&bb->lock);
hi = bb->count;
/* Binary search between lo and hi for 'target'
* i.e. for the last range that starts before 'target'
*/
/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
* are known not to be the last range before target.
* VARIANT: hi-lo is the number of possible
* ranges, and decreases until it reaches 1
*/
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
/* This could still be the one, earlier ranges
* could not. */
lo = mid;
else
/* This and later ranges are definitely out. */
hi = mid;
}
/* 'lo' might be the last that started before target, but 'hi' isn't */
if (hi > lo) {
/* need to check all ranges that end after 's' to see if
* any are unacknowledged.
*/
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
if (BB_OFFSET(p[lo]) < target) {
/* starts before the end, and finishes after
* the start, so they must overlap
*/
if (rv != -1 && BB_ACK(p[lo]))
rv = 1;
else
rv = -1;
*first_bad = BB_OFFSET(p[lo]);
*bad_sectors = BB_LEN(p[lo]);
}
lo--;
}
}
if (read_seqretry(&bb->lock, seq))
goto retry;
return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
/*
* Add a range of bad blocks to the table.
* This might extend the table, or might contract it
* if two adjacent ranges can be merged.
* We binary-search to find the 'insertion' point, then
* decide how best to handle it.
*/
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
int acknowledged)
{
u64 *p;
int lo, hi;
int rv = 1;
if (bb->shift < 0)
/* badblocks are disabled */
return 0;
if (bb->shift) {
/* round the start down, and the end up */
sector_t next = s + sectors;
s >>= bb->shift;
next += (1<<bb->shift) - 1;
next >>= bb->shift;
sectors = next - s;
}
write_seqlock_irq(&bb->lock);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts at-or-before 's' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a <= s)
lo = mid;
else
hi = mid;
}
if (hi > lo && BB_OFFSET(p[lo]) > s)
hi = lo;
if (hi > lo) {
/* we found a range that might merge with the start
* of our new range
*/
sector_t a = BB_OFFSET(p[lo]);
sector_t e = a + BB_LEN(p[lo]);
int ack = BB_ACK(p[lo]);
if (e >= s) {
/* Yes, we can merge with a previous range */
if (s == a && s + sectors >= e)
/* new range covers old */
ack = acknowledged;
else
ack = ack && acknowledged;
if (e < s + sectors)
e = s + sectors;
if (e - a <= BB_MAX_LEN) {
p[lo] = BB_MAKE(a, e-a, ack);
s = e;
} else {
/* does not all fit in one range,
* make p[lo] maximal
*/
if (BB_LEN(p[lo]) != BB_MAX_LEN)
p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
}
}
if (sectors && hi < bb->count) {
/* 'hi' points to the first range that starts after 's'.
* Maybe we can merge with the start of that range */
sector_t a = BB_OFFSET(p[hi]);
sector_t e = a + BB_LEN(p[hi]);
int ack = BB_ACK(p[hi]);
if (a <= s + sectors) {
/* merging is possible */
if (e <= s + sectors) {
/* full overlap */
e = s + sectors;
ack = acknowledged;
} else
ack = ack && acknowledged;
a = s;
if (e - a <= BB_MAX_LEN) {
p[hi] = BB_MAKE(a, e-a, ack);
s = e;
} else {
p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
s = a + BB_MAX_LEN;
}
sectors = e - s;
lo = hi;
hi++;
}
}
if (sectors == 0 && hi < bb->count) {
/* we might be able to combine lo and hi */
/* Note: 's' is at the end of 'lo' */
sector_t a = BB_OFFSET(p[hi]);
int lolen = BB_LEN(p[lo]);
int hilen = BB_LEN(p[hi]);
int newlen = lolen + hilen - (s - a);
if (s >= a && newlen < BB_MAX_LEN) {
/* yes, we can combine them */
int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
memmove(p + hi, p + hi + 1,
(bb->count - hi - 1) * 8);
bb->count--;
}
}
while (sectors) {
/* didn't merge (all of it).
* Need to add a range just before 'hi' */
if (bb->count >= MD_MAX_BADBLOCKS) {
/* No room for more */
rv = 0;
break;
} else {
int this_sectors = sectors;
memmove(p + hi + 1, p + hi,
(bb->count - hi) * 8);
bb->count++;
if (this_sectors > BB_MAX_LEN)
this_sectors = BB_MAX_LEN;
p[hi] = BB_MAKE(s, this_sectors, acknowledged);
sectors -= this_sectors;
s += this_sectors;
}
}
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
write_sequnlock_irq(&bb->lock);
return rv;
}
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int acknowledged)
{
int rv = md_set_badblocks(&rdev->badblocks,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
/*
* Remove a range of bad blocks from the table.
* This may involve extending the table if we split a region,
* but it must not fail. So if the table becomes full, we just
* drop the remove request.
*/
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
u64 *p;
int lo, hi;
sector_t target = s + sectors;
int rv = 0;
if (bb->shift > 0) {
/* When clearing we round the start up and the end down.
* This should not matter as the shift should align with
* the block size and no rounding should ever be needed.
* However it is better to think a block is bad when it
* isn't than to think a block is not bad when it is.
*/
s += (1<<bb->shift) - 1;
s >>= bb->shift;
target >>= bb->shift;
sectors = target - s;
}
write_seqlock_irq(&bb->lock);
p = bb->page;
lo = 0;
hi = bb->count;
/* Find the last range that starts before 'target' */
while (hi - lo > 1) {
int mid = (lo + hi) / 2;
sector_t a = BB_OFFSET(p[mid]);
if (a < target)
lo = mid;
else
hi = mid;
}
if (hi > lo) {
/* p[lo] is the last range that could overlap the
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
sector_t end = a + BB_LEN(p[lo]);
if (a < s) {
/* we need to split this range */
if (bb->count >= MD_MAX_BADBLOCKS) {
rv = 0;
goto out;
}
memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
bb->count++;
p[lo] = BB_MAKE(a, s-a, ack);
lo++;
}
p[lo] = BB_MAKE(target, end - target, ack);
/* there is no longer an overlap */
hi = lo;
lo--;
}
while (lo >= 0 &&
BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
int ack = BB_ACK(p[lo]);
sector_t start = BB_OFFSET(p[lo]);
p[lo] = BB_MAKE(start, s - start, ack);
/* now p[lo] doesn't overlap, so we can stop searching */
break;
}
lo--;
}
/* 'lo' is strictly before, 'hi' is strictly after,
* anything between needs to be discarded
*/
if (hi - lo > 1) {
memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
bb->count -= (hi - lo - 1);
}
}
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
return rv;
}
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors)
{
return md_clear_badblocks(&rdev->badblocks,
s + rdev->data_offset,
sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
/*
* Acknowledge all bad blocks in a list.
* This only succeeds if ->changed is clear. It is used by
* in-kernel metadata updates.
*/
void md_ack_all_badblocks(struct badblocks *bb)
{
if (bb->page == NULL || bb->changed)
/* no point even trying */
return;
write_seqlock_irq(&bb->lock);
if (bb->changed == 0 && bb->unacked_exist) {
u64 *p = bb->page;
int i;
for (i = 0; i < bb->count ; i++) {
if (!BB_ACK(p[i])) {
sector_t start = BB_OFFSET(p[i]);
int len = BB_LEN(p[i]);
p[i] = BB_MAKE(start, len, 1);
}
}
bb->unacked_exist = 0;
}
write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
/* sysfs access to bad-blocks list.
* We present two files.
* 'bad-blocks' lists sector numbers and lengths of ranges that
* are recorded as bad. The list is truncated to fit within
* the one-page limit of sysfs.
* Writing "sector length" to this file adds an acknowledged
* bad block list.
* 'unacknowledged-bad-blocks' lists bad blocks that have not yet
* been acknowledged. Writing to this file adds bad blocks
* without acknowledging them. This is largely for testing.
*/
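/*
 * Illustrative usage from userspace (the exact sysfs paths and
 * attribute names depend on the kernel version):
 *
 *	# record sectors 1000-1007 as acknowledged bad blocks
 *	echo "1000 8" > /sys/block/md0/md/dev-sda1/bad_blocks
 *	cat /sys/block/md0/md/dev-sda1/bad_blocks
 *	1000 8
 */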
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
size_t len;
int i;
u64 *p = bb->page;
unsigned seq;
if (bb->shift < 0)
return 0;
retry:
seq = read_seqbegin(&bb->lock);
len = 0;
i = 0;
while (len < PAGE_SIZE && i < bb->count) {
sector_t s = BB_OFFSET(p[i]);
unsigned int length = BB_LEN(p[i]);
int ack = BB_ACK(p[i]);
i++;
if (unack && ack)
continue;
len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
(unsigned long long)s << bb->shift,
length << bb->shift);
}
if (unack && len == 0)
bb->unacked_exist = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
return len;
}
#define DO_DEBUG 1
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
unsigned long long sector;
int length;
char newline;
#ifdef DO_DEBUG
/* Allow clearing via sysfs *only* for testing/debugging.
* Normally only a successful write may clear a badblock
*/
int clear = 0;
if (page[0] == '-') {
clear = 1;
page++;
}
#endif /* DO_DEBUG */
switch (sscanf(page, "%llu %d%c", §or, &length, &newline)) {
case 3:
if (newline != '\n')
return -EINVAL;
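/* fall through */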
case 2:
if (length <= 0)
return -EINVAL;
break;
default:
return -EINVAL;
}
#ifdef DO_DEBUG
if (clear) {
md_clear_badblocks(bb, sector, length);
return len;
}
#endif /* DO_DEBUG */
if (md_set_badblocks(bb, sector, length, !unack))
return len;
else
return -ENOSPC;
}
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
struct list_head *tmp;
struct mddev *mddev;
int need_delay = 0;
for_each_mddev(mddev, tmp) {
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
mddev->safemode = 2;
mddev_unlock(mddev);
}
need_delay = 1;
}
/*
* certain more exotic SCSI devices are known to be
* volatile wrt too early system reboots. While the
* right place to handle this issue is the given
* driver, we do want to have a safe RAID driver ...
*/
if (need_delay)
mdelay(1000*1);
return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
.notifier_call = md_notify_reboot,
.next = NULL,
.priority = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
int ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
md_misc_wq = alloc_workqueue("md_misc", 0, 0);
if (!md_misc_wq)
goto err_misc_wq;
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
goto err_md;
if ((ret = register_blkdev(0, "mdp")) < 0)
goto err_mdp;
mdp_major = ret;
blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
register_reboot_notifier(&md_notifier);
raid_table_header = register_sysctl_table(raid_root_table);
md_geninit();
return 0;
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
return ret;
}
#ifndef MODULE
/*
* Searches all registered partitions for autorun RAID arrays
* at boot time.
*/
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
struct list_head list;
dev_t dev;
};
void md_autodetect_dev(dev_t dev)
{
struct detected_devices_node *node_detected_dev;
node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
if (node_detected_dev) {
node_detected_dev->dev = dev;
list_add_tail(&node_detected_dev->list, &all_detected_devices);
} else {
printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
}
}
static void autostart_arrays(int part)
{
struct md_rdev *rdev;
struct detected_devices_node *node_detected_dev;
dev_t dev;
int i_scanned, i_passed;
i_scanned = 0;
i_passed = 0;
printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
i_scanned++;
node_detected_dev = list_entry(all_detected_devices.next,
struct detected_devices_node, list);
list_del(&node_detected_dev->list);
dev = node_detected_dev->dev;
kfree(node_detected_dev);
rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
if (test_bit(Faulty, &rdev->flags)) {
MD_BUG();
continue;
}
set_bit(AutoDetected, &rdev->flags);
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
}
printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
i_scanned, i_passed);
autorun_devices(part);
}
#endif /* !MODULE */
static __exit void md_exit(void)
{
struct mddev *mddev;
struct list_head *tmp;
blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MD_MAJOR,"md");
unregister_blkdev(mdp_major, "mdp");
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
remove_proc_entry("mdstat", NULL);
for_each_mddev(mddev, tmp) {
export_array(mddev);
mddev->hold_active = 0;
}
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
char *e;
int num = simple_strtoul(val, &e, 10);
if (*val && (*e == '\0' || *e == '\n')) {
start_readonly = num;
return 0;
}
return -EINVAL;
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
| gpl-2.0 |
deepongi/android_kernel_sony_msm | drivers/scsi/initio.c | 2713 | 83304 | /**************************************************************************
* Initio 9100 device driver for Linux.
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
* Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
* Copyright (c) 2007 Red Hat
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
*************************************************************************
*
* DESCRIPTION:
*
* This is the Linux low-level SCSI driver for Initio INI-9X00U/UW SCSI host
* adapters
*
* 08/06/97 hc - v1.01h
* - Support inic-940 and inic-935
* 09/26/97 hc - v1.01i
* - Make correction from J.W. Schultz suggestion
* 10/13/97 hc - Support reset function
* 10/21/97 hc - v1.01j
* - Support 32 LUN (SCSI 3)
* 01/14/98 hc - v1.01k
* - Fix memory allocation problem
* 03/04/98 hc - v1.01l
* - Fix tape rewind which would hang the system
* - Set can_queue to initio_num_scb
* 06/25/98 hc - v1.01m
* - Get it to work for kernel version >= 2.1.75
* - Dynamic assign SCSI bus reset holding time in initio_init()
* 07/02/98 hc - v1.01n
* - Support 0002134A
* 08/07/98 hc - v1.01o
* - Change the initio_abort_srb routine to use scsi_done. <01>
* 09/07/98 hl - v1.02
* - Change the INI9100U define and proc_dir_entry to
* reflect the newer Kernel 2.1.118, but the v1.01o
* should work with Kernel 2.1.118.
* 09/20/98 wh - v1.02a
* - Support Abort command.
* - Handle reset routine.
* 09/21/98 hl - v1.03
* - remove comments.
* 12/09/98 bv - v1.03a
* - Removed unused code
* 12/13/98 bv - v1.03b
* - Remove cli() locking for kernels >= 2.1.95. This uses
* spinlocks to serialize access to the pSRB_head and
* pSRB_tail members of the HCS structure.
* 09/01/99 bv - v1.03d
* - Fixed a deadlock problem in SMP.
* 21/01/99 bv - v1.03e
* - Add support for the Domex 3192U PCI SCSI
* This is a slightly modified patch by
* Brian Macy <bmacy@sunshinecomputing.com>
* 22/02/99 bv - v1.03f
* - Didn't detect the INIC-950 in 2.0.x correctly.
* Now fixed.
* 05/07/99 bv - v1.03g
* - Changed the assumption that HZ = 100
* 10/17/03 mc - v1.04
* - added new DMA API support
* 06/01/04 jmd - v1.04a
* - Re-add reset_bus support
**************************************************************************/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "initio.h"
#define SENSE_SIZE 14
#define i91u_MAXQUEUE 2
#define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
#define I950_DEVICE_ID 0x9500 /* Initio's inic-950 product ID */
#define I940_DEVICE_ID 0x9400 /* Initio's inic-940 product ID */
#define I935_DEVICE_ID 0x9401 /* Initio's inic-935 product ID */
#define I920_DEVICE_ID 0x0002 /* Initio's other product ID */
#ifdef DEBUG_i91u
static unsigned int i91u_debug = DEBUG_DEFAULT;
#endif
static int initio_tag_enable = 1;
#ifdef DEBUG_i91u
static int setup_debug = 0;
#endif
static void i91uSCBPost(u8 * pHcb, u8 * pScb);
/* PCI Devices supported by this driver */
static struct pci_device_id i91u_pci_devices[] = {
{ PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_INIT, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
#define DEBUG_INTERRUPT 0
#define DEBUG_QUEUE 0
#define DEBUG_STATE 0
#define INT_DISC 0
/*--- forward references ---*/
static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
static int tulip_main(struct initio_host * host);
static int initio_next_state(struct initio_host * host);
static int initio_state_1(struct initio_host * host);
static int initio_state_2(struct initio_host * host);
static int initio_state_3(struct initio_host * host);
static int initio_state_4(struct initio_host * host);
static int initio_state_5(struct initio_host * host);
static int initio_state_6(struct initio_host * host);
static int initio_state_7(struct initio_host * host);
static int initio_xfer_data_in(struct initio_host * host);
static int initio_xfer_data_out(struct initio_host * host);
static int initio_xpad_in(struct initio_host * host);
static int initio_xpad_out(struct initio_host * host);
static int initio_status_msg(struct initio_host * host);
static int initio_msgin(struct initio_host * host);
static int initio_msgin_sync(struct initio_host * host);
static int initio_msgin_accept(struct initio_host * host);
static int initio_msgout_reject(struct initio_host * host);
static int initio_msgin_extend(struct initio_host * host);
static int initio_msgout_ide(struct initio_host * host);
static int initio_msgout_abort_targ(struct initio_host * host);
static int initio_msgout_abort_tag(struct initio_host * host);
static int initio_bus_device_reset(struct initio_host * host);
static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
static int int_initio_busfree(struct initio_host * host);
static int int_initio_scsi_rst(struct initio_host * host);
static int int_initio_bad_seq(struct initio_host * host);
static int int_initio_resel(struct initio_host * host);
static int initio_sync_done(struct initio_host * host);
static int wdtr_done(struct initio_host * host);
static int wait_tulip(struct initio_host * host);
static int initio_wait_done_disc(struct initio_host * host);
static int initio_wait_disc(struct initio_host * host);
static void tulip_scsi(struct initio_host * host);
static int initio_post_scsi_rst(struct initio_host * host);
static void initio_se2_ew_en(unsigned long base);
static void initio_se2_ew_ds(unsigned long base);
static int initio_se2_rd_all(unsigned long base);
static void initio_se2_update_all(unsigned long base); /* setup default pattern */
static void initio_read_eeprom(unsigned long base);
/* ---- INTERNAL VARIABLES ---- */
static NVRAM i91unvram;
static NVRAM *i91unvramp;
static u8 i91udftNvRam[64] =
{
/*----------- header -----------*/
0x25, 0xc9, /* Signature */
0x40, /* Size */
0x01, /* Revision */
/* -- Host Adapter Structure -- */
0x95, /* ModelByte0 */
0x00, /* ModelByte1 */
0x00, /* ModelInfo */
0x01, /* NumOfCh */
NBC1_DEFAULT, /* BIOSConfig1 */
0, /* BIOSConfig2 */
0, /* HAConfig1 */
0, /* HAConfig2 */
/* SCSI channel 0 and target Structure */
7, /* SCSIid */
NCC1_DEFAULT, /* SCSIconfig1 */
0, /* SCSIconfig2 */
0x10, /* NumSCSItarget */
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
/* SCSI channel 1 and target Structure */
7, /* SCSIid */
NCC1_DEFAULT, /* SCSIconfig1 */
0, /* SCSIconfig2 */
0x10, /* NumSCSItarget */
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT, NTC_DEFAULT,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0}; /* - CheckSum - */
static u8 initio_rate_tbl[8] = /* fast 20 */
{
/* nanosecond divide by 4 */
12, /* 50ns, 20M */
18, /* 75ns, 13.3M */
25, /* 100ns, 10M */
31, /* 125ns, 8M */
37, /* 150ns, 6.6M */
43, /* 175ns, 5.7M */
50, /* 200ns, 5M */
62 /* 250ns, 4M */
};
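/*
 * Worked example for the table above (reading taken from the
 * "nanosecond divide by 4" note): each entry stores the synchronous
 * period in 4ns units, so entry 12 encodes ~48-50ns (20MHz, "fast 20")
 * and entry 62 encodes ~248-250ns (4MHz).
 */
#if 0 /* illustrative sketch, not built */
static unsigned int sync_period_ns(unsigned int idx)
{
return initio_rate_tbl[idx & 7] * 4; /* e.g. 12 -> 48ns */
}
#endif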
static void initio_do_pause(unsigned amount)
{
/* Pause for amount jiffies */
unsigned long the_time = jiffies + amount;
while (time_before_eq(jiffies, the_time))
cpu_relax();
}
/*-- forward reference --*/
/******************************************************************
Input: instruction for Serial E2PROM
EX: se2_rd(0) calls se2_instr() to send address and read command
StartBit OP_Code Address Data
--------- -------- ------------------ -------
1 1 , 0 A5,A4,A3,A2,A1,A0 D15-D0
+-----------------------------------------------------
|
CS -----+
+--+ +--+ +--+ +--+ +--+
^ | ^ | ^ | ^ | ^ |
| | | | | | | | | |
CLK -------+ +--+ +--+ +--+ +--+ +--
(leading edge trigger)
+--1-----1--+
| SB OP | OP A5 A4
DI ----+ +--0------------------
(address and cmd sent to nvram)
-------------------------------------------+
|
DO +---
(data sent from nvram)
******************************************************************/
/**
* initio_se2_instr - bitbang an instruction
* @base: Base of InitIO controller
* @instr: Instruction for serial E2PROM
*
* Bitbang an instruction out to the serial E2Prom
*/
static void initio_se2_instr(unsigned long base, u8 instr)
{
int i;
u8 b;
outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
udelay(30);
outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
udelay(30);
for (i = 0; i < 8; i++) {
if (instr & 0x80)
b = SE2CS | SE2DO; /* -CLK+dataBit */
else
b = SE2CS; /* -CLK */
outb(b, base + TUL_NVRAM);
udelay(30);
outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
instr <<= 1;
}
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
}
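/*
 * Worked example of the framing above (assumed from the waveform
 * diagram and the callers below): the start bit goes out first, then
 * the 8-bit instruction MSB-first. Reading word 0x05 thus sends
 * instruction 0x85 = 0b10000101, opcode "10" (READ) plus the 6-bit
 * address; writes use opcode "01" (addr | 0x40), EWEN is 0x30.
 */
#if 0 /* illustrative sketch, not built */
static u8 se2_frame(u8 opcode_bits, u8 addr)
{
return (u8) (opcode_bits | (addr & 0x3F)); /* se2_frame(0x80, 5) == 0x85 */
}
#endif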
/**
* initio_se2_ew_en - Enable erase/write
* @base: Base address of InitIO controller
*
* Enable erase/write state of serial EEPROM
*/
void initio_se2_ew_en(unsigned long base)
{
initio_se2_instr(base, 0x30); /* EWEN */
outb(0, base + TUL_NVRAM); /* -CS */
udelay(30);
}
/**
* initio_se2_ew_ds - Disable erase/write
* @base: Base address of InitIO controller
*
* Disable erase/write state of serial EEPROM
*/
void initio_se2_ew_ds(unsigned long base)
{
initio_se2_instr(base, 0); /* EWDS */
outb(0, base + TUL_NVRAM); /* -CS */
udelay(30);
}
/**
* initio_se2_rd - read E2PROM word
* @base: Base of InitIO controller
* @addr: Address of word in E2PROM
*
* Read a word from the NV E2PROM device
*/
static u16 initio_se2_rd(unsigned long base, u8 addr)
{
u8 instr, rb;
u16 val = 0;
int i;
instr = (u8) (addr | 0x80);
initio_se2_instr(base, instr); /* READ INSTR */
for (i = 15; i >= 0; i--) {
outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
/* sample data after the falling edge of clock */
rb = inb(base + TUL_NVRAM);
rb &= SE2DI;
val += (rb << i);
udelay(30); /* 6/20/95 */
}
outb(0, base + TUL_NVRAM); /* no chip select */
udelay(30);
return val;
}
/**
* initio_se2_wr - write E2PROM word
* @base: Base of InitIO controller
* @addr: Address of word in E2PROM
* @val: Value to write
*
* Write a word to the NV E2PROM device. Used when recovering from
* a problem with the NV.
*/
static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
u8 rb;
u8 instr;
int i;
instr = (u8) (addr | 0x40);
initio_se2_instr(base, instr); /* WRITE INSTR */
for (i = 15; i >= 0; i--) {
if (val & 0x8000)
outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
else
outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
udelay(30);
outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
val <<= 1;
}
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
outb(0, base + TUL_NVRAM); /* -CS */
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* +CS */
udelay(30);
for (;;) {
outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
break; /* write complete */
}
outb(0, base + TUL_NVRAM); /* -CS */
}
/**
* initio_se2_rd_all - read hostadapter NV configuration
* @base: Base address of InitIO controller
*
* Reads the E2PROM data into main memory. Ensures that the checksum
* and header marker are valid. Returns 1 on success, -1 on error.
*/
static int initio_se2_rd_all(unsigned long base)
{
int i;
u16 chksum = 0;
u16 *np;
i91unvramp = &i91unvram;
np = (u16 *) i91unvramp;
for (i = 0; i < 32; i++)
*np++ = initio_se2_rd(base, i);
/* Is signature "ini" ok ? */
if (i91unvramp->NVM_Signature != INI_SIGNATURE)
return -1;
/* Is checksum ok ? */
np = (u16 *) i91unvramp;
for (i = 0; i < 31; i++)
chksum += *np++;
if (i91unvramp->NVM_CheckSum != chksum)
return -1;
return 1;
}
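/*
 * A sketch of the checksum rule enforced above: the NVRAM holds 32
 * 16-bit words and the last word must equal the 16-bit sum of the
 * first 31 (initio_se2_update_all() below writes it the same way).
 */
#if 0 /* illustrative sketch, not built */
static u16 nvram_checksum(const u16 *words)
{
u16 sum = 0;
int i;
for (i = 0; i < 31; i++)
sum += words[i];
return sum; /* compare against words[31] */
}
#endif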
/**
* initio_se2_update_all - Update E2PROM
* @base: Base of InitIO controller
*
* Update the E2PROM by writing any changes into the E2PROM
* chip, rewriting the checksum.
*/
static void initio_se2_update_all(unsigned long base)
{ /* setup default pattern */
int i;
u16 chksum = 0;
u16 *np, *np1;
i91unvramp = &i91unvram;
/* Calculate checksum first */
np = (u16 *) i91udftNvRam;
for (i = 0; i < 31; i++)
chksum += *np++;
*np = chksum;
initio_se2_ew_en(base); /* Enable write */
np = (u16 *) i91udftNvRam;
np1 = (u16 *) i91unvramp;
for (i = 0; i < 32; i++, np++, np1++) {
if (*np != *np1)
initio_se2_wr(base, i, *np);
}
initio_se2_ew_ds(base); /* Disable write */
}
/**
* initio_read_eeprom - Retrieve configuration
* @base: Base of InitIO Host Adapter
*
* Retrieve the host adapter configuration data from E2Prom. If the
* data is invalid then the defaults are used and are also restored
* into the E2PROM. This forms the access point for the SCSI driver
* into the E2PROM layer, the other functions for the E2PROM are all
* internal use.
*
* Must be called single threaded, uses a shared global area.
*/
static void initio_read_eeprom(unsigned long base)
{
u8 gctrl;
i91unvramp = &i91unvram;
/* Enable EEProm programming */
gctrl = inb(base + TUL_GCTRL);
outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
if (initio_se2_rd_all(base) != 1) {
initio_se2_update_all(base); /* setup default pattern */
initio_se2_rd_all(base); /* load again */
}
/* Disable EEProm programming */
gctrl = inb(base + TUL_GCTRL);
outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
}
/**
* initio_stop_bm - stop bus master
* @host: InitIO we are stopping
*
* Stop any pending DMA operation, aborting the DMA if necessary
*/
static void initio_stop_bm(struct initio_host * host)
{
if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
while ((inb(host->addr + TUL_Int) & XABT) == 0)
cpu_relax();
}
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/**
* initio_reset_scsi - Reset SCSI host controller
* @host: InitIO host to reset
* @seconds: Recovery time
*
* Perform a full reset of the SCSI subsystem.
*/
static int initio_reset_scsi(struct initio_host * host, int seconds)
{
outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
cpu_relax();
/* reset tulip chip */
outb(0, host->addr + TUL_SSignal);
/* Stall for a while to let the target's firmware get ready; make it 2 sec! */
/* The SONY 5200 tape drive won't work if we only stall for 1 sec */
/* FIXME: this is a very long busy wait right now */
initio_do_pause(seconds * HZ);
inb(host->addr + TUL_SInt);
return SCSI_RESET_SUCCESS;
}
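/*
 * Re the FIXME above: where this path is allowed to sleep, the busy
 * wait could become a sleeping delay. A sketch only; whether every
 * caller of initio_reset_scsi() may sleep has not been verified here.
 */
#if 0 /* illustrative sketch, not built */
ssleep(seconds); /* <linux/delay.h>: sleeps about 'seconds' seconds */
#endif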
/**
* initio_init - set up an InitIO host adapter
* @host: InitIO host adapter
* @num_scbs: Number of SCBS
* @bios_addr: BIOS address
*
* Set up the host adapter and devices according to the configuration
* retrieved from the E2PROM.
*
* Locking: Calls E2PROM layer code which is not re-enterable so must
* run single threaded for now.
*/
static void initio_init(struct initio_host * host, u8 *bios_addr)
{
int i;
u8 *flags;
u8 *heads;
/* Get E2Prom configuration */
initio_read_eeprom(host->addr);
if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
host->max_tar = 8;
else
host->max_tar = 16;
host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
host->idmask = ~(1 << host->scsi_id);
#ifdef CHK_PARITY
/* Enable parity error response */
outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
#endif
/* Mask all the interrupt */
outb(0x1F, host->addr + TUL_Mask);
initio_stop_bm(host);
/* --- Initialize the tulip --- */
outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
/* program HBA's SCSI ID */
outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
/* Enable Initiator Mode, phase latch, alternate sync period mode,
disable SCSI reset */
if (host->config & HCC_EN_PAR)
host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
else
host->sconf1 = (TSC_INITDEFAULT);
outb(host->sconf1, host->addr + TUL_SConfig);
/* Enable HW reselect */
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
outb(0, host->addr + TUL_SPeriod);
/* selection time out = 250 ms */
outb(153, host->addr + TUL_STimeOut);
/* Enable SCSI terminator */
outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
host->addr + TUL_XCtrl);
outb(((host->config & HCC_AUTO_TERM) >> 4) |
(inb(host->addr + TUL_GCTRL1) & 0xFE),
host->addr + TUL_GCTRL1);
for (i = 0,
flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
heads = bios_addr + 0x180;
i < host->max_tar;
i++, flags++) {
host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
if (host->targets[i].flags & TCF_EN_255)
host->targets[i].drv_flags = TCF_DRV_255_63;
else
host->targets[i].drv_flags = 0;
host->targets[i].js_period = 0;
host->targets[i].sconfig0 = host->sconf1;
host->targets[i].heads = *heads++;
if (host->targets[i].heads == 255)
host->targets[i].drv_flags = TCF_DRV_255_63;
else
host->targets[i].drv_flags = 0;
host->targets[i].sectors = *heads++;
host->targets[i].flags &= ~TCF_BUSY;
host->act_tags[i] = 0;
host->max_tags[i] = 0xFF;
} /* for */
printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
host->addr, host->pci_dev->irq,
host->bios_addr, host->scsi_id);
/* Reset SCSI Bus */
if (host->config & HCC_SCSI_RESET) {
printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
initio_reset_scsi(host, 10);
}
outb(0x17, host->addr + TUL_SCFG1);
outb(0xE9, host->addr + TUL_SIntEnable);
}
/**
* initio_alloc_scb - Allocate an SCB
* @host: InitIO host we are allocating for
*
* Walk the SCB list for the controller and allocate a free SCB if
* one exists.
*/
static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
{
struct scsi_ctrl_blk *scb;
unsigned long flags;
spin_lock_irqsave(&host->avail_lock, flags);
if ((scb = host->first_avail) != NULL) {
#if DEBUG_QUEUE
printk("find scb at %p\n", scb);
#endif
if ((host->first_avail = scb->next) == NULL)
host->last_avail = NULL;
scb->next = NULL;
scb->status = SCB_RENT;
}
spin_unlock_irqrestore(&host->avail_lock, flags);
return scb;
}
/**
* initio_release_scb - Release an SCB
* @host: InitIO host that owns the SCB
* @cmnd: SCB command block being returned
*
* Return an allocated SCB to the host free list
*/
static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
{
unsigned long flags;
#if DEBUG_QUEUE
printk("Release SCB %p; ", cmnd);
#endif
spin_lock_irqsave(&(host->avail_lock), flags);
cmnd->srb = NULL;
cmnd->status = 0;
cmnd->next = NULL;
if (host->last_avail != NULL) {
host->last_avail->next = cmnd;
host->last_avail = cmnd;
} else {
host->first_avail = cmnd;
host->last_avail = cmnd;
}
spin_unlock_irqrestore(&(host->avail_lock), flags);
}
/***************************************************************************/
static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("Append pend SCB %p; ", scbp);
#endif
scbp->status = SCB_PEND;
scbp->next = NULL;
if (host->last_pending != NULL) {
host->last_pending->next = scbp;
host->last_pending = scbp;
} else {
host->first_pending = scbp;
host->last_pending = scbp;
}
}
/***************************************************************************/
static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("Push pend SCB %p; ", scbp);
#endif
scbp->status = SCB_PEND;
if ((scbp->next = host->first_pending) != NULL) {
host->first_pending = scbp;
} else {
host->first_pending = scbp;
host->last_pending = scbp;
}
}
static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
{
struct scsi_ctrl_blk *first;
first = host->first_pending;
while (first != NULL) {
if (first->opcode != ExecSCSI)
return first;
if (first->tagmsg == 0) {
if ((host->act_tags[first->target] == 0) &&
!(host->targets[first->target].flags & TCF_BUSY))
return first;
} else {
if ((host->act_tags[first->target] >=
host->max_tags[first->target]) ||
(host->targets[first->target].flags & TCF_BUSY)) {
first = first->next;
continue;
}
return first;
}
first = first->next;
}
return first;
}
static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
printk("unlink pend SCB %p; ", scb);
#endif
prev = tmp = host->first_pending;
while (tmp != NULL) {
if (scb == tmp) { /* Unlink this SCB */
if (tmp == host->first_pending) {
if ((host->first_pending = tmp->next) == NULL)
host->last_pending = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_pending)
host->last_pending = prev;
}
tmp->next = NULL;
break;
}
prev = tmp;
tmp = tmp->next;
}
}
static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("append busy SCB %p; ", scbp);
#endif
if (scbp->tagmsg)
host->act_tags[scbp->target]++;
else
host->targets[scbp->target].flags |= TCF_BUSY;
scbp->status = SCB_BUSY;
scbp->next = NULL;
if (host->last_busy != NULL) {
host->last_busy->next = scbp;
host->last_busy = scbp;
} else {
host->first_busy = scbp;
host->last_busy = scbp;
}
}
/***************************************************************************/
static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
{
struct scsi_ctrl_blk *tmp;
if ((tmp = host->first_busy) != NULL) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
tmp->next = NULL;
if (tmp->tagmsg)
host->act_tags[tmp->target]--;
else
host->targets[tmp->target].flags &= ~TCF_BUSY;
}
#if DEBUG_QUEUE
printk("Pop busy SCB %p; ", tmp);
#endif
return tmp;
}
/***************************************************************************/
static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
printk("unlink busy SCB %p; ", scb);
#endif
prev = tmp = host->first_busy;
while (tmp != NULL) {
if (scb == tmp) { /* Unlink this SCB */
if (tmp == host->first_busy) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_busy)
host->last_busy = prev;
}
tmp->next = NULL;
if (tmp->tagmsg)
host->act_tags[tmp->target]--;
else
host->targets[tmp->target].flags &= ~TCF_BUSY;
break;
}
prev = tmp;
tmp = tmp->next;
}
return;
}
struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
struct scsi_ctrl_blk *tmp, *prev;
u16 scbp_tarlun;
prev = tmp = host->first_busy;
while (tmp != NULL) {
scbp_tarlun = (tmp->lun << 8) | (tmp->target);
if (scbp_tarlun == tarlun) { /* Unlink this SCB */
break;
}
prev = tmp;
tmp = tmp->next;
}
#if DEBUG_QUEUE
printk("find busy SCB %p; ", tmp);
#endif
return tmp;
}
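/*
 * The lookup key above packs LUN and target into one u16, matching
 * the "tar | (lun << 8)" value built by int_initio_resel() further
 * down. A sketch of the packing (helper name is illustrative):
 */
#if 0 /* illustrative sketch, not built */
static inline u16 initio_tarlun(u8 target, u8 lun)
{
return (u16) ((lun << 8) | target);
}
#endif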
static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
printk("append done SCB %p; ", scbp);
#endif
scbp->status = SCB_DONE;
scbp->next = NULL;
if (host->last_done != NULL) {
host->last_done->next = scbp;
host->last_done = scbp;
} else {
host->first_done = scbp;
host->last_done = scbp;
}
}
struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
{
struct scsi_ctrl_blk *tmp;
if ((tmp = host->first_done) != NULL) {
if ((host->first_done = tmp->next) == NULL)
host->last_done = NULL;
tmp->next = NULL;
}
#if DEBUG_QUEUE
printk("find done SCB %p; ",tmp);
#endif
return tmp;
}
static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
{
unsigned long flags;
struct scsi_ctrl_blk *tmp, *prev;
spin_lock_irqsave(&host->semaph_lock, flags);
if ((host->semaph == 0) && (host->active == NULL)) {
/* disable Jasmin SCSI Int */
outb(0x1F, host->addr + TUL_Mask);
spin_unlock_irqrestore(&host->semaph_lock, flags);
/* FIXME: synchronize_irq needed ? */
tulip_main(host);
spin_lock_irqsave(&host->semaph_lock, flags);
host->semaph = 1;
outb(0x0F, host->addr + TUL_Mask);
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SNOOZE;
}
prev = tmp = host->first_pending; /* Check Pend queue */
while (tmp != NULL) {
/* 07/27/98 */
if (tmp->srb == srbp) {
if (tmp == host->active) {
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else if (tmp == host->first_pending) {
if ((host->first_pending = tmp->next) == NULL)
host->last_pending = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_pending)
host->last_pending = prev;
}
tmp->hastat = HOST_ABORTED;
tmp->flags |= SCF_DONE;
if (tmp->flags & SCF_POST)
(*tmp->post) ((u8 *) host, (u8 *) tmp);
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
prev = tmp;
tmp = tmp->next;
}
prev = tmp = host->first_busy; /* Check Busy queue */
while (tmp != NULL) {
if (tmp->srb == srbp) {
if (tmp == host->active) {
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else if (tmp->tagmsg == 0) {
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else {
host->act_tags[tmp->target]--;
if (tmp == host->first_busy) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_busy)
host->last_busy = prev;
}
tmp->next = NULL;
tmp->hastat = HOST_ABORTED;
tmp->flags |= SCF_DONE;
if (tmp->flags & SCF_POST)
(*tmp->post) ((u8 *) host, (u8 *) tmp);
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
}
prev = tmp;
tmp = tmp->next;
}
spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_NOT_RUNNING;
}
/***************************************************************************/
static int initio_bad_seq(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
printk("initio_bad_seg c=%d\n", host->index);
if ((scb = host->active) != NULL) {
initio_unlink_busy_scb(host, scb);
scb->hastat = HOST_BAD_PHAS;
scb->tastat = 0;
initio_append_done_scb(host, scb);
}
initio_stop_bm(host);
initio_reset_scsi(host, 8); /* 7/29/98 */
return initio_post_scsi_rst(host);
}
/************************************************************************/
static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
unsigned long flags;
scb->mode = 0;
scb->sgidx = 0;
scb->sgmax = scb->sglen;
spin_lock_irqsave(&host->semaph_lock, flags);
initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */
/* VVVVV 07/21/98 */
if (host->semaph == 1) {
/* Disable Jasmin SCSI Int */
outb(0x1F, host->addr + TUL_Mask);
host->semaph = 0;
spin_unlock_irqrestore(&host->semaph_lock, flags);
tulip_main(host);
spin_lock_irqsave(&host->semaph_lock, flags);
host->semaph = 1;
outb(0x0F, host->addr + TUL_Mask);
}
spin_unlock_irqrestore(&host->semaph_lock, flags);
return;
}
/***************************************************************************/
static int initio_isr(struct initio_host * host)
{
if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
if (host->semaph == 1) {
outb(0x1F, host->addr + TUL_Mask);
/* Disable Tulip SCSI Int */
host->semaph = 0;
tulip_main(host);
host->semaph = 1;
outb(0x0F, host->addr + TUL_Mask);
return 1;
}
}
return 0;
}
static int tulip_main(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
for (;;) {
tulip_scsi(host); /* Call tulip_scsi */
/* Walk the list of completed SCBs */
while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */
if (scb->tastat == INI_QUEUE_FULL) {
host->max_tags[scb->target] =
host->act_tags[scb->target] - 1;
scb->tastat = 0;
initio_append_pend_scb(host, scb);
continue;
}
if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */
if (scb->tastat == 2) {
/* clr sync. nego flag */
if (scb->flags & SCF_SENSE) {
u8 len;
len = scb->senselen;
if (len == 0)
len = 1;
scb->buflen = scb->senselen;
scb->bufptr = scb->senseptr;
scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
/* so, we won't report wrong direction in xfer_data_in,
and won't report HOST_DO_DU in state_6 */
scb->mode = SCM_RSENS;
scb->ident &= 0xBF; /* Disable Disconnect */
scb->tagmsg = 0;
scb->tastat = 0;
scb->cdblen = 6;
scb->cdb[0] = SCSICMD_RequestSense;
scb->cdb[1] = 0;
scb->cdb[2] = 0;
scb->cdb[3] = 0;
scb->cdb[4] = len;
scb->cdb[5] = 0;
initio_push_pend_scb(host, scb);
break;
}
}
} else { /* in request sense mode */
if (scb->tastat == 2) { /* check condition status again after sending
request sense cmd 0x3 */
scb->hastat = HOST_BAD_PHAS;
}
scb->tastat = 2;
}
scb->flags |= SCF_DONE;
if (scb->flags & SCF_POST) {
/* FIXME: only one post method and lose casts */
(*scb->post) ((u8 *) host, (u8 *) scb);
}
} /* while */
/* find_active: */
if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
continue;
if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */
return 1; /* return to OS, enable interrupt */
/* Check pending SCB */
if (initio_find_first_pend_scb(host) == NULL)
return 1; /* return to OS, enable interrupt */
} /* End of for loop */
/* statement won't reach here */
}
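/*
 * A note on the auto request-sense path above: the CDB rebuilt inline
 * is the standard 6-byte REQUEST SENSE layout, opcode 0x03 with the
 * allocation length in byte 4. A sketch of the same construction
 * (helper name is illustrative):
 */
#if 0 /* illustrative sketch, not built */
static void build_request_sense_cdb(u8 *cdb, u8 len)
{
memset(cdb, 0, 6);
cdb[0] = SCSICMD_RequestSense; /* 0x03 */
cdb[4] = len; /* allocation length */
}
#endif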
static void tulip_scsi(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
struct target_control *active_tc;
/* make sure to service interrupt asap */
if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
host->phase = host->jsstatus0 & TSS_PH_MASK;
host->jsstatus1 = inb(host->addr + TUL_SStatus1);
host->jsint = inb(host->addr + TUL_SInt);
if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
int_initio_scsi_rst(host);
return;
}
if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */
if (int_initio_resel(host) == 0)
initio_next_state(host);
return;
}
if (host->jsint & TSS_SEL_TIMEOUT) {
int_initio_busfree(host);
return;
}
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
int_initio_busfree(host); /* unexpected bus free or sel timeout */
return;
}
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
if ((scb = host->active) != NULL)
initio_next_state(host);
return;
}
}
if (host->active != NULL)
return;
if ((scb = initio_find_first_pend_scb(host)) == NULL)
return;
/* program HBA's SCSI ID & target SCSI ID */
outb((host->scsi_id << 4) | (scb->target & 0x0F),
host->addr + TUL_SScsiId);
if (scb->opcode == ExecSCSI) {
active_tc = &host->targets[scb->target];
if (scb->tagmsg)
active_tc->drv_flags |= TCF_DRV_EN_TAG;
else
active_tc->drv_flags &= ~TCF_DRV_EN_TAG;
outb(active_tc->js_period, host->addr + TUL_SPeriod);
if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
initio_select_atn_stop(host, scb);
} else {
if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
initio_select_atn_stop(host, scb);
} else {
if (scb->tagmsg)
initio_select_atn3(host, scb);
else
initio_select_atn(host, scb);
}
}
if (scb->flags & SCF_POLL) {
while (wait_tulip(host) != -1) {
if (initio_next_state(host) == -1)
break;
}
}
} else if (scb->opcode == BusDevRst) {
initio_select_atn_stop(host, scb);
scb->next_state = 8;
if (scb->flags & SCF_POLL) {
while (wait_tulip(host) != -1) {
if (initio_next_state(host) == -1)
break;
}
}
} else if (scb->opcode == AbortCmd) {
if (initio_abort_srb(host, scb->srb) != 0) {
initio_unlink_pend_scb(host, scb);
initio_release_scb(host, scb);
} else {
scb->opcode = BusDevRst;
initio_select_atn_stop(host, scb);
scb->next_state = 8;
}
} else {
initio_unlink_pend_scb(host, scb);
scb->hastat = 0x16; /* bad command */
initio_append_done_scb(host, scb);
}
return;
}
/**
* initio_next_state - Next SCSI state
* @host: InitIO host we are processing
*
* Progress the active command block along the state machine
* until we hit a state which we must wait for activity to occur.
*
* Returns zero or a negative code.
*/
static int initio_next_state(struct initio_host * host)
{
int next;
next = host->active->next_state;
for (;;) {
switch (next) {
case 1:
next = initio_state_1(host);
break;
case 2:
next = initio_state_2(host);
break;
case 3:
next = initio_state_3(host);
break;
case 4:
next = initio_state_4(host);
break;
case 5:
next = initio_state_5(host);
break;
case 6:
next = initio_state_6(host);
break;
case 7:
next = initio_state_7(host);
break;
case 8:
return initio_bus_device_reset(host);
default:
return initio_bad_seq(host);
}
if (next <= 0)
return next;
}
}
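/*
 * For orientation, the state numbers dispatched above map onto the
 * handlers documented below (summarized from their kernel-doc):
 */
#if 0 /* illustrative sketch, not built */
static const char *const initio_state_names[] = {
[1] = "select with ATN / stop",
[2] = "after selection with ATN(3)",
[3] = "before CDB transfer done",
[4] = "data phase dispatch",
[5] = "after DMA transfer done or phase change",
[6] = "after data phase",
[7] = "flush FIFO on phase error",
[8] = "bus device reset",
};
#endif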
/**
* initio_state_1 - SCSI state machine
* @host: InitIO host we are controlling
*
* Perform SCSI state processing for Select/Attention/Stop
*/
static int initio_state_1(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s1-");
#endif
/* Move the SCB from pending to busy */
initio_unlink_pend_scb(host, scb);
initio_append_busy_scb(host, scb);
outb(active_tc->sconfig0, host->addr + TUL_SConfig );
/* ATN on */
if (host->phase == MSG_OUT) {
outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
outb(scb->ident, host->addr + TUL_SFifo);
if (scb->tagmsg) {
outb(scb->tagmsg, host->addr + TUL_SFifo);
outb(scb->tagid, host->addr + TUL_SFifo);
}
if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
active_tc->flags |= TCF_WDTR_DONE;
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo); /* Extended msg length */
outb(3, host->addr + TUL_SFifo); /* Wide data transfer request */
outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
} else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
active_tc->flags |= TCF_SYNC_DONE;
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* extended msg length */
outb(1, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
}
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
}
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
/* Into before CDB xfer */
return 3;
}
/**
* initio_state_2 - SCSI state machine
* @host: InitIO host we are controlling
*
* state after selection with attention
* state after selection with attention3
*/
static int initio_state_2(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s2-");
#endif
initio_unlink_pend_scb(host, scb);
initio_append_busy_scb(host, scb);
outb(active_tc->sconfig0, host->addr + TUL_SConfig);
if (host->jsstatus1 & TSS_CMD_PH_CMP)
return 4;
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
/* Into before CDB xfer */
return 3;
}
/**
* initio_state_3 - SCSI state machine
* @host: InitIO host we are controlling
*
* state before CDB xfer is done
*/
static int initio_state_3(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
int i;
#if DEBUG_STATE
printk("-s3-");
#endif
for (;;) {
switch (host->phase) {
case CMD_OUT: /* Command out phase */
for (i = 0; i < (int) scb->cdblen; i++)
outb(scb->cdb[i], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
if (host->phase == CMD_OUT)
return initio_bad_seq(host);
return 4;
case MSG_IN: /* Message in phase */
scb->next_state = 3;
if (initio_msgin(host) == -1)
return -1;
break;
case STATUS_IN: /* Status phase */
if (initio_status_msg(host) == -1)
return -1;
break;
case MSG_OUT: /* Message out phase */
if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
} else {
active_tc->flags |= TCF_SYNC_DONE;
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* ext. msg len */
outb(1, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
}
break;
default:
return initio_bad_seq(host);
}
}
}
/**
* initio_state_4 - SCSI state machine
* @host: InitIO host we are controlling
*
* SCSI state machine. State 4
*/
static int initio_state_4(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s4-");
#endif
if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
return 6; /* Go to state 6 (After data) */
}
for (;;) {
if (scb->buflen == 0)
return 6;
switch (host->phase) {
case STATUS_IN: /* Status phase */
if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */
scb->hastat = HOST_DO_DU;
if ((initio_status_msg(host)) == -1)
return -1;
break;
case MSG_IN: /* Message in phase */
scb->next_state = 0x4;
if (initio_msgin(host) == -1)
return -1;
break;
case MSG_OUT: /* Message out phase */
if (host->jsstatus0 & TSS_PAR_ERROR) {
scb->buflen = 0;
scb->hastat = HOST_DO_DU;
if (initio_msgout_ide(host) == -1)
return -1;
return 6;
} else {
outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
}
break;
case DATA_IN: /* Data in phase */
return initio_xfer_data_in(host);
case DATA_OUT: /* Data out phase */
return initio_xfer_data_out(host);
default:
return initio_bad_seq(host);
}
}
}
/**
* initio_state_5 - SCSI state machine
* @host: InitIO host we are controlling
*
* State after dma xfer done or phase change before xfer done
*/
static int initio_state_5(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */
#if DEBUG_STATE
printk("-s5-");
#endif
/*------ get remaining count -------*/
cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;
if (inb(host->addr + TUL_XCmd) & 0x20) {
/* ----------------------- DATA_IN ----------------------------- */
/* check scsi parity error */
if (host->jsstatus0 & TSS_PAR_ERROR)
scb->hastat = HOST_DO_DU;
if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
/* tell Hardware scsi xfer has been terminated */
outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
/* wait until DMA xfer not pending */
while (inb(host->addr + TUL_XStatus) & XPEND)
cpu_relax();
}
} else {
/*-------- DATA OUT -----------*/
if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
if (host->active_tc->js_period & TSC_WIDE_SCSI)
cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
else
cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
}
if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
outb(TAX_X_ABT, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
while ((inb(host->addr + TUL_Int) & XABT) == 0)
cpu_relax();
}
if ((cnt == 1) && (host->phase == DATA_OUT)) {
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
cnt = 0;
} else {
if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
}
if (cnt == 0) {
scb->buflen = 0;
return 6; /* After Data */
}
/* Update active data pointer */
xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */
scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */
if (scb->flags & SCF_SG) {
struct sg_entry *sgp;
unsigned long i;
sgp = &scb->sglist[scb->sgidx];
for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
xcnt -= (long) sgp->len;
if (xcnt < 0) { /* this sgp xfer half done */
xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */
sgp->data += (u32) xcnt; /* new ptr to be xfer */
sgp->len -= (u32) xcnt; /* new len to be xfer */
scb->bufptr += ((u32) (i - scb->sgidx) << 3);
/* new SG table ptr */
scb->sglen = (u8) (scb->sgmax - i);
/* new SG table len */
scb->sgidx = (u16) i;
/* for next disc and come in this loop */
return 4; /* Go to state 4 */
}
/* else (xcnt >= 0 , i.e. this sgp already xferred */
} /* for */
return 6; /* Go to state 6 */
} else {
scb->bufptr += (u32) xcnt;
}
return 4; /* Go to state 4 */
}
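/*
 * Worked example for the S/G fix-up above (numbers assumed): with
 * buflen = 4096 and cnt = 1024 left, xcnt = 3072 bytes were already
 * moved. Walking the list, xcnt first goes negative inside the entry
 * that was only partially transferred; adding that entry's len back
 * gives the bytes consumed within it, from which the entry and the
 * SG index are adjusted for the next reconnect:
 */
#if 0 /* illustrative sketch, not built */
static void sg_partial_fixup(struct sg_entry *sgp, u32 consumed)
{
sgp->data += consumed; /* first byte still to be transferred */
sgp->len -= consumed; /* bytes remaining in this entry */
}
#endif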
/**
* initio_state_6 - SCSI state machine
* @host: InitIO host we are controlling
*
* State after Data phase
*/
static int initio_state_6(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s6-");
#endif
for (;;) {
switch (host->phase) {
case STATUS_IN: /* Status phase */
if ((initio_status_msg(host)) == -1)
return -1;
break;
case MSG_IN: /* Message in phase */
scb->next_state = 6;
if ((initio_msgin(host)) == -1)
return -1;
break;
case MSG_OUT: /* Message out phase */
outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
break;
case DATA_IN: /* Data in phase */
return initio_xpad_in(host);
case DATA_OUT: /* Data out phase */
return initio_xpad_out(host);
default:
return initio_bad_seq(host);
}
}
}
/**
* initio_state_7 - SCSI state machine
* @host: InitIO host we are controlling
* Flush any leftover bytes from the SCSI FIFO; a data phase here is
* treated as a bad sequence, any other phase continues at state 6.
*/
int initio_state_7(struct initio_host * host)
{
int cnt, i;
#if DEBUG_STATE
printk("-s7-");
#endif
/* flush SCSI FIFO */
cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
if (cnt) {
for (i = 0; i < cnt; i++)
inb(host->addr + TUL_SFifo);
}
switch (host->phase) {
case DATA_IN: /* Data in phase */
case DATA_OUT: /* Data out phase */
return initio_bad_seq(host);
default:
return 6; /* Go to state 6 */
}
}
/**
* initio_xfer_data_in - Commence data input
* @host: InitIO host in use
*
* Commence a block of data transfer. The transfer itself will
* be managed by the controller and we will get a completion (or
* failure) interrupt.
*/
static int initio_xfer_data_in(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
if ((scb->flags & SCF_DIR) == SCF_DOUT)
return 6; /* wrong direction */
outl(scb->buflen, host->addr + TUL_SCnt0);
outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
if (scb->flags & SCF_SG) { /* S/G xfer */
outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_SG_IN, host->addr + TUL_XCmd);
} else {
outl(scb->buflen, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_X_IN, host->addr + TUL_XCmd);
}
scb->next_state = 0x5;
return 0; /* return to OS, wait xfer done , let jas_isr come in */
}
/**
* initio_xfer_data_out - Commence data output
* @host: InitIO host in use
*
* Commence a block of data transfer. The transfer itself will
* be managed by the controller and we will get a completion (or
* failure) interrupt.
*/
static int initio_xfer_data_out(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
if ((scb->flags & SCF_DIR) == SCF_DIN)
return 6; /* wrong direction */
outl(scb->buflen, host->addr + TUL_SCnt0);
outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
if (scb->flags & SCF_SG) { /* S/G xfer */
outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_SG_OUT, host->addr + TUL_XCmd);
} else {
outl(scb->buflen, host->addr + TUL_XCntH);
outl(scb->bufptr, host->addr + TUL_XAddH);
outb(TAX_X_OUT, host->addr + TUL_XCmd);
}
scb->next_state = 0x5;
return 0; /* return to OS, wait xfer done , let jas_isr come in */
}
int initio_xpad_in(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
if (active_tc->js_period & TSC_WIDE_SCSI)
outl(2, host->addr + TUL_SCnt0);
else
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
if (host->phase != DATA_IN) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
return 6;
}
inb(host->addr + TUL_SFifo);
}
}
int initio_xpad_out(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
if (active_tc->js_period & TSC_WIDE_SCSI)
outl(2, host->addr + TUL_SCnt0);
else
outl(1, host->addr + TUL_SCnt0);
outb(0, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if ((wait_tulip(host)) == -1)
return -1;
if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
return 6;
}
}
}
int initio_status_msg(struct initio_host * host)
{ /* status & MSG_IN */
struct scsi_ctrl_blk *scb = host->active;
u8 msg;
outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
/* get status */
scb->tastat = inb(host->addr + TUL_SFifo);
if (host->phase == MSG_OUT) {
if (host->jsstatus0 & TSS_PAR_ERROR)
outb(MSG_PARITY, host->addr + TUL_SFifo);
else
outb(MSG_NOP, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
if (host->phase == MSG_IN) {
msg = inb(host->addr + TUL_SFifo);
if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
if ((initio_msgin_accept(host)) == -1)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
outb(MSG_PARITY, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
if (msg == 0) { /* Command complete */
if ((scb->tastat & 0x18) == 0x10) /* No link support */
return initio_bad_seq(host);
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_done_disc(host);
}
if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
if ((scb->tastat & 0x18) == 0x10)
return initio_msgin_accept(host);
}
}
return initio_bad_seq(host);
}
/* scsi bus free */
int int_initio_busfree(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
if (scb != NULL) {
if (scb->status & SCB_SELECT) { /* selection timeout */
initio_unlink_pend_scb(host, scb);
scb->hastat = HOST_SEL_TOUT;
initio_append_done_scb(host, scb);
} else { /* Unexpected bus free */
initio_unlink_busy_scb(host, scb);
scb->hastat = HOST_BUS_FREE;
initio_append_done_scb(host, scb);
}
host->active = NULL;
host->active_tc = NULL;
}
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
return -1;
}
/**
* int_initio_scsi_rst - SCSI reset occurred
* @host: Host seeing the reset
*
* A SCSI bus reset has occurred. Clean up any pending transfer
* the hardware is doing by DMA and then abort all active and
* disconnected commands. The mid layer should sort the rest out
* for us
*/
static int int_initio_scsi_rst(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
int i;
/* if DMA xfer is pending, abort DMA xfer */
if (inb(host->addr + TUL_XStatus) & 0x01) {
outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
while ((inb(host->addr + TUL_Int) & 0x04) == 0)
cpu_relax();
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/* Abort all active & disconnected scb */
while ((scb = initio_pop_busy_scb(host)) != NULL) {
scb->hastat = HOST_BAD_PHAS;
initio_append_done_scb(host, scb);
}
host->active = NULL;
host->active_tc = NULL;
/* clr sync nego. done flag */
for (i = 0; i < host->max_tar; i++)
host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
return -1;
}
/**
* int_initio_resel - Reselection occurred
* @host: InitIO host adapter
*
* A SCSI reselection event has been signalled and the interrupt
* is now being processed. Work out which command block needs attention
* and continue processing that command.
*/
int int_initio_resel(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
struct target_control *active_tc;
u8 tag, msg = 0;
u8 tar, lun;
if ((scb = host->active) != NULL) {
/* FIXME: Why check and not just clear ? */
if (scb->status & SCB_SELECT) /* if waiting for selection complete */
scb->status &= ~SCB_SELECT;
host->active = NULL;
}
/* --------- get target id---------------------- */
tar = inb(host->addr + TUL_SBusId);
/* ------ get LUN from Identify message----------- */
lun = inb(host->addr + TUL_SIdent) & 0x0F;
/* 07/22/98 from 0x1F -> 0x0F */
active_tc = &host->targets[tar];
host->active_tc = active_tc;
outb(active_tc->sconfig0, host->addr + TUL_SConfig);
outb(active_tc->js_period, host->addr + TUL_SPeriod);
/* ------------- tag queueing ? ------------------- */
if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
if ((initio_msgin_accept(host)) == -1)
return -1;
if (host->phase != MSG_IN)
goto no_tag;
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
goto no_tag;
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase != MSG_IN)
goto no_tag;
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
scb = host->scb + tag;
if (scb->target != tar || scb->lun != lun) {
return initio_msgout_abort_tag(host);
}
if (scb->status != SCB_BUSY) { /* 03/24/95 */
return initio_msgout_abort_tag(host);
}
host->active = scb;
if ((initio_msgin_accept(host)) == -1)
return -1;
} else { /* No tag */
no_tag:
if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
return initio_msgout_abort_targ(host);
}
host->active = scb;
if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
if ((initio_msgin_accept(host)) == -1)
return -1;
}
}
return 0;
}
/**
* int_initio_bad_seq - out of phase
* @host: InitIO host flagging event
*
* We have ended up out of phase somehow. Reset the host controller
* and throw all our toys out of the pram. Let the midlayer clean up
*/
static int int_initio_bad_seq(struct initio_host * host)
{ /* target wrong phase */
struct scsi_ctrl_blk *scb;
int i;
initio_reset_scsi(host, 10);
while ((scb = initio_pop_busy_scb(host)) != NULL) {
scb->hastat = HOST_BAD_PHAS;
initio_append_done_scb(host, scb);
}
for (i = 0; i < host->max_tar; i++)
host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
return -1;
}
/**
* initio_msgout_abort_targ - abort a tag
* @host: InitIO host
*
* Abort when the target/lun does not match or when our SCB is not
* busy. Used by untagged commands.
*/
static int initio_msgout_abort_targ(struct initio_host * host)
{
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
outb(MSG_ABORT, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
}
/**
* initio_msgout_abort_tag - abort a tag
* @host: InitIO host
*
* Abort when the target/lun does not match or when our SCB is not
* busy. Used for tagged commands.
*/
static int initio_msgout_abort_tag(struct initio_host * host)
{
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
}
/**
* initio_msgin - Message in
* @host: InitIO Host
*
* Process incoming message
*/
static int initio_msgin(struct initio_host * host)
{
struct target_control *active_tc;
for (;;) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
switch (inb(host->addr + TUL_SFifo)) {
case MSG_DISC: /* Disconnect msg */
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
case MSG_SDP:
case MSG_RESTORE:
case MSG_NOP:
initio_msgin_accept(host);
break;
case MSG_REJ: /* Clear ATN first */
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
host->addr + TUL_SSignal);
active_tc = host->active_tc;
if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
host->addr + TUL_SSignal);
initio_msgin_accept(host);
break;
case MSG_EXTEND: /* extended msg */
initio_msgin_extend(host);
break;
case MSG_IGNOREWIDE:
initio_msgin_accept(host);
break;
case MSG_COMP:
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_done_disc(host);
default:
initio_msgout_reject(host);
break;
}
if (host->phase != MSG_IN)
return host->phase;
}
/* statement won't reach here */
}
static int initio_msgout_reject(struct initio_host * host)
{
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) == -1)
return -1;
if (host->phase == MSG_OUT) {
outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
return host->phase;
}
static int initio_msgout_ide(struct initio_host * host)
{
outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
static int initio_msgin_extend(struct initio_host * host)
{
u8 len, idx;
if (initio_msgin_accept(host) != MSG_IN)
return host->phase;
/* Get extended msg length */
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
len = inb(host->addr + TUL_SFifo);
host->msg[0] = len;
for (idx = 1; len != 0; len--) {
if ((initio_msgin_accept(host)) != MSG_IN)
return host->phase;
outl(1, host->addr + TUL_SCnt0);
outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
host->msg[idx++] = inb(host->addr + TUL_SFifo);
}
if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
u8 r;
if (host->msg[0] != 3) /* if length is not right */
return initio_msgout_reject(host);
if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
host->msg[3] = 0;
} else {
if (initio_msgin_sync(host) == 0 &&
(host->active_tc->flags & TCF_SYNC_DONE)) {
initio_sync_done(host);
return initio_msgin_accept(host);
}
}
r = inb(host->addr + TUL_SSignal);
outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
host->addr + TUL_SSignal);
if (initio_msgin_accept(host) != MSG_OUT)
return host->phase;
/* sync msg out */
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
initio_sync_done(host);
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo);
outb(1, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(host->msg[3], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
if (host->msg[0] != 2 || host->msg[1] != 3)
return initio_msgout_reject(host);
/* if it's WIDE DATA XFER REQ */
if (host->active_tc->flags & TCF_NO_WDTR) {
host->msg[2] = 0;
} else {
if (host->msg[2] > 2) /* > 32 bits */
return initio_msgout_reject(host);
if (host->msg[2] == 2) { /* == 32 */
host->msg[2] = 1;
} else {
if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
wdtr_done(host);
if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
return initio_msgin_accept(host);
}
}
}
outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
if (initio_msgin_accept(host) != MSG_OUT)
return host->phase;
/* WDTR msg out */
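/* EXTENDED MESSAGE, length 2, code 3 (WDTR), then the width byte */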
outb(MSG_EXTEND, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
static int initio_msgin_sync(struct initio_host * host)
{
char default_period;
default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
if (host->msg[3] > MAX_OFFSET) {
host->msg[3] = MAX_OFFSET;
if (host->msg[2] < default_period) {
host->msg[2] = default_period;
return 1;
}
if (host->msg[2] >= 59) /* Change to async */
host->msg[3] = 0;
return 1;
}
/* Does a zero REQ/ACK offset request asynchronous transfer? */
if (host->msg[3] == 0) {
return 0;
}
if (host->msg[2] < default_period) {
host->msg[2] = default_period;
return 1;
}
if (host->msg[2] >= 59) {
host->msg[3] = 0;
return 1;
}
return 0;
}
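/* Record a completed WDTR negotiation: mark the target wide, clear
 * TCF_SYNC_DONE to force a fresh sync negotiation, and reprogram the
 * period register. */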
static int wdtr_done(struct initio_host * host)
{
host->active_tc->flags &= ~TCF_SYNC_DONE;
host->active_tc->flags |= TCF_WDTR_DONE;
host->active_tc->js_period = 0;
if (host->msg[2]) /* if 16 bit */
host->active_tc->js_period |= TSC_WIDE_SCSI;
host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return 1;
}
static int initio_sync_done(struct initio_host * host)
{
int i;
host->active_tc->flags |= TCF_SYNC_DONE;
if (host->msg[3]) {
host->active_tc->js_period |= host->msg[3];
for (i = 0; i < 8; i++) {
if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
break;
}
host->active_tc->js_period |= (i << 4);
host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
}
outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return -1;
}
static int initio_post_scsi_rst(struct initio_host * host)
{
struct scsi_ctrl_blk *scb;
struct target_control *active_tc;
int i;
host->active = NULL;
host->active_tc = NULL;
host->flags = 0;
while ((scb = initio_pop_busy_scb(host)) != NULL) {
scb->hastat = HOST_BAD_PHAS;
initio_append_done_scb(host, scb);
}
/* clear sync done flag */
active_tc = &host->targets[0];
for (i = 0; i < host->max_tar; active_tc++, i++) {
active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
/* Initialize the sync transfer register values to an async transfer */
active_tc->js_period = 0;
active_tc->sconfig0 = host->sconf1;
host->act_tags[0] = 0; /* 07/22/98 */
host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
} /* for */
return -1;
}
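/* Start a selection with ATN that stops before the command phase so
 * the driver can feed message and CDB bytes itself in a later state. */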
static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
scb->status |= SCB_SELECT;
scb->next_state = 0x1;
host->active = scb;
host->active_tc = &host->targets[scb->target];
outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
}
static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
scb->status |= SCB_SELECT;
scb->next_state = 0x2;
outb(scb->ident, host->addr + TUL_SFifo);
for (i = 0; i < (int) scb->cdblen; i++)
outb(scb->cdb[i], host->addr + TUL_SFifo);
host->active_tc = &host->targets[scb->target];
host->active = scb;
outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
}
static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
scb->status |= SCB_SELECT;
scb->next_state = 0x2;
outb(scb->ident, host->addr + TUL_SFifo);
outb(scb->tagmsg, host->addr + TUL_SFifo);
outb(scb->tagid, host->addr + TUL_SFifo);
for (i = 0; i < scb->cdblen; i++)
outb(scb->cdb[i], host->addr + TUL_SFifo);
host->active_tc = &host->targets[scb->target];
host->active = scb;
outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
}
/**
* initio_bus_device_reset - SCSI Bus Device Reset
* @host: InitIO host to reset
*
* Perform a device reset and abort all pending SCBs for the
* victim device
*/
int initio_bus_device_reset(struct initio_host * host)
{
struct scsi_ctrl_blk *scb = host->active;
struct target_control *active_tc = host->active_tc;
struct scsi_ctrl_blk *tmp, *prev;
u8 tar;
if (host->phase != MSG_OUT)
return int_initio_bad_seq(host); /* Unexpected phase */
initio_unlink_pend_scb(host, scb);
initio_release_scb(host, scb);
tar = scb->target; /* target */
active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
/* Clear the sync nego and WDTR flags 07/22/98 */
/* abort all SCB with same target */
prev = tmp = host->first_busy; /* Check Busy queue */
while (tmp != NULL) {
if (tmp->target == tar) {
/* unlink it */
if (tmp == host->first_busy) {
if ((host->first_busy = tmp->next) == NULL)
host->last_busy = NULL;
} else {
prev->next = tmp->next;
if (tmp == host->last_busy)
host->last_busy = prev;
}
tmp->hastat = HOST_ABORTED;
initio_append_done_scb(host, tmp);
}
/* Previous pointer doesn't change */
else {
prev = tmp;
}
tmp = tmp->next;
}
outb(MSG_DEVRST, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
}
static int initio_msgin_accept(struct initio_host * host)
{
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
static int wait_tulip(struct initio_host * host)
{
while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
& TSS_INT_PENDING))
cpu_relax();
host->jsint = inb(host->addr + TUL_SInt);
host->phase = host->jsstatus0 & TSS_PH_MASK;
host->jsstatus1 = inb(host->addr + TUL_SStatus1);
if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */
return int_initio_resel(host);
if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
return int_initio_busfree(host);
if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
return int_initio_scsi_rst(host);
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
if (host->flags & HCF_EXPECT_DONE_DISC) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
initio_unlink_busy_scb(host, host->active);
host->active->hastat = 0;
initio_append_done_scb(host, host->active);
host->active = NULL;
host->active_tc = NULL;
host->flags &= ~HCF_EXPECT_DONE_DISC;
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
return -1;
}
if (host->flags & HCF_EXPECT_DISC) {
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
host->active = NULL;
host->active_tc = NULL;
host->flags &= ~HCF_EXPECT_DISC;
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
return -1;
}
return int_initio_busfree(host);
}
/* The old code really did the below; both paths return host->phase,
 * so this test can probably be removed */
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
return host->phase;
return host->phase;
}
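/* Wait for the disconnect interrupt that should follow a message
 * ending the connection; any other outcome is treated as a bad bus
 * sequence. */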
static int initio_wait_disc(struct initio_host * host)
{
while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
cpu_relax();
host->jsint = inb(host->addr + TUL_SInt);
if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
return int_initio_scsi_rst(host);
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
host->active = NULL;
return -1;
}
return initio_bad_seq(host);
}
static int initio_wait_done_disc(struct initio_host * host)
{
while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
& TSS_INT_PENDING))
cpu_relax();
host->jsint = inb(host->addr + TUL_SInt);
if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
return int_initio_scsi_rst(host);
if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
initio_unlink_busy_scb(host, host->active);
initio_append_done_scb(host, host->active);
host->active = NULL;
return -1;
}
return initio_bad_seq(host);
}
/**
* i91u_intr - IRQ handler
* @irqno: IRQ number
* @dev_id: IRQ identifier
*
* Take the relevant locks and then invoke the actual isr processing
* code under the lock.
*/
static irqreturn_t i91u_intr(int irqno, void *dev_id)
{
struct Scsi_Host *dev = dev_id;
unsigned long flags;
int r;
spin_lock_irqsave(dev->host_lock, flags);
r = initio_isr((struct initio_host *)dev->hostdata);
spin_unlock_irqrestore(dev->host_lock, flags);
if (r)
return IRQ_HANDLED;
else
return IRQ_NONE;
}
/**
* initio_build_scb - Build the mappings and SCB
* @host: InitIO host taking the command
* @cblk: Firmware command block
* @cmnd: SCSI midlayer command block
*
* Translate the abstract SCSI command into a firmware command block
* suitable for feeding to the InitIO host controller. This also requires
* we build the scatter gather lists and ensure they are mapped properly.
*/
static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
{ /* Create corresponding SCB */
struct scatterlist *sglist;
struct sg_entry *sg; /* Pointer to SG list */
int i, nseg;
long total_len;
dma_addr_t dma_addr;
/* Fill in the command headers */
cblk->post = i91uSCBPost; /* i91u's callback routine */
cblk->srb = cmnd;
cblk->opcode = ExecSCSI;
cblk->flags = SCF_POST; /* After SCSI done, call post routine */
cblk->target = cmnd->device->id;
cblk->lun = cmnd->device->lun;
cblk->ident = cmnd->device->lun | DISC_ALLOW;
cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
/* Map the sense buffer into bus memory */
dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
SENSE_SIZE, DMA_FROM_DEVICE);
cblk->senseptr = (u32)dma_addr;
cblk->senselen = SENSE_SIZE;
cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
cblk->cdblen = cmnd->cmd_len;
/* Clear the returned status */
cblk->hastat = 0;
cblk->tastat = 0;
/* Copy the command */
memcpy(cblk->cdb, cmnd->cmnd, cmnd->cmd_len);
/* Set up tags */
if (cmnd->device->tagged_supported) { /* Tag Support */
cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
cblk->tagmsg = 0; /* No tag support */
}
/* TODO: handle scsi_dma_map() failure */
nseg = scsi_dma_map(cmnd);
BUG_ON(nseg < 0);
if (nseg) {
dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
cblk->bufptr = (u32)dma_addr;
cmnd->SCp.dma_handle = dma_addr;
cblk->sglen = nseg;
cblk->flags |= SCF_SG; /* Turn on SG list flag */
total_len = 0;
sg = &cblk->sglist[0];
scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
total_len += sg_dma_len(sglist);
++sg;
}
cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
total_len : scsi_bufflen(cmnd);
} else { /* No data transfer required */
cblk->buflen = 0;
cblk->sglen = 0;
}
}
/**
* i91u_queuecommand - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
* @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
struct scsi_ctrl_blk *cmnd;
cmd->scsi_done = done;
cmnd = initio_alloc_scb(host);
if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
initio_build_scb(host, cmnd, cmd);
initio_exec_scb(host, cmnd);
return 0;
}
static DEF_SCSI_QCMD(i91u_queuecommand)
/**
* i91u_bus_reset - reset the SCSI bus
* @cmnd: Command block we want to trigger the reset for
*
* Initiate a SCSI bus reset sequence
*/
static int i91u_bus_reset(struct scsi_cmnd * cmnd)
{
struct initio_host *host;
host = (struct initio_host *) cmnd->device->host->hostdata;
spin_lock_irq(cmnd->device->host->host_lock);
initio_reset_scsi(host, 0);
spin_unlock_irq(cmnd->device->host->host_lock);
return SUCCESS;
}
/**
* i91u_biosparam - return the "logical geometry"
* @sdev: SCSI device
* @dev: Matching block device
* @capacity: Size of the drive in sectors
* @info_array: Return space for BIOS geometry
*
* Map the device geometry in a manner compatible with the host
* controller BIOS behaviour.
*
* FIXME: limited to 2^32 sector devices.
*/
static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
sector_t capacity, int *info_array)
{
struct initio_host *host; /* Point to Host adapter control block */
struct target_control *tc;
host = (struct initio_host *) sdev->host->hostdata;
tc = &host->targets[sdev->id];
if (tc->heads) {
info_array[0] = tc->heads;
info_array[1] = tc->sectors;
info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
} else {
if (tc->drv_flags & TCF_DRV_255_63) {
info_array[0] = 255;
info_array[1] = 63;
info_array[2] = (unsigned long)capacity / 255 / 63;
} else {
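/* Default geometry: 64 heads * 32 sectors = 2048 sectors per
 * cylinder, hence capacity >> 11 below */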
info_array[0] = 64;
info_array[1] = 32;
info_array[2] = (unsigned long)capacity >> 11;
}
}
#if defined(DEBUG_BIOSPARAM)
if (i91u_debug & debug_biosparam) {
printk("bios geometry: head=%d, sec=%d, cyl=%d\n",
info_array[0], info_array[1], info_array[2]);
printk("WARNING: check, if the bios geometry is correct.\n");
}
#endif
return 0;
}
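/* Worked example: with no stored geometry and TCF_DRV_255_63 set, an
 * 8388608-sector (4 GB) disk reports 255 heads, 63 sectors and
 * 8388608 / 255 / 63 = 522 cylinders. */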
/**
* i91u_unmap_scb - Unmap a command
* @pci_dev: PCI device the command is for
* @cmnd: The command itself
*
* Unmap any PCI mapping/IOMMU resources allocated when the command
* was mapped originally as part of initio_build_scb
*/
static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
if (cmnd->SCp.ptr) {
dma_unmap_single(&pci_dev->dev,
(dma_addr_t)((unsigned long)cmnd->SCp.ptr),
SENSE_SIZE, DMA_FROM_DEVICE);
cmnd->SCp.ptr = NULL;
}
/* request buffer */
if (scsi_sg_count(cmnd)) {
dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
scsi_dma_unmap(cmnd);
}
}
/**
* i91uSCBPost - SCSI completion callback
* @host_mem: Pointer to the host adapter control block.
* @cblk_mem: Pointer to the SCSI control block.
*
* This is the callback routine invoked when the tulip has finished
* one SCSI command.
*/
static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
{
struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */
struct initio_host *host;
struct scsi_ctrl_blk *cblk;
host = (struct initio_host *) host_mem;
cblk = (struct scsi_ctrl_blk *) cblk_mem;
if ((cmnd = cblk->srb) == NULL) {
printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
WARN_ON(1);
initio_release_scb(host, cblk); /* Release SCB for current channel */
return;
}
/*
* Remap the firmware error status into a mid layer one
*/
switch (cblk->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
cblk->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
cblk->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
cblk->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
cblk->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
than was allocated by the Data Length field or the sum of the
Scatter / Gather Data Length fields. */
case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
case 0x16: /* Invalid SCB Operation Code. */
default:
printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
cblk->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
cmnd->result = cblk->tastat | (cblk->hastat << 16);
i91u_unmap_scb(host->pci_dev, cmnd);
cmnd->scsi_done(cmnd); /* Notify system DONE */
initio_release_scb(host, cblk); /* Release SCB for current channel */
}
static struct scsi_host_template initio_template = {
.proc_name = "INI9100U",
.name = "Initio INI-9X00U/UW SCSI device driver",
.queuecommand = i91u_queuecommand,
.eh_bus_reset_handler = i91u_bus_reset,
.bios_param = i91u_biosparam,
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
static int initio_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
struct initio_host *host;
u32 reg;
u16 bios_seg;
struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
int num_scb, i, error;
error = pci_enable_device(pdev);
if (error)
return error;
pci_read_config_dword(pdev, 0x44, &reg);
bios_seg = (u16) (reg & 0xFF);
if (((reg & 0xFF00) >> 8) == 0xFF)
reg = 0;
bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
error = -ENODEV;
goto out_disable_device;
}
shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
if (!shost) {
printk(KERN_WARNING "initio: Could not allocate host structure.\n");
error = -ENOMEM;
goto out_disable_device;
}
host = (struct initio_host *)shost->hostdata;
memset(host, 0, sizeof(struct initio_host));
host->addr = pci_resource_start(pdev, 0);
host->bios_addr = bios_seg;
if (!request_region(host->addr, 256, "i91u")) {
printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
error = -ENODEV;
goto out_host_put;
}
if (initio_tag_enable) /* 1.01i */
num_scb = MAX_TARGETS * i91u_MAXQUEUE;
else
num_scb = MAX_TARGETS + 3; /* 1 tape, 1 CD-ROM, 1 extra */
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
if ((scb = kzalloc(i, GFP_DMA)) != NULL)
break;
}
if (!scb) {
printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
error = -ENOMEM;
goto out_release_region;
}
host->pci_dev = pdev;
host->semaph = 1;
spin_lock_init(&host->semaph_lock);
host->num_scbs = num_scb;
host->scb = scb;
host->next_pending = scb;
host->next_avail = scb;
for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
tmp->tagid = i;
if (i != 0)
prev->next = tmp;
prev = tmp;
}
prev->next = NULL;
host->scb_end = tmp;
host->first_avail = scb;
host->last_avail = prev;
spin_lock_init(&host->avail_lock);
initio_init(host, phys_to_virt(((u32)bios_seg << 4)));
host->jsstatus0 = 0;
shost->io_port = host->addr;
shost->n_io_port = 0xff;
shost->can_queue = num_scb; /* 03/05/98 */
shost->unique_id = host->addr;
shost->max_id = host->max_tar;
shost->max_lun = 32; /* 10/21/97 */
shost->irq = pdev->irq;
shost->this_id = host->scsi_id; /* Assign HCS index */
shost->base = host->addr;
shost->sg_tablesize = TOTAL_SG_ENTRY;
error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
if (error < 0) {
printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
goto out_free_scbs;
}
pci_set_drvdata(pdev, shost);
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_free_irq;
scsi_scan_host(shost);
return 0;
out_free_irq:
free_irq(pdev->irq, shost);
out_free_scbs:
kfree(host->scb);
out_release_region:
release_region(host->addr, 256);
out_host_put:
scsi_host_put(shost);
out_disable_device:
pci_disable_device(pdev);
return error;
}
/**
* initio_remove_one - control shutdown
* @pdev: PCI device being released
*
* Release the resources assigned to this adapter after it has
* finished being used.
*/
static void initio_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct initio_host *s = (struct initio_host *)host->hostdata;
scsi_remove_host(host);
free_irq(pdev->irq, host);
release_region(s->addr, 256);
scsi_host_put(host);
pci_disable_device(pdev);
}
MODULE_LICENSE("GPL");
static struct pci_device_id initio_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
static struct pci_driver initio_pci_driver = {
.name = "initio",
.id_table = initio_pci_tbl,
.probe = initio_probe_one,
.remove = initio_remove_one,
};
static int __init initio_init_driver(void)
{
return pci_register_driver(&initio_pci_driver);
}
static void __exit initio_exit_driver(void)
{
pci_unregister_driver(&initio_pci_driver);
}
MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("GPL");
module_init(initio_init_driver);
module_exit(initio_exit_driver);
| gpl-2.0 |
icoolguy1995/Elixer-1 | drivers/acpi/sysfs.c | 2969 | 18930 | /*
* sysfs.c - ACPI sysfs interface to userspace.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");
#define PREFIX "ACPI: "
#ifdef CONFIG_ACPI_DEBUG
/*
* ACPI debug sysfs I/F, including:
* /sys/module/acpi/parameters/debug_layer
* /sys/module/acpi/parameters/debug_level
* /sys/module/acpi/parameters/trace_method_name
* /sys/module/acpi/parameters/trace_state
* /sys/module/acpi/parameters/trace_debug_layer
* /sys/module/acpi/parameters/trace_debug_level
*/
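/*
 * Example usage (a sketch): reading either file lists the available
 * masks, and writing a hex mask selects them, e.g.
 *   cat /sys/module/acpi/parameters/debug_layer
 *   echo 0x00000004 > /sys/module/acpi/parameters/debug_layer
 */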
struct acpi_dlayer {
const char *name;
unsigned long value;
};
struct acpi_dlevel {
const char *name;
unsigned long value;
};
#define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
static const struct acpi_dlayer acpi_debug_layers[] = {
ACPI_DEBUG_INIT(ACPI_UTILITIES),
ACPI_DEBUG_INIT(ACPI_HARDWARE),
ACPI_DEBUG_INIT(ACPI_EVENTS),
ACPI_DEBUG_INIT(ACPI_TABLES),
ACPI_DEBUG_INIT(ACPI_NAMESPACE),
ACPI_DEBUG_INIT(ACPI_PARSER),
ACPI_DEBUG_INIT(ACPI_DISPATCHER),
ACPI_DEBUG_INIT(ACPI_EXECUTER),
ACPI_DEBUG_INIT(ACPI_RESOURCES),
ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
ACPI_DEBUG_INIT(ACPI_COMPILER),
ACPI_DEBUG_INIT(ACPI_TOOLS),
ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};
static const struct acpi_dlevel acpi_debug_levels[] = {
ACPI_DEBUG_INIT(ACPI_LV_INIT),
ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
ACPI_DEBUG_INIT(ACPI_LV_INFO),
ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_PARSE),
ACPI_DEBUG_INIT(ACPI_LV_LOAD),
ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
ACPI_DEBUG_INIT(ACPI_LV_EXEC),
ACPI_DEBUG_INIT(ACPI_LV_NAMES),
ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
ACPI_DEBUG_INIT(ACPI_LV_TABLES),
ACPI_DEBUG_INIT(ACPI_LV_VALUES),
ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
ACPI_DEBUG_INIT(ACPI_LV_THREADS),
ACPI_DEBUG_INIT(ACPI_LV_IO),
ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
acpi_debug_layers[i].name,
acpi_debug_layers[i].value,
(acpi_dbg_layer & acpi_debug_layers[i].value)
? '*' : ' ');
}
result +=
sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
ACPI_ALL_DRIVERS,
(acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
== 0 ? ' ' : '-');
result +=
sprintf(buffer + result,
"--\ndebug_layer = 0x%08X ( * = enabled)\n",
acpi_dbg_layer);
return result;
}
static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
int result = 0;
int i;
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
acpi_debug_levels[i].name,
acpi_debug_levels[i].value,
(acpi_dbg_level & acpi_debug_levels[i].value)
? '*' : ' ');
}
result +=
sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
acpi_dbg_level);
return result;
}
static const struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
static const struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
static char trace_method_name[6];
module_param_string(trace_method_name, trace_method_name, 6, 0644);
static unsigned int trace_debug_layer;
module_param(trace_debug_layer, uint, 0644);
static unsigned int trace_debug_level;
module_param(trace_debug_level, uint, 0644);
static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
if (!strncmp(val, "enable", strlen("enable") - 1)) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
result = -EBUSY;
goto exit;
}
if (!strncmp(val, "disable", strlen("disable") - 1)) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
if (result)
result = -EBUSY;
goto exit;
}
if (!strncmp(val, "1", 1)) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 1);
if (result)
result = -EBUSY;
goto exit;
}
result = -EINVAL;
exit:
return result;
}
static int param_get_trace_state(char *buffer, struct kernel_param *kp)
{
if (!acpi_gbl_trace_method_name)
return sprintf(buffer, "disable");
else {
if (acpi_gbl_trace_flags & 1)
return sprintf(buffer, "1");
else
return sprintf(buffer, "enable");
}
return 0;
}
module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
/* /sys/module/acpi/parameters/aml_debug_output */
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
bool, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
int result;
result = sprintf(buffer, "%x", ACPI_CA_VERSION);
return result;
}
module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
* ACPI table sysfs I/F:
* /sys/firmware/acpi/tables/
* /sys/firmware/acpi/tables/dynamic/
*/
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *dynamic_tables_kobj;
struct acpi_table_attr {
struct bin_attribute attr;
char name[8];
int instance;
struct list_head node;
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
container_of(bin_attr, struct acpi_table_attr, attr);
struct acpi_table_header *table_header = NULL;
acpi_status status;
char name[ACPI_NAME_SIZE];
if (strncmp(table_attr->name, "NULL", 4))
memcpy(name, table_attr->name, ACPI_NAME_SIZE);
else
memcpy(name, "\0\0\0\0", 4);
status = acpi_get_table(name, table_attr->instance, &table_header);
if (ACPI_FAILURE(status))
return -ENODEV;
return memory_read_from_buffer(buf, count, &offset,
table_header, table_header->length);
}
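/*
 * Initialise the sysfs attribute for one ACPI table. When several
 * tables share a signature (e.g. multiple SSDTs) the name gains a
 * numeric instance suffix so each table appears as a distinct file.
 */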
static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
struct acpi_table_header *table_header)
{
struct acpi_table_header *header = NULL;
struct acpi_table_attr *attr = NULL;
sysfs_attr_init(&table_attr->attr.attr);
if (table_header->signature[0] != '\0')
memcpy(table_attr->name, table_header->signature,
ACPI_NAME_SIZE);
else
memcpy(table_attr->name, "NULL", 4);
list_for_each_entry(attr, &acpi_table_attr_list, node) {
if (!memcmp(table_attr->name, attr->name, ACPI_NAME_SIZE))
if (table_attr->instance < attr->instance)
table_attr->instance = attr->instance;
}
table_attr->instance++;
if (table_attr->instance > 1 || (table_attr->instance == 1 &&
!acpi_get_table
(table_header->signature, 2, &header)))
sprintf(table_attr->name + ACPI_NAME_SIZE, "%d",
table_attr->instance);
table_attr->attr.size = 0;
table_attr->attr.read = acpi_table_show;
table_attr->attr.attr.name = table_attr->name;
table_attr->attr.attr.mode = 0400;
return;
}
static acpi_status
acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
struct acpi_table_attr *table_attr;
switch (event) {
case ACPI_TABLE_EVENT_LOAD:
table_attr =
kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
if (!table_attr)
return AE_NO_MEMORY;
acpi_table_attr_init(table_attr, table);
if (sysfs_create_bin_file(dynamic_tables_kobj,
&table_attr->attr)) {
kfree(table_attr);
return AE_ERROR;
} else
list_add_tail(&table_attr->node, &acpi_table_attr_list);
break;
case ACPI_TABLE_EVENT_UNLOAD:
/*
* we do not need to do anything right now
* because the table is not deleted from the
* global table list when unloading it.
*/
break;
default:
return AE_BAD_PARAMETER;
}
return AE_OK;
}
static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
struct acpi_table_header *table_header = NULL;
int table_index = 0;
int result;
tables_kobj = kobject_create_and_add("tables", acpi_kobj);
if (!tables_kobj)
goto err;
dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
if (!dynamic_tables_kobj)
goto err_dynamic_tables;
do {
result = acpi_get_table_by_index(table_index, &table_header);
if (!result) {
table_index++;
table_attr = NULL;
table_attr =
kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
if (!table_attr)
return -ENOMEM;
acpi_table_attr_init(table_attr, table_header);
result =
sysfs_create_bin_file(tables_kobj,
&table_attr->attr);
if (result) {
kfree(table_attr);
return result;
} else
list_add_tail(&table_attr->node,
&acpi_table_attr_list);
}
} while (!result);
kobject_uevent(tables_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
result = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
return result == AE_OK ? 0 : -EINVAL;
err_dynamic_tables:
kobject_put(tables_kobj);
err:
return -ENOMEM;
}
/*
* Detailed ACPI IRQ counters:
* /sys/firmware/acpi/interrupts/
*/
u32 acpi_irq_handled;
u32 acpi_irq_not_handled;
#define COUNT_GPE 0
#define COUNT_SCI 1 /* acpi_irq_handled */
#define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
#define COUNT_ERROR 3 /* other */
#define NUM_COUNTERS_EXTRA 4
struct event_counter {
u32 count;
u32 flags;
};
static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;
static struct attribute_group interrupt_stats_attr_group = {
.name = "interrupts",
};
static struct kobj_attribute *counter_attrs;
static void delete_gpe_attr_array(void)
{
struct event_counter *tmp = all_counters;
all_counters = NULL;
kfree(tmp);
if (counter_attrs) {
int i;
for (i = 0; i < num_gpes; i++)
kfree(counter_attrs[i].attr.name);
kfree(counter_attrs);
}
kfree(all_attrs);
return;
}
static void gpe_count(u32 gpe_number)
{
acpi_gpe_count++;
if (!all_counters)
return;
if (gpe_number < num_gpes)
all_counters[gpe_number].count++;
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
return;
}
static void fixed_event_count(u32 event_number)
{
if (!all_counters)
return;
if (event_number < ACPI_NUM_FIXED_EVENTS)
all_counters[num_gpes + event_number].count++;
else
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
COUNT_ERROR].count++;
return;
}
static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
if (event_type == ACPI_EVENT_TYPE_GPE)
gpe_count(event_number);
if (event_type == ACPI_EVENT_TYPE_FIXED)
fixed_event_count(event_number);
}
static int get_status(u32 index, acpi_event_status *status,
acpi_handle *handle)
{
int result = 0;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
goto end;
if (index < num_gpes) {
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x\n", index));
goto end;
}
result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
end:
return result;
}
static ssize_t counter_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int index = attr - counter_attrs;
int size;
acpi_handle handle;
acpi_event_status status;
int result = 0;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
acpi_irq_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
acpi_irq_not_handled;
all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
acpi_gpe_count;
size = sprintf(buf, "%8d", all_counters[index].count);
/* "gpe_all" or "sci" */
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
goto end;
result = get_status(index, &status, &handle);
if (result)
goto end;
if (!(status & ACPI_EVENT_FLAG_HANDLE))
size += sprintf(buf + size, " invalid");
else if (status & ACPI_EVENT_FLAG_ENABLED)
size += sprintf(buf + size, " enabled");
else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
size += sprintf(buf + size, " wake_enabled");
else
size += sprintf(buf + size, " disabled");
end:
size += sprintf(buf + size, "\n");
return result ? result : size;
}
/*
* counter_set() sets the specified counter.
* Writing any value to the total "sci" file clears all counters.
* Writing "enable", "disable" or "clear" enables, disables or clears
* a GPE/fixed event from user space.
*/
static ssize_t counter_set(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf,
size_t size)
{
int index = attr - counter_attrs;
acpi_event_status status;
acpi_handle handle;
int result = 0;
if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
int i;
for (i = 0; i < num_counters; ++i)
all_counters[i].count = 0;
acpi_gpe_count = 0;
acpi_irq_handled = 0;
acpi_irq_not_handled = 0;
goto end;
}
/* show the event status for both GPEs and Fixed Events */
result = get_status(index, &status, &handle);
if (result)
goto end;
if (!(status & ACPI_EVENT_FLAG_HANDLE)) {
printk(KERN_WARNING PREFIX
"Can not change Invalid GPE/Fixed Event status\n");
return -EINVAL;
}
if (index < num_gpes) {
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_disable_gpe(handle, index);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_enable_gpe(handle, index);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
int event = index - num_gpes;
if (!strcmp(buf, "disable\n") &&
(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_disable_event(event, ACPI_NOT_ISR);
else if (!strcmp(buf, "enable\n") &&
!(status & ACPI_EVENT_FLAG_ENABLED))
result = acpi_enable_event(event, ACPI_NOT_ISR);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
result = acpi_clear_event(event);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else
all_counters[index].count = strtoul(buf, NULL, 0);
if (ACPI_FAILURE(result))
result = -EINVAL;
end:
return result ? result : size;
}
void acpi_irq_stats_init(void)
{
acpi_status status;
int i;
if (all_counters)
return;
num_gpes = acpi_current_gpe_count;
num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
GFP_KERNEL);
if (all_attrs == NULL)
return;
all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
GFP_KERNEL);
if (all_counters == NULL)
goto fail;
status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
if (ACPI_FAILURE(status))
goto fail;
counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
GFP_KERNEL);
if (counter_attrs == NULL)
goto fail;
for (i = 0; i < num_counters; ++i) {
char buffer[12];
char *name;
if (i < num_gpes)
sprintf(buffer, "gpe%02X", i);
else if (i == num_gpes + ACPI_EVENT_PMTIMER)
sprintf(buffer, "ff_pmtimer");
else if (i == num_gpes + ACPI_EVENT_GLOBAL)
sprintf(buffer, "ff_gbl_lock");
else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
sprintf(buffer, "ff_pwr_btn");
else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
sprintf(buffer, "ff_slp_btn");
else if (i == num_gpes + ACPI_EVENT_RTC)
sprintf(buffer, "ff_rt_clk");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
sprintf(buffer, "gpe_all");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
sprintf(buffer, "sci");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
sprintf(buffer, "sci_not");
else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
sprintf(buffer, "error");
else
sprintf(buffer, "bug%02X", i);
name = kzalloc(strlen(buffer) + 1, GFP_KERNEL);
if (name == NULL)
goto fail;
strncpy(name, buffer, strlen(buffer) + 1);
sysfs_attr_init(&counter_attrs[i].attr);
counter_attrs[i].attr.name = name;
counter_attrs[i].attr.mode = 0644;
counter_attrs[i].show = counter_show;
counter_attrs[i].store = counter_set;
all_attrs[i] = &counter_attrs[i].attr;
}
interrupt_stats_attr_group.attrs = all_attrs;
if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
return;
fail:
delete_gpe_attr_array();
return;
}
static void __exit interrupt_stats_exit(void)
{
sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
delete_gpe_attr_array();
return;
}
static ssize_t
acpi_show_profile(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
static const struct device_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
int __init acpi_sysfs_init(void)
{
int result;
result = acpi_tables_sysfs_init();
if (result)
return result;
result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
return result;
}
| gpl-2.0 |
kamarush/android_kernel_lge_hammerhead | drivers/net/ethernet/sfc/txc43128_phy.c | 7321 | 16313 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/*
* Driver for Transwitch/Mysticom CX4 retimer
* see www.transwitch.com, part is TXC-43128
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "efx.h"
#include "mdio_10g.h"
#include "phy.h"
#include "nic.h"
/* We expect these MMDs to be in the package */
#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
MDIO_DEVS_PMAPMD | \
MDIO_DEVS_PHYXS)
#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
(1 << LOOPBACK_PMAPMD) | \
(1 << LOOPBACK_PHYXS_WS))
/**************************************************************************
*
* Compile-time config
*
**************************************************************************
*/
#define TXCNAME "TXC43128"
/* Total length of time we'll wait for the PHY to come out of reset (ms) */
#define TXC_MAX_RESET_TIME 500
/* Interval between checks (ms) */
#define TXC_RESET_WAIT 10
/* How long to run BIST (us) */
#define TXC_BIST_DURATION 50
/**************************************************************************
*
* Register definitions
*
**************************************************************************
*/
/* Command register */
#define TXC_GLRGS_GLCMD 0xc004
/* Useful bits in command register */
/* Lane power-down */
#define TXC_GLCMD_L01PD_LBN 5
#define TXC_GLCMD_L23PD_LBN 6
/* Limited SW reset: preserves configuration but
* initiates a logic reset. Self-clearing */
#define TXC_GLCMD_LMTSWRST_LBN 14
/* Signal Quality Control */
#define TXC_GLRGS_GSGQLCTL 0xc01a
/* Enable bit */
#define TXC_GSGQLCT_SGQLEN_LBN 15
/* Lane selection */
#define TXC_GSGQLCT_LNSL_LBN 13
#define TXC_GSGQLCT_LNSL_WIDTH 2
/* Analog TX control */
#define TXC_ALRGS_ATXCTL 0xc040
/* Lane power-down */
#define TXC_ATXCTL_TXPD3_LBN 15
#define TXC_ATXCTL_TXPD2_LBN 14
#define TXC_ATXCTL_TXPD1_LBN 13
#define TXC_ATXCTL_TXPD0_LBN 12
/* Amplitude on lanes 0, 1 */
#define TXC_ALRGS_ATXAMP0 0xc041
/* Amplitude on lanes 2, 3 */
#define TXC_ALRGS_ATXAMP1 0xc042
/* Bit position of value for lane 0 (or 2) */
#define TXC_ATXAMP_LANE02_LBN 3
/* Bit position of value for lane 1 (or 3) */
#define TXC_ATXAMP_LANE13_LBN 11
#define TXC_ATXAMP_1280_mV 0
#define TXC_ATXAMP_1200_mV 8
#define TXC_ATXAMP_1120_mV 12
#define TXC_ATXAMP_1060_mV 14
#define TXC_ATXAMP_0820_mV 25
#define TXC_ATXAMP_0720_mV 26
#define TXC_ATXAMP_0580_mV 27
#define TXC_ATXAMP_0440_mV 28
#define TXC_ATXAMP_0820_BOTH \
((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
| (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
/* Preemphasis on lanes 0, 1 */
#define TXC_ALRGS_ATXPRE0 0xc043
/* Preemphasis on lanes 2, 3 */
#define TXC_ALRGS_ATXPRE1 0xc044
#define TXC_ATXPRE_NONE 0
#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
#define TXC_ALRGS_ARXCTL 0xc045
/* Lane power-down */
#define TXC_ARXCTL_RXPD3_LBN 15
#define TXC_ARXCTL_RXPD2_LBN 14
#define TXC_ARXCTL_RXPD1_LBN 13
#define TXC_ARXCTL_RXPD0_LBN 12
/* Main control */
#define TXC_MRGS_CTL 0xc340
/* Bits in main control */
#define TXC_MCTL_RESET_LBN 15 /* Self clear */
#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
/* GPIO output */
#define TXC_GPIO_OUTPUT 0xc346
#define TXC_GPIO_DIR 0xc348
/* Vendor-specific BIST registers */
#define TXC_BIST_CTL 0xc280
#define TXC_BIST_TXFRMCNT 0xc281
#define TXC_BIST_RX0FRMCNT 0xc282
#define TXC_BIST_RX1FRMCNT 0xc283
#define TXC_BIST_RX2FRMCNT 0xc284
#define TXC_BIST_RX3FRMCNT 0xc285
#define TXC_BIST_RX0ERRCNT 0xc286
#define TXC_BIST_RX1ERRCNT 0xc287
#define TXC_BIST_RX2ERRCNT 0xc288
#define TXC_BIST_RX3ERRCNT 0xc289
/* BIST type (controls the bit pattern used in the test) */
#define TXC_BIST_CTRL_TYPE_LBN 10
#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
/* Set this to 1 for 10 bit and 0 for 8 bit */
#define TXC_BIST_CTRL_B10EN_LBN 12
/* Enable BIST (write 0 to disable) */
#define TXC_BIST_CTRL_ENAB_LBN 13
/* Stop BIST (self-clears when stop complete) */
#define TXC_BIST_CTRL_STOP_LBN 14
/* Start BIST (cleared by writing 1 to STOP) */
#define TXC_BIST_CTRL_STRT_LBN 15
/* Mt. Diablo test configuration */
#define TXC_MTDIABLO_CTRL 0xc34f
#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
struct txc43128_data {
unsigned long bug10934_timer;
enum efx_phy_mode phy_mode;
enum efx_loopback_mode loopback_mode;
};
/* The PHY sometimes needs a reset to bring the link back up. So long as
* it reports link down, we reset it every 5 seconds.
*/
#define BUG10934_RESET_INTERVAL (5 * HZ)
/* Perform a reset that doesn't clear configuration changes */
static void txc_reset_logic(struct efx_nic *efx);
/* Set the output value of a gpio */
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
/* Set up the GPIO direction register */
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
* global reset (it's less clear what resetting the other MMDs does). */
static int txc_reset_phy(struct efx_nic *efx)
{
int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
TXC_RESET_WAIT);
if (rc < 0)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
return 0;
fail:
netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
return rc;
}
/* Run a single BIST on one MMD */
static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
{
int ctrl, bctl;
int lane;
int rc = 0;
/* Set PMA to test into loopback using Mt Diablo reg as per app note */
ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
/* The BIST app. note lists these as 3 distinct steps. */
/* Set the BIST type */
bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTEN bit in the BIST Control register to enable */
bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTRT bit in the BIST Control register */
efx_mdio_write(efx, mmd, TXC_BIST_CTL,
bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
/* Wait. */
udelay(TXC_BIST_DURATION);
/* Set the BSTOP bit in the BIST Control register */
bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* The STOP bit should go off when things have stopped */
while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
/* Check that all the error counts are 0 and all the frame counts
 * are non-zero */
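/* The four per-lane counter registers are consecutive, so "base
 * register + lane" addresses lane N's counter directly */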
for (lane = 0; lane < 4; lane++) {
int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
if (count != 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d had %d errs\n", lane, count);
rc = -EIO;
}
count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
if (count == 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d got 0 frames\n", lane);
rc = -EIO;
}
}
if (rc == 0)
netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
/* Disable BIST */
efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
/* Turn off loopback */
ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
return rc;
}
static int txc_bist(struct efx_nic *efx)
{
return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
/* Push the non-configurable defaults into the PHY. This must be
* done after every full reset */
static void txc_apply_defaults(struct efx_nic *efx)
{
int mctrl;
/* Turn amplitude down and preemphasis off on the host side
* (PHY<->MAC) as this is believed less likely to upset Falcon
* and no adverse effects have been noted. It probably also
* saves a picowatt or two */
/* Turn off preemphasis */
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
/* Turn down the amplitude */
efx_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
efx_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
/* Set the line side amplitude and preemphasis to the databook
* defaults as an erratum causes them to be 0 on at least some
* PHY rev.s */
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
/* Set up the LEDs */
mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
/* Set the Green and Red LEDs to their default modes */
mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
/* Databook recommends doing this after configuration changes */
txc_reset_logic(efx);
falcon_board(efx)->type->init_phy(efx);
}
static int txc43128_phy_probe(struct efx_nic *efx)
{
struct txc43128_data *phy_data;
/* Allocate phy private storage */
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
phy_data->phy_mode = efx->phy_mode;
efx->mdio.mmds = TXC_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
return 0;
}
/* Initialisation entry point for this PHY driver */
static int txc43128_phy_init(struct efx_nic *efx)
{
int rc;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
if (rc < 0)
return rc;
txc_apply_defaults(efx);
return 0;
}
/* Set the lane power down state in the global registers */
static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
{
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
ctl &= ~pd;
else
ctl |= pd;
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
}
/* Set the lane power down state in the analog control registers */
static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
{
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
txctl &= ~txpd;
rxctl &= ~rxpd;
} else {
txctl |= txpd;
rxctl |= rxpd;
}
efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
}
static void txc_set_power(struct efx_nic *efx)
{
/* According to the data book, all the MMDs can do low power */
efx_mdio_set_mmds_lpower(efx,
!!(efx->phy_mode & PHY_MODE_LOW_POWER),
TXC_REQUIRED_DEVS);
/* Global register bank is in PCS, PHY XS. These control the host
* side and line side settings respectively. */
txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
/* Analog register bank in PMA/PMD, PHY XS */
txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
{
int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int tries = 50;
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
while (tries--) {
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
udelay(1);
}
if (tries < 0) /* while (tries--) leaves -1 on timeout */
netif_info(efx, hw, efx->net_dev,
TXCNAME " Logic reset timed out!\n");
}
/* Perform a logic reset. This preserves the configuration registers
* and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
{
/* The data sheet claims we can do the logic reset on either the
* PCS or the PHYXS and the result is a reset of both host- and
* line-side logic. */
txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
static bool txc43128_phy_read_link(struct efx_nic *efx)
{
return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
static int txc43128_phy_reconfigure(struct efx_nic *efx)
{
struct txc43128_data *phy_data = efx->phy_data;
enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
txc_reset_phy(efx);
txc_apply_defaults(efx);
falcon_reset_xaui(efx);
mode_change &= ~PHY_MODE_TX_DISABLED;
}
efx_mdio_transmit_disable(efx);
efx_mdio_phy_reconfigure(efx);
if (mode_change & PHY_MODE_LOW_POWER)
txc_set_power(efx);
/* The data sheet claims this is required after every reconfiguration
* (note at end of 7.1), but we mustn't do it when nothing changes as
* it glitches the link, and reconfigure gets called on link change,
* so we get an IRQ storm on link up. */
if (loop_change || mode_change)
txc_reset_logic(efx);
phy_data->phy_mode = efx->phy_mode;
phy_data->loopback_mode = efx->loopback_mode;
return 0;
}
static void txc43128_phy_fini(struct efx_nic *efx)
{
/* Disable link events */
efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
static void txc43128_phy_remove(struct efx_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
}
/* Periodic callback: this exists mainly to poll link status as we
* don't use LASI interrupts */
static bool txc43128_phy_poll(struct efx_nic *efx)
{
struct txc43128_data *data = efx->phy_data;
bool was_up = efx->link_state.up;
efx->link_state.up = txc43128_phy_read_link(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
data->bug10934_timer = jiffies;
} else {
if (time_after_eq(jiffies, (data->bug10934_timer +
BUG10934_RESET_INTERVAL))) {
data->bug10934_timer = jiffies;
txc_reset_logic(efx);
}
}
return efx->link_state.up != was_up;
}
static const char *const txc43128_test_names[] = {
"bist"
};
static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(txc43128_test_names))
return txc43128_test_names[index];
return NULL;
}
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
int rc;
if (!(flags & ETH_TEST_FL_OFFLINE))
return 0;
rc = txc_reset_phy(efx);
if (rc < 0)
return rc;
rc = txc_bist(efx);
txc_apply_defaults(efx);
results[0] = rc ? -1 : 1;
return rc;
}
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
const struct efx_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
.poll = txc43128_phy_poll,
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
.get_settings = txc43128_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
};
| gpl-2.0 |
championswimmer/android_kernel_sony_msm8930 | arch/sh/kernel/kprobes.c | 7833 | 15267 | /*
* Kernel probes (kprobes) for SuperH
*
* Copyright (C) 2007 Chris Smith <chris.smith@st.com>
* Copyright (C) 2006 Lineo Solutions, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003)
#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00)
#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900)
#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b)
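/* The masks above decode the SH instruction encodings for the branch,
 * jump and return opcodes that prepare_singlestep() must follow when
 * planting the next probe(s). */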
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
if (OPCODE_RTE(opcode))
return -EFAULT; /* Bad breakpoint */
p->opcode = opcode;
return 0;
}
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (*p->addr == BREAKPOINT_INSTRUCTION)
return 1;
return 0;
}
/**
* If an illegal slot instruction exception occurs for an address
* containing a kprobe, remove the probe.
*
* Returns 0 if the exception was handled successfully, 1 otherwise.
*/
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
if (p != NULL) {
printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
(unsigned int)pc + 2);
unregister_kprobe(p);
return 0;
}
return 1;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
struct kprobe *saved = &__get_cpu_var(saved_next_opcode);
if (saved->addr) {
arch_disarm_kprobe(p);
arch_disarm_kprobe(saved);
saved->addr = NULL;
saved->opcode = 0;
saved = &__get_cpu_var(saved_next_opcode2);
if (saved->addr) {
arch_disarm_kprobe(saved);
saved->addr = NULL;
saved->opcode = 0;
}
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
}
/*
* Singlestep is implemented by disabling the current kprobe and setting one
* on the next instruction, following branches. Two probes are set if the
* branch is conditional.
*/
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
__get_cpu_var(saved_current_opcode).addr = (kprobe_opcode_t *)regs->pc;
if (p != NULL) {
struct kprobe *op1, *op2;
arch_disarm_kprobe(p);
op1 = &__get_cpu_var(saved_next_opcode);
op2 = &__get_cpu_var(saved_next_opcode2);
if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
unsigned long disp = (p->opcode & 0x0FFF);
op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 +
regs->regs[reg_nr]);
} else if (OPCODE_RTS(p->opcode)) {
op1->addr = (kprobe_opcode_t *) regs->pr;
} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
op1->addr = p->addr + 1;
/* case 2 */
op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
op2->opcode = *(op2->addr);
arch_arm_kprobe(op2);
} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
op1->addr = p->addr + 2;
/* case 2 */
op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
op2->opcode = *(op2->addr);
arch_arm_kprobe(op2);
} else {
op1->addr = p->addr + 1;
}
op1->opcode = *(op1->addr);
arch_arm_kprobe(op1);
}
}
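/*
* Worked example for the conditional-branch case above (illustrative
* addresses, added for clarity): probing "bf disp" with disp = 0x10 at pc:
*
* case 1 (branch not taken): op1->addr = p->addr + 1, i.e. the next
* 16-bit instruction after the probed one;
* case 2 (branch taken): op2->addr = pc + 4 + 0x10 * 2, the SH
* pc-relative target counted in 2-byte units.
*
* Both sites get a temporary breakpoint; post_kprobe_handler() tears
* down whichever probes remain once the step completes.
*/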
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->pr;
/* Replace the return addr with trampoline addr */
regs->pr = (unsigned long)kretprobe_trampoline;
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
addr = (kprobe_opcode_t *) (regs->pc);
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* Here we save the original kprobe variables,
* then single-step the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
/* Not one of ours: let kernel handle it */
if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
return 1;
ss_probe:
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
/*
* For function-return probes, init_kprobes() establishes a probepoint
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
static void __used kretprobe_trampoline_holder(void)
{
asm volatile (".globl kretprobe_trampoline\n"
"kretprobe_trampoline:\n\t"
"nop\n");
}
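/*
* Illustrative usage sketch (generic kretprobe API; the probe target
* "do_fork" is a hypothetical example, not something this file sets up):
*
* static int my_ret_handler(struct kretprobe_instance *ri,
* struct pt_regs *regs)
* {
* pr_info("probed function returned, r0=%lu\n", regs->regs[0]);
* return 0;
* }
*
* static struct kretprobe my_rp = {
* .handler = my_ret_handler,
* .kp.symbol_name = "do_fork",
* };
*
* register_kretprobe(&my_rp);
*/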
/*
* Called when we hit the probe point at kretprobe_trampoline
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path
* have a return probe installed on them, and/or more than one
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
__get_cpu_var(current_kprobe) = &ri->rp->kp;
ri->rp->handler(ri, regs);
__get_cpu_var(current_kprobe) = NULL;
}
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->pc = orig_ret_address;
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
return orig_ret_address;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kprobe_opcode_t *addr = NULL;
struct kprobe *p = NULL;
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
p = &__get_cpu_var(saved_next_opcode);
if (p->addr) {
arch_disarm_kprobe(p);
p->addr = NULL;
p->opcode = 0;
addr = __get_cpu_var(saved_current_opcode).addr;
__get_cpu_var(saved_current_opcode).addr = NULL;
p = get_kprobe(addr);
arch_arm_kprobe(p);
p = &__get_cpu_var(saved_next_opcode2);
if (p->addr) {
arch_disarm_kprobe(p);
p->addr = NULL;
p->opcode = 0;
}
}
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
const struct exception_table_entry *entry;
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe, point the pc back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->pc = (unsigned long)cur->addr;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting;
* the npre/npostfault counts could also be used to account
* for these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->pc)) != NULL) {
regs->pc = entry->fixup;
return 1;
}
/*
* fixup_exception() could not handle it;
* let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
/*
* Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct kprobe *p = NULL;
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
addr = (kprobe_opcode_t *) (args->regs->pc);
if (val == DIE_TRAP) {
if (!kprobe_running()) {
if (kprobe_handler(args->regs)) {
ret = NOTIFY_STOP;
} else {
/* Not a kprobe trap */
ret = NOTIFY_DONE;
}
} else {
p = get_kprobe(addr);
if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
(kcb->kprobe_status == KPROBE_REENTER)) {
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
} else {
if (kprobe_handler(args->regs)) {
ret = NOTIFY_STOP;
} else {
p = __get_cpu_var(current_kprobe);
if (p->break_handler &&
p->break_handler(p, args->regs))
ret = NOTIFY_STOP;
}
}
}
}
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kcb->jprobe_saved_regs = *regs;
kcb->jprobe_saved_r15 = regs->regs[15];
addr = kcb->jprobe_saved_r15;
/*
* TBD: As Linus pointed out, gcc assumes that the callee
* owns the argument space and could overwrite it, e.g.
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
*/
memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
MIN_STACK_SIZE(addr));
regs->pc = (unsigned long)(jp->entry);
return 1;
}
void __kprobes jprobe_return(void)
{
asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long stack_addr = kcb->jprobe_saved_r15;
u8 *addr = (u8 *)regs->pc;
if ((addr >= (u8 *)jprobe_return) &&
(addr <= (u8 *)jprobe_return_end)) {
*regs = kcb->jprobe_saved_regs;
memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
MIN_STACK_SIZE(stack_addr));
kcb->kprobe_status = KPROBE_HIT_SS;
preempt_enable_no_resched();
return 1;
}
return 0;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
| gpl-2.0 |
bju2000/lge-kernel-gproj | arch/ia64/kvm/kvm_fw.c | 11673 | 16389 | /*
* PAL/SAL call delegation
*
* Copyright (c) 2004 Li Susie <susie.li@intel.com>
* Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
* Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/kvm_host.h>
#include <linux/smp.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>
#include "vti.h"
#include "misc.h"
#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/tlb.h>
/*
* Handy macros to make sure that the PAL return values start out
* as something meaningful.
*/
#define INIT_PAL_STATUS_UNIMPLEMENTED(x) \
{ \
x.status = PAL_STATUS_UNIMPLEMENTED; \
x.v0 = 0; \
x.v1 = 0; \
x.v2 = 0; \
}
#define INIT_PAL_STATUS_SUCCESS(x) \
{ \
x.status = PAL_STATUS_SUCCESS; \
x.v0 = 0; \
x.v1 = 0; \
x.v2 = 0; \
}
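/*
* Typical use (illustrative sketch, mirroring the PAL_PTCE_INFO case
* further down): start from a known-good result, then fill in values.
*
* struct ia64_pal_retval result;
*
* INIT_PAL_STATUS_SUCCESS(result);
* result.v1 = (1L << 32) | 1L;
* set_pal_result(vcpu, result);
*/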
static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
struct exit_ctl_data *p;
if (vcpu) {
p = &vcpu->arch.exit_data;
if (p->exit_reason == EXIT_REASON_PAL_CALL) {
*gr28 = p->u.pal_data.gr28;
*gr29 = p->u.pal_data.gr29;
*gr30 = p->u.pal_data.gr30;
*gr31 = p->u.pal_data.gr31;
return ;
}
}
printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n");
}
static void set_pal_result(struct kvm_vcpu *vcpu,
struct ia64_pal_retval result) {
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
if (p->exit_reason == EXIT_REASON_PAL_CALL) {
p->u.pal_data.ret = result;
return ;
}
INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
}
static void set_sal_result(struct kvm_vcpu *vcpu,
struct sal_ret_values result) {
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
if (p->exit_reason == EXIT_REASON_SAL_CALL) {
p->u.sal_data.ret = result;
return ;
}
printk(KERN_WARNING"Failed to set sal result!!\n");
}
struct cache_flush_args {
u64 cache_type;
u64 operation;
u64 progress;
long status;
};
cpumask_t cpu_cache_coherent_map;
static void remote_pal_cache_flush(void *data)
{
struct cache_flush_args *args = data;
long status;
u64 progress = args->progress;
status = ia64_pal_cache_flush(args->cache_type, args->operation,
&progress, NULL);
if (status != 0)
args->status = status;
}
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
u64 gr28, gr29, gr30, gr31;
struct ia64_pal_retval result = {0, 0, 0, 0};
struct cache_flush_args args = {0, 0, 0, 0};
long psr;
gr28 = gr29 = gr30 = gr31 = 0;
kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
if (gr31 != 0)
printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);
/* Always call Host Pal in int=1 */
gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
args.cache_type = gr29;
args.operation = gr30;
smp_call_function(remote_pal_cache_flush,
(void *)&args, 1);
if (args.status != 0)
printk(KERN_ERR"pal_cache_flush error!,"
"status:0x%lx\n", args.status);
/*
* Call Host PAL cache flush
* Clear psr.ic when call PAL_CACHE_FLUSH
*/
local_irq_save(psr);
result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
&result.v0);
local_irq_restore(psr);
if (result.status != 0)
printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld"
"in1:%lx,in2:%lx\n",
vcpu, result.status, gr29, gr30);
#if 0
if (gr29 == PAL_CACHE_TYPE_COHERENT) {
cpus_setall(vcpu->arch.cache_coherent_map);
cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
cpus_setall(cpu_cache_coherent_map);
cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
}
#endif
return result;
}
struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
return result;
}
static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
/*
* PAL_FREQ_BASE may not be implemented on some platforms;
* call SAL instead.
*/
if (result.v0 == 0) {
result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
&result.v0,
&result.v1);
result.v2 = 0;
}
return result;
}
/*
* On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
* RTC is used instead. This function patches the ratios from SAL
* to match the RTC before providing them to the guest.
*/
static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
{
struct pal_freq_ratio *ratio;
unsigned long sal_freq, sal_drift, factor;
result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
&sal_freq, &sal_drift);
ratio = (struct pal_freq_ratio *)&result->v2;
factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
sn_rtc_cycles_per_second;
ratio->num = 3;
ratio->den = factor;
}
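/*
* Worked example (illustrative numbers, added): with a 200 MHz SAL base
* frequency and sn_rtc_cycles_per_second == 50 MHz,
* factor = (600000000 + 25000000) / 50000000 = 12, so the guest sees
* num/den = 3/12 and computes an ITC rate of 200 MHz * 3 / 12 = 50 MHz,
* matching the RTC that actually backs the emulated ITC.
*/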
static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
if (vcpu->kvm->arch.is_sn2)
sn2_patch_itc_freq_ratios(&result);
return result;
}
static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
INIT_PAL_STATUS_UNIMPLEMENTED(result);
return result;
}
static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
INIT_PAL_STATUS_SUCCESS(result);
return result;
}
static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result = {0, 0, 0, 0};
long in0, in1, in2, in3;
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
&result.v2, in2);
return result;
}
static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result = {0, 0, 0, 0};
long in0, in1, in2, in3;
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
return result;
}
static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
{
pal_cache_config_info_t ci;
long status;
unsigned long in0, in1, in2, in3, r9, r10;
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
status = ia64_pal_cache_config_info(in1, in2, &ci);
r9 = ci.pcci_info_1.pcci1_data;
r10 = ci.pcci_info_2.pcci2_data;
return ((struct ia64_pal_retval){status, r9, r10, 0});
}
#define GUEST_IMPL_VA_MSB 59
#define GUEST_RID_BITS 18
static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
{
pal_vm_info_1_u_t vminfo1;
pal_vm_info_2_u_t vminfo2;
struct ia64_pal_retval result;
PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
if (!result.status) {
vminfo1.pvi1_val = result.v0;
vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
result.v0 = vminfo1.pvi1_val;
vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
result.v1 = vminfo2.pvi2_val;
}
return result;
}
static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
unsigned long in0, in1, in2, in3;
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
result.status = ia64_pal_vm_info(in1, in2,
(pal_tc_info_u_t *)&result.v1, &result.v2);
return result;
}
static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
{
u64 index = 0;
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
if (p->exit_reason == EXIT_REASON_PAL_CALL)
index = p->u.pal_data.gr28;
return index;
}
static void prepare_for_halt(struct kvm_vcpu *vcpu)
{
vcpu->arch.timer_pending = 1;
vcpu->arch.timer_fired = 0;
}
static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
{
long status;
unsigned long in0, in1, in2, in3, r9;
unsigned long pm_buffer[16];
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
status = ia64_pal_perf_mon_info(pm_buffer,
(pal_perf_mon_info_u_t *) &r9);
if (status != 0) {
printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
} else {
if (in1)
memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
else {
status = PAL_STATUS_EINVAL;
printk(KERN_WARNING"Invalid parameters "
"for PAL call:0x%lx!\n", in0);
}
}
return (struct ia64_pal_retval){status, r9, 0, 0};
}
static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
{
unsigned long in0, in1, in2, in3;
long status;
unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
| (1UL << 61) | (1UL << 60);
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
if (in1) {
memcpy((void *)in1, &res, sizeof(res));
status = 0;
} else{
status = PAL_STATUS_EINVAL;
printk(KERN_WARNING"Invalid parameters "
"for PAL call:0x%lx!\n", in0);
}
return (struct ia64_pal_retval){status, 0, 0, 0};
}
static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
{
unsigned long r9;
long status;
status = ia64_pal_mem_attrib(&r9);
return (struct ia64_pal_retval){status, r9, 0, 0};
}
static void remote_pal_prefetch_visibility(void *v)
{
s64 trans_type = (s64)v;
ia64_pal_prefetch_visibility(trans_type);
}
static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result = {0, 0, 0, 0};
unsigned long in0, in1, in2, in3;
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
result.status = ia64_pal_prefetch_visibility(in1);
if (result.status == 0) {
/* Must be performed on all remote processors
in the coherence domain. */
smp_call_function(remote_pal_prefetch_visibility,
(void *)in1, 1);
/* Unnecessary on remote processors for other vcpus! */
result.status = 1;
}
return result;
}
static void remote_pal_mc_drain(void *v)
{
ia64_pal_mc_drain();
}
static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result = {0, 0, 0, 0};
unsigned long in0, in1, in2, in3;
kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
if (in1 == 0 && in2) {
char brand_info[128];
result.status = ia64_pal_get_brand_info(brand_info);
if (result.status == PAL_STATUS_SUCCESS)
memcpy((void *)in2, brand_info, 128);
} else {
result.status = PAL_STATUS_REQUIRES_MEMORY;
printk(KERN_WARNING"Invalid parameters for "
"PAL call:0x%lx!\n", in0);
}
return result;
}
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u64 gr28;
struct ia64_pal_retval result;
int ret = 1;
gr28 = kvm_get_pal_call_index(vcpu);
switch (gr28) {
case PAL_CACHE_FLUSH:
result = pal_cache_flush(vcpu);
break;
case PAL_MEM_ATTRIB:
result = pal_mem_attrib(vcpu);
break;
case PAL_CACHE_SUMMARY:
result = pal_cache_summary(vcpu);
break;
case PAL_PERF_MON_INFO:
result = pal_perf_mon_info(vcpu);
break;
case PAL_HALT_INFO:
result = pal_halt_info(vcpu);
break;
case PAL_HALT_LIGHT:
{
INIT_PAL_STATUS_SUCCESS(result);
prepare_for_halt(vcpu);
if (kvm_highest_pending_irq(vcpu) == -1)
ret = kvm_emulate_halt(vcpu);
}
break;
case PAL_PREFETCH_VISIBILITY:
result = pal_prefetch_visibility(vcpu);
break;
case PAL_MC_DRAIN:
result.status = ia64_pal_mc_drain();
/* FIXME: All vcpus likely call PAL_MC_DRAIN.
That causes congestion. */
smp_call_function(remote_pal_mc_drain, NULL, 1);
break;
case PAL_FREQ_RATIOS:
result = pal_freq_ratios(vcpu);
break;
case PAL_FREQ_BASE:
result = pal_freq_base(vcpu);
break;
case PAL_LOGICAL_TO_PHYSICAL :
result = pal_logical_to_physica(vcpu);
break;
case PAL_VM_SUMMARY :
result = pal_vm_summary(vcpu);
break;
case PAL_VM_INFO :
result = pal_vm_info(vcpu);
break;
case PAL_PLATFORM_ADDR :
result = pal_platform_addr(vcpu);
break;
case PAL_CACHE_INFO:
result = pal_cache_info(vcpu);
break;
case PAL_PTCE_INFO:
INIT_PAL_STATUS_SUCCESS(result);
result.v1 = (1L << 32) | 1L;
break;
case PAL_REGISTER_INFO:
result = pal_register_info(vcpu);
break;
case PAL_VM_PAGE_SIZE:
result.status = ia64_pal_vm_page_size(&result.v0,
&result.v1);
break;
case PAL_RSE_INFO:
result.status = ia64_pal_rse_info(&result.v0,
(pal_hints_u_t *)&result.v1);
break;
case PAL_PROC_GET_FEATURES:
result = pal_proc_get_features(vcpu);
break;
case PAL_DEBUG_INFO:
result.status = ia64_pal_debug_info(&result.v0,
&result.v1);
break;
case PAL_VERSION:
result.status = ia64_pal_version(
(pal_version_u_t *)&result.v0,
(pal_version_u_t *)&result.v1);
break;
case PAL_FIXED_ADDR:
result.status = PAL_STATUS_SUCCESS;
result.v0 = vcpu->vcpu_id;
break;
case PAL_BRAND_INFO:
result = pal_get_brand_info(vcpu);
break;
case PAL_GET_PSTATE:
case PAL_CACHE_SHARED_INFO:
INIT_PAL_STATUS_UNIMPLEMENTED(result);
break;
default:
INIT_PAL_STATUS_UNIMPLEMENTED(result);
printk(KERN_WARNING"kvm: Unsupported pal call,"
" index:0x%lx\n", gr28);
}
set_pal_result(vcpu, result);
return ret;
}
static struct sal_ret_values sal_emulator(struct kvm *kvm,
long index, unsigned long in1,
unsigned long in2, unsigned long in3,
unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
unsigned long r9 = 0;
unsigned long r10 = 0;
long r11 = 0;
long status;
status = 0;
switch (index) {
case SAL_FREQ_BASE:
status = ia64_sal_freq_base(in1, &r9, &r10);
break;
case SAL_PCI_CONFIG_READ:
printk(KERN_WARNING"kvm: Not allowed to call here!"
" SAL_PCI_CONFIG_READ\n");
break;
case SAL_PCI_CONFIG_WRITE:
printk(KERN_WARNING"kvm: Not allowed to call here!"
" SAL_PCI_CONFIG_WRITE\n");
break;
case SAL_SET_VECTORS:
if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
status = -2;
} else {
kvm->arch.rdv_sal_data.boot_ip = in2;
kvm->arch.rdv_sal_data.boot_gp = in3;
}
printk("Rendvous called! iip:%lx\n\n", in2);
} else
printk(KERN_WARNING"kvm: CALLED SAL_SET_VECTORS %lu."
"ignored...\n", in1);
break;
case SAL_GET_STATE_INFO:
/* No more info. */
status = -5;
r9 = 0;
break;
case SAL_GET_STATE_INFO_SIZE:
/* Return a dummy size. */
status = 0;
r9 = 128;
break;
case SAL_CLEAR_STATE_INFO:
/* Noop. */
break;
case SAL_MC_RENDEZ:
printk(KERN_WARNING
"kvm: called SAL_MC_RENDEZ. ignored...\n");
break;
case SAL_MC_SET_PARAMS:
printk(KERN_WARNING
"kvm: called SAL_MC_SET_PARAMS.ignored!\n");
break;
case SAL_CACHE_FLUSH:
if (1) {
/*
* Flush using SAL. This method is faster but has a
* side effect on other vcpus running on this cpu.
*/
status = ia64_sal_cache_flush(in1);
} else {
/*
* Maybe we need to implement a method
* without that side effect!
*/
status = 0;
}
break;
case SAL_CACHE_INIT:
printk(KERN_WARNING
"kvm: called SAL_CACHE_INIT. ignored...\n");
break;
case SAL_UPDATE_PAL:
printk(KERN_WARNING
"kvm: CALLED SAL_UPDATE_PAL. ignored...\n");
break;
default:
printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
" index:%ld\n", index);
status = -1;
break;
}
return ((struct sal_ret_values) {status, r9, r10, r11});
}
static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){
struct exit_ctl_data *p;
p = kvm_get_exit_data(vcpu);
if (p->exit_reason == EXIT_REASON_SAL_CALL) {
*in0 = p->u.sal_data.in0;
*in1 = p->u.sal_data.in1;
*in2 = p->u.sal_data.in2;
*in3 = p->u.sal_data.in3;
*in4 = p->u.sal_data.in4;
*in5 = p->u.sal_data.in5;
*in6 = p->u.sal_data.in6;
*in7 = p->u.sal_data.in7;
return ;
}
*in0 = 0;
}
void kvm_sal_emul(struct kvm_vcpu *vcpu)
{
struct sal_ret_values result;
u64 index, in1, in2, in3, in4, in5, in6, in7;
kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
&in3, &in4, &in5, &in6, &in7);
result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
in4, in5, in6, in7);
set_sal_result(vcpu, result);
}
| gpl-2.0 |
garwynn/I727_JB_Kernel_v1 | kernel/gcov/base.c | 12185 | 3697 | /*
* This code maintains a list of active profiling data structures.
*
* Copyright IBM Corp. 2009
* Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
*
* Uses gcc-internal data definitions.
* Based on the gcov-kernel patch by:
* Hubertus Franke <frankeh@us.ibm.com>
* Nigel Hinds <nhinds@us.ibm.com>
* Rajan Ravindran <rajancr@us.ibm.com>
* Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
* Paul Larson
*/
#define pr_fmt(fmt) "gcov: " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include "gcov.h"
static struct gcov_info *gcov_info_head;
static int gcov_events_enabled;
static DEFINE_MUTEX(gcov_lock);
/*
* __gcov_init is called by gcc-generated constructor code for each object
* file compiled with -fprofile-arcs.
*/
void __gcov_init(struct gcov_info *info)
{
static unsigned int gcov_version;
mutex_lock(&gcov_lock);
if (gcov_version == 0) {
gcov_version = info->version;
/*
* Printing gcc's version magic may prove useful for debugging
* incompatibility reports.
*/
pr_info("version magic: 0x%x\n", gcov_version);
}
/*
* Add new profiling data structure to list and inform event
* listener.
*/
info->next = gcov_info_head;
gcov_info_head = info;
if (gcov_events_enabled)
gcov_event(GCOV_ADD, info);
mutex_unlock(&gcov_lock);
}
EXPORT_SYMBOL(__gcov_init);
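/*
* For reference, the constructor gcc emits for each object file built
* with -fprofile-arcs looks roughly like this (a sketch of generated
* code, not code in this file):
*
* static struct gcov_info gcov_info_for_file = { ... };
*
* static void __attribute__((constructor)) gcov_ctor(void)
* {
* __gcov_init(&gcov_info_for_file);
* }
*
* In the kernel these constructors are gathered at link time and
* invoked by kernel init or module-load code rather than by a
* userspace C runtime.
*/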
/*
* These functions may be referenced by gcc-generated profiling code but serve
* no function for kernel profiling.
*/
void __gcov_flush(void)
{
/* Unused. */
}
EXPORT_SYMBOL(__gcov_flush);
void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)
{
/* Unused. */
}
EXPORT_SYMBOL(__gcov_merge_add);
void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)
{
/* Unused. */
}
EXPORT_SYMBOL(__gcov_merge_single);
void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)
{
/* Unused. */
}
EXPORT_SYMBOL(__gcov_merge_delta);
/**
* gcov_enable_events - enable event reporting through gcov_event()
*
* Turn on reporting of profiling data load/unload-events through the
* gcov_event() callback. Also replay all previous events once. This function
* is needed because some events are potentially generated too early for the
* callback implementation to handle them initially.
*/
void gcov_enable_events(void)
{
struct gcov_info *info;
mutex_lock(&gcov_lock);
gcov_events_enabled = 1;
/* Perform event callback for previously registered entries. */
for (info = gcov_info_head; info; info = info->next)
gcov_event(GCOV_ADD, info);
mutex_unlock(&gcov_lock);
}
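/*
* Illustrative caller (sketch; in-tree, the debugfs frontend plays this
* role): an event consumer first makes gcov_event() safe to call, then
* enables events so that everything registered so far is replayed.
*
* static int __init my_gcov_frontend_init(void)
* {
* ... set up whatever state gcov_event() needs ...
* gcov_enable_events();
* return 0;
* }
*/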
#ifdef CONFIG_MODULES
static inline int within(void *addr, void *start, unsigned long size)
{
return ((addr >= start) && (addr < start + size));
}
/* Update list and generate events when modules are unloaded. */
static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
void *data)
{
struct module *mod = data;
struct gcov_info *info;
struct gcov_info *prev;
if (event != MODULE_STATE_GOING)
return NOTIFY_OK;
mutex_lock(&gcov_lock);
prev = NULL;
/* Remove entries located in module from linked list. */
for (info = gcov_info_head; info; info = info->next) {
if (within(info, mod->module_core, mod->core_size)) {
if (prev)
prev->next = info->next;
else
gcov_info_head = info->next;
if (gcov_events_enabled)
gcov_event(GCOV_REMOVE, info);
} else
prev = info;
}
mutex_unlock(&gcov_lock);
return NOTIFY_OK;
}
static struct notifier_block gcov_nb = {
.notifier_call = gcov_module_notifier,
};
static int __init gcov_init(void)
{
return register_module_notifier(&gcov_nb);
}
device_initcall(gcov_init);
#endif /* CONFIG_MODULES */
| gpl-2.0 |
jiangchao87/m8uhl | arch/m32r/kernel/setup.c | 12953 | 10349 | /*
* linux/arch/m32r/kernel/setup.c
*
* Setup routines for Renesas M32R
*
* Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
* Hitoshi Yamamoto
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/initrd.h>
#include <linux/major.h>
#include <linux/root_dev.h>
#include <linux/seq_file.h>
#include <linux/timex.h>
#include <linux/screen_info.h>
#include <linux/cpu.h>
#include <linux/nodemask.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
#include <asm/setup.h>
#include <asm/sections.h>
#ifdef CONFIG_MMU
extern void init_mmu(void);
#endif
extern char _end[];
/*
* Machine setup..
*/
struct cpuinfo_m32r boot_cpu_data;
#ifdef CONFIG_BLK_DEV_RAM
extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start; /* starting block # of image */
#endif
#if defined(CONFIG_VGA_CONSOLE)
struct screen_info screen_info = {
.orig_video_lines = 25,
.orig_video_cols = 80,
.orig_video_mode = 0,
.orig_video_ega_bx = 0,
.orig_video_isVGA = 1,
.orig_video_points = 8
};
#endif
extern int root_mountflags;
static char __initdata command_line[COMMAND_LINE_SIZE];
static struct resource data_resource = {
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
static struct resource code_resource = {
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
unsigned long memory_start;
unsigned long memory_end;
void __init setup_arch(char **);
int get_cpuinfo(char *);
static __inline__ void parse_mem_cmdline(char ** cmdline_p)
{
char c = ' ';
char *to = command_line;
char *from = COMMAND_LINE;
int len = 0;
int usermem = 0;
/* Save unparsed command line copy for /proc/cmdline */
memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
memory_start = (unsigned long)CONFIG_MEMORY_START+PAGE_OFFSET;
memory_end = memory_start+(unsigned long)CONFIG_MEMORY_SIZE;
for ( ; ; ) {
if (c == ' ' && !memcmp(from, "mem=", 4)) {
if (to != command_line)
to--;
{
unsigned long mem_size;
usermem = 1;
mem_size = memparse(from+4, &from);
memory_end = memory_start + mem_size;
}
}
c = *(from++);
if (!c)
break;
if (COMMAND_LINE_SIZE <= ++len)
break;
*(to++) = c;
}
*to = '\0';
*cmdline_p = command_line;
if (usermem)
printk(KERN_INFO "user-defined physical RAM map:\n");
}
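/*
* Example (illustrative): booting with "console=ttyS0 mem=16M" leaves
* "console=ttyS0" in *cmdline_p, keeps the untouched string in
* boot_command_line, and clamps memory_end to memory_start + 16M.
*/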
#ifndef CONFIG_DISCONTIGMEM
static unsigned long __init setup_memory(void)
{
unsigned long start_pfn, max_low_pfn, bootmap_size;
start_pfn = PFN_UP( __pa(_end) );
max_low_pfn = PFN_DOWN( __pa(memory_end) );
/*
* Initialize the boot-time allocator (with low memory only):
*/
bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
CONFIG_MEMORY_START>>PAGE_SHIFT, max_low_pfn);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
{
unsigned long curr_pfn;
unsigned long last_pfn;
unsigned long pages;
/*
* We are rounding up the start address of usable memory:
*/
curr_pfn = PFN_UP(__pa(memory_start));
/*
* ... and at the end of the usable range downwards:
*/
last_pfn = PFN_DOWN(__pa(memory_end));
if (last_pfn > max_low_pfn)
last_pfn = max_low_pfn;
pages = last_pfn - curr_pfn;
free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}
/*
* Reserve the kernel text and the bootmem bitmap.
* We do this in two steps (first step
* was init_bootmem()), because this catches the (definitely buggy)
* case of us accidentally initializing the bootmem allocator with
* an invalid RAM area.
*/
reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
- CONFIG_MEMORY_START,
BOOTMEM_DEFAULT);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
/*
* reserve memory hole
*/
#ifdef CONFIG_MEMHOLE
reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE,
BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START, INITRD_SIZE,
BOOTMEM_DEFAULT);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
initrd_start, INITRD_SIZE);
} else {
printk("initrd extends beyond end of memory "
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
INITRD_START + INITRD_SIZE,
max_low_pfn << PAGE_SHIFT);
initrd_start = 0;
}
}
#endif
return max_low_pfn;
}
#else /* CONFIG_DISCONTIGMEM */
extern unsigned long setup_memory(void);
#endif /* CONFIG_DISCONTIGMEM */
void __init setup_arch(char **cmdline_p)
{
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
boot_cpu_data.cpu_clock = M32R_CPUCLK;
boot_cpu_data.bus_clock = M32R_BUSCLK;
boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
if (!MOUNT_ROOT_RDONLY)
root_mountflags &= ~MS_RDONLY;
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
#endif
#ifdef CONFIG_DISCONTIGMEM
nodes_clear(node_online_map);
node_set_online(0);
node_set_online(1);
#endif /* CONFIG_DISCONTIGMEM */
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
code_resource.start = virt_to_phys(_text);
code_resource.end = virt_to_phys(_etext)-1;
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
parse_mem_cmdline(cmdline_p);
setup_memory();
paging_init();
}
static struct cpu cpu_devices[NR_CPUS];
static int __init topology_init(void)
{
int i;
for_each_present_cpu(i)
register_cpu(&cpu_devices[i], i);
return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_PROC_FS
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct cpuinfo_m32r *c = v;
unsigned long cpu = c - cpu_data;
#ifdef CONFIG_SMP
if (!cpu_online(cpu))
return 0;
#endif /* CONFIG_SMP */
seq_printf(m, "processor\t: %ld\n", cpu);
#if defined(CONFIG_CHIP_VDEC2)
seq_printf(m, "cpu family\t: VDEC2\n"
"cache size\t: Unknown\n");
#elif defined(CONFIG_CHIP_M32700)
seq_printf(m,"cpu family\t: M32700\n"
"cache size\t: I-8KB/D-8KB\n");
#elif defined(CONFIG_CHIP_M32102)
seq_printf(m,"cpu family\t: M32102\n"
"cache size\t: I-8KB\n");
#elif defined(CONFIG_CHIP_OPSP)
seq_printf(m,"cpu family\t: OPSP\n"
"cache size\t: I-8KB/D-8KB\n");
#elif defined(CONFIG_CHIP_MP)
seq_printf(m, "cpu family\t: M32R-MP\n"
"cache size\t: I-xxKB/D-xxKB\n");
#elif defined(CONFIG_CHIP_M32104)
seq_printf(m,"cpu family\t: M32104\n"
"cache size\t: I-8KB/D-8KB\n");
#else
seq_printf(m, "cpu family\t: Unknown\n");
#endif
seq_printf(m, "bogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
#if defined(CONFIG_PLAT_MAPPI)
seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
#elif defined(CONFIG_PLAT_MAPPI2)
seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
#elif defined(CONFIG_PLAT_MAPPI3)
seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n");
#elif defined(CONFIG_PLAT_M32700UT)
seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
#elif defined(CONFIG_PLAT_OPSPUT)
seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
#elif defined(CONFIG_PLAT_USRV)
seq_printf(m, "Machine\t\t: uServer\n");
#elif defined(CONFIG_PLAT_OAKS32R)
seq_printf(m, "Machine\t\t: OAKS32R\n");
#elif defined(CONFIG_PLAT_M32104UT)
seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n");
#else
seq_printf(m, "Machine\t\t: Unknown\n");
#endif
#define PRINT_CLOCK(name, value) \
seq_printf(m, name " clock\t: %d.%02dMHz\n", \
((value) / 1000000), ((value) % 1000000)/10000)
PRINT_CLOCK("CPU", (int)c->cpu_clock);
PRINT_CLOCK("Bus", (int)c->bus_clock);
seq_printf(m, "\n");
return 0;
}
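/*
* Expansion example (illustrative): with c->cpu_clock == 50000000,
* PRINT_CLOCK("CPU", 50000000) prints "CPU clock\t: 50.00MHz", since
* 50000000 / 1000000 == 50 and (50000000 % 1000000) / 10000 == 0.
*/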
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */
unsigned long cpu_initialized __initdata = 0;
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process.
* We reload them nevertheless; this function acts as a
* 'CPU state barrier', and nothing should get across.
*/
#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
void __init cpu_init (void)
{
int cpu_id = smp_processor_id();
if (test_and_set_bit(cpu_id, &cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
for ( ; ; )
local_irq_enable();
}
printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
/* Set up and load the per-CPU TSS and LDT */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
if (current->mm)
BUG();
/* Force FPU initialization */
current_thread_info()->status = 0;
clear_used_math();
#ifdef CONFIG_MMU
/* Set up MMU */
init_mmu();
#endif
/* Set up ICUIMASK */
outl(0x00070000, M32R_ICU_IMASK_PORTL); /* imask=111 */
}
#endif /* defined(CONFIG_CHIP_VDEC2) ... */
| gpl-2.0 |
archfan/xu4-linux | fs/ocfs2/suballoc.c | 154 | 78736 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* suballoc.c
*
* metadata alloc and free
* Inspired by ext3 block groups.
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"
#include "buffer_head_io.h"
#define NOT_ALLOC_NEW_GROUP 0
#define ALLOC_NEW_GROUP 0x1
#define ALLOC_GROUPS_FROM_GLOBAL 0x2
#define OCFS2_MAX_TO_STEAL 1024
struct ocfs2_suballoc_result {
u64 sr_bg_blkno; /* The bg we allocated from. Set
to 0 when a block group is
contiguous. */
u64 sr_bg_stable_blkno; /*
* Doesn't change, always
* set to target block
* group descriptor
* block.
*/
u64 sr_blkno; /* The first allocated block */
unsigned int sr_bit_offset; /* The bit in the bg */
unsigned int sr_bits; /* How many bits we claimed */
};
static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
{
if (res->sr_blkno == 0)
return 0;
if (res->sr_bg_blkno)
return res->sr_bg_blkno;
return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
}
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
u64 group_blkno,
unsigned int group_clusters,
u16 my_chain,
struct ocfs2_chain_list *cl);
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
struct inode *alloc_inode,
struct buffer_head *bh,
u64 max_block,
u64 *last_alloc_group,
int flags);
static int ocfs2_cluster_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res);
static int ocfs2_block_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res);
static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res);
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr);
static int ocfs2_relink_block_group(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *fe_bh,
struct buffer_head *bg_bh,
struct buffer_head *prev_bg_bh,
u16 chain);
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
u32 wanted);
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
u64 bg_blkno,
u16 bg_bit_off);
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
u64 data_blkno,
u64 *bg_blkno,
u16 *bg_bit_off);
static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
u32 bits_wanted, u64 max_block,
int flags,
struct ocfs2_alloc_context **ac);
void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
{
struct inode *inode = ac->ac_inode;
if (inode) {
if (ac->ac_which != OCFS2_AC_USE_LOCAL)
ocfs2_inode_unlock(inode, 1);
mutex_unlock(&inode->i_mutex);
iput(inode);
ac->ac_inode = NULL;
}
brelse(ac->ac_bh);
ac->ac_bh = NULL;
ac->ac_resv = NULL;
kfree(ac->ac_find_loc_priv);
ac->ac_find_loc_priv = NULL;
}
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
{
ocfs2_free_ac_resource(ac);
kfree(ac);
}
static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
{
return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
}
#define do_error(fmt, ...) \
do { \
if (resize) \
mlog(ML_ERROR, fmt, ##__VA_ARGS__); \
else \
return ocfs2_error(sb, fmt, ##__VA_ARGS__); \
} while (0)
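/*
* Usage note (sketch): in the validators below, a call such as
*
* do_error("Group descriptor #%llu has bad chain %u\n", ...);
*
* only logs via mlog() on the resize path (resize != 0), but becomes
* "return ocfs2_error(...)" - marking the filesystem - on the normal
* read path.
*/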
static int ocfs2_validate_gd_self(struct super_block *sb,
struct buffer_head *bh,
int resize)
{
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
do_error("Group descriptor #%llu has bad signature %.*s\n",
(unsigned long long)bh->b_blocknr, 7,
gd->bg_signature);
}
if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
do_error("Group descriptor #%llu has an invalid bg_blkno of %llu\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(gd->bg_blkno));
}
if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
do_error("Group descriptor #%llu has an invalid fs_generation of #%u\n",
(unsigned long long)bh->b_blocknr,
le32_to_cpu(gd->bg_generation));
}
if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
do_error("Group descriptor #%llu has bit count %u but claims that %u are free\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_bits),
le16_to_cpu(gd->bg_free_bits_count));
}
if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
do_error("Group descriptor #%llu has bit count %u but max bitmap bits of %u\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_bits),
8 * le16_to_cpu(gd->bg_size));
}
return 0;
}
static int ocfs2_validate_gd_parent(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh,
int resize)
{
unsigned int max_bits;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
if (di->i_blkno != gd->bg_parent_dinode) {
do_error("Group descriptor #%llu has bad parent pointer (%llu, expected %llu)\n",
(unsigned long long)bh->b_blocknr,
(unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
(unsigned long long)le64_to_cpu(di->i_blkno));
}
max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
if (le16_to_cpu(gd->bg_bits) > max_bits) {
do_error("Group descriptor #%llu has bit count of %u\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_bits));
}
/* In resize, we may meet the case bg_chain == cl_next_free_rec. */
if ((le16_to_cpu(gd->bg_chain) >
le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
((le16_to_cpu(gd->bg_chain) ==
le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
do_error("Group descriptor #%llu has bad chain %u\n",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_chain));
}
return 0;
}
#undef do_error
/*
* This version only prints errors. It does not fail the filesystem, and
* exists only for resize.
*/
int ocfs2_check_group_descriptor(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh)
{
int rc;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
if (rc) {
mlog(ML_ERROR,
"Checksum failed for group descriptor %llu\n",
(unsigned long long)bh->b_blocknr);
} else
rc = ocfs2_validate_gd_self(sb, bh, 1);
if (!rc)
rc = ocfs2_validate_gd_parent(sb, di, bh, 1);
return rc;
}
static int ocfs2_validate_group_descriptor(struct super_block *sb,
struct buffer_head *bh)
{
int rc;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
trace_ocfs2_validate_group_descriptor(
(unsigned long long)bh->b_blocknr);
BUG_ON(!buffer_uptodate(bh));
/*
* If the ecc fails, we return the error but otherwise
* leave the filesystem running. We know any error is
* local to this block.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
if (rc)
return rc;
/*
* Errors after here are fatal.
*/
return ocfs2_validate_gd_self(sb, bh, 0);
}
int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di,
u64 gd_blkno, struct buffer_head **bh)
{
int rc;
struct buffer_head *tmp = *bh;
rc = ocfs2_read_block(INODE_CACHE(inode), gd_blkno, &tmp,
ocfs2_validate_group_descriptor);
if (rc)
goto out;
rc = ocfs2_validate_gd_parent(inode->i_sb, di, tmp, 0);
if (rc) {
brelse(tmp);
goto out;
}
/* If ocfs2_read_block() got us a new bh, pass it up. */
if (!*bh)
*bh = tmp;
out:
return rc;
}
static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
struct ocfs2_group_desc *bg,
struct ocfs2_chain_list *cl,
u64 p_blkno, unsigned int clusters)
{
struct ocfs2_extent_list *el = &bg->bg_list;
struct ocfs2_extent_rec *rec;
BUG_ON(!ocfs2_supports_discontig_bg(osb));
if (!el->l_next_free_rec)
el->l_count = cpu_to_le16(ocfs2_extent_recs_per_gd(osb->sb));
rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec)];
rec->e_blkno = cpu_to_le64(p_blkno);
rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
le16_to_cpu(cl->cl_bpc));
rec->e_leaf_clusters = cpu_to_le16(clusters);
le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&bg->bg_free_bits_count,
clusters * le16_to_cpu(cl->cl_bpc));
le16_add_cpu(&el->l_next_free_rec, 1);
}
static int ocfs2_block_group_fill(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
u64 group_blkno,
unsigned int group_clusters,
u16 my_chain,
struct ocfs2_chain_list *cl)
{
int status = 0;
struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct super_block * sb = alloc_inode->i_sb;
if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
status = ocfs2_error(alloc_inode->i_sb,
"group block (%llu) != b_blocknr (%llu)\n",
(unsigned long long)group_blkno,
(unsigned long long) bg_bh->b_blocknr);
goto bail;
}
status = ocfs2_journal_access_gd(handle,
INODE_CACHE(alloc_inode),
bg_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
memset(bg, 0, sb->s_blocksize);
strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
osb->s_feature_incompat));
bg->bg_chain = cpu_to_le16(my_chain);
bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
bg->bg_blkno = cpu_to_le64(group_blkno);
if (group_clusters == le16_to_cpu(cl->cl_cpg))
bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
else
ocfs2_bg_discontig_add_extent(osb, bg, cl, group_blkno,
group_clusters);
/* set the 1st bit in the bitmap to account for the descriptor block */
ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
ocfs2_journal_dirty(handle, bg_bh);
/* There is no need to zero out or otherwise initialize the
* other blocks in a group - All valid FS metadata in a block
* group stores the superblock fs_generation value at
* allocation time. */
bail:
if (status)
mlog_errno(status);
return status;
}
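/*
* Descriptive note (added): despite the name, "smallest" means the
* chain with the lowest c_total, i.e. the least-populated chain, so
* that new block groups are spread evenly across the chain list.
*/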
static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
{
u16 curr, best;
best = curr = 0;
while (curr < le16_to_cpu(cl->cl_count)) {
if (le32_to_cpu(cl->cl_recs[best].c_total) >
le32_to_cpu(cl->cl_recs[curr].c_total))
best = curr;
curr++;
}
return best;
}
static struct buffer_head *
ocfs2_block_group_alloc_contig(struct ocfs2_super *osb, handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl)
{
int status;
u32 bit_off, num_bits;
u64 bg_blkno;
struct buffer_head *bg_bh;
unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
status = ocfs2_claim_clusters(handle, ac,
le16_to_cpu(cl->cl_cpg), &bit_off,
&num_bits);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
trace_ocfs2_block_group_alloc_contig(
(unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
bg_blkno, num_bits, alloc_rec, cl);
if (status < 0) {
brelse(bg_bh);
mlog_errno(status);
}
bail:
return status ? ERR_PTR(status) : bg_bh;
}
static int ocfs2_block_group_claim_bits(struct ocfs2_super *osb,
handle_t *handle,
struct ocfs2_alloc_context *ac,
unsigned int min_bits,
u32 *bit_off, u32 *num_bits)
{
int status = 0;
while (min_bits) {
status = ocfs2_claim_clusters(handle, ac, min_bits,
bit_off, num_bits);
if (status != -ENOSPC)
break;
min_bits >>= 1;
}
return status;
}
static int ocfs2_block_group_grow_discontig(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *bg_bh,
struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl,
unsigned int min_bits)
{
int status;
struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
struct ocfs2_group_desc *bg =
(struct ocfs2_group_desc *)bg_bh->b_data;
unsigned int needed = le16_to_cpu(cl->cl_cpg) -
le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
u32 p_cpos, clusters;
u64 p_blkno;
struct ocfs2_extent_list *el = &bg->bg_list;
status = ocfs2_journal_access_gd(handle,
INODE_CACHE(alloc_inode),
bg_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
while ((needed > 0) && (le16_to_cpu(el->l_next_free_rec) <
le16_to_cpu(el->l_count))) {
if (min_bits > needed)
min_bits = needed;
status = ocfs2_block_group_claim_bits(osb, handle, ac,
min_bits, &p_cpos,
&clusters);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
p_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cpos);
ocfs2_bg_discontig_add_extent(osb, bg, cl, p_blkno,
clusters);
min_bits = clusters;
needed = le16_to_cpu(cl->cl_cpg) -
le16_to_cpu(bg->bg_bits) / le16_to_cpu(cl->cl_bpc);
}
if (needed > 0) {
/*
* We have used up all the extent recs but still can't
* fill up the cpg. So bail out.
*/
status = -ENOSPC;
goto bail;
}
ocfs2_journal_dirty(handle, bg_bh);
bail:
return status;
}
static void ocfs2_bg_alloc_cleanup(handle_t *handle,
struct ocfs2_alloc_context *cluster_ac,
struct inode *alloc_inode,
struct buffer_head *bg_bh)
{
int i, ret;
struct ocfs2_group_desc *bg;
struct ocfs2_extent_list *el;
struct ocfs2_extent_rec *rec;
if (!bg_bh)
return;
bg = (struct ocfs2_group_desc *)bg_bh->b_data;
el = &bg->bg_list;
for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
rec = &el->l_recs[i];
ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
cluster_ac->ac_bh,
le64_to_cpu(rec->e_blkno),
le16_to_cpu(rec->e_leaf_clusters));
if (ret)
mlog_errno(ret);
/* Try all the clusters to free */
}
ocfs2_remove_from_cache(INODE_CACHE(alloc_inode), bg_bh);
brelse(bg_bh);
}
static struct buffer_head *
ocfs2_block_group_alloc_discontig(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_alloc_context *ac,
struct ocfs2_chain_list *cl)
{
int status;
u32 bit_off, num_bits;
u64 bg_blkno;
unsigned int min_bits = le16_to_cpu(cl->cl_cpg) >> 1;
struct buffer_head *bg_bh = NULL;
unsigned int alloc_rec = ocfs2_find_smallest_chain(cl);
struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
if (!ocfs2_supports_discontig_bg(osb)) {
status = -ENOSPC;
goto bail;
}
status = ocfs2_extend_trans(handle,
ocfs2_calc_bg_discontig_credits(osb->sb));
if (status) {
mlog_errno(status);
goto bail;
}
/*
* We're going to be grabbing from multiple cluster groups.
* We don't have enough credits to relink them all, and the
* cluster groups will be staying in cache for the duration of
* this operation.
*/
ac->ac_disable_chain_relink = 1;
/* Claim the first region */
status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
&bit_off, &num_bits);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
min_bits = num_bits;
/* setup the group */
bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
trace_ocfs2_block_group_alloc_discontig(
(unsigned long long)bg_blkno, alloc_rec);
bg_bh = sb_getblk(osb->sb, bg_blkno);
if (!bg_bh) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh);
status = ocfs2_block_group_fill(handle, alloc_inode, bg_bh,
bg_blkno, num_bits, alloc_rec, cl);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_block_group_grow_discontig(handle, alloc_inode,
bg_bh, ac, cl, min_bits);
if (status)
mlog_errno(status);
bail:
if (status)
ocfs2_bg_alloc_cleanup(handle, ac, alloc_inode, bg_bh);
return status ? ERR_PTR(status) : bg_bh;
}
/*
* We expect the block group allocator to already be locked.
*/
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
struct inode *alloc_inode,
struct buffer_head *bh,
u64 max_block,
u64 *last_alloc_group,
int flags)
{
int status, credits;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
struct ocfs2_chain_list *cl;
struct ocfs2_alloc_context *ac = NULL;
handle_t *handle = NULL;
u16 alloc_rec;
struct buffer_head *bg_bh = NULL;
struct ocfs2_group_desc *bg;
BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
cl = &fe->id2.i_chain;
status = ocfs2_reserve_clusters_with_limit(osb,
le16_to_cpu(cl->cl_cpg),
max_block, flags, &ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
credits = ocfs2_calc_group_alloc_credits(osb->sb,
le16_to_cpu(cl->cl_cpg));
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
status = PTR_ERR(handle);
handle = NULL;
mlog_errno(status);
goto bail;
}
if (last_alloc_group && *last_alloc_group != 0) {
trace_ocfs2_block_group_alloc(
(unsigned long long)*last_alloc_group);
ac->ac_last_group = *last_alloc_group;
}
bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
ac, cl);
if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
bg_bh = ocfs2_block_group_alloc_discontig(handle,
alloc_inode,
ac, cl);
if (IS_ERR(bg_bh)) {
status = PTR_ERR(bg_bh);
bg_bh = NULL;
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
bg = (struct ocfs2_group_desc *) bg_bh->b_data;
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
alloc_rec = le16_to_cpu(bg->bg_chain);
le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
le16_to_cpu(bg->bg_free_bits_count));
le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
le16_to_cpu(bg->bg_bits));
cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
le16_add_cpu(&cl->cl_next_free_rec, 1);
le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
le16_to_cpu(bg->bg_free_bits_count));
le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
ocfs2_journal_dirty(handle, bh);
spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
le32_to_cpu(fe->i_clusters)));
spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
ocfs2_update_inode_fsync_trans(handle, alloc_inode, 0);
status = 0;
/* save the new last alloc group so that the caller can cache it. */
if (last_alloc_group)
*last_alloc_group = ac->ac_last_group;
bail:
if (handle)
ocfs2_commit_trans(osb, handle);
if (ac)
ocfs2_free_alloc_context(ac);
brelse(bg_bh);
if (status)
mlog_errno(status);
return status;
}
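/* Take i_mutex and the cluster lock on the allocator inode for
* 'type'/'slot' and ensure at least bits_wanted free bits, growing
* the allocator by one block group if 'flags' allows it. On success
* ac->ac_inode holds the locked inode and ac->ac_bh the validated
* dinode buffer; both are released by ocfs2_free_alloc_context(). */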
static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac,
int type,
u32 slot,
u64 *last_alloc_group,
int flags)
{
int status;
u32 bits_wanted = ac->ac_bits_wanted;
struct inode *alloc_inode;
struct buffer_head *bh = NULL;
struct ocfs2_dinode *fe;
u32 free_bits;
alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
if (!alloc_inode) {
mlog_errno(-EINVAL);
return -EINVAL;
}
mutex_lock(&alloc_inode->i_mutex);
status = ocfs2_inode_lock(alloc_inode, &bh, 1);
if (status < 0) {
mutex_unlock(&alloc_inode->i_mutex);
iput(alloc_inode);
mlog_errno(status);
return status;
}
ac->ac_inode = alloc_inode;
ac->ac_alloc_slot = slot;
fe = (struct ocfs2_dinode *) bh->b_data;
/* The bh was validated by the inode read inside
* ocfs2_inode_lock(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
status = ocfs2_error(alloc_inode->i_sb,
"Invalid chain allocator %llu\n",
(unsigned long long)le64_to_cpu(fe->i_blkno));
goto bail;
}
free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
le32_to_cpu(fe->id1.bitmap1.i_used);
if (bits_wanted > free_bits) {
/* cluster bitmap never grows */
if (ocfs2_is_cluster_bitmap(alloc_inode)) {
trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted,
free_bits);
status = -ENOSPC;
goto bail;
}
if (!(flags & ALLOC_NEW_GROUP)) {
trace_ocfs2_reserve_suballoc_bits_no_new_group(
slot, bits_wanted, free_bits);
status = -ENOSPC;
goto bail;
}
status = ocfs2_block_group_alloc(osb, alloc_inode, bh,
ac->ac_max_block,
last_alloc_group, flags);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
atomic_inc(&osb->alloc_stats.bg_extends);
/* You should never ask for this much metadata */
BUG_ON(bits_wanted >
(le32_to_cpu(fe->id1.bitmap1.i_total)
- le32_to_cpu(fe->id1.bitmap1.i_used)));
}
get_bh(bh);
ac->ac_bh = bh;
bail:
brelse(bh);
if (status)
mlog_errno(status);
return status;
}
static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
{
spin_lock(&osb->osb_lock);
osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
spin_unlock(&osb->osb_lock);
atomic_set(&osb->s_num_inodes_stolen, 0);
}
static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
{
spin_lock(&osb->osb_lock);
osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
spin_unlock(&osb->osb_lock);
atomic_set(&osb->s_num_meta_stolen, 0);
}
void ocfs2_init_steal_slots(struct ocfs2_super *osb)
{
ocfs2_init_inode_steal_slot(osb);
ocfs2_init_meta_steal_slot(osb);
}
static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
{
spin_lock(&osb->osb_lock);
if (type == INODE_ALLOC_SYSTEM_INODE)
osb->s_inode_steal_slot = slot;
else if (type == EXTENT_ALLOC_SYSTEM_INODE)
osb->s_meta_steal_slot = slot;
spin_unlock(&osb->osb_lock);
}
static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
{
int slot = OCFS2_INVALID_SLOT;
spin_lock(&osb->osb_lock);
if (type == INODE_ALLOC_SYSTEM_INODE)
slot = osb->s_inode_steal_slot;
else if (type == EXTENT_ALLOC_SYSTEM_INODE)
slot = osb->s_meta_steal_slot;
spin_unlock(&osb->osb_lock);
return slot;
}
static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
{
return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
}
static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
{
return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
}
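/* Reserve bits from another slot's allocator, starting just past the
* slot we stole from last time (or just past our own slot on the
* first attempt). A successful steal is remembered so the next
* attempt starts there. No new block groups are allocated while
* stealing. */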
static int ocfs2_steal_resource(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac,
int type)
{
int i, status = -ENOSPC;
int slot = __ocfs2_get_steal_slot(osb, type);
/* Start to steal resource from the first slot after ours. */
if (slot == OCFS2_INVALID_SLOT)
slot = osb->slot_num + 1;
for (i = 0; i < osb->max_slots; i++, slot++) {
if (slot == osb->max_slots)
slot = 0;
if (slot == osb->slot_num)
continue;
status = ocfs2_reserve_suballoc_bits(osb, ac,
type,
(u32)slot, NULL,
NOT_ALLOC_NEW_GROUP);
if (status >= 0) {
__ocfs2_set_steal_slot(osb, slot, type);
break;
}
ocfs2_free_ac_resource(ac);
}
return status;
}
static int ocfs2_steal_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac)
{
return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
}
static int ocfs2_steal_meta(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac)
{
return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
}
int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
int blocks,
struct ocfs2_alloc_context **ac)
{
int status;
int slot = ocfs2_get_meta_steal_slot(osb);
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
(*ac)->ac_bits_wanted = blocks;
(*ac)->ac_which = OCFS2_AC_USE_META;
(*ac)->ac_group_search = ocfs2_block_group_search;
if (slot != OCFS2_INVALID_SLOT &&
atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
goto extent_steal;
atomic_set(&osb->s_num_meta_stolen, 0);
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
(u32)osb->slot_num, NULL,
ALLOC_GROUPS_FROM_GLOBAL|ALLOC_NEW_GROUP);
if (status >= 0) {
status = 0;
if (slot != OCFS2_INVALID_SLOT)
ocfs2_init_meta_steal_slot(osb);
goto bail;
} else if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
ocfs2_free_ac_resource(*ac);
extent_steal:
status = ocfs2_steal_meta(osb, *ac);
atomic_inc(&osb->s_num_meta_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
struct ocfs2_extent_list *root_el,
struct ocfs2_alloc_context **ac)
{
return ocfs2_reserve_new_metadata_blocks(osb,
ocfs2_extend_meta_needed(root_el),
ac);
}
int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac)
{
int status;
int slot = ocfs2_get_inode_steal_slot(osb);
u64 alloc_group;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
(*ac)->ac_bits_wanted = 1;
(*ac)->ac_which = OCFS2_AC_USE_INODE;
(*ac)->ac_group_search = ocfs2_block_group_search;
/*
* stat(2) can't handle i_ino > 32bits, so we tell the
* lower levels not to allocate us a block group past that
* limit. The 'inode64' mount option avoids this behavior.
*/
if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64))
(*ac)->ac_max_block = (u32)~0U;
/*
* slot is set when we successfully steal inode from other nodes.
* It is reset in 3 places:
* 1. when we flush the truncate log
* 2. when we complete local alloc recovery.
* 3. when we successfully allocate from our own slot.
* After it is set, we will go on stealing inodes until we find the
* need to check our slots to see whether there is some space for us.
*/
if (slot != OCFS2_INVALID_SLOT &&
atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
goto inode_steal;
atomic_set(&osb->s_num_inodes_stolen, 0);
alloc_group = osb->osb_inode_alloc_group;
status = ocfs2_reserve_suballoc_bits(osb, *ac,
INODE_ALLOC_SYSTEM_INODE,
(u32)osb->slot_num,
&alloc_group,
ALLOC_NEW_GROUP |
ALLOC_GROUPS_FROM_GLOBAL);
if (status >= 0) {
status = 0;
spin_lock(&osb->osb_lock);
osb->osb_inode_alloc_group = alloc_group;
spin_unlock(&osb->osb_lock);
trace_ocfs2_reserve_new_inode_new_group(
(unsigned long long)alloc_group);
/*
* Some inodes must be freed by us, so try to allocate
* from our own next time.
*/
if (slot != OCFS2_INVALID_SLOT)
ocfs2_init_inode_steal_slot(osb);
goto bail;
} else if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
ocfs2_free_ac_resource(*ac);
inode_steal:
status = ocfs2_steal_inode(osb, *ac);
atomic_inc(&osb->s_num_inodes_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
/* The local alloc code needs to make the same reservation against the
* main bitmap, so it shares this helper rather than duplicating the
* logic. */
int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
struct ocfs2_alloc_context *ac)
{
int status;
ac->ac_which = OCFS2_AC_USE_MAIN;
ac->ac_group_search = ocfs2_cluster_group_search;
status = ocfs2_reserve_suballoc_bits(osb, ac,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT, NULL,
ALLOC_NEW_GROUP);
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
bail:
return status;
}
/* Callers don't need to care which bitmap (local alloc or main) to
* use so we figure it out for them, but unfortunately this clutters
* things a bit. */
static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
u32 bits_wanted, u64 max_block,
int flags,
struct ocfs2_alloc_context **ac)
{
int status;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
(*ac)->ac_bits_wanted = bits_wanted;
(*ac)->ac_max_block = max_block;
status = -ENOSPC;
if (!(flags & ALLOC_GROUPS_FROM_GLOBAL) &&
ocfs2_alloc_should_use_local(osb, bits_wanted)) {
status = ocfs2_reserve_local_alloc_bits(osb,
bits_wanted,
*ac);
if ((status < 0) && (status != -ENOSPC)) {
mlog_errno(status);
goto bail;
}
}
if (status == -ENOSPC) {
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
}
status = 0;
bail:
if ((status < 0) && *ac) {
ocfs2_free_alloc_context(*ac);
*ac = NULL;
}
if (status)
mlog_errno(status);
return status;
}
int ocfs2_reserve_clusters(struct ocfs2_super *osb,
u32 bits_wanted,
struct ocfs2_alloc_context **ac)
{
return ocfs2_reserve_clusters_with_limit(osb, bits_wanted, 0,
ALLOC_NEW_GROUP, ac);
}
/*
* More or less lifted from ext3. I'll leave their description below:
*
* "For ext3 allocations, we must not reuse any blocks which are
* allocated in the bitmap buffer's "last committed data" copy. This
* prevents deletes from freeing up the page for reuse until we have
* committed the delete transaction.
*
* If we didn't do this, then deleting something and reallocating it as
* data would allow the old block to be overwritten before the
* transaction committed (because we force data to disk before commit).
* This would lead to corruption if we crashed between overwriting the
* data and committing the delete.
*
* @@@ We may want to make this allocation behaviour conditional on
* data-writes at some point, and disable it for metadata allocations or
* sync-data inodes."
*
* Note: OCFS2 already does this differently for metadata vs data
* allocations, as those bitmaps are separate and undo access is never
* called on a metadata group descriptor.
*/
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr)
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
return 0;
if (!buffer_jbd(bg_bh))
return 1;
jbd_lock_bh_state(bg_bh);
bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
if (bg)
ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
else
ret = 1;
jbd_unlock_bh_state(bg_bh);
return ret;
}
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
struct buffer_head *bg_bh,
unsigned int bits_wanted,
unsigned int total_bits,
struct ocfs2_suballoc_result *res)
{
void *bitmap;
u16 best_offset, best_size;
int offset, start, found, status = 0;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
/* Callers got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
found = start = best_offset = best_size = 0;
bitmap = bg->bg_bitmap;
while ((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
if (offset == total_bits)
break;
if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
/* We found a zero, but we can't use it as it
* hasn't been put to disk yet! */
found = 0;
start = offset + 1;
} else if (offset == start) {
/* we found a zero */
found++;
/* move start to the next bit to test */
start++;
} else {
/* got a zero after some ones */
found = 1;
start = offset + 1;
}
if (found > best_size) {
best_size = found;
best_offset = start - found;
}
/* we got everything we needed */
if (found == bits_wanted) {
/* mlog(0, "Found it all!\n"); */
break;
}
}
if (best_size) {
res->sr_bit_offset = best_offset;
res->sr_bits = best_size;
} else {
status = -ENOSPC;
/* No error log here -- see the comment above
* ocfs2_test_bg_bit_allocatable */
}
return status;
}
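/* Mark num_bits starting at bit_off as allocated in the group bitmap
* and journal the change. Cluster bitmap groups are journaled with
* UNDO access so freed bits stay protected until commit (see the
* ext3-derived comment above ocfs2_test_bg_bit_allocatable). */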
int ocfs2_block_group_set_bits(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_group_desc *bg,
struct buffer_head *group_bh,
unsigned int bit_off,
unsigned int num_bits)
{
int status;
void *bitmap = bg->bg_bitmap;
int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
/* All callers get the descriptor via
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
trace_ocfs2_block_group_set_bits(bit_off, num_bits);
if (ocfs2_is_cluster_bitmap(alloc_inode))
journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
status = ocfs2_journal_access_gd(handle,
INODE_CACHE(alloc_inode),
group_bh,
journal_type);
if (status < 0) {
mlog_errno(status);
goto bail;
}
le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
le16_to_cpu(bg->bg_free_bits_count),
num_bits);
}
while (num_bits--)
ocfs2_set_bit(bit_off++, bitmap);
ocfs2_journal_dirty(handle, group_bh);
bail:
return status;
}
/* find the one with the most empty bits */
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
{
u16 curr, best;
BUG_ON(!cl->cl_next_free_rec);
best = curr = 0;
while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
if (le32_to_cpu(cl->cl_recs[curr].c_free) >
le32_to_cpu(cl->cl_recs[best].c_free))
best = curr;
curr++;
}
BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
return best;
}
static int ocfs2_relink_block_group(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *fe_bh,
struct buffer_head *bg_bh,
struct buffer_head *prev_bg_bh,
u16 chain)
{
int status;
/* there is a really tiny chance the journal calls could fail,
* but we wouldn't want inconsistent blocks in *any* case. */
u64 bg_ptr, prev_bg_ptr;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
/* The caller got these descriptors from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
trace_ocfs2_relink_block_group(
(unsigned long long)le64_to_cpu(fe->i_blkno), chain,
(unsigned long long)le64_to_cpu(bg->bg_blkno),
(unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
bg_ptr = le64_to_cpu(bg->bg_next_group);
prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
prev_bg_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0)
goto out;
prev_bg->bg_next_group = bg->bg_next_group;
ocfs2_journal_dirty(handle, prev_bg_bh);
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
bg_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0)
goto out_rollback_prev_bg;
bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
ocfs2_journal_dirty(handle, bg_bh);
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
fe_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0)
goto out_rollback_bg;
fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
ocfs2_journal_dirty(handle, fe_bh);
out:
if (status < 0)
mlog_errno(status);
return status;
out_rollback_bg:
bg->bg_next_group = cpu_to_le64(bg_ptr);
out_rollback_prev_bg:
prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
goto out;
}
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
u32 wanted)
{
return le16_to_cpu(bg->bg_free_bits_count) > wanted;
}
/* return 0 on success, -ENOSPC to keep searching and any other < 0
* value on error. */
static int ocfs2_cluster_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res)
{
int search = -ENOSPC;
int ret;
u64 blkoff;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int max_bits, gd_cluster_off;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
if (gd->bg_free_bits_count) {
max_bits = le16_to_cpu(gd->bg_bits);
/* Tail groups in cluster bitmaps which aren't cpg
* aligned are prone to partial extension by a failed
* fs resize. If the file system resize never got to
* update the dinode cluster count, then we don't want
* to trust any clusters past it, regardless of what
* the group descriptor says. */
gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
le64_to_cpu(gd->bg_blkno));
if ((gd_cluster_off + max_bits) >
OCFS2_I(inode)->ip_clusters) {
max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
trace_ocfs2_cluster_group_search_wrong_max_bits(
(unsigned long long)le64_to_cpu(gd->bg_blkno),
le16_to_cpu(gd->bg_bits),
OCFS2_I(inode)->ip_clusters, max_bits);
}
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
group_bh, bits_wanted,
max_bits, res);
if (ret)
return ret;
if (max_block) {
blkoff = ocfs2_clusters_to_blocks(inode->i_sb,
gd_cluster_off +
res->sr_bit_offset +
res->sr_bits);
trace_ocfs2_cluster_group_search_max_block(
(unsigned long long)blkoff,
(unsigned long long)max_block);
if (blkoff > max_block)
return -ENOSPC;
}
/* ocfs2_block_group_find_clear_bits() might
* return success, but we still want to return
* -ENOSPC unless it found the minimum number
* of bits. */
if (min_bits <= res->sr_bits)
search = 0; /* success */
else if (res->sr_bits) {
/*
* Don't show bits which we'll be returning
* for allocation to the local alloc bitmap.
*/
ocfs2_local_alloc_seen_free_bits(osb, res->sr_bits);
}
}
return search;
}
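/* Suballocator flavor of the search above. min_bits is always 1
* here, so any run found by ocfs2_block_group_find_clear_bits() is
* acceptable unless it ends past max_block. */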
static int ocfs2_block_group_search(struct inode *inode,
struct buffer_head *group_bh,
u32 bits_wanted, u32 min_bits,
u64 max_block,
struct ocfs2_suballoc_result *res)
{
int ret = -ENOSPC;
u64 blkoff;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
BUG_ON(min_bits != 1);
BUG_ON(ocfs2_is_cluster_bitmap(inode));
if (bg->bg_free_bits_count) {
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
group_bh, bits_wanted,
le16_to_cpu(bg->bg_bits),
res);
if (!ret && max_block) {
blkoff = le64_to_cpu(bg->bg_blkno) +
res->sr_bit_offset + res->sr_bits;
trace_ocfs2_block_group_search_max_block(
(unsigned long long)blkoff,
(unsigned long long)max_block);
if (blkoff > max_block)
ret = -ENOSPC;
}
}
return ret;
}
int ocfs2_alloc_dinode_update_counts(struct inode *inode,
handle_t *handle,
struct buffer_head *di_bh,
u32 num_bits,
u16 chain)
{
int ret;
u32 tmp_used;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
ocfs2_journal_dirty(handle, di_bh);
out:
return ret;
}
void ocfs2_rollback_alloc_dinode_counts(struct inode *inode,
struct buffer_head *di_bh,
u32 num_bits,
u16 chain)
{
u32 tmp_used;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
struct ocfs2_chain_list *cl;
cl = (struct ocfs2_chain_list *)&di->id2.i_chain;
tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
di->id1.bitmap1.i_used = cpu_to_le32(tmp_used - num_bits);
le32_add_cpu(&cl->cl_recs[chain].c_free, num_bits);
}
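/* If the search result lands inside this extent record, translate
* the group-relative bit offset into a physical block number and
* clamp sr_bits to the end of the extent. Returns 1 on a hit, 0
* otherwise. */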
static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
struct ocfs2_extent_rec *rec,
struct ocfs2_chain_list *cl)
{
unsigned int bpc = le16_to_cpu(cl->cl_bpc);
unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
if (res->sr_bit_offset < bitoff)
return 0;
if (res->sr_bit_offset >= (bitoff + bitcount))
return 0;
res->sr_blkno = le64_to_cpu(rec->e_blkno) +
(res->sr_bit_offset - bitoff);
if ((res->sr_bit_offset + res->sr_bits) > (bitoff + bitcount))
res->sr_bits = (bitoff + bitcount) - res->sr_bit_offset;
return 1;
}
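/* Compute res->sr_blkno for suballocator results; cluster bitmap
* results carry no block number. For discontiguous groups, walk the
* embedded extent list so the block number reflects the extent the
* bits actually live in. */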
static void ocfs2_bg_discontig_fix_result(struct ocfs2_alloc_context *ac,
struct ocfs2_group_desc *bg,
struct ocfs2_suballoc_result *res)
{
int i;
u64 bg_blkno = res->sr_bg_blkno; /* Save off */
struct ocfs2_extent_rec *rec;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
struct ocfs2_chain_list *cl = &di->id2.i_chain;
if (ocfs2_is_cluster_bitmap(ac->ac_inode)) {
res->sr_blkno = 0;
return;
}
res->sr_blkno = res->sr_bg_blkno + res->sr_bit_offset;
res->sr_bg_blkno = 0; /* Clear it for contig block groups */
if (!ocfs2_supports_discontig_bg(OCFS2_SB(ac->ac_inode->i_sb)) ||
!bg->bg_list.l_next_free_rec)
return;
for (i = 0; i < le16_to_cpu(bg->bg_list.l_next_free_rec); i++) {
rec = &bg->bg_list.l_recs[i];
if (ocfs2_bg_discontig_fix_by_rec(res, rec, cl)) {
res->sr_bg_blkno = bg_blkno; /* Restore */
break;
}
}
}
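/* Search only the group at res->sr_bg_blkno (the caller's hint). On
* success the allocator dinode counts and the group bitmap are
* updated, unless ac->ac_find_loc_only is set, in which case the
* location is recorded without allocating anything. */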
static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res,
u16 *bits_left)
{
int ret;
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *gd;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
struct inode *alloc_inode = ac->ac_inode;
ret = ocfs2_read_group_descriptor(alloc_inode, di,
res->sr_bg_blkno, &group_bh);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
gd = (struct ocfs2_group_desc *) group_bh->b_data;
ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
ac->ac_max_block, res);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
if (!ret)
ocfs2_bg_discontig_fix_result(ac, gd, res);
/*
* sr_bg_blkno might have been changed by
* ocfs2_bg_discontig_fix_result
*/
res->sr_bg_stable_blkno = group_bh->b_blocknr;
if (ac->ac_find_loc_only)
goto out_loc_only;
ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
res->sr_bits,
le16_to_cpu(gd->bg_chain));
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
res->sr_bit_offset, res->sr_bits);
if (ret < 0) {
ocfs2_rollback_alloc_dinode_counts(alloc_inode, ac->ac_bh,
res->sr_bits,
le16_to_cpu(gd->bg_chain));
mlog_errno(ret);
}
out_loc_only:
*bits_left = le16_to_cpu(gd->bg_free_bits_count);
out:
brelse(group_bh);
return ret;
}
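/* Walk the groups in chain ac->ac_chain until ac_group_search()
* finds space. A group that yields a hit may be relinked to the
* head of its chain so later searches find it sooner. */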
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res,
u16 *bits_left)
{
int status;
u16 chain;
u64 next_group;
struct inode *alloc_inode = ac->ac_inode;
struct buffer_head *group_bh = NULL;
struct buffer_head *prev_group_bh = NULL;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
struct ocfs2_group_desc *bg;
chain = ac->ac_chain;
trace_ocfs2_search_chain_begin(
(unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
bits_wanted, chain);
status = ocfs2_read_group_descriptor(alloc_inode, fe,
le64_to_cpu(cl->cl_recs[chain].c_blkno),
&group_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bg = (struct ocfs2_group_desc *) group_bh->b_data;
status = -ENOSPC;
/* for now, the chain search is a bit simplistic. We just use
* the 1st group with any empty bits. */
while ((status = ac->ac_group_search(alloc_inode, group_bh,
bits_wanted, min_bits,
ac->ac_max_block,
res)) == -ENOSPC) {
if (!bg->bg_next_group)
break;
brelse(prev_group_bh);
prev_group_bh = NULL;
next_group = le64_to_cpu(bg->bg_next_group);
prev_group_bh = group_bh;
group_bh = NULL;
status = ocfs2_read_group_descriptor(alloc_inode, fe,
next_group, &group_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
bg = (struct ocfs2_group_desc *) group_bh->b_data;
}
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
trace_ocfs2_search_chain_succ(
(unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits);
res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno);
BUG_ON(res->sr_bits == 0);
if (!status)
ocfs2_bg_discontig_fix_result(ac, bg, res);
/*
* sr_bg_blkno might have been changed by
* ocfs2_bg_discontig_fix_result
*/
res->sr_bg_stable_blkno = group_bh->b_blocknr;
/*
* Keep track of previous block descriptor read. When
* we find a target, if we have read more than X
* number of descriptors, and the target is reasonably
* empty, relink him to top of his chain.
*
* We've read 0 extra blocks and only send one more to
* the transaction, yet the next guy to search has a
* much easier time.
*
* Do this *after* figuring out how many bits we're taking out
* of our target group.
*/
if (!ac->ac_disable_chain_relink &&
(prev_group_bh) &&
(ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
status = ocfs2_relink_block_group(handle, alloc_inode,
ac->ac_bh, group_bh,
prev_group_bh, chain);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
if (ac->ac_find_loc_only)
goto out_loc_only;
status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
ac->ac_bh, res->sr_bits,
chain);
if (status) {
mlog_errno(status);
goto bail;
}
status = ocfs2_block_group_set_bits(handle,
alloc_inode,
bg,
group_bh,
res->sr_bit_offset,
res->sr_bits);
if (status < 0) {
ocfs2_rollback_alloc_dinode_counts(alloc_inode,
ac->ac_bh, res->sr_bits, chain);
mlog_errno(status);
goto bail;
}
trace_ocfs2_search_chain_end(
(unsigned long long)le64_to_cpu(fe->i_blkno),
res->sr_bits);
out_loc_only:
*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
brelse(group_bh);
brelse(prev_group_bh);
if (status)
mlog_errno(status);
return status;
}
/* will give out up to bits_wanted contiguous bits. */
static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
handle_t *handle,
u32 bits_wanted,
u32 min_bits,
struct ocfs2_suballoc_result *res)
{
int status;
u16 victim, i;
u16 bits_left = 0;
u64 hint = ac->ac_last_group;
struct ocfs2_chain_list *cl;
struct ocfs2_dinode *fe;
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
BUG_ON(!ac->ac_bh);
fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
/* The bh was validated by the inode read during
* ocfs2_reserve_suballoc_bits(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
le32_to_cpu(fe->id1.bitmap1.i_total)) {
status = ocfs2_error(ac->ac_inode->i_sb,
"Chain allocator dinode %llu has %u used bits but only %u total\n",
(unsigned long long)le64_to_cpu(fe->i_blkno),
le32_to_cpu(fe->id1.bitmap1.i_used),
le32_to_cpu(fe->id1.bitmap1.i_total));
goto bail;
}
res->sr_bg_blkno = hint;
if (res->sr_bg_blkno) {
/* Attempt to short-circuit the usual search mechanism
* by jumping straight to the most recently used
* allocation group. This helps us maintain some
* contiguousness across allocations. */
status = ocfs2_search_one_group(ac, handle, bits_wanted,
min_bits, res, &bits_left);
if (!status)
goto set_hint;
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
}
cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
if (!status) {
if (ocfs2_is_cluster_bitmap(ac->ac_inode))
hint = res->sr_bg_blkno;
else
hint = ocfs2_group_from_res(res);
goto set_hint;
}
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
trace_ocfs2_claim_suballoc_bits(victim);
/* If we didn't pick a good victim, then just default to
* searching each chain in order. Don't allow chain relinking
* because we only calculate enough journal credits for one
* relink per alloc. */
ac->ac_disable_chain_relink = 1;
for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
if (i == victim)
continue;
if (!cl->cl_recs[i].c_free)
continue;
ac->ac_chain = i;
status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
res, &bits_left);
if (!status) {
hint = ocfs2_group_from_res(res);
break;
}
if (status < 0 && status != -ENOSPC) {
mlog_errno(status);
goto bail;
}
}
set_hint:
if (status != -ENOSPC) {
/* If the next search of this group is not likely to
* yield a suitable extent, then we reset the last
* group hint so as to not waste a disk read */
if (bits_left < min_bits)
ac->ac_last_group = 0;
else
ac->ac_last_group = hint;
}
bail:
if (status)
mlog_errno(status);
return status;
}
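/* Claim up to bits_wanted contiguous metadata blocks, returning the
* group (suballoc_loc), the starting bit within it, the first block
* number and how many bits were actually claimed. */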
int ocfs2_claim_metadata(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 bits_wanted,
u64 *suballoc_loc,
u16 *suballoc_bit_start,
unsigned int *num_bits,
u64 *blkno_start)
{
int status;
struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
BUG_ON(!ac);
BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
status = ocfs2_claim_suballoc_bits(ac,
handle,
bits_wanted,
1,
&res);
if (status < 0) {
mlog_errno(status);
goto bail;
}
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
*suballoc_loc = res.sr_bg_blkno;
*suballoc_bit_start = res.sr_bit_offset;
*blkno_start = res.sr_blkno;
ac->ac_bits_given += res.sr_bits;
*num_bits = res.sr_bits;
status = 0;
bail:
if (status)
mlog_errno(status);
return status;
}
static void ocfs2_init_inode_ac_group(struct inode *dir,
struct buffer_head *parent_di_bh,
struct ocfs2_alloc_context *ac)
{
struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_di_bh->b_data;
/*
* Try to allocate inodes from some specific group.
*
* If the parent dir has recorded the last group used in allocation,
* cool, use it. Otherwise if we try to allocate new inode from the
* same slot the parent dir belongs to, use the same chunk.
*
* We are very careful here to avoid the mistake of setting
* ac_last_group to a group descriptor from a different (unlocked) slot.
*/
if (OCFS2_I(dir)->ip_last_used_group &&
OCFS2_I(dir)->ip_last_used_slot == ac->ac_alloc_slot)
ac->ac_last_group = OCFS2_I(dir)->ip_last_used_group;
else if (le16_to_cpu(di->i_suballoc_slot) == ac->ac_alloc_slot) {
if (di->i_suballoc_loc)
ac->ac_last_group = le64_to_cpu(di->i_suballoc_loc);
else
ac->ac_last_group = ocfs2_which_suballoc_group(
le64_to_cpu(di->i_blkno),
le16_to_cpu(di->i_suballoc_bit));
}
}
static inline void ocfs2_save_inode_ac_group(struct inode *dir,
struct ocfs2_alloc_context *ac)
{
OCFS2_I(dir)->ip_last_used_group = ac->ac_last_group;
OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
}
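/* Find, but do not claim, a block for a new inode. The result is
* stashed in ac->ac_find_loc_priv so that a later call to
* ocfs2_claim_new_inode_at_loc() can perform the actual bitmap
* update against the same location. */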
int ocfs2_find_new_inode_loc(struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *ac,
u64 *fe_blkno)
{
int ret;
handle_t *handle = NULL;
struct ocfs2_suballoc_result *res;
BUG_ON(!ac);
BUG_ON(ac->ac_bits_given != 0);
BUG_ON(ac->ac_bits_wanted != 1);
BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
res = kzalloc(sizeof(*res), GFP_NOFS);
if (res == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
/*
* The handle started here is for chain relink. Alternatively,
* we could just disable relink for these calls.
*/
handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
mlog_errno(ret);
goto out;
}
/*
* This will instruct ocfs2_claim_suballoc_bits and
* ocfs2_search_one_group to search but save actual allocation
* for later.
*/
ac->ac_find_loc_only = 1;
ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ac->ac_find_loc_priv = res;
*fe_blkno = res->sr_blkno;
ocfs2_update_inode_fsync_trans(handle, dir, 0);
out:
if (handle)
ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
if (ret)
kfree(res);
return ret;
}
int ocfs2_claim_new_inode_at_loc(handle_t *handle,
struct inode *dir,
struct ocfs2_alloc_context *ac,
u64 *suballoc_loc,
u16 *suballoc_bit,
u64 di_blkno)
{
int ret;
u16 chain;
struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
struct buffer_head *bg_bh = NULL;
struct ocfs2_group_desc *bg;
struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
/*
* Since di_blkno is being passed back in, we check for any
* inconsistencies which may have happened between
* calls. These are code bugs as di_blkno is not expected to
* change once returned from ocfs2_find_new_inode_loc()
*/
BUG_ON(res->sr_blkno != di_blkno);
ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
res->sr_bg_stable_blkno, &bg_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
bg = (struct ocfs2_group_desc *) bg_bh->b_data;
chain = le16_to_cpu(bg->bg_chain);
ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
ac->ac_bh, res->sr_bits,
chain);
if (ret) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_block_group_set_bits(handle,
ac->ac_inode,
bg,
bg_bh,
res->sr_bit_offset,
res->sr_bits);
if (ret < 0) {
ocfs2_rollback_alloc_dinode_counts(ac->ac_inode,
ac->ac_bh, res->sr_bits, chain);
mlog_errno(ret);
goto out;
}
trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
res->sr_bits);
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
BUG_ON(res->sr_bits != 1);
*suballoc_loc = res->sr_bg_blkno;
*suballoc_bit = res->sr_bit_offset;
ac->ac_bits_given++;
ocfs2_save_inode_ac_group(dir, ac);
out:
brelse(bg_bh);
return ret;
}
int ocfs2_claim_new_inode(handle_t *handle,
struct inode *dir,
struct buffer_head *parent_fe_bh,
struct ocfs2_alloc_context *ac,
u64 *suballoc_loc,
u16 *suballoc_bit,
u64 *fe_blkno)
{
int status;
struct ocfs2_suballoc_result res;
BUG_ON(!ac);
BUG_ON(ac->ac_bits_given != 0);
BUG_ON(ac->ac_bits_wanted != 1);
BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
status = ocfs2_claim_suballoc_bits(ac,
handle,
1,
1,
&res);
if (status < 0) {
mlog_errno(status);
goto bail;
}
atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
BUG_ON(res.sr_bits != 1);
*suballoc_loc = res.sr_bg_blkno;
*suballoc_bit = res.sr_bit_offset;
*fe_blkno = res.sr_blkno;
ac->ac_bits_given++;
ocfs2_save_inode_ac_group(dir, ac);
status = 0;
bail:
if (status)
mlog_errno(status);
return status;
}
/* translate a group descriptor blkno and its bitmap offset into a
* disk cluster offset. */
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
u64 bg_blkno,
u16 bg_bit_off)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 cluster = 0;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
if (bg_blkno != osb->first_cluster_group_blkno)
cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
cluster += (u32) bg_bit_off;
return cluster;
}
/* given a cluster offset, calculate which block group it belongs to
* and return that block offset. */
u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 group_no;
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
group_no = cluster / osb->bitmap_cpg;
if (!group_no)
return osb->first_cluster_group_blkno;
return ocfs2_clusters_to_blocks(inode->i_sb,
group_no * osb->bitmap_cpg);
}
/* given the block number of a cluster start, calculate which cluster
* group and descriptor bitmap offset that corresponds to. */
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
u64 data_blkno,
u64 *bg_blkno,
u16 *bg_bit_off)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
*bg_blkno = ocfs2_which_cluster_group(inode,
data_cluster);
if (*bg_blkno == osb->first_cluster_group_blkno)
*bg_bit_off = (u16) data_cluster;
else
*bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
data_blkno - *bg_blkno);
}
/*
* min_bits - the minimum contiguous chunk from this total allocation
* that we can handle. Set it to what we originally asked for to get a
* fully contiguous allocation, or to '1' to indicate that we can deal
* with extents of any size.
*/
int __ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 max_clusters,
u32 *cluster_start,
u32 *num_clusters)
{
int status;
unsigned int bits_wanted = max_clusters;
struct ocfs2_suballoc_result res = { .sr_blkno = 0, };
struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb);
BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
&& ac->ac_which != OCFS2_AC_USE_MAIN);
if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
WARN_ON(min_clusters > 1);
status = ocfs2_claim_local_alloc_bits(osb,
handle,
ac,
bits_wanted,
cluster_start,
num_clusters);
if (!status)
atomic_inc(&osb->alloc_stats.local_data);
} else {
if (min_clusters > (osb->bitmap_cpg - 1)) {
/* The only paths asking for contiguousness
* should know about this already. */
mlog(ML_ERROR, "minimum allocation requested %u exceeds "
"group bitmap size %u!\n", min_clusters,
osb->bitmap_cpg);
status = -ENOSPC;
goto bail;
}
/* clamp the current request down to a realistic size. */
if (bits_wanted > (osb->bitmap_cpg - 1))
bits_wanted = osb->bitmap_cpg - 1;
status = ocfs2_claim_suballoc_bits(ac,
handle,
bits_wanted,
min_clusters,
&res);
if (!status) {
BUG_ON(res.sr_blkno); /* cluster alloc can't set */
*cluster_start =
ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
res.sr_bg_blkno,
res.sr_bit_offset);
atomic_inc(&osb->alloc_stats.bitmap_data);
*num_clusters = res.sr_bits;
}
}
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
goto bail;
}
ac->ac_bits_given += *num_clusters;
bail:
if (status)
mlog_errno(status);
return status;
}
int ocfs2_claim_clusters(handle_t *handle,
struct ocfs2_alloc_context *ac,
u32 min_clusters,
u32 *cluster_start,
u32 *num_clusters)
{
unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
return __ocfs2_claim_clusters(handle, ac, min_clusters,
bits_wanted, cluster_start, num_clusters);
}
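/* Clear num_bits starting at bit_off in the group bitmap. When
* undo_fn is given, the group is journaled with UNDO access and
* undo_fn mirrors the change into the jbd b_committed_data copy,
* keeping just-freed clusters from being reused before the
* transaction commits. */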
static int ocfs2_block_group_clear_bits(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_group_desc *bg,
struct buffer_head *group_bh,
unsigned int bit_off,
unsigned int num_bits,
void (*undo_fn)(unsigned int bit,
unsigned long *bmap))
{
int status;
unsigned int tmp;
struct ocfs2_group_desc *undo_bg = NULL;
/* The caller got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
trace_ocfs2_block_group_clear_bits(bit_off, num_bits);
BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
group_bh,
undo_fn ?
OCFS2_JOURNAL_ACCESS_UNDO :
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (undo_fn) {
jbd_lock_bh_state(group_bh);
undo_bg = (struct ocfs2_group_desc *)
bh2jh(group_bh)->b_committed_data;
BUG_ON(!undo_bg);
}
tmp = num_bits;
while (tmp--) {
ocfs2_clear_bit((bit_off + tmp),
(unsigned long *) bg->bg_bitmap);
if (undo_fn)
undo_fn(bit_off + tmp,
(unsigned long *) undo_bg->bg_bitmap);
}
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
le16_to_cpu(bg->bg_free_bits_count),
num_bits);
}
if (undo_fn)
jbd_unlock_bh_state(group_bh);
ocfs2_journal_dirty(handle, group_bh);
bail:
return status;
}
/*
* expects the suballoc inode to already be locked.
*/
static int _ocfs2_free_suballoc_bits(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *alloc_bh,
unsigned int start_bit,
u64 bg_blkno,
unsigned int count,
void (*undo_fn)(unsigned int bit,
unsigned long *bitmap))
{
int status = 0;
u32 tmp_used;
struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
struct ocfs2_chain_list *cl = &fe->id2.i_chain;
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *group;
/* The alloc_bh comes from ocfs2_free_dinode() or
* ocfs2_free_clusters(). The callers have all locked the
* allocator and gotten alloc_bh from the lock call. This
* validates the dinode buffer. Any corruption that has happened
* is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
trace_ocfs2_free_suballoc_bits(
(unsigned long long)OCFS2_I(alloc_inode)->ip_blkno,
(unsigned long long)bg_blkno,
start_bit, count);
status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
&group_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
group = (struct ocfs2_group_desc *) group_bh->b_data;
BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
status = ocfs2_block_group_clear_bits(handle, alloc_inode,
group, group_bh,
start_bit, count, undo_fn);
if (status < 0) {
mlog_errno(status);
goto bail;
}
status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
start_bit, count);
goto bail;
}
le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
count);
tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
ocfs2_journal_dirty(handle, alloc_bh);
bail:
brelse(group_bh);
if (status)
mlog_errno(status);
return status;
}
int ocfs2_free_suballoc_bits(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *alloc_bh,
unsigned int start_bit,
u64 bg_blkno,
unsigned int count)
{
return _ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh,
start_bit, bg_blkno, count, NULL);
}
int ocfs2_free_dinode(handle_t *handle,
struct inode *inode_alloc_inode,
struct buffer_head *inode_alloc_bh,
struct ocfs2_dinode *di)
{
u64 blk = le64_to_cpu(di->i_blkno);
u16 bit = le16_to_cpu(di->i_suballoc_bit);
u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
if (di->i_suballoc_loc)
bg_blkno = le64_to_cpu(di->i_suballoc_loc);
return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
inode_alloc_bh, bit, bg_blkno, 1);
}
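/* Translate a physical block range back into its (cluster group,
* bit offset) pair and free the corresponding bits, then tell the
* local alloc code about the space that just opened up. */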
static int _ocfs2_free_clusters(handle_t *handle,
struct inode *bitmap_inode,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters,
void (*undo_fn)(unsigned int bit,
unsigned long *bitmap))
{
int status;
u16 bg_start_bit;
u64 bg_blkno;
struct ocfs2_dinode *fe;
/* You can't ever have a contiguous set of clusters
* bigger than a block group bitmap so we never have to worry
* about looping on them.
* The BUG_ON below is expensive; it can safely be removed once this
* code has been tested thoroughly. */
BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
fe = (struct ocfs2_dinode *) bitmap_bh->b_data;
ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
&bg_start_bit);
trace_ocfs2_free_clusters((unsigned long long)bg_blkno,
(unsigned long long)start_blk,
bg_start_bit, num_clusters);
status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
bg_start_bit, bg_blkno,
num_clusters, undo_fn);
if (status < 0) {
mlog_errno(status);
goto out;
}
ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb),
num_clusters);
out:
if (status)
mlog_errno(status);
return status;
}
int ocfs2_free_clusters(handle_t *handle,
struct inode *bitmap_inode,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters)
{
return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
start_blk, num_clusters,
_ocfs2_set_bit);
}
/*
* Give never-used clusters back to the global bitmap. We don't need
* to protect these bits in the undo buffer.
*/
int ocfs2_release_clusters(handle_t *handle,
struct inode *bitmap_inode,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters)
{
return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
start_blk, num_clusters,
_ocfs2_clear_bit);
}
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
{
printk("Block Group:\n");
printk("bg_signature: %s\n", bg->bg_signature);
printk("bg_size: %u\n", le16_to_cpu(bg->bg_size));
printk("bg_bits: %u\n", le16_to_cpu(bg->bg_bits));
printk("bg_free_bits_count: %u\n", le16_to_cpu(bg->bg_free_bits_count));
printk("bg_chain: %u\n", le16_to_cpu(bg->bg_chain));
printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
printk("bg_next_group: %llu\n",
(unsigned long long)le64_to_cpu(bg->bg_next_group));
printk("bg_parent_dinode: %llu\n",
(unsigned long long)le64_to_cpu(bg->bg_parent_dinode));
printk("bg_blkno: %llu\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno));
}
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
{
int i;
printk("Suballoc Inode %llu:\n",
(unsigned long long)le64_to_cpu(fe->i_blkno));
printk("i_signature: %s\n", fe->i_signature);
printk("i_size: %llu\n",
(unsigned long long)le64_to_cpu(fe->i_size));
printk("i_clusters: %u\n", le32_to_cpu(fe->i_clusters));
printk("i_generation: %u\n",
le32_to_cpu(fe->i_generation));
printk("id1.bitmap1.i_used: %u\n",
le32_to_cpu(fe->id1.bitmap1.i_used));
printk("id1.bitmap1.i_total: %u\n",
le32_to_cpu(fe->id1.bitmap1.i_total));
printk("id2.i_chain.cl_cpg: %u\n",
le16_to_cpu(fe->id2.i_chain.cl_cpg));
printk("id2.i_chain.cl_bpc: %u\n",
le16_to_cpu(fe->id2.i_chain.cl_bpc));
printk("id2.i_chain.cl_count: %u\n",
le16_to_cpu(fe->id2.i_chain.cl_count));
printk("id2.i_chain.cl_next_free_rec: %u\n",
le16_to_cpu(fe->id2.i_chain.cl_next_free_rec));
for (i = 0; i < le16_to_cpu(fe->id2.i_chain.cl_next_free_rec); i++) {
printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
le32_to_cpu(fe->id2.i_chain.cl_recs[i].c_free));
printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
le32_to_cpu(fe->id2.i_chain.cl_recs[i].c_total));
printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
(unsigned long long)le64_to_cpu(fe->id2.i_chain.cl_recs[i].c_blkno));
}
}
/*
* For a given allocation, determine which allocators will need to be
* accessed, and lock them, reserving the appropriate number of bits.
*
* Sparse file systems call this from ocfs2_write_begin_nolock()
* and ocfs2_allocate_unwritten_extents().
*
* File systems which don't support holes call this from
* ocfs2_extend_allocation().
*/
int ocfs2_lock_allocators(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 clusters_to_add, u32 extents_to_split,
struct ocfs2_alloc_context **data_ac,
struct ocfs2_alloc_context **meta_ac)
{
int ret = 0, num_free_extents;
unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
*meta_ac = NULL;
if (data_ac)
*data_ac = NULL;
BUG_ON(clusters_to_add != 0 && data_ac == NULL);
num_free_extents = ocfs2_num_free_extents(osb, et);
if (num_free_extents < 0) {
ret = num_free_extents;
mlog_errno(ret);
goto out;
}
/*
* Sparse allocation file systems need to be more conservative
* with reserving room for expansion - the actual allocation
* happens while we've got a journal handle open so re-taking
* a cluster lock (because we ran out of room for another
* extent) will violate ordering rules.
*
* Most of the time we'll only be seeing this 1 cluster at a time
* anyway.
*
* Always lock for any unwritten extents - we might want to
* add blocks during a split.
*/
if (!num_free_extents ||
(ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
}
if (clusters_to_add == 0)
goto out;
ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
out:
if (ret) {
if (*meta_ac) {
ocfs2_free_alloc_context(*meta_ac);
*meta_ac = NULL;
}
/*
* We cannot have an error and a non null *data_ac.
*/
}
return ret;
}
/*
* Read the inode specified by blkno to get suballoc_slot and
* suballoc_bit.
*/
static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
u16 *suballoc_slot, u64 *group_blkno,
u16 *suballoc_bit)
{
int status;
struct buffer_head *inode_bh = NULL;
struct ocfs2_dinode *inode_fe;
trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno);
/* dirty read: read the block from disk without taking the cluster lock */
status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh);
if (status < 0) {
mlog(ML_ERROR, "read block %llu failed %d\n",
(unsigned long long)blkno, status);
goto bail;
}
inode_fe = (struct ocfs2_dinode *) inode_bh->b_data;
if (!OCFS2_IS_VALID_DINODE(inode_fe)) {
mlog(ML_ERROR, "invalid inode %llu requested\n",
(unsigned long long)blkno);
status = -EINVAL;
goto bail;
}
if (le16_to_cpu(inode_fe->i_suballoc_slot) != (u16)OCFS2_INVALID_SLOT &&
(u32)le16_to_cpu(inode_fe->i_suballoc_slot) > osb->max_slots - 1) {
mlog(ML_ERROR, "inode %llu has invalid suballoc slot %u\n",
(unsigned long long)blkno,
(u32)le16_to_cpu(inode_fe->i_suballoc_slot));
status = -EINVAL;
goto bail;
}
if (suballoc_slot)
*suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
if (suballoc_bit)
*suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
if (group_blkno)
*group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
bail:
brelse(inode_bh);
if (status)
mlog_errno(status);
return status;
}
/*
* Test whether a bit is SET in the allocator bitmap. On success, 0
* is returned and *res is 1 if the bit is set, 0 otherwise. On
* failure a negative errno is returned and *res is meaningless. Call
* this only while holding the cluster lock on the suballocator, or
* the result may be based on stale contents.
*/
static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
struct inode *suballoc,
struct buffer_head *alloc_bh,
u64 group_blkno, u64 blkno,
u16 bit, int *res)
{
struct ocfs2_dinode *alloc_di;
struct ocfs2_group_desc *group;
struct buffer_head *group_bh = NULL;
u64 bg_blkno;
int status;
trace_ocfs2_test_suballoc_bit((unsigned long long)blkno,
(unsigned int)bit);
alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data;
if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) {
mlog(ML_ERROR, "suballoc bit %u out of range of %u\n",
(unsigned int)bit,
ocfs2_bits_per_group(&alloc_di->id2.i_chain));
status = -EINVAL;
goto bail;
}
bg_blkno = group_blkno ? group_blkno :
ocfs2_which_suballoc_group(blkno, bit);
status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
&group_bh);
if (status < 0) {
mlog(ML_ERROR, "read group %llu failed %d\n",
(unsigned long long)bg_blkno, status);
goto bail;
}
group = (struct ocfs2_group_desc *) group_bh->b_data;
*res = ocfs2_test_bit(bit, (unsigned long *)group->bg_bitmap);
bail:
brelse(group_bh);
if (status)
mlog_errno(status);
return status;
}
/*
* Test if the bit representing this inode (blkno) is set in the
* suballocator.
*
* On success, 0 is returned and *res is 1 for SET; 0 otherwise.
*
* In the event of failure, a negative value is returned and *res is
* meaningless.
*
* Callers must make sure to hold nfs_sync_lock to prevent
* ocfs2_delete_inode() on another node from accessing the same
* suballocator concurrently.
*/
int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
{
int status;
u64 group_blkno = 0;
u16 suballoc_bit = 0, suballoc_slot = 0;
struct inode *inode_alloc_inode;
struct buffer_head *alloc_bh = NULL;
trace_ocfs2_test_inode_bit((unsigned long long)blkno);
status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
&group_blkno, &suballoc_bit);
if (status < 0) {
mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
goto bail;
}
inode_alloc_inode =
ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
suballoc_slot);
if (!inode_alloc_inode) {
/* the error code could be inaccurate, but we are not able to
* get the correct one. */
status = -EINVAL;
mlog(ML_ERROR, "unable to get alloc inode in slot %u\n",
(u32)suballoc_slot);
goto bail;
}
mutex_lock(&inode_alloc_inode->i_mutex);
status = ocfs2_inode_lock(inode_alloc_inode, &alloc_bh, 0);
if (status < 0) {
mutex_unlock(&inode_alloc_inode->i_mutex);
iput(inode_alloc_inode);
mlog(ML_ERROR, "lock on alloc inode on slot %u failed %d\n",
(u32)suballoc_slot, status);
goto bail;
}
status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
group_blkno, blkno, suballoc_bit, res);
if (status < 0)
mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
ocfs2_inode_unlock(inode_alloc_inode, 0);
mutex_unlock(&inode_alloc_inode->i_mutex);
iput(inode_alloc_inode);
brelse(alloc_bh);
bail:
if (status)
mlog_errno(status);
return status;
}
| gpl-2.0 |
humberos/android_kernel_samsung_smdk4412 | drivers/media/video/samsung/mali/linux/mali_kernel_linux.c | 154 | 19940 | /**
* Copyright (C) 2010-2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @file mali_kernel_linux.c
* Implementation of the Linux device driver entrypoints
*/
#include <linux/module.h> /* kernel module definitions */
#include <linux/fs.h> /* file system operations */
#include <linux/cdev.h> /* character device definitions */
#include <linux/mm.h> /* memory manager definitions */
#include <linux/mali/mali_utgard_ioctl.h>
#include "mali_kernel_common.h"
#include "mali_session.h"
#include "mali_kernel_core.h"
#include "mali_osk.h"
#include "mali_kernel_linux.h"
#include "mali_ukk.h"
#include "mali_ukk_wrappers.h"
#include "mali_kernel_pm.h"
#include "mali_kernel_sysfs.h"
#include "mali_platform.h"
#include "mali_kernel_license.h"
#include "mali_dma_buf.h"
/* Streamline support for the Mali driver */
#if defined(CONFIG_TRACEPOINTS) && MALI_TIMELINE_PROFILING_ENABLED
/* Ask Linux to create the tracepoints */
#define CREATE_TRACE_POINTS
#include "mali_linux_trace.h"
#endif /* CONFIG_TRACEPOINTS */
static _mali_osk_errcode_t initialize_kernel_device(void);
static int initialize_sysfs(void);
static void terminate_kernel_device(void);
/* from the __malidrv_build_info.c file that is generated during build */
extern const char *__malidrv_build_info(void);
/* Module parameter to control log level */
int mali_debug_level = 2;
module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
#if MALI_MAJOR_PREDEFINE
int mali_major = 244;
#else
int mali_major = 0;
#endif
module_param(mali_major, int, S_IRUGO); /* r--r--r-- */
MODULE_PARM_DESC(mali_major, "Device major number");
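/*
* Example usage (assumption, not from the original source): the major
* number can be fixed at load time instead of letting the kernel pick
* one, e.g.:
*
* insmod mali.ko mali_major=244
*
* With mali_major == 0 (the default when MALI_MAJOR_PREDEFINE is unset),
* initialize_kernel_device() below falls back to alloc_chrdev_region().
*/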
module_param(mali_hang_check_interval, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_hang_check_interval, "Interval at which to check for progress after the hw watchdog has been triggered");
module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
extern int mali_l2_max_reads;
module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
#if MALI_TIMELINE_PROFILING_ENABLED
extern int mali_boot_profiling;
module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
#endif
/* Export symbols from common code: mali_user_settings.c */
#include "mali_user_settings_db.h"
EXPORT_SYMBOL(mali_set_user_setting);
EXPORT_SYMBOL(mali_get_user_setting);
#if MALI_DVFS_ENABLED
extern int mali_dvfs_control;
module_param(mali_dvfs_control, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(mali_dvfs_control, "Mali Current DVFS");
#if defined(CONFIG_CPU_EXYNOS4210)
#else
extern int step0_clk;
module_param(step0_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step0_clk, "Mali Current step0_clk");
#ifdef DEBUG
extern int step0_vol;
module_param(step0_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step0_vol, "Mali Current step0_vol");
#endif
#if (MALI_DVFS_STEPS > 1)
extern int step1_clk;
module_param(step1_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step1_clk, "Mali Current step1_clk");
extern int step0_up;
module_param(step0_up, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step0_up, "Mali Current step0_up");
extern int step1_down;
module_param(step1_down, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step1_down, "Mali Current step1_down");
#ifdef DEBUG
extern int step1_vol;
module_param(step1_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step1_vol, "Mali Current step1_vol");
#endif
#if (MALI_DVFS_STEPS > 2)
extern int step2_clk;
module_param(step2_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step2_clk, "Mali Current step2_clk");
extern int step1_up;
module_param(step1_up, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step1_up, "Mali Current step1_up");
extern int step2_down;
module_param(step2_down, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step2_down, "Mali Current step2_down");
#ifdef DEBUG
extern int step2_vol;
module_param(step2_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step2_vol, "Mali Current step2_vol");
#endif
#if (MALI_DVFS_STEPS > 3)
extern int step3_clk;
module_param(step3_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step3_clk, "Mali Current step3_clk");
extern int step2_up;
module_param(step2_up, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step2_up, "Mali Current step2_up");
extern int step3_down;
module_param(step3_down, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step3_down, "Mali Current step3_down");
#ifdef DEBUG
extern int step3_vol;
module_param(step3_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
MODULE_PARM_DESC(step3_vol, "Mali Current step3_vol");
#endif
#endif
#endif
#endif
#endif
extern int mali_gpu_clk;
module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
extern int mali_gpu_vol;
module_param(mali_gpu_vol, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
MODULE_PARM_DESC(mali_gpu_vol, "Mali Current Voltage");
extern int gpu_power_state;
module_param(gpu_power_state, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
MODULE_PARM_DESC(gpu_power_state, "Mali Power State");
#endif
static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
/* the mali device */
static struct mali_dev device;
static int mali_open(struct inode *inode, struct file *filp);
static int mali_release(struct inode *inode, struct file *filp);
#ifdef HAVE_UNLOCKED_IOCTL
static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
#endif
static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
/* Linux char file operations provided by the Mali module */
struct file_operations mali_fops =
{
.owner = THIS_MODULE,
.open = mali_open,
.release = mali_release,
#ifdef HAVE_UNLOCKED_IOCTL
.unlocked_ioctl = mali_ioctl,
#else
.ioctl = mali_ioctl,
#endif
.mmap = mali_mmap
};
int mali_driver_init(void)
{
int ret = 0;
MALI_DEBUG_PRINT(2, ("\n"));
MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
ret = _mali_dev_platform_register();
if (0 != ret) goto platform_register_failed;
ret = map_errcode(initialize_kernel_device());
if (0 != ret) goto initialize_kernel_device_failed;
ret = map_errcode(mali_platform_init());
if (0 != ret) goto platform_init_failed;
mali_osk_low_level_mem_init();
ret = map_errcode(mali_initialize_subsystems());
if (0 != ret) goto initialize_subsystems_failed;
ret = initialize_sysfs();
if (0 != ret) goto initialize_sysfs_failed;
MALI_PRINT(("Mali device driver loaded\n"));
return 0; /* Success */
/* Error handling */
initialize_sysfs_failed:
mali_terminate_subsystems();
initialize_subsystems_failed:
mali_osk_low_level_mem_term();
mali_platform_deinit();
platform_init_failed:
terminate_kernel_device();
initialize_kernel_device_failed:
_mali_dev_platform_unregister();
platform_register_failed:
return ret;
}
void mali_driver_exit(void)
{
MALI_DEBUG_PRINT(2, ("\n"));
MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
/* No need to terminate sysfs, this will be done automatically along with device termination */
mali_terminate_subsystems();
mali_osk_low_level_mem_term();
mali_platform_deinit();
terminate_kernel_device();
_mali_dev_platform_unregister();
#if MALI_LICENSE_IS_GPL
/* @@@@ clean up the work queues! This should not be terminated here, since it isn't initialized in the function above! */
flush_workqueue(mali_wq);
destroy_workqueue(mali_wq);
mali_wq = NULL;
#endif
MALI_PRINT(("Mali device driver unloaded\n"));
}
static int initialize_kernel_device(void)
{
int err;
dev_t dev = 0;
if (0 == mali_major)
{
/* auto select a major */
err = alloc_chrdev_region(&dev, 0/*first minor*/, 1/*count*/, mali_dev_name);
mali_major = MAJOR(dev);
}
else
{
/* use load time defined major number */
dev = MKDEV(mali_major, 0);
err = register_chrdev_region(dev, 1/*count*/, mali_dev_name);
}
if (err)
{
goto init_chrdev_err;
}
memset(&device, 0, sizeof(device));
/* initialize our char dev data */
cdev_init(&device.cdev, &mali_fops);
device.cdev.owner = THIS_MODULE;
device.cdev.ops = &mali_fops;
/* register char dev with the kernel */
err = cdev_add(&device.cdev, dev, 1/*count*/);
if (err)
{
goto init_cdev_err;
}
/* Success! */
return 0;
init_cdev_err:
unregister_chrdev_region(dev, 1/*count*/);
init_chrdev_err:
return err;
}
static int initialize_sysfs(void)
{
dev_t dev = MKDEV(mali_major, 0);
return mali_sysfs_register(&device, dev, mali_dev_name);
}
static void terminate_kernel_device(void)
{
dev_t dev = MKDEV(mali_major, 0);
mali_sysfs_unregister(&device, dev, mali_dev_name);
/* unregister char device */
cdev_del(&device.cdev);
/* free major */
unregister_chrdev_region(dev, 1/*count*/);
return;
}
/** @note munmap handler is done by vma close handler */
static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
{
struct mali_session_data * session_data;
_mali_uk_mem_mmap_s args = {0, };
session_data = (struct mali_session_data *)filp->private_data;
if (NULL == session_data)
{
MALI_PRINT_ERROR(("mmap called without any session data available\n"));
return -EFAULT;
}
MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags));
/* Re-pack the arguments that mmap() packed for us */
args.ctx = session_data;
args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
args.size = vma->vm_end - vma->vm_start;
args.ukk_private = vma;
if (VM_SHARED == (VM_SHARED & vma->vm_flags))
{
args.cache_settings = MALI_CACHE_STANDARD;
MALI_DEBUG_PRINT(3,("Allocate - Standard - Size: %d kb\n", args.size/1024));
}
else
{
args.cache_settings = MALI_CACHE_GP_READ_ALLOCATE;
MALI_DEBUG_PRINT(3,("Allocate - GP Cached - Size: %d kb\n", args.size/1024));
}
/* Set the flags to a shared (VM_SHARED) mapping rather than a private one; a private mapping would make the later io_remap fail for MALI_CACHE_GP_READ_ALLOCATE */
vma->vm_flags = 0x000000fb;
/* Call the common mmap handler */
MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
return 0;
}
static int mali_open(struct inode *inode, struct file *filp)
{
struct mali_session_data * session_data;
_mali_osk_errcode_t err;
/* input validation */
if (0 != MINOR(inode->i_rdev)) return -ENODEV;
/* allocated struct to track this session */
err = _mali_ukk_open((void **)&session_data);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
/* initialize file pointer */
filp->f_pos = 0;
/* link in our session data */
filp->private_data = (void*)session_data;
return 0;
}
static int mali_release(struct inode *inode, struct file *filp)
{
_mali_osk_errcode_t err;
/* input validation */
if (0 != MINOR(inode->i_rdev)) return -ENODEV;
err = _mali_ukk_close((void **)&filp->private_data);
if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
return 0;
}
int map_errcode( _mali_osk_errcode_t err )
{
switch(err)
{
case _MALI_OSK_ERR_OK : return 0;
case _MALI_OSK_ERR_FAULT: return -EFAULT;
case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
default: return -EFAULT;
}
}
#ifdef HAVE_UNLOCKED_IOCTL
static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#else
static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
#endif
{
int err;
struct mali_session_data *session_data;
#ifndef HAVE_UNLOCKED_IOCTL
/* inode not used */
(void)inode;
#endif
MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
session_data = (struct mali_session_data *)filp->private_data;
if (NULL == session_data)
{
MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
return -ENOTTY;
}
if (NULL == (void *)arg)
{
MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
return -ENOTTY;
}
switch(cmd)
{
case MALI_IOC_WAIT_FOR_NOTIFICATION:
err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
break;
case MALI_IOC_GET_API_VERSION:
err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
break;
case MALI_IOC_POST_NOTIFICATION:
err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
break;
case MALI_IOC_GET_USER_SETTINGS:
err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
break;
#if MALI_TIMELINE_PROFILING_ENABLED
case MALI_IOC_PROFILING_START:
err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
break;
case MALI_IOC_PROFILING_ADD_EVENT:
err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
break;
case MALI_IOC_PROFILING_STOP:
err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
break;
case MALI_IOC_PROFILING_GET_EVENT:
err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
break;
case MALI_IOC_PROFILING_CLEAR:
err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
break;
case MALI_IOC_PROFILING_GET_CONFIG:
/* Deprecated: still compatible with get_user_settings */
err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
break;
case MALI_IOC_PROFILING_REPORT_SW_COUNTERS:
err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg);
break;
#else
case MALI_IOC_PROFILING_START: /* FALL-THROUGH */
case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */
case MALI_IOC_PROFILING_STOP: /* FALL-THROUGH */
case MALI_IOC_PROFILING_GET_EVENT: /* FALL-THROUGH */
case MALI_IOC_PROFILING_CLEAR: /* FALL-THROUGH */
case MALI_IOC_PROFILING_GET_CONFIG: /* FALL-THROUGH */
case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */
MALI_DEBUG_PRINT(2, ("Profiling not supported\n"));
err = -ENOTTY;
break;
#endif
case MALI_IOC_MEM_INIT:
err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
break;
case MALI_IOC_MEM_TERM:
err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
break;
case MALI_IOC_MEM_MAP_EXT:
err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
break;
case MALI_IOC_MEM_UNMAP_EXT:
err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
break;
case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
break;
case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
break;
#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
case MALI_IOC_MEM_ATTACH_UMP:
err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
break;
case MALI_IOC_MEM_RELEASE_UMP:
err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
break;
#else
case MALI_IOC_MEM_ATTACH_UMP:
case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
err = -ENOTTY;
break;
#endif
#ifdef CONFIG_DMA_SHARED_BUFFER
case MALI_IOC_MEM_ATTACH_DMA_BUF:
err = mali_attach_dma_buf(session_data, (_mali_uk_attach_dma_buf_s __user *)arg);
break;
case MALI_IOC_MEM_RELEASE_DMA_BUF:
err = mali_release_dma_buf(session_data, (_mali_uk_release_dma_buf_s __user *)arg);
break;
case MALI_IOC_MEM_DMA_BUF_GET_SIZE:
err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg);
break;
#else
case MALI_IOC_MEM_DMA_BUF_GET_SIZE: /* FALL-THROUGH */
MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n"));
err = -ENOTTY;
break;
#endif
case MALI_IOC_PP_START_JOB:
err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
break;
case MALI_IOC_PP_NUMBER_OF_CORES_GET:
err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
break;
case MALI_IOC_PP_CORE_VERSION_GET:
err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
break;
case MALI_IOC_PP_DISABLE_WB:
err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg);
break;
case MALI_IOC_GP2_START_JOB:
err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
break;
case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
break;
case MALI_IOC_GP2_CORE_VERSION_GET:
err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
break;
case MALI_IOC_GP2_SUSPEND_RESPONSE:
err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
break;
case MALI_IOC_VSYNC_EVENT_REPORT:
err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
break;
case MALI_IOC_MEM_GET_BIG_BLOCK: /* Fallthrough */
case MALI_IOC_MEM_FREE_BIG_BLOCK:
MALI_PRINT_ERROR(("Non-MMU mode is no longer supported.\n"));
err = -ENOTTY;
break;
default:
MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
err = -ENOTTY;
}
return err;
}
module_init(mali_driver_init);
module_exit(mali_driver_exit);
MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
MODULE_AUTHOR("ARM Ltd.");
MODULE_VERSION(SVN_REV_STRING);
| gpl-2.0 |
javilonas/Ptah-GT-I9300_OLD | fs/nfs/dns_resolve.c | 922 | 8011 | /*
* linux/fs/nfs/dns_resolve.c
*
* Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
*
* Resolves DNS hostnames into valid ip addresses
*/
#ifdef CONFIG_NFS_USE_KERNEL_DNS
#include <linux/sunrpc/clnt.h>
#include <linux/dns_resolver.h>
ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
struct sockaddr *sa, size_t salen)
{
ssize_t ret;
char *ip_addr = NULL;
int ip_len;
ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
if (ip_len > 0)
ret = rpc_pton(ip_addr, ip_len, sa, salen);
else
ret = -ESRCH;
kfree(ip_addr);
return ret;
}
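/*
* Illustrative caller sketch (hypothetical, example only): resolve a
* hostname into a sockaddr_storage. A positive return is the length of
* the address written into the sockaddr; -ESRCH means the DNS query
* found nothing.
*/
#if 0 /* example only */
static int example_resolve(char *host, struct sockaddr_storage *ss)
{
ssize_t len = nfs_dns_resolve_name(host, strlen(host),
(struct sockaddr *)ss, sizeof(*ss));
return (len < 0) ? (int)len : 0;
}
#endif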
#else
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/seq_file.h>
#include <linux/inet.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svcauth.h>
#include "dns_resolve.h"
#include "cache_lib.h"
#define NFS_DNS_HASHBITS 4
#define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS)
static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE];
struct nfs_dns_ent {
struct cache_head h;
char *hostname;
size_t namelen;
struct sockaddr_storage addr;
size_t addrlen;
};
static void nfs_dns_ent_update(struct cache_head *cnew,
struct cache_head *ckey)
{
struct nfs_dns_ent *new;
struct nfs_dns_ent *key;
new = container_of(cnew, struct nfs_dns_ent, h);
key = container_of(ckey, struct nfs_dns_ent, h);
memcpy(&new->addr, &key->addr, key->addrlen);
new->addrlen = key->addrlen;
}
static void nfs_dns_ent_init(struct cache_head *cnew,
struct cache_head *ckey)
{
struct nfs_dns_ent *new;
struct nfs_dns_ent *key;
new = container_of(cnew, struct nfs_dns_ent, h);
key = container_of(ckey, struct nfs_dns_ent, h);
kfree(new->hostname);
new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
if (new->hostname) {
new->namelen = key->namelen;
nfs_dns_ent_update(cnew, ckey);
} else {
new->namelen = 0;
new->addrlen = 0;
}
}
static void nfs_dns_ent_put(struct kref *ref)
{
struct nfs_dns_ent *item;
item = container_of(ref, struct nfs_dns_ent, h.ref);
kfree(item->hostname);
kfree(item);
}
static struct cache_head *nfs_dns_ent_alloc(void)
{
struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
if (item != NULL) {
item->hostname = NULL;
item->namelen = 0;
item->addrlen = 0;
return &item->h;
}
return NULL;
};
static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key)
{
return hash_str(key->hostname, NFS_DNS_HASHBITS);
}
static void nfs_dns_request(struct cache_detail *cd,
struct cache_head *ch,
char **bpp, int *blen)
{
struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
qword_add(bpp, blen, key->hostname);
(*bpp)[-1] = '\n';
}
static int nfs_dns_upcall(struct cache_detail *cd,
struct cache_head *ch)
{
struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
int ret;
ret = nfs_cache_upcall(cd, key->hostname);
if (ret)
ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
return ret;
}
static int nfs_dns_match(struct cache_head *ca,
struct cache_head *cb)
{
struct nfs_dns_ent *a;
struct nfs_dns_ent *b;
a = container_of(ca, struct nfs_dns_ent, h);
b = container_of(cb, struct nfs_dns_ent, h);
if (a->namelen == 0 || a->namelen != b->namelen)
return 0;
return memcmp(a->hostname, b->hostname, a->namelen) == 0;
}
static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
struct cache_head *h)
{
struct nfs_dns_ent *item;
long ttl;
if (h == NULL) {
seq_puts(m, "# ip address hostname ttl\n");
return 0;
}
item = container_of(h, struct nfs_dns_ent, h);
ttl = item->h.expiry_time - seconds_since_boot();
if (ttl < 0)
ttl = 0;
if (!test_bit(CACHE_NEGATIVE, &h->flags)) {
char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1];
rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf));
seq_printf(m, "%15s ", buf);
} else
seq_puts(m, "<none> ");
seq_printf(m, "%15s %ld\n", item->hostname, ttl);
return 0;
}
static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
struct nfs_dns_ent *key)
{
struct cache_head *ch;
ch = sunrpc_cache_lookup(cd,
&key->h,
nfs_dns_hash(key));
if (!ch)
return NULL;
return container_of(ch, struct nfs_dns_ent, h);
}
static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
struct nfs_dns_ent *new,
struct nfs_dns_ent *key)
{
struct cache_head *ch;
ch = sunrpc_cache_update(cd,
&new->h, &key->h,
nfs_dns_hash(key));
if (!ch)
return NULL;
return container_of(ch, struct nfs_dns_ent, h);
}
static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
{
char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
struct nfs_dns_ent key, *item;
unsigned int ttl;
ssize_t len;
int ret = -EINVAL;
if (buf[buflen-1] != '\n')
goto out;
buf[buflen-1] = '\0';
len = qword_get(&buf, buf1, sizeof(buf1));
if (len <= 0)
goto out;
key.addrlen = rpc_pton(buf1, len,
(struct sockaddr *)&key.addr,
sizeof(key.addr));
len = qword_get(&buf, buf1, sizeof(buf1));
if (len <= 0)
goto out;
key.hostname = buf1;
key.namelen = len;
memset(&key.h, 0, sizeof(key.h));
if (get_uint(&buf, &ttl) < 0)
goto out;
if (ttl == 0)
goto out;
key.h.expiry_time = ttl + seconds_since_boot();
ret = -ENOMEM;
item = nfs_dns_lookup(cd, &key);
if (item == NULL)
goto out;
if (key.addrlen == 0)
set_bit(CACHE_NEGATIVE, &key.h.flags);
item = nfs_dns_update(cd, &key, item);
if (item == NULL)
goto out;
ret = 0;
cache_put(&item->h, cd);
out:
return ret;
}
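/*
* Upcall reply format, inferred from nfs_dns_parse() above (the address
* and TTL values are example data only):
*
* "<ip address> <hostname> <ttl>\n"
* e.g. "192.168.1.10 server.example.com 300"
*
* An address that rpc_pton() cannot parse (addrlen == 0) creates a
* negative cache entry; a TTL of 0 makes the line be rejected with
* -EINVAL. The line is written into the "dns_resolve" cache channel
* registered below (the exact path depends on how nfs_cache_register()
* exposes it).
*/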
static struct cache_detail nfs_dns_resolve = {
.owner = THIS_MODULE,
.hash_size = NFS_DNS_HASHTBL_SIZE,
.hash_table = nfs_dns_table,
.name = "dns_resolve",
.cache_put = nfs_dns_ent_put,
.cache_upcall = nfs_dns_upcall,
.cache_parse = nfs_dns_parse,
.cache_show = nfs_dns_show,
.match = nfs_dns_match,
.init = nfs_dns_ent_init,
.update = nfs_dns_ent_update,
.alloc = nfs_dns_ent_alloc,
};
static int do_cache_lookup(struct cache_detail *cd,
struct nfs_dns_ent *key,
struct nfs_dns_ent **item,
struct nfs_cache_defer_req *dreq)
{
int ret = -ENOMEM;
*item = nfs_dns_lookup(cd, key);
if (*item) {
ret = cache_check(cd, &(*item)->h, &dreq->req);
if (ret)
*item = NULL;
}
return ret;
}
static int do_cache_lookup_nowait(struct cache_detail *cd,
struct nfs_dns_ent *key,
struct nfs_dns_ent **item)
{
int ret = -ENOMEM;
*item = nfs_dns_lookup(cd, key);
if (!*item)
goto out_err;
ret = -ETIMEDOUT;
if (!test_bit(CACHE_VALID, &(*item)->h.flags)
|| (*item)->h.expiry_time < seconds_since_boot()
|| cd->flush_time > (*item)->h.last_refresh)
goto out_put;
ret = -ENOENT;
if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
goto out_put;
return 0;
out_put:
cache_put(&(*item)->h, cd);
out_err:
*item = NULL;
return ret;
}
static int do_cache_lookup_wait(struct cache_detail *cd,
struct nfs_dns_ent *key,
struct nfs_dns_ent **item)
{
struct nfs_cache_defer_req *dreq;
int ret = -ENOMEM;
dreq = nfs_cache_defer_req_alloc();
if (!dreq)
goto out;
ret = do_cache_lookup(cd, key, item, dreq);
if (ret == -EAGAIN) {
ret = nfs_cache_wait_for_upcall(dreq);
if (!ret)
ret = do_cache_lookup_nowait(cd, key, item);
}
nfs_cache_defer_req_put(dreq);
out:
return ret;
}
ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
struct sockaddr *sa, size_t salen)
{
struct nfs_dns_ent key = {
.hostname = name,
.namelen = namelen,
};
struct nfs_dns_ent *item = NULL;
ssize_t ret;
ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item);
if (ret == 0) {
if (salen >= item->addrlen) {
memcpy(sa, &item->addr, item->addrlen);
ret = item->addrlen;
} else
ret = -EOVERFLOW;
cache_put(&item->h, &nfs_dns_resolve);
} else if (ret == -ENOENT)
ret = -ESRCH;
return ret;
}
int nfs_dns_resolver_init(void)
{
return nfs_cache_register(&nfs_dns_resolve);
}
void nfs_dns_resolver_destroy(void)
{
nfs_cache_unregister(&nfs_dns_resolve);
}
#endif
| gpl-2.0 |
lategoodbye/linux-lcd6610 | drivers/mtd/nand/sm_common.c | 1946 | 4567 | /*
* Copyright © 2009 - Maxim Levitsky
* Common routines & support for xD format
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include "sm_common.h"
static struct nand_ecclayout nand_oob_sm = {
.eccbytes = 6,
.eccpos = {8, 9, 10, 13, 14, 15},
.oobfree = {
{.offset = 0 , .length = 4}, /* reserved */
{.offset = 6 , .length = 2}, /* LBA1 */
{.offset = 11, .length = 2} /* LBA2 */
}
};
/* NOTE: This layout is not compatible with SmartMedia, */
/* because the 256 byte devices have a page dependent oob layout */
/* However it does preserve the bad block markers */
/* If you use smftl, it will bypass this and work correctly */
/* If you don't, then you break SmartMedia compliance anyway */
static struct nand_ecclayout nand_oob_sm_small = {
.eccbytes = 3,
.eccpos = {0, 1, 2},
.oobfree = {
{.offset = 3 , .length = 2}, /* reserved */
{.offset = 6 , .length = 2}, /* LBA1 */
}
};
static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_oob_ops ops;
struct sm_oob oob;
int ret;
memset(&oob, -1, SM_OOB_SIZE);
oob.block_status = 0x0F;
/* As long as this function is called on erase block boundaries
it will work correctly for 256-byte NAND devices */
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
ops.ooblen = mtd->oobsize;
ops.oobbuf = (void *)&oob;
ops.datbuf = NULL;
ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
(int)ofs);
return -EIO;
}
return 0;
}
static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0),
LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0),
LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
{NULL}
};
static struct nand_flash_dev nand_xd_flash_ids[] = {
LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0),
LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0),
LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0),
LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0),
LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD),
LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD),
LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
{NULL}
};
int sm_register_device(struct mtd_info *mtd, int smartmedia)
{
struct nand_chip *chip = mtd->priv;
int ret;
chip->options |= NAND_SKIP_BBTSCAN;
/* Scan for card properties */
ret = nand_scan_ident(mtd, 1, smartmedia ?
nand_smartmedia_flash_ids : nand_xd_flash_ids);
if (ret)
return ret;
/* Bad block marker position */
chip->badblockpos = 0x05;
chip->badblockbits = 7;
chip->block_markbad = sm_block_markbad;
/* ECC layout */
if (mtd->writesize == SM_SECTOR_SIZE)
chip->ecc.layout = &nand_oob_sm;
else if (mtd->writesize == SM_SMALL_PAGE)
chip->ecc.layout = &nand_oob_sm_small;
else
return -ENODEV;
ret = nand_scan_tail(mtd);
if (ret)
return ret;
return mtd_device_register(mtd, NULL, 0);
}
EXPORT_SYMBOL_GPL(sm_register_device);
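/*
* Illustrative sketch (hypothetical controller driver, example only): a
* driver that has set up its mtd_info/nand_chip pair hands the device
* over like this, with the second argument selecting between the two
* flash ID tables above.
*/
#if 0 /* example only */
static int example_attach(struct mtd_info *mtd, bool is_smartmedia)
{
return sm_register_device(mtd, is_smartmedia ? 1 : 0);
}
#endif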
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Common SmartMedia/xD functions");
| gpl-2.0 |
kashifmin/KashKernel_MT6589 | drivers/pinctrl/pinctrl-tegra.c | 1946 | 13342 | /*
* Driver for the NVIDIA Tegra pinmux
*
* Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved.
*
* Derived from code:
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2010 NVIDIA Corporation
* Copyright (C) 2009-2011 ST-Ericsson AB
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <mach/pinconf-tegra.h>
#include "pinctrl-tegra.h"
#define DRIVER_NAME "tegra-pinmux-disabled"
struct tegra_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
const struct tegra_pinctrl_soc_data *soc;
int nbanks;
void __iomem **regs;
};
static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
{
return readl(pmx->regs[bank] + reg);
}
static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
{
writel(val, pmx->regs[bank] + reg);
}
static int tegra_pinctrl_list_groups(struct pinctrl_dev *pctldev,
unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
if (group >= pmx->soc->ngroups)
return -EINVAL;
return 0;
}
static const char *tegra_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
if (group >= pmx->soc->ngroups)
return NULL;
return pmx->soc->groups[group].name;
}
static int tegra_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group,
const unsigned **pins,
unsigned *num_pins)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
if (group >= pmx->soc->ngroups)
return -EINVAL;
*pins = pmx->soc->groups[group].pins;
*num_pins = pmx->soc->groups[group].npins;
return 0;
}
static void tegra_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned offset)
{
seq_printf(s, " " DRIVER_NAME);
}
static struct pinctrl_ops tegra_pinctrl_ops = {
.list_groups = tegra_pinctrl_list_groups,
.get_group_name = tegra_pinctrl_get_group_name,
.get_group_pins = tegra_pinctrl_get_group_pins,
.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
};
static int tegra_pinctrl_list_funcs(struct pinctrl_dev *pctldev,
unsigned function)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
if (function >= pmx->soc->nfunctions)
return -EINVAL;
return 0;
}
static const char *tegra_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
unsigned function)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
if (function >= pmx->soc->nfunctions)
return NULL;
return pmx->soc->functions[function].name;
}
static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
unsigned function,
const char * const **groups,
unsigned * const num_groups)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
if (function >= pmx->soc->nfunctions)
return -EINVAL;
*groups = pmx->soc->functions[function].groups;
*num_groups = pmx->soc->functions[function].ngroups;
return 0;
}
static int tegra_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *g;
int i;
u32 val;
if (group >= pmx->soc->ngroups)
return -EINVAL;
g = &pmx->soc->groups[group];
if (g->mux_reg < 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(g->funcs); i++) {
if (g->funcs[i] == function)
break;
}
if (i == ARRAY_SIZE(g->funcs))
return -EINVAL;
val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
val &= ~(0x3 << g->mux_bit);
val |= i << g->mux_bit;
pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
return 0;
}
static void tegra_pinctrl_disable(struct pinctrl_dev *pctldev,
unsigned function, unsigned group)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *g;
u32 val;
if (group >= pmx->soc->ngroups)
return;
g = &pmx->soc->groups[group];
if (g->mux_reg < 0)
return;
val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
val &= ~(0x3 << g->mux_bit);
val |= g->func_safe << g->mux_bit;
pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
}
static struct pinmux_ops tegra_pinmux_ops = {
.list_functions = tegra_pinctrl_list_funcs,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
.enable = tegra_pinctrl_enable,
.disable = tegra_pinctrl_disable,
};
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
const struct tegra_pingroup *g,
enum tegra_pinconf_param param,
s8 *bank, s16 *reg, s8 *bit, s8 *width)
{
switch (param) {
case TEGRA_PINCONF_PARAM_PULL:
*bank = g->pupd_bank;
*reg = g->pupd_reg;
*bit = g->pupd_bit;
*width = 2;
break;
case TEGRA_PINCONF_PARAM_TRISTATE:
*bank = g->tri_bank;
*reg = g->tri_reg;
*bit = g->tri_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_ENABLE_INPUT:
*bank = g->einput_bank;
*reg = g->einput_reg;
*bit = g->einput_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_OPEN_DRAIN:
*bank = g->odrain_bank;
*reg = g->odrain_reg;
*bit = g->odrain_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_LOCK:
*bank = g->lock_bank;
*reg = g->lock_reg;
*bit = g->lock_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_IORESET:
*bank = g->ioreset_bank;
*reg = g->ioreset_reg;
*bit = g->ioreset_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->hsm_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_SCHMITT:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->schmitt_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_LOW_POWER_MODE:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->lpmd_bit;
*width = 1;
break;
case TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->drvdn_bit;
*width = g->drvdn_width;
break;
case TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->drvup_bit;
*width = g->drvup_width;
break;
case TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->slwf_bit;
*width = g->slwf_width;
break;
case TEGRA_PINCONF_PARAM_SLEW_RATE_RISING:
*bank = g->drv_bank;
*reg = g->drv_reg;
*bit = g->slwr_bit;
*width = g->slwr_width;
break;
default:
dev_err(pmx->dev, "Invalid config param %04x\n", param);
return -ENOTSUPP;
}
if (*reg < 0) {
dev_err(pmx->dev,
"Config param %04x not supported on group %s\n",
param, g->name);
return -ENOTSUPP;
}
return 0;
}
static int tegra_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long *config)
{
return -ENOTSUPP;
}
static int tegra_pinconf_set(struct pinctrl_dev *pctldev,
unsigned pin, unsigned long config)
{
return -ENOTSUPP;
}
static int tegra_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned group, unsigned long *config)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(*config);
u16 arg;
const struct tegra_pingroup *g;
int ret;
s8 bank, bit, width;
s16 reg;
u32 val, mask;
if (group >= pmx->soc->ngroups)
return -EINVAL;
g = &pmx->soc->groups[group];
ret = tegra_pinconf_reg(pmx, g, param, &bank, &reg, &bit, &width);
if (ret < 0)
return ret;
val = pmx_readl(pmx, bank, reg);
mask = (1 << width) - 1;
arg = (val >> bit) & mask;
*config = TEGRA_PINCONF_PACK(param, arg);
return 0;
}
static int tegra_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned group, unsigned long config)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(config);
u16 arg = TEGRA_PINCONF_UNPACK_ARG(config);
const struct tegra_pingroup *g;
int ret;
s8 bank, bit, width;
s16 reg;
u32 val, mask;
if (group >= pmx->soc->ngroups)
return -EINVAL;
g = &pmx->soc->groups[group];
ret = tegra_pinconf_reg(pmx, g, param, &bank, &reg, &bit, &width);
if (ret < 0)
return ret;
val = pmx_readl(pmx, bank, reg);
/* LOCK can't be cleared */
if (param == TEGRA_PINCONF_PARAM_LOCK) {
if ((val & BIT(bit)) && !arg)
return -EINVAL;
}
/* Special-case Boolean values; allow any non-zero as true */
if (width == 1)
arg = !!arg;
/* Range-check user-supplied value */
mask = (1 << width) - 1;
if (arg & ~mask)
return -EINVAL;
/* Update register */
val &= ~(mask << bit);
val |= arg << bit;
pmx_writel(pmx, val, bank, reg);
return 0;
}
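/*
* Illustrative sketch (example only): configs are packed (param, arg)
* pairs, matching the TEGRA_PINCONF_UNPACK_* helpers used above. The
* pull-up encoding (2) is an assumption based on the 2-bit PULL field;
* check the SoC documentation before relying on it.
*/
#if 0 /* example only */
static int example_enable_pull_up(struct pinctrl_dev *pctldev, unsigned group)
{
unsigned long config =
TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_PULL, 2 /* assumed pull-up */);
return tegra_pinconf_group_set(pctldev, group, config);
}
#endif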
static void tegra_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned offset)
{
}
static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned selector)
{
}
struct pinconf_ops tegra_pinconf_ops = {
.pin_config_get = tegra_pinconf_get,
.pin_config_set = tegra_pinconf_set,
.pin_config_group_get = tegra_pinconf_group_get,
.pin_config_group_set = tegra_pinconf_group_set,
.pin_config_dbg_show = tegra_pinconf_dbg_show,
.pin_config_group_dbg_show = tegra_pinconf_group_dbg_show,
};
static struct pinctrl_gpio_range tegra_pinctrl_gpio_range = {
.name = "Tegra GPIOs",
.id = 0,
.base = 0,
};
static struct pinctrl_desc tegra_pinctrl_desc = {
.name = DRIVER_NAME,
.pctlops = &tegra_pinctrl_ops,
.pmxops = &tegra_pinmux_ops,
.confops = &tegra_pinconf_ops,
.owner = THIS_MODULE,
};
static struct of_device_id tegra_pinctrl_of_match[] __devinitdata = {
#ifdef CONFIG_PINCTRL_TEGRA20
{
.compatible = "nvidia,tegra20-pinmux-disabled",
.data = tegra20_pinctrl_init,
},
#endif
#ifdef CONFIG_PINCTRL_TEGRA30
{
.compatible = "nvidia,tegra30-pinmux-disabled",
.data = tegra30_pinctrl_init,
},
#endif
{},
};
static int __devinit tegra_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
tegra_pinctrl_soc_initf initf = NULL;
struct tegra_pmx *pmx;
struct resource *res;
int i;
match = of_match_device(tegra_pinctrl_of_match, &pdev->dev);
if (match)
initf = (tegra_pinctrl_soc_initf)match->data;
#ifdef CONFIG_PINCTRL_TEGRA20
if (!initf)
initf = tegra20_pinctrl_init;
#endif
if (!initf) {
dev_err(&pdev->dev,
"Could not determine SoC-specific init func\n");
return -EINVAL;
}
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx) {
dev_err(&pdev->dev, "Can't alloc tegra_pmx\n");
return -ENOMEM;
}
pmx->dev = &pdev->dev;
(*initf)(&pmx->soc);
tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios;
tegra_pinctrl_desc.pins = pmx->soc->pins;
tegra_pinctrl_desc.npins = pmx->soc->npins;
for (i = 0; ; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
break;
}
pmx->nbanks = i;
pmx->regs = devm_kzalloc(&pdev->dev, pmx->nbanks * sizeof(*pmx->regs),
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
return -ENODEV;
}
for (i = 0; i < pmx->nbanks; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res) {
dev_err(&pdev->dev, "Missing MEM resource\n");
return -ENODEV;
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
dev_name(&pdev->dev))) {
dev_err(&pdev->dev,
"Couldn't request MEM resource %d\n", i);
return -ENODEV;
}
pmx->regs[i] = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!pmx->regs[i]) {
dev_err(&pdev->dev, "Couldn't ioremap regs %d\n", i);
return -ENODEV;
}
}
pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
if (IS_ERR(pmx->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pmx->pctl);
}
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
platform_set_drvdata(pdev, pmx);
dev_dbg(&pdev->dev, "Probed Tegra pinctrl driver\n");
return 0;
}
static int __devexit tegra_pinctrl_remove(struct platform_device *pdev)
{
struct tegra_pmx *pmx = platform_get_drvdata(pdev);
pinctrl_remove_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
pinctrl_unregister(pmx->pctl);
return 0;
}
static struct platform_driver tegra_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = tegra_pinctrl_of_match,
},
.probe = tegra_pinctrl_probe,
.remove = __devexit_p(tegra_pinctrl_remove),
};
static int __init tegra_pinctrl_init(void)
{
return platform_driver_register(&tegra_pinctrl_driver);
}
arch_initcall(tegra_pinctrl_init);
static void __exit tegra_pinctrl_exit(void)
{
platform_driver_unregister(&tegra_pinctrl_driver);
}
module_exit(tegra_pinctrl_exit);
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, tegra_pinctrl_of_match);
| gpl-2.0 |
k2wl/3.0.y | drivers/acpi/acpica/utcopy.c | 3226 | 28561 | /******************************************************************************
*
* Module Name: utcopy - Internal to external object translation utilities
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utcopy")
/* Local prototypes */
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
u8 * data_space, acpi_size * buffer_space_used);
static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context);
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
u8 * buffer, acpi_size * space_used);
static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
union acpi_operand_object **return_obj);
static acpi_status
acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
union acpi_operand_object **internal_object);
static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
union acpi_operand_object *dest_desc);
static acpi_status
acpi_ut_copy_ielement_to_eelement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context);
static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
union acpi_operand_object *dest_obj,
struct acpi_walk_state *walk_state);
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_isimple_to_esimple
*
* PARAMETERS: internal_object - Source object to be copied
* external_object - Where to return the copied object
* data_space - Where object data is returned (such as
* buffer and string data)
* buffer_space_used - Length of data_space that was used
*
* RETURN: Status
*
* DESCRIPTION: This function is called to copy a simple internal object to
* an external object.
*
* The data_space buffer is assumed to have sufficient space for
* the object.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
u8 * data_space, acpi_size * buffer_space_used)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
*buffer_space_used = 0;
/*
* Check for NULL object case (could be an uninitialized
* package element)
*/
if (!internal_object) {
return_ACPI_STATUS(AE_OK);
}
/* Always clear the external object */
ACPI_MEMSET(external_object, 0, sizeof(union acpi_object));
/*
* In general, the external object will be the same type as
* the internal object
*/
external_object->type = internal_object->common.type;
/* However, only a limited number of external types are supported */
switch (internal_object->common.type) {
case ACPI_TYPE_STRING:
external_object->string.pointer = (char *)data_space;
external_object->string.length = internal_object->string.length;
*buffer_space_used = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
internal_object->
string.
length + 1);
ACPI_MEMCPY((void *)data_space,
(void *)internal_object->string.pointer,
(acpi_size) internal_object->string.length + 1);
break;
case ACPI_TYPE_BUFFER:
external_object->buffer.pointer = data_space;
external_object->buffer.length = internal_object->buffer.length;
*buffer_space_used =
ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
length);
ACPI_MEMCPY((void *)data_space,
(void *)internal_object->buffer.pointer,
internal_object->buffer.length);
break;
case ACPI_TYPE_INTEGER:
external_object->integer.value = internal_object->integer.value;
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/* This is an object reference. */
switch (internal_object->reference.class) {
case ACPI_REFCLASS_NAME:
/*
* For namepath, return the object handle ("reference")
* We are referring to the namespace node
*/
external_object->reference.handle =
internal_object->reference.node;
external_object->reference.actual_type =
acpi_ns_get_type(internal_object->reference.node);
break;
default:
/* All other reference types are unsupported */
return_ACPI_STATUS(AE_TYPE);
}
break;
case ACPI_TYPE_PROCESSOR:
external_object->processor.proc_id =
internal_object->processor.proc_id;
external_object->processor.pblk_address =
internal_object->processor.address;
external_object->processor.pblk_length =
internal_object->processor.length;
break;
case ACPI_TYPE_POWER:
external_object->power_resource.system_level =
internal_object->power_resource.system_level;
external_object->power_resource.resource_order =
internal_object->power_resource.resource_order;
break;
default:
/*
* There is no corresponding external object type
*/
ACPI_ERROR((AE_INFO,
"Unsupported object type, cannot convert to external object: %s",
acpi_ut_get_type_name(internal_object->common.
type)));
return_ACPI_STATUS(AE_SUPPORT);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ielement_to_eelement
*
* PARAMETERS: acpi_pkg_callback
*
* RETURN: Status
*
* DESCRIPTION: Copy one package element to another package element
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ielement_to_eelement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context)
{
acpi_status status = AE_OK;
struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
acpi_size object_space;
u32 this_index;
union acpi_object *target_object;
ACPI_FUNCTION_ENTRY();
this_index = state->pkg.index;
target_object = (union acpi_object *)
&((union acpi_object *)(state->pkg.dest_object))->package.
elements[this_index];
switch (object_type) {
case ACPI_COPY_TYPE_SIMPLE:
/*
* This is a simple or null object
*/
status = acpi_ut_copy_isimple_to_esimple(source_object,
target_object,
info->free_space,
&object_space);
if (ACPI_FAILURE(status)) {
return (status);
}
break;
case ACPI_COPY_TYPE_PACKAGE:
/*
* Build the package object
*/
target_object->type = ACPI_TYPE_PACKAGE;
target_object->package.count = source_object->package.count;
target_object->package.elements =
ACPI_CAST_PTR(union acpi_object, info->free_space);
/*
* Pass the new package object back to the package walk routine
*/
state->pkg.this_target_obj = target_object;
/*
* Save space for the array of objects (Package elements)
* update the buffer length counter
*/
object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
target_object->
package.count *
sizeof(union
acpi_object));
break;
default:
return (AE_BAD_PARAMETER);
}
info->free_space += object_space;
info->length += object_space;
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ipackage_to_epackage
*
* PARAMETERS: internal_object - Pointer to the object we are returning
* Buffer - Where the object is returned
* space_used - Where the object length is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to place a package object in a user
* buffer. A package object by definition contains other objects.
*
* The buffer is assumed to have sufficient space for the object.
* The caller must have verified the buffer length needed using
* the acpi_ut_get_object_size function before calling this function.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
u8 * buffer, acpi_size * space_used)
{
union acpi_object *external_object;
acpi_status status;
struct acpi_pkg_info info;
ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
/*
* First package at head of the buffer
*/
external_object = ACPI_CAST_PTR(union acpi_object, buffer);
/*
* Free space begins right after the first package
*/
info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.free_space =
buffer + ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.object_space = 0;
info.num_packages = 1;
external_object->type = internal_object->common.type;
external_object->package.count = internal_object->package.count;
external_object->package.elements = ACPI_CAST_PTR(union acpi_object,
info.free_space);
/*
* Leave room for an array of ACPI_OBJECTS in the buffer
* and move the free space past it
*/
info.length += (acpi_size) external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.free_space += external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
status = acpi_ut_walk_package_tree(internal_object, external_object,
acpi_ut_copy_ielement_to_eelement,
&info);
*space_used = info.length;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_iobject_to_eobject
*
* PARAMETERS: internal_object - The internal object to be converted
* ret_buffer - Where the object is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to build an API object to be returned
* to the caller.
*
******************************************************************************/
acpi_status
acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
if (internal_object->common.type == ACPI_TYPE_PACKAGE) {
/*
* Package object: Copy all subobjects (including
* nested packages)
*/
status = acpi_ut_copy_ipackage_to_epackage(internal_object,
ret_buffer->pointer,
&ret_buffer->length);
} else {
/*
* Build a simple object (no nested objects)
*/
status = acpi_ut_copy_isimple_to_esimple(internal_object,
ACPI_CAST_PTR(union
acpi_object,
ret_buffer->
pointer),
ACPI_ADD_PTR(u8,
ret_buffer->
pointer,
ACPI_ROUND_UP_TO_NATIVE_WORD
(sizeof
(union
acpi_object))),
&ret_buffer->length);
/*
* build simple does not include the object size in the length
* so we add it in here
*/
ret_buffer->length += sizeof(union acpi_object);
}
return_ACPI_STATUS(status);
}
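/*
* Illustrative caller sketch (example only): as the descriptions above
* note, the caller must size the return buffer first.
* acpi_ut_get_object_size() is the sizing helper referenced in the
* ipackage_to_epackage comment; its exact signature here is assumed
* from that reference.
*/
#if 0 /* example only */
static acpi_status example_return_object(union acpi_operand_object *obj,
struct acpi_buffer *buf)
{
acpi_size size;
acpi_status status = acpi_ut_get_object_size(obj, &size);
if (ACPI_FAILURE(status))
return status;
if (buf->length < size)
return AE_BUFFER_OVERFLOW;
return acpi_ut_copy_iobject_to_eobject(obj, buf);
}
#endif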
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_esimple_to_isimple
*
* PARAMETERS: external_object - The external object to be converted
* ret_internal_object - Where the internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: This function copies an external object to an internal one.
* NOTE: Pointers can be copied, we don't need to copy data.
* (The pointers have to be valid in our address space no matter
* what we do with them!)
*
******************************************************************************/
static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
union acpi_operand_object **ret_internal_object)
{
union acpi_operand_object *internal_object;
ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
/*
* Simple types supported are: String, Buffer, Integer
*/
switch (external_object->type) {
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_LOCAL_REFERENCE:
internal_object = acpi_ut_create_internal_object((u8)external_object->type);
if (!internal_object) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
break;
case ACPI_TYPE_ANY: /* This is the case for a NULL object */
*ret_internal_object = NULL;
return_ACPI_STATUS(AE_OK);
default:
/* All other types are not supported */
ACPI_ERROR((AE_INFO,
"Unsupported object type, cannot convert to internal object: %s",
acpi_ut_get_type_name(external_object->type)));
return_ACPI_STATUS(AE_SUPPORT);
}
/* Must COPY string and buffer contents */
switch (external_object->type) {
case ACPI_TYPE_STRING:
internal_object->string.pointer =
ACPI_ALLOCATE_ZEROED((acpi_size)external_object->string.length + 1);
if (!internal_object->string.pointer) {
goto error_exit;
}
ACPI_MEMCPY(internal_object->string.pointer,
external_object->string.pointer,
external_object->string.length);
internal_object->string.length = external_object->string.length;
break;
case ACPI_TYPE_BUFFER:
internal_object->buffer.pointer =
ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
if (!internal_object->buffer.pointer) {
goto error_exit;
}
ACPI_MEMCPY(internal_object->buffer.pointer,
external_object->buffer.pointer,
external_object->buffer.length);
internal_object->buffer.length = external_object->buffer.length;
/* Mark buffer data valid */
internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
break;
case ACPI_TYPE_INTEGER:
internal_object->integer.value = external_object->integer.value;
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/* TBD: should validate incoming handle */
internal_object->reference.class = ACPI_REFCLASS_NAME;
internal_object->reference.node =
external_object->reference.handle;
break;
default:
/* Other types can't get here */
break;
}
*ret_internal_object = internal_object;
return_ACPI_STATUS(AE_OK);
error_exit:
acpi_ut_remove_reference(internal_object);
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_epackage_to_ipackage
*
* PARAMETERS: external_object - The external object to be converted
* internal_object - Where the internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Copy an external package object to an internal package.
* Handles nested packages.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
union acpi_operand_object **internal_object)
{
acpi_status status = AE_OK;
union acpi_operand_object *package_object;
union acpi_operand_object **package_elements;
u32 i;
ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
/* Create the package object */
package_object =
acpi_ut_create_package_object(external_object->package.count);
if (!package_object) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
package_elements = package_object->package.elements;
/*
* Recursive implementation. Probably ok, since nested external packages
* as parameters should be very rare.
*/
for (i = 0; i < external_object->package.count; i++) {
status = acpi_ut_copy_eobject_to_iobject(&external_object->package.elements[i],
&package_elements[i]);
if (ACPI_FAILURE(status)) {
/* Truncate package and delete it */
package_object->package.count = i;
package_elements[i] = NULL;
acpi_ut_remove_reference(package_object);
return_ACPI_STATUS(status);
}
}
/* Mark package data valid */
package_object->package.flags |= AOPOBJ_DATA_VALID;
*internal_object = package_object;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_eobject_to_iobject
*
* PARAMETERS: external_object - The external object to be converted
* internal_object - Where the internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Converts an external object to an internal object.
*
******************************************************************************/
acpi_status
acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
union acpi_operand_object **internal_object)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
if (external_object->type == ACPI_TYPE_PACKAGE) {
status =
acpi_ut_copy_epackage_to_ipackage(external_object,
internal_object);
} else {
/*
* Build a simple object (no nested objects)
*/
status =
acpi_ut_copy_esimple_to_isimple(external_object,
internal_object);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_simple_object
*
* PARAMETERS: source_desc - The internal object to be copied
* dest_desc - New target object
*
* RETURN: Status
*
* DESCRIPTION: Simple copy of one internal object to another. Reference count
* of the destination object is preserved.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
union acpi_operand_object *dest_desc)
{
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
acpi_size copy_size;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
/*
* Copy the entire source object over the destination object.
* Note: Source can be either an operand object or namespace node.
*/
copy_size = sizeof(union acpi_operand_object);
if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
copy_size = sizeof(struct acpi_namespace_node);
}
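/*
* Presumably a namespace node allocation is smaller than a full operand
* object, so limiting the copy length here avoids reading past the end
* of a node-typed source.
*/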
ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */
dest_desc->common.reference_count = reference_count;
dest_desc->common.next_object = next_object;
/* New object is not static, regardless of source */
dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
/* Handle the objects with extra data */
switch (dest_desc->common.type) {
case ACPI_TYPE_BUFFER:
/*
* Allocate and copy the actual buffer if and only if:
* 1) There is a valid buffer pointer
* 2) The buffer has a length > 0
*/
if ((source_desc->buffer.pointer) &&
(source_desc->buffer.length)) {
dest_desc->buffer.pointer =
ACPI_ALLOCATE(source_desc->buffer.length);
if (!dest_desc->buffer.pointer) {
return (AE_NO_MEMORY);
}
/* Copy the actual buffer data */
ACPI_MEMCPY(dest_desc->buffer.pointer,
source_desc->buffer.pointer,
source_desc->buffer.length);
}
break;
case ACPI_TYPE_STRING:
/*
* Allocate and copy the actual string if and only if:
* 1) There is a valid string pointer
* (Pointer to a NULL string is allowed)
*/
if (source_desc->string.pointer) {
dest_desc->string.pointer =
ACPI_ALLOCATE((acpi_size)source_desc->string.length + 1);
if (!dest_desc->string.pointer) {
return (AE_NO_MEMORY);
}
/* Copy the actual string data */
ACPI_MEMCPY(dest_desc->string.pointer,
source_desc->string.pointer,
(acpi_size) source_desc->string.length + 1);
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/*
* We copied the reference object, so we now must add a reference
* to the object pointed to by the reference
*
* A DDBHandle reference (from Load/load_table) is a special
* reference; it does not have a Reference.Object, so it does not
* need its reference count increased
*/
if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
break;
}
acpi_ut_add_reference(source_desc->reference.object);
break;
case ACPI_TYPE_REGION:
/*
* We copied the Region Handler, so we now must add a reference
*/
if (dest_desc->region.handler) {
acpi_ut_add_reference(dest_desc->region.handler);
}
break;
/*
* For Mutex and Event objects, we cannot simply copy the underlying
* OS object. We must create a new one.
*/
case ACPI_TYPE_MUTEX:
status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
if (ACPI_FAILURE(status)) {
return status;
}
break;
case ACPI_TYPE_EVENT:
status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
&dest_desc->event.os_semaphore);
if (ACPI_FAILURE(status)) {
return status;
}
break;
default:
/* Nothing to do for other simple objects */
break;
}
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ielement_to_ielement
*
* PARAMETERS: acpi_pkg_callback
*
* RETURN: Status
*
* DESCRIPTION: Copy one package element to another package element
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
union acpi_operand_object *source_object,
union acpi_generic_state *state,
void *context)
{
acpi_status status = AE_OK;
u32 this_index;
union acpi_operand_object **this_target_ptr;
union acpi_operand_object *target_object;
ACPI_FUNCTION_ENTRY();
this_index = state->pkg.index;
this_target_ptr = (union acpi_operand_object **)
&state->pkg.dest_object->package.elements[this_index];
switch (object_type) {
case ACPI_COPY_TYPE_SIMPLE:
/* A null source object indicates a (legal) null package element */
if (source_object) {
/*
* This is a simple object, just copy it
*/
target_object =
acpi_ut_create_internal_object(source_object->common.type);
if (!target_object) {
return (AE_NO_MEMORY);
}
status =
acpi_ut_copy_simple_object(source_object,
target_object);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
*this_target_ptr = target_object;
} else {
/* Pass through a null element */
*this_target_ptr = NULL;
}
break;
case ACPI_COPY_TYPE_PACKAGE:
/*
* This object is a package - go down another nesting level
* Create and build the package object
*/
target_object =
acpi_ut_create_package_object(source_object->package.count);
if (!target_object) {
return (AE_NO_MEMORY);
}
target_object->common.flags = source_object->common.flags;
/* Pass the new package object back to the package walk routine */
state->pkg.this_target_obj = target_object;
/* Store the object pointer in the parent package object */
*this_target_ptr = target_object;
break;
default:
return (AE_BAD_PARAMETER);
}
return (status);
error_exit:
acpi_ut_remove_reference(target_object);
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_ipackage_to_ipackage
*
* PARAMETERS: source_obj - Pointer to the source package object
* dest_obj - Where the internal object is returned
* walk_state - Current Walk state descriptor
*
* RETURN: Status
*
* DESCRIPTION: This function is called to copy an internal package object
* into another internal package object.
*
******************************************************************************/
static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
union acpi_operand_object *dest_obj,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
dest_obj->common.type = source_obj->common.type;
dest_obj->common.flags = source_obj->common.flags;
dest_obj->package.count = source_obj->package.count;
/*
* Create the object array and walk the source package tree
*/
dest_obj->package.elements =
ACPI_ALLOCATE_ZEROED(((acpi_size)source_obj->package.count + 1) *
sizeof(void *));
if (!dest_obj->package.elements) {
ACPI_ERROR((AE_INFO, "Package allocation failure"));
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*
* Copy the package element-by-element by walking the package "tree".
* This handles nested packages of arbitrary depth.
*/
status = acpi_ut_walk_package_tree(source_obj, dest_obj,
acpi_ut_copy_ielement_to_ielement,
walk_state);
if (ACPI_FAILURE(status)) {
/* On failure, delete the destination package object */
acpi_ut_remove_reference(dest_obj);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_copy_iobject_to_iobject
*
* PARAMETERS: source_desc - The internal object to be copied
* dest_desc - Where the copied object is returned
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Copy an internal object to a new internal object
*
******************************************************************************/
acpi_status
acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
union acpi_operand_object **dest_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
/* Create the top level object */
*dest_desc = acpi_ut_create_internal_object(source_desc->common.type);
if (!*dest_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Copy the object and possible subobjects */
if (source_desc->common.type == ACPI_TYPE_PACKAGE) {
status =
acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
walk_state);
} else {
status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
}
return_ACPI_STATUS(status);
}
| gpl-2.0 |
faust93/F93_LGE975_KK_Kernel | drivers/input/touchscreen/tsc2007.c | 3482 | 12486 | /*
* drivers/input/touchscreen/tsc2007.c
*
* Copyright (c) 2008 MtekVision Co., Ltd.
* Kwangwoo Lee <kwlee@mtekvision.com>
*
* Using code from:
* - ads7846.c
* Copyright (c) 2005 David Brownell
* Copyright (c) 2006 Nokia Corporation
* - corgi_ts.c
* Copyright (C) 2004-2005 Richard Purdie
* - omap_ts.[hc], ads7846.h, ts_osk.c
* Copyright (C) 2002 MontaVista Software
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2005 Dirk Behme
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
#include <linux/pm.h>
#if defined(CONFIG_HAS_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#define TSC2007_SUSPEND_LEVEL 1
#endif
#define TSC2007_MEASURE_TEMP0 (0x0 << 4)
#define TSC2007_MEASURE_AUX (0x2 << 4)
#define TSC2007_MEASURE_TEMP1 (0x4 << 4)
#define TSC2007_ACTIVATE_XN (0x8 << 4)
#define TSC2007_ACTIVATE_YN (0x9 << 4)
#define TSC2007_ACTIVATE_YP_XN (0xa << 4)
#define TSC2007_SETUP (0xb << 4)
#define TSC2007_MEASURE_X (0xc << 4)
#define TSC2007_MEASURE_Y (0xd << 4)
#define TSC2007_MEASURE_Z1 (0xe << 4)
#define TSC2007_MEASURE_Z2 (0xf << 4)
#define TSC2007_POWER_OFF_IRQ_EN (0x0 << 2)
#define TSC2007_ADC_ON_IRQ_DIS0 (0x1 << 2)
#define TSC2007_ADC_OFF_IRQ_EN (0x2 << 2)
#define TSC2007_ADC_ON_IRQ_DIS1 (0x3 << 2)
#define TSC2007_12BIT (0x0 << 1)
#define TSC2007_8BIT (0x1 << 1)
#define MAX_12BIT ((1 << 12) - 1)
#define ADC_ON_12BIT (TSC2007_12BIT | TSC2007_ADC_ON_IRQ_DIS0)
#define READ_Y (ADC_ON_12BIT | TSC2007_MEASURE_Y)
#define READ_Z1 (ADC_ON_12BIT | TSC2007_MEASURE_Z1)
#define READ_Z2 (ADC_ON_12BIT | TSC2007_MEASURE_Z2)
#define READ_X (ADC_ON_12BIT | TSC2007_MEASURE_X)
#define PWRDOWN (TSC2007_12BIT | TSC2007_POWER_OFF_IRQ_EN)
struct ts_event {
u16 x;
u16 y;
u16 z1, z2;
};
struct tsc2007 {
struct input_dev *input;
char phys[32];
struct delayed_work work;
struct i2c_client *client;
u16 model;
u16 x_plate_ohms;
u16 max_rt;
unsigned long poll_delay;
unsigned long poll_period;
u16 min_x;
u16 max_x;
u16 min_y;
u16 max_y;
bool pendown;
int irq;
bool invert_x;
bool invert_y;
bool invert_z1;
bool invert_z2;
int (*get_pendown_state)(void);
void (*clear_penirq)(void);
int (*power_shutdown)(bool);
#if defined(CONFIG_HAS_EARLYSUSPEND)
struct early_suspend early_suspend;
#endif
};
static inline int tsc2007_xfer(struct tsc2007 *tsc, u8 cmd)
{
s32 data;
u16 val;
data = i2c_smbus_read_word_data(tsc->client, cmd);
if (data < 0) {
dev_err(&tsc->client->dev, "i2c io error: %d\n", data);
return data;
}
/* The protocol and raw data format from i2c interface:
* S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
* Where DataLow has [D11-D4], DataHigh has [D3-D0 << 4 | Dummy 4bit].
*/
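/* Worked example (hypothetical sample): for a 12-bit result of 0xABC the
* chip sends DataLow = 0xAB and DataHigh = 0xC0, so the SMBus word read
* returns 0xC0AB; swab16() gives 0xABC0 and the shift by 4 yields 0x0ABC.
*/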
val = swab16(data) >> 4;
dev_dbg(&tsc->client->dev, "data: 0x%x, val: 0x%x\n", data, val);
return val;
}
static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc)
{
/* y- still on; turn on only y+ (and ADC) */
tc->y = tsc2007_xfer(tsc, READ_Y);
/* turn y- off, x+ on, then leave in lowpower */
tc->x = tsc2007_xfer(tsc, READ_X);
/* turn y+ off, x- on; we'll use formula #1 */
tc->z1 = tsc2007_xfer(tsc, READ_Z1);
tc->z2 = tsc2007_xfer(tsc, READ_Z2);
if (tsc->invert_x)
tc->x = MAX_12BIT - tc->x;
if (tsc->invert_y)
tc->y = MAX_12BIT - tc->y;
if (tsc->invert_z1)
tc->z1 = MAX_12BIT - tc->z1;
if (tsc->invert_z2)
tc->z2 = MAX_12BIT - tc->z2;
/* Prepare for next touch reading - power down ADC, enable PENIRQ */
tsc2007_xfer(tsc, PWRDOWN);
}
static u32 tsc2007_calculate_pressure(struct tsc2007 *tsc, struct ts_event *tc)
{
u32 rt = 0;
/* range filtering */
if (tc->x == MAX_12BIT)
tc->x = 0;
if (likely(tc->x && tc->z1)) {
/* compute touch pressure resistance using equation #1 */
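/* For reference (TSC2007-style equation #1, sketched here):
* Rtouch = Rxplate * (X / 4096) * (Z2 / Z1 - 1).
* The integer math below is the same expression rearranged so the
* division happens last; the final rounding shift divides by 4096.
*/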
rt = tc->z2 - tc->z1;
rt *= tc->x;
rt *= tsc->x_plate_ohms;
rt /= tc->z1;
rt = (rt + 2047) >> 12;
}
return rt;
}
static void tsc2007_send_up_event(struct tsc2007 *tsc)
{
struct input_dev *input = tsc->input;
dev_dbg(&tsc->client->dev, "UP\n");
input_report_key(input, BTN_TOUCH, 0);
input_report_abs(input, ABS_PRESSURE, 0);
input_sync(input);
}
static void tsc2007_work(struct work_struct *work)
{
struct tsc2007 *ts =
container_of(to_delayed_work(work), struct tsc2007, work);
bool debounced = false;
struct ts_event tc;
u32 rt;
/*
* NOTE: We can't rely on the pressure to determine the pen down
* state, even though this controller has a pressure sensor.
* The pressure value can fluctuate for quite a while after
* lifting the pen and in some cases may not even settle at the
* expected value.
*
* The only safe way to check for the pen up condition is in the
* work function by reading the pen signal state (it's a GPIO
* and IRQ). Unfortunately such a callback is not always available;
* in that case we have to rely on the pressure anyway.
*/
if (ts->get_pendown_state) {
if (unlikely(!ts->get_pendown_state())) {
tsc2007_send_up_event(ts);
ts->pendown = false;
goto out;
}
dev_dbg(&ts->client->dev, "pen is still down\n");
}
tsc2007_read_values(ts, &tc);
rt = tsc2007_calculate_pressure(ts, &tc);
if (rt > ts->max_rt) {
/*
* The sample was found inconsistent by debouncing, or the pressure
* is beyond the maximum. Don't report it to user space; repeat
* the measurement at least once more.
*/
dev_dbg(&ts->client->dev, "ignored pressure %u\n", rt);
debounced = true;
goto out;
}
if (rt) {
struct input_dev *input = ts->input;
if (!ts->pendown) {
dev_dbg(&ts->client->dev, "DOWN\n");
input_report_key(input, BTN_TOUCH, 1);
ts->pendown = true;
}
input_report_abs(input, ABS_X, tc.x);
input_report_abs(input, ABS_Y, tc.y);
input_report_abs(input, ABS_PRESSURE, rt);
input_sync(input);
dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
tc.x, tc.y, rt);
} else if (!ts->get_pendown_state && ts->pendown) {
/*
* We don't have a callback to check the pendown state, so we
* have to assume that, since the reported pressure is 0, the
* pen was lifted.
*/
tsc2007_send_up_event(ts);
ts->pendown = false;
}
out:
if (ts->pendown || debounced)
schedule_delayed_work(&ts->work,
msecs_to_jiffies(ts->poll_period));
else
enable_irq(ts->irq);
}
static irqreturn_t tsc2007_irq(int irq, void *handle)
{
struct tsc2007 *ts = handle;
if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
disable_irq_nosync(ts->irq);
schedule_delayed_work(&ts->work,
msecs_to_jiffies(ts->poll_delay));
}
if (ts->clear_penirq)
ts->clear_penirq();
return IRQ_HANDLED;
}
static void tsc2007_free_irq(struct tsc2007 *ts)
{
free_irq(ts->irq, ts);
if (cancel_delayed_work_sync(&ts->work)) {
/*
* Work was pending, therefore we need to enable
* IRQ here to balance the disable_irq() done in the
* interrupt handler.
*/
enable_irq(ts->irq);
}
}
#ifdef CONFIG_PM
static int tsc2007_suspend(struct device *dev)
{
int rc;
struct tsc2007 *ts = dev_get_drvdata(dev);
disable_irq(ts->irq);
if (cancel_delayed_work_sync(&ts->work))
enable_irq(ts->irq);
if (ts->power_shutdown) {
rc = ts->power_shutdown(true);
if (rc) {
pr_err("%s: Power off failed, suspend failed (%d)\n",
__func__, rc);
return rc;
}
}
return 0;
}
static int tsc2007_resume(struct device *dev)
{
int rc;
struct tsc2007 *ts = dev_get_drvdata(dev);
if (ts->power_shutdown) {
rc = ts->power_shutdown(false);
if (rc) {
pr_err("%s: Power on failed, resume failed (%d)\n",
__func__, rc);
return rc;
}
}
enable_irq(ts->irq);
return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void tsc2007_early_suspend(struct early_suspend *h)
{
struct tsc2007 *ts = container_of(h, struct tsc2007, early_suspend);
tsc2007_suspend(&ts->client->dev);
}
static void tsc2007_late_resume(struct early_suspend *h)
{
struct tsc2007 *ts = container_of(h, struct tsc2007, early_suspend);
tsc2007_resume(&ts->client->dev);
}
#endif
static const struct dev_pm_ops tsc2007_pm_ops = {
#ifndef CONFIG_HAS_EARLYSUSPEND
.suspend = tsc2007_suspend,
.resume = tsc2007_resume,
#endif
};
#endif
static int __devinit tsc2007_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tsc2007 *ts;
struct tsc2007_platform_data *pdata = client->dev.platform_data;
struct input_dev *input_dev;
int err;
if (!pdata) {
dev_err(&client->dev, "platform data is required!\n");
return -EINVAL;
}
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_WORD_DATA))
return -EIO;
ts = kzalloc(sizeof(struct tsc2007), GFP_KERNEL);
input_dev = input_allocate_device();
if (!ts || !input_dev) {
err = -ENOMEM;
goto err_free_mem;
}
ts->client = client;
ts->irq = client->irq;
ts->input = input_dev;
INIT_DELAYED_WORK(&ts->work, tsc2007_work);
ts->model = pdata->model;
ts->x_plate_ohms = pdata->x_plate_ohms;
ts->max_rt = pdata->max_rt ? : MAX_12BIT;
ts->poll_delay = pdata->poll_delay ? : 1;
ts->poll_period = pdata->poll_period ? : 1;
ts->get_pendown_state = pdata->get_pendown_state;
ts->clear_penirq = pdata->clear_penirq;
ts->invert_x = pdata->invert_x;
ts->invert_y = pdata->invert_y;
ts->invert_z1 = pdata->invert_z1;
ts->invert_z2 = pdata->invert_z2;
ts->min_x = pdata->min_x ? pdata->min_x : 0;
ts->max_x = pdata->max_x ? pdata->max_x : MAX_12BIT;
ts->min_y = pdata->min_y ? pdata->min_y : 0;
ts->max_y = pdata->max_y ? pdata->max_y : MAX_12BIT;
ts->power_shutdown = pdata->power_shutdown;
snprintf(ts->phys, sizeof(ts->phys),
"%s/input0", dev_name(&client->dev));
input_dev->name = "TSC2007 Touchscreen";
input_dev->phys = ts->phys;
input_dev->id.bustype = BUS_I2C;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_abs_params(input_dev, ABS_X, ts->min_x,
ts->max_x, pdata->fuzzx, 0);
input_set_abs_params(input_dev, ABS_Y, ts->min_y,
ts->max_y, pdata->fuzzy, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT,
pdata->fuzzz, 0);
if (pdata->init_platform_hw)
pdata->init_platform_hw();
err = request_irq(ts->irq, tsc2007_irq, pdata->irq_flags,
client->dev.driver->name, ts);
if (err < 0) {
dev_err(&client->dev, "irq %d busy?\n", ts->irq);
goto err_free_mem;
}
/* Prepare for touch readings - power down ADC and enable PENIRQ */
err = tsc2007_xfer(ts, PWRDOWN);
if (err < 0)
goto err_free_irq;
err = input_register_device(input_dev);
if (err)
goto err_free_irq;
#ifdef CONFIG_HAS_EARLYSUSPEND
ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
TSC2007_SUSPEND_LEVEL;
ts->early_suspend.suspend = tsc2007_early_suspend;
ts->early_suspend.resume = tsc2007_late_resume;
register_early_suspend(&ts->early_suspend);
#endif
i2c_set_clientdata(client, ts);
return 0;
err_free_irq:
tsc2007_free_irq(ts);
if (pdata->exit_platform_hw)
pdata->exit_platform_hw();
err_free_mem:
input_free_device(input_dev);
kfree(ts);
return err;
}
static int __devexit tsc2007_remove(struct i2c_client *client)
{
struct tsc2007 *ts = i2c_get_clientdata(client);
struct tsc2007_platform_data *pdata = client->dev.platform_data;
tsc2007_free_irq(ts);
if (pdata->exit_platform_hw)
pdata->exit_platform_hw();
#ifdef CONFIG_HAS_EARLYSUSPEND
unregister_early_suspend(&ts->early_suspend);
#endif
input_unregister_device(ts->input);
kfree(ts);
return 0;
}
static const struct i2c_device_id tsc2007_idtable[] = {
{ "tsc2007", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tsc2007_idtable);
static struct i2c_driver tsc2007_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "tsc2007",
#ifdef CONFIG_PM
.pm = &tsc2007_pm_ops,
#endif
},
.id_table = tsc2007_idtable,
.probe = tsc2007_probe,
.remove = __devexit_p(tsc2007_remove),
};
module_i2c_driver(tsc2007_driver);
MODULE_AUTHOR("Kwangwoo Lee <kwlee@mtekvision.com>");
MODULE_DESCRIPTION("TSC2007 TouchScreen Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_htc_msm8994 | drivers/isdn/mISDN/layer1.c | 4762 | 9951 | /*
*
* Author Karsten Keil <kkeil@novell.com>
*
* Copyright 2008 by Karsten Keil <kkeil@novell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>
#include "core.h"
#include "layer1.h"
#include "fsm.h"
static u_int *debug;
struct layer1 {
u_long Flags;
struct FsmInst l1m;
struct FsmTimer timer3;
struct FsmTimer timerX;
int delay;
int t3_value;
struct dchannel *dch;
dchannel_l1callback *dcb;
};
#define TIMER3_DEFAULT_VALUE 7000
static
struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
enum {
ST_L1_F2,
ST_L1_F3,
ST_L1_F4,
ST_L1_F5,
ST_L1_F6,
ST_L1_F7,
ST_L1_F8,
};
#define L1S_STATE_COUNT (ST_L1_F8 + 1)
static char *strL1SState[] =
{
"ST_L1_F2",
"ST_L1_F3",
"ST_L1_F4",
"ST_L1_F5",
"ST_L1_F6",
"ST_L1_F7",
"ST_L1_F8",
};
enum {
EV_PH_ACTIVATE,
EV_PH_DEACTIVATE,
EV_RESET_IND,
EV_DEACT_CNF,
EV_DEACT_IND,
EV_POWER_UP,
EV_ANYSIG_IND,
EV_INFO2_IND,
EV_INFO4_IND,
EV_TIMER_DEACT,
EV_TIMER_ACT,
EV_TIMER3,
};
#define L1_EVENT_COUNT (EV_TIMER3 + 1)
static char *strL1Event[] =
{
"EV_PH_ACTIVATE",
"EV_PH_DEACTIVATE",
"EV_RESET_IND",
"EV_DEACT_CNF",
"EV_DEACT_IND",
"EV_POWER_UP",
"EV_ANYSIG_IND",
"EV_INFO2_IND",
"EV_INFO4_IND",
"EV_TIMER_DEACT",
"EV_TIMER_ACT",
"EV_TIMER3",
};
static void
l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
struct layer1 *l1 = fi->userdata;
struct va_format vaf;
va_list va;
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
va_end(va);
}
static void
l1_reset(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_F3);
}
static void
l1_deact_cnf(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F3);
if (test_bit(FLG_L1_ACTIVATING, &l1->Flags))
l1->dcb(l1->dch, HW_POWERUP_REQ);
}
static void
l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F3);
mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2);
test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
}
static void
l1_power_up_s(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
mISDN_FsmChangeState(fi, ST_L1_F4);
l1->dcb(l1->dch, INFO3_P8);
} else
mISDN_FsmChangeState(fi, ST_L1_F3);
}
static void
l1_go_F5(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_F5);
}
static void
l1_go_F8(struct FsmInst *fi, int event, void *arg)
{
mISDN_FsmChangeState(fi, ST_L1_F8);
}
static void
l1_info2_ind(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F6);
l1->dcb(l1->dch, INFO3_P8);
}
static void
l1_info4_ind(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmChangeState(fi, ST_L1_F7);
l1->dcb(l1->dch, INFO3_P8);
if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
mISDN_FsmDelTimer(&l1->timerX, 4);
if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
mISDN_FsmDelTimer(&l1->timer3, 3);
mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2);
test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
}
}
static void
l1_timer3(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags);
if (test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
l1->dcb(l1->dch, HW_D_NOBLOCKED);
l1->dcb(l1->dch, PH_DEACTIVATE_IND);
}
if (l1->l1m.state != ST_L1_F6) {
mISDN_FsmChangeState(fi, ST_L1_F3);
/* do not force anything here, we need to send INFO 0 */
}
}
static void
l1_timer_act(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
test_and_clear_bit(FLG_L1_ACTTIMER, &l1->Flags);
test_and_set_bit(FLG_L1_ACTIVATED, &l1->Flags);
l1->dcb(l1->dch, PH_ACTIVATE_IND);
}
static void
l1_timer_deact(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags);
test_and_clear_bit(FLG_L1_ACTIVATED, &l1->Flags);
if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
l1->dcb(l1->dch, HW_D_NOBLOCKED);
l1->dcb(l1->dch, PH_DEACTIVATE_IND);
l1->dcb(l1->dch, HW_DEACT_REQ);
}
static void
l1_activate_s(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2);
test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
/* Tell HW to send INFO 1 */
l1->dcb(l1->dch, HW_RESET_REQ);
}
static void
l1_activate_no(struct FsmInst *fi, int event, void *arg)
{
struct layer1 *l1 = fi->userdata;
if ((!test_bit(FLG_L1_DEACTTIMER, &l1->Flags)) &&
(!test_bit(FLG_L1_T3RUN, &l1->Flags))) {
test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags);
if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
l1->dcb(l1->dch, HW_D_NOBLOCKED);
l1->dcb(l1->dch, PH_DEACTIVATE_IND);
}
}
static struct FsmNode L1SFnList[] =
{
{ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s},
{ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no},
{ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no},
{ST_L1_F3, EV_RESET_IND, l1_reset},
{ST_L1_F4, EV_RESET_IND, l1_reset},
{ST_L1_F5, EV_RESET_IND, l1_reset},
{ST_L1_F6, EV_RESET_IND, l1_reset},
{ST_L1_F7, EV_RESET_IND, l1_reset},
{ST_L1_F8, EV_RESET_IND, l1_reset},
{ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf},
{ST_L1_F6, EV_DEACT_IND, l1_deact_req_s},
{ST_L1_F7, EV_DEACT_IND, l1_deact_req_s},
{ST_L1_F8, EV_DEACT_IND, l1_deact_req_s},
{ST_L1_F3, EV_POWER_UP, l1_power_up_s},
{ST_L1_F4, EV_ANYSIG_IND, l1_go_F5},
{ST_L1_F6, EV_ANYSIG_IND, l1_go_F8},
{ST_L1_F7, EV_ANYSIG_IND, l1_go_F8},
{ST_L1_F3, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F4, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F5, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F7, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F8, EV_INFO2_IND, l1_info2_ind},
{ST_L1_F3, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F4, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F5, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F6, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F8, EV_INFO4_IND, l1_info4_ind},
{ST_L1_F3, EV_TIMER3, l1_timer3},
{ST_L1_F4, EV_TIMER3, l1_timer3},
{ST_L1_F5, EV_TIMER3, l1_timer3},
{ST_L1_F6, EV_TIMER3, l1_timer3},
{ST_L1_F8, EV_TIMER3, l1_timer3},
{ST_L1_F7, EV_TIMER_ACT, l1_timer_act},
{ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact},
{ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact},
};
static void
release_l1(struct layer1 *l1) {
mISDN_FsmDelTimer(&l1->timerX, 0);
mISDN_FsmDelTimer(&l1->timer3, 0);
if (l1->dch)
l1->dch->l1 = NULL;
module_put(THIS_MODULE);
kfree(l1);
}
int
l1_event(struct layer1 *l1, u_int event)
{
int err = 0;
if (!l1)
return -EINVAL;
switch (event) {
case HW_RESET_IND:
mISDN_FsmEvent(&l1->l1m, EV_RESET_IND, NULL);
break;
case HW_DEACT_IND:
mISDN_FsmEvent(&l1->l1m, EV_DEACT_IND, NULL);
break;
case HW_POWERUP_IND:
mISDN_FsmEvent(&l1->l1m, EV_POWER_UP, NULL);
break;
case HW_DEACT_CNF:
mISDN_FsmEvent(&l1->l1m, EV_DEACT_CNF, NULL);
break;
case ANYSIGNAL:
mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
break;
case LOSTFRAMING:
mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
break;
case INFO2:
mISDN_FsmEvent(&l1->l1m, EV_INFO2_IND, NULL);
break;
case INFO4_P8:
mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
break;
case INFO4_P10:
mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
break;
case PH_ACTIVATE_REQ:
if (test_bit(FLG_L1_ACTIVATED, &l1->Flags))
l1->dcb(l1->dch, PH_ACTIVATE_IND);
else {
test_and_set_bit(FLG_L1_ACTIVATING, &l1->Flags);
mISDN_FsmEvent(&l1->l1m, EV_PH_ACTIVATE, NULL);
}
break;
case CLOSE_CHANNEL:
release_l1(l1);
break;
default:
if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
int val = event & HW_TIMER3_VMASK;
if (val < 5)
val = 5;
if (val > 30)
val = 30;
l1->t3_value = val;
break;
}
if (*debug & DEBUG_L1)
printk(KERN_DEBUG "%s %x unhandled\n",
__func__, event);
err = -EINVAL;
}
return err;
}
EXPORT_SYMBOL(l1_event);
int
create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
struct layer1 *nl1;
nl1 = kzalloc(sizeof(struct layer1), GFP_ATOMIC);
if (!nl1) {
printk(KERN_ERR "kmalloc struct layer1 failed\n");
return -ENOMEM;
}
nl1->l1m.fsm = &l1fsm_s;
nl1->l1m.state = ST_L1_F3;
nl1->Flags = 0;
nl1->t3_value = TIMER3_DEFAULT_VALUE;
nl1->l1m.debug = *debug & DEBUG_L1_FSM;
nl1->l1m.userdata = nl1;
nl1->l1m.userint = 0;
nl1->l1m.printdebug = l1m_debug;
nl1->dch = dch;
nl1->dcb = dcb;
mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3);
mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX);
__module_get(THIS_MODULE);
dch->l1 = nl1;
return 0;
}
EXPORT_SYMBOL(create_l1);
int
l1_init(u_int *deb)
{
debug = deb;
l1fsm_s.state_count = L1S_STATE_COUNT;
l1fsm_s.event_count = L1_EVENT_COUNT;
l1fsm_s.strEvent = strL1Event;
l1fsm_s.strState = strL1SState;
mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
return 0;
}
void
l1_cleanup(void)
{
mISDN_FsmFree(&l1fsm_s);
}
| gpl-2.0 |
krizky82/Xperia-2011-Kernel-2.6.32.X-ICS | arch/frv/kernel/frv_ksyms.c | 9114 | 3029 | #include <linux/module.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/checksum.h>
#include <asm/hardirq.h>
#include <asm/cacheflush.h>
extern long __memcpy_user(void *dst, const void *src, size_t count);
extern long __memset_user(void *dst, const void *src, size_t count);
/* platform dependent support */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(ip_fast_csum);
#if 0
EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(local_bh_count);
#endif
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
EXPORT_SYMBOL(__page_offset);
EXPORT_SYMBOL(__memcpy_user);
EXPORT_SYMBOL(__memset_user);
EXPORT_SYMBOL(frv_dcache_writeback);
EXPORT_SYMBOL(frv_cache_invalidate);
EXPORT_SYMBOL(frv_icache_invalidate);
EXPORT_SYMBOL(frv_cache_wback_inv);
#ifndef CONFIG_MMU
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
#endif
EXPORT_SYMBOL(__debug_bug_trap);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't going to change any time soon, so
it's OK to leave them out of version control. */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
EXPORT_SYMBOL(atomic_test_and_OR_mask);
EXPORT_SYMBOL(atomic_test_and_XOR_mask);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(__xchg_32);
EXPORT_SYMBOL(__cmpxchg_32);
#endif
EXPORT_SYMBOL(atomic64_add_return);
EXPORT_SYMBOL(atomic64_sub_return);
EXPORT_SYMBOL(__xchg_64);
EXPORT_SYMBOL(__cmpxchg_64);
EXPORT_SYMBOL(__debug_bug_printk);
EXPORT_SYMBOL(__delay_loops_MHz);
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
* doesn't really matter since they're not versioned).
*/
extern void __gcc_bcmp(void);
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __cmpdi2(void);
extern void __divdi3(void);
extern void __lshrdi3(void);
extern void __moddi3(void);
extern void __muldi3(void);
extern void __mulll(void);
extern void __umulll(void);
extern void __negdi2(void);
extern void __ucmpdi2(void);
extern void __udivdi3(void);
extern void __udivmoddi4(void);
extern void __umoddi3(void);
/* gcc lib functions */
//EXPORT_SYMBOL(__gcc_bcmp);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
//EXPORT_SYMBOL(__cmpdi2);
//EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__lshrdi3);
//EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulll);
EXPORT_SYMBOL(__umulll);
EXPORT_SYMBOL(__negdi2);
EXPORT_SYMBOL(__ucmpdi2);
//EXPORT_SYMBOL(__udivdi3);
//EXPORT_SYMBOL(__udivmoddi4);
//EXPORT_SYMBOL(__umoddi3);
| gpl-2.0 |
Split-Screen/android_kernel_samsung_jf | sound/core/oss/copy.c | 15514 | 2883 | /*
* Linear conversion Plug-In
* Copyright (c) 2000 by Abramo Bagnara <abramo@alsa-project.org>
*
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "pcm_plugin.h"
static snd_pcm_sframes_t copy_transfer(struct snd_pcm_plugin *plugin,
const struct snd_pcm_plugin_channel *src_channels,
struct snd_pcm_plugin_channel *dst_channels,
snd_pcm_uframes_t frames)
{
unsigned int channel;
unsigned int nchannels;
if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
return -ENXIO;
if (frames == 0)
return 0;
nchannels = plugin->src_format.channels;
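/*
* Channel area offsets are expressed in bits; each area must start and
* advance on byte boundaries, hence the % 8 sanity checks below.
*/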
for (channel = 0; channel < nchannels; channel++) {
if (snd_BUG_ON(src_channels->area.first % 8 ||
src_channels->area.step % 8))
return -ENXIO;
if (snd_BUG_ON(dst_channels->area.first % 8 ||
dst_channels->area.step % 8))
return -ENXIO;
if (!src_channels->enabled) {
if (dst_channels->wanted)
snd_pcm_area_silence(&dst_channels->area, 0, frames, plugin->dst_format.format);
dst_channels->enabled = 0;
continue;
}
dst_channels->enabled = 1;
snd_pcm_area_copy(&src_channels->area, 0, &dst_channels->area, 0, frames, plugin->src_format.format);
src_channels++;
dst_channels++;
}
return frames;
}
int snd_pcm_plugin_build_copy(struct snd_pcm_substream *plug,
struct snd_pcm_plugin_format *src_format,
struct snd_pcm_plugin_format *dst_format,
struct snd_pcm_plugin **r_plugin)
{
int err;
struct snd_pcm_plugin *plugin;
int width;
if (snd_BUG_ON(!r_plugin))
return -ENXIO;
*r_plugin = NULL;
if (snd_BUG_ON(src_format->format != dst_format->format))
return -ENXIO;
if (snd_BUG_ON(src_format->rate != dst_format->rate))
return -ENXIO;
if (snd_BUG_ON(src_format->channels != dst_format->channels))
return -ENXIO;
width = snd_pcm_format_physical_width(src_format->format);
if (snd_BUG_ON(width <= 0))
return -ENXIO;
err = snd_pcm_plugin_build(plug, "copy", src_format, dst_format,
0, &plugin);
if (err < 0)
return err;
plugin->transfer = copy_transfer;
*r_plugin = plugin;
return 0;
}
| gpl-2.0 |
jevinskie/ps3-gcc | gcc/testsuite/gfortran.dg/iso_fortran_env_2.f90 | 155 | 1137 | ! { dg-do compile }
module iso_fortran_env
logical :: x
end module iso_fortran_env
subroutine bar1
use , intrinsic :: iso_fortran_env
print *, character_storage_size
end
subroutine bar2
use, intrinsic :: iso_fortran_env
print *, character_storage_size
end
subroutine bar3
use,intrinsic :: iso_fortran_env
print *, character_storage_size
end
subroutine bar4
use,intrinsic::iso_fortran_env
print *, character_storage_size
end
subroutine bar5
use ,intrinsic :: iso_fortran_env
print *, character_storage_size
end
subroutine foo1
use :: iso_fortran_env
print *, x
end
subroutine foo2
use:: iso_fortran_env
print *, x
end
subroutine foo3
use::iso_fortran_env
print *, x
end
subroutine foo4
use ::iso_fortran_env
print *, x
end
subroutine gee1
use , non_intrinsic :: iso_fortran_env
print *, x
end
subroutine gee2
use, non_intrinsic :: iso_fortran_env
print *, x
end
subroutine gee3
use,non_intrinsic :: iso_fortran_env
print *, x
end
subroutine gee4
use,non_intrinsic::iso_fortran_env
print *, x
end
subroutine gee5
use ,non_intrinsic :: iso_fortran_env
print *, x
end
| gpl-2.0 |
dmitriy103/amethyst-bravo-kernel | arch/arm/mach-versatile/versatile_pb.c | 155 | 3052 | /*
* linux/arch/arm/mach-versatile/versatile_pb.c
*
* Copyright (C) 2004 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "core.h"
#if 1
#define IRQ_MMCI1A IRQ_VICSOURCE23
#else
#define IRQ_MMCI1A IRQ_SIC_MMCI1A
#endif
static struct mmci_platform_data mmc1_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
.gpio_wp = -1,
.gpio_cd = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = IRQ_GPIO2_START,
};
static struct pl061_platform_data gpio3_plat_data = {
.gpio_base = 24,
.irq_base = IRQ_GPIO3_START,
};
#define UART3_IRQ { IRQ_SIC_UART3, NO_IRQ }
#define UART3_DMA { 0x86, 0x87 }
#define SCI1_IRQ { IRQ_SIC_SCI3, NO_IRQ }
#define SCI1_DMA { 0x88, 0x89 }
#define MMCI1_IRQ { IRQ_MMCI1A, IRQ_SIC_MMCI1B }
#define MMCI1_DMA { 0x85, 0 }
/*
* These devices are connected via the core APB bridge
*/
#define GPIO2_IRQ { IRQ_GPIOINT2, NO_IRQ }
#define GPIO2_DMA { 0, 0 }
#define GPIO3_IRQ { IRQ_GPIOINT3, NO_IRQ }
#define GPIO3_DMA { 0, 0 }
/*
* These devices are connected via the DMA APB bridge
*/
/* FPGA Primecells */
AMBA_DEVICE(uart3, "fpga:09", UART3, NULL);
AMBA_DEVICE(sci1, "fpga:0a", SCI1, NULL);
AMBA_DEVICE(mmc1, "fpga:0b", MMCI1, &mmc1_plat_data);
/* DevChip Primecells */
AMBA_DEVICE(gpio2, "dev:e6", GPIO2, &gpio2_plat_data);
AMBA_DEVICE(gpio3, "dev:e7", GPIO3, &gpio3_plat_data);
static struct amba_device *amba_devs[] __initdata = {
&uart3_device,
&gpio2_device,
&gpio3_device,
&sci1_device,
&mmc1_device,
};
static void __init versatile_pb_init(void)
{
int i;
versatile_init();
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
amba_device_register(d, &iomem_resource);
}
}
MACHINE_START(VERSATILE_PB, "ARM-Versatile PB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.boot_params = 0x00000100,
.map_io = versatile_map_io,
.init_irq = versatile_init_irq,
.timer = &versatile_timer,
.init_machine = versatile_pb_init,
MACHINE_END
| gpl-2.0 |
florpor/nvmeqemu | slirp/tcp_output.c | 155 | 14394 | /*
* Copyright (c) 1982, 1986, 1988, 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)tcp_output.c 8.3 (Berkeley) 12/30/93
* tcp_output.c,v 1.3 1994/09/15 10:36:55 davidg Exp
*/
/*
* Changes and additions relating to SLiRP
* Copyright (c) 1995 Danny Gasparovski.
*
* Please read the file COPYRIGHT for the
* terms and conditions of the copyright.
*/
#include <slirp.h>
static const u_char tcp_outflags[TCP_NSTATES] = {
TH_RST|TH_ACK, 0, TH_SYN, TH_SYN|TH_ACK,
TH_ACK, TH_ACK, TH_FIN|TH_ACK, TH_FIN|TH_ACK,
TH_FIN|TH_ACK, TH_ACK, TH_ACK,
};
#define MAX_TCPOPTLEN 32 /* max # bytes that go in options */
/*
* Tcp output routine: figure out what should be sent and send it.
*/
int
tcp_output(struct tcpcb *tp)
{
register struct socket *so = tp->t_socket;
register long len, win;
int off, flags, error;
register struct mbuf *m;
register struct tcpiphdr *ti;
u_char opt[MAX_TCPOPTLEN];
unsigned optlen, hdrlen;
int idle, sendalot;
DEBUG_CALL("tcp_output");
DEBUG_ARG("tp = %lx", (long )tp);
/*
* Determine length of data that should be transmitted,
* and flags that will be used.
* If there is some data or critical controls (SYN, RST)
* to send, then transmit; otherwise, investigate further.
*/
idle = (tp->snd_max == tp->snd_una);
if (idle && tp->t_idle >= tp->t_rxtcur)
/*
* We have been idle for "a while" and no acks are
* expected to clock out any data we send --
* slow start to get ack "clock" running again.
*/
tp->snd_cwnd = tp->t_maxseg;
again:
sendalot = 0;
off = tp->snd_nxt - tp->snd_una;
win = min(tp->snd_wnd, tp->snd_cwnd);
flags = tcp_outflags[tp->t_state];
DEBUG_MISC((dfd, " --- tcp_output flags = 0x%x\n",flags));
/*
* If in persist timeout with window of 0, send 1 byte.
* Otherwise, if window is small but nonzero
* and timer expired, we will send what we can
* and go to transmit state.
*/
if (tp->t_force) {
if (win == 0) {
/*
* If we still have some data to send, then
* clear the FIN bit. Usually this would
* happen below when it realizes that we
* aren't sending all the data. However,
* if we have exactly 1 byte of unsent data,
* then it won't clear the FIN bit below,
* and if we are in persist state, we wind
* up sending the packet without recording
* that we sent the FIN bit.
*
* We can't just blindly clear the FIN bit,
* because if we don't have any more data
* to send then the probe will be the FIN
* itself.
*/
if (off < so->so_snd.sb_cc)
flags &= ~TH_FIN;
win = 1;
} else {
tp->t_timer[TCPT_PERSIST] = 0;
tp->t_rxtshift = 0;
}
}
len = min(so->so_snd.sb_cc, win) - off;
if (len < 0) {
/*
* If FIN has been sent but not acked,
* but we haven't been called to retransmit,
* len will be -1. Otherwise, window shrank
* after we sent into it. If window shrank to 0,
* cancel pending retransmit and pull snd_nxt
* back to (closed) window. We will enter persist
* state below. If the window didn't close completely,
* just wait for an ACK.
*/
len = 0;
if (win == 0) {
tp->t_timer[TCPT_REXMT] = 0;
tp->snd_nxt = tp->snd_una;
}
}
if (len > tp->t_maxseg) {
len = tp->t_maxseg;
sendalot = 1;
}
if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
flags &= ~TH_FIN;
win = sbspace(&so->so_rcv);
/*
* Sender silly window avoidance.  Transmit if the connection is
* idle and we can send all queued data, if we can send a full
* maximum segment, or if we are forced to; otherwise don't bother.
* If the peer's buffer is tiny, send when the window is at least
* half open.  If we are retransmitting (possibly after the persist
* timer forced us to send into a small window), we must resend.
*/
if (len) {
if (len == tp->t_maxseg)
goto send;
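/*
* Note: the leading "1 ||" below makes the idle/TF_NODELAY test moot,
* so any transmission that would drain the send buffer is sent
* immediately (Nagle-style delaying is effectively disabled here).
*/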
if ((1 || idle || tp->t_flags & TF_NODELAY) &&
len + off >= so->so_snd.sb_cc)
goto send;
if (tp->t_force)
goto send;
if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
goto send;
if (SEQ_LT(tp->snd_nxt, tp->snd_max))
goto send;
}
/*
* Compare available window to amount of window
* known to peer (as advertised window less
* next expected input). If the difference is at least two
* max size segments, or at least 50% of the maximum possible
* window, then want to send a window update to peer.
*/
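/*
* Hypothetical numbers: with t_maxseg = 536 and an 8192-byte receive
* buffer, a window update is sent once the window can grow by at least
* 1072 bytes (two segments) or by 4096 bytes (half the buffer).
*/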
if (win > 0) {
/*
* "adv" is the amount we can increase the window,
* taking into account that we are limited by
* TCP_MAXWIN << tp->rcv_scale.
*/
long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) -
(tp->rcv_adv - tp->rcv_nxt);
if (adv >= (long) (2 * tp->t_maxseg))
goto send;
if (2 * adv >= (long) so->so_rcv.sb_datalen)
goto send;
}
/*
* Send if we owe peer an ACK.
*/
if (tp->t_flags & TF_ACKNOW)
goto send;
if (flags & (TH_SYN|TH_RST))
goto send;
if (SEQ_GT(tp->snd_up, tp->snd_una))
goto send;
/*
* If our state indicates that FIN should be sent
* and we have not yet done so, or we're retransmitting the FIN,
* then we need to send.
*/
if (flags & TH_FIN &&
((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
goto send;
/*
* TCP window updates are not reliable; rather, a polling protocol
* using ``persist'' packets is used to ensure receipt of window
* updates. The three ``states'' for the output side are:
* idle not doing retransmits or persists
* persisting to move a small or zero window
* (re)transmitting and thereby not persisting
*
* tp->t_timer[TCPT_PERSIST]
* is set when we are in persist state.
* tp->t_force
* is set when we are called to send a persist packet.
* tp->t_timer[TCPT_REXMT]
* is set when we are retransmitting
* The output side is idle when both timers are zero.
*
* If send window is too small, there is data to transmit, and no
* retransmit or persist is pending, then go to persist state.
* If nothing happens soon, send when timer expires:
* if window is nonzero, transmit what we can,
* otherwise force out a byte.
*/
if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 &&
tp->t_timer[TCPT_PERSIST] == 0) {
tp->t_rxtshift = 0;
tcp_setpersist(tp);
}
/*
* No reason to send a segment, just return.
*/
return (0);
send:
/*
* Before ESTABLISHED, force sending of initial options
* unless TCP set not to do any options.
* NOTE: we assume that the IP/TCP header plus TCP options
* always fit in a single mbuf, leaving room for a maximum
* link header, i.e.
* max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MHLEN
*/
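/*
* For reference, the only option built here is MSS; assuming an MSS of
* 1460 the four option bytes are 02 04 05 b4 (kind, length, then the
* value in network byte order).
*/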
optlen = 0;
hdrlen = sizeof (struct tcpiphdr);
if (flags & TH_SYN) {
tp->snd_nxt = tp->iss;
if ((tp->t_flags & TF_NOOPT) == 0) {
uint16_t mss;
opt[0] = TCPOPT_MAXSEG;
opt[1] = 4;
mss = htons((uint16_t) tcp_mss(tp, 0));
memcpy((caddr_t)(opt + 2), (caddr_t)&mss, sizeof(mss));
optlen = 4;
}
}
hdrlen += optlen;
/*
* Adjust data length if insertion of options will
* bump the packet length beyond the t_maxseg length.
*/
if (len > tp->t_maxseg - optlen) {
len = tp->t_maxseg - optlen;
sendalot = 1;
}
/*
* Grab a header mbuf, attaching a copy of data to
* be transmitted, and initialize the header from
* the template for sends on this connection.
*/
if (len) {
m = m_get(so->slirp);
if (m == NULL) {
error = 1;
goto out;
}
m->m_data += IF_MAXLINKHDR;
m->m_len = hdrlen;
sbcopy(&so->so_snd, off, (int) len, mtod(m, caddr_t) + hdrlen);
m->m_len += len;
/*
* If we're sending everything we've got, set PUSH.
* (This will keep those implementations happy which only
* give data to the user when a buffer fills or
* a PUSH comes in.)
*/
if (off + len == so->so_snd.sb_cc)
flags |= TH_PUSH;
} else {
m = m_get(so->slirp);
if (m == NULL) {
error = 1;
goto out;
}
m->m_data += IF_MAXLINKHDR;
m->m_len = hdrlen;
}
ti = mtod(m, struct tcpiphdr *);
memcpy((caddr_t)ti, &tp->t_template, sizeof (struct tcpiphdr));
/*
* Fill in fields, remembering maximum advertised
* window for use in delaying messages about window sizes.
* If resending a FIN, be sure not to use a new sequence number.
*/
if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
tp->snd_nxt == tp->snd_max)
tp->snd_nxt--;
/*
* If we are doing retransmissions, then snd_nxt will
* not reflect the first unsent octet. For ACK only
* packets, we do not want the sequence number of the
* retransmitted packet, we want the sequence number
* of the next unsent octet. So, if there is no data
* (and no SYN or FIN), use snd_max instead of snd_nxt
* when filling in ti_seq. But if we are in persist
* state, snd_max might reflect one byte beyond the
* right edge of the window, so use snd_nxt in that
* case, since we know we aren't doing a retransmission.
* (retransmit and persist are mutually exclusive...)
*/
if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST])
ti->ti_seq = htonl(tp->snd_nxt);
else
ti->ti_seq = htonl(tp->snd_max);
ti->ti_ack = htonl(tp->rcv_nxt);
if (optlen) {
memcpy((caddr_t)(ti + 1), (caddr_t)opt, optlen);
ti->ti_off = (sizeof (struct tcphdr) + optlen) >> 2;
}
ti->ti_flags = flags;
/*
* Calculate receive window. Don't shrink window,
* but avoid silly window syndrome.
*/
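/*
* Example with assumed values: for an 8192-byte receive buffer and
* t_maxseg = 1460, any window smaller than both 2048 bytes (a quarter
* of the buffer) and one segment is advertised as zero instead of
* inviting silly-window-sized segments.
*/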
if (win < (long)(so->so_rcv.sb_datalen / 4) && win < (long)tp->t_maxseg)
win = 0;
if (win > (long)TCP_MAXWIN << tp->rcv_scale)
win = (long)TCP_MAXWIN << tp->rcv_scale;
if (win < (long)(tp->rcv_adv - tp->rcv_nxt))
win = (long)(tp->rcv_adv - tp->rcv_nxt);
ti->ti_win = htons((uint16_t) (win>>tp->rcv_scale));
if (SEQ_GT(tp->snd_up, tp->snd_una)) {
ti->ti_urp = htons((uint16_t)(tp->snd_up - ntohl(ti->ti_seq)));
ti->ti_flags |= TH_URG;
} else
/*
* If no urgent pointer to send, then we pull
* the urgent pointer to the left edge of the send window
* so that it doesn't drift into the send window on sequence
* number wraparound.
*/
tp->snd_up = tp->snd_una; /* drag it along */
/*
* Put TCP length in extended header, and then
* checksum extended header and data.
*/
if (len + optlen)
ti->ti_len = htons((uint16_t)(sizeof (struct tcphdr) +
optlen + len));
ti->ti_sum = cksum(m, (int)(hdrlen + len));
/*
* In transmit state, time the transmission and arrange for
* the retransmit. In persist state, just set snd_max.
*/
if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) {
tcp_seq startseq = tp->snd_nxt;
/*
* Advance snd_nxt over sequence space of this segment.
*/
if (flags & (TH_SYN|TH_FIN)) {
if (flags & TH_SYN)
tp->snd_nxt++;
if (flags & TH_FIN) {
tp->snd_nxt++;
tp->t_flags |= TF_SENTFIN;
}
}
tp->snd_nxt += len;
if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
tp->snd_max = tp->snd_nxt;
/*
* Time this transmission if not a retransmission and
* not currently timing anything.
*/
if (tp->t_rtt == 0) {
tp->t_rtt = 1;
tp->t_rtseq = startseq;
}
}
/*
* Set retransmit timer if not currently set,
* and not doing an ack or a keep-alive probe.
* Initial value for retransmit timer is smoothed
* round-trip time + 2 * round-trip time variance.
* Initialize shift counter which is used for backoff
* of retransmit time.
*/
if (tp->t_timer[TCPT_REXMT] == 0 &&
tp->snd_nxt != tp->snd_una) {
tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
if (tp->t_timer[TCPT_PERSIST]) {
tp->t_timer[TCPT_PERSIST] = 0;
tp->t_rxtshift = 0;
}
}
} else
if (SEQ_GT(tp->snd_nxt + len, tp->snd_max))
tp->snd_max = tp->snd_nxt + len;
/*
* Fill in IP length and desired time to live and
* send to IP level. There should be a better way
* to handle ttl and tos; we could keep them in
* the template, but need a way to checksum without them.
*/
m->m_len = hdrlen + len; /* XXX Needed? m_len should be correct */
{
((struct ip *)ti)->ip_len = m->m_len;
((struct ip *)ti)->ip_ttl = IPDEFTTL;
((struct ip *)ti)->ip_tos = so->so_iptos;
error = ip_output(so, m);
}
if (error) {
out:
return (error);
}
/*
* Data sent (as far as we can tell).
* If this advertises a larger window than any other segment,
* then remember the size of the advertised window.
* Any pending ACK has now been sent.
*/
if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv))
tp->rcv_adv = tp->rcv_nxt + win;
tp->last_ack_sent = tp->rcv_nxt;
tp->t_flags &= ~(TF_ACKNOW|TF_DELACK);
if (sendalot)
goto again;
return (0);
}
void
tcp_setpersist(struct tcpcb *tp)
{
int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
/*
* Start/restart persistence timer.
*/
TCPT_RANGESET(tp->t_timer[TCPT_PERSIST],
t * tcp_backoff[tp->t_rxtshift],
TCPTV_PERSMIN, TCPTV_PERSMAX);
if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
tp->t_rxtshift++;
}
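/*
 * Illustrative sketch (not part of the original sources): the persist
 * timeout computed above, unrolled. Assuming tcp_backoff[] is the
 * usual BSD doubling table {1, 2, 4, 8, ...}, the timeout grows
 * exponentially with t_rxtshift, and TCPT_RANGESET() clamps it to the
 * [TCPTV_PERSMIN, TCPTV_PERSMAX] range:
 *
 *	int base = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
 *	int tmo  = base * tcp_backoff[tp->t_rxtshift];
 *	if (tmo < TCPTV_PERSMIN)
 *		tmo = TCPTV_PERSMIN;
 *	if (tmo > TCPTV_PERSMAX)
 *		tmo = TCPTV_PERSMAX;
 */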
| gpl-2.0 |
fmaker/kernel_msm | drivers/usb/serial/io_ti.c | 667 | 78730 | /*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000-2002 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Supports the following devices:
* EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT
*
* For questions or problems with this driver, contact Inside Out
* Networks technical support, or Peter Berger <pberger@brimson.com>,
* or Al Borchers <alborchers@steinerpoint.com>.
*
* Version history:
*
* July 11, 2002 Removed 4 port device structure since all TI UMP
* chips have only 2 ports
* David Iacovelli (davidi@ionetworks.com)
*
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "io_16654.h"
#include "io_usbvend.h"
#include "io_ti.h"
/*
* Version Information
*/
#define DRIVER_VERSION "v0.7mode043006"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli"
#define DRIVER_DESC "Edgeport USB Serial Driver"
#define EPROM_PAGE_SIZE 64
/* different hardware types */
#define HARDWARE_TYPE_930 0
#define HARDWARE_TYPE_TIUMP 1
/* IOCTL_PRIVATE_TI_GET_MODE Definitions */
#define TI_MODE_CONFIGURING 0 /* Device has not entered start device */
#define TI_MODE_BOOT 1 /* Staying in boot mode */
#define TI_MODE_DOWNLOAD 2 /* Made it to download mode */
#define TI_MODE_TRANSITIONING 3 /* Currently in boot mode but
transitioning to download mode */
/* read urb state */
#define EDGE_READ_URB_RUNNING 0
#define EDGE_READ_URB_STOPPING 1
#define EDGE_READ_URB_STOPPED 2
#define EDGE_CLOSING_WAIT 4000 /* in .01 sec */
#define EDGE_OUT_BUF_SIZE 1024
/* Product information read from the Edgeport */
struct product_info {
int TiMode; /* Current TI Mode */
__u8 hardware_type; /* Type of hardware */
} __attribute__((packed));
struct edgeport_port {
__u16 uart_base;
__u16 dma_address;
__u8 shadow_msr;
__u8 shadow_mcr;
__u8 shadow_lsr;
__u8 lsr_mask;
__u32 ump_read_timeout; /* Number of milliseconds the UMP will
wait without data before completing
a read short */
int baud_rate;
int close_pending;
int lsr_event;
struct async_icount icount;
wait_queue_head_t delta_msr_wait; /* for handling sleeping while
waiting for msr change to
happen */
struct edgeport_serial *edge_serial;
struct usb_serial_port *port;
__u8 bUartMode; /* Port type, 0: RS232, etc. */
spinlock_t ep_lock;
int ep_read_urb_state;
int ep_write_urb_in_use;
struct kfifo write_fifo;
};
struct edgeport_serial {
struct product_info product_info;
u8 TI_I2C_Type; /* Type of I2C in UMP */
u8 TiReadI2C; /* Set to TRUE if we have read the
I2C in Boot Mode */
struct mutex es_lock;
int num_ports_open;
struct usb_serial *serial;
};
/* Devices that this driver supports */
static const struct usb_device_id edgeport_1port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
{ }
};
static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
/* The 4, 8 and 16 port devices show up as multiple 2 port devices */
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ }
};
/* Devices that this driver supports */
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROXIMITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOTION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_MOISTURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_TEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_HUMIDITY) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_POWER) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_LIGHT) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_RADIATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_DISTANCE) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_ACCELERATION) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_WP_PROX_DIST) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_HP4CD) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_PLUS_PWR_PCI) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_421) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_42) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ }
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
static struct usb_driver io_driver = {
.name = "io_ti",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table_combined,
.no_dynamic_id = 1,
};
static unsigned char OperationalMajorVersion;
static unsigned char OperationalMinorVersion;
static unsigned short OperationalBuildNumber;
static int debug;
static int closing_wait = EDGE_CLOSING_WAIT;
static int ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
unsigned char *data, int length);
static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);
static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
static void edge_send(struct tty_struct *tty);
/* sysfs attributes */
static int edge_create_sysfs_attrs(struct usb_serial_port *port);
static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
static int ti_vread_sync(struct usb_device *dev, __u8 request,
__u16 value, __u16 index, u8 *data, int size)
{
int status;
status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN),
value, index, data, size, 1000);
if (status < 0)
return status;
if (status != size) {
dbg("%s - wanted to write %d, but only wrote %d",
__func__, size, status);
return -ECOMM;
}
return 0;
}
static int ti_vsend_sync(struct usb_device *dev, __u8 request,
__u16 value, __u16 index, u8 *data, int size)
{
int status;
status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
value, index, data, size, 1000);
if (status < 0)
return status;
if (status != size) {
dbg("%s - wanted to write %d, but only wrote %d",
__func__, size, status);
return -ECOMM;
}
return 0;
}
static int send_cmd(struct usb_device *dev, __u8 command,
__u8 moduleid, __u16 value, u8 *data,
int size)
{
return ti_vsend_sync(dev, command, value, moduleid, data, size);
}
/* clear tx/rx buffers and fifo in TI UMP */
static int purge_port(struct usb_serial_port *port, __u16 mask)
{
int port_number = port->number - port->serial->minor;
dbg("%s - port %d, mask %x", __func__, port_number, mask);
return send_cmd(port->serial->dev,
UMPC_PURGE_PORT,
(__u8)(UMPM_UART1_PORT + port_number),
mask,
NULL,
0);
}
/**
* read_download_mem - Read edgeport memory from TI chip
* @dev: usb device pointer
* @start_address: Device CPU address at which to read
 * @length: Number of bytes to read
 * @address_type: Address space to read from (XDATA or I2C)
 * @buffer: pointer to the buffer that receives the data
*/
static int read_download_mem(struct usb_device *dev, int start_address,
int length, __u8 address_type, __u8 *buffer)
{
int status = 0;
__u8 read_length;
__be16 be_start_address;
dbg("%s - @ %x for %d", __func__, start_address, length);
/* Read in blocks of 64 bytes
* (TI firmware can't handle more than 64 byte reads)
*/
while (length) {
if (length > 64)
read_length = 64;
else
read_length = (__u8)length;
if (read_length > 1) {
dbg("%s - @ %x for %d", __func__,
start_address, read_length);
}
be_start_address = cpu_to_be16(start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(__u16)address_type,
(__force __u16)be_start_address,
buffer, read_length);
if (status) {
dbg("%s - ERROR %x", __func__, status);
return status;
}
if (read_length > 1)
usb_serial_debug_data(debug, &dev->dev, __func__,
read_length, buffer);
/* Update pointers/length */
start_address += read_length;
buffer += read_length;
length -= read_length;
}
return status;
}
static int read_ram(struct usb_device *dev, int start_address,
int length, __u8 *buffer)
{
return read_download_mem(dev, start_address, length,
DTK_ADDR_SPACE_XDATA, buffer);
}
/* Read edgeport memory to a given block */
static int read_boot_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status = 0;
int i;
for (i = 0; i < length; i++) {
status = ti_vread_sync(serial->serial->dev,
UMPC_MEMORY_READ, serial->TI_I2C_Type,
(__u16)(start_address+i), &buffer[i], 0x01);
if (status) {
dbg("%s - ERROR %x", __func__, status);
return status;
}
}
dbg("%s - start_address = %x, length = %d",
__func__, start_address, length);
usb_serial_debug_data(debug, &serial->serial->dev->dev,
__func__, length, buffer);
serial->TiReadI2C = 1;
return status;
}
/* Write given block to TI EPROM memory */
static int write_boot_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status = 0;
int i;
u8 *temp;
/* Must do a read before write */
if (!serial->TiReadI2C) {
temp = kmalloc(1, GFP_KERNEL);
if (!temp) {
dev_err(&serial->serial->dev->dev,
"%s - out of memory\n", __func__);
return -ENOMEM;
}
status = read_boot_mem(serial, 0, 1, temp);
kfree(temp);
if (status)
return status;
}
for (i = 0; i < length; ++i) {
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, buffer[i],
(__u16)(i + start_address), NULL, 0);
if (status)
return status;
}
dbg("%s - start_sddr = %x, length = %d",
__func__, start_address, length);
usb_serial_debug_data(debug, &serial->serial->dev->dev,
__func__, length, buffer);
return status;
}
/* Write edgeport I2C memory to TI chip */
static int write_i2c_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 address_type, __u8 *buffer)
{
int status = 0;
int write_length;
__be16 be_start_address;
	/* We can send at most one aligned page at a time */
	/* calculate the number of bytes left in the first page */
write_length = EPROM_PAGE_SIZE -
(start_address & (EPROM_PAGE_SIZE - 1));
if (write_length > length)
write_length = length;
dbg("%s - BytesInFirstPage Addr = %x, length = %d",
__func__, start_address, write_length);
usb_serial_debug_data(debug, &serial->serial->dev->dev,
__func__, write_length, buffer);
/* Write first page */
be_start_address = cpu_to_be16(start_address);
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, (__u16)address_type,
(__force __u16)be_start_address,
buffer, write_length);
if (status) {
dbg("%s - ERROR %d", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
/* We should be aligned now -- can write
max page size bytes at a time */
while (length) {
if (length > EPROM_PAGE_SIZE)
write_length = EPROM_PAGE_SIZE;
else
write_length = length;
dbg("%s - Page Write Addr = %x, length = %d",
__func__, start_address, write_length);
usb_serial_debug_data(debug, &serial->serial->dev->dev,
__func__, write_length, buffer);
/* Write next page */
be_start_address = cpu_to_be16(start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(__u16)address_type,
(__force __u16)be_start_address,
buffer, write_length);
if (status) {
dev_err(&serial->serial->dev->dev, "%s - ERROR %d\n",
__func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
}
return status;
}
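/*
 * Sketch (illustrative helper, not in the original driver): the
 * first-chunk arithmetic used above. With EPROM_PAGE_SIZE a power of
 * two, masking the low address bits yields the offset into the current
 * page, so the remainder of the page is what can be written before
 * crossing a page boundary.
 */
static inline int example_bytes_left_in_page(int start_address)
{
	/* e.g. start_address = 0x27 (39), 64-byte page: 64 - 39 = 25 */
	return EPROM_PAGE_SIZE - (start_address & (EPROM_PAGE_SIZE - 1));
}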
/* Examine the UMP DMA registers and LSR
*
* Check the MSBit of the X and Y DMA byte count registers.
* A zero in this bit indicates that the TX DMA buffers are empty
* then check the TX Empty bit in the UART.
*/
static int tx_active(struct edgeport_port *port)
{
int status;
struct out_endpoint_desc_block *oedb;
__u8 *lsr;
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
if (!oedb) {
dev_err(&port->port->dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
lsr = kmalloc(1, GFP_KERNEL); /* Sigh, that's right, just one byte,
as not all platforms can do DMA
from stack */
if (!lsr) {
kfree(oedb);
return -ENOMEM;
}
/* Read the DMA Count Registers */
status = read_ram(port->port->serial->dev, port->dma_address,
sizeof(*oedb), (void *)oedb);
if (status)
goto exit_is_tx_active;
dbg("%s - XByteCount 0x%X", __func__, oedb->XByteCount);
/* and the LSR */
status = read_ram(port->port->serial->dev,
port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
if (status)
goto exit_is_tx_active;
dbg("%s - LSR = 0x%X", __func__, *lsr);
/* If either buffer has data or we are transmitting then return TRUE */
if ((oedb->XByteCount & 0x80) != 0)
bytes_left += 64;
if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
bytes_left += 1;
/* We return Not Active if we get any kind of error */
exit_is_tx_active:
dbg("%s - return %d", __func__, bytes_left);
kfree(lsr);
kfree(oedb);
return bytes_left;
}
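/*
 * Usage sketch (illustrative, not in the original driver): the return
 * value above is a rough estimate, not an exact byte count -- +64 if
 * the DMA byte-count MSBit says buffered data remains, +1 if the UART
 * transmitter is still shifting, 0 (or an error, treated as idle) once
 * everything has drained. A caller waiting for drain can simply poll,
 * as chase_port() does below:
 *
 *	while (tx_active(port) > 0)
 *		msleep(10);
 */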
static void chase_port(struct edgeport_port *port, unsigned long timeout,
int flush)
{
int baud_rate;
struct tty_struct *tty = tty_port_tty_get(&port->port->port);
wait_queue_t wait;
unsigned long flags;
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
/* wait for data to drain from the buffer */
spin_lock_irqsave(&port->ep_lock, flags);
init_waitqueue_entry(&wait, current);
add_wait_queue(&tty->write_wait, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (kfifo_len(&port->write_fifo) == 0
|| timeout == 0 || signal_pending(current)
|| !usb_get_intfdata(port->port->serial->interface))
/* disconnect */
break;
spin_unlock_irqrestore(&port->ep_lock, flags);
timeout = schedule_timeout(timeout);
spin_lock_irqsave(&port->ep_lock, flags);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (flush)
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->ep_lock, flags);
tty_kref_put(tty);
/* wait for data to drain from the device */
timeout += jiffies;
while ((long)(jiffies - timeout) < 0 && !signal_pending(current)
&& usb_get_intfdata(port->port->serial->interface)) {
/* not disconnected */
if (!tx_active(port))
break;
msleep(10);
}
/* disconnected */
if (!usb_get_intfdata(port->port->serial->interface))
return;
/* wait one more character time, based on baud rate */
/* (tx_active doesn't seem to wait for the last byte) */
baud_rate = port->baud_rate;
if (baud_rate == 0)
baud_rate = 50;
msleep(max(1, DIV_ROUND_UP(10000, baud_rate)));
}
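/*
 * Sketch (illustrative helper, not in the original driver): the final
 * sleep above. One asynchronous character is roughly 10 bit times
 * (start + 8 data + stop), so at `baud' bits/s a character takes
 * 10/baud seconds, i.e. 10000/baud milliseconds, rounded up and
 * floored at 1 ms:
 */
static inline int example_char_time_ms(int baud_rate)
{
	return max(1, DIV_ROUND_UP(10000, baud_rate));	/* 9600 baud -> 2 ms */
}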
static int choose_config(struct usb_device *dev)
{
/*
* There may be multiple configurations on this device, in which case
* we would need to read and parse all of them to find out which one
* we want. However, we just support one config at this point,
* configuration # 1, which is Config Descriptor 0.
*/
dbg("%s - Number of Interfaces = %d",
__func__, dev->config->desc.bNumInterfaces);
dbg("%s - MAX Power = %d",
__func__, dev->config->desc.bMaxPower * 2);
if (dev->config->desc.bNumInterfaces != 1) {
dev_err(&dev->dev, "%s - bNumInterfaces is not 1, ERROR!\n",
__func__);
return -ENODEV;
}
return 0;
}
static int read_rom(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status;
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
status = read_download_mem(serial->serial->dev,
start_address,
length,
serial->TI_I2C_Type,
buffer);
} else {
status = read_boot_mem(serial, start_address, length,
buffer);
}
return status;
}
static int write_rom(struct edgeport_serial *serial, int start_address,
int length, __u8 *buffer)
{
if (serial->product_info.TiMode == TI_MODE_BOOT)
return write_boot_mem(serial, start_address, length,
buffer);
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD)
return write_i2c_mem(serial, start_address, length,
serial->TI_I2C_Type, buffer);
return -EINVAL;
}
/* Read a descriptor header from I2C based on type */
static int get_descriptor_addr(struct edgeport_serial *serial,
int desc_type, struct ti_i2c_desc *rom_desc)
{
int start_address;
int status;
/* Search for requested descriptor in I2C */
start_address = 2;
do {
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(__u8 *)rom_desc);
if (status)
return 0;
if (rom_desc->Type == desc_type)
return start_address;
start_address = start_address + sizeof(struct ti_i2c_desc)
+ rom_desc->Size;
} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
return 0;
}
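/*
 * Layout sketch (inferred from the walk above, not from vendor
 * documentation): the I2C image starts with signature bytes at the
 * lowest offsets, followed from offset 2 by a chain of ti_i2c_desc
 * headers, each immediately followed by Size bytes of payload:
 *
 *	[sig][sig] | desc hdr | payload | desc hdr | payload | ...
 *
 * The walk stops on a matching Type, a zero Type, or once it would
 * run past TI_MAX_I2C_SIZE.
 */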
/* Validate descriptor checksum */
static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
{
__u16 i;
__u8 cs = 0;
for (i = 0; i < rom_desc->Size; i++)
cs = (__u8)(cs + buffer[i]);
if (cs != rom_desc->CheckSum) {
dbg("%s - Mismatch %x - %x", __func__, rom_desc->CheckSum, cs);
return -EINVAL;
}
return 0;
}
/* Make sure that the I2C image is good */
static int check_i2c_image(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
struct ti_i2c_desc *rom_desc;
int start_address = 2;
__u8 *buffer;
__u16 ttype;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory when allocating buffer\n",
__func__);
kfree(rom_desc);
return -ENOMEM;
}
	/* Read the first byte (Signature0); it must be 0x52 or 0x10 */
status = read_rom(serial, 0, 1, buffer);
if (status)
goto out;
if (*buffer != UMP5152 && *buffer != UMP3410) {
dev_err(dev, "%s - invalid buffer signature\n", __func__);
status = -ENODEV;
goto out;
}
do {
/* Validate the I2C */
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(__u8 *)rom_desc);
if (status)
break;
if ((start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dbg("%s - structure too big, erroring out.", __func__);
break;
}
dbg("%s Type = 0x%x", __func__, rom_desc->Type);
/* Skip type 2 record */
ttype = rom_desc->Type & 0x0f;
if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC
&& ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) {
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
if (status)
break;
status = valid_csum(rom_desc, buffer);
if (status)
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size;
} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));
if ((rom_desc->Type != I2C_DESC_TYPE_ION) ||
(start_address > TI_MAX_I2C_SIZE))
status = -ENODEV;
out:
kfree(buffer);
kfree(rom_desc);
return status;
}
static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
{
int status;
int start_address;
struct ti_i2c_desc *rom_desc;
struct edge_ti_manuf_descriptor *desc;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(&serial->serial->dev->dev, "%s - out of memory\n",
__func__);
return -ENOMEM;
}
start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
rom_desc);
if (!start_address) {
dbg("%s - Edge Descriptor not found in I2C", __func__);
status = -ENODEV;
goto exit;
}
/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
if (status)
goto exit;
status = valid_csum(rom_desc, buffer);
desc = (struct edge_ti_manuf_descriptor *)buffer;
dbg("%s - IonConfig 0x%x", __func__, desc->IonConfig);
dbg("%s - Version %d", __func__, desc->Version);
dbg("%s - Cpu/Board 0x%x", __func__, desc->CpuRev_BoardRev);
dbg("%s - NumPorts %d", __func__, desc->NumPorts);
dbg("%s - NumVirtualPorts %d", __func__, desc->NumVirtualPorts);
dbg("%s - TotalPorts %d", __func__, desc->TotalPorts);
exit:
kfree(rom_desc);
return status;
}
/* Build firmware header used for firmware update */
static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
{
__u8 *buffer;
int buffer_size;
int i;
int err;
__u8 cs = 0;
struct ti_i2c_desc *i2c_header;
struct ti_i2c_image_header *img_header;
struct ti_i2c_firmware_rec *firmware_rec;
const struct firmware *fw;
const char *fw_name = "edgeport/down3.bin";
/* In order to update the I2C firmware we must change the type 2 record
* to type 0xF2. This will force the UMP to come up in Boot Mode.
* Then while in boot mode, the driver will download the latest
* firmware (padded to 15.5k) into the UMP ram. And finally when the
* device comes back up in download mode the driver will cause the new
* firmware to be copied from the UMP Ram to I2C and the firmware will
* update the record type from 0xf2 to 0x02.
*/
/* Allocate a 15.5k buffer + 2 bytes for version number
* (Firmware Record) */
	buffer_size = (((1024 * 16) - 512) +
sizeof(struct ti_i2c_firmware_rec));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
	/* Fill the entire image with 0xff (pad) */
memset(buffer, 0xff, buffer_size);
err = request_firmware(&fw, fw_name, dev);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
fw_name, err);
kfree(buffer);
return err;
}
/* Save Download Version Number */
OperationalMajorVersion = fw->data[0];
OperationalMinorVersion = fw->data[1];
OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8);
/* Copy version number into firmware record */
firmware_rec = (struct ti_i2c_firmware_rec *)buffer;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
/* Pointer to fw_down memory image */
img_header = (struct ti_i2c_image_header *)&fw->data[4];
memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
&fw->data[4 + sizeof(struct ti_i2c_image_header)],
le16_to_cpu(img_header->Length));
release_firmware(fw);
for (i=0; i < buffer_size; i++) {
cs = (__u8)(cs + buffer[i]);
}
kfree(buffer);
/* Build new header */
i2c_header = (struct ti_i2c_desc *)header;
	firmware_rec = (struct ti_i2c_firmware_rec *)i2c_header->Data;
i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
i2c_header->Size = (__u16)buffer_size;
i2c_header->CheckSum = cs;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
return 0;
}
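/*
 * Sequence sketch (restating the comment above; implemented by
 * download_fw() below):
 *
 *	I2C record type 0x02 (valid firmware)
 *	  -> driver rewrites the record type to 0xF2
 *	  -> UMP reboots into boot mode
 *	  -> driver bulk-downloads the padded image into UMP RAM
 *	  -> UMPC_COPY_DNLD_TO_I2C copies UMP RAM to I2C
 *	  -> firmware restores the record type to 0x02
 */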
/* Try to figure out what type of I2c we have */
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
int status;
u8 *data;
data = kmalloc(1, GFP_KERNEL);
if (!data) {
dev_err(&serial->serial->dev->dev,
"%s - out of memory\n", __func__);
return -ENOMEM;
}
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dbg("%s - read 2 status error = %d", __func__, status);
else
dbg("%s - read 2 data = 0x%x", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_II", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dbg("%s - read 3 status error = %d", __func__, status);
else
dbg("%s - read 2 data = 0x%x", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_III", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
goto out;
}
dbg("%s - Unknown", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
status = -ENODEV;
out:
kfree(data);
return status;
}
static int bulk_xfer(struct usb_serial *serial, void *buffer,
int length, int *num_sent)
{
int status;
status = usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
serial->port[0]->bulk_out_endpointAddress),
buffer, length, num_sent, 1000);
return status;
}
/* Download given firmware image to the device (IN BOOT MODE) */
static int download_code(struct edgeport_serial *serial, __u8 *image,
int image_length)
{
int status = 0;
int pos;
int transfer;
int done;
/* Transfer firmware image */
for (pos = 0; pos < image_length; ) {
/* Read the next buffer from file */
transfer = image_length - pos;
if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE)
transfer = EDGE_FW_BULK_MAX_PACKET_SIZE;
/* Transfer data */
status = bulk_xfer(serial->serial, &image[pos],
transfer, &done);
if (status)
break;
/* Advance buffer pointer */
pos += done;
}
return status;
}
/* FIXME!!! */
static int config_boot_dev(struct usb_device *dev)
{
return 0;
}
static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
{
return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
}
/**
 * download_fw - Download run-time operating firmware to the TI5052
*
* This routine downloads the main operating code into the TI5052, using the
* boot code already burned into E2PROM or ROM.
*/
static int download_fw(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
int start_address;
struct edge_ti_manuf_descriptor *ti_manuf_desc;
struct usb_interface_descriptor *interface;
int download_cur_ver;
int download_new_ver;
	/* This routine is entered in both boot mode and download mode.
	 * We can determine which code is running by reading the config
	 * descriptor: if we have only one bulk pipe, we are in boot mode.
	 */
serial->product_info.hardware_type = HARDWARE_TYPE_TIUMP;
/* Default to type 2 i2c */
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
status = choose_config(serial->serial->dev);
if (status)
return status;
interface = &serial->serial->interface->cur_altsetting->desc;
if (!interface) {
dev_err(dev, "%s - no interface set, error!\n", __func__);
return -ENODEV;
}
/*
* Setup initial mode -- the default mode 0 is TI_MODE_CONFIGURING
* if we have more than one endpoint we are definitely in download
* mode
*/
if (interface->bNumEndpoints > 1)
serial->product_info.TiMode = TI_MODE_DOWNLOAD;
else
/* Otherwise we will remain in configuring mode */
serial->product_info.TiMode = TI_MODE_CONFIGURING;
/********************************************************************/
/* Download Mode */
/********************************************************************/
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
struct ti_i2c_desc *rom_desc;
dbg("%s - RUNNING IN DOWNLOAD MODE", __func__);
status = check_i2c_image(serial);
if (status) {
dbg("%s - DOWNLOAD MODE -- BAD I2C", __func__);
return status;
}
/* Validate Hardware version number
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
if (!ti_manuf_desc) {
dev_err(dev, "%s - out of memory.\n", __func__);
return -ENOMEM;
}
status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
return status;
}
/* Check version number of ION descriptor */
if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
dbg("%s - Wrong CPU Rev %d (Must be 2)",
__func__, ti_cpu_rev(ti_manuf_desc));
kfree(ti_manuf_desc);
return -EINVAL;
}
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(dev, "%s - out of memory.\n", __func__);
kfree(ti_manuf_desc);
return -ENOMEM;
}
/* Search for type 2 record (firmware record) */
start_address = get_descriptor_addr(serial,
I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
if (start_address != 0) {
struct ti_i2c_firmware_rec *firmware_version;
u8 *record;
dbg("%s - Found Type FIRMWARE (Type 2) record",
__func__);
firmware_version = kmalloc(sizeof(*firmware_version),
GFP_KERNEL);
if (!firmware_version) {
dev_err(dev, "%s - out of memory.\n", __func__);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
/* Validate version number
* Read the descriptor data
*/
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
sizeof(struct ti_i2c_firmware_rec),
(__u8 *)firmware_version);
if (status) {
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
/* Check version number of download with current
	   version in I2C */
download_cur_ver = (firmware_version->Ver_Major << 8) +
(firmware_version->Ver_Minor);
download_new_ver = (OperationalMajorVersion << 8) +
(OperationalMinorVersion);
dbg("%s - >> FW Versions Device %d.%d Driver %d.%d",
__func__,
firmware_version->Ver_Major,
firmware_version->Ver_Minor,
OperationalMajorVersion,
OperationalMinorVersion);
/* Check if we have an old version in the I2C and
update if necessary */
if (download_cur_ver < download_new_ver) {
dbg("%s - Update I2C dld from %d.%d to %d.%d",
__func__,
firmware_version->Ver_Major,
firmware_version->Ver_Minor,
OperationalMajorVersion,
OperationalMinorVersion);
record = kmalloc(1, GFP_KERNEL);
if (!record) {
dev_err(dev, "%s - out of memory.\n",
__func__);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
/* In order to update the I2C firmware we must
* change the type 2 record to type 0xF2. This
* will force the UMP to come up in Boot Mode.
* Then while in boot mode, the driver will
* download the latest firmware (padded to
* 15.5k) into the UMP ram. Finally when the
* device comes back up in download mode the
* driver will cause the new firmware to be
* copied from the UMP Ram to I2C and the
* firmware will update the record type from
* 0xf2 to 0x02.
*/
*record = I2C_DESC_TYPE_FIRMWARE_BLANK;
/* Change the I2C Firmware record type to
0xf2 to trigger an update */
status = write_rom(serial, start_address,
sizeof(*record), record);
if (status) {
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
/* verify the write -- must do this in order
* for write to complete before we do the
* hardware reset
*/
status = read_rom(serial,
start_address,
sizeof(*record),
record);
if (status) {
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
dev_err(dev,
"%s - error resetting device\n",
__func__);
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENODEV;
}
dbg("%s - HARDWARE RESET", __func__);
/* Reset UMP -- Back to BOOT MODE */
status = ti_vsend_sync(serial->serial->dev,
UMPC_HARDWARE_RESET,
0, 0, NULL, 0);
dbg("%s - HARDWARE RESET return %d",
__func__, status);
/* return an error on purpose. */
kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENODEV;
}
kfree(firmware_version);
}
/* Search for type 0xF2 record (firmware blank record) */
else if ((start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_FIRMWARE_BLANK, rom_desc)) != 0) {
#define HEADER_SIZE (sizeof(struct ti_i2c_desc) + \
sizeof(struct ti_i2c_firmware_rec))
__u8 *header;
__u8 *vheader;
header = kmalloc(HEADER_SIZE, GFP_KERNEL);
if (!header) {
dev_err(dev, "%s - out of memory.\n", __func__);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
vheader = kmalloc(HEADER_SIZE, GFP_KERNEL);
if (!vheader) {
dev_err(dev, "%s - out of memory.\n", __func__);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return -ENOMEM;
}
dbg("%s - Found Type BLANK FIRMWARE (Type F2) record",
__func__);
/*
* In order to update the I2C firmware we must change
* the type 2 record to type 0xF2. This will force the
* UMP to come up in Boot Mode. Then while in boot
* mode, the driver will download the latest firmware
* (padded to 15.5k) into the UMP ram. Finally when the
* device comes back up in download mode the driver
* will cause the new firmware to be copied from the
* UMP Ram to I2C and the firmware will update the
* record type from 0xf2 to 0x02.
*/
status = build_i2c_fw_hdr(header, dev);
if (status) {
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
/* Update I2C with type 0xf2 record with correct
size and checksum */
status = write_rom(serial,
start_address,
HEADER_SIZE,
header);
if (status) {
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
/* verify the write -- must do this in order for
write to complete before we do the hardware reset */
status = read_rom(serial, start_address,
HEADER_SIZE, vheader);
if (status) {
dbg("%s - can't read header back", __func__);
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
if (memcmp(vheader, header, HEADER_SIZE)) {
dbg("%s - write download record failed",
__func__);
kfree(vheader);
kfree(header);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
kfree(vheader);
kfree(header);
dbg("%s - Start firmware update", __func__);
/* Tell firmware to copy download image into I2C */
status = ti_vsend_sync(serial->serial->dev,
UMPC_COPY_DNLD_TO_I2C, 0, 0, NULL, 0);
dbg("%s - Update complete 0x%x", __func__, status);
if (status) {
dev_err(dev,
"%s - UMPC_COPY_DNLD_TO_I2C failed\n",
__func__);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
}
	/* The device is running the download code */
kfree(rom_desc);
kfree(ti_manuf_desc);
return 0;
}
/********************************************************************/
/* Boot Mode */
/********************************************************************/
dbg("%s - RUNNING IN BOOT MODE", __func__);
/* Configure the TI device so we can use the BULK pipes for download */
status = config_boot_dev(serial->serial->dev);
if (status)
return status;
if (le16_to_cpu(serial->serial->dev->descriptor.idVendor)
!= USB_VENDOR_ID_ION) {
dbg("%s - VID = 0x%x", __func__,
le16_to_cpu(serial->serial->dev->descriptor.idVendor));
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto stayinbootmode;
}
	/* We have an ION device (I2C must be programmed);
	   determine the I2C image type */
if (i2c_type_bootmode(serial))
goto stayinbootmode;
/* Check for ION Vendor ID and that the I2C is valid */
if (!check_i2c_image(serial)) {
struct ti_i2c_image_header *header;
int i;
__u8 cs = 0;
__u8 *buffer;
int buffer_size;
int err;
const struct firmware *fw;
const char *fw_name = "edgeport/down3.bin";
/* Validate Hardware version number
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
if (!ti_manuf_desc) {
dev_err(dev, "%s - out of memory.\n", __func__);
return -ENOMEM;
}
status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
goto stayinbootmode;
}
/* Check for version 2 */
if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
dbg("%s - Wrong CPU Rev %d (Must be 2)",
__func__, ti_cpu_rev(ti_manuf_desc));
kfree(ti_manuf_desc);
goto stayinbootmode;
}
kfree(ti_manuf_desc);
/*
* In order to update the I2C firmware we must change the type
* 2 record to type 0xF2. This will force the UMP to come up
* in Boot Mode. Then while in boot mode, the driver will
* download the latest firmware (padded to 15.5k) into the
* UMP ram. Finally when the device comes back up in download
* mode the driver will cause the new firmware to be copied
* from the UMP Ram to I2C and the firmware will update the
* record type from 0xf2 to 0x02.
*
		 * Do we really have to copy the whole firmware image,
		 * or could we do this in place?
*/
/* Allocate a 15.5k buffer + 3 byte header */
buffer_size = (((1024 * 16) - 512) +
sizeof(struct ti_i2c_image_header));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
/* Initialize the buffer to 0xff (pad the buffer) */
memset(buffer, 0xff, buffer_size);
err = request_firmware(&fw, fw_name, dev);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
fw_name, err);
kfree(buffer);
return err;
}
memcpy(buffer, &fw->data[4], fw->size - 4);
release_firmware(fw);
for (i = sizeof(struct ti_i2c_image_header);
i < buffer_size; i++) {
cs = (__u8)(cs + buffer[i]);
}
header = (struct ti_i2c_image_header *)buffer;
/* update length and checksum after padding */
header->Length = cpu_to_le16((__u16)(buffer_size -
sizeof(struct ti_i2c_image_header)));
header->CheckSum = cs;
/* Download the operational code */
dbg("%s - Downloading operational code image (TI UMP)",
__func__);
status = download_code(serial, buffer, buffer_size);
kfree(buffer);
if (status) {
dbg("%s - Error downloading operational code image",
__func__);
return status;
}
/* Device will reboot */
serial->product_info.TiMode = TI_MODE_TRANSITIONING;
dbg("%s - Download successful -- Device rebooting...",
__func__);
/* return an error on purpose */
return -ENODEV;
}
stayinbootmode:
	/* EPROM is invalid or blank; stay in boot mode */
dbg("%s - STAYING IN BOOT MODE", __func__);
serial->product_info.TiMode = TI_MODE_BOOT;
return 0;
}
static int ti_do_config(struct edgeport_port *port, int feature, int on)
{
int port_number = port->port->number - port->port->serial->minor;
on = !!on; /* 1 or 0 not bitmask */
return send_cmd(port->port->serial->dev,
feature, (__u8)(UMPM_UART1_PORT + port_number),
on, NULL, 0);
}
static int restore_mcr(struct edgeport_port *port, __u8 mcr)
{
int status = 0;
dbg("%s - %x", __func__, mcr);
status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR);
if (status)
return status;
status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS);
if (status)
return status;
return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK);
}
/* Convert TI LSR to standard UART flags */
static __u8 map_line_status(__u8 ti_lsr)
{
__u8 lsr = 0;
#define MAP_FLAG(flagUmp, flagUart) \
if (ti_lsr & flagUmp) \
lsr |= flagUart;
MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR) /* overrun */
MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR) /* parity error */
MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR) /* framing error */
MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK) /* break detected */
MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL) /* rx data available */
MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY) /* tx hold reg empty */
#undef MAP_FLAG
return lsr;
}
static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
{
struct async_icount *icount;
struct tty_struct *tty;
dbg("%s - %02x", __func__, msr);
if (msr & (EDGEPORT_MSR_DELTA_CTS | EDGEPORT_MSR_DELTA_DSR |
EDGEPORT_MSR_DELTA_RI | EDGEPORT_MSR_DELTA_CD)) {
icount = &edge_port->icount;
/* update input line counters */
if (msr & EDGEPORT_MSR_DELTA_CTS)
icount->cts++;
if (msr & EDGEPORT_MSR_DELTA_DSR)
icount->dsr++;
if (msr & EDGEPORT_MSR_DELTA_CD)
icount->dcd++;
if (msr & EDGEPORT_MSR_DELTA_RI)
icount->rng++;
wake_up_interruptible(&edge_port->delta_msr_wait);
}
/* Save the new modem status */
edge_port->shadow_msr = msr & 0xf0;
tty = tty_port_tty_get(&edge_port->port->port);
/* handle CTS flow control */
if (tty && C_CRTSCTS(tty)) {
if (msr & EDGEPORT_MSR_CTS) {
tty->hw_stopped = 0;
tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
}
}
tty_kref_put(tty);
return;
}
static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
__u8 lsr, __u8 data)
{
struct async_icount *icount;
__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
LSR_FRM_ERR | LSR_BREAK));
struct tty_struct *tty;
dbg("%s - %02x", __func__, new_lsr);
edge_port->shadow_lsr = lsr;
if (new_lsr & LSR_BREAK)
/*
* Parity and Framing errors only count if they
* occur exclusive of a break being received.
*/
new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
/* Place LSR data byte into Rx buffer */
if (lsr_data) {
tty = tty_port_tty_get(&edge_port->port->port);
if (tty) {
edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
tty_kref_put(tty);
}
}
/* update input line counters */
icount = &edge_port->icount;
if (new_lsr & LSR_BREAK)
icount->brk++;
if (new_lsr & LSR_OVER_ERR)
icount->overrun++;
if (new_lsr & LSR_PAR_ERR)
icount->parity++;
if (new_lsr & LSR_FRM_ERR)
icount->frame++;
}
static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int retval;
__u8 lsr;
__u8 msr;
int status = urb->status;
dbg("%s", __func__);
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
"%d\n", __func__, status);
goto exit;
}
if (!length) {
dbg("%s - no data in urb", __func__);
goto exit;
}
usb_serial_debug_data(debug, &edge_serial->serial->dev->dev,
__func__, length, data);
if (length != 2) {
dbg("%s - expecting packet of size 2, got %d",
__func__, length);
goto exit;
}
port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dbg("%s - port_number %d, function %d, info 0x%x",
__func__, port_number, function, data[1]);
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
dbg("%s - edge_port not found", __func__);
return;
}
switch (function) {
case TIUMP_INTERRUPT_CODE_LSR:
lsr = map_line_status(data[1]);
if (lsr & UMP_UART_LSR_DATA_MASK) {
/* Save the LSR event for bulk read
completion routine */
dbg("%s - LSR Event Port %u LSR Status = %02x",
__func__, port_number, lsr);
edge_port->lsr_event = 1;
edge_port->lsr_mask = lsr;
} else {
dbg("%s - ===== Port %d LSR Status = %02x ======",
__func__, port_number, lsr);
handle_new_lsr(edge_port, 0, lsr, 0);
}
break;
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
dbg("%s - ===== Port %u MSR Status = %02x ======",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
default:
dev_err(&urb->dev->dev,
"%s - Unknown Interrupt code from UMP %x\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
int retval = 0;
int port_number;
int status = urb->status;
dbg("%s", __func__);
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dev_err(&urb->dev->dev,
"%s - nonzero read bulk status received: %d\n",
__func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
return;
}
port_number = edge_port->port->number - edge_port->port->serial->minor;
if (edge_port->lsr_event) {
edge_port->lsr_event = 0;
dbg("%s ===== Port %u LSR Status = %02x, Data = %02x ======",
__func__, port_number, edge_port->lsr_mask, *data);
handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
/* Adjust buffer length/pointer */
--urb->actual_length;
++data;
}
tty = tty_port_tty_get(&edge_port->port->port);
if (tty && urb->actual_length) {
usb_serial_debug_data(debug, &edge_port->port->dev,
__func__, urb->actual_length, data);
if (edge_port->close_pending)
dbg("%s - close pending, dropping data on the floor",
__func__);
else
edge_tty_recv(&edge_port->port->dev, tty, data,
urb->actual_length);
edge_port->icount.rx += urb->actual_length;
}
tty_kref_put(tty);
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) {
urb->dev = edge_port->port->serial->dev;
retval = usb_submit_urb(urb, GFP_ATOMIC);
} else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) {
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
}
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
unsigned char *data, int length)
{
int queued;
queued = tty_insert_flip_string(tty, data, length);
if (queued < length)
dev_err(dev, "%s - dropping data, %d bytes lost\n",
__func__, length - queued);
tty_flip_buffer_push(tty);
}
static void edge_bulk_out_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status = urb->status;
struct tty_struct *tty;
dbg("%s - port %d", __func__, port->number);
edge_port->ep_write_urb_in_use = 0;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
}
/* send any buffered data */
tty = tty_port_tty_get(&port->port);
edge_send(tty);
tty_kref_put(tty);
}
static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct edgeport_serial *edge_serial;
struct usb_device *dev;
struct urb *urb;
int port_number;
int status;
u16 open_settings;
u8 transaction_timeout;
dbg("%s - port %d", __func__, port->number);
if (edge_port == NULL)
return -ENODEV;
port_number = port->number - port->serial->minor;
switch (port_number) {
case 0:
edge_port->uart_base = UMPMEM_BASE_UART1;
edge_port->dma_address = UMPD_OEDB1_ADDRESS;
break;
case 1:
edge_port->uart_base = UMPMEM_BASE_UART2;
edge_port->dma_address = UMPD_OEDB2_ADDRESS;
break;
default:
dev_err(&port->dev, "Unknown port number!!!\n");
return -ENODEV;
}
dbg("%s - port_number = %d, uart_base = %04x, dma_address = %04x",
__func__, port_number, edge_port->uart_base,
edge_port->dma_address);
dev = port->serial->dev;
memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
init_waitqueue_head(&edge_port->delta_msr_wait);
/* turn off loopback */
status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear loopback command, %d\n",
__func__, status);
return status;
}
/* set up the port settings */
if (tty)
edge_set_termios(tty, port, tty->termios);
/* open up the port */
/* milliseconds to timeout for DMA transfer */
transaction_timeout = 2;
edge_port->ump_read_timeout =
max(20, ((transaction_timeout * 3) / 2));
	/* build the open settings word (DMA mode, pipe timeout, transaction timeout) */
open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
UMP_PIPE_TRANS_TIMEOUT_ENA |
(transaction_timeout << 2));
dbg("%s - Sending UMPC_OPEN_PORT", __func__);
/* Tell TI to open and start the port */
status = send_cmd(dev, UMPC_OPEN_PORT,
(u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send open command, %d\n",
__func__, status);
return status;
}
/* Start the DMA? */
status = send_cmd(dev, UMPC_START_PORT,
(u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
__func__, status);
return status;
}
/* Clear TX and RX buffers in UMP */
status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear buffers command, %d\n",
__func__, status);
return status;
}
/* Read Initial MSR */
status = ti_vread_sync(dev, UMPC_READ_MSR, 0,
(__u16)(UMPM_UART1_PORT + port_number),
&edge_port->shadow_msr, 1);
if (status) {
dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
__func__, status);
return status;
}
dbg("ShadowMSR 0x%X", edge_port->shadow_msr);
/* Set Initial MCR */
edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
dbg("ShadowMCR 0x%X", edge_port->shadow_mcr);
edge_serial = edge_port->edge_serial;
if (mutex_lock_interruptible(&edge_serial->es_lock))
return -ERESTARTSYS;
if (edge_serial->num_ports_open == 0) {
/* we are the first port to open, post the interrupt urb */
urb = edge_serial->serial->port[0]->interrupt_in_urb;
if (!urb) {
dev_err(&port->dev,
"%s - no interrupt urb present, exiting\n",
__func__);
status = -EINVAL;
goto release_es_lock;
}
urb->complete = edge_interrupt_callback;
urb->context = edge_serial;
urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb failed with value %d\n",
__func__, status);
goto release_es_lock;
}
}
/*
	 * reset the data toggle on the bulk endpoints to work around a bug in
	 * host controllers where things get out of sync sometimes
*/
usb_clear_halt(dev, port->write_urb->pipe);
usb_clear_halt(dev, port->read_urb->pipe);
/* start up our bulk read urb */
urb = port->read_urb;
if (!urb) {
dev_err(&port->dev, "%s - no read urb present, exiting\n",
__func__);
status = -EINVAL;
goto unlink_int_urb;
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
urb->complete = edge_bulk_in_callback;
urb->context = edge_port;
urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed with value %d\n",
__func__, status);
goto unlink_int_urb;
}
++edge_serial->num_ports_open;
dbg("%s - exited", __func__);
goto release_es_lock;
unlink_int_urb:
if (edge_port->edge_serial->num_ports_open == 0)
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
mutex_unlock(&edge_serial->es_lock);
return status;
}
static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
int port_number;
int status;
dbg("%s - port %d", __func__, port->number);
edge_serial = usb_get_serial_data(port->serial);
edge_port = usb_get_serial_port_data(port);
if (edge_serial == NULL || edge_port == NULL)
return;
	/* The bulk read completion routine will check
	 * this flag and dump any read data */
edge_port->close_pending = 1;
/* chase the port close and flush */
chase_port(edge_port, (HZ * closing_wait) / 100, 1);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
edge_port->ep_write_urb_in_use = 0;
/* assuming we can still talk to the device,
* send a close port command to it */
dbg("%s - send umpc_close_port", __func__);
port_number = port->number - port->serial->minor;
status = send_cmd(port->serial->dev,
UMPC_CLOSE_PORT,
(__u8)(UMPM_UART1_PORT + port_number),
0,
NULL,
0);
mutex_lock(&edge_serial->es_lock);
--edge_port->edge_serial->num_ports_open;
if (edge_port->edge_serial->num_ports_open <= 0) {
/* last port is now closed, let's shut down our interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
edge_port->edge_serial->num_ports_open = 0;
}
mutex_unlock(&edge_serial->es_lock);
edge_port->close_pending = 0;
dbg("%s - exited", __func__);
}
static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
dbg("%s - port %d", __func__, port->number);
if (count == 0) {
dbg("%s - write request of 0 bytes", __func__);
return 0;
}
if (edge_port == NULL)
return -ENODEV;
if (edge_port->close_pending == 1)
return -ENODEV;
count = kfifo_in_locked(&edge_port->write_fifo, data, count,
&edge_port->ep_lock);
edge_send(tty);
return count;
}
static void edge_send(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int count, result;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned long flags;
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_write_urb_in_use) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
count = kfifo_out(&edge_port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
if (count == 0) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
edge_port->ep_write_urb_in_use = 1;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
usb_serial_debug_data(debug, &port->dev, __func__, count,
port->write_urb->transfer_buffer);
/* set up our urb */
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, count,
edge_bulk_out_callback,
port);
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
"%s - failed submitting write urb, error %d\n",
__func__, result);
edge_port->ep_write_urb_in_use = 0;
/* TODO: reschedule edge_send */
} else
edge_port->icount.tx += count;
/* wakeup any process waiting for writes to complete */
/* there is now more room in the buffer for new writes */
if (tty)
tty_wakeup(tty);
}
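/*
 * Flow sketch (illustrative): the write path keeps at most one bulk
 * URB in flight and buffers everything else in the kfifo:
 *
 *	edge_write()             -> kfifo_in_locked() -> edge_send()
 *	edge_send()              -> kfifo_out() one chunk -> usb_submit_urb()
 *	edge_bulk_out_callback() -> clears ep_write_urb_in_use -> edge_send()
 *
 * so the fifo drains one bulk_out_size chunk per URB completion.
 */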
static int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int room = 0;
unsigned long flags;
dbg("%s - port %d", __func__, port->number);
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
room = kfifo_avail(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dbg("%s - returns %d", __func__, room);
return room;
}
static int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
dbg("%s - port %d", __func__, port->number);
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dbg("%s - returns %d", __func__, chars);
return chars;
}
static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
dbg("%s - port %d", __func__, port->number);
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = edge_write(tty, port, &stop_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write stop character, %d\n", __func__, status);
}
}
/* if we are implementing RTS/CTS, stop reads */
/* and the Edgeport will clear the RTS line */
if (C_CRTSCTS(tty))
stop_read(edge_port);
}
static void edge_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
dbg("%s - port %d", __func__, port->number);
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = edge_write(tty, port, &start_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status);
}
}
/* if we are implementing RTS/CTS, restart reads */
/* and the Edgeport will assert the RTS line */
if (C_CRTSCTS(tty)) {
status = restart_read(edge_port);
if (status)
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed: %d\n",
__func__, status);
}
}
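/* stop_read()/restart_read() drive the read-urb state machine:
* RUNNING -> STOPPING here, with the STOPPING -> STOPPED step
* presumably taken in the bulk-in completion handler (not shown in
* this excerpt), and STOPPED -> RUNNING on restart, when the read
* urb is resubmitted.
*/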
static void stop_read(struct edgeport_port *edge_port)
{
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
edge_port->shadow_mcr &= ~MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}
static int restart_read(struct edgeport_port *edge_port)
{
struct urb *urb;
int status = 0;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
urb = edge_port->port->read_urb;
urb->complete = edge_bulk_in_callback;
urb->context = edge_port;
urb->dev = edge_port->port->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
edge_port->shadow_mcr |= MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return status;
}
static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port, struct ktermios *old_termios)
{
struct ump_uart_config *config;
int baud;
unsigned cflag;
int status;
int port_number = edge_port->port->number -
edge_port->port->serial->minor;
dbg("%s - port %d", __func__, edge_port->port->number);
config = kmalloc(sizeof(*config), GFP_KERNEL);
if (!config) {
*tty->termios = *old_termios;
dev_err(&edge_port->port->dev, "%s - out of memory\n",
__func__);
return;
}
cflag = tty->termios->c_cflag;
config->wFlags = 0;
/* These flags must be set */
config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT;
config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR;
config->bUartMode = (__u8)(edge_port->bUartMode);
switch (cflag & CSIZE) {
case CS5:
config->bDataBits = UMP_UART_CHAR5BITS;
dbg("%s - data bits = 5", __func__);
break;
case CS6:
config->bDataBits = UMP_UART_CHAR6BITS;
dbg("%s - data bits = 6", __func__);
break;
case CS7:
config->bDataBits = UMP_UART_CHAR7BITS;
dbg("%s - data bits = 7", __func__);
break;
default:
case CS8:
config->bDataBits = UMP_UART_CHAR8BITS;
dbg("%s - data bits = 8", __func__);
break;
}
if (cflag & PARENB) {
if (cflag & PARODD) {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_ODDPARITY;
dbg("%s - parity = odd", __func__);
} else {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_EVENPARITY;
dbg("%s - parity = even", __func__);
}
} else {
config->bParity = UMP_UART_NOPARITY;
dbg("%s - parity = none", __func__);
}
if (cflag & CSTOPB) {
config->bStopBits = UMP_UART_STOPBIT2;
dbg("%s - stop bits = 2", __func__);
} else {
config->bStopBits = UMP_UART_STOPBIT1;
dbg("%s - stop bits = 1", __func__);
}
/* figure out the flow control settings */
if (cflag & CRTSCTS) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW;
config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW;
dbg("%s - RTS/CTS is enabled", __func__);
} else {
dbg("%s - RTS/CTS is disabled", __func__);
tty->hw_stopped = 0;
restart_read(edge_port);
}
/* if we are implementing XON/XOFF, set the start and stop
character in the device */
config->cXon = START_CHAR(tty);
config->cXoff = STOP_CHAR(tty);
/* if we are implementing INBOUND XON/XOFF */
if (I_IXOFF(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_IN_X;
dbg("%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x",
__func__, config->cXon, config->cXoff);
} else
dbg("%s - INBOUND XON/XOFF is disabled", __func__);
/* if we are implementing OUTBOUND XON/XOFF */
if (I_IXON(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X;
dbg("%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x",
__func__, config->cXon, config->cXoff);
} else
dbg("%s - OUTBOUND XON/XOFF is disabled", __func__);
tty->termios->c_cflag &= ~CMSPAR;
/* Round the baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
baud = 9600;
} else
tty_encode_baud_rate(tty, baud, baud);
edge_port->baud_rate = baud;
config->wBaudRate = (__u16)((461550L + baud/2) / baud);
/* FIXME: Recompute actual baud from divisor here */
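/* Illustrative example (not in the original source): for baud =
* 115200 the divisor is (461550 + 57600) / 115200 = 4, so the actual
* rate is 461550 / 4 ~= 115387, about 0.16% fast.
*/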
dbg("%s - baud rate = %d, wBaudRate = %d", __func__, baud,
config->wBaudRate);
dbg("wBaudRate: %d", (int)(461550L / config->wBaudRate));
dbg("wFlags: 0x%x", config->wFlags);
dbg("bDataBits: %d", config->bDataBits);
dbg("bParity: %d", config->bParity);
dbg("bStopBits: %d", config->bStopBits);
dbg("cXon: %d", config->cXon);
dbg("cXoff: %d", config->cXoff);
dbg("bUartMode: %d", config->bUartMode);
/* move the word values into big endian mode */
cpu_to_be16s(&config->wFlags);
cpu_to_be16s(&config->wBaudRate);
status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG,
(__u8)(UMPM_UART1_PORT + port_number),
0, (__u8 *)config, sizeof(*config));
if (status)
dbg("%s - error %d when trying to write config to device",
__func__, status);
kfree(config);
return;
}
static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int cflag;
cflag = tty->termios->c_cflag;
dbg("%s - clfag %08x iflag %08x", __func__,
tty->termios->c_cflag, tty->termios->c_iflag);
dbg("%s - old clfag %08x old iflag %08x", __func__,
old_termios->c_cflag, old_termios->c_iflag);
dbg("%s - port %d", __func__, port->number);
if (edge_port == NULL)
return;
/* change the port settings to the new ones specified */
change_port_settings(tty, edge_port, old_termios);
return;
}
static int edge_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int mcr;
unsigned long flags;
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&edge_port->ep_lock, flags);
mcr = edge_port->shadow_mcr;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
edge_port->shadow_mcr = mcr;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
restore_mcr(edge_port, mcr);
return 0;
}
static int edge_tiocmget(struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int result = 0;
unsigned int msr;
unsigned int mcr;
unsigned long flags;
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&edge_port->ep_lock, flags);
msr = edge_port->shadow_msr;
mcr = edge_port->shadow_mcr;
result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */
| ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */
| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */
| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */
| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */
| ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */
dbg("%s -- %x", __func__, result);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return result;
}
static int get_serial_info(struct edgeport_port *edge_port,
struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
tmp.line = edge_port->port->serial->minor;
tmp.port = edge_port->port->number;
tmp.irq = 0;
tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
tmp.xmit_fifo_size = edge_port->port->bulk_out_size;
tmp.baud_base = 9600;
tmp.close_delay = 5*HZ;
tmp.closing_wait = closing_wait;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
return 0;
}
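/* edge_ioctl: TIOCMIWAIT sleeps until one of the modem-status lines
* selected in arg (RNG/DSR/CD/CTS) changes, as counted in icount by
* the interrupt path; a wakeup with no counter change returns -EIO.
*/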
static int edge_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct async_icount cnow;
struct async_icount cprev;
dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd);
switch (cmd) {
case TIOCGSERIAL:
dbg("%s - (%d) TIOCGSERIAL", __func__, port->number);
return get_serial_info(edge_port,
(struct serial_struct __user *) arg);
case TIOCMIWAIT:
dbg("%s - (%d) TIOCMIWAIT", __func__, port->number);
cprev = edge_port->icount;
while (1) {
interruptible_sleep_on(&edge_port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
cnow = edge_port->icount;
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
return -EIO; /* no change => error */
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
return 0;
}
cprev = cnow;
}
/* not reached */
break;
case TIOCGICOUNT:
dbg("%s - (%d) TIOCGICOUNT RX=%d, TX=%d", __func__,
port->number, edge_port->icount.rx, edge_port->icount.tx);
if (copy_to_user((void __user *)arg, &edge_port->icount,
sizeof(edge_port->icount)))
return -EFAULT;
return 0;
}
return -ENOIOCTLCMD;
}
static void edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
int bv = 0; /* Off */
dbg("%s - state = %d", __func__, break_state);
/* chase the port close */
chase_port(edge_port, 0, 0);
if (break_state == -1)
bv = 1; /* On */
status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
if (status)
dbg("%s - error %d sending break set/clear command.",
__func__, status);
}
static int edge_startup(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
struct usb_device *dev;
int status;
int i;
dev = serial->dev;
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (edge_serial == NULL) {
dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
return -ENOMEM;
}
mutex_init(&edge_serial->es_lock);
edge_serial->serial = serial;
usb_set_serial_data(serial, edge_serial);
status = download_fw(edge_serial);
if (status) {
kfree(edge_serial);
return status;
}
/* set up our port private structures */
for (i = 0; i < serial->num_ports; ++i) {
edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
if (edge_port == NULL) {
dev_err(&serial->dev->dev, "%s - Out of memory\n",
__func__);
goto cleanup;
}
spin_lock_init(&edge_port->ep_lock);
if (kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
GFP_KERNEL)) {
dev_err(&serial->dev->dev, "%s - Out of memory\n",
__func__);
kfree(edge_port);
goto cleanup;
}
edge_port->port = serial->port[i];
edge_port->edge_serial = edge_serial;
usb_set_serial_port_data(serial->port[i], edge_port);
edge_port->bUartMode = default_uart_mode;
}
return 0;
cleanup:
for (--i; i >= 0; --i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
usb_set_serial_port_data(serial->port[i], NULL);
}
kfree(edge_serial);
usb_set_serial_data(serial, NULL);
return -ENOMEM;
}
static void edge_disconnect(struct usb_serial *serial)
{
int i;
struct edgeport_port *edge_port;
dbg("%s", __func__);
for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
edge_remove_sysfs_attrs(edge_port->port);
}
}
static void edge_release(struct usb_serial *serial)
{
int i;
struct edgeport_port *edge_port;
dbg("%s", __func__);
for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
}
kfree(usb_get_serial_data(serial));
}
/* Sysfs Attributes */
static ssize_t show_uart_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
return sprintf(buf, "%d\n", edge_port->bUartMode);
}
static ssize_t store_uart_mode(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int v = simple_strtoul(valbuf, NULL, 0);
dbg("%s: setting uart_mode = %d", __func__, v);
if (v < 256)
edge_port->bUartMode = v;
else
dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v);
return count;
}
static DEVICE_ATTR(uart_mode, S_IWUSR | S_IRUGO, show_uart_mode,
store_uart_mode);
static int edge_create_sysfs_attrs(struct usb_serial_port *port)
{
return device_create_file(&port->dev, &dev_attr_uart_mode);
}
static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
{
device_remove_file(&port->dev, &dev_attr_uart_mode);
return 0;
}
static struct usb_serial_driver edgeport_1port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_ti_1",
},
.description = "Edgeport TI 1 port adapter",
.usb_driver = &io_driver,
.id_table = edgeport_1port_id_table,
.num_ports = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_create_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_callback,
};
static struct usb_serial_driver edgeport_2port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "edgeport_ti_2",
},
.description = "Edgeport TI 2 port adapter",
.usb_driver = &io_driver,
.id_table = edgeport_2port_id_table,
.num_ports = 2,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_create_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
.write = edge_write,
.write_room = edge_write_room,
.chars_in_buffer = edge_chars_in_buffer,
.break_ctl = edge_break,
.read_int_callback = edge_interrupt_callback,
.read_bulk_callback = edge_bulk_in_callback,
.write_bulk_callback = edge_bulk_out_callback,
};
static int __init edgeport_init(void)
{
int retval;
retval = usb_serial_register(&edgeport_1port_device);
if (retval)
goto failed_1port_device_register;
retval = usb_serial_register(&edgeport_2port_device);
if (retval)
goto failed_2port_device_register;
retval = usb_register(&io_driver);
if (retval)
goto failed_usb_register;
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
return 0;
failed_usb_register:
usb_serial_deregister(&edgeport_2port_device);
failed_2port_device_register:
usb_serial_deregister(&edgeport_1port_device);
failed_1port_device_register:
return retval;
}
static void __exit edgeport_exit(void)
{
usb_deregister(&io_driver);
usb_serial_deregister(&edgeport_1port_device);
usb_serial_deregister(&edgeport_2port_device);
}
module_init(edgeport_init);
module_exit(edgeport_exit);
/* Module information */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("edgeport/down3.bin");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
module_param(closing_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs");
module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_cpu_rev,
"Ignore the cpu revision when connecting to a device");
module_param(default_uart_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
| gpl-2.0 |
atinm/android_kernel_samsung_manta | arch/powerpc/platforms/pseries/processor_idle.c | 1947 | 7642 | /*
* processor_idle - idle state cpuidle driver.
* Adapted from drivers/idle/intel_idle.c and
* drivers/acpi/processor_idle.c
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
#include "plpar_wrappers.h"
#include "pseries.h"
struct cpuidle_driver pseries_idle_driver = {
.name = "pseries_idle",
.owner = THIS_MODULE,
};
#define MAX_IDLE_STATE_COUNT 2
static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;
void update_smt_snooze_delay(int snooze)
{
struct cpuidle_driver *drv = cpuidle_get_driver();
if (drv)
drv->states[0].target_residency = snooze;
}
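/* idle_loop_prolog()/idle_loop_epilog() bracket every idle entry:
* the prolog snapshots wall-clock time and the PURR (Processor
* Utilization of Resources Register) and flags the vcpu idle to the
* hypervisor; the epilog credits the elapsed PURR cycles to
* wait_state_cycles and returns the residency in microseconds.
*/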
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
*kt_before = ktime_get_real();
*in_purr = mfspr(SPRN_PURR);
/*
* Indicate to the HV that we are idle. Now would be
* a good time to find other work to dispatch.
*/
get_lppaca()->idle = 1;
}
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->idle = 0;
return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
}
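/* snooze_loop: poll at reduced SMT thread priority (HMT_low()/
* HMT_very_low()) until the snooze interval expires or a reschedule
* is needed; a negative snooze value keeps the loop polling until
* work arrives.
*/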
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
unsigned long in_purr;
ktime_t kt_before;
unsigned long start_snooze;
long snooze = drv->states[0].target_residency;
idle_loop_prolog(&in_purr, &kt_before);
if (snooze) {
start_snooze = get_tb() + snooze * tb_ticks_per_usec;
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
while ((snooze < 0) || (get_tb() < start_snooze)) {
if (need_resched() || cpu_is_offline(dev->cpu))
goto out;
ppc64_runlatch_off();
HMT_low();
HMT_very_low();
}
HMT_medium();
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb();
local_irq_disable();
}
out:
HMT_medium();
dev->last_residency =
(int)idle_loop_epilog(in_purr, kt_before);
return index;
}
static void check_and_cede_processor(void)
{
/*
* Ensure our interrupt state is properly tracked, and check
* whether an interrupt occurred while we were soft-disabled.
*/
if (prep_irq_for_idle()) {
cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
/* Ensure that H_CEDE returns with IRQs on */
if (WARN_ON(!(mfmsr() & MSR_EE)))
__hard_irq_enable();
#endif
}
}
static int dedicated_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
unsigned long in_purr;
ktime_t kt_before;
idle_loop_prolog(&in_purr, &kt_before);
get_lppaca()->donate_dedicated_cpu = 1;
ppc64_runlatch_off();
HMT_medium();
check_and_cede_processor();
get_lppaca()->donate_dedicated_cpu = 0;
dev->last_residency =
(int)idle_loop_epilog(in_purr, kt_before);
return index;
}
static int shared_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
unsigned long in_purr;
ktime_t kt_before;
idle_loop_prolog(&in_purr, &kt_before);
/*
* Yield the processor to the hypervisor. We return if an
* external interrupt occurs (interrupts are delivered before
* we get back here) or if a prod arrives from another
* processor. When we return here, external interrupts are
* enabled.
*/
check_and_cede_processor();
dev->last_residency =
(int)idle_loop_epilog(in_purr, kt_before);
return index;
}
/*
* States for dedicated partition case.
*/
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
{ /* Snooze */
.name = "snooze",
.desc = "snooze",
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 0,
.target_residency = 0,
.enter = &snooze_loop },
{ /* CEDE */
.name = "CEDE",
.desc = "CEDE",
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 10,
.enter = &dedicated_cede_loop },
};
/*
* States for shared partition case.
*/
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
{ /* Shared Cede */
.name = "Shared Cede",
.desc = "Shared Cede",
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 0,
.target_residency = 0,
.enter = &shared_cede_loop },
};
int pseries_notify_cpuidle_add_cpu(int cpu)
{
struct cpuidle_device *dev =
per_cpu_ptr(pseries_cpuidle_devices, cpu);
if (dev && cpuidle_get_driver()) {
cpuidle_disable_device(dev);
cpuidle_enable_device(dev);
}
return 0;
}
/*
* pseries_cpuidle_driver_init()
*/
static int pseries_cpuidle_driver_init(void)
{
int idle_state;
struct cpuidle_driver *drv = &pseries_idle_driver;
drv->state_count = 0;
for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
if (idle_state > max_idle_state)
break;
/* is the state not enabled? */
if (cpuidle_state_table[idle_state].enter == NULL)
continue;
drv->states[drv->state_count] = /* structure copy */
cpuidle_state_table[idle_state];
if (cpuidle_state_table == dedicated_states)
drv->states[drv->state_count].target_residency =
__get_cpu_var(smt_snooze_delay);
drv->state_count += 1;
}
return 0;
}
/* pseries_idle_devices_uninit(void)
* unregister cpuidle devices and de-allocate memory
*/
static void pseries_idle_devices_uninit(void)
{
int i;
struct cpuidle_device *dev;
for_each_possible_cpu(i) {
dev = per_cpu_ptr(pseries_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
free_percpu(pseries_cpuidle_devices);
return;
}
/* pseries_idle_devices_init()
* allocate, initialize and register cpuidle device
*/
static int pseries_idle_devices_init(void)
{
int i;
struct cpuidle_driver *drv = &pseries_idle_driver;
struct cpuidle_device *dev;
pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
if (pseries_cpuidle_devices == NULL)
return -ENOMEM;
for_each_possible_cpu(i) {
dev = per_cpu_ptr(pseries_cpuidle_devices, i);
dev->state_count = drv->state_count;
dev->cpu = i;
if (cpuidle_register_device(dev)) {
printk(KERN_DEBUG
"cpuidle_register_device %d failed!\n", i);
return -EIO;
}
}
return 0;
}
/*
* pseries_idle_probe()
* Choose state table for shared versus dedicated partition
*/
static int pseries_idle_probe(void)
{
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return -ENODEV;
if (cpuidle_disable != IDLE_NO_OVERRIDE)
return -ENODEV;
if (max_idle_state == 0) {
printk(KERN_DEBUG "pseries processor idle disabled.\n");
return -EPERM;
}
if (get_lppaca()->shared_proc)
cpuidle_state_table = shared_states;
else
cpuidle_state_table = dedicated_states;
return 0;
}
static int __init pseries_processor_idle_init(void)
{
int retval;
retval = pseries_idle_probe();
if (retval)
return retval;
pseries_cpuidle_driver_init();
retval = cpuidle_register_driver(&pseries_idle_driver);
if (retval) {
printk(KERN_DEBUG "Registration of pseries driver failed.\n");
return retval;
}
retval = pseries_idle_devices_init();
if (retval) {
pseries_idle_devices_uninit();
cpuidle_unregister_driver(&pseries_idle_driver);
return retval;
}
printk(KERN_DEBUG "pseries_idle_driver registered\n");
return 0;
}
static void __exit pseries_processor_idle_exit(void)
{
pseries_idle_devices_uninit();
cpuidle_unregister_driver(&pseries_idle_driver);
return;
}
module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);
MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Gerrett84/android_kernel_smdk4210_tw | fs/ceph/super.c | 1947 | 22695 |
#include <linux/ceph/ceph_debug.h>
#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>
#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
* Ceph superblock operations
*
* Handle the basics of mounting, unmounting.
*/
/*
* super ops
*/
static void ceph_put_super(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
dout("put_super\n");
ceph_mdsc_close_sessions(fsc->mdsc);
/*
* ensure we release the bdi before put_anon_super releases
* the device name.
*/
if (s->s_bdi == &fsc->backing_dev_info) {
bdi_unregister(&fsc->backing_dev_info);
s->s_bdi = NULL;
}
return;
}
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
struct ceph_monmap *monmap = fsc->client->monc.monmap;
struct ceph_statfs st;
u64 fsid;
int err;
dout("statfs\n");
err = ceph_monc_do_statfs(&fsc->client->monc, &st);
if (err < 0)
return err;
/* fill in kstatfs */
buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
/*
* express utilization in terms of large blocks to avoid
* overflow on 32-bit machines.
*/
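/* Example, assuming CEPH_BLOCK_SHIFT is 22 (4 MiB blocks) as in
* mainline: st.kb >> (22 - 10) turns a KiB count into a count of
* 4 MiB blocks, since 2^22 bytes == 2^12 KiB.
*/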
buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >>
(CEPH_BLOCK_SHIFT-10);
buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
buf->f_files = le64_to_cpu(st.num_objects);
buf->f_ffree = -1;
buf->f_namelen = NAME_MAX;
buf->f_frsize = PAGE_CACHE_SIZE;
/* leave fsid little-endian, regardless of host endianness */
fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
buf->f_fsid.val[0] = fsid & 0xffffffff;
buf->f_fsid.val[1] = fsid >> 32;
return 0;
}
static int ceph_sync_fs(struct super_block *sb, int wait)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
if (!wait) {
dout("sync_fs (non-blocking)\n");
ceph_flush_dirty_caps(fsc->mdsc);
dout("sync_fs (non-blocking) done\n");
return 0;
}
dout("sync_fs (blocking)\n");
ceph_osdc_sync(&fsc->client->osdc);
ceph_mdsc_sync(fsc->mdsc);
dout("sync_fs (blocking) done\n");
return 0;
}
/*
* mount options
*/
enum {
Opt_wsize,
Opt_rsize,
Opt_caps_wanted_delay_min,
Opt_caps_wanted_delay_max,
Opt_cap_release_safety,
Opt_readdir_max_entries,
Opt_readdir_max_bytes,
Opt_congestion_kb,
Opt_last_int,
/* int args above */
Opt_snapdirname,
Opt_last_string,
/* string args above */
Opt_dirstat,
Opt_nodirstat,
Opt_rbytes,
Opt_norbytes,
Opt_noasyncreaddir,
Opt_ino32,
};
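/* Opt_last_int and Opt_last_string are sentinels: tokens numerically
* below Opt_last_int take an integer argument, tokens between the two
* sentinels take a string, and the rest are plain flags (see
* parse_fsopt_token()).
*/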
static match_table_t fsopt_tokens = {
{Opt_wsize, "wsize=%d"},
{Opt_rsize, "rsize=%d"},
{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
{Opt_cap_release_safety, "cap_release_safety=%d"},
{Opt_readdir_max_entries, "readdir_max_entries=%d"},
{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
{Opt_congestion_kb, "write_congestion_kb=%d"},
/* int args above */
{Opt_snapdirname, "snapdirname=%s"},
/* string args above */
{Opt_dirstat, "dirstat"},
{Opt_nodirstat, "nodirstat"},
{Opt_rbytes, "rbytes"},
{Opt_norbytes, "norbytes"},
{Opt_noasyncreaddir, "noasyncreaddir"},
{Opt_ino32, "ino32"},
{-1, NULL}
};
static int parse_fsopt_token(char *c, void *private)
{
struct ceph_mount_options *fsopt = private;
substring_t argstr[MAX_OPT_ARGS];
int token, intval, ret;
token = match_token((char *)c, fsopt_tokens, argstr);
if (token < 0)
return -EINVAL;
if (token < Opt_last_int) {
ret = match_int(&argstr[0], &intval);
if (ret < 0) {
pr_err("bad mount option arg (not int) "
"at '%s'\n", c);
return ret;
}
dout("got int token %d val %d\n", token, intval);
} else if (token > Opt_last_int && token < Opt_last_string) {
dout("got string token %d val %s\n", token,
argstr[0].from);
} else {
dout("got token %d\n", token);
}
switch (token) {
case Opt_snapdirname:
kfree(fsopt->snapdir_name);
fsopt->snapdir_name = kstrndup(argstr[0].from,
argstr[0].to-argstr[0].from,
GFP_KERNEL);
if (!fsopt->snapdir_name)
return -ENOMEM;
break;
/* misc */
case Opt_wsize:
fsopt->wsize = intval;
break;
case Opt_rsize:
fsopt->rsize = intval;
break;
case Opt_caps_wanted_delay_min:
fsopt->caps_wanted_delay_min = intval;
break;
case Opt_caps_wanted_delay_max:
fsopt->caps_wanted_delay_max = intval;
break;
case Opt_readdir_max_entries:
fsopt->max_readdir = intval;
break;
case Opt_readdir_max_bytes:
fsopt->max_readdir_bytes = intval;
break;
case Opt_congestion_kb:
fsopt->congestion_kb = intval;
break;
case Opt_dirstat:
fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
break;
case Opt_nodirstat:
fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
break;
case Opt_rbytes:
fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
break;
case Opt_norbytes:
fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
break;
case Opt_noasyncreaddir:
fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
break;
case Opt_ino32:
fsopt->flags |= CEPH_MOUNT_OPT_INO32;
break;
default:
BUG_ON(token);
}
return 0;
}
static void destroy_mount_options(struct ceph_mount_options *args)
{
dout("destroy_mount_options %p\n", args);
kfree(args->snapdir_name);
kfree(args);
}
static int strcmp_null(const char *s1, const char *s2)
{
if (!s1 && !s2)
return 0;
if (s1 && !s2)
return -1;
if (!s1 && s2)
return 1;
return strcmp(s1, s2);
}
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
struct ceph_options *new_opt,
struct ceph_fs_client *fsc)
{
struct ceph_mount_options *fsopt1 = new_fsopt;
struct ceph_mount_options *fsopt2 = fsc->mount_options;
int ofs = offsetof(struct ceph_mount_options, snapdir_name);
int ret;
ret = memcmp(fsopt1, fsopt2, ofs);
if (ret)
return ret;
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
return ceph_compare_options(new_opt, fsc->client);
}
static int parse_mount_options(struct ceph_mount_options **pfsopt,
struct ceph_options **popt,
int flags, char *options,
const char *dev_name,
const char **path)
{
struct ceph_mount_options *fsopt;
const char *dev_name_end;
int err = -ENOMEM;
fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
if (!fsopt)
return -ENOMEM;
dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
fsopt->sb_flags = flags;
fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
fsopt->rsize = CEPH_RSIZE_DEFAULT;
fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
/* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
err = -EINVAL;
if (!dev_name)
goto out;
*path = strstr(dev_name, ":/");
if (*path == NULL) {
pr_err("device name is missing path (no :/ in %s)\n",
dev_name);
goto out;
}
dev_name_end = *path;
dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
/* path on server */
*path += 2;
dout("server path '%s'\n", *path);
err = ceph_parse_options(popt, options, dev_name, dev_name_end,
parse_fsopt_token, (void *)fsopt);
if (err)
goto out;
/* success */
*pfsopt = fsopt;
return 0;
out:
destroy_mount_options(fsopt);
return err;
}
/**
* ceph_show_options - Show mount options in /proc/mounts
* @m: seq_file to write to
* @mnt: mount descriptor
*/
static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
struct ceph_mount_options *fsopt = fsc->mount_options;
struct ceph_options *opt = fsc->client->options;
if (opt->flags & CEPH_OPT_FSID)
seq_printf(m, ",fsid=%pU", &opt->fsid);
if (opt->flags & CEPH_OPT_NOSHARE)
seq_puts(m, ",noshare");
if (opt->flags & CEPH_OPT_NOCRC)
seq_puts(m, ",nocrc");
if (opt->name)
seq_printf(m, ",name=%s", opt->name);
if (opt->key)
seq_puts(m, ",secret=<hidden>");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
seq_printf(m, ",osdkeepalivetimeout=%d",
opt->osd_keepalive_timeout);
if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
seq_puts(m, ",dirstat");
if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
seq_puts(m, ",norbytes");
if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
seq_puts(m, ",noasyncreaddir");
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
seq_printf(m, ",rsize=%d", fsopt->rsize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
seq_printf(m, ",caps_wanted_delay_min=%d",
fsopt->caps_wanted_delay_min);
if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
seq_printf(m, ",caps_wanted_delay_max=%d",
fsopt->caps_wanted_delay_max);
if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
seq_printf(m, ",cap_release_safety=%d",
fsopt->cap_release_safety);
if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
return 0;
}
/*
* handle any mon messages the standard library doesn't understand.
* return error if we don't either.
*/
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
struct ceph_fs_client *fsc = client->private;
int type = le16_to_cpu(msg->hdr.type);
switch (type) {
case CEPH_MSG_MDS_MAP:
ceph_mdsc_handle_map(fsc->mdsc, msg);
return 0;
default:
return -1;
}
}
/*
* create a new fs client
*/
struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
struct ceph_options *opt)
{
struct ceph_fs_client *fsc;
int err = -ENOMEM;
fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
if (!fsc)
return ERR_PTR(-ENOMEM);
fsc->client = ceph_create_client(opt, fsc);
if (IS_ERR(fsc->client)) {
err = PTR_ERR(fsc->client);
goto fail;
}
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
fsc->client->supported_features |= CEPH_FEATURE_FLOCK |
CEPH_FEATURE_DIRLAYOUTHASH;
fsc->client->monc.want_mdsmap = 1;
fsc->mount_options = fsopt;
fsc->sb = NULL;
fsc->mount_state = CEPH_MOUNT_MOUNTING;
atomic_long_set(&fsc->writeback_count, 0);
err = bdi_init(&fsc->backing_dev_info);
if (err < 0)
goto fail_client;
err = -ENOMEM;
/*
* The number of concurrent works can be high but they don't need
* to be processed in parallel, limit concurrency.
*/
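/* The third alloc_workqueue() argument is max_active = 1, limiting
* each of these workqueues to one in-flight work item per CPU.
*/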
fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
if (fsc->wb_wq == NULL)
goto fail_bdi;
fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
if (fsc->pg_inv_wq == NULL)
goto fail_wb_wq;
fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
if (fsc->trunc_wq == NULL)
goto fail_pg_inv_wq;
/* set up mempools */
err = -ENOMEM;
fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
if (!fsc->wb_pagevec_pool)
goto fail_trunc_wq;
/* caps */
fsc->min_caps = fsopt->max_readdir;
return fsc;
fail_trunc_wq:
destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
destroy_workqueue(fsc->wb_wq);
fail_bdi:
bdi_destroy(&fsc->backing_dev_info);
fail_client:
ceph_destroy_client(fsc->client);
fail:
kfree(fsc);
return ERR_PTR(err);
}
void destroy_fs_client(struct ceph_fs_client *fsc)
{
dout("destroy_fs_client %p\n", fsc);
destroy_workqueue(fsc->wb_wq);
destroy_workqueue(fsc->pg_inv_wq);
destroy_workqueue(fsc->trunc_wq);
bdi_destroy(&fsc->backing_dev_info);
mempool_destroy(fsc->wb_pagevec_pool);
destroy_mount_options(fsc->mount_options);
ceph_fs_debugfs_cleanup(fsc);
ceph_destroy_client(fsc->client);
kfree(fsc);
dout("destroy_fs_client %p done\n", fsc);
}
/*
* caches
*/
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
static void ceph_inode_init_once(void *foo)
{
struct ceph_inode_info *ci = foo;
inode_init_once(&ci->vfs_inode);
}
static int __init init_caches(void)
{
ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
sizeof(struct ceph_inode_info),
__alignof__(struct ceph_inode_info),
(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
ceph_inode_init_once);
if (ceph_inode_cachep == NULL)
return -ENOMEM;
ceph_cap_cachep = KMEM_CACHE(ceph_cap,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
if (ceph_cap_cachep == NULL)
goto bad_cap;
ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
if (ceph_dentry_cachep == NULL)
goto bad_dentry;
ceph_file_cachep = KMEM_CACHE(ceph_file_info,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
if (ceph_file_cachep == NULL)
goto bad_file;
return 0;
bad_file:
kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
kmem_cache_destroy(ceph_inode_cachep);
return -ENOMEM;
}
static void destroy_caches(void)
{
kmem_cache_destroy(ceph_inode_cachep);
kmem_cache_destroy(ceph_cap_cachep);
kmem_cache_destroy(ceph_dentry_cachep);
kmem_cache_destroy(ceph_file_cachep);
}
/*
* ceph_umount_begin - initiate forced umount. Tear down the
* mount, skipping steps that may hang while waiting for server(s).
*/
static void ceph_umount_begin(struct super_block *sb)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
dout("ceph_umount_begin - starting forced umount\n");
if (!fsc)
return;
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
return;
}
static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.destroy_inode = ceph_destroy_inode,
.write_inode = ceph_write_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
.show_options = ceph_show_options,
.statfs = ceph_statfs,
.umount_begin = ceph_umount_begin,
};
/*
* Bootstrap mount by opening the root directory. Note the mount
* @started time from caller, and time out if this takes too long.
*/
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
const char *path,
unsigned long started)
{
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req = NULL;
int err;
struct dentry *root;
/* open dir */
dout("open_root_inode opening '%s'\n", path);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return ERR_CAST(req);
req->r_path1 = kstrdup(path, GFP_NOFS);
req->r_ino1.ino = CEPH_INO_ROOT;
req->r_ino1.snap = CEPH_NOSNAP;
req->r_started = started;
req->r_timeout = fsc->client->options->mount_timeout * HZ;
req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err == 0) {
dout("open_root_inode success\n");
if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
fsc->sb->s_root == NULL)
root = d_alloc_root(req->r_target_inode);
else
root = d_obtain_alias(req->r_target_inode);
req->r_target_inode = NULL;
dout("open_root_inode success, root dentry is %p\n", root);
} else {
root = ERR_PTR(err);
}
ceph_mdsc_put_request(req);
return root;
}
/*
* mount: join the ceph cluster, and open root directory.
*/
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
const char *path)
{
int err;
unsigned long started = jiffies; /* note the start time */
struct dentry *root;
int first = 0; /* first vfsmount for this super_block */
dout("mount start\n");
mutex_lock(&fsc->client->mount_mutex);
err = __ceph_open_session(fsc->client, started);
if (err < 0)
goto out;
dout("mount opening root\n");
root = open_root_dentry(fsc, "", started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto out;
}
if (fsc->sb->s_root) {
dput(root);
} else {
fsc->sb->s_root = root;
first = 1;
err = ceph_fs_debugfs_init(fsc);
if (err < 0)
goto fail;
}
if (path[0] == 0) {
dget(root);
} else {
dout("mount opening base mountpoint\n");
root = open_root_dentry(fsc, path, started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto fail;
}
}
fsc->mount_state = CEPH_MOUNT_MOUNTED;
dout("mount success\n");
mutex_unlock(&fsc->client->mount_mutex);
return root;
out:
mutex_unlock(&fsc->client->mount_mutex);
return ERR_PTR(err);
fail:
if (first) {
dput(fsc->sb->s_root);
fsc->sb->s_root = NULL;
}
goto out;
}
static int ceph_set_super(struct super_block *s, void *data)
{
struct ceph_fs_client *fsc = data;
int ret;
dout("set_super %p data %p\n", s, data);
s->s_flags = fsc->mount_options->sb_flags;
s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
s->s_fs_info = fsc;
fsc->sb = s;
s->s_op = &ceph_super_ops;
s->s_export_op = &ceph_export_ops;
s->s_time_gran = 1000; /* 1000 ns == 1 us */
ret = set_anon_super(s, NULL); /* what is that second arg for? */
if (ret != 0)
goto fail;
return ret;
fail:
s->s_fs_info = NULL;
fsc->sb = NULL;
return ret;
}
/*
* share superblock if same fs AND options
*/
static int ceph_compare_super(struct super_block *sb, void *data)
{
struct ceph_fs_client *new = data;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
struct ceph_fs_client *other = ceph_sb_to_client(sb);
dout("ceph_compare_super %p\n", sb);
if (compare_mount_options(fsopt, opt, other)) {
dout("monitor(s)/mount options don't match\n");
return 0;
}
if ((opt->flags & CEPH_OPT_FSID) &&
ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
dout("fsid doesn't match\n");
return 0;
}
if (fsopt->sb_flags != other->mount_options->sb_flags) {
dout("flags differ\n");
return 0;
}
return 1;
}
/*
* construct our own bdi so we can control readahead, etc.
*/
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
static int ceph_register_bdi(struct super_block *sb,
struct ceph_fs_client *fsc)
{
int err;
/* set ra_pages based on rsize mount option? */
if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
fsc->backing_dev_info.ra_pages =
(fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
>> PAGE_SHIFT;
err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
atomic_long_inc_return(&bdi_seq));
if (!err)
sb->s_bdi = &fsc->backing_dev_info;
return err;
}
static struct dentry *ceph_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct super_block *sb;
struct ceph_fs_client *fsc;
struct dentry *res;
int err;
int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
const char *path = NULL;
struct ceph_mount_options *fsopt = NULL;
struct ceph_options *opt = NULL;
dout("ceph_mount\n");
err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
if (err < 0) {
res = ERR_PTR(err);
goto out_final;
}
/* create client (which we may/may not use) */
fsc = create_fs_client(fsopt, opt);
if (IS_ERR(fsc)) {
res = ERR_CAST(fsc);
kfree(fsopt);
kfree(opt);
goto out_final;
}
err = ceph_mdsc_init(fsc);
if (err < 0) {
res = ERR_PTR(err);
goto out;
}
if (ceph_test_opt(fsc->client, NOSHARE))
compare_super = NULL;
sb = sget(fs_type, compare_super, ceph_set_super, fsc);
if (IS_ERR(sb)) {
res = ERR_CAST(sb);
goto out;
}
if (ceph_sb_to_client(sb) != fsc) {
ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
fsc = ceph_sb_to_client(sb);
dout("get_sb got existing client %p\n", fsc);
} else {
dout("get_sb using new client %p\n", fsc);
err = ceph_register_bdi(sb, fsc);
if (err < 0) {
res = ERR_PTR(err);
goto out_splat;
}
}
res = ceph_real_mount(fsc, path);
if (IS_ERR(res))
goto out_splat;
dout("root %p inode %p ino %llx.%llx\n", res,
res->d_inode, ceph_vinop(res->d_inode));
return res;
out_splat:
ceph_mdsc_close_sessions(fsc->mdsc);
deactivate_locked_super(sb);
goto out_final;
out:
ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
out_final:
dout("ceph_mount fail %ld\n", PTR_ERR(res));
return res;
}
static void ceph_kill_sb(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
dout("kill_sb %p\n", s);
ceph_mdsc_pre_umount(fsc->mdsc);
kill_anon_super(s); /* will call put_super after sb is r/o */
ceph_mdsc_destroy(fsc);
destroy_fs_client(fsc);
}
static struct file_system_type ceph_fs_type = {
.owner = THIS_MODULE,
.name = "ceph",
.mount = ceph_mount,
.kill_sb = ceph_kill_sb,
.fs_flags = FS_RENAME_DOES_D_MOVE,
};
#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)
static int __init init_ceph(void)
{
int ret = init_caches();
if (ret)
goto out;
ret = register_filesystem(&ceph_fs_type);
if (ret)
goto out_icache;
pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
return 0;
out_icache:
destroy_caches();
out:
return ret;
}
static void __exit exit_ceph(void)
{
dout("exit_ceph\n");
unregister_filesystem(&ceph_fs_type);
destroy_caches();
}
module_init(init_ceph);
module_exit(exit_ceph);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");
| gpl-2.0 |
chase2534/gtab47.freekern | arch/arm/mach-s3c64xx/mach-anw6410.c | 2203 | 6035 | /* linux/arch/arm/mach-s3c64xx/mach-anw6410.c
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
* Copyright 2009 Kwangwoo Lee
* Kwangwoo Lee <kwangwoo.lee@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/dm9000.h>
#include <video/platform_lcd.h>
#include <video/samsung_fimd.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <plat/fb.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <mach/regs-gpio.h>
#include <plat/samsung-time.h>
#include "common.h"
#include "regs-modem.h"
/* DM9000 */
#define ANW6410_PA_DM9000 (0x18000000)
/* A hardware buffer to control external devices is mapped at 0x30000000.
* It cannot be read, so the current status must be kept in anw6410_extdev_status.
*/
#define ANW6410_VA_EXTDEV S3C_ADDR(0x02000000)
#define ANW6410_PA_EXTDEV (0x30000000)
#define ANW6410_EN_DM9000 (1<<11)
#define ANW6410_EN_LCD (1<<14)
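/* The enable bits are written into the upper half-word of the 32-bit
* register (bit << 16); the hardware meaning of the shift is not
* documented here, so this note only reflects the usage below.
*/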
static __u32 anw6410_extdev_status;
static struct s3c2410_uartcfg anw6410_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = 0x3c5,
.ulcon = 0x03,
.ufcon = 0x51,
},
};
/* framebuffer and LCD setup. */
static void __init anw6410_lcd_mode_set(void)
{
u32 tmp;
/* set the LCD type */
tmp = __raw_readl(S3C64XX_SPCON);
tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK;
tmp |= S3C64XX_SPCON_LCD_SEL_RGB;
__raw_writel(tmp, S3C64XX_SPCON);
/* remove the LCD bypass */
tmp = __raw_readl(S3C64XX_MODEM_MIFPCON);
tmp &= ~MIFPCON_LCD_BYPASS;
__raw_writel(tmp, S3C64XX_MODEM_MIFPCON);
}
/* GPF1 = LCD panel power
* GPF4 = LCD backlight control
*/
static void anw6410_lcd_power_set(struct plat_lcd_data *pd,
unsigned int power)
{
if (power) {
anw6410_extdev_status |= (ANW6410_EN_LCD << 16);
__raw_writel(anw6410_extdev_status, ANW6410_VA_EXTDEV);
gpio_direction_output(S3C64XX_GPF(1), 1);
gpio_direction_output(S3C64XX_GPF(4), 1);
} else {
anw6410_extdev_status &= ~(ANW6410_EN_LCD << 16);
__raw_writel(anw6410_extdev_status, ANW6410_VA_EXTDEV);
gpio_direction_output(S3C64XX_GPF(1), 0);
gpio_direction_output(S3C64XX_GPF(4), 0);
}
}
static struct plat_lcd_data anw6410_lcd_power_data = {
.set_power = anw6410_lcd_power_set,
};
static struct platform_device anw6410_lcd_powerdev = {
.name = "platform-lcd",
.dev.parent = &s3c_device_fb.dev,
.dev.platform_data = &anw6410_lcd_power_data,
};
static struct s3c_fb_pd_win anw6410_fb_win0 = {
.max_bpp = 32,
.default_bpp = 16,
.xres = 800,
.yres = 480,
};
static struct fb_videomode anw6410_lcd_timing = {
.left_margin = 8,
.right_margin = 13,
.upper_margin = 7,
.lower_margin = 5,
.hsync_len = 3,
.vsync_len = 1,
.xres = 800,
.yres = 480,
};
/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = {
.setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
.vtiming = &anw6410_lcd_timing,
.win[0] = &anw6410_fb_win0,
.vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
.vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
};
/* DM9000AEP 10/100 ethernet controller */
static void __init anw6410_dm9000_enable(void)
{
anw6410_extdev_status |= (ANW6410_EN_DM9000 << 16);
__raw_writel(anw6410_extdev_status, ANW6410_VA_EXTDEV);
}
static struct resource anw6410_dm9000_resource[] = {
[0] = DEFINE_RES_MEM(ANW6410_PA_DM9000, 4),
[1] = DEFINE_RES_MEM(ANW6410_PA_DM9000 + 4, 501),
[2] = DEFINE_RES_NAMED(IRQ_EINT(15), 1, NULL, IORESOURCE_IRQ \
| IRQF_TRIGGER_HIGH),
};
static struct dm9000_plat_data anw6410_dm9000_pdata = {
.flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
/* dev_addr can be set to provide hwaddr. */
};
static struct platform_device anw6410_device_eth = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(anw6410_dm9000_resource),
.resource = anw6410_dm9000_resource,
.dev = {
.platform_data = &anw6410_dm9000_pdata,
},
};
static struct map_desc anw6410_iodesc[] __initdata = {
{
.virtual = (unsigned long)ANW6410_VA_EXTDEV,
.pfn = __phys_to_pfn(ANW6410_PA_EXTDEV),
.length = SZ_64K,
.type = MT_DEVICE,
},
};
static struct platform_device *anw6410_devices[] __initdata = {
&s3c_device_fb,
&anw6410_lcd_powerdev,
&anw6410_device_eth,
};
static void __init anw6410_map_io(void)
{
s3c64xx_init_io(anw6410_iodesc, ARRAY_SIZE(anw6410_iodesc));
s3c24xx_init_clocks(12000000);
s3c24xx_init_uarts(anw6410_uartcfgs, ARRAY_SIZE(anw6410_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
anw6410_lcd_mode_set();
}
static void __init anw6410_machine_init(void)
{
s3c_fb_set_platdata(&anw6410_lcd_pdata);
gpio_request(S3C64XX_GPF(1), "panel power");
gpio_request(S3C64XX_GPF(4), "LCD backlight");
anw6410_dm9000_enable();
platform_add_devices(anw6410_devices, ARRAY_SIZE(anw6410_devices));
}
MACHINE_START(ANW6410, "A&W6410")
/* Maintainer: Kwangwoo Lee <kwangwoo.lee@gmail.com> */
.atag_offset = 0x100,
.init_irq = s3c6410_init_irq,
.map_io = anw6410_map_io,
.init_machine = anw6410_machine_init,
.init_late = s3c64xx_init_late,
.init_time = samsung_timer_init,
.restart = s3c64xx_restart,
MACHINE_END
| gpl-2.0 |
mkl0301/linux | drivers/scsi/qla2xxx/qla_mid.c | 2203 | 21694 | /*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
if (vha->vp_idx && vha->timer_active) {
del_timer_sync(&vha->timer);
vha->timer_active = 0;
}
}
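/* Allocate a virtual-port index from the vp_idx_map bitmap under
* vport_lock; a returned id greater than max_npiv_vports tells the
* caller the table is exhausted.
*/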
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
uint32_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
/* Find an empty slot and assign an vp_id */
mutex_lock(&ha->vport_lock);
vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
if (vp_id > ha->max_npiv_vports) {
ql_dbg(ql_dbg_vport, vha, 0xa000,
"vp_id %d is bigger than max-supported %d.\n",
vp_id, ha->max_npiv_vports);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
set_bit(vp_id, ha->vp_idx_map);
ha->num_vhosts++;
vha->vp_idx = vp_id;
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
qlt_update_vp_map(vha, SET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
uint16_t vp_id;
struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
mutex_lock(&ha->vport_lock);
/*
* Wait for all pending activities to finish before removing vport from
* the list.
* Lock needs to be held for safe removal from the list (it
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
spin_lock_irqsave(&ha->vport_slock, flags);
while (atomic_read(&vha->vref_count)) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
msleep(500);
spin_lock_irqsave(&ha->vport_slock, flags);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
ha->num_vhosts--;
clear_bit(vp_id, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
}
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
scsi_qla_host_t *vha;
struct scsi_qla_host *tvha;
unsigned long flags;
spin_lock_irqsave(&ha->vport_slock, flags);
/* Locate matching device in database. */
list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
return vha;
}
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
return NULL;
}
/*
* qla2x00_mark_vp_devices_dead
* Updates fcport state when device goes offline.
*
* Input:
* ha = adapter block pointer.
* fcport = port structure pointer.
*
* Return:
* None.
*
* Context:
*/
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
/*
* !!! NOTE !!!
* This function, if called in contexts other than vp create, disable
* or delete, please make sure this is synchronized with the
* delete thread.
*/
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
fcport->loop_id, fcport->vha->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
}
}
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
unsigned long flags;
int ret;
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
if (ret == QLA_SUCCESS) {
fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
} else {
fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
return -1;
}
return 0;
}
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Check if physical ha port is Up */
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
!(ha->current_topology & ISP_CFG_F)) {
vha->vp_err_state = VP_ERR_PORTDWN;
fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
goto enable_failed;
}
/* Initialize the new vport unless it is a persistent port */
mutex_lock(&ha->vport_lock);
ret = qla24xx_modify_vp_config(vha);
mutex_unlock(&ha->vport_lock);
if (ret != QLA_SUCCESS) {
fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
goto enable_failed;
}
ql_dbg(ql_dbg_taskm, vha, 0x801a,
"Virtual port with id: %d - Enabled.\n", vha->vp_idx);
return 0;
enable_failed:
ql_dbg(ql_dbg_taskm, vha, 0x801b,
"Virtual port with id: %d - Disabled.\n", vha->vp_idx);
return 1;
}
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
struct fc_vport *fc_vport;
int ret;
fc_vport = vha->fc_vport;
ql_dbg(ql_dbg_vport, vha, 0xa002,
"%s: change request #3.\n", __func__);
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
"receiving of RSCN requests: 0x%x.\n", ret);
return;
} else {
/* Corresponds to SCR enabled */
clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
}
vha->flags.online = 1;
if (qla24xx_configure_vhba(vha))
return;
atomic_set(&vha->vp_state, VP_ACTIVE);
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
scsi_qla_host_t *vha;
struct qla_hw_data *ha = rsp->hw;
int i = 0;
unsigned long flags;
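/*
* Descriptive note: each vport is pinned via vref_count before
* vport_slock is dropped, so the async-event handler can run unlocked
* while qla24xx_deallocate_vp_id() spins on vref_count; the lock is
* retaken before the reference is dropped and the list walk resumes.
*/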
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vha, &ha->vp_list, list) {
if (vha->vp_idx) {
atomic_inc(&vha->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
switch (mb[0]) {
case MBA_LIP_OCCURRED:
case MBA_LOOP_UP:
case MBA_LOOP_DOWN:
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
case MBA_PORT_UPDATE:
case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
}
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
}
i++;
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
/*
* Physical port will do most of the abort and recovery work. We can
* just treat it as a loop down
*/
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
qla2x00_mark_all_devices_lost(vha, 0);
} else {
if (!atomic_read(&vha->loop_down_timer))
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
}
/*
* To exclusively reset vport, we need to log it out first. Note: this
* control_vp can fail if ISP reset is already issued, this is
* expected, as the vp would be already logged out due to ISP reset.
*/
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
ql_dbg(ql_dbg_taskm, vha, 0x801d,
"Scheduling enable of Vport %d.\n", vha->vp_idx);
return qla24xx_enable_vp(vha);
}
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
ql_dbg(ql_dbg_dpc, vha, 0x4014,
"Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4015,
"Configure VP end.\n");
return 0;
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, vha, 0x4016,
"FCPort update scheduled.\n");
qla2x00_update_fcports(vha);
clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
ql_dbg(ql_dbg_dpc, vha, 0x4017,
"FCPort update end.\n");
}
if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
ql_dbg(ql_dbg_dpc, vha, 0x4018,
"Relogin needed scheduled.\n");
qla2x00_relogin(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4019,
"Relogin needed end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
clear_bit(RESET_ACTIVE, &vha->dpc_flags);
}
if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
ql_dbg(ql_dbg_dpc, vha, 0x401a,
"Loop resync scheduled.\n");
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
ql_dbg(ql_dbg_dpc, vha, 0x401b,
"Loop resync end.\n");
}
}
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
int ret;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags = 0;
if (vha->vp_idx)
return;
if (list_empty(&ha->vp_list))
return;
clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
if (!(ha->current_topology & ISP_CFG_F))
return;
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
if (vp->vp_idx) {
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
ret = qla2x00_do_dpc_vp(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vp->vref_count);
}
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
uint8_t port_name[WWN_SIZE];
if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
return VPCERR_UNSUPPORTED;
/* Check that the F/W and H/W support NPIV */
if (!ha->flags.npiv_supported)
return VPCERR_UNSUPPORTED;
/* Check whether an NPIV-capable switch is present */
if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
return VPCERR_NO_FABRIC_SUPP;
/* Check for a unique WWPN */
u64_to_wwn(fc_vport->port_name, port_name);
if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
return VPCERR_BAD_WWN;
vha = qla24xx_find_vhost_by_name(ha, port_name);
if (vha)
return VPCERR_BAD_WWN;
/* Check against the max NPIV vport count */
if (ha->num_vhosts > ha->max_npiv_vports) {
ql_dbg(ql_dbg_vport, vha, 0xa004,
"num_vhosts %ud is bigger "
"than max_npiv_vports %ud.\n",
ha->num_vhosts, ha->max_npiv_vports);
return VPCERR_UNSUPPORTED;
}
return 0;
}
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
struct scsi_host_template *sht = &qla2xxx_driver_template;
struct Scsi_Host *host;
vha = qla2x00_create_host(sht, ha);
if (!vha) {
ql_log(ql_log_warn, vha, 0xa005,
"scsi_host_alloc() failed for vport.\n");
return NULL;
}
host = vha->host;
fc_vport->dd_data = vha;
/* New host info */
u64_to_wwn(fc_vport->node_name, vha->node_name);
u64_to_wwn(fc_vport->port_name, vha->port_name);
vha->fc_vport = fc_vport;
vha->device_flags = 0;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
ql_dbg(ql_dbg_vport, vha, 0xa006,
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
vha->dpc_flags = 0L;
/*
* To fix the issue of processing a parent's RSCN for the vport before
* its SCR is complete.
*/
set_bit(VP_SCR_NEEDED, &vha->vp_flags);
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
vha->req = base_vha->req;
host->can_queue = base_vha->req->length + 128;
host->cmd_per_lun = 3;
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
host->max_lun = ql2xmaxlun;
host->unique_id = host->host_no;
host->max_id = ha->max_fibre_devices;
host->transportt = qla2xxx_transport_vport_template;
ql_dbg(ql_dbg_vport, vha, 0xa007,
"Detect vport hba %ld at address = %p.\n",
vha->host_no, vha);
vha->flags.init_done = 1;
mutex_lock(&ha->vport_lock);
set_bit(vha->vp_idx, ha->vp_idx_map);
ha->cur_vport_count++;
mutex_unlock(&ha->vport_lock);
return vha;
create_vhost_failed:
return NULL;
}
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
uint16_t que_id = req->id;
dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
sizeof(request_t), req->ring, req->dma);
req->ring = NULL;
req->dma = 0;
if (que_id) {
ha->req_q_map[que_id] = NULL;
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
}
kfree(req->outstanding_cmds);
kfree(req);
req = NULL;
}
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
struct qla_hw_data *ha = vha->hw;
uint16_t que_id = rsp->id;
if (rsp->msix && rsp->msix->have_irq) {
free_irq(rsp->msix->vector, rsp);
rsp->msix->have_irq = 0;
rsp->msix->rsp = NULL;
}
dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
sizeof(response_t), rsp->ring, rsp->dma);
rsp->ring = NULL;
rsp->dma = 0;
if (que_id) {
ha->rsp_q_map[que_id] = NULL;
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);
}
kfree(rsp);
rsp = NULL;
}
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
int ret = -1;
if (req) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
}
if (ret == QLA_SUCCESS)
qla25xx_free_req_que(vha, req);
return ret;
}
static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
int ret = -1;
if (rsp) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
}
if (ret == QLA_SUCCESS)
qla25xx_free_rsp_que(vha, rsp);
return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
int cnt, ret = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct qla_hw_data *ha = vha->hw;
/* Delete request queues */
for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
req = ha->req_q_map[cnt];
if (req) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00ea,
"Couldn't delete req que %d.\n",
req->id);
return ret;
}
}
}
/* Delete response queues */
for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
rsp = ha->rsp_q_map[cnt];
if (rsp) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00eb,
"Couldn't delete rsp que %d.\n",
rsp->id);
return ret;
}
}
}
return ret;
}
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
int ret = 0;
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t __iomem *reg;
uint32_t cnt;
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (req == NULL) {
ql_log(ql_log_fatal, base_vha, 0x00d9,
"Failed to allocate memory for request queue.\n");
goto failed;
}
req->length = REQUEST_ENTRY_CNT_24XX;
req->ring = dma_alloc_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
&req->dma, GFP_KERNEL);
if (req->ring == NULL) {
ql_log(ql_log_fatal, base_vha, 0x00da,
"Failed to allocate memory for request_ring.\n");
goto que_failed;
}
ret = qla2x00_alloc_outstanding_cmds(ha, req);
if (ret != QLA_SUCCESS)
goto que_failed;
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
mutex_unlock(&ha->vport_lock);
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->req_qid_map);
ha->req_q_map[que_id] = req;
req->rid = rid;
req->vp_idx = vp_idx;
req->qos = qos;
ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
que_id, req->rid, req->vp_idx, req->qos);
ql_dbg(ql_dbg_init, base_vha, 0x00dc,
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
que_id, req->rid, req->vp_idx, req->qos);
if (rsp_que < 0)
req->rsp = NULL;
else
req->rsp = ha->rsp_q_map[rsp_que];
/* Use alternate PCI bus number */
if (MSB(req->rid))
options |= BIT_4;
/* Use alternate PCI devfn */
if (LSB(req->rid))
options |= BIT_5;
req->options = options;
ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
req->ring_ptr = req->ring;
req->ring_index = 0;
req->cnt = req->length;
req->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
req->ring_ptr, req->ring_index,
req->cnt, req->id, req->max_q_depth);
ql_dbg(ql_dbg_init, base_vha, 0x00de,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
req->ring_ptr, req->ring_index, req->cnt,
req->id, req->max_q_depth);
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
goto que_failed;
}
return req->id;
que_failed:
qla25xx_free_req_que(base_vha, req);
failed:
return 0;
}
static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
struct qla_hw_data *ha = rsp->hw;
spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
uint8_t vp_idx, uint16_t rid, int req)
{
int ret = 0;
struct rsp_que *rsp = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t __iomem *reg;
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
ql_log(ql_log_warn, base_vha, 0x0066,
"Failed to allocate memory for response queue.\n");
goto failed;
}
rsp->length = RESPONSE_ENTRY_CNT_MQ;
rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
(rsp->length + 1) * sizeof(response_t),
&rsp->dma, GFP_KERNEL);
if (rsp->ring == NULL) {
ql_log(ql_log_warn, base_vha, 0x00e1,
"Failed to allocate memory for response ring.\n");
goto que_failed;
}
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
mutex_unlock(&ha->vport_lock);
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
if (ha->flags.msix_enabled)
rsp->msix = &ha->msix_entries[que_id + 1];
else
ql_log(ql_log_warn, base_vha, 0x00e3,
"MSIX not enalbled.\n");
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
"queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
options |= BIT_4;
/* Use alternate PCI devfn */
if (LSB(rsp->rid))
options |= BIT_5;
/* Enable MSIX handshake mode on for uncapable adapters */
if (!IS_MSIX_NACK_CAPABLE(ha))
options |= BIT_6;
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = ®->isp25mq.rsp_q_in;
rsp->rsp_q_out = ®->isp25mq.rsp_q_out;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ret = qla25xx_request_irq(rsp);
if (ret)
goto que_failed;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);
goto que_failed;
}
if (req >= 0)
rsp->req = ha->req_q_map[req];
else
rsp->req = NULL;
qla2x00_init_response_q_entries(rsp);
if (rsp->hw->wq)
INIT_WORK(&rsp->q_work, qla_do_work);
return rsp->id;
que_failed:
qla25xx_free_rsp_que(base_vha, rsp);
failed:
return 0;
}
| gpl-2.0 |
OpenDesireProject/android_kernel_htc_msm7x30 | arch/arm/mach-omap1/board-h2-mmc.c | 2971 | 1823 | /*
* linux/arch/arm/mach-omap1/board-h2-mmc.c
*
* Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT
* Author: Felipe Balbi <felipe.lima@indt.org.br>
*
* This code is based on linux/arch/arm/mach-omap2/board-n800-mmc.c, which is:
* Copyright (C) 2006 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/i2c/tps65010.h>
#include <plat/mmc.h>
#include <mach/gpio.h>
#include "board-h2.h"
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
gpio_set_value(H2_TPS_GPIO_MMC_PWR_EN, power_on);
return 0;
}
static int mmc_late_init(struct device *dev)
{
int ret = gpio_request(H2_TPS_GPIO_MMC_PWR_EN, "MMC power");
if (ret < 0)
return ret;
gpio_direction_output(H2_TPS_GPIO_MMC_PWR_EN, 0);
return ret;
}
static void mmc_cleanup(struct device *dev)
{
gpio_free(H2_TPS_GPIO_MMC_PWR_EN);
}
/*
* H2 could also use the following functions, once tested:
* - mmc_get_cover_state that uses OMAP_MPUIO(1)
* - mmc_get_wp that uses OMAP_MPUIO(3)
*/
static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.init = mmc_late_init,
.cleanup = mmc_cleanup,
.dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.name = "mmcblk",
},
};
static struct omap_mmc_platform_data *mmc_data[OMAP16XX_NR_MMC];
void __init h2_mmc_init(void)
{
mmc_data[0] = &mmc1_data;
omap1_init_mmc(mmc_data, OMAP16XX_NR_MMC);
}
#else
void __init h2_mmc_init(void)
{
}
#endif
| gpl-2.0 |
jpastuszek/linux-3.0.6-gentoo-ec2 | arch/arm/mach-ixp23xx/espresso.c | 2971 | 2109 | /*
* arch/arm/mach-ixp23xx/espresso.c
*
* Double Espresso-specific routines
*
* Author: Lennert Buytenhek <buytenh@wantstofly.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/mtd/physmap.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
static int __init espresso_pci_init(void)
{
if (machine_is_espresso())
ixp23xx_pci_slave_init();
return 0;
};
subsys_initcall(espresso_pci_init);
static struct physmap_flash_data espresso_flash_data = {
.width = 2,
};
static struct resource espresso_flash_resource = {
.start = 0x90000000,
.end = 0x91ffffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device espresso_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &espresso_flash_data,
},
.num_resources = 1,
.resource = &espresso_flash_resource,
};
static void __init espresso_init(void)
{
platform_device_register(&espresso_flash);
/*
* Mark flash as writeable.
*/
IXP23XX_EXP_CS0[0] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[1] |= IXP23XX_FLASH_WRITABLE;
ixp23xx_sys_init();
}
MACHINE_START(ESPRESSO, "IP Fabrics Double Espresso")
/* Maintainer: Lennert Buytenhek */
.map_io = ixp23xx_map_io,
.init_irq = ixp23xx_init_irq,
.timer = &ixp23xx_timer,
.boot_params = 0x00000100,
.init_machine = espresso_init,
MACHINE_END
| gpl-2.0 |
jcsullins/kernel-tenderloin-3.0 | arch/arm/mach-omap1/board-h2-mmc.c | 2971 | 1823 | /*
* linux/arch/arm/mach-omap1/board-h2-mmc.c
*
* Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT
* Author: Felipe Balbi <felipe.lima@indt.org.br>
*
* This code is based on linux/arch/arm/mach-omap2/board-n800-mmc.c, which is:
* Copyright (C) 2006 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/i2c/tps65010.h>
#include <plat/mmc.h>
#include <mach/gpio.h>
#include "board-h2.h"
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
static int mmc_set_power(struct device *dev, int slot, int power_on,
int vdd)
{
gpio_set_value(H2_TPS_GPIO_MMC_PWR_EN, power_on);
return 0;
}
static int mmc_late_init(struct device *dev)
{
int ret = gpio_request(H2_TPS_GPIO_MMC_PWR_EN, "MMC power");
if (ret < 0)
return ret;
gpio_direction_output(H2_TPS_GPIO_MMC_PWR_EN, 0);
return ret;
}
static void mmc_cleanup(struct device *dev)
{
gpio_free(H2_TPS_GPIO_MMC_PWR_EN);
}
/*
* H2 could also use the following functions, once tested:
* - mmc_get_cover_state that uses OMAP_MPUIO(1)
* - mmc_get_wp that uses OMAP_MPUIO(3)
*/
static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.init = mmc_late_init,
.cleanup = mmc_cleanup,
.dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.name = "mmcblk",
},
};
static struct omap_mmc_platform_data *mmc_data[OMAP16XX_NR_MMC];
void __init h2_mmc_init(void)
{
mmc_data[0] = &mmc1_data;
omap1_init_mmc(mmc_data, OMAP16XX_NR_MMC);
}
#else
void __init h2_mmc_init(void)
{
}
#endif
| gpl-2.0 |
Toygoon/ToyKernel-Team.TCP_SGS2 | drivers/acpi/acpica/exmutex.c | 3227 | 15331 |
/******************************************************************************
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "acevents.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmutex")
/* Local prototypes */
static void
acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
struct acpi_thread_state *thread);
/*******************************************************************************
*
* FUNCTION: acpi_ex_unlink_mutex
*
* PARAMETERS: obj_desc - The mutex to be unlinked
*
* RETURN: None
*
* DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
*
******************************************************************************/
void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
{
struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
if (!thread) {
return;
}
/* Doubly linked list */
if (obj_desc->mutex.next) {
(obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev;
}
if (obj_desc->mutex.prev) {
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
/*
* Migrate the previous sync level associated with this mutex to
* the previous mutex on the list so that it may be preserved.
* This handles the case where several mutexes have been acquired
* at the same level, but are not released in opposite order.
*/
(obj_desc->mutex.prev)->mutex.original_sync_level =
obj_desc->mutex.original_sync_level;
} else {
thread->acquired_mutex_list = obj_desc->mutex.next;
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_link_mutex
*
* PARAMETERS: obj_desc - The mutex to be linked
* Thread - Current executing thread object
*
* RETURN: None
*
* DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
*
******************************************************************************/
static void
acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
struct acpi_thread_state *thread)
{
union acpi_operand_object *list_head;
list_head = thread->acquired_mutex_list;
/* This object will be the first object in the list */
obj_desc->mutex.prev = NULL;
obj_desc->mutex.next = list_head;
/* Update old first object to point back to this object */
if (list_head) {
list_head->mutex.prev = obj_desc;
}
/* Update list head */
thread->acquired_mutex_list = obj_desc;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_acquire_mutex_object
*
* PARAMETERS: Timeout - Timeout in milliseconds
* obj_desc - Mutex object
* thread_id - Current thread state
*
* RETURN: Status
*
* DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common
* path that supports multiple acquires by the same thread.
*
* MUTEX: Interpreter must be locked
*
* NOTE: This interface is called from three places:
* 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator
* 2) From acpi_ex_acquire_global_lock when an AML Field access requires the
* global lock
* 3) From the external interface, acpi_acquire_global_lock
*
******************************************************************************/
acpi_status
acpi_ex_acquire_mutex_object(u16 timeout,
union acpi_operand_object *obj_desc,
acpi_thread_id thread_id)
{
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.thread_id == thread_id) {
/*
* The mutex is already owned by this thread, just increment the
* acquisition depth
*/
obj_desc->mutex.acquisition_depth++;
return_ACPI_STATUS(AE_OK);
}
/* Acquire the mutex, wait if necessary. Special case for Global Lock */
if (obj_desc == acpi_gbl_global_lock_mutex) {
status = acpi_ev_acquire_global_lock(timeout);
} else {
status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
timeout);
}
if (ACPI_FAILURE(status)) {
/* Includes failure from a timeout on time_desc */
return_ACPI_STATUS(status);
}
/* Acquired the mutex: update mutex object */
obj_desc->mutex.thread_id = thread_id;
obj_desc->mutex.acquisition_depth = 1;
obj_desc->mutex.original_sync_level = 0;
obj_desc->mutex.owner_thread = NULL; /* Used only for AML Acquire() */
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_acquire_mutex
*
* PARAMETERS: time_desc - Timeout integer
* obj_desc - Mutex object
* walk_state - Current method execution state
*
* RETURN: Status
*
* DESCRIPTION: Acquire an AML mutex
*
******************************************************************************/
acpi_status
acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Must have a valid thread state struct */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
* Current sync level must be less than or equal to the sync level of the
* mutex. This mechanism provides some deadlock prevention
*/
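/*
* Illustrative example (derived from the check below): holding a
* SyncLevel-4 mutex makes current_sync_level == 4, so acquiring a
* SyncLevel-2 mutex fails with AE_AML_MUTEX_ORDER, while a
* SyncLevel-4 or SyncLevel-5 mutex may still be acquired.
*/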
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(obj_desc->mutex.node),
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
obj_desc,
walk_state->thread->thread_id);
if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
/* Save Thread object, original/current sync levels */
obj_desc->mutex.owner_thread = walk_state->thread;
obj_desc->mutex.original_sync_level =
walk_state->thread->current_sync_level;
walk_state->thread->current_sync_level =
obj_desc->mutex.sync_level;
/* Link the mutex to the current thread for force-unlock at method exit */
acpi_ex_link_mutex(obj_desc, walk_state->thread);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_release_mutex_object
*
* PARAMETERS: obj_desc - The object descriptor for this op
*
* RETURN: Status
*
* DESCRIPTION: Release a previously acquired Mutex, low level interface.
* Provides a common path that supports multiple releases (after
* previous multiple acquires) by the same thread.
*
* MUTEX: Interpreter must be locked
*
* NOTE: This interface is called from three places:
* 1) From acpi_ex_release_mutex, via an AML Release() operator
* 2) From acpi_ex_release_global_lock when an AML Field access requires the
* global lock
* 3) From the external interface, acpi_release_global_lock
*
******************************************************************************/
acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ex_release_mutex_object);
if (obj_desc->mutex.acquisition_depth == 0) {
return (AE_NOT_ACQUIRED);
}
/* Match multiple Acquires with multiple Releases */
obj_desc->mutex.acquisition_depth--;
if (obj_desc->mutex.acquisition_depth != 0) {
/* Just decrement the depth and return */
return_ACPI_STATUS(AE_OK);
}
if (obj_desc->mutex.owner_thread) {
/* Unlink the mutex from the owner's list */
acpi_ex_unlink_mutex(obj_desc);
obj_desc->mutex.owner_thread = NULL;
}
/* Release the mutex, special case for Global Lock */
if (obj_desc == acpi_gbl_global_lock_mutex) {
status = acpi_ev_release_global_lock();
} else {
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
}
/* Clear mutex info */
obj_desc->mutex.thread_id = 0;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_release_mutex
*
* PARAMETERS: obj_desc - The object descriptor for this op
* walk_state - Current method execution state
*
* RETURN: Status
*
* DESCRIPTION: Release a previously acquired Mutex.
*
******************************************************************************/
acpi_status
acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
u8 previous_sync_level;
struct acpi_thread_state *owner_thread;
ACPI_FUNCTION_TRACE(ex_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
owner_thread = obj_desc->mutex.owner_thread;
/* The mutex must have been previously acquired in order to release it */
if (!owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
/* Must have a valid thread. */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
(obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %u cannot release Mutex [%4.4s] acquired by thread %u",
(u32)walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
(u32)owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
/*
* The sync level of the mutex must be equal to the current sync level. In
* other words, the current level means that at least one mutex at that
* level is currently being held. Attempting to release a mutex of a
* different level can only mean that the mutex ordering rule is being
* violated. This behavior is clarified in ACPI 4.0 specification.
*/
if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.sync_level,
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
/*
* Get the previous sync_level from the head of the acquired mutex list.
* This handles the case where several mutexes at the same level have been
* acquired, but are not released in reverse order.
*/
previous_sync_level =
owner_thread->acquired_mutex_list->mutex.original_sync_level;
status = acpi_ex_release_mutex_object(obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (obj_desc->mutex.acquisition_depth == 0) {
/* Restore the previous sync_level */
owner_thread->current_sync_level = previous_sync_level;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_release_all_mutexes
*
* PARAMETERS: Thread - Current executing thread object
*
* RETURN: Status
*
* DESCRIPTION: Release all mutexes held by this thread
*
* NOTE: This function is called as the thread is exiting the interpreter.
* Mutexes are not released when an individual control method is exited, but
* only when the parent thread actually exits the interpreter. This allows one
* method to acquire a mutex, and a different method to release it, as long as
* this is performed underneath a single parent control method.
*
******************************************************************************/
void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
{
union acpi_operand_object *next = thread->acquired_mutex_list;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_ENTRY();
/* Traverse the list of owned mutexes, releasing each one */
while (next) {
obj_desc = next;
next = obj_desc->mutex.next;
obj_desc->mutex.prev = NULL;
obj_desc->mutex.next = NULL;
obj_desc->mutex.acquisition_depth = 0;
/* Release the mutex, special case for Global Lock */
if (obj_desc == acpi_gbl_global_lock_mutex) {
/* Ignore errors */
(void)acpi_ev_release_global_lock();
} else {
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
}
/* Mark mutex unowned */
obj_desc->mutex.owner_thread = NULL;
obj_desc->mutex.thread_id = 0;
/* Update Thread sync_level (Last mutex is the important one) */
thread->current_sync_level =
obj_desc->mutex.original_sync_level;
}
}
| gpl-2.0 |
AOSP-Jaguar/android_kernel_sony_msm8974 | drivers/net/wireless/ath/ath9k/ar9003_phy.c | 3227 | 44609 | /*
* Copyright (c) 2010-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include "hw.h"
#include "ar9003_phy.h"
static const int firstep_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
static const int cycpwrThr1_table[] =
/* level: 0 1 2 3 4 5 6 7 8 */
{ -6, -4, -2, 0, 2, 4, 6, 8 }; /* lvl 0-7, default 3 */
/*
* register values to turn OFDM weak signal detection OFF
*/
static const int m1ThreshLow_off = 127;
static const int m2ThreshLow_off = 127;
static const int m1Thresh_off = 127;
static const int m2Thresh_off = 127;
static const int m2CountThr_off = 31;
static const int m2CountThrLow_off = 63;
static const int m1ThreshLowExt_off = 127;
static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
/**
* ar9003_hw_set_channel - set channel on single-chip device
* @ah: atheros hardware structure
* @chan:
*
* This is the function to change channel on single-chip devices, that is
* for AR9300 family of chipsets.
*
* This function takes the channel value in MHz and sets
* hardware channel value. Assumes writes have been enabled to analog bus.
*
* Actual Expression,
*
* For 2GHz channel,
* Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
* (freq_ref = 40MHz)
*
* For 5GHz channel,
* Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
* (freq_ref = 40MHz/(24>>amodeRefSel))
*
* For 5GHz channels which are 5MHz spaced,
* Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
* (freq_ref = 40MHz)
*/
static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
u16 bMode, fracMode = 0, aModeRefSel = 0;
u32 freq, channelSel = 0, reg32 = 0;
struct chan_centers centers;
int loadSynthChannel;
ath9k_hw_get_channel_centers(ah, chan, &centers);
freq = centers.synth_center;
if (freq < 4800) { /* 2 GHz, fractional mode */
if (AR_SREV_9330(ah)) {
u32 chan_frac;
u32 div;
if (ah->is_clk_25mhz)
div = 75;
else
div = 120;
channelSel = (freq * 4) / div;
chan_frac = (((freq * 4) % div) * 0x20000) / div;
channelSel = (channelSel << 17) | chan_frac;
} else if (AR_SREV_9485(ah)) {
u32 chan_frac;
/*
* freq_ref = 40 / (refdiva >> amoderefsel); where refdiva=1 and amoderefsel=0
* ndiv = ((chan_mhz * 4) / 3) / freq_ref;
* chansel = int(ndiv), chanfrac = (ndiv - chansel) * 0x20000
*/
channelSel = (freq * 4) / 120;
chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
channelSel = (channelSel << 17) | chan_frac;
} else if (AR_SREV_9340(ah)) {
if (ah->is_clk_25mhz) {
u32 chan_frac;
channelSel = (freq * 2) / 75;
chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
} else
channelSel = CHANSEL_2G(freq) >> 1;
} else
channelSel = CHANSEL_2G(freq);
/* Set to 2G mode */
bMode = 1;
} else {
if (AR_SREV_9340(ah) && ah->is_clk_25mhz) {
u32 chan_frac;
channelSel = (freq * 2) / 75;
chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
} else {
channelSel = CHANSEL_5G(freq);
/* Doubler is ON, so, divide channelSel by 2. */
channelSel >>= 1;
}
/* Set to 5G mode */
bMode = 0;
}
/* Enable fractional mode for all channels */
fracMode = 1;
aModeRefSel = 0;
loadSynthChannel = 0;
reg32 = (bMode << 29);
REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
/* Enable Long shift Select for Synthesizer */
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);
/* Program Synth. setting */
reg32 = (channelSel << 2) | (fracMode << 30) |
(aModeRefSel << 28) | (loadSynthChannel << 31);
REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
/* Toggle Load Synth channel bit */
loadSynthChannel = 1;
reg32 = (channelSel << 2) | (fracMode << 30) |
(aModeRefSel << 28) | (loadSynthChannel << 31);
REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
ah->curchan = chan;
ah->curchan_rad_index = -1;
return 0;
}
/**
* ar9003_hw_spur_mitigate_mrc_cck - convert baseband spur frequency
* @ah: atheros hardware structure
* @chan:
*
* For single-chip solutions. Converts to baseband spur frequency given the
* input channel frequency and compute register settings below.
*
* Spur mitigation for MRC CCK
*/
static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
struct ath9k_channel *chan)
{
static const u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
int cur_bb_spur, negative = 0, cck_spur_freq;
int i;
int range, max_spur_cnts, synth_freq;
u8 *spur_fbin_ptr = NULL;
/*
* Need to verify range +/- 10 MHz in control channel, otherwise spur
* is out-of-band and can be ignored.
*/
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) {
spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
IS_CHAN_2GHZ(chan));
if (spur_fbin_ptr[0] == 0) /* No spur */
return;
max_spur_cnts = 5;
if (IS_CHAN_HT40(chan)) {
range = 19;
if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
AR_PHY_GC_DYN2040_PRI_CH) == 0)
synth_freq = chan->channel + 10;
else
synth_freq = chan->channel - 10;
} else {
range = 10;
synth_freq = chan->channel;
}
} else {
range = AR_SREV_9462(ah) ? 5 : 10;
max_spur_cnts = 4;
synth_freq = chan->channel;
}
for (i = 0; i < max_spur_cnts; i++) {
if (AR_SREV_9462(ah) && (i == 0 || i == 3))
continue;
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan)) - synth_freq;
else
cur_bb_spur = spur_freq[i] - synth_freq;
if (cur_bb_spur < 0) {
negative = 1;
cur_bb_spur = -cur_bb_spur;
}
if (cur_bb_spur < range) {
cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
if (negative == 1)
cck_spur_freq = -cck_spur_freq;
cck_spur_freq = cck_spur_freq & 0xfffff;
REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
0x2);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
0x1);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
cck_spur_freq);
return;
}
}
REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
}
/* Clean all spur register fields */
static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
{
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
}
static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
int freq_offset,
int spur_freq_sd,
int spur_delta_phase,
int spur_subchannel_sd)
{
int mask_index = 0;
/* OFDM Spur mitigation */
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
REG_RMW_FIELD(ah, AR_PHY_TIMING11,
AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);
if (REG_READ_FIELD(ah, AR_PHY_MODE,
AR_PHY_MODE_DYNAMIC) == 0x1)
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);
mask_index = (freq_offset << 4) / 5;
if (mask_index < 0)
mask_index = mask_index - 1;
mask_index = mask_index & 0x7f;
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
REG_RMW_FIELD(ah, AR_PHY_TIMING4,
AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
}
static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
struct ath9k_channel *chan,
int freq_offset)
{
int spur_freq_sd = 0;
int spur_subchannel_sd = 0;
int spur_delta_phase = 0;
if (IS_CHAN_HT40(chan)) {
if (freq_offset < 0) {
if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
spur_subchannel_sd = 1;
else
spur_subchannel_sd = 0;
spur_freq_sd = ((freq_offset + 10) << 9) / 11;
} else {
if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
spur_subchannel_sd = 0;
else
spur_subchannel_sd = 1;
spur_freq_sd = ((freq_offset - 10) << 9) / 11;
}
spur_delta_phase = (freq_offset << 17) / 5;
} else {
spur_subchannel_sd = 0;
spur_freq_sd = (freq_offset << 9) /11;
spur_delta_phase = (freq_offset << 18) / 5;
}
spur_freq_sd = spur_freq_sd & 0x3ff;
spur_delta_phase = spur_delta_phase & 0xfffff;
ar9003_hw_spur_ofdm(ah,
freq_offset,
spur_freq_sd,
spur_delta_phase,
spur_subchannel_sd);
}
/* Spur mitigation for OFDM */
static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int synth_freq;
int range = 10;
int freq_offset = 0;
int mode;
u8* spurChansPtr;
unsigned int i;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
if (IS_CHAN_5GHZ(chan)) {
spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
mode = 0;
}
else {
spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
mode = 1;
}
if (spurChansPtr[0] == 0)
return; /* No spur in the mode */
if (IS_CHAN_HT40(chan)) {
range = 19;
if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
synth_freq = chan->channel - 10;
else
synth_freq = chan->channel + 10;
} else {
range = 10;
synth_freq = chan->channel;
}
ar9003_hw_spur_ofdm_clear(ah);
for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
if (abs(freq_offset) < range) {
ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
break;
}
}
}
static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
struct ath9k_channel *chan)
{
ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
ar9003_hw_spur_mitigate_ofdm(ah, chan);
}
static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
if (chan && IS_CHAN_HALF_RATE(chan))
pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
else if (chan && IS_CHAN_QUARTER_RATE(chan))
pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
return pll;
}
static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 phymode;
u32 enableDacFifo = 0;
enableDacFifo =
(REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);
/* Enable 11n HT, 20 MHz */
phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 |
AR_PHY_GC_SHORT_GI_40 | enableDacFifo;
/* Configure baseband for dynamic 20/40 operation */
if (IS_CHAN_HT40(chan)) {
phymode |= AR_PHY_GC_DYN2040_EN;
/* Configure control (primary) channel at +-10MHz */
if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
(chan->chanmode == CHANNEL_G_HT40PLUS))
phymode |= AR_PHY_GC_DYN2040_PRI_CH;
}
/* make sure we preserve INI settings */
phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
/* turn off Green Field detection for STA for now */
phymode &= ~AR_PHY_GC_GF_DETECT_EN;
REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
/* Configure MAC for 20/40 operation */
ath9k_hw_set11nmac2040(ah);
/* global transmit timeout (25 TUs default)*/
REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
/* carrier sense timeout */
REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
}
static void ar9003_hw_init_bb(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 synthDelay;
/*
* Wait for the frequency synth to settle (synth goes on
* via AR_PHY_ACTIVE_EN). Read the phy active delay register.
* Value is in 100ns increments.
*/
synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(chan))
synthDelay = (4 * synthDelay) / 22;
else
synthDelay /= 10;
/* Activate the PHY (includes baseband activate + synthesizer on) */
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
/*
* There is an issue if the AP starts the calibration before
* the base band timeout completes. This could result in the
* rx_clear false triggering. As a workaround we delay an
* extra BASE_ACTIVATE_DELAY usecs to ensure this condition
* does not happen.
*/
udelay(synthDelay + BASE_ACTIVATE_DELAY);
}
static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
switch (rx) {
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
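/* fall through - 0x5 also programs the chainmask below */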
case 0x3:
case 0x1:
case 0x2:
case 0x7:
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
break;
default:
break;
}
if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
else if (AR_SREV_9462(ah))
/* xxx only when MCI support is enabled */
REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
else
REG_WRITE(ah, AR_SELFGEN_MASK, tx);
if (tx == 0x5) {
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
}
}
/*
* Override INI values with chip specific configuration.
*/
static void ar9003_hw_override_ini(struct ath_hw *ah)
{
u32 val;
/*
* Set the RX_ABORT and RX_DIS and clear it only after
* RXE is set for MAC. This prevents frames with
* corrupted descriptor status.
*/
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
/*
* For AR9280 and above, there is a new feature that allows
* Multicast search based on both MAC Address and Key ID. By default,
* this feature is enabled. But since the driver is not using this
* feature, we switch it off; otherwise multicast search based on
* MAC addr only will fail.
*/
val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
REG_WRITE(ah, AR_PCU_MISC_MODE2,
val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
struct ar5416IniArray *iniArr,
int column)
{
unsigned int i, regWrites = 0;
/* New INI format: Array may be undefined (pre, core, post arrays) */
if (!iniArr->ia_array)
return;
/*
* New INI format: Pre, core, and post arrays for a given subsystem
* may be modal (> 2 columns) or non-modal (2 columns). Determine if
* the array is non-modal and force the column to 1.
*/
if (column >= iniArr->ia_columns)
column = 1;
for (i = 0; i < iniArr->ia_rows; i++) {
u32 reg = INI_RA(iniArr, i, 0);
u32 val = INI_RA(iniArr, i, column);
REG_WRITE(ah, reg, val);
DO_DELAY(regWrites);
}
}
static int ar9003_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
unsigned int regWrites = 0, i;
u32 modesIndex;
switch (chan->chanmode) {
case CHANNEL_A:
case CHANNEL_A_HT20:
modesIndex = 1;
break;
case CHANNEL_A_HT40PLUS:
case CHANNEL_A_HT40MINUS:
modesIndex = 2;
break;
case CHANNEL_G:
case CHANNEL_G_HT20:
case CHANNEL_B:
modesIndex = 4;
break;
case CHANNEL_G_HT40PLUS:
case CHANNEL_G_HT40MINUS:
modesIndex = 3;
break;
default:
return -EINVAL;
}
for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
if (i == ATH_INI_POST && AR_SREV_9462_20(ah))
ar9003_hw_prog_ini(ah,
&ah->ini_radio_post_sys2ant,
modesIndex);
}
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
/*
* For 5GHz channels requiring Fast Clock, apply
* different modal values.
*/
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
REG_WRITE_ARRAY(&ah->iniModesFastClock,
modesIndex, regWrites);
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
if (AR_SREV_9462(ah))
ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1);
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
ath9k_hw_apply_txpower(ah, chan, false);
if (AR_SREV_9462(ah)) {
if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
ah->enabled_cals |= TX_IQ_CAL;
else
ah->enabled_cals &= ~TX_IQ_CAL;
if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
ah->enabled_cals |= TX_CL_CAL;
else
ah->enabled_cals &= ~TX_CL_CAL;
}
return 0;
}
static void ar9003_hw_set_rfmode(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 rfMode = 0;
if (chan == NULL)
return;
rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
REG_WRITE(ah, AR_PHY_MODE, rfMode);
}
static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
{
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 coef_scaled, ds_coef_exp, ds_coef_man;
u32 clockMhzScaled = 0x64000000;
struct chan_centers centers;
/*
* half and quarter rate can divide the scaled clock by 2 or 4
* scale for selected channel bandwidth
*/
if (IS_CHAN_HALF_RATE(chan))
clockMhzScaled = clockMhzScaled >> 1;
else if (IS_CHAN_QUARTER_RATE(chan))
clockMhzScaled = clockMhzScaled >> 2;
/*
* ALGO -> coef = 1e8/fcarrier*fclock/40;
* scaled coef to provide precision for this floating calculation
*/
ath9k_hw_get_channel_centers(ah, chan, &centers);
coef_scaled = clockMhzScaled / centers.synth_center;
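/*
* Illustrative value: clockMhzScaled is 0x64000000 (100 * 2^24) at
* full rate, so a 2437 MHz synth center gives
* coef_scaled = 1677721600 / 2437 = 688437.
*/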
ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
&ds_coef_exp);
REG_RMW_FIELD(ah, AR_PHY_TIMING3,
AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
REG_RMW_FIELD(ah, AR_PHY_TIMING3,
AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
/*
* For Short GI,
* scaled coeff is 9/10 that of normal coeff
*/
coef_scaled = (9 * coef_scaled) / 10;
ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
&ds_coef_exp);
/* for short gi */
REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
AR_PHY_SGI_DSC_MAN, ds_coef_man);
REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
AR_PHY_SGI_DSC_EXP, ds_coef_exp);
}
static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
{
REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
}
/*
* Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
* Read the phy active delay register. Value is in 100ns increments.
*/
static void ar9003_hw_rfbus_done(struct ath_hw *ah)
{
u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
if (IS_CHAN_B(ah->curchan))
synthDelay = (4 * synthDelay) / 22;
else
synthDelay /= 10;
udelay(synthDelay + BASE_ACTIVATE_DELAY);
REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
static bool ar9003_hw_ani_control(struct ath_hw *ah,
enum ath9k_ani_cmd cmd, int param)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ar5416AniState *aniState = &chan->ani;
s32 value, value2;
switch (cmd & ah->ani_function) {
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
/*
* on == 1 means ofdm weak signal detection is ON
* on == 1 is the default, for less noise immunity
*
* on == 0 means ofdm weak signal detection is OFF
* on == 0 means more noise immunity
*/
u32 on = param ? 1 : 0;
/*
* make register setting for default
* (weak sig detect ON) come from INI file
*/
int m1ThreshLow = on ?
aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
int m2ThreshLow = on ?
aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
int m1Thresh = on ?
aniState->iniDef.m1Thresh : m1Thresh_off;
int m2Thresh = on ?
aniState->iniDef.m2Thresh : m2Thresh_off;
int m2CountThr = on ?
aniState->iniDef.m2CountThr : m2CountThr_off;
int m2CountThrLow = on ?
aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
int m1ThreshLowExt = on ?
aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
int m2ThreshLowExt = on ?
aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
int m1ThreshExt = on ?
aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
int m2ThreshExt = on ?
aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
m1ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
m2ThreshLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M1_THRESH, m1Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2_THRESH, m2Thresh);
REG_RMW_FIELD(ah, AR_PHY_SFCORR,
AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
m2CountThrLow);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
if (on)
REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
else
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
aniState->ofdmWeakSigDetectOff = !on;
}
break;
}
case ATH9K_ANI_FIRSTEP_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP,
value);
/*
* we need to set first step low register too
* make register setting relative to default
* from INI file & cap value
*/
value2 = firstep_table[level] -
firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
level,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
ah->stats.ast_ani_stepdown++;
aniState->firstepLevel = level;
}
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
}
/*
* make register setting relative to default
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1,
value);
/*
* set AR_PHY_EXT_CCA for extension channel
* make register setting relative to default
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
AR_PHY_EXT_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
level,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
ah->stats.ast_ani_spurdown++;
aniState->spurImmunityLevel = level;
}
break;
}
case ATH9K_ANI_MRC_CCK:{
/*
* is_on == 1 means MRC CCK is ON (default, less noise immunity)
* is_on == 0 means MRC CCK is OFF (more noise immunity)
*/
bool is_on = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_ENABLE, is_on);
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
if (!is_on != aniState->mrcCCKOff) {
ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
chan->channel,
!aniState->mrcCCKOff ? "on" : "off",
is_on ? "on" : "off");
if (is_on)
ah->stats.ast_ani_ccklow++;
else
ah->stats.ast_ani_cckhigh++;
aniState->mrcCCKOff = !is_on;
}
break;
}
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
aniState->firstepLevel,
!aniState->mrcCCKOff ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
return true;
}
static void ar9003_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
#define AR_PHY_CH_MINCCA_PWR 0x1FF00000
#define AR_PHY_CH_MINCCA_PWR_S 20
#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
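/*
 * The minCCA power fields above are 9-bit two's-complement values,
 * hence the sign_extend32(nf, 8) calls below.
 */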
int16_t nf;
int i;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->rxchainmask & BIT(i)) {
nf = MS(REG_READ(ah, ah->nf_regs[i]),
AR_PHY_CH_MINCCA_PWR);
nfarray[i] = sign_extend32(nf, 8);
if (IS_CHAN_HT40(ah->curchan)) {
u8 ext_idx = AR9300_MAX_CHAINS + i;
nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
AR_PHY_CH_EXT_MINCCA_PWR);
nfarray[ext_idx] = sign_extend32(nf, 8);
}
}
}
}
static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
{
ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ;
if (AR_SREV_9330(ah))
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
if (AR_SREV_9462(ah)) {
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ;
ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9462_2GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ;
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9462_5GHZ;
}
}
/*
* Initialize the ANI register values with default (ini) values.
* This routine is called during a (full) hardware reset after
* all the registers are initialised from the INI.
*/
static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_channel *chan = ah->curchan;
struct ath9k_ani_default *iniDef;
u32 val;
aniState = &ah->curchan->ani;
iniDef = &aniState->iniDef;
ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
chan->channel,
chan->channelFlags);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
iniDef->m2Thresh = MS(val, AR_PHY_SFCORR_M2_THRESH);
iniDef->m2CountThr = MS(val, AR_PHY_SFCORR_M2COUNT_THR);
val = REG_READ(ah, AR_PHY_SFCORR_LOW);
iniDef->m1ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M1_THRESH_LOW);
iniDef->m2ThreshLow = MS(val, AR_PHY_SFCORR_LOW_M2_THRESH_LOW);
iniDef->m2CountThrLow = MS(val, AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW);
val = REG_READ(ah, AR_PHY_SFCORR_EXT);
iniDef->m1ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH);
iniDef->m2ThreshExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH);
iniDef->m1ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M1_THRESH_LOW);
iniDef->m2ThreshLowExt = MS(val, AR_PHY_SFCORR_EXT_M2_THRESH_LOW);
iniDef->firstep = REG_READ_FIELD(ah,
AR_PHY_FIND_SIG,
AR_PHY_FIND_SIG_FIRSTEP);
iniDef->firstepLow = REG_READ_FIELD(ah,
AR_PHY_FIND_SIG_LOW,
AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW);
iniDef->cycpwrThr1 = REG_READ_FIELD(ah,
AR_PHY_TIMING5,
AR_PHY_TIMING5_CYCPWR_THR1);
iniDef->cycpwrThr1Ext = REG_READ_FIELD(ah,
AR_PHY_EXT_CCA,
AR_PHY_EXT_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
}
static void ar9003_hw_set_radar_params(struct ath_hw *ah,
struct ath_hw_radar_conf *conf)
{
u32 radar_0 = 0, radar_1 = 0;
if (!conf) {
REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
return;
}
radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
if (conf->ext_channel)
REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
else
REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
}
static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
{
struct ath_hw_radar_conf *conf = &ah->radar_conf;
conf->fir_power = -28;
conf->radar_rssi = 0;
conf->pulse_height = 10;
conf->pulse_rssi = 24;
conf->pulse_inband = 8;
conf->pulse_maxlen = 255;
conf->pulse_inband_step = 12;
conf->radar_inband = 8;
}
static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf)
{
u32 regval;
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
antconf->main_lna_conf = (regval & AR_PHY_9485_ANT_DIV_MAIN_LNACONF) >>
AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S;
antconf->alt_lna_conf = (regval & AR_PHY_9485_ANT_DIV_ALT_LNACONF) >>
AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
AR_PHY_9485_ANT_FAST_DIV_BIAS_S;
if (AR_SREV_9330_11(ah)) {
antconf->lna1_lna2_delta = -9;
antconf->div_group = 1;
} else if (AR_SREV_9485(ah)) {
antconf->lna1_lna2_delta = -9;
antconf->div_group = 2;
} else {
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
}
static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf)
{
u32 regval;
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
regval &= ~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
AR_PHY_9485_ANT_DIV_ALT_LNACONF |
AR_PHY_9485_ANT_FAST_DIV_BIAS |
AR_PHY_9485_ANT_DIV_MAIN_GAINTB |
AR_PHY_9485_ANT_DIV_ALT_GAINTB);
regval |= ((antconf->main_lna_conf <<
AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S)
& AR_PHY_9485_ANT_DIV_MAIN_LNACONF);
regval |= ((antconf->alt_lna_conf << AR_PHY_9485_ANT_DIV_ALT_LNACONF_S)
& AR_PHY_9485_ANT_DIV_ALT_LNACONF);
regval |= ((antconf->fast_div_bias << AR_PHY_9485_ANT_FAST_DIV_BIAS_S)
& AR_PHY_9485_ANT_FAST_DIV_BIAS);
regval |= ((antconf->main_gaintb << AR_PHY_9485_ANT_DIV_MAIN_GAINTB_S)
& AR_PHY_9485_ANT_DIV_MAIN_GAINTB);
regval |= ((antconf->alt_gaintb << AR_PHY_9485_ANT_DIV_ALT_GAINTB_S)
& AR_PHY_9485_ANT_DIV_ALT_GAINTB);
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 *ini_reloaded)
{
unsigned int regWrites = 0;
u32 modesIndex;
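/*
 * modesIndex selects the per-mode column of the INI register tables,
 * matching the indices used by the full ar9003_hw_process_ini() path.
 */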
switch (chan->chanmode) {
case CHANNEL_A:
case CHANNEL_A_HT20:
modesIndex = 1;
break;
case CHANNEL_A_HT40PLUS:
case CHANNEL_A_HT40MINUS:
modesIndex = 2;
break;
case CHANNEL_G:
case CHANNEL_G_HT20:
case CHANNEL_B:
modesIndex = 4;
break;
case CHANNEL_G_HT40PLUS:
case CHANNEL_G_HT40MINUS:
modesIndex = 3;
break;
default:
return -EINVAL;
}
if (modesIndex == ah->modes_index) {
*ini_reloaded = false;
goto set_rfmode;
}
ar9003_hw_prog_ini(ah, &ah->iniSOC[ATH_INI_POST], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniMac[ATH_INI_POST], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
if (AR_SREV_9462_20(ah))
ar9003_hw_prog_ini(ah,
&ah->ini_radio_post_sys2ant,
modesIndex);
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
/*
* For 5GHz channels requiring Fast Clock, apply
* different modal values.
*/
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
REG_WRITE_ARRAY(&ah->iniModesFastClock, modesIndex, regWrites);
REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
ah->modes_index = modesIndex;
*ini_reloaded = true;
set_rfmode:
ar9003_hw_set_rfmode(ah, chan);
return 0;
}
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
static const u32 ar9300_cca_regs[6] = {
AR_PHY_CCA_0,
AR_PHY_CCA_1,
AR_PHY_CCA_2,
AR_PHY_EXT_CCA,
AR_PHY_EXT_CCA_1,
AR_PHY_EXT_CCA_2,
};
priv_ops->rf_set_freq = ar9003_hw_set_channel;
priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
priv_ops->init_bb = ar9003_hw_init_bb;
priv_ops->process_ini = ar9003_hw_process_ini;
priv_ops->set_rfmode = ar9003_hw_set_rfmode;
priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
priv_ops->rfbus_req = ar9003_hw_rfbus_req;
priv_ops->rfbus_done = ar9003_hw_rfbus_done;
priv_ops->ani_control = ar9003_hw_ani_control;
priv_ops->do_getnf = ar9003_hw_do_getnf;
priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
priv_ops->set_radar_params = ar9003_hw_set_radar_params;
priv_ops->fast_chan_change = ar9003_hw_fast_chan_change;
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
}
void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 idle_tmo_ms = ah->bb_watchdog_timeout_ms;
u32 val, idle_count;
if (!idle_tmo_ms) {
/* disable IRQ, disable chip-reset for BB panic */
REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) &
~(AR_PHY_WATCHDOG_RST_ENABLE |
AR_PHY_WATCHDOG_IRQ_ENABLE));
/* disable watchdog in non-IDLE mode, disable in IDLE mode */
REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
REG_READ(ah, AR_PHY_WATCHDOG_CTL_1) &
~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
AR_PHY_WATCHDOG_IDLE_ENABLE));
ath_dbg(common, RESET, "Disabled BB Watchdog\n");
return;
}
/* enable IRQ, disable chip-reset for BB watchdog */
val = REG_READ(ah, AR_PHY_WATCHDOG_CTL_2) & AR_PHY_WATCHDOG_CNTL2_MASK;
REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_2,
(val | AR_PHY_WATCHDOG_IRQ_ENABLE) &
~AR_PHY_WATCHDOG_RST_ENABLE);
/* bound limit to 10 secs */
if (idle_tmo_ms > 10000)
idle_tmo_ms = 10000;
/*
* The time unit for watchdog event is 2^15 44/88MHz cycles.
*
* For HT20 we have a time unit of 2^15/44 MHz = .74 ms per tick
* For HT40 we have a time unit of 2^15/88 MHz = .37 ms per tick
*
* Given we use fast clock now in 5 GHz, these time units should
* be common for both 2 GHz and 5 GHz.
*/
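/*
 * For example, at the 10 second cap on an HT20 channel:
 * idle_count = (100 * 10000) / 74 = 13513 ticks of ~0.74 ms each,
 * i.e. roughly 10 seconds.
 */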
idle_count = (100 * idle_tmo_ms) / 74;
if (ah->curchan && IS_CHAN_HT40(ah->curchan))
idle_count = (100 * idle_tmo_ms) / 37;
/*
* enable watchdog in non-IDLE mode, disable in IDLE mode,
* set idle time-out.
*/
REG_WRITE(ah, AR_PHY_WATCHDOG_CTL_1,
AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
AR_PHY_WATCHDOG_IDLE_MASK |
(AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
idle_tmo_ms);
}
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
{
/*
* we want to avoid printing in ISR context so we save the
* watchdog status to be printed later in bottom half context.
*/
ah->bb_watchdog_last_status = REG_READ(ah, AR_PHY_WATCHDOG_STATUS);
/*
* the watchdog timer should reset on status read, but to be
* sure we write 0 to the watchdog status bit.
*/
REG_WRITE(ah, AR_PHY_WATCHDOG_STATUS,
ah->bb_watchdog_last_status & ~AR_PHY_WATCHDOG_STATUS_CLR);
}
void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 status;
if (likely(!(common->debug_mask & ATH_DBG_RESET)))
return;
status = ah->bb_watchdog_last_status;
ath_dbg(common, RESET,
"\n==== BB update: BB status=0x%08x ====\n", status);
ath_dbg(common, RESET,
"** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
MS(status, AR_PHY_WATCHDOG_INFO),
MS(status, AR_PHY_WATCHDOG_DET_HANG),
MS(status, AR_PHY_WATCHDOG_RADAR_SM),
MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
MS(status, AR_PHY_WATCHDOG_AGC_SM),
MS(status, AR_PHY_WATCHDOG_SRCH_SM));
ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
REG_READ(ah, AR_PHY_GEN_CTRL));
#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
if (common->cc_survey.cycles)
ath_dbg(common, RESET,
"** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
ath_dbg(common, RESET, "==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
void ar9003_hw_disable_phy_restart(struct ath_hw *ah)
{
u32 val;
/* While receiving an unsupported rate frame, the rx state machine
* gets into state 0xb, and if a phy_restart happens in that
* state, the BB will hang. If the RXSM is in state 0xb after the
* first BB panic, make sure to disable phy_restart.
*/
if (!((MS(ah->bb_watchdog_last_status,
AR_PHY_WATCHDOG_RX_OFDM_SM) == 0xb) ||
ah->bb_hang_rx_ofdm))
return;
ah->bb_hang_rx_ofdm = true;
val = REG_READ(ah, AR_PHY_RESTART);
val &= ~AR_PHY_RESTART_ENA;
REG_WRITE(ah, AR_PHY_RESTART, val);
}
EXPORT_SYMBOL(ar9003_hw_disable_phy_restart);
| gpl-2.0 |
TeamRegular/android_kernel_lge_e2nxx-stock | drivers/scsi/hpsa.c | 3227 | 133932 | /*
* Disk Array driver for HP Smart Array SAS controllers
* Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
*
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/* board_id = Subsystem Device ID & Vendor ID
* product = Marketing Name for the board
* access = Address of the struct of function pointers
*/
static struct board_type products[] = {
{0x3241103C, "Smart Array P212", &SA5_access},
{0x3243103C, "Smart Array P410", &SA5_access},
{0x3245103C, "Smart Array P410i", &SA5_access},
{0x3247103C, "Smart Array P411", &SA5_access},
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
{0x3350103C, "Smart Array", &SA5_access},
{0x3351103C, "Smart Array", &SA5_access},
{0x3352103C, "Smart Array", &SA5_access},
{0x3353103C, "Smart Array", &SA5_access},
{0x3354103C, "Smart Array", &SA5_access},
{0x3355103C, "Smart Array", &SA5_access},
{0x3356103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static int number_of_controllers;
static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);
#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
int qdepth, int reason);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
unsigned long *priv = shost_priv(sdev->host);
return (struct ctlr_info *) *priv;
}
static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
unsigned long *priv = shost_priv(sh);
return (struct ctlr_info *) *priv;
}
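/*
 * In fixed-format sense data, SenseInfo[2] carries the sense key and
 * SenseInfo[12] the additional sense code (ASC), which is what the
 * checks below decode.
 */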
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c)
{
if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
return 0;
switch (c->err_info->SenseInfo[12]) {
case STATE_CHANGED:
dev_warn(&h->pdev->dev, HPSA "%d: a state change "
"detected, command retried\n", h->ctlr);
break;
case LUN_FAILED:
dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
"detected, action required\n", h->ctlr);
break;
case REPORT_LUNS_CHANGED:
dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
"changed, action required\n", h->ctlr);
/*
* Note: this REPORT_LUNS_CHANGED condition only occurs on the external
* target (array) devices.
*/
break;
case POWER_OR_RESET:
dev_warn(&h->pdev->dev, HPSA "%d: a power on "
"or device reset detected\n", h->ctlr);
break;
case UNIT_ATTENTION_CLEARED:
dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
"cleared by another initiator\n", h->ctlr);
break;
default:
dev_warn(&h->pdev->dev, HPSA "%d: unknown "
"unit attention detected\n", h->ctlr);
break;
}
return 1;
}
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
hpsa_scan_start(h->scsi_host);
return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
unsigned char *fwrev;
h = shost_to_hba(shost);
if (!h->hba_inquiry_data)
return 0;
fwrev = &h->hba_inquiry_data[32];
return snprintf(buf, 20, "%c%c%c%c\n",
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ctlr_info *h = shost_to_hba(shost);
return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
static ssize_t host_show_transport_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
return snprintf(buf, 20, "%s\n",
h->transMethod & CFGTBL_Trans_Performant ?
"performant" : "simple");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
0x324a103C, /* Smart Array P712m */
0x324b103C, /* Smart Array P711m */
0x3223103C, /* Smart Array P800 */
0x3234103C, /* Smart Array P400 */
0x3235103C, /* Smart Array P400i */
0x3211103C, /* Smart Array E200i */
0x3212103C, /* Smart Array E200 */
0x3213103C, /* Smart Array E200i */
0x3214103C, /* Smart Array E200i */
0x3215103C, /* Smart Array E200i */
0x3237103C, /* Smart Array E500 */
0x323D103C, /* Smart Array P700m */
0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
0x40700E11, /* Smart Array 5300 */
0x40820E11, /* Smart Array 532 */
0x40830E11, /* Smart Array 5312 */
0x409A0E11, /* Smart Array 641 */
0x409B0E11, /* Smart Array 642 */
0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
0x40800E11, /* Smart Array 5i */
0x40700E11, /* Smart Array 5300 */
0x40820E11, /* Smart Array 532 */
0x40830E11, /* Smart Array 5312 */
0x409A0E11, /* Smart Array 641 */
0x409B0E11, /* Smart Array 642 */
0x40910E11, /* Smart Array 6i */
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
* it. If we reset the one controlling the cache, the other will
* likely not be happy. Just forbid resetting this conjoined mess.
* The 640x isn't really supported by hpsa anyway.
*/
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
static int ctlr_is_hard_resettable(u32 board_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
if (unresettable_controller[i] == board_id)
return 0;
return 1;
}
static int ctlr_is_soft_resettable(u32 board_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
if (soft_unresettable_controller[i] == board_id)
return 0;
return 1;
}
static int ctlr_is_resettable(u32 board_id)
{
return ctlr_is_hard_resettable(board_id) ||
ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
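/*
 * The top two bits of byte 3 of the 8-byte LUN address form the
 * address-mode field; 0x40 indicates logical-device (volume) addressing
 * as opposed to a physical device.
 */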
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
return (scsi3addr[3] & 0xC0) == 0x40;
}
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
static ssize_t raid_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t l = 0;
unsigned char rlevel;
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
unsigned long flags;
sdev = to_scsi_device(dev);
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->lock, flags);
hdev = sdev->hostdata;
if (!hdev) {
spin_unlock_irqrestore(&h->lock, flags);
return -ENODEV;
}
/* Is this even a logical drive? */
if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
spin_unlock_irqrestore(&h->lock, flags);
l = snprintf(buf, PAGE_SIZE, "N/A\n");
return l;
}
rlevel = hdev->raid_level;
spin_unlock_irqrestore(&h->lock, flags);
if (rlevel > RAID_UNKNOWN)
rlevel = RAID_UNKNOWN;
l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
return l;
}
static ssize_t lunid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
unsigned long flags;
unsigned char lunid[8];
sdev = to_scsi_device(dev);
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->lock, flags);
hdev = sdev->hostdata;
if (!hdev) {
spin_unlock_irqrestore(&h->lock, flags);
return -ENODEV;
}
memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
lunid[0], lunid[1], lunid[2], lunid[3],
lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
unsigned long flags;
unsigned char sn[16];
sdev = to_scsi_device(dev);
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->lock, flags);
hdev = sdev->hostdata;
if (!hdev) {
spin_unlock_irqrestore(&h->lock, flags);
return -ENODEV;
}
memcpy(sn, hdev->device_id, sizeof(sn));
spin_unlock_irqrestore(&h->lock, flags);
return snprintf(buf, 16 * 2 + 2,
"%02X%02X%02X%02X%02X%02X%02X%02X"
"%02X%02X%02X%02X%02X%02X%02X%02X\n",
sn[0], sn[1], sn[2], sn[3],
sn[4], sn[5], sn[6], sn[7],
sn[8], sn[9], sn[10], sn[11],
sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
host_show_resettable, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
NULL,
};
static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_rescan,
&dev_attr_firmware_revision,
&dev_attr_commands_outstanding,
&dev_attr_transport_mode,
&dev_attr_resettable,
NULL,
};
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
.name = HPSA,
.proc_name = HPSA,
.queuecommand = hpsa_scsi_queue_command,
.scan_start = hpsa_scan_start,
.scan_finished = hpsa_scan_finished,
.change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
.sdev_attrs = hpsa_sdev_attrs,
.shost_attrs = hpsa_shost_attrs,
.max_sectors = 8192,
};
/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
list_add_tail(&c->list, list);
}
static inline u32 next_command(struct ctlr_info *h)
{
u32 a;
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h);
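/*
 * Performant mode: consume the reply ring. The low bit of each entry
 * is a parity tag that alternates on every pass through the ring, so
 * an entry is valid only while its low bit matches
 * h->reply_pool_wraparound.
 */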
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
(h->reply_pool_head)++;
h->commands_outstanding--;
} else {
a = FIFO_EMPTY;
}
/* Check for wraparound */
if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
h->reply_pool_head = h->reply_pool;
h->reply_pool_wraparound ^= 1;
}
return a;
}
/* set_performant_mode: Modify the tag for cciss performant
* set bit 0 for pull model, bits 3-1 for block fetch
* register number
*/
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
if (likely(h->transMethod & CFGTBL_Trans_Performant))
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
struct CommandList *c)
{
unsigned long flags;
set_performant_mode(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
start_io(h);
spin_unlock_irqrestore(&h->lock, flags);
}
static inline void removeQ(struct CommandList *c)
{
if (WARN_ON(list_empty(&c->list)))
return;
list_del_init(&c->list);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}
static inline int is_scsi_rev_5(struct ctlr_info *h)
{
if (!h->hba_inquiry_data)
return 0;
if ((h->hba_inquiry_data[2] & 0x07) == 5)
return 1;
return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
unsigned char scsi3addr[], int bus, int *target, int *lun)
{
/* finds an unused bus, target, lun for a new physical device
* assumes h->devlock is held
*/
int i, found = 0;
DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
__set_bit(h->dev[i]->target, lun_taken);
}
i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
if (i < HPSA_MAX_DEVICES) {
/* *bus = 1; */
*target = i;
*lun = 0;
found = 1;
}
return !found;
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *device,
struct hpsa_scsi_dev_t *added[], int *nadded)
{
/* assumes h->devlock is held */
int n = h->ndevices;
int i;
unsigned char addr1[8], addr2[8];
struct hpsa_scsi_dev_t *sd;
if (n >= HPSA_MAX_DEVICES) {
dev_err(&h->pdev->dev, "too many devices, some will be "
"inaccessible.\n");
return -1;
}
/* physical devices do not have lun or target assigned until now. */
if (device->lun != -1)
/* Logical device, lun is already assigned. */
goto lun_assigned;
/* If this device is a non-zero lun of a multi-lun device,
* byte 4 of the 8-byte LUN addr will contain the logical
* unit no., zero otherwise.
*/
if (device->scsi3addr[4] == 0) {
/* This is not a non-zero lun of a multi-lun device */
if (hpsa_find_target_lun(h, device->scsi3addr,
device->bus, &device->target, &device->lun) != 0)
return -1;
goto lun_assigned;
}
/* This is a non-zero lun of a multi-lun device.
* Search through our list and find the device which
* has the same 8 byte LUN address, excepting byte 4.
* Assign the same bus and target for this new LUN.
* Use the logical unit number from the firmware.
*/
memcpy(addr1, device->scsi3addr, 8);
addr1[4] = 0;
for (i = 0; i < n; i++) {
sd = h->dev[i];
memcpy(addr2, sd->scsi3addr, 8);
addr2[4] = 0;
/* differ only in byte 4? */
if (memcmp(addr1, addr2, 8) == 0) {
device->bus = sd->bus;
device->target = sd->target;
device->lun = device->scsi3addr[4];
break;
}
}
if (device->lun == -1) {
dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
" suspect firmware bug or unsupported hardware "
"configuration.\n");
return -1;
}
lun_assigned:
h->dev[n] = device;
h->ndevices++;
added[*nadded] = device;
(*nadded)++;
/* initially (before registering with the scsi layer) we don't
* know our hostno and we don't want to print anything the first
* time anyway (the scsi layer's inquiries will show that info)
*/
/* if (hostno != -1) */
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
scsi_device_type(device->devtype), hostno,
device->bus, device->target, device->lun);
return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry,
struct hpsa_scsi_dev_t *added[], int *nadded,
struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
/*
* New physical devices won't have target/lun assigned yet
* so we need to preserve the values in the slot we are replacing.
*/
if (new_entry->target == -1) {
new_entry->target = h->dev[entry]->target;
new_entry->lun = h->dev[entry]->lun;
}
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
/* assumes h->devlock is held */
int i;
struct hpsa_scsi_dev_t *sd;
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
sd = h->dev[entry];
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
for (i = entry; i < h->ndevices-1; i++)
h->dev[i] = h->dev[i+1];
h->ndevices--;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
sd->lun);
}
#define SCSI3ADDR_EQ(a, b) ( \
(a)[7] == (b)[7] && \
(a)[6] == (b)[6] && \
(a)[5] == (b)[5] && \
(a)[4] == (b)[4] && \
(a)[3] == (b)[3] && \
(a)[2] == (b)[2] && \
(a)[1] == (b)[1] && \
(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
struct hpsa_scsi_dev_t *added)
{
/* called when scsi_add_device fails in order to re-adjust
* h->dev[] to match the mid layer's view.
*/
unsigned long flags;
int i, j;
spin_lock_irqsave(&h->lock, flags);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i] == added) {
for (j = i; j < h->ndevices-1; j++)
h->dev[j] = h->dev[j+1];
h->ndevices--;
break;
}
}
spin_unlock_irqrestore(&h->lock, flags);
kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
struct hpsa_scsi_dev_t *dev2)
{
/* we compare everything except lun and target as these
* are not yet assigned. Compare parts likely
* to differ first
*/
if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
sizeof(dev1->scsi3addr)) != 0)
return 0;
if (memcmp(dev1->device_id, dev2->device_id,
sizeof(dev1->device_id)) != 0)
return 0;
if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
return 0;
if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
return 0;
if (dev1->devtype != dev2->devtype)
return 0;
if (dev1->bus != dev2->bus)
return 0;
return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
struct hpsa_scsi_dev_t *dev2)
{
/* Device attributes that can change, but don't mean
* that the device is a different device, nor that the OS
* needs to be told anything about the change.
*/
if (dev1->raid_level != dev2->raid_level)
return 1;
return 0;
}
/* Find needle in haystack. If exact match found, return DEVICE_SAME,
* and return needle location in *index. If scsi3addr matches, but not
* vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
* location in *index.
* In the case of a minor device attribute change, such as RAID level, just
* return DEVICE_UPDATED, along with the updated device's location in index.
* If needle not found, return DEVICE_NOT_FOUND.
*/
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
struct hpsa_scsi_dev_t *haystack[], int haystack_size,
int *index)
{
int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
for (i = 0; i < haystack_size; i++) {
if (haystack[i] == NULL) /* previously removed. */
continue;
if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
*index = i;
if (device_is_the_same(needle, haystack[i])) {
if (device_updated(needle, haystack[i]))
return DEVICE_UPDATED;
return DEVICE_SAME;
} else {
return DEVICE_CHANGED;
}
}
}
*index = -1;
return DEVICE_NOT_FOUND;
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
/* sd contains scsi3 addresses and devtypes, and inquiry
* data. This function takes what's in sd to be the current
* reality and updates h->dev[] to reflect that reality.
*/
int i, entry, device_change, changes = 0;
struct hpsa_scsi_dev_t *csd;
unsigned long flags;
struct hpsa_scsi_dev_t **added, **removed;
int nadded, nremoved;
struct Scsi_Host *sh = NULL;
added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
if (!added || !removed) {
dev_warn(&h->pdev->dev, "out of memory in "
"adjust_hpsa_scsi_table\n");
goto free_and_out;
}
spin_lock_irqsave(&h->devlock, flags);
/* find any devices in h->dev[] that are not in
* sd[] and remove them from h->dev[], and for any
* devices which have changed, remove the old device
* info and add the new device info.
* If minor device attributes change, just update
* the existing device structure.
*/
i = 0;
nremoved = 0;
nadded = 0;
while (i < h->ndevices) {
csd = h->dev[i];
device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
if (device_change == DEVICE_NOT_FOUND) {
changes++;
hpsa_scsi_remove_entry(h, hostno, i,
removed, &nremoved);
continue; /* remove ^^^, hence i not incremented */
} else if (device_change == DEVICE_CHANGED) {
changes++;
hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
added, &nadded, removed, &nremoved);
/* Set it to NULL to prevent it from being freed
* at the bottom of hpsa_update_scsi_devices()
*/
sd[entry] = NULL;
} else if (device_change == DEVICE_UPDATED) {
hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
}
i++;
}
/* Now, make sure every device listed in sd[] is also
* listed in h->dev[], adding them if they aren't found
*/
for (i = 0; i < nsds; i++) {
if (!sd[i]) /* if already added above. */
continue;
device_change = hpsa_scsi_find_entry(sd[i], h->dev,
h->ndevices, &entry);
if (device_change == DEVICE_NOT_FOUND) {
changes++;
if (hpsa_scsi_add_entry(h, hostno, sd[i],
added, &nadded) != 0)
break;
sd[i] = NULL; /* prevent from being freed later. */
} else if (device_change == DEVICE_CHANGED) {
/* should never happen... */
changes++;
dev_warn(&h->pdev->dev,
"device unexpectedly changed.\n");
/* but if it does happen, we just ignore that device */
}
}
spin_unlock_irqrestore(&h->devlock, flags);
/* Don't notify the scsi mid layer of any changes the first time through
* (or if there are no changes); scsi_scan_host will do it later, the
* first time through.
*/
if (hostno == -1 || !changes)
goto free_and_out;
sh = h->scsi_host;
/* Notify scsi mid layer of any removed devices */
for (i = 0; i < nremoved; i++) {
struct scsi_device *sdev =
scsi_device_lookup(sh, removed[i]->bus,
removed[i]->target, removed[i]->lun);
if (sdev != NULL) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
} else {
/* We don't expect to get here.
* Future cmds to this device will get selection
* timeout as if the device was gone.
*/
dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
" for removal.", hostno, removed[i]->bus,
removed[i]->target, removed[i]->lun);
}
kfree(removed[i]);
removed[i] = NULL;
}
/* Notify scsi mid layer of any added devices */
for (i = 0; i < nadded; i++) {
if (scsi_add_device(sh, added[i]->bus,
added[i]->target, added[i]->lun) == 0)
continue;
dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
"device not added.\n", hostno, added[i]->bus,
added[i]->target, added[i]->lun);
/* now we have to remove it from h->dev,
* since it didn't get added to scsi mid layer
*/
fixup_botched_add(h, added[i]);
}
free_and_out:
kfree(added);
kfree(removed);
}
/*
* Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
* Assumes h->devlock is held.
*/
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
int bus, int target, int lun)
{
int i;
struct hpsa_scsi_dev_t *sd;
for (i = 0; i < h->ndevices; i++) {
sd = h->dev[i];
if (sd->bus == bus && sd->target == target && sd->lun == lun)
return sd;
}
return NULL;
}
/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd;
unsigned long flags;
struct ctlr_info *h;
h = sdev_to_hba(sdev);
spin_lock_irqsave(&h->devlock, flags);
sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
sdev_id(sdev), sdev->lun);
if (sd != NULL)
sdev->hostdata = sd;
spin_unlock_irqrestore(&h->devlock, flags);
return 0;
}
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
/* nothing to do. */
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
int i;
if (!h->cmd_sg_list)
return;
for (i = 0; i < h->nr_cmds; i++) {
kfree(h->cmd_sg_list[i]);
h->cmd_sg_list[i] = NULL;
}
kfree(h->cmd_sg_list);
h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
int i;
if (h->chainsize <= 0)
return 0;
h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
GFP_KERNEL);
if (!h->cmd_sg_list)
return -ENOMEM;
for (i = 0; i < h->nr_cmds; i++) {
h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
h->chainsize, GFP_KERNEL);
if (!h->cmd_sg_list[i])
goto clean;
}
return 0;
clean:
hpsa_free_sg_chain_blocks(h);
return -ENOMEM;
}
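/*
 * When a command needs more scatter-gather entries than fit in the
 * CommandList itself, the last embedded descriptor is repurposed as a
 * chain pointer (Ext = HPSA_SG_CHAIN) to a separately DMA-mapped block
 * holding the remaining entries.
 */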
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
u64 temp64;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
chain_block = h->cmd_sg_list[c->cmdindex];
chain_sg->Ext = HPSA_SG_CHAIN;
chain_sg->Len = sizeof(*chain_sg) *
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg;
union u64bit temp64;
if (c->Header.SGTotal <= h->max_cmd_sg_entries)
return;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
temp64.val32.lower = chain_sg->Addr.lower;
temp64.val32.upper = chain_sg->Addr.upper;
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}
static void complete_scsi_command(struct CommandList *cp)
{
struct scsi_cmnd *cmd;
struct ctlr_info *h;
struct ErrorInfo *ei;
unsigned char sense_key;
unsigned char asc; /* additional sense code */
unsigned char ascq; /* additional sense code qualifier */
unsigned long sense_data_size;
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
h = cp->h;
scsi_dma_unmap(cmd); /* undo the DMA mappings */
if (cp->Header.SGTotal > h->max_cmd_sg_entries)
hpsa_unmap_sg_chain_block(h, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
sense_data_size = SCSI_SENSE_BUFFERSIZE;
else
sense_data_size = sizeof(ei->SenseInfo);
if (ei->SenseLen < sense_data_size)
sense_data_size = ei->SenseLen;
memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
cmd->scsi_done(cmd);
cmd_free(h, cp);
return;
}
/* an error has occurred */
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
if (ei->ScsiStatus) {
/* Get sense key */
sense_key = 0xf & ei->SenseInfo[2];
/* Get additional sense code */
asc = ei->SenseInfo[12];
/* Get additional sense code qualifier */
ascq = ei->SenseInfo[13];
}
if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
if (check_for_unit_attention(h, cp)) {
cmd->result = DID_SOFT_ERROR << 16;
break;
}
if (sense_key == ILLEGAL_REQUEST) {
/*
* SCSI REPORT_LUNS is commonly unsupported on
* Smart Array. Suppress noisy complaint.
*/
if (cp->Request.CDB[0] == REPORT_LUNS)
break;
/* If ASC/ASCQ indicate Logical Unit
* Not Supported condition,
*/
if ((asc == 0x25) && (ascq == 0x0)) {
dev_warn(&h->pdev->dev, "cp %p "
"has check condition\n", cp);
break;
}
}
if (sense_key == NOT_READY) {
/* If Sense is Not Ready, Logical Unit
* Not ready, Manual Intervention
* required
*/
if ((asc == 0x04) && (ascq == 0x03)) {
dev_warn(&h->pdev->dev, "cp %p "
"has check condition: unit "
"not ready, manual "
"intervention required\n", cp);
break;
}
}
if (sense_key == ABORTED_COMMAND) {
/* Aborted command is retryable */
dev_warn(&h->pdev->dev, "cp %p "
"has check condition: aborted command: "
"ASC: 0x%x, ASCQ: 0x%x\n",
cp, asc, ascq);
cmd->result = DID_SOFT_ERROR << 16;
break;
}
/* Must be some other type of check condition */
dev_warn(&h->pdev->dev, "cp %p has check condition: "
"unknown type: "
"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
"Returning result: 0x%x, "
"cmd=[%02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x]\n",
cp, sense_key, asc, ascq,
cmd->result,
cmd->cmnd[0], cmd->cmnd[1],
cmd->cmnd[2], cmd->cmnd[3],
cmd->cmnd[4], cmd->cmnd[5],
cmd->cmnd[6], cmd->cmnd[7],
cmd->cmnd[8], cmd->cmnd[9],
cmd->cmnd[10], cmd->cmnd[11],
cmd->cmnd[12], cmd->cmnd[13],
cmd->cmnd[14], cmd->cmnd[15]);
break;
}
/* Problem was not a check condition
* Pass it up to the upper layers...
*/
if (ei->ScsiStatus) {
dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
"Returning result: 0x%x\n",
cp, ei->ScsiStatus,
sense_key, asc, ascq,
cmd->result);
} else { /* scsi status is zero??? How??? */
dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
"Returning no connection.\n", cp),
/* Ordinarily, this case should never happen,
* but there is a bug in some released firmware
* revisions that allows it to happen if, for
* example, a 4100 backplane loses power and
* the tape drive is in it. We assume that
* it's a fatal error of some kind because we
* can't show that it wasn't. We will make it
* look like selection timeout since that is
* the most common reason for this to occur,
* and it's severe enough.
*/
cmd->result = DID_NO_CONNECT << 16;
}
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
break;
case CMD_DATA_OVERRUN:
dev_warn(&h->pdev->dev, "cp %p has"
" completed with data overrun "
"reported\n", cp);
break;
case CMD_INVALID: {
/* print_bytes(cp, sizeof(*cp), 1, 0);
print_cmd(cp); */
/* We get CMD_INVALID if you address a non-existent device
* instead of a selection timeout (no response). You will
* see this if you yank out a drive, then try to access it.
* This is kind of a shame because it means that any other
* CMD_INVALID (e.g. driver bug) will get interpreted as a
* missing target. */
cmd->result = DID_NO_CONNECT << 16;
}
break;
case CMD_PROTOCOL_ERR:
dev_warn(&h->pdev->dev, "cp %p has "
"protocol error \n", cp);
break;
case CMD_HARDWARE_ERR:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
break;
case CMD_CONNECTION_LOST:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
break;
case CMD_ABORTED:
cmd->result = DID_ABORT << 16;
dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
cp, ei->ScsiStatus);
break;
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
"abort\n", cp);
break;
case CMD_TIMEOUT:
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
break;
case CMD_UNABORTABLE:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "Command unabortable\n");
break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
cp, ei->CommandStatus);
}
cmd->scsi_done(cmd);
cmd_free(h, cp);
}
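/* Undo the DMA mappings set up for a command's scatter-gather list.
* Each SG entry stores the bus address as two 32-bit halves, so it is
* reassembled here before being handed back to pci_unmap_single().
*/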
static void hpsa_pci_unmap(struct pci_dev *pdev,
struct CommandList *c, int sg_used, int data_direction)
{
int i;
union u64bit addr64;
for (i = 0; i < sg_used; i++) {
addr64.val32.lower = c->SG[i].Addr.lower;
addr64.val32.upper = c->SG[i].Addr.upper;
pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
data_direction);
}
}
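/* Map a single contiguous kernel buffer for DMA and describe it with
* one SG entry in the command. A zero-length buffer (or PCI_DMA_NONE)
* yields an empty SG list.
*/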
static void hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
int data_direction)
{
u64 addr64;
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
return;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}
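/* Send a command and wait synchronously for its completion. The
* on-stack completion is signalled from finish_cmd() via c->waiting
* when the interrupt handler sees the command come back.
*/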
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
struct CommandList *c)
{
DECLARE_COMPLETION_ONSTACK(wait);
c->waiting = &wait;
enqueue_cmd_and_start_io(h, c);
wait_for_completion(&wait);
}
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
struct CommandList *c)
{
unsigned long flags;
/* If controller lockup detected, fake a hardware error. */
spin_lock_irqsave(&h->lock, flags);
if (unlikely(h->lockup_detected)) {
spin_unlock_irqrestore(&h->lock, flags);
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
} else {
spin_unlock_irqrestore(&h->lock, flags);
hpsa_scsi_do_simple_cmd_core(h, c);
}
}
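/* As above, but retry (up to three more attempts) if the target
* reports a unit attention; err_info is cleared before each attempt.
*/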
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
struct CommandList *c, int data_direction)
{
int retry_count = 0;
do {
memset(c->err_info, 0, sizeof(*c->err_info));
hpsa_scsi_do_simple_cmd_core(h, c);
retry_count++;
} while (check_for_unit_attention(h, c) && retry_count <= 3);
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
struct ErrorInfo *ei;
struct device *d = &cp->h->pdev->dev;
ei = cp->err_info;
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
dev_warn(d, "cmd %p has completed with errors\n", cp);
dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
ei->ScsiStatus);
if (ei->ScsiStatus == 0)
dev_warn(d, "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
"reported incorrectly due to a known "
"firmware bug, circa July, 2001.)\n");
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
dev_info(d, "UNDERRUN\n");
break;
case CMD_DATA_OVERRUN:
dev_warn(d, "cp %p has completed with data overrun\n", cp);
break;
case CMD_INVALID: {
/* controller unfortunately reports SCSI passthru's
* to non-existent targets as invalid commands.
*/
dev_warn(d, "cp %p is reported invalid (probably means "
"target device no longer present)\n", cp);
/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
print_cmd(cp); */
}
break;
case CMD_PROTOCOL_ERR:
dev_warn(d, "cp %p has protocol error \n", cp);
break;
case CMD_HARDWARE_ERR:
/* cmd->result = DID_ERROR << 16; */
dev_warn(d, "cp %p had hardware error\n", cp);
break;
case CMD_CONNECTION_LOST:
dev_warn(d, "cp %p had connection lost\n", cp);
break;
case CMD_ABORTED:
dev_warn(d, "cp %p was aborted\n", cp);
break;
case CMD_ABORT_FAILED:
dev_warn(d, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
break;
case CMD_TIMEOUT:
dev_warn(d, "cp %p timed out\n", cp);
break;
case CMD_UNABORTABLE:
dev_warn(d, "Command unabortable\n");
break;
default:
dev_warn(d, "cp %p returned unknown status %x\n", cp,
ei->CommandStatus);
}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
unsigned char page, unsigned char *buf,
unsigned char bufsize)
{
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -ENOMEM;
}
fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
cmd_special_free(h, c);
return rc;
}
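/* Send a device-reset message to the given SCSI address and wait for
* it synchronously. There is no data phase, so nothing to unmap.
*/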
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
int rc = IO_OK;
struct CommandList *c;
struct ErrorInfo *ei;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -ENOMEM;
}
fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
if (ei->CommandStatus != 0) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
cmd_special_free(h, c);
return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
unsigned char *scsi3addr, unsigned char *raid_level)
{
int rc;
unsigned char *buf;
*raid_level = RAID_UNKNOWN;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return;
rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
if (rc == 0)
*raid_level = buf[8];
if (*raid_level > RAID_UNKNOWN)
*raid_level = RAID_UNKNOWN;
kfree(buf);
return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
unsigned char *device_id, int buflen)
{
int rc;
unsigned char *buf;
if (buflen > 16)
buflen = 16;
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return -1;
rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
if (rc == 0)
memcpy(device_id, &buf[8], buflen);
kfree(buf);
return rc != 0;
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
struct ReportLUNdata *buf, int bufsize,
int extended_response)
{
int rc = IO_OK;
struct CommandList *c;
unsigned char scsi3addr[8];
struct ErrorInfo *ei;
c = cmd_special_alloc(h);
if (c == NULL) { /* trouble... */
dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -1;
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
buf, bufsize, 0, scsi3addr, TYPE_CMD);
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
cmd_special_free(h, c);
return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportLUNdata *buf,
int bufsize, int extended_response)
{
return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
struct ReportLUNdata *buf, int bufsize)
{
return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
int bus, int target, int lun)
{
device->bus = bus;
device->target = target;
device->lun = lun;
}
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
{
#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
unsigned char *inq_buff;
unsigned char *obdr_sig;
inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (!inq_buff)
goto bail_out;
/* Do an inquiry to the device to see what it is. */
if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
/* Inquiry failed (msg printed already) */
dev_err(&h->pdev->dev,
"hpsa_update_device_info: inquiry failed\n");
goto bail_out;
}
this_device->devtype = (inq_buff[0] & 0x1f);
memcpy(this_device->scsi3addr, scsi3addr, 8);
memcpy(this_device->vendor, &inq_buff[8],
sizeof(this_device->vendor));
memcpy(this_device->model, &inq_buff[16],
sizeof(this_device->model));
memset(this_device->device_id, 0,
sizeof(this_device->device_id));
hpsa_get_device_id(h, scsi3addr, this_device->device_id,
sizeof(this_device->device_id));
if (this_device->devtype == TYPE_DISK &&
is_logical_dev_addr_mode(scsi3addr))
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
else
this_device->raid_level = RAID_UNKNOWN;
if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
* by looking for "$DR-10" at offset 43 in inquiry data.
*/
obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
strncmp(obdr_sig, OBDR_TAPE_SIG,
OBDR_SIG_LEN) == 0);
}
kfree(inq_buff);
return 0;
bail_out:
kfree(inq_buff);
return 1;
}
static unsigned char *ext_target_model[] = {
"MSA2012",
"MSA2024",
"MSA2312",
"MSA2324",
"P2000 G3 SAS",
NULL,
};
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
int i;
for (i = 0; ext_target_model[i]; i++)
if (strncmp(device->model, ext_target_model[i],
strlen(ext_target_model[i])) == 0)
return 1;
return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
* Puts non-external target logical volumes on bus 0, external target logical
* volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
* Logical drive target and lun are assigned at this time, but
* physical device lun and target assignment are deferred (assigned
* in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
*/
static void figure_bus_target_lun(struct ctlr_info *h,
u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
if (!is_logical_dev_addr_mode(lunaddrbytes)) {
/* physical device, target and lun filled in later */
if (is_hba_lunid(lunaddrbytes))
hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
else
/* defer target, lun assignment for physical devices */
hpsa_set_bus_target_lun(device, 2, -1, -1);
return;
}
/* It's a logical device */
if (is_ext_target(h, device)) {
/* External target (array) case: put logicals on bus 1 and match
* the target/lun numbers the box reports. Any other Smart Array:
* bus 0, target 0, match the lunid.
*/
hpsa_set_bus_target_lun(device,
1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
return;
}
hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
/*
* If there is no lun 0 on a target, Linux won't find any devices.
* For the external targets (arrays), we have to manually detect the enclosure
* which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
* it for some reason. *tmpdevice is the target we're adding,
* this_device is a pointer into the current element of currentsd[]
* that we're building up in update_scsi_devices(), below.
* lunzerobits is a bitmap that tracks which targets already have a
* lun 0 assigned.
* Returns 1 if an enclosure was added, 0 if not.
*/
static int add_ext_target_dev(struct ctlr_info *h,
struct hpsa_scsi_dev_t *tmpdevice,
struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
unsigned long lunzerobits[], int *n_ext_target_devs)
{
unsigned char scsi3addr[8];
if (test_bit(tmpdevice->target, lunzerobits))
return 0; /* There is already a lun 0 on this target. */
if (!is_logical_dev_addr_mode(lunaddrbytes))
return 0; /* It's the logical targets that may lack lun 0. */
if (!is_ext_target(h, tmpdevice))
return 0; /* Only external target devices have this problem. */
if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
return 0;
memset(scsi3addr, 0, 8);
scsi3addr[3] = tmpdevice->target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
if (is_scsi_rev_5(h))
return 0; /* p1210m doesn't need to do this. */
if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
dev_warn(&h->pdev->dev, "Maximum number of external "
"target devices exceeded. Check your hardware "
"configuration.");
return 0;
}
if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
return 0;
(*n_ext_target_devs)++;
hpsa_set_bus_target_lun(this_device,
tmpdevice->bus, tmpdevice->target, 0);
set_bit(tmpdevice->target, lunzerobits);
return 1;
}
/*
* Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
* logdev. The number of luns in physdev and logdev are returned in
* *nphysicals and *nlogicals, respectively.
* Returns 0 on success, -1 otherwise.
*/
static int hpsa_gather_lun_info(struct ctlr_info *h,
int reportlunsize,
struct ReportLUNdata *physdev, u32 *nphysicals,
struct ReportLUNdata *logdev, u32 *nlogicals)
{
if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
return -1;
}
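/* LUNListLength is a big-endian byte count; each LUN entry is 8 bytes. */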
*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
if (*nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
*nphysicals - HPSA_MAX_PHYS_LUN);
*nphysicals = HPSA_MAX_PHYS_LUN;
}
if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
return -1;
}
*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
/* Reject Logicals in excess of our max capability. */
if (*nlogicals > HPSA_MAX_LUN) {
dev_warn(&h->pdev->dev,
"maximum logical LUNs (%d) exceeded. "
"%d LUNs ignored.\n", HPSA_MAX_LUN,
*nlogicals - HPSA_MAX_LUN);
*nlogicals = HPSA_MAX_LUN;
}
if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev,
"maximum logical + physical LUNs (%d) exceeded. "
"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
}
return 0;
}
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
struct ReportLUNdata *logdev_list)
{
/* Helper function, figure out where the LUN ID info is coming from
* given index i, lists of physical and logical devices, where in
* the list the raid controller is supposed to appear (first or last)
*/
int logicals_start = nphysicals + (raid_ctlr_position == 0);
int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
if (i == raid_ctlr_position)
return RAID_CTLR_LUNID;
if (i < logicals_start)
return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
if (i < last_device)
return &logdev_list->LUN[i - nphysicals -
(raid_ctlr_position == 0)][0];
BUG();
return NULL;
}
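/* Worked example: on a SCSI rev-5 controller, raid_ctlr_position == 0
* (see hpsa_update_scsi_devices below), so index 0 is the RAID
* controller itself, indices 1..nphysicals map to
* physdev_list->LUN[0..nphysicals-1], and the remaining indices walk
* logdev_list. On older controllers the controller appears last.
*/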
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
* that some devices have changed, so we do a report
* physical luns and report logical luns cmd, and adjust
* our list of devices accordingly.
*
* The scsi3addr's of devices won't change so long as the
* adapter is not reset. That means we can rescan and
* tell which devices we already know about, vs. new
* devices, vs. disappearing devices.
*/
struct ReportLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
memset(lunzerobits, 0, sizeof(lunzerobits));
if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
logdev_list, &nlogicals))
goto out;
/* We might see up to the maximum number of logical and physical disks
* plus external target devices, and a device for the local RAID
* controller.
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
if (i >= HPSA_MAX_DEVICES) {
dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
" %d devices ignored.\n", HPSA_MAX_DEVICES,
ndevs_to_allocate - HPSA_MAX_DEVICES);
break;
}
currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
if (!currentsd[i]) {
dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
__FILE__, __LINE__);
goto out;
}
ndev_allocated++;
}
if (unlikely(is_scsi_rev_5(h)))
raid_ctlr_position = 0;
else
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
n_ext_target_devs = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
/* Figure out where the LUN ID info is coming from */
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
i, nphysicals, nlogicals, physdev_list, logdev_list);
/* skip masked physical devices. */
if ((lunaddrbytes[3] & 0xC0) &&
i < nphysicals + (raid_ctlr_position == 0))
continue;
/* Get device type, vendor, model, device id */
if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR))
continue; /* skip it if we can't talk to it. */
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
/*
* For external target devices, we have to insert a LUN 0 which
* doesn't show up in CCISS_REPORT_PHYSICAL data, but there
* is nonetheless an enclosure device there. We have to
* present it, otherwise Linux won't find anything if
* there is no lun 0.
*/
if (add_ext_target_dev(h, tmpdevice, this_device,
lunaddrbytes, lunzerobits,
&n_ext_target_devs)) {
ncurrent++;
this_device = currentsd[ncurrent];
}
*this_device = *tmpdevice;
switch (this_device->devtype) {
case TYPE_ROM:
/* We don't *really* support actual CD-ROM devices,
* just "One Button Disaster Recovery" tape drive
* which temporarily pretends to be a CD-ROM drive.
* So we check that the device is really an OBDR tape
* device by checking for "$DR-10" in bytes 43-48 of
* the inquiry data.
*/
if (is_OBDR)
ncurrent++;
break;
case TYPE_DISK:
if (i < nphysicals)
break;
ncurrent++;
break;
case TYPE_TAPE:
case TYPE_MEDIUM_CHANGER:
ncurrent++;
break;
case TYPE_RAID:
/* Only present the Smartarray HBA as a RAID controller.
* If it's a RAID controller other than the HBA itself
* (an external RAID controller, MSA500 or similar)
* don't present it.
*/
if (!is_hba_lunid(lunaddrbytes))
break;
ncurrent++;
break;
default:
break;
}
if (ncurrent >= HPSA_MAX_DEVICES)
break;
}
adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
kfree(tmpdevice);
for (i = 0; i < ndev_allocated; i++)
kfree(currentsd[i]);
kfree(currentsd);
kfree(physdev_list);
kfree(logdev_list);
}
/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
* dma mapping and fills in the scatter gather entries of the
* hpsa command, cp.
*/
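/* Note: when a request needs more SG entries than fit in the command
* itself (h->max_cmd_sg_entries), the tail of the list spills into a
* separate chain block mapped by hpsa_map_sg_chain_block().
*/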
static int hpsa_scatter_gather(struct ctlr_info *h,
struct CommandList *cp,
struct scsi_cmnd *cmd)
{
unsigned int len;
struct scatterlist *sg;
u64 addr64;
int use_sg, i, sg_index, chained;
struct SGDescriptor *curr_sg;
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
use_sg = scsi_dma_map(cmd);
if (use_sg < 0)
return use_sg;
if (!use_sg)
goto sglist_finished;
curr_sg = cp->SG;
chained = 0;
sg_index = 0;
scsi_for_each_sg(cmd, sg, use_sg, i) {
if (i == h->max_cmd_sg_entries - 1 &&
use_sg > h->max_cmd_sg_entries) {
chained = 1;
curr_sg = h->cmd_sg_list[cp->cmdindex];
sg_index = 0;
}
addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
curr_sg->Len = len;
curr_sg->Ext = 0; /* we are not chaining */
curr_sg++;
}
if (use_sg + chained > h->maxSG)
h->maxSG = use_sg + chained;
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
hpsa_map_sg_chain_block(h, cp);
return 0;
}
sglist_finished:
cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
return 0;
}
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
unsigned char scsi3addr[8];
struct CommandList *c;
unsigned long flags;
/* Get the ptr to our adapter structure out of cmd->host. */
h = sdev_to_hba(cmd->device);
dev = cmd->device->hostdata;
if (!dev) {
cmd->result = DID_NO_CONNECT << 16;
done(cmd);
return 0;
}
memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
spin_lock_irqsave(&h->lock, flags);
if (unlikely(h->lockup_detected)) {
spin_unlock_irqrestore(&h->lock, flags);
cmd->result = DID_ERROR << 16;
done(cmd);
return 0;
}
/* Need a lock as this is being allocated from the pool */
c = cmd_alloc(h);
spin_unlock_irqrestore(&h->lock, flags);
if (c == NULL) { /* trouble... */
dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
/* Fill in the command list header */
cmd->scsi_done = done; /* save this for use by completion code */
/* save c in case we have to abort it */
cmd->host_scribble = (unsigned char *) c;
c->cmd_type = CMD_SCSI;
c->scsi_cmd = cmd;
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
/* Fill in the request block... */
c->Request.Timeout = 0;
memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
c->Request.CDBLen = cmd->cmd_len;
memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
c->Request.Type.Type = TYPE_CMD;
c->Request.Type.Attribute = ATTR_SIMPLE;
switch (cmd->sc_data_direction) {
case DMA_TO_DEVICE:
c->Request.Type.Direction = XFER_WRITE;
break;
case DMA_FROM_DEVICE:
c->Request.Type.Direction = XFER_READ;
break;
case DMA_NONE:
c->Request.Type.Direction = XFER_NONE;
break;
case DMA_BIDIRECTIONAL:
/* This can happen if a buggy application does a scsi passthru
* and sets both inlen and outlen to non-zero. ( see
* ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
*/
c->Request.Type.Direction = XFER_RSVD;
/* This is technically wrong, and hpsa controllers should
* reject it with CMD_INVALID, which is the most correct
* response, but non-fibre backends appear to let it
* slide by, and give the same results as if this field
* were set correctly. Either way is acceptable for
* our purposes here.
*/
break;
default:
dev_err(&h->pdev->dev, "unknown data direction: %d\n",
cmd->sc_data_direction);
BUG();
break;
}
if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
cmd_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
enqueue_cmd_and_start_io(h, c);
/* the cmd'll come back via intr handler in complete_scsi_command() */
return 0;
}
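/* DEF_SCSI_QCMD() generates hpsa_scsi_queue_command(), a wrapper that
* takes the host lock around hpsa_scsi_queue_command_lck() above.
*/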
static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
static void hpsa_scan_start(struct Scsi_Host *sh)
{
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
if (h->scan_finished)
break;
spin_unlock_irqrestore(&h->scan_lock, flags);
wait_event(h->scan_wait_queue, h->scan_finished);
/* Note: We don't need to worry about a race between this
* thread and driver unload because the midlayer will
* have incremented the reference count, so unload won't
* happen if we're in here.
*/
}
h->scan_finished = 0; /* mark scan as in progress */
spin_unlock_irqrestore(&h->scan_lock, flags);
hpsa_update_scsi_devices(h, h->scsi_host->host_no);
spin_lock_irqsave(&h->scan_lock, flags);
h->scan_finished = 1; /* mark scan as finished. */
wake_up_all(&h->scan_wait_queue);
spin_unlock_irqrestore(&h->scan_lock, flags);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time)
{
struct ctlr_info *h = shost_to_hba(sh);
unsigned long flags;
int finished;
spin_lock_irqsave(&h->scan_lock, flags);
finished = h->scan_finished;
spin_unlock_irqrestore(&h->scan_lock, flags);
return finished;
}
static int hpsa_change_queue_depth(struct scsi_device *sdev,
int qdepth, int reason)
{
struct ctlr_info *h = sdev_to_hba(sdev);
if (reason != SCSI_QDEPTH_DEFAULT)
return -ENOTSUPP;
if (qdepth < 1)
qdepth = 1;
else
if (qdepth > h->nr_cmds)
qdepth = h->nr_cmds;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
/* we are being forcibly unloaded, and may not refuse. */
scsi_remove_host(h->scsi_host);
scsi_host_put(h->scsi_host);
h->scsi_host = NULL;
}
static int hpsa_register_scsi(struct ctlr_info *h)
{
struct Scsi_Host *sh;
int error;
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
if (sh == NULL)
goto fail;
sh->io_port = 0;
sh->n_io_port = 0;
sh->this_id = -1;
sh->max_channel = 3;
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
sh->cmd_per_lun = h->nr_cmds;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
goto fail_host_put;
scsi_scan_host(sh);
return 0;
fail_host_put:
dev_err(&h->pdev->dev, "%s: scsi_add_host"
" failed for controller %d\n", __func__, h->ctlr);
scsi_host_put(sh);
return error;
fail:
dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
" failed for controller %d\n", __func__, h->ctlr);
return -ENOMEM;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
unsigned char lunaddr[])
{
int rc = 0;
int count = 0;
int waittime = 1; /* seconds */
struct CommandList *c;
c = cmd_special_alloc(h);
if (!c) {
dev_warn(&h->pdev->dev, "out of memory in "
"wait_for_device_to_become_ready.\n");
return IO_ERROR;
}
/* Send test unit ready until device ready, or give up. */
while (count < HPSA_TUR_RETRY_LIMIT) {
/* Wait a bit. Do this first, because if we send
* the TUR right away, the reset will just abort it.
*/
msleep(1000 * waittime);
count++;
/* Increase wait time with each try, up to a point. */
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
/* Send the Test Unit Ready */
fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
if (c->err_info->CommandStatus == CMD_SUCCESS)
break;
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
(c->err_info->SenseInfo[2] == NO_SENSE ||
c->err_info->SenseInfo[2] == UNIT_ATTENTION))
break;
dev_warn(&h->pdev->dev, "waiting %d secs "
"for device to become ready.\n", waittime);
rc = 1; /* device not ready. */
}
if (rc)
dev_warn(&h->pdev->dev, "giving up on device.\n");
else
dev_warn(&h->pdev->dev, "device is ready.\n");
cmd_special_free(h, c);
return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
*/
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
if (h == NULL) /* paranoia */
return FAILED;
dev = scsicmd->device->hostdata;
if (!dev) {
dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
"device lookup failed.\n");
return FAILED;
}
dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
/* send a reset to the SCSI LUN which the command was sent to */
rc = hpsa_send_reset(h, dev->scsi3addr);
if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
return SUCCESS;
dev_warn(&h->pdev->dev, "resetting device failed.\n");
return FAILED;
}
/*
* For operations that cannot sleep, a command block is allocated at init,
* and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
* which ones are free or in use. Lock must be held when calling this.
* cmd_free() is the complement.
*/
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
struct CommandList *c;
int i;
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
do {
i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
if (i == h->nr_cmds)
return NULL;
} while (test_and_set_bit
(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
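/* The find/test_and_set loop above retries if another thread grabs
* the same bit between the search and the atomic set.
*/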
c = h->cmd_pool + i;
memset(c, 0, sizeof(*c));
cmd_dma_handle = h->cmd_pool_dhandle
+ i * sizeof(*c);
c->err_info = h->errinfo_pool + i;
memset(c->err_info, 0, sizeof(*c->err_info));
err_dma_handle = h->errinfo_pool_dhandle
+ i * sizeof(*c->err_info);
h->nr_allocs++;
c->cmdindex = i;
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
c->ErrDesc.Addr.upper = temp64.val32.upper;
c->ErrDesc.Len = sizeof(*c->err_info);
c->h = h;
return c;
}
/* For operations that can sleep (kmalloc may block), this routine
* can be called. The lock need not be held to call
* cmd_special_alloc. cmd_special_free() is the complement.
*/
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
struct CommandList *c;
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
if (c == NULL)
return NULL;
memset(c, 0, sizeof(*c));
c->cmdindex = -1;
c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
&err_dma_handle);
if (c->err_info == NULL) {
pci_free_consistent(h->pdev,
sizeof(*c), c, cmd_dma_handle);
return NULL;
}
memset(c->err_info, 0, sizeof(*c->err_info));
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
c->ErrDesc.Addr.upper = temp64.val32.upper;
c->ErrDesc.Len = sizeof(*c->err_info);
c->h = h;
return c;
}
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
int i;
i = c - h->cmd_pool;
clear_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
h->nr_frees++;
}
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
union u64bit temp64;
temp64.val32.lower = c->ErrDesc.Addr.lower;
temp64.val32.upper = c->ErrDesc.Addr.upper;
pci_free_consistent(h->pdev, sizeof(*c->err_info),
c->err_info, (dma_addr_t) temp64.val);
pci_free_consistent(h->pdev, sizeof(*c),
c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT
static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
IOCTL32_Command_struct __user *arg32 =
(IOCTL32_Command_struct __user *) arg;
IOCTL_Command_struct arg64;
IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
int err;
u32 cp;
memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
err |= copy_from_user(&arg64.Request, &arg32->Request,
sizeof(arg64.Request));
err |= copy_from_user(&arg64.error_info, &arg32->error_info,
sizeof(arg64.error_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(cp, &arg32->buf);
arg64.buf = compat_ptr(cp);
err |= copy_to_user(p, &arg64, sizeof(arg64));
if (err)
return -EFAULT;
err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
if (err)
return err;
err |= copy_in_user(&arg32->error_info, &p->error_info,
sizeof(arg32->error_info));
if (err)
return -EFAULT;
return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
int cmd, void *arg)
{
BIG_IOCTL32_Command_struct __user *arg32 =
(BIG_IOCTL32_Command_struct __user *) arg;
BIG_IOCTL_Command_struct arg64;
BIG_IOCTL_Command_struct __user *p =
compat_alloc_user_space(sizeof(arg64));
int err;
u32 cp;
memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
err |= copy_from_user(&arg64.Request, &arg32->Request,
sizeof(arg64.Request));
err |= copy_from_user(&arg64.error_info, &arg32->error_info,
sizeof(arg64.error_info));
err |= get_user(arg64.buf_size, &arg32->buf_size);
err |= get_user(arg64.malloc_size, &arg32->malloc_size);
err |= get_user(cp, &arg32->buf);
arg64.buf = compat_ptr(cp);
err |= copy_to_user(p, &arg64, sizeof(arg64));
if (err)
return -EFAULT;
err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
if (err)
return err;
err |= copy_in_user(&arg32->error_info, &p->error_info,
sizeof(arg32->error_info));
if (err)
return -EFAULT;
return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
switch (cmd) {
case CCISS_GETPCIINFO:
case CCISS_GETINTINFO:
case CCISS_SETINTINFO:
case CCISS_GETNODENAME:
case CCISS_SETNODENAME:
case CCISS_GETHEARTBEAT:
case CCISS_GETBUSTYPES:
case CCISS_GETFIRMVER:
case CCISS_GETDRIVVER:
case CCISS_REVALIDVOLS:
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
case CCISS_REGNEWD:
case CCISS_RESCANDISK:
case CCISS_GETLUNINFO:
return hpsa_ioctl(dev, cmd, arg);
case CCISS_PASSTHRU32:
return hpsa_ioctl32_passthru(dev, cmd, arg);
case CCISS_BIG_PASSTHRU32:
return hpsa_ioctl32_big_passthru(dev, cmd, arg);
default:
return -ENOIOCTLCMD;
}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
struct hpsa_pci_info pciinfo;
if (!argp)
return -EINVAL;
pciinfo.domain = pci_domain_nr(h->pdev->bus);
pciinfo.bus = h->pdev->bus->number;
pciinfo.dev_fn = h->pdev->devfn;
pciinfo.board_id = h->board_id;
if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
return -EFAULT;
return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
DriverVer_type DriverVer;
unsigned char vmaj, vmin, vsubmin;
int rc;
rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
&vmaj, &vmin, &vsubmin);
if (rc != 3) {
dev_info(&h->pdev->dev, "driver version string '%s' "
"unrecognized.", HPSA_DRIVER_VERSION);
vmaj = 0;
vmin = 0;
vsubmin = 0;
}
DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
if (!argp)
return -EINVAL;
if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
return -EFAULT;
return 0;
}
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
IOCTL_Command_struct iocommand;
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
if (!argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
return -EFAULT;
if ((iocommand.buf_size < 1) &&
(iocommand.Request.Type.Direction != XFER_NONE)) {
return -EINVAL;
}
if (iocommand.buf_size > 0) {
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -ENOMEM;
if (iocommand.Request.Type.Direction == XFER_WRITE) {
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
kfree(buff);
return -EFAULT;
}
} else {
memset(buff, 0, iocommand.buf_size);
}
}
c = cmd_special_alloc(h);
if (c == NULL) {
kfree(buff);
return -ENOMEM;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
/* Fill in Command Header */
c->Header.ReplyQueue = 0; /* unused in simple mode */
if (iocommand.buf_size > 0) { /* buffer to fill */
c->Header.SGList = 1;
c->Header.SGTotal = 1;
} else { /* no buffers to fill */
c->Header.SGList = 0;
c->Header.SGTotal = 0;
}
memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
/* use the kernel address of the cmd block for tag */
c->Header.Tag.lower = c->busaddr;
/* Fill in Request block */
memcpy(&c->Request, &iocommand.Request,
sizeof(c->Request));
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
c->SG[0].Ext = 0; /* we are not chaining */
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (iocommand.buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
kfree(buff);
cmd_special_free(h, c);
return -EFAULT;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
cmd_special_free(h, c);
return -EFAULT;
}
}
kfree(buff);
cmd_special_free(h, c);
return 0;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
BIG_IOCTL_Command_struct *ioc;
struct CommandList *c;
unsigned char **buff = NULL;
int *buff_size = NULL;
union u64bit temp64;
BYTE sg_used = 0;
int status = 0;
int i;
u32 left;
u32 sz;
BYTE __user *data_ptr;
if (!argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
if (!ioc) {
status = -ENOMEM;
goto cleanup1;
}
if (copy_from_user(ioc, argp, sizeof(*ioc))) {
status = -EFAULT;
goto cleanup1;
}
if ((ioc->buf_size < 1) &&
(ioc->Request.Type.Direction != XFER_NONE)) {
status = -EINVAL;
goto cleanup1;
}
/* Check kmalloc limits using all SGs */
if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
status = -EINVAL;
goto cleanup1;
}
if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
status = -EINVAL;
goto cleanup1;
}
buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
if (!buff) {
status = -ENOMEM;
goto cleanup1;
}
buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
if (!buff_size) {
status = -ENOMEM;
goto cleanup1;
}
left = ioc->buf_size;
data_ptr = ioc->buf;
while (left) {
sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
buff_size[sg_used] = sz;
buff[sg_used] = kmalloc(sz, GFP_KERNEL);
if (buff[sg_used] == NULL) {
status = -ENOMEM;
goto cleanup1;
}
if (ioc->Request.Type.Direction == XFER_WRITE) {
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
status = -EFAULT;
goto cleanup1;
}
} else
memset(buff[sg_used], 0, sz);
left -= sz;
data_ptr += sz;
sg_used++;
}
c = cmd_special_alloc(h);
if (c == NULL) {
status = -ENOMEM;
goto cleanup1;
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
c->Header.SGList = c->Header.SGTotal = sg_used;
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
c->Header.Tag.lower = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
if (ioc->buf_size > 0) {
int i;
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
/* we are not chaining */
c->SG[i].Ext = 0;
}
}
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
if (sg_used)
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
if (copy_to_user(argp, ioc, sizeof(*ioc))) {
cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
if (copy_to_user(ptr, buff[i], buff_size[i])) {
cmd_special_free(h, c);
status = -EFAULT;
goto cleanup1;
}
ptr += buff_size[i];
}
}
cmd_special_free(h, c);
status = 0;
cleanup1:
if (buff) {
for (i = 0; i < sg_used; i++)
kfree(buff[i]);
kfree(buff);
}
kfree(buff_size);
kfree(ioc);
return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c)
{
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
(void) check_for_unit_attention(h, c);
}
/*
* ioctl
*/
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
struct ctlr_info *h;
void __user *argp = (void __user *)arg;
h = sdev_to_hba(dev);
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
case CCISS_REGNEWD:
hpsa_scan_start(h->scsi_host);
return 0;
case CCISS_GETPCIINFO:
return hpsa_getpciinfo_ioctl(h, argp);
case CCISS_GETDRIVVER:
return hpsa_getdrivver_ioctl(h, argp);
case CCISS_PASSTHRU:
return hpsa_passthru_ioctl(h, argp);
case CCISS_BIG_PASSTHRU:
return hpsa_big_passthru_ioctl(h, argp);
default:
return -ENOTTY;
}
}
static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
unsigned char *scsi3addr, u8 reset_type)
{
struct CommandList *c;
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
enqueue_cmd_and_start_io(h, c);
/* Don't wait for completion, the reset won't complete. Don't free
* the command either. This is the last command we will send before
* re-initializing everything, so it doesn't matter and won't leak.
*/
return 0;
}
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
int pci_dir = XFER_NONE;
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
if (buff != NULL && size > 0) {
c->Header.SGList = 1;
c->Header.SGTotal = 1;
} else {
c->Header.SGList = 0;
c->Header.SGTotal = 0;
}
c->Header.Tag.lower = c->busaddr;
memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
c->Request.Type.Type = cmd_type;
if (cmd_type == TYPE_CMD) {
switch (cmd) {
case HPSA_INQUIRY:
/* are we trying to read a vital product data page? */
if (page_code != 0) {
c->Request.CDB[1] = 0x01;
c->Request.CDB[2] = page_code;
}
c->Request.CDBLen = 6;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
c->Request.Timeout = 0;
c->Request.CDB[0] = HPSA_INQUIRY;
c->Request.CDB[4] = size & 0xFF;
break;
case HPSA_REPORT_LOG:
case HPSA_REPORT_PHYS:
/* Talking to the controller, so it's a physical command:
mode = 00, target = 0. Nothing to write.
*/
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_READ;
c->Request.Timeout = 0;
c->Request.CDB[0] = cmd;
c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
c->Request.CDB[7] = (size >> 16) & 0xFF;
c->Request.CDB[8] = (size >> 8) & 0xFF;
c->Request.CDB[9] = size & 0xFF;
break;
case HPSA_CACHE_FLUSH:
c->Request.CDBLen = 12;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_WRITE;
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
c->Request.CDB[7] = (size >> 8) & 0xFF;
c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0;
break;
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
return;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
case HPSA_DEVICE_RESET_MSG:
c->Request.CDBLen = 16;
c->Request.Type.Type = 1; /* It is a MSG not a CMD */
c->Request.Type.Attribute = ATTR_SIMPLE;
c->Request.Type.Direction = XFER_NONE;
c->Request.Timeout = 0; /* Don't time out */
memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
c->Request.CDB[0] = cmd;
c->Request.CDB[1] = 0x03; /* Reset target above */
/* If bytes 4-7 are zero, it means reset the LunID device. */
c->Request.CDB[4] = 0x00;
c->Request.CDB[5] = 0x00;
c->Request.CDB[6] = 0x00;
c->Request.CDB[7] = 0x00;
break;
default:
dev_warn(&h->pdev->dev, "unknown message type %d\n",
cmd);
BUG();
}
} else {
dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
BUG();
}
switch (c->Request.Type.Direction) {
case XFER_READ:
pci_dir = PCI_DMA_FROMDEVICE;
break;
case XFER_WRITE:
pci_dir = PCI_DMA_TODEVICE;
break;
case XFER_NONE:
pci_dir = PCI_DMA_NONE;
break;
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
hpsa_map_one(h->pdev, c, buff, size, pci_dir);
return;
}
/*
* Map (physical) PCI mem into (virtual) kernel space
*/
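/* Example (assuming 4 KiB pages): base 0xfe001234 remaps the whole
* page at 0xfe001000 and returns the mapping plus offset 0x234.
*/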
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base;
void __iomem *page_remapped = ioremap(page_base, page_offs + size);
return page_remapped ? (page_remapped + page_offs) : NULL;
}
/* Takes cmds off the submission queue and sends them to the hardware,
* then puts them on the queue of cmds waiting for completion.
*/
static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
/* Get the first entry from the Request Q */
removeQ(c);
h->Qdepth--;
/* Tell the controller to execute the command */
h->access.submit_command(h, c);
/* Put job onto the completed Q */
addQ(&h->cmpQ, c);
}
}
static inline unsigned long get_next_completion(struct ctlr_info *h)
{
return h->access.command_completed(h);
}
static inline bool interrupt_pending(struct ctlr_info *h)
{
return h->access.intr_pending(h);
}
static inline long interrupt_not_for_us(struct ctlr_info *h)
{
return (h->access.intr_pending(h) == 0) ||
(h->interrupts_enabled == 0);
}
static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
u32 raw_tag)
{
if (unlikely(tag_index >= h->nr_cmds)) {
dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
return 1;
}
return 0;
}
static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
{
removeQ(c);
if (likely(c->cmd_type == CMD_SCSI))
complete_scsi_command(c);
else if (c->cmd_type == CMD_IOCTL_PEND)
complete(c->waiting);
}
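/* Tag layout, as set up in hpsa_scsi_queue_command_lck(): the command
* index is shifted up by DIRECT_LOOKUP_SHIFT and DIRECT_LOOKUP_BIT is
* set, so completions can be mapped straight back to cmd_pool slots.
*/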
static inline u32 hpsa_tag_contains_index(u32 tag)
{
return tag & DIRECT_LOOKUP_BIT;
}
static inline u32 hpsa_tag_to_index(u32 tag)
{
return tag >> DIRECT_LOOKUP_SHIFT;
}
static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return tag & ~HPSA_SIMPLE_ERROR_BITS;
return tag & ~HPSA_PERF_ERROR_BITS;
}
/* process completion of an indexed ("direct lookup") command */
static inline u32 process_indexed_cmd(struct ctlr_info *h,
u32 raw_tag)
{
u32 tag_index;
struct CommandList *c;
tag_index = hpsa_tag_to_index(raw_tag);
if (bad_tag(h, tag_index, raw_tag))
return next_command(h);
c = h->cmd_pool + tag_index;
finish_cmd(c, raw_tag);
return next_command(h);
}
/* process completion of a non-indexed command */
static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
u32 raw_tag)
{
u32 tag;
struct CommandList *c = NULL;
tag = hpsa_tag_discard_error_bits(h, raw_tag);
list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
finish_cmd(c, raw_tag);
return next_command(h);
}
}
bad_tag(h, h->nr_cmds + 1, raw_tag);
return next_command(h);
}
/* Some controllers, like the P400, will give us one interrupt
* after a soft reset, even if we turned interrupts off.
* Only need to check for this in the hpsa_xxx_discard_completions
* functions.
*/
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
if (likely(!reset_devices))
return 0;
if (likely(h->interrupts_enabled))
return 0;
dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
"(known firmware bug.) Ignoring.\n");
return 1;
}
static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
if (ignore_bogus_interrupt(h))
return IRQ_NONE;
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY)
raw_tag = next_command(h);
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
if (ignore_bogus_interrupt(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY)
raw_tag = next_command(h);
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
while (interrupt_pending(h)) {
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY) {
if (hpsa_tag_contains_index(raw_tag))
raw_tag = process_indexed_cmd(h, raw_tag);
else
raw_tag = process_nonindexed_cmd(h, raw_tag);
}
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
unsigned long flags;
u32 raw_tag;
spin_lock_irqsave(&h->lock, flags);
h->last_intr_timestamp = get_jiffies_64();
raw_tag = get_next_completion(h);
while (raw_tag != FIFO_EMPTY) {
if (hpsa_tag_contains_index(raw_tag))
raw_tag = process_indexed_cmd(h, raw_tag);
else
raw_tag = process_nonindexed_cmd(h, raw_tag);
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
* in simple mode, not performant mode due to the tag lookup.
* We only ever use this immediately after a controller reset.
*/
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
struct Command {
struct CommandListHeader CommandHeader;
struct RequestBlock Request;
struct ErrDescriptor ErrorDescriptor;
};
struct Command *cmd;
static const size_t cmd_sz = sizeof(*cmd) +
sizeof(struct ErrorInfo);
dma_addr_t paddr64;
uint32_t paddr32, tag;
void __iomem *vaddr;
int i, err;
vaddr = pci_ioremap_bar(pdev, 0);
if (vaddr == NULL)
return -ENOMEM;
/* The Inbound Post Queue only accepts 32-bit physical addresses for the
* CCISS commands, so they must be allocated from the lower 4GiB of
* memory.
*/
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
return -ENOMEM;
}
cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
if (cmd == NULL) {
iounmap(vaddr);
return -ENOMEM;
}
/* This must fit, because of the 32-bit consistent DMA mask. Also,
* although there's no guarantee, we assume that the address is at
* least 4-byte aligned (most likely, it's page-aligned).
*/
paddr32 = paddr64;
cmd->CommandHeader.ReplyQueue = 0;
cmd->CommandHeader.SGList = 0;
cmd->CommandHeader.SGTotal = 0;
cmd->CommandHeader.Tag.lower = paddr32;
cmd->CommandHeader.Tag.upper = 0;
memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
cmd->Request.CDBLen = 16;
cmd->Request.Type.Type = TYPE_MSG;
cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
cmd->Request.Type.Direction = XFER_NONE;
cmd->Request.Timeout = 0; /* Don't time out */
cmd->Request.CDB[0] = opcode;
cmd->Request.CDB[1] = type;
memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
cmd->ErrorDescriptor.Addr.upper = 0;
cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
iounmap(vaddr);
/* we leak the DMA buffer here ... no choice since the controller could
* still complete the command.
*/
if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
opcode, type);
return -ETIMEDOUT;
}
pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
if (tag & HPSA_ERROR_BIT) {
dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
opcode, type);
return -EIO;
}
dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
opcode, type);
return 0;
}
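/* Convenience wrapper: opcode 3 with type 0 appears to be a no-op
* message (hence the name), used to poke the controller.
*/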
#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void __iomem *vaddr, u32 use_doorbell)
{
u16 pmcsr;
int pos;
if (use_doorbell) {
/* For everything after the P600, the PCI power state method
* of resetting the controller doesn't work, so we have this
* other way using the doorbell register.
*/
dev_info(&pdev->dev, "using doorbell to reset controller\n");
writel(use_doorbell, vaddr + SA5_DOORBELL);
} else { /* Try to do it the PCI power state way */
/* Quoting from the Open CISS Specification: "The Power
* Management Control/Status Register (CSR) controls the power
* state of the device. The normal operating state is D0,
* CSR=00h. The software off state is D3, CSR=03h. To reset
* the controller, place the interface device in D3 then to D0,
* this causes a secondary PCI reset which will reset the
* controller." */
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
dev_err(&pdev->dev,
"hpsa_reset_controller: "
"PCI PM not supported\n");
return -ENODEV;
}
dev_info(&pdev->dev, "using PCI PM to reset controller\n");
/* enter the D3hot power management state */
pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_D3hot;
pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
msleep(500);
/* enter the D0 power management state */
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_D0;
pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
/*
* The P600 requires a small delay when changing states.
* Otherwise we may think the board did not reset and we bail.
* This is for kdump only and is particular to the P600.
*/
msleep(500);
}
return 0;
}
static __devinit void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
static __devinit int write_driver_ver_to_cfgtable(
struct CfgTable __iomem *cfgtable)
{
char *driver_version;
int i, size = sizeof(cfgtable->driver_version);
driver_version = kmalloc(size, GFP_KERNEL);
if (!driver_version)
return -ENOMEM;
init_driver_version(driver_version, size);
for (i = 0; i < size; i++)
writeb(driver_version[i], &cfgtable->driver_version[i]);
kfree(driver_version);
return 0;
}
static __devinit void read_driver_ver_from_cfgtable(
struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
{
int i;
for (i = 0; i < sizeof(cfgtable->driver_version); i++)
driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
static __devinit int controller_reset_failed(
struct CfgTable __iomem *cfgtable)
{
char *driver_ver, *old_driver_ver;
int rc, size = sizeof(cfgtable->driver_version);
old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
if (!old_driver_ver)
return -ENOMEM;
driver_ver = old_driver_ver + size;
/* After a reset, the 32 bytes of "driver version" in the cfgtable
* should have been changed, otherwise we know the reset failed.
*/
init_driver_version(old_driver_ver, size);
read_driver_ver_from_cfgtable(cfgtable, driver_ver);
rc = !memcmp(driver_ver, old_driver_ver, size);
kfree(old_driver_ver);
return rc;
}
/* This does a hard reset of the controller using PCI power management
* states or the doorbell register.
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support;
int rc;
struct CfgTable __iomem *cfgtable;
u32 use_doorbell;
u32 board_id;
u16 command_register;
/* For controllers as old as the P600, this is very nearly
* the same thing as
*
* pci_save_state(pci_dev);
* pci_set_power_state(pci_dev, PCI_D3hot);
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
*/
rc = hpsa_lookup_board_id(pdev, &board_id);
if (rc < 0 || !ctlr_is_resettable(board_id)) {
dev_warn(&pdev->dev, "Not resetting device.\n");
return -ENODEV;
}
/* if controller is soft- but not hard resettable... */
if (!ctlr_is_hard_resettable(board_id))
return -ENOTSUPP; /* try soft reset later. */
/* Save the PCI command register */
pci_read_config_word(pdev, 4, &command_register);
/* Turn the board off. This is so that later pci_restore_state()
* won't turn the board on before the rest of config space is ready.
*/
pci_disable_device(pdev);
pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
if (rc)
return rc;
vaddr = remap_pci_mem(paddr, 0x250);
if (!vaddr)
return -ENOMEM;
/* find cfgtable in order to check if reset via doorbell is supported */
rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
&cfg_base_addr_index, &cfg_offset);
if (rc)
goto unmap_vaddr;
cfgtable = remap_pci_mem(pci_resource_start(pdev,
cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
if (!cfgtable) {
rc = -ENOMEM;
goto unmap_vaddr;
}
rc = write_driver_ver_to_cfgtable(cfgtable);
if (rc)
goto unmap_vaddr;
/* If reset via doorbell register is supported, use that.
* There are two such methods. Favor the newest method.
*/
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
if (use_doorbell) {
use_doorbell = DOORBELL_CTLR_RESET2;
} else {
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
if (use_doorbell) {
dev_warn(&pdev->dev, "Soft reset not supported. "
"Firmware update is required.\n");
rc = -ENOTSUPP; /* try soft reset */
goto unmap_cfgtable;
}
}
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
pci_restore_state(pdev);
rc = pci_enable_device(pdev);
if (rc) {
dev_warn(&pdev->dev, "failed to enable device.\n");
goto unmap_cfgtable;
}
pci_write_config_word(pdev, PCI_COMMAND, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
/* Wait for board to become not ready, then ready. */
dev_info(&pdev->dev, "Waiting for board to reset.\n");
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
if (rc) {
dev_warn(&pdev->dev,
"failed waiting for board to reset."
" Will try soft reset.\n");
rc = -ENOTSUPP; /* Not expected, but try soft reset later */
goto unmap_cfgtable;
}
rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
if (rc) {
dev_warn(&pdev->dev,
"failed waiting for board to become ready "
"after hard reset\n");
goto unmap_cfgtable;
}
rc = controller_reset_failed(cfgtable);
if (rc < 0)
goto unmap_cfgtable;
if (rc) {
dev_warn(&pdev->dev, "Unable to successfully reset "
"controller. Will try soft reset.\n");
rc = -ENOTSUPP;
} else {
dev_info(&pdev->dev, "board ready after hard reset.\n");
}
unmap_cfgtable:
iounmap(cfgtable);
unmap_vaddr:
iounmap(vaddr);
return rc;
}
/*
 * We cannot read the structure directly; for portability we must use
 * the I/O functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
int i;
char temp_name[17];
dev_info(dev, "Controller Configuration information\n");
dev_info(dev, "------------------------------------\n");
for (i = 0; i < 4; i++)
temp_name[i] = readb(&(tb->Signature[i]));
temp_name[4] = '\0';
dev_info(dev, " Signature = %s\n", temp_name);
dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
dev_info(dev, " Transport methods supported = 0x%x\n",
readl(&(tb->TransportSupport)));
dev_info(dev, " Transport methods active = 0x%x\n",
readl(&(tb->TransportActive)));
dev_info(dev, " Requested transport Method = 0x%x\n",
readl(&(tb->HostWrite.TransportRequest)));
dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
readl(&(tb->HostWrite.CoalIntDelay)));
dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
readl(&(tb->HostWrite.CoalIntCount)));
dev_info(dev, " Max outstanding commands = 0x%d\n",
readl(&(tb->CmdsOutMax)));
dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
for (i = 0; i < 16; i++)
temp_name[i] = readb(&(tb->ServerName[i]));
temp_name[16] = '\0';
dev_info(dev, " Server Name = %s\n", temp_name);
dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
readl(&(tb->HeartBeat)));
#endif /* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
int i, offset, mem_type, bar_type;
if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
return 0;
offset = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
offset += 4;
else {
mem_type = pci_resource_flags(pdev, i) &
PCI_BASE_ADDRESS_MEM_TYPE_MASK;
switch (mem_type) {
case PCI_BASE_ADDRESS_MEM_TYPE_32:
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
offset += 4; /* 32 bit */
break;
case PCI_BASE_ADDRESS_MEM_TYPE_64:
offset += 8;
break;
default: /* reserved in PCI 2.2 */
dev_warn(&pdev->dev,
"base address is invalid\n");
return -1;
}
}
if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
return i + 1;
}
return -1;
}
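/* Worked example (editor's illustration): config-space BAR registers
 * start at PCI_BASE_ADDRESS_0 (0x10) and a 64-bit memory BAR consumes
 * 8 bytes of that space. So if resource 0 is a 64-bit memory BAR, the
 * register at 0x10 + 8 = 0x18 belongs to resource 1, and
 * find_PCI_BAR_index(pdev, PCI_BASE_ADDRESS_0 + 8) returns 1.
 */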
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use IO-APIC mode.
*/
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
int err;
struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
{0, 2}, {0, 3}
};
/* Some boards advertise MSI but don't really support it */
if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
(h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
goto default_int_mode;
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
dev_info(&h->pdev->dev, "MSIX\n");
err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
if (!err) {
h->intr[0] = hpsa_msix_entries[0].vector;
h->intr[1] = hpsa_msix_entries[1].vector;
h->intr[2] = hpsa_msix_entries[2].vector;
h->intr[3] = hpsa_msix_entries[3].vector;
h->msix_vector = 1;
return;
}
if (err > 0) {
dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
"available\n", err);
goto default_int_mode;
} else {
dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
err);
goto default_int_mode;
}
}
if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
dev_info(&h->pdev->dev, "MSI\n");
if (!pci_enable_msi(h->pdev))
h->msi_vector = 1;
else
dev_warn(&h->pdev->dev, "MSI init failed\n");
}
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
h->intr[h->intr_mode] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
int i;
u32 subsystem_vendor_id, subsystem_device_id;
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id;
for (i = 0; i < ARRAY_SIZE(products); i++)
if (*board_id == products[i].board_id)
return i;
if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
!hpsa_allow_any) {
dev_warn(&pdev->dev, "unrecognized board ID: "
"0x%08x, ignoring.\n", *board_id);
return -ENODEV;
}
return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
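/* Worked example (editor's illustration): the board ID packs the PCI
 * subsystem device ID into the upper 16 bits and the subsystem vendor
 * ID into the lower 16 bits. A board with subsystem vendor 0x103C (HP)
 * and subsystem device 0x3225 therefore yields board_id 0x3225103C,
 * the value tested for the P600 in hpsa_p600_dma_prefetch_quirk()
 * below.
 */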
static inline bool hpsa_board_disabled(struct pci_dev *pdev)
{
u16 command;
(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
return ((command & PCI_COMMAND_MEMORY) == 0);
}
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar)
{
int i;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
/* addressing mode bits already removed */
*memory_bar = pci_resource_start(pdev, i);
dev_dbg(&pdev->dev, "memory BAR = %lx\n",
*memory_bar);
return 0;
}
dev_warn(&pdev->dev, "no memory BAR found\n");
return -ENODEV;
}
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
void __iomem *vaddr, int wait_for_ready)
{
int i, iterations;
u32 scratchpad;
if (wait_for_ready)
iterations = HPSA_BOARD_READY_ITERATIONS;
else
iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
for (i = 0; i < iterations; i++) {
scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
if (wait_for_ready) {
if (scratchpad == HPSA_FIRMWARE_READY)
return 0;
} else {
if (scratchpad != HPSA_FIRMWARE_READY)
return 0;
}
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
}
dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
u64 *cfg_offset)
{
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
*cfg_base_addr &= (u32) 0x0000ffff;
*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
if (*cfg_base_addr_index == -1) {
dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
return -ENODEV;
}
return 0;
}
static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
u32 trans_offset;
int rc;
rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
&cfg_base_addr_index, &cfg_offset);
if (rc)
return rc;
h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
if (!h->cfgtable)
return -ENOMEM;
rc = write_driver_ver_to_cfgtable(h->cfgtable);
if (rc)
return rc;
/* Find performant mode table. */
trans_offset = readl(&h->cfgtable->TransMethodOffset);
h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index)+cfg_offset+trans_offset,
sizeof(*h->transtable));
if (!h->transtable)
return -ENOMEM;
return 0;
}
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
/* Limit commands in memory limited kdump scenario. */
if (reset_devices && h->max_commands > 32)
h->max_commands = 32;
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
"Using 16. Ensure that firmware is up to date.\n",
h->max_commands);
h->max_commands = 16;
}
}
/* Interrogate the hardware for some limits:
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
*/
static void __devinit hpsa_find_board_params(struct ctlr_info *h)
{
hpsa_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
/*
 * Limit in-command s/g elements to 32 to save DMA'able memory.
 * However, the spec says if 0, use 31.
 */
h->max_cmd_sg_entries = 31;
if (h->maxsgentries > 512) {
h->max_cmd_sg_entries = 32;
h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
h->maxsgentries--; /* save one for chain pointer */
} else {
h->maxsgentries = 31; /* default to traditional values */
h->chainsize = 0;
}
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
(readb(&h->cfgtable->Signature[1]) != 'I') ||
(readb(&h->cfgtable->Signature[2]) != 'S') ||
(readb(&h->cfgtable->Signature[3]) != 'S')) {
dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
return false;
}
return true;
}
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
{
#ifdef CONFIG_X86
u32 prefetch;
prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
prefetch |= 0x100;
writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}
/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
* in a prefetch beyond physical memory.
*/
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
u32 dma_prefetch;
if (h->board_id != 0x3225103C)
return;
dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
dma_prefetch |= 0x8000;
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
u32 doorbell_value;
unsigned long flags;
/* under certain very rare conditions, this can take a while.
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
spin_lock_irqsave(&h->lock, flags);
doorbell_value = readl(h->vaddr + SA5_DOORBELL);
spin_unlock_irqrestore(&h->lock, flags);
if (!(doorbell_value & CFGTBL_ChangeReq))
break;
/* delay and try again */
usleep_range(10000, 20000);
}
}
static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & SIMPLE_MODE))
return -ENOTSUPP;
h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
/* Update the field, and then ring the doorbell */
writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
print_cfg_table(&h->pdev->dev, h->cfgtable);
if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
dev_warn(&h->pdev->dev,
"unable to get board into simple mode\n");
return -ENODEV;
}
h->transMethod = CFGTBL_Trans_Simple;
return 0;
}
static int __devinit hpsa_pci_init(struct ctlr_info *h)
{
int prod_index, err;
prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
if (prod_index < 0)
return -ENODEV;
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
if (hpsa_board_disabled(h->pdev)) {
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
return -ENODEV;
}
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
err = pci_enable_device(h->pdev);
if (err) {
dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
return err;
}
err = pci_request_regions(h->pdev, HPSA);
if (err) {
dev_err(&h->pdev->dev,
"cannot obtain PCI resources, aborting\n");
return err;
}
hpsa_interrupt_mode(h);
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto err_out_free_res;
h->vaddr = remap_pci_mem(h->paddr, 0x250);
if (!h->vaddr) {
err = -ENOMEM;
goto err_out_free_res;
}
err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = hpsa_find_cfgtables(h);
if (err)
goto err_out_free_res;
hpsa_find_board_params(h);
if (!hpsa_CISS_signature_present(h)) {
err = -ENODEV;
goto err_out_free_res;
}
hpsa_enable_scsi_prefetch(h);
hpsa_p600_dma_prefetch_quirk(h);
err = hpsa_enter_simple_mode(h);
if (err)
goto err_out_free_res;
return 0;
err_out_free_res:
if (h->transtable)
iounmap(h->transtable);
if (h->cfgtable)
iounmap(h->cfgtable);
if (h->vaddr)
iounmap(h->vaddr);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(h->pdev);
return err;
}
static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
{
int rc;
#define HBA_INQUIRY_BYTE_COUNT 64
h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
if (!h->hba_inquiry_data)
return;
rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
if (rc != 0) {
kfree(h->hba_inquiry_data);
h->hba_inquiry_data = NULL;
}
}
static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
if (!reset_devices)
return 0;
/* Reset the controller with a PCI power-cycle or via doorbell */
rc = hpsa_kdump_hard_reset_controller(pdev);
/* -ENOTSUPP here means we cannot reset the controller
* but it's already (and still) up and running in
* "performant mode". Or, it might be 640x, which can't reset
* due to concerns about shared bbwc between 6402/6404 pair.
*/
if (rc == -ENOTSUPP)
return rc; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
/* Now try to get the controller to respond to a no-op */
dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
if (hpsa_noop(pdev) == 0)
break;
else
dev_warn(&pdev->dev, "no-op failed%s\n",
(i < 11 ? "; re-trying" : ""));
}
return 0;
}
static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
h->cmd_pool_bits = kzalloc(
DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
sizeof(unsigned long), GFP_KERNEL);
h->cmd_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(*h->cmd_pool),
&(h->cmd_pool_dhandle));
h->errinfo_pool = pci_alloc_consistent(h->pdev,
h->nr_cmds * sizeof(*h->errinfo_pool),
&(h->errinfo_pool_dhandle));
if ((h->cmd_pool_bits == NULL)
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
dev_err(&h->pdev->dev, "out of memory in %s", __func__);
return -ENOMEM;
}
return 0;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
kfree(h->cmd_pool_bits);
if (h->cmd_pool)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
if (h->errinfo_pool)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
}
static int hpsa_request_irq(struct ctlr_info *h,
irqreturn_t (*msixhandler)(int, void *),
irqreturn_t (*intxhandler)(int, void *))
{
int rc;
if (h->msix_vector || h->msi_vector)
rc = request_irq(h->intr[h->intr_mode], msixhandler,
0, h->devname, h);
else
rc = request_irq(h->intr[h->intr_mode], intxhandler,
IRQF_SHARED, h->devname, h);
if (rc) {
dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
return -ENODEV;
}
return 0;
}
static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
{
if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
HPSA_RESET_TYPE_CONTROLLER)) {
dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
return -EIO;
}
dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
return -1;
}
dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
dev_warn(&h->pdev->dev, "Board failed to become ready "
"after soft reset.\n");
return -1;
}
return 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
else if (h->msi_vector)
pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
kfree(h->blockFetchTable);
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
if (h->vaddr)
iounmap(h->vaddr);
if (h->transtable)
iounmap(h->transtable);
if (h->cfgtable)
iounmap(h->cfgtable);
pci_release_regions(h->pdev);
kfree(h);
}
static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
{
assert_spin_locked(&lockup_detector_lock);
if (!hpsa_lockup_detector)
return;
if (h->lockup_detected)
return; /* already stopped the lockup detector */
list_del(&h->lockup_list);
}
/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
struct CommandList *c = NULL;
assert_spin_locked(&h->lock);
/* Mark all outstanding commands as failed and complete them. */
while (!list_empty(list)) {
c = list_entry(list->next, struct CommandList, list);
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
finish_cmd(c, c->Header.Tag.lower);
}
}
static void controller_lockup_detected(struct ctlr_info *h)
{
unsigned long flags;
assert_spin_locked(&lockup_detector_lock);
remove_ctlr_from_lockup_detector_list(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_lock_irqsave(&h->lock, flags);
h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
spin_unlock_irqrestore(&h->lock, flags);
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
h->lockup_detected);
pci_disable_device(h->pdev);
spin_lock_irqsave(&h->lock, flags);
fail_all_cmds_on_list(h, &h->cmpQ);
fail_all_cmds_on_list(h, &h->reqQ);
spin_unlock_irqrestore(&h->lock, flags);
}
#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
static void detect_controller_lockup(struct ctlr_info *h)
{
u64 now;
u32 heartbeat;
unsigned long flags;
assert_spin_locked(&lockup_detector_lock);
now = get_jiffies_64();
/* If we've received an interrupt recently, we're ok. */
if (time_after64(h->last_intr_timestamp +
(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
return;
/*
* If we've already checked the heartbeat recently, we're ok.
* This could happen if someone sends us a signal. We
* otherwise don't care about signals in this thread.
*/
if (time_after64(h->last_heartbeat_timestamp +
(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
return;
/* If heartbeat has not changed since we last looked, we're not ok. */
spin_lock_irqsave(&h->lock, flags);
heartbeat = readl(&h->cfgtable->HeartBeat);
spin_unlock_irqrestore(&h->lock, flags);
if (h->last_heartbeat == heartbeat) {
controller_lockup_detected(h);
return;
}
/* We're ok. */
h->last_heartbeat = heartbeat;
h->last_heartbeat_timestamp = now;
}
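/* Editor's timing sketch (values from the macros above): the detector
 * thread samples every HEARTBEAT_SAMPLE_INTERVAL (10 s). A lockup is
 * declared only when, within the last HEARTBEAT_CHECK_MINIMUM_INTERVAL
 * (5 s), no interrupt has arrived and no heartbeat check has already
 * run, and the HeartBeat counter read from the config table has not
 * advanced since the previous sample.
 */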
static int detect_controller_lockup_thread(void *notused)
{
struct ctlr_info *h;
unsigned long flags;
while (1) {
struct list_head *this, *tmp;
schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
if (kthread_should_stop())
break;
spin_lock_irqsave(&lockup_detector_lock, flags);
list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
h = list_entry(this, struct ctlr_info, lockup_list);
detect_controller_lockup(h);
}
spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
return 0;
}
static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
{
unsigned long flags;
spin_lock_irqsave(&lockup_detector_lock, flags);
list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
static void start_controller_lockup_detector(struct ctlr_info *h)
{
/* Start the lockup detector thread if not already started */
if (!hpsa_lockup_detector) {
spin_lock_init(&lockup_detector_lock);
hpsa_lockup_detector =
kthread_run(detect_controller_lockup_thread,
NULL, HPSA);
/* kthread_run() reports failure with ERR_PTR(), not NULL */
if (IS_ERR(hpsa_lockup_detector)) {
hpsa_lockup_detector = NULL;
dev_warn(&h->pdev->dev,
"Could not start lockup detector thread\n");
return;
}
}
add_ctlr_to_lockup_detector_list(h);
}
static void stop_controller_lockup_detector(struct ctlr_info *h)
{
unsigned long flags;
spin_lock_irqsave(&lockup_detector_lock, flags);
remove_ctlr_from_lockup_detector_list(h);
/* If the list of ctlr's to monitor is empty, stop the thread */
if (list_empty(&hpsa_ctlr_list)) {
spin_unlock_irqrestore(&lockup_detector_lock, flags);
kthread_stop(hpsa_lockup_detector);
spin_lock_irqsave(&lockup_detector_lock, flags);
hpsa_lockup_detector = NULL;
}
spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
static int __devinit hpsa_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int dac, rc;
struct ctlr_info *h;
int try_soft_reset = 0;
unsigned long flags;
if (number_of_controllers == 0)
printk(KERN_INFO DRIVER_NAME "\n");
rc = hpsa_init_reset_devices(pdev);
if (rc) {
if (rc != -ENOTSUPP)
return rc;
/* If the reset fails in a particular way (it has no way to do
* a proper hard reset, so returns -ENOTSUPP) we can try to do
* a soft reset once we get the controller configured up to the
* point that it can accept a command.
*/
try_soft_reset = 1;
rc = 0;
}
reinit_after_soft_reset:
/* Command structures must be aligned on a 32-byte boundary because
 * the 5 lower bits of the address are used by the hardware and by
 * the driver. See comments in hpsa.h for more info.
 */
#define COMMANDLIST_ALIGNMENT 32
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return -ENOMEM;
h->pdev = pdev;
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->cmpQ);
INIT_LIST_HEAD(&h->reqQ);
spin_lock_init(&h->lock);
spin_lock_init(&h->scan_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
sprintf(h->devname, HPSA "%d", number_of_controllers);
h->ctlr = number_of_controllers;
number_of_controllers++;
/* configure PCI DMA stuff */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc == 0) {
dac = 1;
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc == 0) {
dac = 0;
} else {
dev_err(&pdev->dev, "no suitable DMA available\n");
goto clean1;
}
}
/* make sure the board interrupts are off */
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
goto clean2;
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
h->intr[h->intr_mode], dac ? "" : " not");
if (hpsa_allocate_cmd_pool(h))
goto clean4;
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
init_waitqueue_head(&h->scan_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
pci_set_drvdata(pdev, h);
h->ndevices = 0;
h->scsi_host = NULL;
spin_lock_init(&h->devlock);
hpsa_put_ctlr_into_performant_mode(h);
/* At this point, the controller is ready to take commands.
* Now, if reset_devices and the hard reset didn't work, try
* the soft reset and see if that works.
*/
if (try_soft_reset) {
/* This is kind of gross. We may or may not get a completion
* from the soft reset command, and if we do, then the value
* from the fifo may or may not be valid. So, we wait 10 secs
* after the reset throwing away any completions we get during
* that time. Unregister the interrupt handler and register
* fake ones to scoop up any residual completions.
*/
spin_lock_irqsave(&h->lock, flags);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
spin_unlock_irqrestore(&h->lock, flags);
free_irq(h->intr[h->intr_mode], h);
rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
hpsa_intx_discard_completions);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to request_irq after "
"soft reset.\n");
goto clean4;
}
rc = hpsa_kdump_soft_reset(h);
if (rc)
/* Neither hard nor soft reset worked, we're hosed. */
goto clean4;
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
"Waiting for stale completions to drain.\n");
h->access.set_intr_mask(h, HPSA_INTR_ON);
msleep(10000);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
rc = controller_reset_failed(h->cfgtable);
if (rc)
dev_info(&h->pdev->dev,
"Soft reset appears to have failed.\n");
/* since the controller's reset, we have to go back and re-init
* everything. Easiest to just forget what we've done and do it
* all over again.
*/
hpsa_undo_allocations_after_kdump_soft_reset(h);
try_soft_reset = 0;
if (rc)
/* don't go to clean4, we already unallocated */
return -ENODEV;
goto reinit_after_soft_reset;
}
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
start_controller_lockup_detector(h);
return 0;
clean4:
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
kfree(h);
return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
char *flush_buf;
struct CommandList *c;
flush_buf = kzalloc(4, GFP_KERNEL);
if (!flush_buf)
return;
c = cmd_special_alloc(h);
if (!c) {
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
RAID_CTLR_LUNID, TYPE_CMD);
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
out_of_memory:
kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
struct ctlr_info *h;
h = pci_get_drvdata(pdev);
/* Turn board interrupts off and send the flush-cache command to
 * write all data in the battery-backed cache to disk.
 */
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
else if (h->msi_vector)
pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */
}
static void __devexit hpsa_free_device_info(struct ctlr_info *h)
{
int i;
for (i = 0; i < h->ndevices; i++)
kfree(h->dev[i]);
}
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
if (pci_get_drvdata(pdev) == NULL) {
dev_err(&pdev->dev, "unable to remove device\n");
return;
}
h = pci_get_drvdata(pdev);
stop_controller_lockup_detector(h);
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
hpsa_shutdown(pdev);
iounmap(h->vaddr);
iounmap(h->transtable);
iounmap(h->cfgtable);
hpsa_free_device_info(h);
hpsa_free_sg_chain_blocks(h);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool, h->errinfo_pool_dhandle);
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
kfree(h->cmd_pool_bits);
kfree(h->blockFetchTable);
kfree(h->hba_inquiry_data);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
__attribute__((unused)) pm_message_t state)
{
return -ENOSYS;
}
static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
.name = HPSA,
.probe = hpsa_init_one,
.remove = __devexit_p(hpsa_remove_one),
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
.suspend = hpsa_suspend,
.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
* scatter gather elements supported) and bucket[],
* which is an array of 8 integers. The bucket[] array
* contains 8 different DMA transfer sizes (in 16
* byte increments) which the controller uses to fetch
* commands. This function fills in bucket_map[], which
* maps a given number of scatter gather elements to one of
* the 8 DMA transfer sizes. The point of it is to allow the
* controller to only do as much DMA as needed to fetch the
* command, with the DMA transfer size encoded in the lower
* bits of the command address.
*/
static void calc_bucket_map(int bucket[], int num_buckets,
int nsgs, int *bucket_map)
{
int i, j, b, size;
/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
/* Note, bucket_map must have nsgs+1 entries. */
for (i = 0; i <= nsgs; i++) {
/* Compute size of a command with i SG entries */
size = i + MINIMUM_TRANSFER_BLOCKS;
b = num_buckets; /* Assume the biggest bucket */
/* Find the bucket that is just big enough */
for (j = 0; j < num_buckets; j++) {
if (bucket[j] >= size) {
b = j;
break;
}
}
/* for a command with i SG entries, use bucket b. */
bucket_map[i] = b;
}
}
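/* Worked example (editor's illustration, using the bft[] table defined
 * in hpsa_enter_performant_mode() below): with bucket[] =
 * {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}, a command with
 * 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks of 16
 * bytes. The first bucket >= 7 is bucket[2] = 8, so bucket_map[3]
 * becomes 2 and the controller fetches 8 * 16 = 128 bytes for that
 * command.
 */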
static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
u32 use_short_tags)
{
int i;
unsigned long register_value;
/* This is a bit complicated. There are 8 registers on
 * the controller which we write to in order to tell it the 8
 * different command sizes there may be. It's a way of
* reducing the DMA done to fetch each command. Encoded into
* each command's tag are 3 bits which communicate to the controller
* which of the eight sizes that command fits within. The size of
* each command depends on how many scatter gather entries there are.
* Each SG entry requires 16 bytes. The eight registers are programmed
* with the number of 16-byte blocks a command of that size requires.
* The smallest command possible requires 5 such 16 byte blocks.
* The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
* blocks. Note, this only extends to the SG entries contained
* within the command block, and does not extend to chained blocks
* of SG elements. bft[] contains the eight values we write to
* the registers. They are not evenly distributed, but have more
* sizes for small commands, and fewer sizes for larger commands.
*/
int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
* 8 = 4 s/g entry or 16k
* 10 = 6 s/g entry or 24k
*/
h->reply_pool_wraparound = 1; /* spec: init to 1 */
/* Controller spec: zero out this buffer. */
memset(h->reply_pool, 0, h->reply_pool_size);
h->reply_pool_head = h->reply_pool;
bft[7] = SG_ENTRIES_IN_CMD + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft),
SG_ENTRIES_IN_CMD, h->blockFetchTable);
for (i = 0; i < 8; i++)
writel(bft[i], &h->transtable->BlockFetch[i]);
/* size of controller ring buffer */
writel(h->max_commands, &h->transtable->RepQSize);
writel(1, &h->transtable->RepQCount);
writel(0, &h->transtable->RepQCtrAddrLow32);
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
register_value = readl(&(h->cfgtable->TransportActive));
if (!(register_value & CFGTBL_Trans_Performant)) {
dev_warn(&h->pdev->dev, "unable to get board into"
" performant mode\n");
return;
}
/* Change the access methods to the performant access methods */
h->access = SA5_performant_access;
h->transMethod = CFGTBL_Trans_Performant;
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
if (hpsa_simple_mode)
return;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
h->reply_pool_size = h->max_commands * sizeof(u64);
h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
&(h->reply_pool_dhandle));
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
if ((h->reply_pool == NULL)
|| (h->blockFetchTable == NULL))
goto clean_up;
hpsa_enter_performant_mode(h,
trans_support & CFGTBL_Trans_use_short_tags);
return;
clean_up:
if (h->reply_pool)
pci_free_consistent(h->pdev, h->reply_pool_size,
h->reply_pool, h->reply_pool_dhandle);
kfree(h->blockFetchTable);
}
/*
 * This is it. Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
return pci_register_driver(&hpsa_pci_driver);
}
static void __exit hpsa_cleanup(void)
{
pci_unregister_driver(&hpsa_pci_driver);
}
module_init(hpsa_init);
module_exit(hpsa_cleanup);
| gpl-2.0 |
SmokyBob/android_kernel_asus_padfone2 | drivers/acpi/acpica/exfield.c | 3227 | 11584 | /******************************************************************************
*
* Module Name: exfield - ACPI AML (p-code) execution - field manipulation
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exfield")
/*******************************************************************************
*
* FUNCTION: acpi_ex_read_data_from_field
*
* PARAMETERS: walk_state - Current execution state
* obj_desc - The named field
* ret_buffer_desc - Where the return data object is stored
*
* RETURN: Status
*
* DESCRIPTION: Read from a named field. Returns either an Integer or a
* Buffer, depending on the size of the field.
*
******************************************************************************/
acpi_status
acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc,
union acpi_operand_object **ret_buffer_desc)
{
acpi_status status;
union acpi_operand_object *buffer_desc;
acpi_size length;
void *buffer;
u32 function;
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
/* Parameter validation */
if (!obj_desc) {
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
if (!ret_buffer_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
/*
* If the buffer_field arguments have not been previously evaluated,
* evaluate them now and save the results.
*/
if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
status = acpi_ds_get_buffer_field_arguments(obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
* This is an SMBus or IPMI read. We must create a buffer to hold
* the data and then directly access the region handler.
*
* Note: Smbus protocol value is passed in upper 16-bits of Function
*/
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS) {
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_READ | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
function = ACPI_READ;
}
buffer_desc = acpi_ut_create_buffer_object(length);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Lock entire transaction if requested */
acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
/* Call the region handler for the read */
status = acpi_ex_access_region(obj_desc, 0,
ACPI_CAST_PTR(u64,
buffer_desc->
buffer.pointer),
function);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
goto exit;
}
/*
* Allocate a buffer for the contents of the field.
*
* If the field is larger than the current integer width, create
* a BUFFER to hold it. Otherwise, use an INTEGER. This allows
* the use of arithmetic operators on the returned value if the
* field size is equal to or smaller than an Integer.
*
* Note: Field.length is in bits.
*/
length =
(acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
if (length > acpi_gbl_integer_byte_width) {
/* Field is too large for an Integer, create a Buffer instead */
buffer_desc = acpi_ut_create_buffer_object(length);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
buffer = buffer_desc->buffer.pointer;
} else {
/* Field will fit within an Integer (normal case) */
buffer_desc = acpi_ut_create_integer_object((u64) 0);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
length = acpi_gbl_integer_byte_width;
buffer = &buffer_desc->integer.value;
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
obj_desc, obj_desc->common.type, buffer,
(u32) length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc->common_field.bit_length,
obj_desc->common_field.start_field_bit_offset,
obj_desc->common_field.base_byte_offset));
/* Lock entire transaction if requested */
acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
/* Read from the field */
status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
exit:
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(buffer_desc);
} else {
*ret_buffer_desc = buffer_desc;
}
return_ACPI_STATUS(status);
}
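/* Editor's note (illustrative, sizes assume a 64-bit integer build):
 * a field of up to 64 bits is returned as an Integer object, while
 * anything wider is returned as a Buffer. So an 8-bit field yields an
 * Integer the caller can use arithmetically, whereas a 128-bit
 * (16-byte) field yields a 16-byte Buffer object.
 */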
/*******************************************************************************
*
* FUNCTION: acpi_ex_write_data_to_field
*
* PARAMETERS: source_desc - Contains data to write
* obj_desc - The named field
* result_desc - Where the return value is returned, if any
*
* RETURN: Status
*
* DESCRIPTION: Write to a named field
*
******************************************************************************/
acpi_status
acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
union acpi_operand_object *obj_desc,
union acpi_operand_object **result_desc)
{
acpi_status status;
u32 length;
void *buffer;
union acpi_operand_object *buffer_desc;
u32 function;
ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
/* Parameter validation */
if (!source_desc || !obj_desc) {
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
/*
* If the buffer_field arguments have not been previously evaluated,
* evaluate them now and save the results.
*/
if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
status = acpi_ds_get_buffer_field_arguments(obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
* This is an SMBus or IPMI write. We will bypass the entire field
* mechanism and handoff the buffer directly to the handler. For
* these address spaces, the buffer is bi-directional; on a write,
* return data is returned in the same buffer.
*
* Source must be a buffer of sufficient size:
* ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
*
* Note: SMBus protocol type is passed in upper 16-bits of Function
*/
if (source_desc->common.type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO,
"SMBus or IPMI write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS) {
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_WRITE | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
function = ACPI_WRITE;
}
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
"SMBus or IPMI write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
}
/* Create the bi-directional buffer */
buffer_desc = acpi_ut_create_buffer_object(length);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
buffer = buffer_desc->buffer.pointer;
ACPI_MEMCPY(buffer, source_desc->buffer.pointer, length);
/* Lock entire transaction if requested */
acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
/*
* Perform the write (returns status and perhaps data in the
* same buffer)
*/
status = acpi_ex_access_region(obj_desc, 0,
(u64 *) buffer, function);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
*result_desc = buffer_desc;
return_ACPI_STATUS(status);
}
/* Get a pointer to the data to be written */
switch (source_desc->common.type) {
case ACPI_TYPE_INTEGER:
buffer = &source_desc->integer.value;
length = sizeof(source_desc->integer.value);
break;
case ACPI_TYPE_BUFFER:
buffer = source_desc->buffer.pointer;
length = source_desc->buffer.length;
break;
case ACPI_TYPE_STRING:
buffer = source_desc->string.pointer;
length = source_desc->string.length;
break;
default:
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
source_desc,
acpi_ut_get_type_name(source_desc->common.type),
source_desc->common.type, buffer, length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc,
acpi_ut_get_type_name(obj_desc->common.type),
obj_desc->common.type,
obj_desc->common_field.bit_length,
obj_desc->common_field.start_field_bit_offset,
obj_desc->common_field.base_byte_offset));
/* Lock entire transaction if requested */
acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
/* Write to the field */
status = acpi_ex_insert_into_field(obj_desc, buffer, length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
return_ACPI_STATUS(status);
}
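/* Editor's note (illustrative): for SMBus/IPMI the same buffer carries
 * the request out and the response back, which is why the write path
 * above copies source_desc into a freshly created buffer_desc, hands
 * it to acpi_ex_access_region(), and then returns that buffer to the
 * caller through *result_desc instead of freeing it.
 */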
| gpl-2.0 |
jmztaylor/android_kernel_htc_operaul | fs/ncpfs/dir.c | 3995 | 33245 | /*
* dir.c
*
* Copyright (C) 1995, 1996 by Volker Lendecke
* Modified for big endian by J.F. Chadima and David S. Miller
* Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
* Modified 1998, 1999 Wolfram Pienkoss for NLS
* Modified 1999 Wolfram Pienkoss for directory caching
* Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info
*
*/
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include "ncp_fs.h"
static void ncp_read_volume_list(struct file *, void *, filldir_t,
struct ncp_cache_control *);
static void ncp_do_readdir(struct file *, void *, filldir_t,
struct ncp_cache_control *);
static int ncp_readdir(struct file *, void *, filldir_t);
static int ncp_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
static struct dentry *ncp_lookup(struct inode *, struct dentry *, struct nameidata *);
static int ncp_unlink(struct inode *, struct dentry *);
static int ncp_mkdir(struct inode *, struct dentry *, umode_t);
static int ncp_rmdir(struct inode *, struct dentry *);
static int ncp_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *);
static int ncp_mknod(struct inode * dir, struct dentry *dentry,
umode_t mode, dev_t rdev);
#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
extern int ncp_symlink(struct inode *, struct dentry *, const char *);
#else
#define ncp_symlink NULL
#endif
const struct file_operations ncp_dir_operations =
{
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = ncp_readdir,
.unlocked_ioctl = ncp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ncp_compat_ioctl,
#endif
};
const struct inode_operations ncp_dir_inode_operations =
{
.create = ncp_create,
.lookup = ncp_lookup,
.unlink = ncp_unlink,
.symlink = ncp_symlink,
.mkdir = ncp_mkdir,
.rmdir = ncp_rmdir,
.mknod = ncp_mknod,
.rename = ncp_rename,
.setattr = ncp_notify_change,
};
/*
* Dentry operations routines
*/
static int ncp_lookup_validate(struct dentry *, struct nameidata *);
static int ncp_hash_dentry(const struct dentry *, const struct inode *,
struct qstr *);
static int ncp_compare_dentry(const struct dentry *, const struct inode *,
const struct dentry *, const struct inode *,
unsigned int, const char *, const struct qstr *);
static int ncp_delete_dentry(const struct dentry *);
const struct dentry_operations ncp_dentry_operations =
{
.d_revalidate = ncp_lookup_validate,
.d_hash = ncp_hash_dentry,
.d_compare = ncp_compare_dentry,
.d_delete = ncp_delete_dentry,
};
#define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber])
static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
{
#ifdef CONFIG_NCPFS_SMALLDOS
int ns = ncp_namespace(i);
if ((ns == NW_NS_DOS)
#ifdef CONFIG_NCPFS_OS2_NS
|| ((ns == NW_NS_OS2) && (nscreator == NW_NS_DOS))
#endif /* CONFIG_NCPFS_OS2_NS */
)
return 0;
#endif /* CONFIG_NCPFS_SMALLDOS */
return 1;
}
#define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS)
static inline int ncp_case_sensitive(const struct inode *i)
{
#ifdef CONFIG_NCPFS_NFS_NS
return ncp_namespace(i) == NW_NS_NFS;
#else
return 0;
#endif /* CONFIG_NCPFS_NFS_NS */
}
/*
* Note: leave the hash unchanged if the directory
* is case-sensitive.
*/
static int
ncp_hash_dentry(const struct dentry *dentry, const struct inode *inode,
struct qstr *this)
{
if (!ncp_case_sensitive(inode)) {
struct super_block *sb = dentry->d_sb;
struct nls_table *t;
unsigned long hash;
int i;
t = NCP_IO_TABLE(sb);
hash = init_name_hash();
for (i=0; i<this->len ; i++)
hash = partial_name_hash(ncp_tolower(t, this->name[i]),
hash);
this->hash = end_name_hash(hash);
}
return 0;
}
static int
ncp_compare_dentry(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
if (len != name->len)
return 1;
if (ncp_case_sensitive(pinode))
return strncmp(str, name->name, len);
return ncp_strnicmp(NCP_IO_TABLE(pinode->i_sb), str, name->name, len);
}
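/* Editor's note (illustrative): on a case-preserving but
 * case-insensitive namespace such as OS/2, "README" and "readme" hash
 * to the same value in ncp_hash_dentry() and compare equal in
 * ncp_compare_dentry(), so a d_lookup() for either spelling finds the
 * cached dentry; only the NFS namespace compares case-sensitively.
 */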
/*
* This is the callback from dput() when d_count is going to 0.
* We use this to unhash dentries with bad inodes.
* Closing files can be safely postponed until iput() - it's done there anyway.
*/
static int
ncp_delete_dentry(const struct dentry * dentry)
{
struct inode *inode = dentry->d_inode;
if (inode) {
if (is_bad_inode(inode))
return 1;
} else
{
/* N.B. Unhash negative dentries? */
}
return 0;
}
static inline int
ncp_single_volume(struct ncp_server *server)
{
return (server->m.mounted_vol[0] != '\0');
}
static inline int ncp_is_server_root(struct inode *inode)
{
return (!ncp_single_volume(NCP_SERVER(inode)) &&
inode == inode->i_sb->s_root->d_inode);
}
/*
* This is the callback when the dcache has a lookup hit.
*/
#ifdef CONFIG_NCPFS_STRONG
/* try to delete a readonly file (NW R bit set) */
static int
ncp_force_unlink(struct inode *dir, struct dentry* dentry)
{
int res=0x9c,res2;
struct nw_modify_dos_info info;
__le32 old_nwattr;
struct inode *inode;
memset(&info, 0, sizeof(info));
/* remove the Read-Only flag on the NW server */
inode = dentry->d_inode;
old_nwattr = NCP_FINFO(inode)->nwattr;
info.attributes = old_nwattr & ~(aRONLY|aDELETEINHIBIT|aRENAMEINHIBIT);
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(inode), inode, NULL, DM_ATTRIBUTES, &info);
if (res2)
goto leave_me;
/* now try again the delete operation */
res = ncp_del_file_or_subdir2(NCP_SERVER(dir), dentry);
if (res) /* delete failed, set R bit again */
{
info.attributes = old_nwattr;
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(inode), inode, NULL, DM_ATTRIBUTES, &info);
if (res2)
goto leave_me;
}
leave_me:
return(res);
}
#endif /* CONFIG_NCPFS_STRONG */
#ifdef CONFIG_NCPFS_STRONG
static int
ncp_force_rename(struct inode *old_dir, struct dentry* old_dentry, char *_old_name,
struct inode *new_dir, struct dentry* new_dentry, char *_new_name)
{
struct nw_modify_dos_info info;
int res=0x90,res2;
struct inode *old_inode = old_dentry->d_inode;
__le32 old_nwattr = NCP_FINFO(old_inode)->nwattr;
__le32 new_nwattr = 0; /* shut compiler warning */
int old_nwattr_changed = 0;
int new_nwattr_changed = 0;
memset(&info, 0, sizeof(info));
/* remove the Read-Only flag on the NW server */
info.attributes = old_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
if (!res2)
old_nwattr_changed = 1;
if (new_dentry && new_dentry->d_inode) {
new_nwattr = NCP_FINFO(new_dentry->d_inode)->nwattr;
info.attributes = new_nwattr & ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT);
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
if (!res2)
new_nwattr_changed = 1;
}
/* now try again the rename operation */
/* but only if something really happened */
if (new_nwattr_changed || old_nwattr_changed) {
res = ncp_ren_or_mov_file_or_subdir(NCP_SERVER(old_dir),
old_dir, _old_name,
new_dir, _new_name);
}
if (res)
goto leave_me;
/* file was successfully renamed, so:
do not set attributes on old file - it no longer exists
copy attributes from old file to new */
new_nwattr_changed = old_nwattr_changed;
new_nwattr = old_nwattr;
old_nwattr_changed = 0;
leave_me:;
if (old_nwattr_changed) {
info.attributes = old_nwattr;
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(old_inode), old_inode, NULL, DM_ATTRIBUTES, &info);
/* ignore errors */
}
if (new_nwattr_changed) {
info.attributes = new_nwattr;
res2 = ncp_modify_file_or_subdir_dos_info_path(NCP_SERVER(new_dir), new_dir, _new_name, DM_ATTRIBUTES, &info);
/* ignore errors */
}
return(res);
}
#endif /* CONFIG_NCPFS_STRONG */
static int
ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
{
struct ncp_server *server;
struct dentry *parent;
struct inode *dir;
struct ncp_entry_info finfo;
int res, val = 0, len;
__u8 __name[NCP_MAXPATHLEN + 1];
if (dentry == dentry->d_sb->s_root)
return 1;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
parent = dget_parent(dentry);
dir = parent->d_inode;
if (!dentry->d_inode)
goto finished;
server = NCP_SERVER(dir);
/*
* Inspired by smbfs:
* The default validation is based on dentry age:
* We set the max age at mount time. (But each
* successful server lookup renews the timestamp.)
*/
val = NCP_TEST_AGE(server, dentry);
if (val)
goto finished;
DDPRINTK("ncp_lookup_validate: %s/%s not valid, age=%ld, server lookup\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
NCP_GET_AGE(dentry));
len = sizeof(__name);
if (ncp_is_server_root(dir)) {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, 1);
if (!res) {
res = ncp_lookup_volume(server, __name, &(finfo.i));
if (!res)
ncp_update_known_namespace(server, finfo.i.volNumber, NULL);
}
} else {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
if (!res)
res = ncp_obtain_info(server, dir, __name, &(finfo.i));
}
finfo.volume = finfo.i.volNumber;
DDPRINTK("ncp_lookup_validate: looked for %s/%s, res=%d\n",
dentry->d_parent->d_name.name, __name, res);
/*
* If we didn't find it, or if it has a different dirEntNum to
* what we remember, it's not valid any more.
*/
if (!res) {
struct inode *inode = dentry->d_inode;
mutex_lock(&inode->i_mutex);
if (finfo.i.dirEntNum == NCP_FINFO(inode)->dirEntNum) {
ncp_new_dentry(dentry);
val=1;
} else
DDPRINTK("ncp_lookup_validate: found, but dirEntNum changed\n");
ncp_update_inode2(inode, &finfo);
mutex_unlock(&inode->i_mutex);
}
finished:
DDPRINTK("ncp_lookup_validate: result=%d\n", val);
dput(parent);
return val;
}
static struct dentry *
ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
{
struct dentry *dent = dentry;
struct list_head *next;
if (d_validate(dent, parent)) {
if (dent->d_name.len <= NCP_MAXPATHLEN &&
(unsigned long)dent->d_fsdata == fpos) {
if (!dent->d_inode) {
dput(dent);
dent = NULL;
}
return dent;
}
dput(dent);
}
/* If a pointer is invalid, we search the dentry. */
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
dent = list_entry(next, struct dentry, d_u.d_child);
if ((unsigned long)dent->d_fsdata == fpos) {
if (dent->d_inode)
dget(dent);
else
dent = NULL;
spin_unlock(&parent->d_lock);
goto out;
}
next = next->next;
}
spin_unlock(&parent->d_lock);
return NULL;
out:
return dent;
}
static time_t ncp_obtain_mtime(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct ncp_server *server = NCP_SERVER(inode);
struct nw_info_struct i;
if (!ncp_conn_valid(server) || ncp_is_server_root(inode))
return 0;
if (ncp_obtain_info(server, inode, NULL, &i))
return 0;
return ncp_date_dos2unix(i.modifyTime, i.modifyDate);
}
static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct page *page = NULL;
struct ncp_server *server = NCP_SERVER(inode);
union ncp_dir_cache *cache = NULL;
struct ncp_cache_control ctl;
int result, mtime_valid = 0;
time_t mtime = 0;
ctl.page = NULL;
ctl.cache = NULL;
DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
(int) filp->f_pos);
result = -EIO;
/* Do not generate '.' and '..' when server is dead. */
if (!ncp_conn_valid(server))
goto out;
result = 0;
if (filp->f_pos == 0) {
if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR))
goto out;
filp->f_pos = 1;
}
if (filp->f_pos == 1) {
if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR))
goto out;
filp->f_pos = 2;
}
page = grab_cache_page(&inode->i_data, 0);
if (!page)
goto read_really;
ctl.cache = cache = kmap(page);
ctl.head = cache->head;
if (!PageUptodate(page) || !ctl.head.eof)
goto init_cache;
if (filp->f_pos == 2) {
if (jiffies - ctl.head.time >= NCP_MAX_AGE(server))
goto init_cache;
mtime = ncp_obtain_mtime(dentry);
mtime_valid = 1;
if ((!mtime) || (mtime != ctl.head.mtime))
goto init_cache;
}
if (filp->f_pos > ctl.head.end)
goto finished;
ctl.fpos = filp->f_pos + (NCP_DIRCACHE_START - 2);
ctl.ofs = ctl.fpos / NCP_DIRCACHE_SIZE;
ctl.idx = ctl.fpos % NCP_DIRCACHE_SIZE;
for (;;) {
if (ctl.ofs != 0) {
ctl.page = find_lock_page(&inode->i_data, ctl.ofs);
if (!ctl.page)
goto invalid_cache;
ctl.cache = kmap(ctl.page);
if (!PageUptodate(ctl.page))
goto invalid_cache;
}
while (ctl.idx < NCP_DIRCACHE_SIZE) {
struct dentry *dent;
int res;
dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx],
dentry, filp->f_pos);
if (!dent)
goto invalid_cache;
res = filldir(dirent, dent->d_name.name,
dent->d_name.len, filp->f_pos,
dent->d_inode->i_ino, DT_UNKNOWN);
dput(dent);
if (res)
goto finished;
filp->f_pos += 1;
ctl.idx += 1;
if (filp->f_pos > ctl.head.end)
goto finished;
}
if (ctl.page) {
kunmap(ctl.page);
SetPageUptodate(ctl.page);
unlock_page(ctl.page);
page_cache_release(ctl.page);
ctl.page = NULL;
}
ctl.idx = 0;
ctl.ofs += 1;
}
invalid_cache:
if (ctl.page) {
kunmap(ctl.page);
unlock_page(ctl.page);
page_cache_release(ctl.page);
ctl.page = NULL;
}
ctl.cache = cache;
init_cache:
ncp_invalidate_dircache_entries(dentry);
if (!mtime_valid) {
mtime = ncp_obtain_mtime(dentry);
mtime_valid = 1;
}
ctl.head.mtime = mtime;
ctl.head.time = jiffies;
ctl.head.eof = 0;
ctl.fpos = 2;
ctl.ofs = 0;
ctl.idx = NCP_DIRCACHE_START;
ctl.filled = 0;
ctl.valid = 1;
read_really:
if (ncp_is_server_root(inode)) {
ncp_read_volume_list(filp, dirent, filldir, &ctl);
} else {
ncp_do_readdir(filp, dirent, filldir, &ctl);
}
ctl.head.end = ctl.fpos - 1;
ctl.head.eof = ctl.valid;
finished:
if (ctl.page) {
kunmap(ctl.page);
SetPageUptodate(ctl.page);
unlock_page(ctl.page);
page_cache_release(ctl.page);
}
if (page) {
cache->head = ctl.head;
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
page_cache_release(page);
}
out:
return result;
}
static int
ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
struct ncp_cache_control *ctrl, struct ncp_entry_info *entry,
int inval_childs)
{
struct dentry *newdent, *dentry = filp->f_path.dentry;
struct inode *dir = dentry->d_inode;
struct ncp_cache_control ctl = *ctrl;
struct qstr qname;
int valid = 0;
int hashed = 0;
ino_t ino = 0;
__u8 __name[NCP_MAXPATHLEN + 1];
qname.len = sizeof(__name);
if (ncp_vol2io(NCP_SERVER(dir), __name, &qname.len,
entry->i.entryName, entry->i.nameLen,
!ncp_preserve_entry_case(dir, entry->i.NSCreator)))
return 1; /* I'm not sure */
qname.name = __name;
qname.hash = full_name_hash(qname.name, qname.len);
if (dentry->d_op && dentry->d_op->d_hash)
if (dentry->d_op->d_hash(dentry, dentry->d_inode, &qname) != 0)
goto end_advance;
newdent = d_lookup(dentry, &qname);
if (!newdent) {
newdent = d_alloc(dentry, &qname);
if (!newdent)
goto end_advance;
} else {
hashed = 1;
/* If case sensitivity changed for this volume, all entries below this one
should be thrown away. This entry itself is not affected, as its case
sensitivity is controlled by its own parent. */
if (inval_childs)
shrink_dcache_parent(newdent);
/*
* NetWare's OS2 namespace is case preserving yet case
* insensitive. So we update dentry's name as received from
* server. Parent dir's i_mutex is locked because we're in
* readdir.
*/
dentry_update_name_case(newdent, &qname);
}
if (!newdent->d_inode) {
struct inode *inode;
entry->opened = 0;
entry->ino = iunique(dir->i_sb, 2);
inode = ncp_iget(dir->i_sb, entry);
if (inode) {
d_instantiate(newdent, inode);
if (!hashed)
d_rehash(newdent);
}
} else {
struct inode *inode = newdent->d_inode;
mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
ncp_update_inode2(inode, entry);
mutex_unlock(&inode->i_mutex);
}
if (newdent->d_inode) {
ino = newdent->d_inode->i_ino;
newdent->d_fsdata = (void *) ctl.fpos;
ncp_new_dentry(newdent);
}
if (ctl.idx >= NCP_DIRCACHE_SIZE) {
if (ctl.page) {
kunmap(ctl.page);
SetPageUptodate(ctl.page);
unlock_page(ctl.page);
page_cache_release(ctl.page);
}
ctl.cache = NULL;
ctl.idx -= NCP_DIRCACHE_SIZE;
ctl.ofs += 1;
ctl.page = grab_cache_page(&dir->i_data, ctl.ofs);
if (ctl.page)
ctl.cache = kmap(ctl.page);
}
if (ctl.cache) {
ctl.cache->dentry[ctl.idx] = newdent;
valid = 1;
}
dput(newdent);
end_advance:
if (!valid)
ctl.valid = 0;
if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
if (!ino)
ino = find_inode_number(dentry, &qname);
if (!ino)
ino = iunique(dir->i_sb, 2);
ctl.filled = filldir(dirent, qname.name, qname.len,
filp->f_pos, ino, DT_UNKNOWN);
if (!ctl.filled)
filp->f_pos += 1;
}
ctl.fpos += 1;
ctl.idx += 1;
*ctrl = ctl;
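/*
 * Keep the caller's loop going while the cache is still being filled
 * consistently (ctl.valid) or while filldir still accepts entries
 * (!ctl.filled); the caller stops only once both conditions fail.
 */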
return (ctl.valid || !ctl.filled);
}
static void
ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
struct ncp_cache_control *ctl)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct ncp_server *server = NCP_SERVER(inode);
struct ncp_volume_info info;
struct ncp_entry_info entry;
int i;
DPRINTK("ncp_read_volume_list: pos=%ld\n",
(unsigned long) filp->f_pos);
for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) {
int inval_dentry;
if (ncp_get_volume_info_with_number(server, i, &info) != 0)
return;
if (!strlen(info.volume_name))
continue;
DPRINTK("ncp_read_volume_list: found vol: %s\n",
info.volume_name);
if (ncp_lookup_volume(server, info.volume_name,
&entry.i)) {
DPRINTK("ncpfs: could not lookup vol %s\n",
info.volume_name);
continue;
}
inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL);
entry.volume = entry.i.volNumber;
if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, inval_dentry))
return;
}
}
static void
ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
struct ncp_cache_control *ctl)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *dir = dentry->d_inode;
struct ncp_server *server = NCP_SERVER(dir);
struct nw_search_sequence seq;
struct ncp_entry_info entry;
int err;
void* buf;
int more;
size_t bufsize;
DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
(unsigned long) filp->f_pos);
PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
dentry->d_name.name, NCP_FINFO(dir)->volNumber,
NCP_FINFO(dir)->dirEntNum);
err = ncp_initialize_search(server, dir, &seq);
if (err) {
DPRINTK("ncp_do_readdir: init failed, err=%d\n", err);
return;
}
/* We MUST NOT use the server->buffer_size handshaked with the server if we
   are using UDP: over UDP the server uses a maximum buffer size determined
   by the MTU, while over TCP it uses a hardwired value of 65KB (== 66560
   bytes). So we use 128KB, just to be sure, as there is no way to know
   this value in advance. */
bufsize = 131072;
buf = vmalloc(bufsize);
if (!buf)
return;
do {
int cnt;
char* rpl;
size_t rpls;
err = ncp_search_for_fileset(server, &seq, &more, &cnt, buf, bufsize, &rpl, &rpls);
if (err) /* Error */
break;
if (!cnt) /* prevent endless loop */
break;
while (cnt--) {
size_t onerpl;
if (rpls < offsetof(struct nw_info_struct, entryName))
break; /* short packet */
ncp_extract_file_info(rpl, &entry.i);
onerpl = offsetof(struct nw_info_struct, entryName) + entry.i.nameLen;
if (rpls < onerpl)
break; /* short packet */
(void)ncp_obtain_nfs_info(server, &entry.i);
rpl += onerpl;
rpls -= onerpl;
entry.volume = entry.i.volNumber;
if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, 0))
break;
}
} while (more);
vfree(buf);
return;
}
int ncp_conn_logged_in(struct super_block *sb)
{
struct ncp_server* server = NCP_SBP(sb);
int result;
if (ncp_single_volume(server)) {
int len;
struct dentry* dent;
__u32 volNumber;
__le32 dirEntNum;
__le32 DosDirNum;
__u8 __name[NCP_MAXPATHLEN + 1];
len = sizeof(__name);
result = ncp_io2vol(server, __name, &len, server->m.mounted_vol,
strlen(server->m.mounted_vol), 1);
if (result)
goto out;
result = -ENOENT;
if (ncp_get_volume_root(server, __name, &volNumber, &dirEntNum, &DosDirNum)) {
PPRINTK("ncp_conn_logged_in: %s not found\n",
server->m.mounted_vol);
goto out;
}
dent = sb->s_root;
if (dent) {
struct inode* ino = dent->d_inode;
if (ino) {
ncp_update_known_namespace(server, volNumber, NULL);
NCP_FINFO(ino)->volNumber = volNumber;
NCP_FINFO(ino)->dirEntNum = dirEntNum;
NCP_FINFO(ino)->DosDirNum = DosDirNum;
result = 0;
} else {
DPRINTK("ncpfs: sb->s_root->d_inode == NULL!\n");
}
} else {
DPRINTK("ncpfs: sb->s_root == NULL!\n");
}
} else
result = 0;
out:
return result;
}
static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct ncp_server *server = NCP_SERVER(dir);
struct inode *inode = NULL;
struct ncp_entry_info finfo;
int error, res, len;
__u8 __name[NCP_MAXPATHLEN + 1];
error = -EIO;
if (!ncp_conn_valid(server))
goto finished;
PPRINTK("ncp_lookup: server lookup for %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
len = sizeof(__name);
if (ncp_is_server_root(dir)) {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, 1);
if (!res)
res = ncp_lookup_volume(server, __name, &(finfo.i));
if (!res)
ncp_update_known_namespace(server, finfo.i.volNumber, NULL);
} else {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
if (!res)
res = ncp_obtain_info(server, dir, __name, &(finfo.i));
}
PPRINTK("ncp_lookup: looked for %s/%s, res=%d\n",
dentry->d_parent->d_name.name, __name, res);
/*
* If we didn't find an entry, make a negative dentry.
*/
if (res)
goto add_entry;
/*
* Create an inode for the entry.
*/
finfo.opened = 0;
finfo.ino = iunique(dir->i_sb, 2);
finfo.volume = finfo.i.volNumber;
error = -EACCES;
inode = ncp_iget(dir->i_sb, &finfo);
if (inode) {
ncp_new_dentry(dentry);
add_entry:
d_add(dentry, inode);
error = 0;
}
finished:
PPRINTK("ncp_lookup: result=%d\n", error);
return ERR_PTR(error);
}
/*
* This code is common to create, mkdir, and mknod.
*/
static int ncp_instantiate(struct inode *dir, struct dentry *dentry,
struct ncp_entry_info *finfo)
{
struct inode *inode;
int error = -EINVAL;
finfo->ino = iunique(dir->i_sb, 2);
inode = ncp_iget(dir->i_sb, finfo);
if (!inode)
goto out_close;
d_instantiate(dentry, inode);
error = 0;
out:
return error;
out_close:
PPRINTK("ncp_instantiate: %s/%s failed, closing file\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
goto out;
}
int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t rdev, __le32 attributes)
{
struct ncp_server *server = NCP_SERVER(dir);
struct ncp_entry_info finfo;
int error, result, len;
int opmode;
__u8 __name[NCP_MAXPATHLEN + 1];
PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
dentry->d_parent->d_name.name, dentry->d_name.name, mode);
ncp_age_dentry(server, dentry);
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
if (error)
goto out;
error = -EACCES;
if (S_ISREG(mode) &&
(server->m.flags & NCP_MOUNT_EXTRAS) &&
(mode & S_IXUGO))
attributes |= aSYSTEM | aSHARED;
result = ncp_open_create_file_or_subdir(server, dir, __name,
OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
attributes, AR_READ | AR_WRITE, &finfo);
opmode = O_RDWR;
if (result) {
result = ncp_open_create_file_or_subdir(server, dir, __name,
OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
attributes, AR_WRITE, &finfo);
if (result) {
if (result == 0x87)
error = -ENAMETOOLONG;
else if (result < 0)
error = result;
DPRINTK("ncp_create: %s/%s failed\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
goto out;
}
opmode = O_WRONLY;
}
finfo.access = opmode;
if (ncp_is_nfs_extras(server, finfo.volume)) {
finfo.i.nfs.mode = mode;
finfo.i.nfs.rdev = new_encode_dev(rdev);
if (ncp_modify_nfs_info(server, finfo.volume,
finfo.i.dirEntNum,
mode, new_encode_dev(rdev)) != 0)
goto out;
}
error = ncp_instantiate(dir, dentry, &finfo);
out:
return error;
}
static int ncp_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct nameidata *nd)
{
return ncp_create_new(dir, dentry, mode, 0, 0);
}
static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct ncp_entry_info finfo;
struct ncp_server *server = NCP_SERVER(dir);
int error, len;
__u8 __name[NCP_MAXPATHLEN + 1];
DPRINTK("ncp_mkdir: making %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
ncp_age_dentry(server, dentry);
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
if (error)
goto out;
error = ncp_open_create_file_or_subdir(server, dir, __name,
OC_MODE_CREATE, aDIR,
cpu_to_le16(0xffff),
&finfo);
if (error == 0) {
if (ncp_is_nfs_extras(server, finfo.volume)) {
mode |= S_IFDIR;
finfo.i.nfs.mode = mode;
if (ncp_modify_nfs_info(server,
finfo.volume,
finfo.i.dirEntNum,
mode, 0) != 0)
goto out;
}
error = ncp_instantiate(dir, dentry, &finfo);
} else if (error > 0) {
error = -EACCES;
}
out:
return error;
}
static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
{
struct ncp_server *server = NCP_SERVER(dir);
int error, result, len;
__u8 __name[NCP_MAXPATHLEN + 1];
DPRINTK("ncp_rmdir: removing %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
/*
* fail with EBUSY if there are still references to this
* directory.
*/
dentry_unhash(dentry);
error = -EBUSY;
if (!d_unhashed(dentry))
goto out;
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
if (error)
goto out;
result = ncp_del_file_or_subdir(server, dir, __name);
switch (result) {
case 0x00:
error = 0;
break;
case 0x85: /* unauthorized to delete file */
case 0x8A: /* unauthorized to delete file */
error = -EACCES;
break;
case 0x8F:
case 0x90: /* read only */
error = -EPERM;
break;
case 0x9F: /* in use by another client */
error = -EBUSY;
break;
case 0xA0: /* directory not empty */
error = -ENOTEMPTY;
break;
case 0xFF: /* someone deleted file */
error = -ENOENT;
break;
default:
error = result < 0 ? result : -EACCES;
break;
}
out:
return error;
}
static int ncp_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct ncp_server *server;
int error;
server = NCP_SERVER(dir);
DPRINTK("ncp_unlink: unlinking %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
/*
* Check whether to close the file ...
*/
if (inode) {
PPRINTK("ncp_unlink: closing file\n");
ncp_make_closed(inode);
}
error = ncp_del_file_or_subdir2(server, dentry);
#ifdef CONFIG_NCPFS_STRONG
/* 0x9C is "Invalid path". It should be 0x8F or 0x90 (read only), but
   it is not :-( */
if ((error == 0x9C || error == 0x90) && server->m.flags & NCP_MOUNT_STRONG) { /* R/O */
error = ncp_force_unlink(dir, dentry);
}
#endif
switch (error) {
case 0x00:
DPRINTK("ncp: removed %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
break;
case 0x85:
case 0x8A:
error = -EACCES;
break;
case 0x8D: /* some files in use */
case 0x8E: /* all files in use */
error = -EBUSY;
break;
case 0x8F: /* some read only */
case 0x90: /* all read only */
case 0x9C: /* !!! returned when in-use or read-only by NW4 */
error = -EPERM;
break;
case 0xFF:
error = -ENOENT;
break;
default:
error = error < 0 ? error : -EACCES;
break;
}
return error;
}
static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct ncp_server *server = NCP_SERVER(old_dir);
int error;
int old_len, new_len;
__u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
DPRINTK("ncp_rename: %s/%s to %s/%s\n",
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) {
/*
* fail with EBUSY if there are still references to this
* directory.
*/
dentry_unhash(new_dentry);
error = -EBUSY;
if (!d_unhashed(new_dentry))
goto out;
}
ncp_age_dentry(server, old_dentry);
ncp_age_dentry(server, new_dentry);
old_len = sizeof(__old_name);
error = ncp_io2vol(server, __old_name, &old_len,
old_dentry->d_name.name, old_dentry->d_name.len,
!ncp_preserve_case(old_dir));
if (error)
goto out;
new_len = sizeof(__new_name);
error = ncp_io2vol(server, __new_name, &new_len,
new_dentry->d_name.name, new_dentry->d_name.len,
!ncp_preserve_case(new_dir));
if (error)
goto out;
error = ncp_ren_or_mov_file_or_subdir(server, old_dir, __old_name,
new_dir, __new_name);
#ifdef CONFIG_NCPFS_STRONG
if ((error == 0x90 || error == 0x8B || error == -EACCES) &&
server->m.flags & NCP_MOUNT_STRONG) { /* RO */
error = ncp_force_rename(old_dir, old_dentry, __old_name,
new_dir, new_dentry, __new_name);
}
#endif
switch (error) {
case 0x00:
DPRINTK("ncp renamed %s -> %s.\n",
old_dentry->d_name.name, new_dentry->d_name.name);
break;
case 0x9E:
error = -ENAMETOOLONG;
break;
case 0xFF:
error = -ENOENT;
break;
default:
error = error < 0 ? error : -EACCES;
break;
}
out:
return error;
}
static int ncp_mknod(struct inode * dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
if (!new_valid_dev(rdev))
return -EINVAL;
if (ncp_is_nfs_extras(NCP_SERVER(dir), NCP_FINFO(dir)->volNumber)) {
DPRINTK(KERN_DEBUG "ncp_mknod: mode = 0%ho\n", mode);
return ncp_create_new(dir, dentry, mode, rdev, 0);
}
return -EPERM; /* Strange, but true */
}
/* The following routines are taken directly from msdos-fs */
/* Linear day numbers of the respective 1sts in non-leap years. */
static int day_n[] =
{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, 0};
/* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
extern struct timezone sys_tz;
static int utc2local(int time)
{
return time - sys_tz.tz_minuteswest * 60;
}
static int local2utc(int time)
{
return time + sys_tz.tz_minuteswest * 60;
}
/* Convert a MS-DOS time/date pair to a UNIX date (seconds since 1 1 70). */
int
ncp_date_dos2unix(__le16 t, __le16 d)
{
unsigned short time = le16_to_cpu(t), date = le16_to_cpu(d);
int month, year, secs;
/* first subtract and mask after that... Otherwise, if
date == 0, bad things happen */
month = ((date >> 5) - 1) & 15;
year = date >> 9;
secs = (time & 31) * 2 + 60 * ((time >> 5) & 63) + (time >> 11) * 3600 +
86400 * ((date & 31) - 1 + day_n[month] + (year / 4) +
year * 365 - ((year & 3) == 0 && month < 2 ? 1 : 0) + 3653);
/* days since 1.1.70 plus 80's leap day */
return local2utc(secs);
}
/* Convert linear UNIX date to a MS-DOS time/date pair. */
void
ncp_date_unix2dos(int unix_date, __le16 *time, __le16 *date)
{
int day, year, nl_day, month;
unix_date = utc2local(unix_date);
*time = cpu_to_le16(
(unix_date % 60) / 2 + (((unix_date / 60) % 60) << 5) +
(((unix_date / 3600) % 24) << 11));
day = unix_date / 86400 - 3652;
year = day / 365;
if ((year + 3) / 4 + 365 * year > day)
year--;
day -= (year + 3) / 4 + 365 * year;
if (day == 59 && !(year & 3)) {
nl_day = day;
month = 2;
} else {
nl_day = (year & 3) || day <= 59 ? day : day - 1;
for (month = 1; month < 12; month++)
if (day_n[month] > nl_day)
break;
}
*date = cpu_to_le16(nl_day - day_n[month - 1] + 1 + (month << 5) + (year << 9));
}
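/*
 * A worked round trip of the two converters above, assuming
 * sys_tz.tz_minuteswest == 0 so utc2local()/local2utc() are identities:
 * the DOS pair time 0x0000, date 0x1421 (year 10 since 1980, month 1,
 * day 1) decodes as month = ((0x1421 >> 5) - 1) & 15 = 0, year = 10 and
 * secs = 86400 * (1 - 1 + day_n[0] + 10/4 + 10*365 + 3653)
 *      = 86400 * 7305 = 631152000, i.e. 1990-01-01 00:00:00 UTC.
 * Feeding 631152000 back into ncp_date_unix2dos() gives day = 3653,
 * year = 10, month = 1 and reconstructs date 0x1421, time 0x0000.
 */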
| gpl-2.0 |
iDroid-Project/kernel_common | arch/powerpc/lib/rheap.c | 4507 | 16859 | /*
* A Remote Heap. Remote means that we don't touch the memory that the
* heap points to. Normal heap implementations use the memory they manage
* to place their list. We cannot do that because the memory we manage may
* have special properties, for example it is uncacheable or of a different
* endianness.
*
* Author: Pantelis Antoniou <panto@intracom.gr>
*
* 2004 (c) INTRACOM S.A. Greece. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/rheap.h>
/*
* Fixup a list_head, needed when copying lists. If the pointers fall
* between s and e, apply the delta. This assumes that
* sizeof(struct list_head *) == sizeof(unsigned long *).
*/
static inline void fixup(unsigned long s, unsigned long e, int d,
struct list_head *l)
{
unsigned long *pp;
pp = (unsigned long *)&l->next;
if (*pp >= s && *pp < e)
*pp += d;
pp = (unsigned long *)&l->prev;
if (*pp >= s && *pp < e)
*pp += d;
}
/* Grow the allocated blocks */
static int grow(rh_info_t * info, int max_blocks)
{
rh_block_t *block, *blk;
int i, new_blocks;
int delta;
unsigned long blks, blke;
if (max_blocks <= info->max_blocks)
return -EINVAL;
new_blocks = max_blocks - info->max_blocks;
block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC);
if (block == NULL)
return -ENOMEM;
if (info->max_blocks > 0) {
/* copy old block area */
memcpy(block, info->block,
sizeof(rh_block_t) * info->max_blocks);
delta = (char *)block - (char *)info->block;
/* and fixup list pointers */
blks = (unsigned long)info->block;
blke = (unsigned long)(info->block + info->max_blocks);
for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
fixup(blks, blke, delta, &blk->list);
fixup(blks, blke, delta, &info->empty_list);
fixup(blks, blke, delta, &info->free_list);
fixup(blks, blke, delta, &info->taken_list);
/* free the old allocated memory */
if ((info->flags & RHIF_STATIC_BLOCK) == 0)
kfree(info->block);
}
info->block = block;
info->empty_slots += new_blocks;
info->max_blocks = max_blocks;
info->flags &= ~RHIF_STATIC_BLOCK;
/* add all new blocks to the free list */
blk = block + info->max_blocks - new_blocks;
for (i = 0; i < new_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
return 0;
}
/*
* Ensure at least the required number of empty slots. If this function
* causes the block area to grow, then all pointers kept into the block
* area become invalid!
*/
static int assure_empty(rh_info_t * info, int slots)
{
int max_blocks;
/* This function is not meant to be used to grow uncontrollably */
if (slots >= 4)
return -EINVAL;
/* Enough space */
if (info->empty_slots >= slots)
return 0;
/* Next 16 sized block */
max_blocks = ((info->max_blocks + slots) + 15) & ~15;
return grow(info, max_blocks);
}
static rh_block_t *get_slot(rh_info_t * info)
{
rh_block_t *blk;
/* If no more free slots, and failure to extend. */
/* XXX: You should have called assure_empty before */
if (info->empty_slots == 0) {
printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
return NULL;
}
/* Get empty slot to use */
blk = list_entry(info->empty_list.next, rh_block_t, list);
list_del_init(&blk->list);
info->empty_slots--;
/* Initialize */
blk->start = 0;
blk->size = 0;
blk->owner = NULL;
return blk;
}
static inline void release_slot(rh_info_t * info, rh_block_t * blk)
{
list_add(&blk->list, &info->empty_list);
info->empty_slots++;
}
static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
rh_block_t *before;
rh_block_t *after;
rh_block_t *next;
int size;
unsigned long s, e, bs, be;
struct list_head *l;
/* We assume that they are aligned properly */
size = blkn->size;
s = blkn->start;
e = s + size;
/* Find the blocks immediately before and after the given one
* (if any) */
before = NULL;
after = NULL;
next = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
bs = blk->start;
be = bs + blk->size;
if (next == NULL && s >= bs)
next = blk;
if (be == s)
before = blk;
if (e == bs)
after = blk;
/* If both are not null, break now */
if (before != NULL && after != NULL)
break;
}
/* Now check if they are really adjacent */
if (before && s != (before->start + before->size))
before = NULL;
if (after && e != after->start)
after = NULL;
/* No coalescing; list insert and return */
if (before == NULL && after == NULL) {
if (next != NULL)
list_add(&blkn->list, &next->list);
else
list_add(&blkn->list, &info->free_list);
return;
}
/* We don't need it anymore */
release_slot(info, blkn);
/* Grow the before block */
if (before != NULL && after == NULL) {
before->size += size;
return;
}
/* Grow the after block backwards */
if (before == NULL && after != NULL) {
after->start -= size;
after->size += size;
return;
}
/* Grow the before block, and release the after block */
before->size += size + after->size;
list_del(&after->list);
release_slot(info, after);
}
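/*
 * Coalescing example: with free blocks [0x100..0x200) and [0x300..0x400),
 * attaching [0x200..0x300) finds "before" == the first block and
 * "after" == the second, so all three merge into one free block
 * [0x100..0x400) and two rh_block_t slots go back to the empty list.
 */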
static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
{
rh_block_t *blk;
struct list_head *l;
/* Find the block immediately before the given one (if any) */
list_for_each(l, &info->taken_list) {
blk = list_entry(l, rh_block_t, list);
if (blk->start > blkn->start) {
list_add_tail(&blkn->list, &blk->list);
return;
}
}
list_add_tail(&blkn->list, &info->taken_list);
}
/*
* Create a remote heap dynamically. Note that no memory for the block
* slots is allocated here; it is allocated upon the first allocation request.
*/
rh_info_t *rh_create(unsigned int alignment)
{
rh_info_t *info;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return ERR_PTR(-EINVAL);
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (info == NULL)
return ERR_PTR(-ENOMEM);
info->alignment = alignment;
/* Initially everything as empty */
info->block = NULL;
info->max_blocks = 0;
info->empty_slots = 0;
info->flags = 0;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
return info;
}
EXPORT_SYMBOL_GPL(rh_create);
/*
* Destroy a dynamically created remote heap. Deallocate only if the areas
* are not static
*/
void rh_destroy(rh_info_t * info)
{
if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
kfree(info->block);
if ((info->flags & RHIF_STATIC_INFO) == 0)
kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);
/*
* Initialize in place a remote heap info block. This is needed to support
* operation very early in the startup of the kernel, when it is not yet safe
* to call kmalloc.
*/
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
rh_block_t * block)
{
int i;
rh_block_t *blk;
/* Alignment must be a power of two */
if ((alignment & (alignment - 1)) != 0)
return;
info->alignment = alignment;
/* Initially everything as empty */
info->block = block;
info->max_blocks = max_blocks;
info->empty_slots = max_blocks;
info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
INIT_LIST_HEAD(&info->empty_list);
INIT_LIST_HEAD(&info->free_list);
INIT_LIST_HEAD(&info->taken_list);
/* Add all new blocks to the free list */
for (i = 0, blk = block; i < max_blocks; i++, blk++)
list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
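/*
 * A minimal early-boot usage sketch (hypothetical names and sizes), for
 * the case where kmalloc is not yet available:
 *
 *	static rh_block_t early_blocks[16];
 *	static rh_info_t early_heap;
 *
 *	rh_init(&early_heap, 8, ARRAY_SIZE(early_blocks), early_blocks);
 *	rh_attach_region(&early_heap, 0, 0x4000);
 */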
/* Attach a free memory region; coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
rh_block_t *blk;
unsigned long s, e, m;
int r;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (IS_ERR_VALUE(e) || (e < s))
return -ERANGE;
/* Take final values */
start = s;
size = e - s;
/* Grow the blocks, if needed */
r = assure_empty(info, 1);
if (r < 0)
return r;
blk = get_slot(info);
blk->start = start;
blk->size = size;
blk->owner = NULL;
attach_free_block(info, blk);
return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);
/* Detach the given address range; splits a free block if needed. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
struct list_head *l;
rh_block_t *blk, *newblk;
unsigned long s, e, m, bs, be;
/* Validate size */
if (size <= 0)
return (unsigned long) -EINVAL;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 1) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = blk->start;
be = blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Perfect fit */
if (bs == s && be == e) {
/* Delete from free list, release slot */
list_del(&blk->list);
release_slot(info, blk);
return s;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start += size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* the back free fragment */
newblk = get_slot(info);
newblk->start = e;
newblk->size = be - e;
list_add(&newblk->list, &blk->list);
}
return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
/* Allocate a block of memory at the specified alignment. The value returned
* is an offset into the buffer initialized by rh_init(), or a negative number
* if there is an error.
*/
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
struct list_head *l;
rh_block_t *blk;
rh_block_t *newblk;
unsigned long start, sp_size;
/* Validate size, and alignment must be power of two */
if (size <= 0 || (alignment & (alignment - 1)) != 0)
return (unsigned long) -EINVAL;
/* Align to configured alignment */
size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
if (assure_empty(info, 2) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
if (size <= blk->size) {
start = (blk->start + alignment - 1) & ~(alignment - 1);
if (start + size <= blk->start + blk->size)
break;
}
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Just fits */
if (blk->size == size) {
/* Move from free list to taken list */
list_del(&blk->list);
newblk = blk;
} else {
/* Fragment caused, split if needed */
/* Create block for fragment in the beginning */
sp_size = start - blk->start;
if (sp_size) {
rh_block_t *spblk;
spblk = get_slot(info);
spblk->start = blk->start;
spblk->size = sp_size;
/* add before the blk */
list_add(&spblk->list, blk->list.prev);
}
newblk = get_slot(info);
newblk->start = start;
newblk->size = size;
/* blk still in free list, with updated start and size
* for fragment in the end */
blk->start = start + size;
blk->size -= sp_size + size;
/* No fragment in the end, remove blk */
if (blk->size == 0) {
list_del(&blk->list);
release_slot(info, blk);
}
}
newblk->owner = owner;
attach_taken_block(info, newblk);
return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
/* Allocate a block of memory at the default alignment. The value returned is
* an offset into the buffer initialized by rh_init(), or a negative number if
* there is an error.
*/
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);
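/*
 * A minimal dynamic lifecycle sketch (hypothetical sizes and offsets;
 * note the value returned by rh_alloc() is an offset into the managed
 * region, not a pointer):
 *
 *	rh_info_t *rh = rh_create(16);
 *	rh_attach_region(rh, 0, 0x10000);
 *	unsigned long off = rh_alloc(rh, 256, "demo");
 *	if (!IS_ERR_VALUE(off))
 *		rh_free(rh, off);
 *	rh_destroy(rh);
 */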
/* Allocate a block of memory at the given offset, rounded up to the default
* alignment. The value returned is an offset into the buffer initialized by
* rh_init(), or a negative number if there is an error.
*/
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
struct list_head *l;
rh_block_t *blk, *newblk1, *newblk2;
unsigned long s, e, m, bs = 0, be = 0;
/* Validate size */
if (size <= 0)
return (unsigned long) -EINVAL;
/* The region must be aligned */
s = start;
e = s + size;
m = info->alignment - 1;
/* Round start up */
s = (s + m) & ~m;
/* Round end down */
e = e & ~m;
if (assure_empty(info, 2) < 0)
return (unsigned long) -ENOMEM;
blk = NULL;
list_for_each(l, &info->free_list) {
blk = list_entry(l, rh_block_t, list);
/* The range must lie entirely inside one free block */
bs = blk->start;
be = blk->start + blk->size;
if (s >= bs && e <= be)
break;
blk = NULL;
}
if (blk == NULL)
return (unsigned long) -ENOMEM;
/* Perfect fit */
if (bs == s && be == e) {
/* Move from free list to taken list */
list_del(&blk->list);
blk->owner = owner;
start = blk->start;
attach_taken_block(info, blk);
return start;
}
/* blk still in free list, with updated start and/or size */
if (bs == s || be == e) {
if (bs == s)
blk->start += size;
blk->size -= size;
} else {
/* The front free fragment */
blk->size = s - bs;
/* The back free fragment */
newblk2 = get_slot(info);
newblk2->start = e;
newblk2->size = be - e;
list_add(&newblk2->list, &blk->list);
}
newblk1 = get_slot(info);
newblk1->start = s;
newblk1->size = e - s;
newblk1->owner = owner;
start = newblk1->start;
attach_taken_block(info, newblk1);
return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
* The return value is the size of the deallocated block, or a negative number
* if there is an error.
*/
int rh_free(rh_info_t * info, unsigned long start)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
/* Remove from taken list */
list_del(&blk->list);
/* Get size of freed block */
size = blk->size;
attach_free_block(info, blk);
return size;
}
EXPORT_SYMBOL_GPL(rh_free);
int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
{
rh_block_t *blk;
struct list_head *l;
struct list_head *h;
int nr;
switch (what) {
case RHGS_FREE:
h = &info->free_list;
break;
case RHGS_TAKEN:
h = &info->taken_list;
break;
default:
return -EINVAL;
}
/* Linear search for block */
nr = 0;
list_for_each(l, h) {
blk = list_entry(l, rh_block_t, list);
if (stats != NULL && nr < max_stats) {
stats->start = blk->start;
stats->size = blk->size;
stats->owner = blk->owner;
stats++;
}
nr++;
}
return nr;
}
EXPORT_SYMBOL_GPL(rh_get_stats);
int rh_set_owner(rh_info_t * info, unsigned long start, const char *owner)
{
rh_block_t *blk, *blk2;
struct list_head *l;
int size;
/* Linear search for block */
blk = NULL;
list_for_each(l, &info->taken_list) {
blk2 = list_entry(l, rh_block_t, list);
if (start < blk2->start)
break;
blk = blk2;
}
if (blk == NULL || start > (blk->start + blk->size))
return -EINVAL;
blk->owner = owner;
size = blk->size;
return size;
}
EXPORT_SYMBOL_GPL(rh_set_owner);
void rh_dump(rh_info_t * info)
{
static rh_stats_t st[32]; /* XXX maximum 32 blocks */
int maxnr;
int i, nr;
maxnr = ARRAY_SIZE(st);
printk(KERN_INFO
"info @0x%p (%d slots empty / %d max)\n",
info, info->empty_slots, info->max_blocks);
printk(KERN_INFO " Free:\n");
nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%lx-0x%lx (%u)\n",
st[i].start, st[i].start + st[i].size,
st[i].size);
printk(KERN_INFO "\n");
printk(KERN_INFO " Taken:\n");
nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
if (nr > maxnr)
nr = maxnr;
for (i = 0; i < nr; i++)
printk(KERN_INFO
" 0x%lx-0x%lx (%u) %s\n",
st[i].start, st[i].start + st[i].size,
st[i].size, st[i].owner != NULL ? st[i].owner : "");
printk(KERN_INFO "\n");
}
EXPORT_SYMBOL_GPL(rh_dump);
void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
{
printk(KERN_INFO
"blk @0x%p: 0x%lx-0x%lx (%u)\n",
blk, blk->start, blk->start + blk->size, blk->size);
}
EXPORT_SYMBOL_GPL(rh_dump_blk);
| gpl-2.0 |
csolg/hi35xx-buildroot | linux/linux-3.4.y/arch/arm/mach-omap2/smartreflex-class3.c | 4763 | 1448 | /*
* Smart reflex Class 3 specific implementations
*
* Author: Thara Gopinath <thara@ti.com>
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "smartreflex.h"
static int sr_class3_enable(struct voltagedomain *voltdm)
{
unsigned long volt = voltdm_get_voltage(voltdm);
if (!volt) {
pr_warning("%s: Curr voltage unknown. Cannot enable sr_%s\n",
__func__, voltdm->name);
return -ENODATA;
}
omap_vp_enable(voltdm);
return sr_enable(voltdm, volt);
}
static int sr_class3_disable(struct voltagedomain *voltdm, int is_volt_reset)
{
sr_disable_errgen(voltdm);
omap_vp_disable(voltdm);
sr_disable(voltdm);
if (is_volt_reset)
voltdm_reset(voltdm);
return 0;
}
static int sr_class3_configure(struct voltagedomain *voltdm)
{
return sr_configure_errgen(voltdm);
}
/* SR class3 structure */
static struct omap_sr_class_data class3_data = {
.enable = sr_class3_enable,
.disable = sr_class3_disable,
.configure = sr_class3_configure,
.class_type = SR_CLASS3,
};
/* Smartreflex Class3 init API to be called from board file */
static int __init sr_class3_init(void)
{
pr_info("SmartReflex Class3 initialized\n");
return sr_register_class(&class3_data);
}
late_initcall(sr_class3_init);
| gpl-2.0 |
hiikezoe/android_kernel_kyocera_202k | drivers/net/wireless/libertas/rx.c | 8091 | 7551 | /*
* This file contains the handling of RX in the wlan driver.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/export.h>
#include <net/cfg80211.h>
#include "defs.h"
#include "host.h"
#include "radiotap.h"
#include "decl.h"
#include "dev.h"
#include "mesh.h"
struct eth803hdr {
u8 dest_addr[6];
u8 src_addr[6];
u16 h803_len;
} __packed;
struct rfc1042hdr {
u8 llc_dsap;
u8 llc_ssap;
u8 llc_ctrl;
u8 snap_oui[3];
u16 snap_type;
} __packed;
struct rxpackethdr {
struct eth803hdr eth803_hdr;
struct rfc1042hdr rfc1042_hdr;
} __packed;
struct rx80211packethdr {
struct rxpd rx_pd;
void *eth80211_hdr;
} __packed;
static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb);
/**
* lbs_process_rxed_packet - processes received packet and forwards it
* to kernel/upper layer
*
* @priv: A pointer to &struct lbs_private
* @skb: A pointer to skb which includes the received packet
* returns: 0 or -1
*/
int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
{
int ret = 0;
struct net_device *dev = priv->dev;
struct rxpackethdr *p_rx_pkt;
struct rxpd *p_rx_pd;
int hdrchop;
struct ethhdr *p_ethhdr;
static const u8 rfc1042_eth_hdr[] = {
0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
lbs_deb_enter(LBS_DEB_RX);
BUG_ON(!skb);
skb->ip_summed = CHECKSUM_NONE;
if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR)
return process_rxed_802_11_packet(priv, skb);
p_rx_pd = (struct rxpd *) skb->data;
p_rx_pkt = (struct rxpackethdr *) ((u8 *)p_rx_pd +
le32_to_cpu(p_rx_pd->pkt_ptr));
dev = lbs_mesh_set_dev(priv, dev, p_rx_pd);
lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data,
min_t(unsigned int, skb->len, 100));
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
dev->stats.rx_length_errors++;
ret = 0;
dev_kfree_skb(skb);
goto done;
}
lbs_deb_rx("rx data: skb->len - pkt_ptr = %d-%zd = %zd\n",
skb->len, (size_t)le32_to_cpu(p_rx_pd->pkt_ptr),
skb->len - (size_t)le32_to_cpu(p_rx_pd->pkt_ptr));
lbs_deb_hex(LBS_DEB_RX, "RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr,
sizeof(p_rx_pkt->eth803_hdr.dest_addr));
lbs_deb_hex(LBS_DEB_RX, "RX Data: Src", p_rx_pkt->eth803_hdr.src_addr,
sizeof(p_rx_pkt->eth803_hdr.src_addr));
if (memcmp(&p_rx_pkt->rfc1042_hdr,
rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type (ethertype)
*
* The firmware only passes up SNAP frames converting
* all RX Data from 802.11 to 802.2/LLC/SNAP frames.
*
* To create the Ethernet II, just move the src, dst address right
* before the snap_type.
*/
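/*
 * Layout sketch: the received 802.3 + LLC/SNAP header is
 *	dest[6] src[6] len[2] dsap ssap ctrl oui[3] snap_type[2]
 * and the Ethernet II header wanted is
 *	dest[6] src[6] snap_type[2]
 * so copying dest/src to just in front of snap_type rebuilds the
 * Ethernet II frame in place within the same buffer.
 */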
p_ethhdr = (struct ethhdr *)
((u8 *) &p_rx_pkt->eth803_hdr
+ sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
- sizeof(p_rx_pkt->eth803_hdr.dest_addr)
- sizeof(p_rx_pkt->eth803_hdr.src_addr)
- sizeof(p_rx_pkt->rfc1042_hdr.snap_type));
memcpy(p_ethhdr->h_source, p_rx_pkt->eth803_hdr.src_addr,
sizeof(p_ethhdr->h_source));
memcpy(p_ethhdr->h_dest, p_rx_pkt->eth803_hdr.dest_addr,
sizeof(p_ethhdr->h_dest));
/* Chop off the rxpd + the excess memory from the 802.2/llc/snap header
* that was removed
*/
hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
} else {
lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
(u8 *) &p_rx_pkt->rfc1042_hdr,
sizeof(p_rx_pkt->rfc1042_hdr));
/* Chop off the rxpd */
hdrchop = (u8 *)&p_rx_pkt->eth803_hdr - (u8 *)p_rx_pd;
}
/* Chop off the leading header bytes so the skb points to the start of
* either the reconstructed EthII frame or the 802.2/llc/snap frame
*/
skb_pull(skb, hdrchop);
priv->cur_rate = lbs_fw_index_to_data_rate(p_rx_pd->rx_rate);
lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
if (in_interrupt())
netif_rx(skb);
else
netif_rx_ni(skb);
ret = 0;
done:
lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
return ret;
}
EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
/**
* convert_mv_rate_to_radiotap - converts Tx/Rx rates from Marvell WLAN format
* (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
*
* @rate: Input rate
* returns: Output Rate (0 if invalid)
*/
static u8 convert_mv_rate_to_radiotap(u8 rate)
{
switch (rate) {
case 0: /* 1 Mbps */
return 2;
case 1: /* 2 Mbps */
return 4;
case 2: /* 5.5 Mbps */
return 11;
case 3: /* 11 Mbps */
return 22;
/* case 4: reserved */
case 5: /* 6 Mbps */
return 12;
case 6: /* 9 Mbps */
return 18;
case 7: /* 12 Mbps */
return 24;
case 8: /* 18 Mbps */
return 36;
case 9: /* 24 Mbps */
return 48;
case 10: /* 36 Mbps */
return 72;
case 11: /* 48 Mbps */
return 96;
case 12: /* 54 Mbps */
return 108;
}
pr_alert("Invalid Marvell WLAN rate %i\n", rate);
return 0;
}
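/*
 * Example: firmware rate index 2 (5.5 Mbps) maps to 11; in
 * IEEE80211_RADIOTAP_RATE units of 500 kb/s that is 11 * 500 kb/s
 * = 5.5 Mb/s.
 */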
/**
* process_rxed_802_11_packet - processes a received 802.11 packet and forwards
* it to kernel/upper layer
*
* @priv: A pointer to &struct lbs_private
* @skb: A pointer to skb which includes the received packet
* returns: 0 or -1
*/
static int process_rxed_802_11_packet(struct lbs_private *priv,
struct sk_buff *skb)
{
int ret = 0;
struct net_device *dev = priv->dev;
struct rx80211packethdr *p_rx_pkt;
struct rxpd *prxpd;
struct rx_radiotap_hdr radiotap_hdr;
struct rx_radiotap_hdr *pradiotap_hdr;
lbs_deb_enter(LBS_DEB_RX);
p_rx_pkt = (struct rx80211packethdr *) skb->data;
prxpd = &p_rx_pkt->rx_pd;
/* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */
if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
lbs_deb_rx("rx err: frame received with bad length\n");
dev->stats.rx_length_errors++;
ret = -EINVAL;
kfree_skb(skb);
goto done;
}
lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
/* create the exported radio header */
/* radiotap header */
memset(&radiotap_hdr, 0, sizeof(radiotap_hdr));
/* XXX must check radiotap_hdr.hdr.it_pad for pad */
radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
/* XXX must check no carryout */
radiotap_hdr.antsignal = prxpd->snr + prxpd->nf;
/* chop the rxpd */
skb_pull(skb, sizeof(struct rxpd));
/* add space for the new radio header */
if ((skb_headroom(skb) < sizeof(struct rx_radiotap_hdr)) &&
pskb_expand_head(skb, sizeof(struct rx_radiotap_hdr), 0, GFP_ATOMIC)) {
netdev_alert(dev, "%s: couldn't pskb_expand_head\n", __func__);
ret = -ENOMEM;
kfree_skb(skb);
goto done;
}
pradiotap_hdr = (void *)skb_push(skb, sizeof(struct rx_radiotap_hdr));
memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr));
priv->cur_rate = lbs_fw_index_to_data_rate(prxpd->rx_rate);
lbs_deb_rx("rx data: size of actual packet %d\n", skb->len);
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
if (in_interrupt())
netif_rx(skb);
else
netif_rx_ni(skb);
ret = 0;
done:
lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
return ret;
}
| gpl-2.0 |
jakew02/android_kernel_asus_fugu | drivers/sh/intc/balancing.c | 10395 | 2327 | /*
* Support for hardware-managed IRQ auto-distribution.
*
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include "internals.h"
static unsigned long dist_handle[INTC_NR_IRQS];
void intc_balancing_enable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = dist_handle[irq];
unsigned long addr;
if (irq_balancing_disabled(irq) || !handle)
return;
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}
void intc_balancing_disable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = dist_handle[irq];
unsigned long addr;
if (irq_balancing_disabled(irq) || !handle)
return;
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}
static unsigned int intc_dist_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id)
{
struct intc_mask_reg *mr = desc->hw.mask_regs;
unsigned int i, j, fn, mode;
unsigned long reg_e, reg_d;
for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
mr = desc->hw.mask_regs + i;
/*
* Skip this entry if there's no auto-distribution
* register associated with it.
*/
if (!mr->dist_reg)
continue;
for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
if (mr->enum_ids[j] != enum_id)
continue;
fn = REG_FN_MODIFY_BASE;
mode = MODE_ENABLE_REG;
reg_e = mr->dist_reg;
reg_d = mr->dist_reg;
fn += (mr->reg_width >> 3) - 1;
return _INTC_MK(fn, mode,
intc_get_reg(d, reg_e),
intc_get_reg(d, reg_d),
1,
(mr->reg_width - 1) - j);
}
}
/*
* It's possible we've gotten here with no distribution options
* available for the IRQ in question, so we just skip over those.
*/
return 0;
}
void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
struct intc_desc_int *d, intc_enum id)
{
unsigned long flags;
/*
* Nothing to do for this IRQ.
*/
if (!desc->hw.mask_regs)
return;
raw_spin_lock_irqsave(&intc_big_lock, flags);
dist_handle[irq] = intc_dist_data(desc, d, id);
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
| gpl-2.0 |
Klagopsalmer/linux | arch/parisc/lib/io.c | 13979 | 9956 | /*
* arch/parisc/lib/io.c
*
* Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
* Copyright (c) Randolph Chung 2001 <tausq@debian.org>
*
* IO accessing functions which shouldn't be inlined because they're too big
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/io.h>
/* Copies a block of memory to a device in an efficient manner.
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
goto bytecopy;
while ((unsigned long)dst & 3) {
writeb(*(char *)src, dst++);
src++;
count--;
}
while (count > 3) {
__raw_writel(*(u32 *)src, dst);
src += 4;
dst += 4;
count -= 4;
}
bytecopy:
while (count--) {
writeb(*(char *)src, dst++);
src++;
}
}
/*
** Copies a block of memory from a device in an efficient manner.
** Assumes the device can cope with 32-bit transfers. If it can't,
** don't use this function.
**
** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
** 27341/64 = 427 cyc per int
** 61311/128 = 478 cyc per short
** 122637/256 = 479 cyc per byte
** Ergo bus latencies dominant (not transfer size).
** Minimize total number of transfers at cost of CPU cycles.
** TODO: only look at src alignment and adjust the stores to dest.
*/
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
/* first compare alignment of src/dst */
if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
goto bytecopy;
if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
goto shortcopy;
/* Then check for misaligned start address */
if ((unsigned long)src & 1) {
*(u8 *)dst = readb(src);
src++;
dst++;
count--;
if (count < 2) goto bytecopy;
}
if ((unsigned long)src & 2) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dst += 2;
count -= 2;
}
while (count > 3) {
*(u32 *)dst = __raw_readl(src);
dst += 4;
src += 4;
count -= 4;
}
shortcopy:
while (count > 1) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dst += 2;
count -= 2;
}
bytecopy:
while (count--) {
*(char *)dst = readb(src);
src++;
dst++;
}
}
/* Sets a block of memory on a device to a given value.
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
while ((unsigned long)addr & 3) {
writeb(val, addr++);
count--;
}
while (count > 3) {
__raw_writel(val32, addr);
addr += 4;
count -= 4;
}
while (count--) {
writeb(val, addr++);
}
}
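/*
 * A minimal usage sketch (hypothetical names and sizes): given a mapping
 * obtained from ioremap(), these helpers keep the accesses 32-bit wide
 * where possible:
 *
 *	void __iomem *regs = ioremap(bar_phys, 0x1000);
 *	if (regs) {
 *		memset_io(regs, 0, 0x100);
 *		memcpy_toio(regs + 0x100, buf, len);
 *		iounmap(regs);
 *	}
 */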
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* DST.
*/
void insb (unsigned long port, void *dst, unsigned long count)
{
unsigned char *p;
p = (unsigned char *)dst;
while (((unsigned long)p) & 0x3) {
if (!count)
return;
count--;
*p = inb(port);
p++;
}
while (count >= 4) {
unsigned int w;
count -= 4;
w = inb(port) << 24;
w |= inb(port) << 16;
w |= inb(port) << 8;
w |= inb(port);
*(unsigned int *) p = w;
p += 4;
}
while (count) {
--count;
*p = inb(port);
p++;
}
}
/*
* Read COUNT 16-bit words from port PORT into memory starting at
* DST. DST must be at least short-aligned. This is used by the
* IDE driver to read disk sectors. Performance is important, but
* the interfaces seem to be slow: just using the inlined version
* of inw() breaks things.
*/
void insw (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
unsigned char *p;
p = (unsigned char *)dst;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
*(unsigned int *)p = l;
p += 4;
}
if (count) {
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
case 0x02: /* Buffer 16-bit aligned */
*(unsigned short *)p = cpu_to_le16(inw(port));
p += 2;
count--;
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
*(unsigned int *)p = l;
p += 4;
}
if (count) {
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
case 0x01: /* Buffer 8-bit aligned */
case 0x03:
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
--count;
l = cpu_to_le16(inw(port));
*p = l >> 8;
p++;
while (count--)
{
l2 = cpu_to_le16(inw(port));
*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
p += 2;
l = l2;
}
*p = l & 0xff;
break;
}
}
/*
* Read COUNT 32-bit words from port PORT into memory starting at
* DST. Now works with any alignment in DST. Performance is important,
* but the interfaces seem to be slow: just using the inlined version
* of inl() breaks things.
*/
void insl (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
unsigned char *p;
p = (unsigned char *)dst;
if (!count)
return;
switch (((unsigned long) dst) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
*(unsigned int *)p = cpu_to_le32(inl(port));
p += 4;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*(unsigned short *)p = l >> 16;
p += 2;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
p += 4;
l = l2;
}
*(unsigned short *)p = l & 0xffff;
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*(unsigned char *)p = l >> 24;
p++;
*(unsigned short *)p = (l >> 8) & 0xffff;
p += 2;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
p += 4;
l = l2;
}
*p = l & 0xff;
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*p = l >> 24;
p++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
p += 4;
l = l2;
}
*(unsigned short *)p = (l >> 8) & 0xffff;
p += 2;
*p = l & 0xff;
break;
}
}
/*
* Like insb but in the opposite direction.
* Don't worry as much about doing aligned memory transfers:
* doing byte reads the "slow" way isn't nearly as slow as
* doing byte writes the slow way (no r-m-w cycle).
*/
void outsb(unsigned long port, const void * src, unsigned long count)
{
const unsigned char *p;
p = (const unsigned char *)src;
while (count) {
count--;
outb(*p, port);
p++;
}
}
/*
* Like insw but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Performance is important, but the
* interfaces seem to be slow: just using the inlined version of
* outw() breaks things.
*/
void outsw (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
const unsigned char *p;
p = (const unsigned char *)src;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
p += 4;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
outw(le16_to_cpu(*(unsigned short*)p), port);
}
break;
case 0x02: /* Buffer 16-bit aligned */
outw(le16_to_cpu(*(unsigned short*)p), port);
p += 2;
count--;
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
p += 4;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
outw(le16_to_cpu(*(unsigned short *)p), port);
}
break;
case 0x01: /* Buffer 8-bit aligned */
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
l = *p << 8;
p++;
count--;
while (count)
{
count--;
l2 = *(unsigned short *)p;
p += 2;
outw(le16_to_cpu(l | l2 >> 8), port);
l = l2 << 8;
}
l2 = *(unsigned char *)p;
outw (le16_to_cpu(l | l2>>8), port);
break;
}
}
/*
* Like insl but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Works with any alignment in SRC.
* Performance is important, but the interfaces seem to be slow:
* just using the inlined version of outl() breaks things.
*/
void outsl (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
const unsigned char *p;
p = (const unsigned char *)src;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
outl(le32_to_cpu(*(unsigned int *)p), port);
p += 4;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
l = *(unsigned short *)p;
p += 2;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l << 16 | l2 >> 16), port);
l = l2;
}
l2 = *(unsigned short *)p;
outl (le32_to_cpu(l << 16 | l2), port);
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = *p << 24;
p++;
l |= *(unsigned short *)p << 8;
p += 2;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l | l2 >> 24), port);
l = l2 << 8;
}
l2 = *p;
outl (le32_to_cpu(l | l2), port);
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = *p << 24;
p++;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l | l2 >> 8), port);
l = l2 << 24;
}
l2 = *(unsigned short *)p << 16;
p += 2;
l2 |= *p;
outl (le32_to_cpu(l | l2), port);
break;
}
}
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
| gpl-2.0 |
classicsong/Range-Lock | drivers/staging/intel_sst/intel_sst_drv_interface.c | 156 | 15239 | /*
* intel_sst_interface.c - Intel SST Driver for audio engine
*
* Copyright (C) 2008-10 Intel Corp
* Authors: Vinod Koul <vinod.koul@intel.com>
* Harsha Priya <priya.harsha@intel.com>
* Dharageswari R <dharageswari.r@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This driver exposes the audio engine functionalities to the ALSA
* and middleware.
* Upper layer interfaces (MAD driver, MMF) to SST driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
#include "intel_sst_fw_ipc.h"
#include "intel_sst_common.h"
/*
* sst_download_fw - download the audio firmware to DSP
*
* This function is called when the FW needs to be downloaded to SST DSP engine
*/
int sst_download_fw(void)
{
int retval;
const struct firmware *fw_sst;
char name[20];
if (sst_drv_ctx->sst_state != SST_UN_INIT)
return -EPERM;
/* Reload firmware is not needed for MRST */
if ((sst_drv_ctx->pci_id == SST_MRST_PCI_ID) && sst_drv_ctx->fw_downloaded) {
pr_debug("FW already downloaded, skip for MRST platform\n");
sst_drv_ctx->sst_state = SST_FW_RUNNING;
return 0;
}
snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
sst_drv_ctx->pci_id, ".bin");
pr_debug("Downloading %s FW now...\n", name);
retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
if (retval) {
pr_err("request fw failed %d\n", retval);
return retval;
}
sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
sst_drv_ctx->alloc_block[0].ops_block.condition = false;
retval = sst_load_fw(fw_sst, NULL);
if (retval)
goto end_restore;
retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
if (retval)
pr_err("fw download failed %d\n" , retval);
else
sst_drv_ctx->fw_downloaded = 1;
end_restore:
release_firmware(fw_sst);
sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
return retval;
}
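/*
 * Illustrative note: the snprintf() above derives the firmware file name
 * from the PCI device id, e.g. a pci_id of 0x080a would request
 * "fw_sst_080a.bin"; the id value here is an example, not taken from
 * this file.
 */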
/*
* sst_stalled - this function checks if the lpe is in stalled state
*/
int sst_stalled(void)
{
int retry = 1000;
int retval = -1;
while (retry) {
if (!sst_drv_ctx->lpe_stalled)
return 0;
/*wait for time and re-check*/
msleep(1);
retry--;
}
pr_debug("in Stalled State\n");
return retval;
}
void free_stream_context(unsigned int str_id)
{
struct stream_info *stream;
if (!sst_validate_strid(str_id)) {
/* str_id is valid, so stream is allocated */
stream = &sst_drv_ctx->streams[str_id];
if (sst_free_stream(str_id))
sst_clean_stream(&sst_drv_ctx->streams[str_id]);
if (stream->ops == STREAM_OPS_PLAYBACK ||
stream->ops == STREAM_OPS_PLAYBACK_DRM) {
sst_drv_ctx->pb_streams--;
if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
sst_drv_ctx->scard_ops->power_down_pmic_pb(
stream->device);
else {
if (sst_drv_ctx->pb_streams == 0)
sst_drv_ctx->scard_ops->
power_down_pmic_pb(stream->device);
}
} else if (stream->ops == STREAM_OPS_CAPTURE) {
sst_drv_ctx->cp_streams--;
if (sst_drv_ctx->cp_streams == 0)
sst_drv_ctx->scard_ops->power_down_pmic_cp(
stream->device);
}
if (sst_drv_ctx->pb_streams == 0
&& sst_drv_ctx->cp_streams == 0)
sst_drv_ctx->scard_ops->power_down_pmic();
}
}
/*
* sst_get_stream_allocated - this function gets a stream allocated with
* the given params
*
* @str_param : stream params
* @lib_dnld : pointer to pointer of lib download struct
*
* This creates a new stream id for a stream; in case a library needs to be
* downloaded to the DSP, it downloads it
*/
int sst_get_stream_allocated(struct snd_sst_params *str_param,
struct snd_sst_lib_download **lib_dnld)
{
int retval, str_id;
struct stream_info *str_info;
retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
str_param->codec, str_param->device_type);
if (retval < 0) {
pr_err("sst_alloc_stream failed %d\n", retval);
return retval;
}
pr_debug("Stream allocated %d\n", retval);
str_id = retval;
str_info = &sst_drv_ctx->streams[str_id];
/* Block the call for reply */
retval = sst_wait_interruptible_timeout(sst_drv_ctx,
&str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
pr_debug("FW alloc failed retval %d, ret_code %d\n",
retval, str_info->ctrl_blk.ret_code);
str_id = -str_info->ctrl_blk.ret_code; /*return error*/
*lib_dnld = str_info->ctrl_blk.data;
sst_clean_stream(str_info);
} else
pr_debug("FW Stream allocated success\n");
return str_id; /* will return either an error (set above) or a valid stream id */
}
/*
* sst_get_sfreq - this function returns the frequency of the stream
*
* @str_param : stream params
*/
static int sst_get_sfreq(struct snd_sst_params *str_param)
{
switch (str_param->codec) {
case SST_CODEC_TYPE_PCM:
return 48000; /*str_param->sparams.uc.pcm_params.sfreq;*/
case SST_CODEC_TYPE_MP3:
return str_param->sparams.uc.mp3_params.sfreq;
case SST_CODEC_TYPE_AAC:
return str_param->sparams.uc.aac_params.sfreq;
case SST_CODEC_TYPE_WMA9:
return str_param->sparams.uc.wma_params.sfreq;
default:
return 0;
}
}
/*
* sst_get_stream - this function prepares for stream allocation
*
* @str_param : stream param
*/
int sst_get_stream(struct snd_sst_params *str_param)
{
int i, retval;
struct stream_info *str_info;
struct snd_sst_lib_download *lib_dnld;
/* stream is not allocated, we are allocating */
retval = sst_get_stream_allocated(str_param, &lib_dnld);
if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
/* codec download is required */
struct snd_sst_alloc_response *response;
pr_debug("Codec is required.... trying that\n");
if (lib_dnld == NULL) {
pr_err("lib download null!!! abort\n");
return -EIO;
}
i = sst_get_block_stream(sst_drv_ctx);
pr_debug("alloc block allocated = %d\n", i);
if (i < 0) {
kfree(lib_dnld);
return -ENOMEM;
}
/* validate the block index before indexing alloc_block[] with it */
response = sst_drv_ctx->alloc_block[i].ops_block.data;
retval = sst_load_library(lib_dnld, str_param->ops);
kfree(lib_dnld);
sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
if (!retval) {
pr_debug("codec was downloaded successfully\n");
retval = sst_get_stream_allocated(str_param, &lib_dnld);
if (retval <= 0)
goto err;
pr_debug("Alloc done stream id %d\n", retval);
} else {
pr_debug("codec download failed\n");
retval = -EIO;
goto err;
}
} else if (retval <= 0)
goto err;
/*else
set_port_params(str_param, str_param->ops);*/
/* store sampling freq */
str_info = &sst_drv_ctx->streams[retval];
str_info->sfreq = sst_get_sfreq(str_param);
/* power on the analog, if reqd */
if (str_param->ops == STREAM_OPS_PLAYBACK ||
str_param->ops == STREAM_OPS_PLAYBACK_DRM) {
if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
sst_drv_ctx->scard_ops->power_up_pmic_pb(
sst_drv_ctx->pmic_port_instance);
else
sst_drv_ctx->scard_ops->power_up_pmic_pb(
str_info->device);
/*Only if the playback is MP3 - Send a message*/
sst_drv_ctx->pb_streams++;
} else if (str_param->ops == STREAM_OPS_CAPTURE) {
sst_drv_ctx->scard_ops->power_up_pmic_cp(
sst_drv_ctx->pmic_port_instance);
/* Send a message if not sent already */
sst_drv_ctx->cp_streams++;
}
err:
return retval;
}
void sst_process_mad_ops(struct work_struct *work)
{
struct mad_ops_wq *mad_ops =
container_of(work, struct mad_ops_wq, wq);
int retval = 0;
switch (mad_ops->control_op) {
case SST_SND_PAUSE:
retval = sst_pause_stream(mad_ops->stream_id);
break;
case SST_SND_RESUME:
retval = sst_resume_stream(mad_ops->stream_id);
break;
case SST_SND_DROP:
retval = sst_drop_stream(mad_ops->stream_id);
break;
case SST_SND_START:
pr_debug("SST Debug: start stream\n");
retval = sst_start_stream(mad_ops->stream_id);
break;
case SST_SND_STREAM_PROCESS:
pr_debug("play/capt frames...\n");
break;
default:
pr_err(" wrong control_ops reported\n");
}
return;
}
void send_intial_rx_timeslot(void)
{
if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
&& sst_drv_ctx->pmic_vendor != SND_NC)
sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
}
/*
* sst_open_pcm_stream - Open PCM interface
*
* @str_param: parameters of pcm stream
*
* This function is called by MID sound card driver to open
* a new pcm interface
*/
int sst_open_pcm_stream(struct snd_sst_params *str_param)
{
struct stream_info *str_info;
int retval;
pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
/* LPE is suspended, resume it before proceeding*/
pr_debug("Resuming from Suspended state\n");
retval = intel_sst_resume(sst_drv_ctx->pci);
if (retval) {
pr_err("Resume Failed = %#x, abort\n", retval);
pm_runtime_put(&sst_drv_ctx->pci->dev);
return retval;
}
}
if (sst_drv_ctx->sst_state == SST_UN_INIT) {
/* FW is not downloaded */
pr_debug("DSP Downloading FW now...\n");
retval = sst_download_fw();
if (retval) {
pr_err("FW download fail %x, abort\n", retval);
pm_runtime_put(&sst_drv_ctx->pci->dev);
return retval;
}
send_intial_rx_timeslot();
}
if (!str_param) {
pm_runtime_put(&sst_drv_ctx->pci->dev);
return -EINVAL;
}
retval = sst_get_stream(str_param);
if (retval > 0) {
sst_drv_ctx->stream_cnt++;
str_info = &sst_drv_ctx->streams[retval];
str_info->src = MAD_DRV;
} else
pm_runtime_put(&sst_drv_ctx->pci->dev);
return retval;
}
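/*
 * Illustrative caller sketch (hypothetical, not part of this driver): a
 * sound card driver reaching this entry point through the registered
 * pcm_ops table would treat a positive return value as the stream id:
 *
 *	str_id = pcm_ops.open(&str_param);
 *	if (str_id <= 0)
 *		goto open_failed;
 *	...later: pcm_ops.device_control(SST_SND_START, &str_id);
 *	...and finally: pcm_ops.close(str_id);
 *
 * Only the pcm_ops table near the bottom of this file is real; the
 * surrounding control flow is assumed for illustration.
 */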
/*
* sst_close_pcm_stream - Close PCM interface
*
* @str_id: stream id to be closed
*
* This function is called by MID sound card driver to close
* an existing pcm interface
*/
int sst_close_pcm_stream(unsigned int str_id)
{
struct stream_info *stream;
pr_debug("sst: stream free called\n");
if (sst_validate_strid(str_id))
return -EINVAL;
stream = &sst_drv_ctx->streams[str_id];
free_stream_context(str_id);
stream->pcm_substream = NULL;
stream->status = STREAM_UN_INIT;
stream->period_elapsed = NULL;
sst_drv_ctx->stream_cnt--;
pr_debug("sst: will call runtime put now\n");
pm_runtime_put(&sst_drv_ctx->pci->dev);
return 0;
}
/*
* sst_device_control - Set Control params
*
* @cmd: control cmd to be set
* @arg: command argument
*
* This function is called by MID sound card driver to set
* SST/Sound card controls for an opened stream.
* This is registered with MID driver
*/
int sst_device_control(int cmd, void *arg)
{
int retval = 0, str_id = 0;
switch (cmd) {
case SST_SND_PAUSE:
case SST_SND_RESUME:
case SST_SND_DROP:
case SST_SND_START:
sst_drv_ctx->mad_ops.control_op = cmd;
sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
break;
case SST_SND_STREAM_INIT: {
struct pcm_stream_info *str_info;
struct stream_info *stream;
pr_debug("stream init called\n");
str_info = (struct pcm_stream_info *)arg;
str_id = str_info->str_id;
retval = sst_validate_strid(str_id);
if (retval)
break;
stream = &sst_drv_ctx->streams[str_id];
pr_debug("setting the period ptrs\n");
stream->pcm_substream = str_info->mad_substream;
stream->period_elapsed = str_info->period_elapsed;
stream->sfreq = str_info->sfreq;
stream->prev = stream->status;
stream->status = STREAM_INIT;
break;
}
case SST_SND_BUFFER_POINTER: {
struct pcm_stream_info *stream_info;
struct snd_sst_tstamp fw_tstamp = {0,};
struct stream_info *stream;
stream_info = (struct pcm_stream_info *)arg;
str_id = stream_info->str_id;
retval = sst_validate_strid(str_id);
if (retval)
break;
stream = &sst_drv_ctx->streams[str_id];
if (!stream->pcm_substream)
break;
memcpy_fromio(&fw_tstamp,
((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
+(str_id * sizeof(fw_tstamp))),
sizeof(fw_tstamp));
pr_debug("Pointer Query on strid = %d ops %d\n",
str_id, stream->ops);
if (stream->ops == STREAM_OPS_PLAYBACK)
stream_info->buffer_ptr = fw_tstamp.samples_rendered;
else
stream_info->buffer_ptr = fw_tstamp.samples_processed;
pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
fw_tstamp.samples_rendered, stream_info->buffer_ptr);
break;
}
case SST_ENABLE_RX_TIME_SLOT: {
int status = *(int *)arg;
sst_drv_ctx->rx_time_slot_status = status;
sst_enable_rx_timeslot(status);
break;
}
default:
/* Illegal case */
pr_warn("illegal req\n");
return -EINVAL;
}
return retval;
}
struct intel_sst_pcm_control pcm_ops = {
.open = sst_open_pcm_stream,
.device_control = sst_device_control,
.close = sst_close_pcm_stream,
};
struct intel_sst_card_ops sst_pmic_ops = {
.pcm_control = &pcm_ops,
};
/*
* register_sst_card - function for sound card to register
*
* @card: pointer to structure of operations
*
* This function is called when the card driver loads and is ready for registration
*/
int register_sst_card(struct intel_sst_card_ops *card)
{
if (!sst_drv_ctx) {
pr_err("No SST driver register card reject\n");
return -ENODEV;
}
if (!card || !card->module_name) {
pr_err("Null Pointer Passed\n");
return -EINVAL;
}
if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
/* register this driver */
if ((strncmp(SST_CARD_NAMES, card->module_name,
strlen(SST_CARD_NAMES))) == 0) {
sst_drv_ctx->pmic_vendor = card->vendor_id;
sst_drv_ctx->scard_ops = card->scard_ops;
sst_pmic_ops.module_name = card->module_name;
sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
card->pcm_control = sst_pmic_ops.pcm_control;
return 0;
} else {
pr_err("strcmp fail %s\n", card->module_name);
return -EINVAL;
}
} else {
/* already registered a driver */
pr_err("Repeat for registration..denied\n");
return -EBADRQC;
}
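/*
 * Note: every branch above returns, so the scard_ops fallback below is
 * unreachable; it is left in place to preserve the original behaviour.
 */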
/* The ASoC code doesn't set scard_ops */
if (sst_drv_ctx->scard_ops)
sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
return 0;
}
EXPORT_SYMBOL_GPL(register_sst_card);
/*
* unregister_sst_card- function for sound card to un-register
*
* @card: pointer to structure of operations
*
* This function is called when card driver unloads
*/
void unregister_sst_card(struct intel_sst_card_ops *card)
{
if (sst_pmic_ops.pcm_control == card->pcm_control) {
/* unreg */
sst_pmic_ops.module_name = "";
sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
pr_debug("Unregistered %s\n", card->module_name);
}
return;
}
EXPORT_SYMBOL_GPL(unregister_sst_card);
| gpl-2.0 |
limbo127/KVMGT-kernel | drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 156 | 18543 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
#include "../stats.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "fw.h"
#include "trx.h"
#include "led.h"
static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
{
__le16 fc = rtl_get_fc(skb);
if (unlikely(ieee80211_is_beacon(fc)))
return QSLT_BEACON;
if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
return QSLT_MGNT;
if (ieee80211_is_nullfunc(fc))
return QSLT_HIGH;
return skb->priority;
}
static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats, u8 *pdesc,
struct rx_fwinfo *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct phy_sts_cck_8192s_t *cck_buf;
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
s8 rx_pwr_all = 0, rx_pwr[4];
u8 rf_rx_num = 0, evm, pwdb_all;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
bool is_cck = pstats->is_cck;
pstats->packet_matchbssid = packet_match_bssid;
pstats->packet_toself = packet_toself;
pstats->packet_beacon = packet_beacon;
pstats->rx_mimo_sig_qual[0] = -1;
pstats->rx_mimo_sig_qual[1] = -1;
if (is_cck) {
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
cck_highpwr = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER2,
0x200);
else
cck_highpwr = false;
if (!cck_highpwr) {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = cck_buf->cck_agc_rpt & 0xc0;
report = report >> 6;
switch (report) {
case 0x3:
rx_pwr_all = -40 - (cck_agc_rpt & 0x3e);
break;
case 0x2:
rx_pwr_all = -20 - (cck_agc_rpt & 0x3e);
break;
case 0x1:
rx_pwr_all = -2 - (cck_agc_rpt & 0x3e);
break;
case 0x0:
rx_pwr_all = 14 - (cck_agc_rpt & 0x3e);
break;
}
} else {
u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
report = p_drvinfo->cfosho[0] & 0x60;
report = report >> 5;
switch (report) {
case 0x3:
rx_pwr_all = -40 - ((cck_agc_rpt & 0x1f) << 1);
break;
case 0x2:
rx_pwr_all = -20 - ((cck_agc_rpt & 0x1f) << 1);
break;
case 0x1:
rx_pwr_all = -2 - ((cck_agc_rpt & 0x1f) << 1);
break;
case 0x0:
rx_pwr_all = 14 - ((cck_agc_rpt & 0x1f) << 1);
break;
}
}
pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
/* CCK gain is smaller than OFDM/MCS gain, */
/* so we add a gain diff found by experiment; the value is 6 */
pwdb_all += 6;
if (pwdb_all > 100)
pwdb_all = 100;
/* modify the offset to match the OFDM gain index. */
if (pwdb_all > 34 && pwdb_all <= 42)
pwdb_all -= 2;
else if (pwdb_all > 26 && pwdb_all <= 34)
pwdb_all -= 6;
else if (pwdb_all > 14 && pwdb_all <= 26)
pwdb_all -= 8;
else if (pwdb_all > 4 && pwdb_all <= 14)
pwdb_all -= 4;
pstats->rx_pwdb_all = pwdb_all;
pstats->recvsignalpower = rx_pwr_all;
if (packet_match_bssid) {
u8 sq;
if (pstats->rx_pwdb_all > 40) {
sq = 100;
} else {
sq = cck_buf->sq_rpt;
if (sq > 64)
sq = 0;
else if (sq < 20)
sq = 100;
else
sq = ((64 - sq) * 100) / 44;
}
pstats->signalquality = sq;
pstats->rx_mimo_sig_qual[0] = sq;
pstats->rx_mimo_sig_qual[1] = -1;
}
} else {
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
if (rtlpriv->dm.rfpath_rxenable[i])
rf_rx_num++;
rx_pwr[i] = ((p_drvinfo->gain_trsw[i] &
0x3f) * 2) - 110;
rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += rssi;
rtlpriv->stats.rx_snr_db[i] =
(long)(p_drvinfo->rxsnr[i] / 2);
if (packet_match_bssid)
pstats->rx_mimo_signalstrength[i] = (u8) rssi;
}
rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
if (pstats->is_ht && pstats->rate >= DESC92_RATEMCS8 &&
pstats->rate <= DESC92_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
for (i = 0; i < max_spatial_stream; i++) {
evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
if (packet_match_bssid) {
if (i == 0)
pstats->signalquality = (u8)(evm &
0xff);
pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
}
}
}
if (is_cck)
pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength = (u8) (rtl_signal_scale_mapping(hw,
total_rssi /= rf_rx_num));
}
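/*
 * Worked example for the low-power CCK decode above (illustrative
 * numbers): cck_agc_rpt = 0x9e gives report = (0x9e & 0xc0) >> 6 = 0x2,
 * so rx_pwr_all = -20 - (0x9e & 0x3e) = -20 - 30 = -50 (dBm), which is
 * then mapped to a percentage and bumped by the +6 CCK/OFDM gain offset.
 */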
static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb, struct rtl_stats *pstats,
u8 *pdesc, struct rx_fwinfo *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
__le16 fc;
u16 type, cfc;
bool packet_matchbssid, packet_toself, packet_beacon = false;
tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
fc = hdr->frame_control;
cfc = le16_to_cpu(fc);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
ether_addr_equal(mac->bssid,
(cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
(cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
hdr->addr3) &&
(!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
packet_toself = packet_matchbssid &&
ether_addr_equal(praddr, rtlefuse->dev_addr);
if (ieee80211_is_beacon(fc))
packet_beacon = true;
_rtl92se_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
packet_matchbssid, packet_toself, packet_beacon);
rtl_process_phyinfo(hw, tmp_buf, pstats);
}
bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status, u8 *pdesc,
struct sk_buff *skb)
{
struct rx_fwinfo *p_drvinfo;
u32 phystatus = (u32)GET_RX_STATUS_DESC_PHY_STATUS(pdesc);
struct ieee80211_hdr *hdr;
bool first_ampdu = false;
stats->length = (u16)GET_RX_STATUS_DESC_PKT_LEN(pdesc);
stats->rx_drvinfo_size = (u8)GET_RX_STATUS_DESC_DRVINFO_SIZE(pdesc) * 8;
stats->rx_bufshift = (u8)(GET_RX_STATUS_DESC_SHIFT(pdesc) & 0x03);
stats->icv = (u16)GET_RX_STATUS_DESC_ICV(pdesc);
stats->crc = (u16)GET_RX_STATUS_DESC_CRC32(pdesc);
stats->hwerror = (u16)(stats->crc | stats->icv);
stats->decrypted = !GET_RX_STATUS_DESC_SWDEC(pdesc);
stats->rate = (u8)GET_RX_STATUS_DESC_RX_MCS(pdesc);
stats->shortpreamble = (u16)GET_RX_STATUS_DESC_SPLCP(pdesc);
stats->isampdu = (bool)(GET_RX_STATUS_DESC_PAGGR(pdesc) == 1);
stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1)
&& (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1));
stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc);
stats->rx_is40Mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc);
stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc);
stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc);
if (stats->hwerror)
return false;
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
if (stats->crc)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (stats->rx_is40Mhzpacket)
rx_status->flag |= RX_FLAG_40MHZ;
if (stats->is_ht)
rx_status->flag |= RX_FLAG_HT;
rx_status->flag |= RX_FLAG_MACTIME_START;
/* hw will set stats->decrypted true if it finds the
* frame is an open data frame or a mgmt frame,
* hw will not decrypt a robust management frame
* for IEEE80211w but still sets stats->decrypted
* true, so here we should set it back to undecrypted
* for IEEE80211w frames, and the mac80211 sw will help
* to decrypt them */
if (stats->decrypted) {
hdr = (struct ieee80211_hdr *)(skb->data +
stats->rx_drvinfo_size + stats->rx_bufshift);
if (!hdr) {
/* during testing, hdr was NULL here */
return false;
}
if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
rx_status->flag |= RX_FLAG_DECRYPTED;
}
rx_status->rate_idx = rtlwifi_rate_mapping(hw,
stats->is_ht, stats->rate, first_ampdu);
rx_status->mactime = stats->timestamp_low;
if (phystatus) {
p_drvinfo = (struct rx_fwinfo *)(skb->data +
stats->rx_bufshift);
_rtl92se_translate_rx_signal_stuff(hw, skb, stats, pdesc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
rx_status->signal = stats->recvsignalpower + 10;
return true;
}
void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc_tx,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
u8 fw_qsel = _rtl92se_map_hwqueue_to_fwqueue(skb, hw_queue);
bool firstseg = (!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)));
bool lastseg = (!(hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)));
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
u8 bw_40 = 0;
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
bw_40 = mac->bw_40;
} else if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
if (sta)
bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40;
}
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE_RTL8192S);
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlpriv->dm.useramask) {
/* set txdesc macId */
if (ptcb_desc->mac_id < 32) {
SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
reserved_macid |= ptcb_desc->mac_id;
}
}
SET_TX_DESC_RSVD_MACID(pdesc, reserved_macid);
SET_TX_DESC_TXHT(pdesc, ((ptcb_desc->hw_rate >=
DESC92_RATEMCS0) ? 1 : 0));
if (rtlhal->version == VERSION_8192S_ACUT) {
if (ptcb_desc->hw_rate == DESC92_RATE1M ||
ptcb_desc->hw_rate == DESC92_RATE2M ||
ptcb_desc->hw_rate == DESC92_RATE5_5M ||
ptcb_desc->hw_rate == DESC92_RATE11M) {
ptcb_desc->hw_rate = DESC92_RATE12M;
}
}
SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
SET_TX_DESC_TX_SHORT(pdesc, 0);
/* Aggregation related */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
SET_TX_DESC_AGG_ENABLE(pdesc, 1);
/* For AMPDU, we must insert SSN into TX_DESC */
SET_TX_DESC_SEQ(pdesc, seq_number);
/* Protection mode related */
/* For 92S, if RTS/CTS are set, HW will execute RTS. */
/* We choose only one protection mode to execute */
SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
!ptcb_desc->cts_enable) ? 1 : 0));
SET_TX_DESC_CTS_ENABLE(pdesc, ((ptcb_desc->cts_enable) ?
1 : 0));
SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
SET_TX_DESC_RTS_BANDWIDTH(pdesc, 0);
SET_TX_DESC_RTS_SUB_CARRIER(pdesc, ptcb_desc->rts_sc);
SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
DESC92_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0)
: (ptcb_desc->rts_use_shortgi ? 1 : 0)));
/* Set Bandwidth and sub-channel settings. */
if (bw_40) {
if (ptcb_desc->packet_bw) {
SET_TX_DESC_TX_BANDWIDTH(pdesc, 1);
/* use duplicated mode */
SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
} else {
SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
SET_TX_DESC_TX_SUB_CARRIER(pdesc,
mac->cur_40_prime_sc);
}
} else {
SET_TX_DESC_TX_BANDWIDTH(pdesc, 0);
SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
}
/* 3 Fill necessary field in First Descriptor */
/*DWORD 0*/
SET_TX_DESC_LINIP(pdesc, 0);
SET_TX_DESC_OFFSET(pdesc, 32);
SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
/*DWORD 1*/
SET_TX_DESC_RA_BRSR_ID(pdesc, ptcb_desc->ratr_index);
/* Fill security related */
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf;
keyconf = info->control.hw_key;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_TKIP:
SET_TX_DESC_SEC_TYPE(pdesc, 0x2);
break;
case WLAN_CIPHER_SUITE_CCMP:
SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
break;
default:
SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
break;
}
}
/* Set Packet ID */
SET_TX_DESC_PACKET_ID(pdesc, 0);
/* We will assign management queue to BK. */
SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
/* Always enable all rate fallback range */
SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
/* Fix: I don't know why hw uses 6.5M to tx when this is set */
SET_TX_DESC_USER_RATE(pdesc,
ptcb_desc->use_driver_rate ? 1 : 0);
/* Set NON_QOS bit. */
if (!ieee80211_is_data_qos(fc))
SET_TX_DESC_NON_QOS(pdesc, 1);
}
/* Fill fields that are required to be initialized
* in all of the descriptors */
/*DWORD 0 */
SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
/* DWORD 7 */
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
/* DOWRD 8 */
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"DMA mapping error");
return;
}
/* Clear all status */
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_CMDDESC_SIZE_RTL8192S);
/* This bit indicates this packet is used for FW download. */
if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
/* For firmware download we only need to set LINIP */
SET_TX_DESC_LINIP(pdesc, tcb_desc->last_inipkt);
/* 92SE must set this to 1, otherwise firmware download hits a HW DMA error */
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
/* 92SE need not set the TX packet size during firmware download */
SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
wmb();
SET_TX_DESC_OWN(pdesc, 1);
} else { /* H2C Command Desc format (Host TXCMD) */
/* 92SE must set this to 1, otherwise firmware download hits a HW DMA error */
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
/* Buffer size + command header */
SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
/* Fixed queue of H2C command */
SET_TX_DESC_QUEUE_SEL(pdesc, 0x13);
SET_BITS_TO_LE_4BYTE(skb->data, 24, 7, rtlhal->h2c_txcmd_seq);
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
wmb();
SET_TX_DESC_OWN(pdesc, 1);
}
}
void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
{
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
SET_TX_DESC_OWN(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
break;
default:
RT_ASSERT(false, "ERR txdesc :%d not process\n",
desc_name);
break;
}
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
SET_RX_STATUS_DESC_OWN(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
SET_RX_STATUS__DESC_BUFF_ADDR(pdesc, *(u32 *) val);
break;
case HW_DESC_RXPKT_LEN:
SET_RX_STATUS_DESC_PKT_LEN(pdesc, *(u32 *) val);
break;
case HW_DESC_RXERO:
SET_RX_STATUS_DESC_EOR(pdesc, 1);
break;
default:
RT_ASSERT(false, "ERR rxdesc :%d not process\n",
desc_name);
break;
}
}
}
u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
{
u32 ret = 0;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = GET_TX_DESC_OWN(desc);
break;
case HW_DESC_TXBUFF_ADDR:
ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
break;
default:
RT_ASSERT(false, "ERR txdesc :%d not process\n",
desc_name);
break;
}
} else {
switch (desc_name) {
case HW_DESC_OWN:
ret = GET_RX_STATUS_DESC_OWN(desc);
break;
case HW_DESC_RXPKT_LEN:
ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
break;
default:
RT_ASSERT(false, "ERR rxdesc :%d not process\n",
desc_name);
break;
}
}
return ret;
}
void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_word(rtlpriv, TP_POLL, BIT(0) << (hw_queue));
}
| gpl-2.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-5.4/drivers/gpu/drm/nouveau/nvif/disp.c | 156 | 1953 | /*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nvif/disp.h>
#include <nvif/device.h>
#include <nvif/class.h>
void
nvif_disp_dtor(struct nvif_disp *disp)
{
nvif_object_fini(&disp->object);
}
int
nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
{ TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
{ GP100_DISP, -1 },
{ GM200_DISP, -1 },
{ GM107_DISP, -1 },
{ GK110_DISP, -1 },
{ GK104_DISP, -1 },
{ GF110_DISP, -1 },
{ GT214_DISP, -1 },
{ GT206_DISP, -1 },
{ GT200_DISP, -1 },
{ G82_DISP, -1 },
{ NV50_DISP, -1 },
{ NV04_DISP, -1 },
{}
};
int cid = nvif_sclass(&device->object, disps, oclass);
disp->object.client = NULL;
if (cid < 0)
return cid;
return nvif_object_init(&device->object, 0, disps[cid].oclass,
NULL, 0, &disp->object);
}
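/*
 * Illustrative usage sketch (hypothetical caller, names assumed):
 *
 *	struct nvif_disp disp;
 *	int ret = nvif_disp_ctor(&drm->client.device, 0, &disp);
 *	if (ret)
 *		return ret;	(no listed display class is supported)
 *	...
 *	nvif_disp_dtor(&disp);
 *
 * The disps[] table is ordered newest-first, so probing favours the most
 * recent display class the device actually exposes.
 */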
| gpl-2.0 |
fireshots/linux | drivers/misc/mei/hw-txe.c | 668 | 30610 | /*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2013-2014, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/irqreturn.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hw-txe.h"
#include "client.h"
#include "hbm.h"
/**
* mei_txe_reg_read - Reads 32bit data from the txe device
*
* @base_addr: registers base address
* @offset: register offset
*
* Return: register value
*/
static inline u32 mei_txe_reg_read(void __iomem *base_addr,
unsigned long offset)
{
return ioread32(base_addr + offset);
}
/**
* mei_txe_reg_write - Writes 32bit data to the txe device
*
* @base_addr: registers base address
* @offset: register offset
* @value: the value to write
*/
static inline void mei_txe_reg_write(void __iomem *base_addr,
unsigned long offset, u32 value)
{
iowrite32(value, base_addr + offset);
}
/**
* mei_txe_sec_reg_read_silent - Reads 32bit data from the SeC BAR
*
* @hw: the txe hardware structure
* @offset: register offset
*
* Doesn't check for aliveness while reading 32bit data from the SeC BAR
*
* Return: register value
*/
static inline u32 mei_txe_sec_reg_read_silent(struct mei_txe_hw *hw,
unsigned long offset)
{
return mei_txe_reg_read(hw->mem_addr[SEC_BAR], offset);
}
/**
* mei_txe_sec_reg_read - Reads 32bit data from the SeC BAR
*
* @hw: the txe hardware structure
* @offset: register offset
*
* Reads 32bit data from the SeC BAR and shouts loudly if aliveness is not set
*
* Return: register value
*/
static inline u32 mei_txe_sec_reg_read(struct mei_txe_hw *hw,
unsigned long offset)
{
WARN(!hw->aliveness, "sec read: aliveness not asserted\n");
return mei_txe_sec_reg_read_silent(hw, offset);
}
/**
* mei_txe_sec_reg_write_silent - Writes 32bit data to the SeC BAR
* doesn't check for aliveness
*
* @hw: the txe hardware structure
* @offset: register offset
* @value: value to write
*
* Doesn't check for aliveness while writing 32bit data to the SeC BAR
*/
static inline void mei_txe_sec_reg_write_silent(struct mei_txe_hw *hw,
unsigned long offset, u32 value)
{
mei_txe_reg_write(hw->mem_addr[SEC_BAR], offset, value);
}
/**
* mei_txe_sec_reg_write - Writes 32bit data to the SeC BAR
*
* @hw: the txe hardware structure
* @offset: register offset
* @value: value to write
*
* Writes 32bit data to the SeC BAR and shouts loudly if aliveness is not set
*/
static inline void mei_txe_sec_reg_write(struct mei_txe_hw *hw,
unsigned long offset, u32 value)
{
WARN(!hw->aliveness, "sec write: aliveness not asserted\n");
mei_txe_sec_reg_write_silent(hw, offset, value);
}
/**
* mei_txe_br_reg_read - Reads 32bit data from the Bridge BAR
*
* @hw: the txe hardware structure
* @offset: offset from which to read the data
*
* Return: the byte read.
*/
static inline u32 mei_txe_br_reg_read(struct mei_txe_hw *hw,
unsigned long offset)
{
return mei_txe_reg_read(hw->mem_addr[BRIDGE_BAR], offset);
}
/**
* mei_txe_br_reg_write - Writes 32bit data to the Bridge BAR
*
* @hw: the txe hardware structure
* @offset: offset from which to write the data
* @value: the byte to write
*/
static inline void mei_txe_br_reg_write(struct mei_txe_hw *hw,
unsigned long offset, u32 value)
{
mei_txe_reg_write(hw->mem_addr[BRIDGE_BAR], offset, value);
}
/**
* mei_txe_aliveness_set - request for aliveness change
*
* @dev: the device structure
* @req: requested aliveness value
*
* Request for aliveness change and returns true if the change is
* really needed and false if aliveness is already
* in the requested state
*
* Locking: called under "dev->device_lock" lock
*
* Return: true if request was sent
*/
static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
bool do_req = hw->aliveness != req;
dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
hw->aliveness, req);
if (do_req) {
dev->pg_event = MEI_PG_EVENT_WAIT;
mei_txe_br_reg_write(hw, SICR_HOST_ALIVENESS_REQ_REG, req);
}
return do_req;
}
/**
* mei_txe_aliveness_req_get - get aliveness requested register value
*
* @dev: the device structure
*
* Extract the SICR_HOST_ALIVENESS_REQ_REQUESTED bit
* from the SICR_HOST_ALIVENESS_REQ register value
*
* Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value
*/
static u32 mei_txe_aliveness_req_get(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 reg;
reg = mei_txe_br_reg_read(hw, SICR_HOST_ALIVENESS_REQ_REG);
return reg & SICR_HOST_ALIVENESS_REQ_REQUESTED;
}
/**
* mei_txe_aliveness_get - get aliveness response register value
*
* @dev: the device structure
*
* Return: HICR_HOST_ALIVENESS_RESP_ACK bit from HICR_HOST_ALIVENESS_RESP
* register
*/
static u32 mei_txe_aliveness_get(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 reg;
reg = mei_txe_br_reg_read(hw, HICR_HOST_ALIVENESS_RESP_REG);
return reg & HICR_HOST_ALIVENESS_RESP_ACK;
}
/**
* mei_txe_aliveness_poll - waits for aliveness to settle
*
* @dev: the device structure
* @expected: expected aliveness value
*
* Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
* Return: 0 if the expected value was received, -ETIME otherwise
*/
static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
ktime_t stop, start;
start = ktime_get();
stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
do {
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
ktime_to_us(ktime_sub(ktime_get(), start)));
return 0;
}
usleep_range(20, 50);
} while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
dev_err(dev->dev, "aliveness timed out\n");
return -ETIME;
}
/**
* mei_txe_aliveness_wait - waits for aliveness to settle
*
* @dev: the device structure
* @expected: expected aliveness value
*
* Waits for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
*
* Return: 0 on success and < 0 otherwise
*/
static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
const unsigned long timeout =
msecs_to_jiffies(SEC_ALIVENESS_WAIT_TIMEOUT);
long err;
int ret;
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected)
return 0;
mutex_unlock(&dev->device_lock);
err = wait_event_timeout(hw->wait_aliveness_resp,
dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
mutex_lock(&dev->device_lock);
hw->aliveness = mei_txe_aliveness_get(dev);
ret = hw->aliveness == expected ? 0 : -ETIME;
if (ret)
dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
err, hw->aliveness, dev->pg_event);
else
dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
jiffies_to_msecs(timeout - err),
hw->aliveness, dev->pg_event);
dev->pg_event = MEI_PG_EVENT_IDLE;
return ret;
}
/**
* mei_txe_aliveness_set_sync - sets and waits for aliveness to complete
*
* @dev: the device structure
* @req: requested aliveness value
*
* Return: 0 on success and < 0 otherwise
*/
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
{
if (mei_txe_aliveness_set(dev, req))
return mei_txe_aliveness_wait(dev, req);
return 0;
}
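/*
 * Typical sequence (a sketch of how this file itself uses the helper):
 *
 *	ret = mei_txe_aliveness_set_sync(dev, 1);	keep SeC awake
 *	...access SEC BAR registers safely...
 *	ret = mei_txe_aliveness_set_sync(dev, 0);	allow power gating
 *
 * mei_txe_hw_start() below performs the assert step; the release with 0
 * is assumed here for symmetry.
 */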
/**
* mei_txe_pg_in_transition - is device now in pg transition
*
* @dev: the device structure
*
* Return: true if in pg transition, false otherwise
*/
static bool mei_txe_pg_in_transition(struct mei_device *dev)
{
return dev->pg_event == MEI_PG_EVENT_WAIT;
}
/**
* mei_txe_pg_is_enabled - detect if PG is supported by HW
*
* @dev: the device structure
*
* Return: true is pg supported, false otherwise
*/
static bool mei_txe_pg_is_enabled(struct mei_device *dev)
{
return true;
}
/**
* mei_txe_pg_state - translate aliveness register value
* to the mei power gating state
*
* @dev: the device structure
*
* Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
*/
static inline enum mei_pg_state mei_txe_pg_state(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return hw->aliveness ? MEI_PG_OFF : MEI_PG_ON;
}
/**
* mei_txe_input_ready_interrupt_enable - sets the Input Ready Interrupt
*
* @dev: the device structure
*/
static void mei_txe_input_ready_interrupt_enable(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 hintmsk;
/* Enable the SEC_IPC_HOST_INT_MASK_IN_RDY interrupt */
hintmsk = mei_txe_sec_reg_read(hw, SEC_IPC_HOST_INT_MASK_REG);
hintmsk |= SEC_IPC_HOST_INT_MASK_IN_RDY;
mei_txe_sec_reg_write(hw, SEC_IPC_HOST_INT_MASK_REG, hintmsk);
}
/**
* mei_txe_input_doorbell_set - sets bit 0 in
* SEC_IPC_INPUT_DOORBELL.IPC_INPUT_DOORBELL.
*
* @hw: the txe hardware structure
*/
static void mei_txe_input_doorbell_set(struct mei_txe_hw *hw)
{
/* Clear the interrupt cause */
clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause);
mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_DOORBELL_REG, 1);
}
/**
* mei_txe_output_ready_set - Sets the SICR_SEC_IPC_OUTPUT_STATUS bit to 1
*
* @hw: the txe hardware structure
*/
static void mei_txe_output_ready_set(struct mei_txe_hw *hw)
{
mei_txe_br_reg_write(hw,
SICR_SEC_IPC_OUTPUT_STATUS_REG,
SEC_IPC_OUTPUT_STATUS_RDY);
}
/**
* mei_txe_is_input_ready - check if TXE is ready for receiving data
*
* @dev: the device structure
*
* Return: true if INPUT STATUS READY bit is set
*/
static bool mei_txe_is_input_ready(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 status;
status = mei_txe_sec_reg_read(hw, SEC_IPC_INPUT_STATUS_REG);
return !!(SEC_IPC_INPUT_STATUS_RDY & status);
}
/**
* mei_txe_intr_clear - clear all interrupts
*
* @dev: the device structure
*/
static inline void mei_txe_intr_clear(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_sec_reg_write_silent(hw, SEC_IPC_HOST_INT_STATUS_REG,
SEC_IPC_HOST_INT_STATUS_PENDING);
mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_STS_MSK);
mei_txe_br_reg_write(hw, HHISR_REG, IPC_HHIER_MSK);
}
/**
* mei_txe_intr_disable - disable all interrupts
*
* @dev: the device structure
*/
static void mei_txe_intr_disable(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw, HHIER_REG, 0);
mei_txe_br_reg_write(hw, HIER_REG, 0);
}
/**
* mei_txe_intr_enable - enable all interrupts
*
* @dev: the device structure
*/
static void mei_txe_intr_enable(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw, HHIER_REG, IPC_HHIER_MSK);
mei_txe_br_reg_write(hw, HIER_REG, HIER_INT_EN_MSK);
}
/**
* mei_txe_pending_interrupts - check if there are pending interrupts
* only Aliveness, Readiness, Input ready, and Output doorbell are of relevance
*
* @dev: the device structure
*
* Checks if there are pending interrupts
* only Aliveness, Readiness, Input ready, and Output doorbell are relevant
*
* Return: true if there are pending interrupts
*/
static bool mei_txe_pending_interrupts(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
bool ret = (hw->intr_cause & (TXE_INTR_READINESS |
TXE_INTR_ALIVENESS |
TXE_INTR_IN_READY |
TXE_INTR_OUT_DB));
if (ret) {
dev_dbg(dev->dev,
"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
!!(hw->intr_cause & TXE_INTR_IN_READY),
!!(hw->intr_cause & TXE_INTR_READINESS),
!!(hw->intr_cause & TXE_INTR_ALIVENESS),
!!(hw->intr_cause & TXE_INTR_OUT_DB));
}
return ret;
}
/**
* mei_txe_input_payload_write - write a dword to the host buffer
* at offset idx
*
* @dev: the device structure
* @idx: index in the host buffer
* @value: value
*/
static void mei_txe_input_payload_write(struct mei_device *dev,
unsigned long idx, u32 value)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_sec_reg_write(hw, SEC_IPC_INPUT_PAYLOAD_REG +
(idx * sizeof(u32)), value);
}
/**
* mei_txe_out_data_read - read dword from the device buffer
* at offset idx
*
* @dev: the device structure
* @idx: index in the device buffer
*
* Return: register value at index
*/
static u32 mei_txe_out_data_read(const struct mei_device *dev,
unsigned long idx)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return mei_txe_br_reg_read(hw,
BRIDGE_IPC_OUTPUT_PAYLOAD_REG + (idx * sizeof(u32)));
}
/* Readiness */
/**
* mei_txe_readiness_set_host_rdy - set host readiness bit
*
* @dev: the device structure
*/
static void mei_txe_readiness_set_host_rdy(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw,
SICR_HOST_IPC_READINESS_REQ_REG,
SICR_HOST_IPC_READINESS_HOST_RDY);
}
/**
* mei_txe_readiness_clear - clear host readiness bit
*
* @dev: the device structure
*/
static void mei_txe_readiness_clear(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
mei_txe_br_reg_write(hw, SICR_HOST_IPC_READINESS_REQ_REG,
SICR_HOST_IPC_READINESS_RDY_CLR);
}
/**
* mei_txe_readiness_get - Reads and returns
* the HICR_SEC_IPC_READINESS register value
*
* @dev: the device structure
*
* Return: the HICR_SEC_IPC_READINESS register value
*/
static u32 mei_txe_readiness_get(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
}
/**
* mei_txe_readiness_is_sec_rdy - check readiness
* for HICR_SEC_IPC_READINESS_SEC_RDY
*
* @readiness: cached readiness state
*
* Return: true if readiness bit is set
*/
static inline bool mei_txe_readiness_is_sec_rdy(u32 readiness)
{
return !!(readiness & HICR_SEC_IPC_READINESS_SEC_RDY);
}
/**
* mei_txe_hw_is_ready - check if the hw is ready
*
* @dev: the device structure
*
* Return: true if sec is ready
*/
static bool mei_txe_hw_is_ready(struct mei_device *dev)
{
u32 readiness = mei_txe_readiness_get(dev);
return mei_txe_readiness_is_sec_rdy(readiness);
}
/**
* mei_txe_host_is_ready - check if the host is ready
*
* @dev: the device structure
*
* Return: true if host is ready
*/
static inline bool mei_txe_host_is_ready(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 reg = mei_txe_br_reg_read(hw, HICR_SEC_IPC_READINESS_REG);
return !!(reg & HICR_SEC_IPC_READINESS_HOST_RDY);
}
/**
* mei_txe_readiness_wait - wait till readiness settles
*
* @dev: the device structure
*
* Return: 0 on success and -ETIME on timeout
*/
static int mei_txe_readiness_wait(struct mei_device *dev)
{
if (mei_txe_hw_is_ready(dev))
return 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready,
msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
dev_err(dev->dev, "wait for readiness failed\n");
return -ETIME;
}
dev->recvd_hw_ready = false;
return 0;
}
static const struct mei_fw_status mei_txe_fw_sts = {
.count = 2,
.status[0] = PCI_CFG_TXE_FW_STS0,
.status[1] = PCI_CFG_TXE_FW_STS1
};
/**
* mei_txe_fw_status - read fw status register from pci config space
*
* @dev: mei device
* @fw_status: fw status register values
*
* Return: 0 on success, error otherwise
*/
static int mei_txe_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
int i;
if (!fw_status)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = pci_read_config_dword(pdev,
fw_src->status[i], &fw_status->status[i]);
if (ret)
return ret;
}
return 0;
}
/**
* mei_txe_hw_config - configure hardware at the start of the devices
*
* @dev: the device structure
*
* Configure hardware at the start of the device should be done only
* once at the device probe time
*/
static void mei_txe_hw_config(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
/* Doesn't change in runtime */
dev->hbuf_depth = PAYLOAD_SIZE / 4;
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
}
/**
* mei_txe_write - writes a message to device.
*
* @dev: the device structure
* @header: header of message
* @buf: message buffer will be written
*
* Return: 0 if success, <0 - otherwise.
*/
static int mei_txe_write(struct mei_device *dev,
struct mei_msg_hdr *header, unsigned char *buf)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
unsigned long rem;
unsigned long length;
int slots = dev->hbuf_depth;
u32 *reg_buf = (u32 *)buf;
u32 dw_cnt;
int i;
if (WARN_ON(!header || !buf))
return -EINVAL;
length = header->length;
dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
dw_cnt = mei_data2slots(length);
if (dw_cnt > slots)
return -EMSGSIZE;
if (WARN(!hw->aliveness, "txe write: aliveness not asserted\n"))
return -EAGAIN;
/* Enable Input Ready Interrupt. */
mei_txe_input_ready_interrupt_enable(dev);
if (!mei_txe_is_input_ready(dev)) {
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
return -EAGAIN;
}
mei_txe_input_payload_write(dev, 0, *((u32 *)header));
for (i = 0; i < length / 4; i++)
mei_txe_input_payload_write(dev, i + 1, reg_buf[i]);
rem = length & 0x3;
if (rem > 0) {
u32 reg = 0;
memcpy(&reg, &buf[length - rem], rem);
mei_txe_input_payload_write(dev, i + 1, reg);
}
/* after each write the whole buffer is consumed */
hw->slots = 0;
/* Set Input-Doorbell */
mei_txe_input_doorbell_set(hw);
return 0;
}
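/*
 * Worked example for the tail handling above: for length == 6 the loop
 * writes one full dword (buf[0..3]) into payload slot 1, rem == 2, and
 * the trailing bytes buf[4..5] are zero-padded into a u32 written to
 * slot 2; slot 0 always carries the message header.
 */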
/**
* mei_txe_hbuf_max_len - mimics the me hbuf circular buffer
*
* @dev: the device structure
*
* Return: the PAYLOAD_SIZE - 4
*/
static size_t mei_txe_hbuf_max_len(const struct mei_device *dev)
{
return PAYLOAD_SIZE - sizeof(struct mei_msg_hdr);
}
/**
* mei_txe_hbuf_empty_slots - mimics the me hbuf circular buffer
*
* @dev: the device structure
*
* Return: the number of free slots currently available (hw->slots)
*/
static int mei_txe_hbuf_empty_slots(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
return hw->slots;
}
/**
* mei_txe_count_full_read_slots - mimics the me device circular buffer
*
* @dev: the device structure
*
* Return: always buffer size in dwords count
*/
static int mei_txe_count_full_read_slots(struct mei_device *dev)
{
/* read buffer has a static size */
return PAYLOAD_SIZE / 4;
}
/**
* mei_txe_read_hdr - read message header which is always in 4 first bytes
*
* @dev: the device structure
*
* Return: mei message header
*/
static u32 mei_txe_read_hdr(const struct mei_device *dev)
{
return mei_txe_out_data_read(dev, 0);
}
/**
* mei_txe_read - reads a message from the txe device.
*
* @dev: the device structure
* @buf: message buffer will be written
* @len: message size will be read
*
* Return: -EINVAL on error wrong argument and 0 on success
*/
static int mei_txe_read(struct mei_device *dev,
unsigned char *buf, unsigned long len)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 *reg_buf, reg;
u32 rem;
u32 i;
if (WARN_ON(!buf || !len))
return -EINVAL;
reg_buf = (u32 *)buf;
rem = len & 0x3;
dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
len, mei_txe_out_data_read(dev, 0));
for (i = 0; i < len / 4; i++) {
/* skip header: index starts from 1 */
reg = mei_txe_out_data_read(dev, i + 1);
dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
*reg_buf++ = reg;
}
if (rem) {
reg = mei_txe_out_data_read(dev, i + 1);
memcpy(reg_buf, &reg, rem);
}
mei_txe_output_ready_set(hw);
return 0;
}
/**
* mei_txe_hw_reset - resets host and fw.
*
* @dev: the device structure
* @intr_enable: if interrupt should be enabled after reset.
*
* Return: 0 on success and < 0 in case of error
*/
static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 aliveness_req;
/*
* read input doorbell to ensure consistency between Bridge and SeC
* the return value might be garbage
*/
(void)mei_txe_sec_reg_read_silent(hw, SEC_IPC_INPUT_DOORBELL_REG);
aliveness_req = mei_txe_aliveness_req_get(dev);
hw->aliveness = mei_txe_aliveness_get(dev);
/* Disable interrupts in this stage we will poll */
mei_txe_intr_disable(dev);
/*
* If Aliveness Request and Aliveness Response are not equal then
* wait for them to be equal
* Since we might have interrupts disabled - poll for it
*/
if (aliveness_req != hw->aliveness)
if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
return -EIO;
}
/*
* If Aliveness Request and Aliveness Response are set then clear them
*/
if (aliveness_req) {
mei_txe_aliveness_set(dev, 0);
if (mei_txe_aliveness_poll(dev, 0) < 0) {
dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
return -EIO;
}
}
/*
* Set readiness RDY_CLR bit
*/
mei_txe_readiness_clear(dev);
return 0;
}
/**
* mei_txe_hw_start - start the hardware after reset
*
* @dev: the device structure
*
* Return: 0 on success an error code otherwise
*/
static int mei_txe_hw_start(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
int ret;
u32 hisr;
/* bring back interrupts */
mei_txe_intr_enable(dev);
ret = mei_txe_readiness_wait(dev);
if (ret < 0) {
dev_err(dev->dev, "waiting for readiness failed\n");
return ret;
}
/*
* If HISR.INT2_STS interrupt status bit is set then clear it.
*/
hisr = mei_txe_br_reg_read(hw, HISR_REG);
if (hisr & HISR_INT_2_STS)
mei_txe_br_reg_write(hw, HISR_REG, HISR_INT_2_STS);
/* Clear the interrupt cause of OutputDoorbell */
clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause);
ret = mei_txe_aliveness_set_sync(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
return ret;
}
/* enable input ready interrupts:
* SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
*/
mei_txe_input_ready_interrupt_enable(dev);
/* Set the SICR_SEC_IPC_OUTPUT_STATUS.IPC_OUTPUT_READY bit */
mei_txe_output_ready_set(hw);
/* Set bit SICR_HOST_IPC_READINESS.HOST_RDY
*/
mei_txe_readiness_set_host_rdy(dev);
return 0;
}
/**
* mei_txe_check_and_ack_intrs - translate multi BAR interrupt into
* single bit mask and acknowledge the interrupts
*
* @dev: the device structure
* @do_ack: acknowledge interrupts
*
* Return: true if found interrupts to process.
*/
static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 hisr;
u32 hhisr;
u32 ipc_isr;
u32 aliveness;
bool generated;
/* read interrupt registers */
hhisr = mei_txe_br_reg_read(hw, HHISR_REG);
generated = (hhisr & IPC_HHIER_MSK);
if (!generated)
goto out;
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
if (hhisr & IPC_HHIER_SEC && aliveness)
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
else
ipc_isr = 0;
generated = generated ||
(hisr & HISR_INT_STS_MSK) ||
(ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
if (generated && do_ack) {
/* Save the interrupt causes */
hw->intr_cause |= hisr & HISR_INT_STS_MSK;
if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
hw->intr_cause |= TXE_INTR_IN_READY;
mei_txe_intr_disable(dev);
/* Clear the interrupts in hierarchy:
* IPC and Bridge, then the High Level */
mei_txe_sec_reg_write_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG, ipc_isr);
mei_txe_br_reg_write(hw, HISR_REG, hisr);
mei_txe_br_reg_write(hw, HHISR_REG, hhisr);
}
out:
return generated;
}
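/*
 * Interrupt funnel summary (descriptive, derived from the code above):
 *
 *	HHISR (top level)
 *	+-- HISR: bridge causes (readiness, aliveness, output doorbell)
 *	+-- SEC IPC ISR: input-ready (only readable while aliveness is set)
 *
 * Acknowledgement runs bottom-up: the IPC and bridge status registers are
 * cleared first, then the top-level HHISR.
 */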
/**
* mei_txe_irq_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* Return: IRQ_WAKE_THREAD if interrupt is designed for the device
* IRQ_NONE otherwise
*/
irqreturn_t mei_txe_irq_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = dev_id;
if (mei_txe_check_and_ack_intrs(dev, true))
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
/**
* mei_txe_irq_thread_handler - txe interrupt thread
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* Return: IRQ_HANDLED
*/
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_txe_hw *hw = to_txe_hw(dev);
struct mei_cl_cb complete_list;
s32 slots;
int rets = 0;
dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
mei_txe_br_reg_read(hw, HHISR_REG),
mei_txe_br_reg_read(hw, HISR_REG),
mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
/* initialize our complete list */
mutex_lock(&dev->device_lock);
mei_io_list_init(&complete_list);
if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
mei_txe_check_and_ack_intrs(dev, true);
/* show irq events */
mei_txe_pending_interrupts(dev);
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
/* Readiness:
* Detection of TXE driver going through reset
* or TXE driver resetting the HECI interface.
*/
if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
dev_dbg(dev->dev, "Readiness Interrupt was received...\n");
/* Check if SeC is going through reset */
if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
dev_dbg(dev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
} else {
dev->recvd_hw_ready = false;
if (dev->dev_state != MEI_DEV_RESETTING) {
dev_warn(dev->dev, "FW not ready: resetting.\n");
schedule_work(&dev->reset_work);
goto end;
}
}
wake_up(&dev->wait_hw_ready);
}
/************************************************************/
/* Check interrupt cause:
* Aliveness: Detection of SeC acknowledge of host request that
* it remain alive or host cancellation of that request.
*/
if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
/* Clear the interrupt cause */
dev_dbg(dev->dev,
"Aliveness Interrupt: Status: %d\n", hw->aliveness);
dev->pg_event = MEI_PG_EVENT_RECEIVED;
if (waitqueue_active(&hw->wait_aliveness_resp))
wake_up(&hw->wait_aliveness_resp);
}
/* Output Doorbell:
* Detection of SeC having sent output to host
*/
slots = mei_count_full_read_slots(dev);
if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
/* Read from TXE */
rets = mei_irq_read_handler(dev, &complete_list, &slots);
if (rets && dev->dev_state != MEI_DEV_RESETTING) {
dev_err(dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
schedule_work(&dev->reset_work);
goto end;
}
}
/* Input Ready: Detection if host can write to SeC */
if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
dev->hbuf_is_ready = true;
hw->slots = dev->hbuf_depth;
}
if (hw->aliveness && dev->hbuf_is_ready) {
/* get the real register value */
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
rets = mei_irq_write_handler(dev, &complete_list);
if (rets && rets != -EMSGSIZE)
dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
rets);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
mei_irq_compl_handler(dev, &complete_list);
end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
mutex_unlock(&dev->device_lock);
mei_enable_interrupts(dev);
return IRQ_HANDLED;
}
static const struct mei_hw_ops mei_txe_hw_ops = {
.host_is_ready = mei_txe_host_is_ready,
.fw_status = mei_txe_fw_status,
.pg_state = mei_txe_pg_state,
.hw_is_ready = mei_txe_hw_is_ready,
.hw_reset = mei_txe_hw_reset,
.hw_config = mei_txe_hw_config,
.hw_start = mei_txe_hw_start,
.pg_in_transition = mei_txe_pg_in_transition,
.pg_is_enabled = mei_txe_pg_is_enabled,
.intr_clear = mei_txe_intr_clear,
.intr_enable = mei_txe_intr_enable,
.intr_disable = mei_txe_intr_disable,
.hbuf_free_slots = mei_txe_hbuf_empty_slots,
.hbuf_is_ready = mei_txe_is_input_ready,
.hbuf_max_len = mei_txe_hbuf_max_len,
.write = mei_txe_write,
.rdbuf_full_slots = mei_txe_count_full_read_slots,
.read_hdr = mei_txe_read_hdr,
.read = mei_txe_read,
};
/**
* mei_txe_dev_init - allocates and initializes txe hardware specific structure
*
* @pdev: pci device
*
* Return: struct mei_device * on success or NULL
*/
struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
dev = kzalloc(sizeof(struct mei_device) +
sizeof(struct mei_txe_hw), GFP_KERNEL);
if (!dev)
return NULL;
mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);
hw = to_txe_hw(dev);
init_waitqueue_head(&hw->wait_aliveness_resp);
return dev;
}
/**
* mei_txe_setup_satt2 - SATT2 configuration for DMA support.
*
* @dev: the device structure
* @addr: physical address start of the range
* @range: physical range size
*
* Return: 0 on success, an error code otherwise
*/
int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
u32 lo32 = lower_32_bits(addr);
u32 hi32 = upper_32_bits(addr);
u32 ctrl;
/* SATT is limited to 36 Bits */
if (hi32 & ~0xF)
return -EINVAL;
/* SATT has to be 16Byte aligned */
if (lo32 & 0xF)
return -EINVAL;
/* SATT range has to be 4Bytes aligned */
if (range & 0x4)
return -EINVAL;
/* SATT is limited to 32 MB range*/
if (range > SATT_RANGE_MAX)
return -EINVAL;
ctrl = SATT2_CTRL_VALID_MSK;
ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
range, lo32, ctrl);
return 0;
}
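/*
 * Worked example for the checks above (illustrative values): for
 * addr = 0x3_8000_0010, upper_32_bits() yields hi32 = 0x3 and
 * lower_32_bits() yields lo32 = 0x80000010. hi32 & ~0xF == 0, so the
 * address fits SATT's 36 bits, and lo32 & 0xF == 0 satisfies the
 * 16-byte alignment check; addr = 0x3_8000_0011 would fail it with
 * -EINVAL. Note that the range test checks only bit 2 (range & 0x4);
 * a strict 4-byte alignment test would be (range & 0x3).
 */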
| gpl-2.0 |
Riztazz/trinitycore-m4a | dep/bzip2/bzlib.c | 924 | 45995 |
/*-------------------------------------------------------------*/
/*--- Library top-level functions. ---*/
/*--- bzlib.c ---*/
/*-------------------------------------------------------------*/
/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.
bzip2/libbzip2 version 1.0.6 of 6 September 2010
Copyright (C) 1996-2010 Julian Seward <jseward@bzip.org>
Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.
This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */
/* CHANGES
0.9.0 -- original version.
0.9.0a/b -- no changes in this file.
0.9.0c -- made zero-length BZ_FLUSH work correctly in bzCompress().
fixed bzWrite/bzRead to ignore zero-length requests.
fixed bzread to correctly handle read requests after EOF.
wrong parameter order in call to bzDecompressInit in
bzBuffToBuffDecompress. Fixed.
*/
#include "bzlib_private.h"
/*---------------------------------------------------*/
/*--- Compression stuff ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
#ifndef BZ_NO_STDIO
void BZ2_bz__AssertH__fail ( int errcode )
{
fprintf(stderr,
"\n\nbzip2/libbzip2: internal error number %d.\n"
"This is a bug in bzip2/libbzip2, %s.\n"
"Please report it to me at: jseward@bzip.org. If this happened\n"
"when you were using some program which uses libbzip2 as a\n"
"component, you should also report this bug to the author(s)\n"
"of that program. Please make an effort to report this bug;\n"
"timely and accurate bug reports eventually lead to higher\n"
"quality software. Thanks. Julian Seward, 10 December 2007.\n\n",
errcode,
BZ2_bzlibVersion()
);
if (errcode == 1007) {
fprintf(stderr,
"\n*** A special note about internal error number 1007 ***\n"
"\n"
"Experience suggests that a common cause of i.e. 1007\n"
"is unreliable memory or other hardware. The 1007 assertion\n"
"just happens to cross-check the results of huge numbers of\n"
"memory reads/writes, and so acts (unintendedly) as a stress\n"
"test of your memory system.\n"
"\n"
"I suggest the following: try compressing the file again,\n"
"possibly monitoring progress in detail with the -vv flag.\n"
"\n"
"* If the error cannot be reproduced, and/or happens at different\n"
" points in compression, you may have a flaky memory system.\n"
" Try a memory-test program. I have used Memtest86\n"
" (www.memtest86.com). At the time of writing it is free (GPLd).\n"
" Memtest86 tests memory much more thorougly than your BIOSs\n"
" power-on test, and may find failures that the BIOS doesn't.\n"
"\n"
"* If the error can be repeatably reproduced, this is a bug in\n"
" bzip2, and I would very much like to hear about it. Please\n"
" let me know, and, ideally, save a copy of the file causing the\n"
" problem -- without which I will be unable to investigate it.\n"
"\n"
);
}
exit(3);
}
#endif
/*---------------------------------------------------*/
static
int bz_config_ok ( void )
{
if (sizeof(int) != 4) return 0;
if (sizeof(short) != 2) return 0;
if (sizeof(char) != 1) return 0;
return 1;
}
/*---------------------------------------------------*/
static
void* default_bzalloc ( void* opaque, Int32 items, Int32 size )
{
void* v = malloc ( items * size );
return v;
}
static
void default_bzfree ( void* opaque, void* addr )
{
if (addr != NULL) free ( addr );
}
/*---------------------------------------------------*/
static
void prepare_new_block ( EState* s )
{
Int32 i;
s->nblock = 0;
s->numZ = 0;
s->state_out_pos = 0;
BZ_INITIALISE_CRC ( s->blockCRC );
for (i = 0; i < 256; i++) s->inUse[i] = False;
s->blockNo++;
}
/*---------------------------------------------------*/
static
void init_RL ( EState* s )
{
s->state_in_ch = 256;
s->state_in_len = 0;
}
static
Bool isempty_RL ( EState* s )
{
if (s->state_in_ch < 256 && s->state_in_len > 0)
return False; else
return True;
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzCompressInit)
( bz_stream* strm,
int blockSize100k,
int verbosity,
int workFactor )
{
Int32 n;
EState* s;
if (!bz_config_ok()) return BZ_CONFIG_ERROR;
if (strm == NULL ||
blockSize100k < 1 || blockSize100k > 9 ||
workFactor < 0 || workFactor > 250)
return BZ_PARAM_ERROR;
if (workFactor == 0) workFactor = 30;
if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc;
if (strm->bzfree == NULL) strm->bzfree = default_bzfree;
s = BZALLOC( sizeof(EState) );
if (s == NULL) return BZ_MEM_ERROR;
s->strm = strm;
s->arr1 = NULL;
s->arr2 = NULL;
s->ftab = NULL;
n = 100000 * blockSize100k;
s->arr1 = BZALLOC( n * sizeof(UInt32) );
s->arr2 = BZALLOC( (n+BZ_N_OVERSHOOT) * sizeof(UInt32) );
s->ftab = BZALLOC( 65537 * sizeof(UInt32) );
if (s->arr1 == NULL || s->arr2 == NULL || s->ftab == NULL) {
if (s->arr1 != NULL) BZFREE(s->arr1);
if (s->arr2 != NULL) BZFREE(s->arr2);
if (s->ftab != NULL) BZFREE(s->ftab);
if (s != NULL) BZFREE(s);
return BZ_MEM_ERROR;
}
s->blockNo = 0;
s->state = BZ_S_INPUT;
s->mode = BZ_M_RUNNING;
s->combinedCRC = 0;
s->blockSize100k = blockSize100k;
s->nblockMAX = 100000 * blockSize100k - 19;
s->verbosity = verbosity;
s->workFactor = workFactor;
s->block = (UChar*)s->arr2;
s->mtfv = (UInt16*)s->arr1;
s->zbits = NULL;
s->ptr = (UInt32*)s->arr1;
strm->state = s;
strm->total_in_lo32 = 0;
strm->total_in_hi32 = 0;
strm->total_out_lo32 = 0;
strm->total_out_hi32 = 0;
init_RL ( s );
prepare_new_block ( s );
return BZ_OK;
}
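/*
 * Usage sketch for the streaming API (assumed caller code, not part
 * of libbzip2): the lifecycle is init -> repeated BZ2_bzCompress ->
 * end, with input fed through next_in/avail_in and output drained
 * via next_out/avail_out, much like zlib:
 *
 *	bz_stream strm = { 0 };      // NULL bzalloc/bzfree => defaults
 *	char out[4096];
 *	int ret = BZ2_bzCompressInit(&strm, 9, 0, 30);
 *	if (ret != BZ_OK) return ret;
 *	strm.next_in  = input;       // input/input_len are hypothetical
 *	strm.avail_in = input_len;
 *	do {
 *		strm.next_out  = out;
 *		strm.avail_out = sizeof(out);
 *		ret = BZ2_bzCompress(&strm, BZ_FINISH);
 *		// consume out[0 .. sizeof(out) - strm.avail_out)
 *	} while (ret == BZ_FINISH_OK);
 *	BZ2_bzCompressEnd(&strm);
 *	return ret == BZ_STREAM_END ? BZ_OK : ret;
 */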
/*---------------------------------------------------*/
static
void add_pair_to_block ( EState* s )
{
Int32 i;
UChar ch = (UChar)(s->state_in_ch);
for (i = 0; i < s->state_in_len; i++) {
BZ_UPDATE_CRC( s->blockCRC, ch );
}
s->inUse[s->state_in_ch] = True;
switch (s->state_in_len) {
case 1:
s->block[s->nblock] = (UChar)ch; s->nblock++;
break;
case 2:
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++;
break;
case 3:
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++;
break;
default:
s->inUse[s->state_in_len-4] = True;
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = (UChar)ch; s->nblock++;
s->block[s->nblock] = ((UChar)(s->state_in_len-4));
s->nblock++;
break;
}
}
/*---------------------------------------------------*/
static
void flush_RL ( EState* s )
{
if (s->state_in_ch < 256) add_pair_to_block ( s );
init_RL ( s );
}
/*---------------------------------------------------*/
#define ADD_CHAR_TO_BLOCK(zs,zchh0) \
{ \
UInt32 zchh = (UInt32)(zchh0); \
/*-- fast track the common case --*/ \
if (zchh != zs->state_in_ch && \
zs->state_in_len == 1) { \
UChar ch = (UChar)(zs->state_in_ch); \
BZ_UPDATE_CRC( zs->blockCRC, ch ); \
zs->inUse[zs->state_in_ch] = True; \
zs->block[zs->nblock] = (UChar)ch; \
zs->nblock++; \
zs->state_in_ch = zchh; \
} \
else \
/*-- general, uncommon cases --*/ \
if (zchh != zs->state_in_ch || \
zs->state_in_len == 255) { \
if (zs->state_in_ch < 256) \
add_pair_to_block ( zs ); \
zs->state_in_ch = zchh; \
zs->state_in_len = 1; \
} else { \
zs->state_in_len++; \
} \
}
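/*
 * Worked example of the run-length encoding done by this macro
 * together with add_pair_to_block() above: runs of 1..3 identical
 * bytes are stored literally, while a run of length 4..255 is stored
 * as four copies of the byte followed by a length byte of (len - 4).
 * E.g. the input "aaaaaaa" (7 x 'a') ends up in the block as
 * 'a' 'a' 'a' 'a' 0x03.
 */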
/*---------------------------------------------------*/
static
Bool copy_input_until_stop ( EState* s )
{
Bool progress_in = False;
if (s->mode == BZ_M_RUNNING) {
/*-- fast track the common case --*/
while (True) {
/*-- block full? --*/
if (s->nblock >= s->nblockMAX) break;
/*-- no input? --*/
if (s->strm->avail_in == 0) break;
progress_in = True;
ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) );
s->strm->next_in++;
s->strm->avail_in--;
s->strm->total_in_lo32++;
if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++;
}
} else {
/*-- general, uncommon case --*/
while (True) {
/*-- block full? --*/
if (s->nblock >= s->nblockMAX) break;
/*-- no input? --*/
if (s->strm->avail_in == 0) break;
/*-- flush/finish end? --*/
if (s->avail_in_expect == 0) break;
progress_in = True;
ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) );
s->strm->next_in++;
s->strm->avail_in--;
s->strm->total_in_lo32++;
if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++;
s->avail_in_expect--;
}
}
return progress_in;
}
/*---------------------------------------------------*/
static
Bool copy_output_until_stop ( EState* s )
{
Bool progress_out = False;
while (True) {
/*-- no output space? --*/
if (s->strm->avail_out == 0) break;
/*-- block done? --*/
if (s->state_out_pos >= s->numZ) break;
progress_out = True;
*(s->strm->next_out) = s->zbits[s->state_out_pos];
s->state_out_pos++;
s->strm->avail_out--;
s->strm->next_out++;
s->strm->total_out_lo32++;
if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
}
return progress_out;
}
/*---------------------------------------------------*/
static
Bool handle_compress ( bz_stream* strm )
{
Bool progress_in = False;
Bool progress_out = False;
EState* s = strm->state;
while (True) {
if (s->state == BZ_S_OUTPUT) {
progress_out |= copy_output_until_stop ( s );
if (s->state_out_pos < s->numZ) break;
if (s->mode == BZ_M_FINISHING &&
s->avail_in_expect == 0 &&
isempty_RL(s)) break;
prepare_new_block ( s );
s->state = BZ_S_INPUT;
if (s->mode == BZ_M_FLUSHING &&
s->avail_in_expect == 0 &&
isempty_RL(s)) break;
}
if (s->state == BZ_S_INPUT) {
progress_in |= copy_input_until_stop ( s );
if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) {
flush_RL ( s );
BZ2_compressBlock ( s, (Bool)(s->mode == BZ_M_FINISHING) );
s->state = BZ_S_OUTPUT;
}
else
if (s->nblock >= s->nblockMAX) {
BZ2_compressBlock ( s, False );
s->state = BZ_S_OUTPUT;
}
else
if (s->strm->avail_in == 0) {
break;
}
}
}
return progress_in || progress_out;
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzCompress) ( bz_stream *strm, int action )
{
Bool progress;
EState* s;
if (strm == NULL) return BZ_PARAM_ERROR;
s = strm->state;
if (s == NULL) return BZ_PARAM_ERROR;
if (s->strm != strm) return BZ_PARAM_ERROR;
preswitch:
switch (s->mode) {
case BZ_M_IDLE:
return BZ_SEQUENCE_ERROR;
case BZ_M_RUNNING:
if (action == BZ_RUN) {
progress = handle_compress ( strm );
return progress ? BZ_RUN_OK : BZ_PARAM_ERROR;
}
else
if (action == BZ_FLUSH) {
s->avail_in_expect = strm->avail_in;
s->mode = BZ_M_FLUSHING;
goto preswitch;
}
else
if (action == BZ_FINISH) {
s->avail_in_expect = strm->avail_in;
s->mode = BZ_M_FINISHING;
goto preswitch;
}
else
return BZ_PARAM_ERROR;
case BZ_M_FLUSHING:
if (action != BZ_FLUSH) return BZ_SEQUENCE_ERROR;
if (s->avail_in_expect != s->strm->avail_in)
return BZ_SEQUENCE_ERROR;
progress = handle_compress ( strm );
if (s->avail_in_expect > 0 || !isempty_RL(s) ||
s->state_out_pos < s->numZ) return BZ_FLUSH_OK;
s->mode = BZ_M_RUNNING;
return BZ_RUN_OK;
case BZ_M_FINISHING:
if (action != BZ_FINISH) return BZ_SEQUENCE_ERROR;
if (s->avail_in_expect != s->strm->avail_in)
return BZ_SEQUENCE_ERROR;
progress = handle_compress ( strm );
if (!progress) return BZ_SEQUENCE_ERROR;
if (s->avail_in_expect > 0 || !isempty_RL(s) ||
s->state_out_pos < s->numZ) return BZ_FINISH_OK;
s->mode = BZ_M_IDLE;
return BZ_STREAM_END;
}
return BZ_OK; /*--not reached--*/
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzCompressEnd) ( bz_stream *strm )
{
EState* s;
if (strm == NULL) return BZ_PARAM_ERROR;
s = strm->state;
if (s == NULL) return BZ_PARAM_ERROR;
if (s->strm != strm) return BZ_PARAM_ERROR;
if (s->arr1 != NULL) BZFREE(s->arr1);
if (s->arr2 != NULL) BZFREE(s->arr2);
if (s->ftab != NULL) BZFREE(s->ftab);
BZFREE(strm->state);
strm->state = NULL;
return BZ_OK;
}
/*---------------------------------------------------*/
/*--- Decompression stuff ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
int BZ_API(BZ2_bzDecompressInit)
( bz_stream* strm,
int verbosity,
int small )
{
DState* s;
if (!bz_config_ok()) return BZ_CONFIG_ERROR;
if (strm == NULL) return BZ_PARAM_ERROR;
if (small != 0 && small != 1) return BZ_PARAM_ERROR;
if (verbosity < 0 || verbosity > 4) return BZ_PARAM_ERROR;
if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc;
if (strm->bzfree == NULL) strm->bzfree = default_bzfree;
s = BZALLOC( sizeof(DState) );
if (s == NULL) return BZ_MEM_ERROR;
s->strm = strm;
strm->state = s;
s->state = BZ_X_MAGIC_1;
s->bsLive = 0;
s->bsBuff = 0;
s->calculatedCombinedCRC = 0;
strm->total_in_lo32 = 0;
strm->total_in_hi32 = 0;
strm->total_out_lo32 = 0;
strm->total_out_hi32 = 0;
s->smallDecompress = (Bool)small;
s->ll4 = NULL;
s->ll16 = NULL;
s->tt = NULL;
s->currBlockNo = 0;
s->verbosity = verbosity;
return BZ_OK;
}
/*---------------------------------------------------*/
/* Return True iff data corruption is discovered.
Returns False if there is no problem.
*/
static
Bool unRLE_obuf_to_output_FAST ( DState* s )
{
UChar k1;
if (s->blockRandomised) {
while (True) {
/* try to finish existing run */
while (True) {
if (s->strm->avail_out == 0) return False;
if (s->state_out_len == 0) break;
*( (UChar*)(s->strm->next_out) ) = s->state_out_ch;
BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch );
s->state_out_len--;
s->strm->next_out++;
s->strm->avail_out--;
s->strm->total_out_lo32++;
if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
}
/* can a new run be started? */
if (s->nblock_used == s->save_nblock+1) return False;
/* Only caused by corrupt data stream? */
if (s->nblock_used > s->save_nblock+1)
return True;
s->state_out_len = 1;
s->state_out_ch = s->k0;
BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
s->state_out_len = 2;
BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
s->state_out_len = 3;
BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
s->state_out_len = ((Int32)k1) + 4;
BZ_GET_FAST(s->k0); BZ_RAND_UPD_MASK;
s->k0 ^= BZ_RAND_MASK; s->nblock_used++;
}
} else {
/* restore */
UInt32 c_calculatedBlockCRC = s->calculatedBlockCRC;
UChar c_state_out_ch = s->state_out_ch;
Int32 c_state_out_len = s->state_out_len;
Int32 c_nblock_used = s->nblock_used;
Int32 c_k0 = s->k0;
UInt32* c_tt = s->tt;
UInt32 c_tPos = s->tPos;
char* cs_next_out = s->strm->next_out;
unsigned int cs_avail_out = s->strm->avail_out;
Int32 ro_blockSize100k = s->blockSize100k;
/* end restore */
UInt32 avail_out_INIT = cs_avail_out;
Int32 s_save_nblockPP = s->save_nblock+1;
unsigned int total_out_lo32_old;
while (True) {
/* try to finish existing run */
if (c_state_out_len > 0) {
while (True) {
if (cs_avail_out == 0) goto return_notr;
if (c_state_out_len == 1) break;
*( (UChar*)(cs_next_out) ) = c_state_out_ch;
BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch );
c_state_out_len--;
cs_next_out++;
cs_avail_out--;
}
s_state_out_len_eq_one:
{
if (cs_avail_out == 0) {
c_state_out_len = 1; goto return_notr;
};
*( (UChar*)(cs_next_out) ) = c_state_out_ch;
BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch );
cs_next_out++;
cs_avail_out--;
}
}
/* Only caused by corrupt data stream? */
if (c_nblock_used > s_save_nblockPP)
return True;
/* can a new run be started? */
if (c_nblock_used == s_save_nblockPP) {
c_state_out_len = 0; goto return_notr;
};
c_state_out_ch = c_k0;
BZ_GET_FAST_C(k1); c_nblock_used++;
if (k1 != c_k0) {
c_k0 = k1; goto s_state_out_len_eq_one;
};
if (c_nblock_used == s_save_nblockPP)
goto s_state_out_len_eq_one;
c_state_out_len = 2;
BZ_GET_FAST_C(k1); c_nblock_used++;
if (c_nblock_used == s_save_nblockPP) continue;
if (k1 != c_k0) { c_k0 = k1; continue; };
c_state_out_len = 3;
BZ_GET_FAST_C(k1); c_nblock_used++;
if (c_nblock_used == s_save_nblockPP) continue;
if (k1 != c_k0) { c_k0 = k1; continue; };
BZ_GET_FAST_C(k1); c_nblock_used++;
c_state_out_len = ((Int32)k1) + 4;
BZ_GET_FAST_C(c_k0); c_nblock_used++;
}
return_notr:
total_out_lo32_old = s->strm->total_out_lo32;
s->strm->total_out_lo32 += (avail_out_INIT - cs_avail_out);
if (s->strm->total_out_lo32 < total_out_lo32_old)
s->strm->total_out_hi32++;
/* save */
s->calculatedBlockCRC = c_calculatedBlockCRC;
s->state_out_ch = c_state_out_ch;
s->state_out_len = c_state_out_len;
s->nblock_used = c_nblock_used;
s->k0 = c_k0;
s->tt = c_tt;
s->tPos = c_tPos;
s->strm->next_out = cs_next_out;
s->strm->avail_out = cs_avail_out;
/* end save */
}
return False;
}
/*---------------------------------------------------*/
__inline__ Int32 BZ2_indexIntoF ( Int32 indx, Int32 *cftab )
{
Int32 nb, na, mid;
nb = 0;
na = 256;
do {
mid = (nb + na) >> 1;
if (indx >= cftab[mid]) nb = mid; else na = mid;
}
while (na - nb != 1);
return nb;
}
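/*
 * Note on the binary search above: cftab[] is the cumulative
 * frequency table, cftab[b] being the number of block bytes whose
 * value is < b, so the function returns the byte value b with
 * cftab[b] <= indx < cftab[b+1]. Example (assumed tiny table): with
 * cftab[0] = 0, cftab[1] = 3, cftab[2] = 7, an indx of 5 resolves to
 * byte value 1.
 */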
/*---------------------------------------------------*/
/* Return True iff data corruption is discovered.
Returns False if there is no problem.
*/
static
Bool unRLE_obuf_to_output_SMALL ( DState* s )
{
UChar k1;
if (s->blockRandomised) {
while (True) {
/* try to finish existing run */
while (True) {
if (s->strm->avail_out == 0) return False;
if (s->state_out_len == 0) break;
*( (UChar*)(s->strm->next_out) ) = s->state_out_ch;
BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch );
s->state_out_len--;
s->strm->next_out++;
s->strm->avail_out--;
s->strm->total_out_lo32++;
if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
}
/* can a new run be started? */
if (s->nblock_used == s->save_nblock+1) return False;
/* Only caused by corrupt data stream? */
if (s->nblock_used > s->save_nblock+1)
return True;
s->state_out_len = 1;
s->state_out_ch = s->k0;
BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
s->state_out_len = 2;
BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
s->state_out_len = 3;
BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
k1 ^= BZ_RAND_MASK; s->nblock_used++;
s->state_out_len = ((Int32)k1) + 4;
BZ_GET_SMALL(s->k0); BZ_RAND_UPD_MASK;
s->k0 ^= BZ_RAND_MASK; s->nblock_used++;
}
} else {
while (True) {
/* try to finish existing run */
while (True) {
if (s->strm->avail_out == 0) return False;
if (s->state_out_len == 0) break;
*( (UChar*)(s->strm->next_out) ) = s->state_out_ch;
BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch );
s->state_out_len--;
s->strm->next_out++;
s->strm->avail_out--;
s->strm->total_out_lo32++;
if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
}
/* can a new run be started? */
if (s->nblock_used == s->save_nblock+1) return False;
/* Only caused by corrupt data stream? */
if (s->nblock_used > s->save_nblock+1)
return True;
s->state_out_len = 1;
s->state_out_ch = s->k0;
BZ_GET_SMALL(k1); s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
s->state_out_len = 2;
BZ_GET_SMALL(k1); s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
s->state_out_len = 3;
BZ_GET_SMALL(k1); s->nblock_used++;
if (s->nblock_used == s->save_nblock+1) continue;
if (k1 != s->k0) { s->k0 = k1; continue; };
BZ_GET_SMALL(k1); s->nblock_used++;
s->state_out_len = ((Int32)k1) + 4;
BZ_GET_SMALL(s->k0); s->nblock_used++;
}
}
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzDecompress) ( bz_stream *strm )
{
Bool corrupt;
DState* s;
if (strm == NULL) return BZ_PARAM_ERROR;
s = strm->state;
if (s == NULL) return BZ_PARAM_ERROR;
if (s->strm != strm) return BZ_PARAM_ERROR;
while (True) {
if (s->state == BZ_X_IDLE) return BZ_SEQUENCE_ERROR;
if (s->state == BZ_X_OUTPUT) {
if (s->smallDecompress)
corrupt = unRLE_obuf_to_output_SMALL ( s ); else
corrupt = unRLE_obuf_to_output_FAST ( s );
if (corrupt) return BZ_DATA_ERROR;
if (s->nblock_used == s->save_nblock+1 && s->state_out_len == 0) {
BZ_FINALISE_CRC ( s->calculatedBlockCRC );
if (s->verbosity >= 3)
VPrintf2 ( " {0x%08x, 0x%08x}", s->storedBlockCRC,
s->calculatedBlockCRC );
if (s->verbosity >= 2) VPrintf0 ( "]" );
if (s->calculatedBlockCRC != s->storedBlockCRC)
return BZ_DATA_ERROR;
s->calculatedCombinedCRC
= (s->calculatedCombinedCRC << 1) |
(s->calculatedCombinedCRC >> 31);
s->calculatedCombinedCRC ^= s->calculatedBlockCRC;
s->state = BZ_X_BLKHDR_1;
} else {
return BZ_OK;
}
}
if (s->state >= BZ_X_MAGIC_1) {
Int32 r = BZ2_decompress ( s );
if (r == BZ_STREAM_END) {
if (s->verbosity >= 3)
VPrintf2 ( "\n combined CRCs: stored = 0x%08x, computed = 0x%08x",
s->storedCombinedCRC, s->calculatedCombinedCRC );
if (s->calculatedCombinedCRC != s->storedCombinedCRC)
return BZ_DATA_ERROR;
return r;
}
if (s->state != BZ_X_OUTPUT) return r;
}
}
AssertH ( 0, 6001 );
return 0; /*NOTREACHED*/
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzDecompressEnd) ( bz_stream *strm )
{
DState* s;
if (strm == NULL) return BZ_PARAM_ERROR;
s = strm->state;
if (s == NULL) return BZ_PARAM_ERROR;
if (s->strm != strm) return BZ_PARAM_ERROR;
if (s->tt != NULL) BZFREE(s->tt);
if (s->ll16 != NULL) BZFREE(s->ll16);
if (s->ll4 != NULL) BZFREE(s->ll4);
BZFREE(strm->state);
strm->state = NULL;
return BZ_OK;
}
#ifndef BZ_NO_STDIO
/*---------------------------------------------------*/
/*--- File I/O stuff ---*/
/*---------------------------------------------------*/
#define BZ_SETERR(eee) \
{ \
if (bzerror != NULL) *bzerror = eee; \
if (bzf != NULL) bzf->lastErr = eee; \
}
typedef
struct {
FILE* handle;
Char buf[BZ_MAX_UNUSED];
Int32 bufN;
Bool writing;
bz_stream strm;
Int32 lastErr;
Bool initialisedOk;
}
bzFile;
/*---------------------------------------------*/
static Bool myfeof ( FILE* f )
{
Int32 c = fgetc ( f );
if (c == EOF) return True;
ungetc ( c, f );
return False;
}
/*---------------------------------------------------*/
BZFILE* BZ_API(BZ2_bzWriteOpen)
( int* bzerror,
FILE* f,
int blockSize100k,
int verbosity,
int workFactor )
{
Int32 ret;
bzFile* bzf = NULL;
BZ_SETERR(BZ_OK);
if (f == NULL ||
(blockSize100k < 1 || blockSize100k > 9) ||
(workFactor < 0 || workFactor > 250) ||
(verbosity < 0 || verbosity > 4))
{ BZ_SETERR(BZ_PARAM_ERROR); return NULL; };
if (ferror(f))
{ BZ_SETERR(BZ_IO_ERROR); return NULL; };
bzf = malloc ( sizeof(bzFile) );
if (bzf == NULL)
{ BZ_SETERR(BZ_MEM_ERROR); return NULL; };
BZ_SETERR(BZ_OK);
bzf->initialisedOk = False;
bzf->bufN = 0;
bzf->handle = f;
bzf->writing = True;
bzf->strm.bzalloc = NULL;
bzf->strm.bzfree = NULL;
bzf->strm.opaque = NULL;
if (workFactor == 0) workFactor = 30;
ret = BZ2_bzCompressInit ( &(bzf->strm), blockSize100k,
verbosity, workFactor );
if (ret != BZ_OK)
{ BZ_SETERR(ret); free(bzf); return NULL; };
bzf->strm.avail_in = 0;
bzf->initialisedOk = True;
return bzf;
}
/*---------------------------------------------------*/
void BZ_API(BZ2_bzWrite)
( int* bzerror,
BZFILE* b,
void* buf,
int len )
{
Int32 n, n2, ret;
bzFile* bzf = (bzFile*)b;
BZ_SETERR(BZ_OK);
if (bzf == NULL || buf == NULL || len < 0)
{ BZ_SETERR(BZ_PARAM_ERROR); return; };
if (!(bzf->writing))
{ BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
if (ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return; };
if (len == 0)
{ BZ_SETERR(BZ_OK); return; };
bzf->strm.avail_in = len;
bzf->strm.next_in = buf;
while (True) {
bzf->strm.avail_out = BZ_MAX_UNUSED;
bzf->strm.next_out = bzf->buf;
ret = BZ2_bzCompress ( &(bzf->strm), BZ_RUN );
if (ret != BZ_RUN_OK)
{ BZ_SETERR(ret); return; };
if (bzf->strm.avail_out < BZ_MAX_UNUSED) {
n = BZ_MAX_UNUSED - bzf->strm.avail_out;
n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar),
n, bzf->handle );
if (n != n2 || ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return; };
}
if (bzf->strm.avail_in == 0)
{ BZ_SETERR(BZ_OK); return; };
}
}
/*---------------------------------------------------*/
void BZ_API(BZ2_bzWriteClose)
( int* bzerror,
BZFILE* b,
int abandon,
unsigned int* nbytes_in,
unsigned int* nbytes_out )
{
BZ2_bzWriteClose64 ( bzerror, b, abandon,
nbytes_in, NULL, nbytes_out, NULL );
}
void BZ_API(BZ2_bzWriteClose64)
( int* bzerror,
BZFILE* b,
int abandon,
unsigned int* nbytes_in_lo32,
unsigned int* nbytes_in_hi32,
unsigned int* nbytes_out_lo32,
unsigned int* nbytes_out_hi32 )
{
Int32 n, n2, ret;
bzFile* bzf = (bzFile*)b;
if (bzf == NULL)
{ BZ_SETERR(BZ_OK); return; };
if (!(bzf->writing))
{ BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
if (ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return; };
if (nbytes_in_lo32 != NULL) *nbytes_in_lo32 = 0;
if (nbytes_in_hi32 != NULL) *nbytes_in_hi32 = 0;
if (nbytes_out_lo32 != NULL) *nbytes_out_lo32 = 0;
if (nbytes_out_hi32 != NULL) *nbytes_out_hi32 = 0;
if ((!abandon) && bzf->lastErr == BZ_OK) {
while (True) {
bzf->strm.avail_out = BZ_MAX_UNUSED;
bzf->strm.next_out = bzf->buf;
ret = BZ2_bzCompress ( &(bzf->strm), BZ_FINISH );
if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
{ BZ_SETERR(ret); return; };
if (bzf->strm.avail_out < BZ_MAX_UNUSED) {
n = BZ_MAX_UNUSED - bzf->strm.avail_out;
n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar),
n, bzf->handle );
if (n != n2 || ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return; };
}
if (ret == BZ_STREAM_END) break;
}
}
if ( !abandon && !ferror ( bzf->handle ) ) {
fflush ( bzf->handle );
if (ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return; };
}
if (nbytes_in_lo32 != NULL)
*nbytes_in_lo32 = bzf->strm.total_in_lo32;
if (nbytes_in_hi32 != NULL)
*nbytes_in_hi32 = bzf->strm.total_in_hi32;
if (nbytes_out_lo32 != NULL)
*nbytes_out_lo32 = bzf->strm.total_out_lo32;
if (nbytes_out_hi32 != NULL)
*nbytes_out_hi32 = bzf->strm.total_out_hi32;
BZ_SETERR(BZ_OK);
BZ2_bzCompressEnd ( &(bzf->strm) );
free ( bzf );
}
/*---------------------------------------------------*/
BZFILE* BZ_API(BZ2_bzReadOpen)
( int* bzerror,
FILE* f,
int verbosity,
int small,
void* unused,
int nUnused )
{
bzFile* bzf = NULL;
int ret;
BZ_SETERR(BZ_OK);
if (f == NULL ||
(small != 0 && small != 1) ||
(verbosity < 0 || verbosity > 4) ||
(unused == NULL && nUnused != 0) ||
(unused != NULL && (nUnused < 0 || nUnused > BZ_MAX_UNUSED)))
{ BZ_SETERR(BZ_PARAM_ERROR); return NULL; };
if (ferror(f))
{ BZ_SETERR(BZ_IO_ERROR); return NULL; };
bzf = malloc ( sizeof(bzFile) );
if (bzf == NULL)
{ BZ_SETERR(BZ_MEM_ERROR); return NULL; };
BZ_SETERR(BZ_OK);
bzf->initialisedOk = False;
bzf->handle = f;
bzf->bufN = 0;
bzf->writing = False;
bzf->strm.bzalloc = NULL;
bzf->strm.bzfree = NULL;
bzf->strm.opaque = NULL;
while (nUnused > 0) {
bzf->buf[bzf->bufN] = *((UChar*)(unused)); bzf->bufN++;
unused = ((void*)( 1 + ((UChar*)(unused)) ));
nUnused--;
}
ret = BZ2_bzDecompressInit ( &(bzf->strm), verbosity, small );
if (ret != BZ_OK)
{ BZ_SETERR(ret); free(bzf); return NULL; };
bzf->strm.avail_in = bzf->bufN;
bzf->strm.next_in = bzf->buf;
bzf->initialisedOk = True;
return bzf;
}
/*---------------------------------------------------*/
void BZ_API(BZ2_bzReadClose) ( int *bzerror, BZFILE *b )
{
bzFile* bzf = (bzFile*)b;
BZ_SETERR(BZ_OK);
if (bzf == NULL)
{ BZ_SETERR(BZ_OK); return; };
if (bzf->writing)
{ BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
if (bzf->initialisedOk)
(void)BZ2_bzDecompressEnd ( &(bzf->strm) );
free ( bzf );
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzRead)
( int* bzerror,
BZFILE* b,
void* buf,
int len )
{
Int32 n, ret;
bzFile* bzf = (bzFile*)b;
BZ_SETERR(BZ_OK);
if (bzf == NULL || buf == NULL || len < 0)
{ BZ_SETERR(BZ_PARAM_ERROR); return 0; };
if (bzf->writing)
{ BZ_SETERR(BZ_SEQUENCE_ERROR); return 0; };
if (len == 0)
{ BZ_SETERR(BZ_OK); return 0; };
bzf->strm.avail_out = len;
bzf->strm.next_out = buf;
while (True) {
if (ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return 0; };
if (bzf->strm.avail_in == 0 && !myfeof(bzf->handle)) {
n = fread ( bzf->buf, sizeof(UChar),
BZ_MAX_UNUSED, bzf->handle );
if (ferror(bzf->handle))
{ BZ_SETERR(BZ_IO_ERROR); return 0; };
bzf->bufN = n;
bzf->strm.avail_in = bzf->bufN;
bzf->strm.next_in = bzf->buf;
}
ret = BZ2_bzDecompress ( &(bzf->strm) );
if (ret != BZ_OK && ret != BZ_STREAM_END)
{ BZ_SETERR(ret); return 0; };
if (ret == BZ_OK && myfeof(bzf->handle) &&
bzf->strm.avail_in == 0 && bzf->strm.avail_out > 0)
{ BZ_SETERR(BZ_UNEXPECTED_EOF); return 0; };
if (ret == BZ_STREAM_END)
{ BZ_SETERR(BZ_STREAM_END);
return len - bzf->strm.avail_out; };
if (bzf->strm.avail_out == 0)
{ BZ_SETERR(BZ_OK); return len; };
}
return 0; /*not reached*/
}
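/*
 * Read-side usage sketch (assumed caller code, following the loop
 * the bzip2 documentation suggests):
 *
 *	#include <stdio.h>
 *	int bzerr, n;
 *	char buf[4096];
 *	BZFILE *bzf = BZ2_bzReadOpen(&bzerr, fp, 0, 0, NULL, 0);
 *	while (bzerr == BZ_OK) {
 *		n = BZ2_bzRead(&bzerr, bzf, buf, sizeof(buf));
 *		if (bzerr == BZ_OK || bzerr == BZ_STREAM_END)
 *			fwrite(buf, 1, n, stdout);  // consume n bytes
 *	}
 *	BZ2_bzReadClose(&bzerr, bzf);
 */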
/*---------------------------------------------------*/
void BZ_API(BZ2_bzReadGetUnused)
( int* bzerror,
BZFILE* b,
void** unused,
int* nUnused )
{
bzFile* bzf = (bzFile*)b;
if (bzf == NULL)
{ BZ_SETERR(BZ_PARAM_ERROR); return; };
if (bzf->lastErr != BZ_STREAM_END)
{ BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
if (unused == NULL || nUnused == NULL)
{ BZ_SETERR(BZ_PARAM_ERROR); return; };
BZ_SETERR(BZ_OK);
*nUnused = bzf->strm.avail_in;
*unused = bzf->strm.next_in;
}
#endif
/*---------------------------------------------------*/
/*--- Misc convenience stuff ---*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
int BZ_API(BZ2_bzBuffToBuffCompress)
( char* dest,
unsigned int* destLen,
char* source,
unsigned int sourceLen,
int blockSize100k,
int verbosity,
int workFactor )
{
bz_stream strm;
int ret;
if (dest == NULL || destLen == NULL ||
source == NULL ||
blockSize100k < 1 || blockSize100k > 9 ||
verbosity < 0 || verbosity > 4 ||
workFactor < 0 || workFactor > 250)
return BZ_PARAM_ERROR;
if (workFactor == 0) workFactor = 30;
strm.bzalloc = NULL;
strm.bzfree = NULL;
strm.opaque = NULL;
ret = BZ2_bzCompressInit ( &strm, blockSize100k,
verbosity, workFactor );
if (ret != BZ_OK) return ret;
strm.next_in = source;
strm.next_out = dest;
strm.avail_in = sourceLen;
strm.avail_out = *destLen;
ret = BZ2_bzCompress ( &strm, BZ_FINISH );
if (ret == BZ_FINISH_OK) goto output_overflow;
if (ret != BZ_STREAM_END) goto errhandler;
/* normal termination */
*destLen -= strm.avail_out;
BZ2_bzCompressEnd ( &strm );
return BZ_OK;
output_overflow:
BZ2_bzCompressEnd ( &strm );
return BZ_OUTBUFF_FULL;
errhandler:
BZ2_bzCompressEnd ( &strm );
return ret;
}
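/*
 * One-shot usage sketch (assumed caller code): *destLen is in/out --
 * the capacity of dest on entry, the compressed size on exit. Per
 * the bzip2 documentation, an output buffer of sourceLen plus 1%
 * plus 600 bytes is always large enough:
 *
 *	unsigned int destLen = srcLen + srcLen / 100 + 600;
 *	char *dest = malloc(destLen);    // srcLen/src are hypothetical
 *	int rc = BZ2_bzBuffToBuffCompress(dest, &destLen,
 *					  src, srcLen, 9, 0, 0);
 *	// rc == BZ_OK on success; BZ_OUTBUFF_FULL if dest was too small
 */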
/*---------------------------------------------------*/
int BZ_API(BZ2_bzBuffToBuffDecompress)
( char* dest,
unsigned int* destLen,
char* source,
unsigned int sourceLen,
int small,
int verbosity )
{
bz_stream strm;
int ret;
if (dest == NULL || destLen == NULL ||
source == NULL ||
(small != 0 && small != 1) ||
verbosity < 0 || verbosity > 4)
return BZ_PARAM_ERROR;
strm.bzalloc = NULL;
strm.bzfree = NULL;
strm.opaque = NULL;
ret = BZ2_bzDecompressInit ( &strm, verbosity, small );
if (ret != BZ_OK) return ret;
strm.next_in = source;
strm.next_out = dest;
strm.avail_in = sourceLen;
strm.avail_out = *destLen;
ret = BZ2_bzDecompress ( &strm );
if (ret == BZ_OK) goto output_overflow_or_eof;
if (ret != BZ_STREAM_END) goto errhandler;
/* normal termination */
*destLen -= strm.avail_out;
BZ2_bzDecompressEnd ( &strm );
return BZ_OK;
output_overflow_or_eof:
if (strm.avail_out > 0) {
BZ2_bzDecompressEnd ( &strm );
return BZ_UNEXPECTED_EOF;
} else {
BZ2_bzDecompressEnd ( &strm );
return BZ_OUTBUFF_FULL;
};
errhandler:
BZ2_bzDecompressEnd ( &strm );
return ret;
}
/*---------------------------------------------------*/
/*--
Code contributed by Yoshioka Tsuneo (tsuneo@rr.iij4u.or.jp)
to support better zlib compatibility.
This code is not _officially_ part of libbzip2 (yet);
I haven't tested it, documented it, or considered the
threading-safeness of it.
If this code breaks, please contact both Yoshioka and me.
--*/
/*---------------------------------------------------*/
/*---------------------------------------------------*/
/*--
return version like "0.9.5d, 4-Sept-1999".
--*/
const char * BZ_API(BZ2_bzlibVersion)(void)
{
return BZ_VERSION;
}
#ifndef BZ_NO_STDIO
/*---------------------------------------------------*/
#if defined(_WIN32) || defined(OS2) || defined(MSDOS)
# include <fcntl.h>
# include <io.h>
# define SET_BINARY_MODE(file) setmode(fileno(file),O_BINARY)
#else
# define SET_BINARY_MODE(file)
#endif
static
BZFILE * bzopen_or_bzdopen
( const char *path, /* not used when bzdopen */
int fd, /* not used when bzopen */
const char *mode,
int open_mode) /* bzopen: 0, bzdopen:1 */
{
int bzerr;
char unused[BZ_MAX_UNUSED];
int blockSize100k = 9;
int writing = 0;
char mode2[10] = "";
FILE *fp = NULL;
BZFILE *bzfp = NULL;
int verbosity = 0;
int workFactor = 30;
int smallMode = 0;
int nUnused = 0;
if (mode == NULL) return NULL;
while (*mode) {
switch (*mode) {
case 'r':
writing = 0; break;
case 'w':
writing = 1; break;
case 's':
smallMode = 1; break;
default:
if (isdigit((int)(*mode))) {
blockSize100k = *mode-BZ_HDR_0;
}
}
mode++;
}
strcat(mode2, writing ? "w" : "r" );
strcat(mode2,"b"); /* binary mode */
if (open_mode==0) {
if (path==NULL || strcmp(path,"")==0) {
fp = (writing ? stdout : stdin);
SET_BINARY_MODE(fp);
} else {
fp = fopen(path,mode2);
}
} else {
#ifdef BZ_STRICT_ANSI
fp = NULL;
#else
fp = fdopen(fd,mode2);
#endif
}
if (fp == NULL) return NULL;
if (writing) {
/* Guard against total chaos and anarchy -- JRS */
if (blockSize100k < 1) blockSize100k = 1;
if (blockSize100k > 9) blockSize100k = 9;
bzfp = BZ2_bzWriteOpen(&bzerr,fp,blockSize100k,
verbosity,workFactor);
} else {
bzfp = BZ2_bzReadOpen(&bzerr,fp,verbosity,smallMode,
unused,nUnused);
}
if (bzfp == NULL) {
if (fp != stdin && fp != stdout) fclose(fp);
return NULL;
}
return bzfp;
}
/*---------------------------------------------------*/
/*--
open file for read or write.
ex) bzopen("file","w9")
case path="" or NULL => use stdin or stdout.
--*/
BZFILE * BZ_API(BZ2_bzopen)
( const char *path,
const char *mode )
{
return bzopen_or_bzdopen(path,-1,mode,/*bzopen*/0);
}
/*---------------------------------------------------*/
BZFILE * BZ_API(BZ2_bzdopen)
( int fd,
const char *mode )
{
return bzopen_or_bzdopen(NULL,fd,mode,/*bzdopen*/1);
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzread) (BZFILE* b, void* buf, int len )
{
int bzerr, nread;
if (((bzFile*)b)->lastErr == BZ_STREAM_END) return 0;
nread = BZ2_bzRead(&bzerr,b,buf,len);
if (bzerr == BZ_OK || bzerr == BZ_STREAM_END) {
return nread;
} else {
return -1;
}
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzwrite) (BZFILE* b, void* buf, int len )
{
int bzerr;
BZ2_bzWrite(&bzerr,b,buf,len);
if(bzerr == BZ_OK){
return len;
}else{
return -1;
}
}
/*---------------------------------------------------*/
int BZ_API(BZ2_bzflush) (BZFILE *b)
{
/* do nothing now... */
return 0;
}
/*---------------------------------------------------*/
void BZ_API(BZ2_bzclose) (BZFILE* b)
{
int bzerr;
FILE *fp;
if (b==NULL) {return;}
fp = ((bzFile *)b)->handle;
if(((bzFile*)b)->writing){
BZ2_bzWriteClose(&bzerr,b,0,NULL,NULL);
if(bzerr != BZ_OK){
BZ2_bzWriteClose(NULL,b,1,NULL,NULL);
}
}else{
BZ2_bzReadClose(&bzerr,b);
}
if(fp!=stdin && fp!=stdout){
fclose(fp);
}
}
/*---------------------------------------------------*/
/*--
return last error code
--*/
static const char *bzerrorstrings[] = {
"OK"
,"SEQUENCE_ERROR"
,"PARAM_ERROR"
,"MEM_ERROR"
,"DATA_ERROR"
,"DATA_ERROR_MAGIC"
,"IO_ERROR"
,"UNEXPECTED_EOF"
,"OUTBUFF_FULL"
,"CONFIG_ERROR"
,"???" /* for future */
,"???" /* for future */
,"???" /* for future */
,"???" /* for future */
,"???" /* for future */
,"???" /* for future */
};
const char * BZ_API(BZ2_bzerror) (BZFILE *b, int *errnum)
{
int err = ((bzFile *)b)->lastErr;
if(err>0) err = 0;
*errnum = err;
return bzerrorstrings[err*-1];
}
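/*
 * Example of the mapping above: BZ_PARAM_ERROR is -2, so
 * BZ2_bzerror() stores -2 in *errnum and returns
 * bzerrorstrings[2] == "PARAM_ERROR". Positive codes such as
 * BZ_STREAM_END are clamped to 0 ("OK"), since only negative values
 * denote errors.
 */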
#endif
/*-------------------------------------------------------------*/
/*--- end bzlib.c ---*/
/*-------------------------------------------------------------*/
| gpl-2.0 |
omonar/linux | drivers/md/persistent-data/dm-space-map-disk.c | 1180 | 6496 | /*
* Copyright (C) 2011 Red Hat, Inc.
*
* This file is released under the GPL.
*/
#include "dm-space-map-common.h"
#include "dm-space-map-disk.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "space map disk"
/*----------------------------------------------------------------*/
/*
* Space map interface.
*/
struct sm_disk {
struct dm_space_map sm;
struct ll_disk ll;
struct ll_disk old_ll;
dm_block_t begin;
dm_block_t nr_allocated_this_transaction;
};
static void sm_disk_destroy(struct dm_space_map *sm)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
kfree(smd);
}
static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_extend(&smd->ll, extra_blocks);
}
static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
*count = smd->old_ll.nr_blocks;
return 0;
}
static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
*count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction;
return 0;
}
static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_lookup(&smd->ll, b, result);
}
static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
int *result)
{
int r;
uint32_t count;
r = sm_disk_get_count(sm, b, &count);
if (r)
return r;
*result = count > 1;
return 0;
}
static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
uint32_t count)
{
int r;
uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_insert(&smd->ll, b, count, &ev);
if (!r) {
switch (ev) {
case SM_NONE:
break;
case SM_ALLOC:
/*
* This _must_ be free in the prior transaction
* otherwise we've lost atomicity.
*/
smd->nr_allocated_this_transaction++;
break;
case SM_FREE:
/*
* It's only free if it's also free in the last
* transaction.
*/
r = sm_ll_lookup(&smd->old_ll, b, &old_count);
if (r)
return r;
if (!old_count)
smd->nr_allocated_this_transaction--;
break;
}
}
return r;
}
static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
{
int r;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_inc(&smd->ll, b, &ev);
if (!r && (ev == SM_ALLOC))
/*
* This _must_ be free in the prior transaction
* otherwise we've lost atomicity.
*/
smd->nr_allocated_this_transaction++;
return r;
}
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
return sm_ll_dec(&smd->ll, b, &ev);
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
{
int r;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
/* FIXME: we should loop round a couple of times */
r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
if (r)
return r;
smd->begin = *b + 1;
r = sm_ll_inc(&smd->ll, *b, &ev);
if (!r) {
BUG_ON(ev != SM_ALLOC);
smd->nr_allocated_this_transaction++;
}
return r;
}
static int sm_disk_commit(struct dm_space_map *sm)
{
int r;
dm_block_t nr_free;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_disk_get_nr_free(sm, &nr_free);
if (r)
return r;
r = sm_ll_commit(&smd->ll);
if (r)
return r;
memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
r = sm_disk_get_nr_free(sm, &nr_free);
if (r)
return r;
return 0;
}
static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
{
*result = sizeof(struct disk_sm_root);
return 0;
}
static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
struct disk_sm_root root_le;
root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
if (max < sizeof(root_le))
return -ENOSPC;
memcpy(where_le, &root_le, sizeof(root_le));
return 0;
}
/*----------------------------------------------------------------*/
static struct dm_space_map ops = {
.destroy = sm_disk_destroy,
.extend = sm_disk_extend,
.get_nr_blocks = sm_disk_get_nr_blocks,
.get_nr_free = sm_disk_get_nr_free,
.get_count = sm_disk_get_count,
.count_is_more_than_one = sm_disk_count_is_more_than_one,
.set_count = sm_disk_set_count,
.inc_block = sm_disk_inc_block,
.dec_block = sm_disk_dec_block,
.new_block = sm_disk_new_block,
.commit = sm_disk_commit,
.root_size = sm_disk_root_size,
.copy_root = sm_disk_copy_root,
.register_threshold_callback = NULL
};
struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
int r;
struct sm_disk *smd;
smd = kmalloc(sizeof(*smd), GFP_KERNEL);
if (!smd)
return ERR_PTR(-ENOMEM);
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
memcpy(&smd->sm, &ops, sizeof(smd->sm));
r = sm_ll_new_disk(&smd->ll, tm);
if (r)
goto bad;
r = sm_ll_extend(&smd->ll, nr_blocks);
if (r)
goto bad;
r = sm_disk_commit(&smd->sm);
if (r)
goto bad;
return &smd->sm;
bad:
kfree(smd);
return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
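/*
 * Usage sketch (assumed caller, e.g. a metadata layer built on the
 * persistent-data library): a disk space map is created against a
 * transaction manager and then driven through the generic
 * dm_space_map ops (in-tree callers go through dm_sm_* wrappers):
 *
 *	struct dm_space_map *sm = dm_sm_disk_create(tm, nr_blocks);
 *	dm_block_t b;
 *	if (IS_ERR(sm))
 *		return PTR_ERR(sm);
 *	r = sm->new_block(sm, &b);	// allocate a fresh block
 *	...
 *	r = sm->commit(sm);		// make the allocation persistent
 *	sm->destroy(sm);
 */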
struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
void *root_le, size_t len)
{
int r;
struct sm_disk *smd;
smd = kmalloc(sizeof(*smd), GFP_KERNEL);
if (!smd)
return ERR_PTR(-ENOMEM);
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
memcpy(&smd->sm, &ops, sizeof(smd->sm));
r = sm_ll_open_disk(&smd->ll, tm, root_le, len);
if (r)
goto bad;
r = sm_disk_commit(&smd->sm);
if (r)
goto bad;
return &smd->sm;
bad:
kfree(smd);
return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_sm_disk_open);
/*----------------------------------------------------------------*/
| gpl-2.0 |
yuzaipiaofei/android_kernel_cyanogen_msm8916 | arch/ia64/kernel/pci-dma.c | 2204 | 2722 | /*
* Dynamic DMA mapping support.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_INTEL_IOMMU
#include <linux/kernel.h>
#include <asm/page.h>
dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);
static int iommu_sac_force __read_mostly;
int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
#else
int force_iommu __read_mostly;
#endif
int iommu_pass_through;
/* Dummy device used for NULL arguments (normally ISA). A smaller
DMA mask would probably be better, but this is bug-to-bug compatible
with i386. */
struct device fallback_dev = {
.init_name = "fallback device",
.coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &fallback_dev.coherent_dma_mask,
};
extern struct dma_map_ops intel_dma_ops;
static int __init pci_iommu_init(void)
{
if (iommu_detected)
intel_iommu_init();
return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
void pci_iommu_shutdown(void)
{
return;
}
void __init
iommu_dma_init(void)
{
return;
}
int iommu_dma_supported(struct device *dev, u64 mask)
{
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
if (mask < DMA_BIT_MASK(24))
return 0;
/* Tell the device to use SAC when IOMMU force is on. This
allows the driver to use cheaper accesses in some cases.
Problem with this is that if we overflow the IOMMU area and
return DAC as fallback address the device may not handle it
correctly.
As a special case some controllers have a 39bit address
mode that is as efficient as 32bit (aic79xx). Don't force
SAC for these. Assume all masks <= 40 bits are of this
type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */
if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
dev_info(dev, "Force SAC with mask %llx\n", mask);
return 0;
}
return 1;
}
EXPORT_SYMBOL(iommu_dma_supported);
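/*
 * Mask arithmetic example for the checks above: DMA_BIT_MASK(24) ==
 * 0x00ffffff, so the early bail-out rejects ISA-style 16MB masks.
 * The SAC-force branch fires e.g. for a fully 64-bit capable device
 * (mask == DMA_BIT_MASK(64) == ~0ULL) when iommu_sac_force is set;
 * returning 0 there makes the caller fall back to a 32-bit mask,
 * i.e. single-address-cycle transfers.
 */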
void __init pci_iommu_alloc(void)
{
dma_ops = &intel_dma_ops;
dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
dma_ops->sync_single_for_device = machvec_dma_sync_single;
dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
dma_ops->dma_supported = iommu_dma_supported;
/*
* The order of these functions is important for
* fall-back/fail-over reasons
*/
detect_intel_iommu();
#ifdef CONFIG_SWIOTLB
pci_swiotlb_init();
#endif
}
#endif
| gpl-2.0 |
Abhinav1997/kernel-1 | drivers/net/wireless/b43legacy/main.c | 2716 | 111221 | /*
*
* Broadcom B43legacy wireless driver
*
* Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>
* Copyright (c) 2005-2008 Stefano Brivio <stefano.brivio@polimi.it>
* Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
* Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
* Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
* Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net>
*
* Some parts of the code in this file are derived from the ipw2200
* driver Copyright(c) 2003 - 2004 Intel Corporation.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <asm/unaligned.h>
#include "b43legacy.h"
#include "main.h"
#include "debugfs.h"
#include "phy.h"
#include "dma.h"
#include "pio.h"
#include "sysfs.h"
#include "xmit.h"
#include "radio.h"
MODULE_DESCRIPTION("Broadcom B43legacy wireless driver");
MODULE_AUTHOR("Martin Langer");
MODULE_AUTHOR("Stefano Brivio");
MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("b43legacy/ucode2.fw");
MODULE_FIRMWARE("b43legacy/ucode4.fw");
#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
static int modparam_pio;
module_param_named(pio, modparam_pio, int, 0444);
MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");
#elif defined(CONFIG_B43LEGACY_DMA)
# define modparam_pio 0
#elif defined(CONFIG_B43LEGACY_PIO)
# define modparam_pio 1
#endif
static int modparam_bad_frames_preempt;
module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
MODULE_PARM_DESC(bad_frames_preempt, "enable(1) / disable(0) Bad Frames"
" Preemption");
static char modparam_fwpostfix[16];
module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444);
MODULE_PARM_DESC(fwpostfix, "Postfix for the firmware files to load.");
/* The following table supports BCM4301, BCM4303 and BCM4306/2 devices. */
static const struct ssb_device_id b43legacy_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 2),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 4),
SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl);
/* Channel and rate tables are shared for all devices.
* They can't be const, because ieee80211 puts some precalculated
* data in there. This data is the same for all devices, so we don't
* get concurrency issues. */
#define RATETAB_ENT(_rateid, _flags) \
{ \
.bitrate = B43legacy_RATE_TO_100KBPS(_rateid), \
.hw_value = (_rateid), \
.flags = (_flags), \
}
/*
* NOTE: When changing this, sync with xmit.c's
* b43legacy_plcp_get_bitrate_idx_* functions!
*/
static struct ieee80211_rate __b43legacy_ratetable[] = {
RATETAB_ENT(B43legacy_CCK_RATE_1MB, 0),
RATETAB_ENT(B43legacy_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE),
RATETAB_ENT(B43legacy_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE),
RATETAB_ENT(B43legacy_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE),
RATETAB_ENT(B43legacy_OFDM_RATE_6MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_9MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_12MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_18MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_24MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_36MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_48MB, 0),
RATETAB_ENT(B43legacy_OFDM_RATE_54MB, 0),
};
#define b43legacy_b_ratetable (__b43legacy_ratetable + 0)
#define b43legacy_b_ratetable_size 4
#define b43legacy_g_ratetable (__b43legacy_ratetable + 0)
#define b43legacy_g_ratetable_size 12
#define CHANTAB_ENT(_chanid, _freq) \
{ \
.center_freq = (_freq), \
.hw_value = (_chanid), \
}
static struct ieee80211_channel b43legacy_bg_chantable[] = {
CHANTAB_ENT(1, 2412),
CHANTAB_ENT(2, 2417),
CHANTAB_ENT(3, 2422),
CHANTAB_ENT(4, 2427),
CHANTAB_ENT(5, 2432),
CHANTAB_ENT(6, 2437),
CHANTAB_ENT(7, 2442),
CHANTAB_ENT(8, 2447),
CHANTAB_ENT(9, 2452),
CHANTAB_ENT(10, 2457),
CHANTAB_ENT(11, 2462),
CHANTAB_ENT(12, 2467),
CHANTAB_ENT(13, 2472),
CHANTAB_ENT(14, 2484),
};
static struct ieee80211_supported_band b43legacy_band_2GHz_BPHY = {
.channels = b43legacy_bg_chantable,
.n_channels = ARRAY_SIZE(b43legacy_bg_chantable),
.bitrates = b43legacy_b_ratetable,
.n_bitrates = b43legacy_b_ratetable_size,
};
static struct ieee80211_supported_band b43legacy_band_2GHz_GPHY = {
.channels = b43legacy_bg_chantable,
.n_channels = ARRAY_SIZE(b43legacy_bg_chantable),
.bitrates = b43legacy_g_ratetable,
.n_bitrates = b43legacy_g_ratetable_size,
};
static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev);
static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev);
static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev);
static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev);
static int b43legacy_ratelimit(struct b43legacy_wl *wl)
{
if (!wl || !wl->current_dev)
return 1;
if (b43legacy_status(wl->current_dev) < B43legacy_STAT_STARTED)
return 1;
/* We are up and running.
* Ratelimit the messages to avoid DoS over the net. */
return net_ratelimit();
}
void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!b43legacy_ratelimit(wl))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_INFO "b43legacy-%s: %pV",
(wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
va_end(args);
}
void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!b43legacy_ratelimit(wl))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_ERR "b43legacy-%s ERROR: %pV",
(wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
va_end(args);
}
void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if (!b43legacy_ratelimit(wl))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_WARNING "b43legacy-%s warning: %pV",
(wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
va_end(args);
}
#if B43legacy_DEBUG
void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_DEBUG "b43legacy-%s debug: %pV",
(wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
va_end(args);
}
#endif /* DEBUG */
static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset,
u32 val)
{
u32 status;
B43legacy_WARN_ON(offset % 4 != 0);
status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
if (status & B43legacy_MACCTL_BE)
val = swab32(val);
b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset);
mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val);
}
static inline
void b43legacy_shm_control_word(struct b43legacy_wldev *dev,
u16 routing, u16 offset)
{
u32 control;
/* "offset" is the WORD offset. */
control = routing;
control <<= 16;
control |= offset;
b43legacy_write32(dev, B43legacy_MMIO_SHM_CONTROL, control);
}
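/*
 * Layout example for the control word built above (illustrative):
 * the routing id occupies the high 16 bits and the word offset the
 * low 16 bits. A caller with byte offset 0x0008 in shared SHM first
 * converts it to word offset 0x0002 (offset >> 2, as done in the
 * accessors below), so the register is written as
 * (routing << 16) | 0x0002.
 */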
u32 b43legacy_shm_read32(struct b43legacy_wldev *dev,
u16 routing, u16 offset)
{
u32 ret;
if (routing == B43legacy_SHM_SHARED) {
B43legacy_WARN_ON((offset & 0x0001) != 0);
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
ret = b43legacy_read16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED);
ret <<= 16;
b43legacy_shm_control_word(dev, routing,
(offset >> 2) + 1);
ret |= b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA);
return ret;
}
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
ret = b43legacy_read32(dev, B43legacy_MMIO_SHM_DATA);
return ret;
}
u16 b43legacy_shm_read16(struct b43legacy_wldev *dev,
u16 routing, u16 offset)
{
u16 ret;
if (routing == B43legacy_SHM_SHARED) {
B43legacy_WARN_ON((offset & 0x0001) != 0);
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
ret = b43legacy_read16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED);
return ret;
}
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
ret = b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA);
return ret;
}
void b43legacy_shm_write32(struct b43legacy_wldev *dev,
u16 routing, u16 offset,
u32 value)
{
if (routing == B43legacy_SHM_SHARED) {
B43legacy_WARN_ON((offset & 0x0001) != 0);
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
mmiowb();
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
(value >> 16) & 0xffff);
mmiowb();
b43legacy_shm_control_word(dev, routing,
(offset >> 2) + 1);
mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA,
value & 0xffff);
return;
}
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value);
}
void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset,
u16 value)
{
if (routing == B43legacy_SHM_SHARED) {
B43legacy_WARN_ON((offset & 0x0001) != 0);
if (offset & 0x0003) {
/* Unaligned access */
b43legacy_shm_control_word(dev, routing, offset >> 2);
mmiowb();
b43legacy_write16(dev,
B43legacy_MMIO_SHM_DATA_UNALIGNED,
value);
return;
}
offset >>= 2;
}
b43legacy_shm_control_word(dev, routing, offset);
mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value);
}
/* Read HostFlags */
u32 b43legacy_hf_read(struct b43legacy_wldev *dev)
{
u32 ret;
ret = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_HOSTFHI);
ret <<= 16;
ret |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_HOSTFLO);
return ret;
}
/* Write HostFlags */
void b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value)
{
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_HOSTFLO,
(value & 0x0000FFFF));
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_HOSTFHI,
((value & 0xFFFF0000) >> 16));
}
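/* Illustrative usage (not taken from this file): setting a single host
* flag would be a read-modify-write through the two helpers above, e.g.
* b43legacy_hf_write(dev, b43legacy_hf_read(dev) | hf_bit);
* where hf_bit stands in for one of the driver's host-flag bit
* definitions.
*/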
void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf)
{
/* We need to be careful. As we read the TSF from multiple
* registers, we should take care of register overflows.
* In theory, the whole tsf read process should be atomic.
* We try to be atomic here by restarting the read process
* if any of the high registers changed (overflowed).
*/
if (dev->dev->id.revision >= 3) {
u32 low;
u32 high;
u32 high2;
do {
high = b43legacy_read32(dev,
B43legacy_MMIO_REV3PLUS_TSF_HIGH);
low = b43legacy_read32(dev,
B43legacy_MMIO_REV3PLUS_TSF_LOW);
high2 = b43legacy_read32(dev,
B43legacy_MMIO_REV3PLUS_TSF_HIGH);
} while (unlikely(high != high2));
*tsf = high;
*tsf <<= 32;
*tsf |= low;
} else {
u64 tmp;
u16 v0;
u16 v1;
u16 v2;
u16 v3;
u16 test1;
u16 test2;
u16 test3;
do {
v3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3);
v2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2);
v1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1);
v0 = b43legacy_read16(dev, B43legacy_MMIO_TSF_0);
test3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3);
test2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2);
test1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1);
} while (v3 != test3 || v2 != test2 || v1 != test1);
*tsf = v3;
*tsf <<= 48;
tmp = v2;
tmp <<= 32;
*tsf |= tmp;
tmp = v1;
tmp <<= 16;
*tsf |= tmp;
*tsf |= v0;
}
}
static void b43legacy_time_lock(struct b43legacy_wldev *dev)
{
u32 status;
status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
status |= B43legacy_MACCTL_TBTTHOLD;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
mmiowb();
}
static void b43legacy_time_unlock(struct b43legacy_wldev *dev)
{
u32 status;
status = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
status &= ~B43legacy_MACCTL_TBTTHOLD;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, status);
}
static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf)
{
/* Be careful with the in-progress timer.
* First zero out the low register, so we have a full
* register-overflow duration to complete the operation.
*/
if (dev->dev->id.revision >= 3) {
u32 lo = (tsf & 0x00000000FFFFFFFFULL);
u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32;
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0);
mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH,
hi);
mmiowb();
b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW,
lo);
} else {
u16 v0 = (tsf & 0x000000000000FFFFULL);
u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16;
u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32;
u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48;
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0);
mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3);
mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2);
mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1);
mmiowb();
b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0);
}
}
void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf)
{
b43legacy_time_lock(dev);
b43legacy_tsf_write_locked(dev, tsf);
b43legacy_time_unlock(dev);
}
static
void b43legacy_macfilter_set(struct b43legacy_wldev *dev,
u16 offset, const u8 *mac)
{
static const u8 zero_addr[ETH_ALEN] = { 0 };
u16 data;
if (!mac)
mac = zero_addr;
offset |= 0x0020;
b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_CONTROL, offset);
data = mac[0];
data |= mac[1] << 8;
b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data);
data = mac[2];
data |= mac[3] << 8;
b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data);
data = mac[4];
data |= mac[5] << 8;
b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data);
}
static void b43legacy_write_mac_bssid_templates(struct b43legacy_wldev *dev)
{
static const u8 zero_addr[ETH_ALEN] = { 0 };
const u8 *mac = dev->wl->mac_addr;
const u8 *bssid = dev->wl->bssid;
u8 mac_bssid[ETH_ALEN * 2];
int i;
u32 tmp;
if (!bssid)
bssid = zero_addr;
if (!mac)
mac = zero_addr;
b43legacy_macfilter_set(dev, B43legacy_MACFILTER_BSSID, bssid);
memcpy(mac_bssid, mac, ETH_ALEN);
memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN);
/* Write our MAC address and BSSID to template ram */
for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) {
tmp = (u32)(mac_bssid[i + 0]);
tmp |= (u32)(mac_bssid[i + 1]) << 8;
tmp |= (u32)(mac_bssid[i + 2]) << 16;
tmp |= (u32)(mac_bssid[i + 3]) << 24;
b43legacy_ram_write(dev, 0x20 + i, tmp);
b43legacy_ram_write(dev, 0x78 + i, tmp);
b43legacy_ram_write(dev, 0x478 + i, tmp);
}
}
static void b43legacy_upload_card_macaddress(struct b43legacy_wldev *dev)
{
b43legacy_write_mac_bssid_templates(dev);
b43legacy_macfilter_set(dev, B43legacy_MACFILTER_SELF,
dev->wl->mac_addr);
}
static void b43legacy_set_slot_time(struct b43legacy_wldev *dev,
u16 slot_time)
{
/* slot_time is in usec. */
if (dev->phy.type != B43legacy_PHYTYPE_G)
return;
b43legacy_write16(dev, 0x684, 510 + slot_time);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0010,
slot_time);
}
static void b43legacy_short_slot_timing_enable(struct b43legacy_wldev *dev)
{
b43legacy_set_slot_time(dev, 9);
}
static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev)
{
b43legacy_set_slot_time(dev, 20);
}
/* Synchronize IRQ top- and bottom-half.
* IRQs must be masked before calling this.
* This must not be called with the irq_lock held.
*/
static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev)
{
synchronize_irq(dev->dev->irq);
tasklet_kill(&dev->isr_tasklet);
}
/* DummyTransmission function, as documented on
* http://bcm-specs.sipsolutions.net/DummyTransmission
*/
void b43legacy_dummy_transmission(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
unsigned int i;
unsigned int max_loop;
u16 value;
u32 buffer[5] = {
0x00000000,
0x00D40000,
0x00000000,
0x01000000,
0x00000000,
};
switch (phy->type) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
max_loop = 0xFA;
buffer[0] = 0x000B846E;
break;
default:
B43legacy_BUG_ON(1);
return;
}
for (i = 0; i < 5; i++)
b43legacy_ram_write(dev, i * 4, buffer[i]);
/* dummy read follows */
b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
b43legacy_write16(dev, 0x0568, 0x0000);
b43legacy_write16(dev, 0x07C0, 0x0000);
b43legacy_write16(dev, 0x050C, 0x0000);
b43legacy_write16(dev, 0x0508, 0x0000);
b43legacy_write16(dev, 0x050A, 0x0000);
b43legacy_write16(dev, 0x054C, 0x0000);
b43legacy_write16(dev, 0x056A, 0x0014);
b43legacy_write16(dev, 0x0568, 0x0826);
b43legacy_write16(dev, 0x0500, 0x0000);
b43legacy_write16(dev, 0x0502, 0x0030);
if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
b43legacy_radio_write16(dev, 0x0051, 0x0017);
for (i = 0x00; i < max_loop; i++) {
value = b43legacy_read16(dev, 0x050E);
if (value & 0x0080)
break;
udelay(10);
}
for (i = 0x00; i < 0x0A; i++) {
value = b43legacy_read16(dev, 0x050E);
if (value & 0x0400)
break;
udelay(10);
}
for (i = 0x00; i < 0x0A; i++) {
value = b43legacy_read16(dev, 0x0690);
if (!(value & 0x0100))
break;
udelay(10);
}
if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
b43legacy_radio_write16(dev, 0x0051, 0x0037);
}
/* Turn the Analog ON/OFF */
static void b43legacy_switch_analog(struct b43legacy_wldev *dev, int on)
{
b43legacy_write16(dev, B43legacy_MMIO_PHY0, on ? 0 : 0xF4);
}
void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
{
u32 tmslow;
u32 macctl;
flags |= B43legacy_TMSLOW_PHYCLKEN;
flags |= B43legacy_TMSLOW_PHYRESET;
ssb_device_enable(dev->dev, flags);
msleep(2); /* Wait for the PLL to turn on. */
/* Now take the PHY out of Reset again */
tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
tmslow |= SSB_TMSLOW_FGC;
tmslow &= ~B43legacy_TMSLOW_PHYRESET;
ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
ssb_read32(dev->dev, SSB_TMSLOW); /* flush */
msleep(1);
tmslow &= ~SSB_TMSLOW_FGC;
ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
ssb_read32(dev->dev, SSB_TMSLOW); /* flush */
msleep(1);
/* Turn Analog ON */
b43legacy_switch_analog(dev, 1);
macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
macctl &= ~B43legacy_MACCTL_GMODE;
if (flags & B43legacy_TMSLOW_GMODE) {
macctl |= B43legacy_MACCTL_GMODE;
dev->phy.gmode = true;
} else
dev->phy.gmode = false;
macctl |= B43legacy_MACCTL_IHR_ENABLED;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
}
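/* Decode one hardware TX status report. XMITSTAT_0 carries the cookie
* in its upper 16 bits and packed counters/flags in its lower 16 bits;
* XMITSTAT_1 carries the sequence number and PHY status. Bit 0 of
* XMITSTAT_0 indicates whether a valid report is pending, so the loop
* below runs until the status FIFO is drained.
*/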
static void handle_irq_transmit_status(struct b43legacy_wldev *dev)
{
u32 v0;
u32 v1;
u16 tmp;
struct b43legacy_txstatus stat;
while (1) {
v0 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0);
if (!(v0 & 0x00000001))
break;
v1 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1);
stat.cookie = (v0 >> 16);
stat.seq = (v1 & 0x0000FFFF);
stat.phy_stat = ((v1 & 0x00FF0000) >> 16);
tmp = (v0 & 0x0000FFFF);
stat.frame_count = ((tmp & 0xF000) >> 12);
stat.rts_count = ((tmp & 0x0F00) >> 8);
stat.supp_reason = ((tmp & 0x001C) >> 2);
stat.pm_indicated = !!(tmp & 0x0080);
stat.intermediate = !!(tmp & 0x0040);
stat.for_ampdu = !!(tmp & 0x0020);
stat.acked = !!(tmp & 0x0002);
b43legacy_handle_txstatus(dev, &stat);
}
}
static void drain_txstatus_queue(struct b43legacy_wldev *dev)
{
u32 dummy;
if (dev->dev->id.revision < 5)
return;
/* Read all entries from the microcode TXstatus FIFO
* and throw them away.
*/
while (1) {
dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0);
if (!(dummy & 0x00000001))
break;
dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1);
}
}
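/* The JSSI noise samples are exchanged with the microcode through two
* 16-bit shared-memory words at byte offsets 0x408 (low) and 0x40A
* (high), packed as four 8-bit samples. A byte value of 0x7F appears
* to act as a "sample not ready" marker (see handle_irq_noise below).
*/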
static u32 b43legacy_jssi_read(struct b43legacy_wldev *dev)
{
u32 val = 0;
val = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x40A);
val <<= 16;
val |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x408);
return val;
}
static void b43legacy_jssi_write(struct b43legacy_wldev *dev, u32 jssi)
{
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x408,
(jssi & 0x0000FFFF));
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x40A,
(jssi & 0xFFFF0000) >> 16);
}
static void b43legacy_generate_noise_sample(struct b43legacy_wldev *dev)
{
b43legacy_jssi_write(dev, 0x7F7F7F7F);
b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
| B43legacy_MACCMD_BGNOISE);
B43legacy_WARN_ON(dev->noisecalc.channel_at_start !=
dev->phy.channel);
}
static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
{
/* Top half of Link Quality calculation. */
if (dev->noisecalc.calculation_running)
return;
dev->noisecalc.channel_at_start = dev->phy.channel;
dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43legacy_generate_noise_sample(dev);
}
static void handle_irq_noise(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
u16 tmp;
u8 noise[4];
u8 i;
u8 j;
s32 average;
/* Bottom half of Link Quality calculation. */
B43legacy_WARN_ON(!dev->noisecalc.calculation_running);
if (dev->noisecalc.channel_at_start != phy->channel)
goto drop_calculation;
*((__le32 *)noise) = cpu_to_le32(b43legacy_jssi_read(dev));
if (noise[0] == 0x7F || noise[1] == 0x7F ||
noise[2] == 0x7F || noise[3] == 0x7F)
goto generate_new;
/* Get the noise samples. */
B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8);
i = dev->noisecalc.nr_samples;
noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1);
dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]];
dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]];
dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]];
dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]];
dev->noisecalc.nr_samples++;
if (dev->noisecalc.nr_samples == 8) {
/* Calculate the Link Quality by the noise samples. */
average = 0;
for (i = 0; i < 8; i++) {
for (j = 0; j < 4; j++)
average += dev->noisecalc.samples[i][j];
}
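/* Average over 8 samples x 4 antennas, then scale by 125/128
* (~0.977) with rounding before applying the correction offsets. */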
average /= (8 * 4);
average *= 125;
average += 64;
average /= 128;
tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
0x40C);
tmp = (tmp / 128) & 0x1F;
if (tmp >= 8)
average += 2;
else
average -= 25;
if (tmp == 8)
average -= 72;
else
average -= 48;
dev->stats.link_noise = average;
drop_calculation:
dev->noisecalc.calculation_running = false;
return;
}
generate_new:
b43legacy_generate_noise_sample(dev);
}
static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
{
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
/* TODO: PS TBTT */
} else {
if (1/*FIXME: the last PSpoll frame was sent successfully */)
b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43legacy_wldev *dev)
{
if (dev->dfq_valid) {
b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
| B43legacy_MACCMD_DFQ_VALID);
dev->dfq_valid = false;
}
}
static void handle_irq_pmq(struct b43legacy_wldev *dev)
{
u32 tmp;
/* TODO: AP mode. */
while (1) {
tmp = b43legacy_read32(dev, B43legacy_MMIO_PS_STATUS);
if (!(tmp & 0x00000008))
break;
}
/* 16bit write is odd, but correct. */
b43legacy_write16(dev, B43legacy_MMIO_PS_STATUS, 0x0002);
}
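/* Write a frame template (e.g. a beacon) to template RAM: first the
* 4-byte PLCP header word, then the frame data shifted up by the two
* remaining PLCP bytes, and finally the total length to SHM so the
* microcode knows how much to transmit.
*/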
static void b43legacy_write_template_common(struct b43legacy_wldev *dev,
const u8 *data, u16 size,
u16 ram_offset,
u16 shm_size_offset, u8 rate)
{
u32 i;
u32 tmp;
struct b43legacy_plcp_hdr4 plcp;
plcp.data = 0;
b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate);
b43legacy_ram_write(dev, ram_offset, le32_to_cpu(plcp.data));
ram_offset += sizeof(u32);
/* The PLCP is 6 bytes long, but we have only written 4 bytes so far.
* So leave the first two bytes of the next write blank.
*/
tmp = (u32)(data[0]) << 16;
tmp |= (u32)(data[1]) << 24;
b43legacy_ram_write(dev, ram_offset, tmp);
ram_offset += sizeof(u32);
for (i = 2; i < size; i += sizeof(u32)) {
tmp = (u32)(data[i + 0]);
if (i + 1 < size)
tmp |= (u32)(data[i + 1]) << 8;
if (i + 2 < size)
tmp |= (u32)(data[i + 2]) << 16;
if (i + 3 < size)
tmp |= (u32)(data[i + 3]) << 24;
b43legacy_ram_write(dev, ram_offset + i - 2, tmp);
}
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_size_offset,
size + sizeof(struct b43legacy_plcp_hdr6));
}
/* Convert a b43legacy antenna number value to the PHY TX control value. */
static u16 b43legacy_antenna_to_phyctl(int antenna)
{
switch (antenna) {
case B43legacy_ANTENNA0:
return B43legacy_TX4_PHY_ANT0;
case B43legacy_ANTENNA1:
return B43legacy_TX4_PHY_ANT1;
}
return B43legacy_TX4_PHY_ANTLAST;
}
static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
u16 ram_offset,
u16 shm_size_offset)
{
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
len = min((size_t)dev->wl->current_beacon->len,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
b43legacy_write_template_common(dev, (const u8 *)bcn, len, ram_offset,
shm_size_offset, rate);
/* Write the PHY TX control parameters. */
antenna = B43legacy_ANTENNA_DEFAULT;
antenna = b43legacy_antenna_to_phyctl(antenna);
ctl = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_BEACPHYCTL);
/* We can't send beacons with short preamble. Would get PHY errors. */
ctl &= ~B43legacy_TX4_PHY_SHORTPRMBL;
ctl &= ~B43legacy_TX4_PHY_ANT;
ctl &= ~B43legacy_TX4_PHY_ENC;
ctl |= antenna;
ctl |= B43legacy_TX4_PHY_ENC_CCK;
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_BEACPHYCTL, ctl);
/* Find the position of the TIM and the DTIM_period value
* and write them to SHM. */
ie = bcn->u.beacon.variable;
variable_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
for (i = 0; i < variable_len - 2; ) {
uint8_t ie_id, ie_len;
ie_id = ie[i];
ie_len = ie[i + 1];
if (ie_id == 5) {
u16 tim_position;
u16 dtim_period;
/* This is the TIM Information Element */
/* Check whether the ie_len is in the beacon data range. */
if (variable_len < ie_len + 2 + i)
break;
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
tim_found = true;
tim_position = sizeof(struct b43legacy_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt,
u.beacon.variable);
tim_position += i;
dtim_period = ie[i + 3];
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_TIMPOS, tim_position);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_DTIMP, dtim_period);
break;
}
i += ie_len + 2;
}
if (!tim_found) {
b43legacywarn(dev->wl, "Did not find a valid TIM IE in the "
"beacon template packet. AP or IBSS operation "
"may be broken.\n");
} else
b43legacydbg(dev->wl, "Updated beacon template\n");
}
static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
u16 shm_offset, u16 size,
struct ieee80211_rate *rate)
{
struct b43legacy_plcp_hdr4 plcp;
u32 tmp;
__le16 dur;
plcp.data = 0;
b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
size,
rate);
/* Write PLCP in two parts and timing for packet transfer */
tmp = le32_to_cpu(plcp.data);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset,
tmp & 0xFFFF);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 2,
tmp >> 16);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 6,
le16_to_cpu(dur));
}
/* Instead of using a custom probe response template, this function
* just patches the custom beacon template by:
* 1) Changing the packet type
* 2) Patching the duration field
* 3) Stripping the TIM
*/
static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
u16 *dest_size,
struct ieee80211_rate *rate)
{
const u8 *src_data;
u8 *dest_data;
u16 src_size, elem_size, src_pos, dest_pos;
__le16 dur;
struct ieee80211_hdr *hdr;
size_t ie_start;
src_size = dev->wl->current_beacon->len;
src_data = (const u8 *)dev->wl->current_beacon->data;
/* Get the start offset of the variable IEs in the packet. */
ie_start = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
B43legacy_WARN_ON(ie_start != offsetof(struct ieee80211_mgmt,
u.beacon.variable));
if (B43legacy_WARN_ON(src_size < ie_start))
return NULL;
dest_data = kmalloc(src_size, GFP_ATOMIC);
if (unlikely(!dest_data))
return NULL;
/* Copy the static data and all Information Elements, except the TIM. */
memcpy(dest_data, src_data, ie_start);
src_pos = ie_start;
dest_pos = ie_start;
for ( ; src_pos < src_size - 2; src_pos += elem_size) {
elem_size = src_data[src_pos + 1] + 2;
if (src_data[src_pos] == 5) {
/* This is the TIM. */
continue;
}
memcpy(dest_data + dest_pos, src_data + src_pos, elem_size);
dest_pos += elem_size;
}
*dest_size = dest_pos;
hdr = (struct ieee80211_hdr *)dest_data;
/* Set the frame control. */
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
*dest_size,
rate);
hdr->duration_id = dur;
return dest_data;
}
static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
u16 ram_offset,
u16 shm_size_offset,
struct ieee80211_rate *rate)
{
const u8 *probe_resp_data;
u16 size;
size = dev->wl->current_beacon->len;
probe_resp_data = b43legacy_generate_probe_resp(dev, &size, rate);
if (unlikely(!probe_resp_data))
return;
/* Looks like PLCP headers plus packet timings are stored for
* all possible basic rates
*/
b43legacy_write_probe_resp_plcp(dev, 0x31A, size,
&b43legacy_b_ratetable[0]);
b43legacy_write_probe_resp_plcp(dev, 0x32C, size,
&b43legacy_b_ratetable[1]);
b43legacy_write_probe_resp_plcp(dev, 0x33E, size,
&b43legacy_b_ratetable[2]);
b43legacy_write_probe_resp_plcp(dev, 0x350, size,
&b43legacy_b_ratetable[3]);
size = min((size_t)size,
0x200 - sizeof(struct b43legacy_plcp_hdr6));
b43legacy_write_template_common(dev, probe_resp_data,
size, ram_offset,
shm_size_offset, rate->hw_value);
kfree(probe_resp_data);
}
static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
if (wl->beacon0_uploaded)
return;
b43legacy_write_beacon_template(dev, 0x68, 0x18);
/* FIXME: Probe resp upload doesn't really belong here,
* but we don't use that feature anyway. */
b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
&__b43legacy_ratetable[3]);
wl->beacon0_uploaded = true;
}
static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
if (wl->beacon1_uploaded)
return;
b43legacy_write_beacon_template(dev, 0x468, 0x1A);
wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
u32 cmd, beacon0_valid, beacon1_valid;
if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
return;
/* This is the bottom half of the asynchronous beacon update. */
/* Ignore interrupt in the future. */
dev->irq_mask &= ~B43legacy_IRQ_BEACON;
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
beacon0_valid = (cmd & B43legacy_MACCMD_BEACON0_VALID);
beacon1_valid = (cmd & B43legacy_MACCMD_BEACON1_VALID);
/* Schedule interrupt manually, if busy. */
if (beacon0_valid && beacon1_valid) {
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, B43legacy_IRQ_BEACON);
dev->irq_mask |= B43legacy_IRQ_BEACON;
return;
}
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
wl->beacon_templates_virgin = false;
b43legacy_upload_beacon0(dev);
b43legacy_upload_beacon1(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
cmd |= B43legacy_MACCMD_BEACON0_VALID;
b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
} else {
if (!beacon0_valid) {
b43legacy_upload_beacon0(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
cmd |= B43legacy_MACCMD_BEACON0_VALID;
b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
} else if (!beacon1_valid) {
b43legacy_upload_beacon1(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
cmd |= B43legacy_MACCMD_BEACON1_VALID;
b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
}
}
}
static void b43legacy_beacon_update_trigger_work(struct work_struct *work)
{
struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
beacon_update_trigger);
struct b43legacy_wldev *dev;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (likely(dev && (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED))) {
spin_lock_irq(&wl->irq_lock);
/* Update beacon right away or defer to IRQ. */
handle_irq_beacon(dev);
/* The handler might have updated the IRQ mask. */
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
dev->irq_mask);
mmiowb();
spin_unlock_irq(&wl->irq_lock);
}
mutex_unlock(&wl->mutex);
}
/* Asynchronously update the packet templates in template RAM.
* Locking: Requires wl->irq_lock to be locked. */
static void b43legacy_update_templates(struct b43legacy_wl *wl)
{
struct sk_buff *beacon;
/* This is the top half of the asynchronous beacon update. The bottom
* half is the beacon IRQ. The beacon update must be asynchronous to
* avoid sending an invalid beacon. This can happen, for example, if
* the firmware transmits a beacon while we are updating it. */
/* We could modify the existing beacon and set the aid bit in the TIM
* field, but that would probably require resizing and moving of data
* within the beacon template. Simply request a new beacon and let
* mac80211 do the hard work. */
beacon = ieee80211_beacon_get(wl->hw, wl->vif);
if (unlikely(!beacon))
return;
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
wl->beacon0_uploaded = false;
wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev,
u16 beacon_int)
{
b43legacy_time_lock(dev);
if (dev->dev->id.revision >= 3) {
b43legacy_write32(dev, B43legacy_MMIO_TSF_CFP_REP,
(beacon_int << 16));
b43legacy_write32(dev, B43legacy_MMIO_TSF_CFP_START,
(beacon_int << 10));
} else {
b43legacy_write16(dev, 0x606, (beacon_int >> 6));
b43legacy_write16(dev, 0x610, beacon_int);
}
b43legacy_time_unlock(dev);
b43legacydbg(dev->wl, "Set beacon interval to %u\n", beacon_int);
}
static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
{
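/* Intentionally left empty: microcode debug IRQs are acknowledged in
* the interrupt top half and otherwise ignored here. */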
}
/* Interrupt handler bottom-half */
static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
{
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
int i;
unsigned long flags;
spin_lock_irqsave(&dev->wl->irq_lock, flags);
B43legacy_WARN_ON(b43legacy_status(dev) <
B43legacy_STAT_INITIALIZED);
reason = dev->irq_reason;
for (i = 0; i < ARRAY_SIZE(dma_reason); i++) {
dma_reason[i] = dev->dma_reason[i];
merged_dma_reason |= dma_reason[i];
}
if (unlikely(reason & B43legacy_IRQ_MAC_TXERR))
b43legacyerr(dev->wl, "MAC transmission error\n");
if (unlikely(reason & B43legacy_IRQ_PHY_TXERR)) {
b43legacyerr(dev->wl, "PHY transmission error\n");
rmb();
if (unlikely(atomic_dec_and_test(&dev->phy.txerr_cnt))) {
b43legacyerr(dev->wl, "Too many PHY TX errors, "
"restarting the controller\n");
b43legacy_controller_restart(dev, "PHY TX errors");
}
}
if (unlikely(merged_dma_reason & (B43legacy_DMAIRQ_FATALMASK |
B43legacy_DMAIRQ_NONFATALMASK))) {
if (merged_dma_reason & B43legacy_DMAIRQ_FATALMASK) {
b43legacyerr(dev->wl, "Fatal DMA error: "
"0x%08X, 0x%08X, 0x%08X, "
"0x%08X, 0x%08X, 0x%08X\n",
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
b43legacy_controller_restart(dev, "DMA error");
mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
return;
}
if (merged_dma_reason & B43legacy_DMAIRQ_NONFATALMASK)
b43legacyerr(dev->wl, "DMA error: "
"0x%08X, 0x%08X, 0x%08X, "
"0x%08X, 0x%08X, 0x%08X\n",
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
}
if (unlikely(reason & B43legacy_IRQ_UCODE_DEBUG))
handle_irq_ucode_debug(dev);
if (reason & B43legacy_IRQ_TBTT_INDI)
handle_irq_tbtt_indication(dev);
if (reason & B43legacy_IRQ_ATIM_END)
handle_irq_atim_end(dev);
if (reason & B43legacy_IRQ_BEACON)
handle_irq_beacon(dev);
if (reason & B43legacy_IRQ_PMQ)
handle_irq_pmq(dev);
if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK)
;/*TODO*/
if (reason & B43legacy_IRQ_NOISESAMPLE_OK)
handle_irq_noise(dev);
/* Check the DMA reason registers for received data. */
if (dma_reason[0] & B43legacy_DMAIRQ_RX_DONE) {
if (b43legacy_using_pio(dev))
b43legacy_pio_rx(dev->pio.queue0);
else
b43legacy_dma_rx(dev->dma.rx_ring0);
}
B43legacy_WARN_ON(dma_reason[1] & B43legacy_DMAIRQ_RX_DONE);
B43legacy_WARN_ON(dma_reason[2] & B43legacy_DMAIRQ_RX_DONE);
if (dma_reason[3] & B43legacy_DMAIRQ_RX_DONE) {
if (b43legacy_using_pio(dev))
b43legacy_pio_rx(dev->pio.queue3);
else
b43legacy_dma_rx(dev->dma.rx_ring3);
}
B43legacy_WARN_ON(dma_reason[4] & B43legacy_DMAIRQ_RX_DONE);
B43legacy_WARN_ON(dma_reason[5] & B43legacy_DMAIRQ_RX_DONE);
if (reason & B43legacy_IRQ_TX_OK)
handle_irq_transmit_status(dev);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}
static void pio_irq_workaround(struct b43legacy_wldev *dev,
u16 base, int queueidx)
{
u16 rxctl;
rxctl = b43legacy_read16(dev, base + B43legacy_PIO_RXCTL);
if (rxctl & B43legacy_PIO_RXCTL_DATAAVAILABLE)
dev->dma_reason[queueidx] |= B43legacy_DMAIRQ_RX_DONE;
else
dev->dma_reason[queueidx] &= ~B43legacy_DMAIRQ_RX_DONE;
}
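/* Acknowledge the serviced interrupts by writing the reason bits back
* to the general and per-DMA-controller reason registers. On PIO
* devices with core rev < 3 the DMA RX-done bits are first synthesized
* from the PIO RX-control registers (see pio_irq_workaround above).
*/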
static void b43legacy_interrupt_ack(struct b43legacy_wldev *dev, u32 reason)
{
if (b43legacy_using_pio(dev) &&
(dev->dev->id.revision < 3) &&
(!(reason & B43legacy_IRQ_PIO_WORKAROUND))) {
/* Apply a PIO specific workaround to the dma_reasons */
pio_irq_workaround(dev, B43legacy_MMIO_PIO1_BASE, 0);
pio_irq_workaround(dev, B43legacy_MMIO_PIO2_BASE, 1);
pio_irq_workaround(dev, B43legacy_MMIO_PIO3_BASE, 2);
pio_irq_workaround(dev, B43legacy_MMIO_PIO4_BASE, 3);
}
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, reason);
b43legacy_write32(dev, B43legacy_MMIO_DMA0_REASON,
dev->dma_reason[0]);
b43legacy_write32(dev, B43legacy_MMIO_DMA1_REASON,
dev->dma_reason[1]);
b43legacy_write32(dev, B43legacy_MMIO_DMA2_REASON,
dev->dma_reason[2]);
b43legacy_write32(dev, B43legacy_MMIO_DMA3_REASON,
dev->dma_reason[3]);
b43legacy_write32(dev, B43legacy_MMIO_DMA4_REASON,
dev->dma_reason[4]);
b43legacy_write32(dev, B43legacy_MMIO_DMA5_REASON,
dev->dma_reason[5]);
}
/* Interrupt handler top-half */
static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id)
{
irqreturn_t ret = IRQ_NONE;
struct b43legacy_wldev *dev = dev_id;
u32 reason;
B43legacy_WARN_ON(!dev);
spin_lock(&dev->wl->irq_lock);
if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED))
/* This can only happen on shared IRQ lines. */
goto out;
reason = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
if (reason == 0xffffffff) /* shared IRQ */
goto out;
ret = IRQ_HANDLED;
reason &= dev->irq_mask;
if (!reason)
goto out;
dev->dma_reason[0] = b43legacy_read32(dev,
B43legacy_MMIO_DMA0_REASON)
& 0x0001DC00;
dev->dma_reason[1] = b43legacy_read32(dev,
B43legacy_MMIO_DMA1_REASON)
& 0x0000DC00;
dev->dma_reason[2] = b43legacy_read32(dev,
B43legacy_MMIO_DMA2_REASON)
& 0x0000DC00;
dev->dma_reason[3] = b43legacy_read32(dev,
B43legacy_MMIO_DMA3_REASON)
& 0x0001DC00;
dev->dma_reason[4] = b43legacy_read32(dev,
B43legacy_MMIO_DMA4_REASON)
& 0x0000DC00;
dev->dma_reason[5] = b43legacy_read32(dev,
B43legacy_MMIO_DMA5_REASON)
& 0x0000DC00;
b43legacy_interrupt_ack(dev, reason);
/* Disable all IRQs. They are enabled again in the bottom half. */
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
/* Save the reason code and call our bottom half. */
dev->irq_reason = reason;
tasklet_schedule(&dev->isr_tasklet);
out:
mmiowb();
spin_unlock(&dev->wl->irq_lock);
return ret;
}
static void b43legacy_release_firmware(struct b43legacy_wldev *dev)
{
release_firmware(dev->fw.ucode);
dev->fw.ucode = NULL;
release_firmware(dev->fw.pcm);
dev->fw.pcm = NULL;
release_firmware(dev->fw.initvals);
dev->fw.initvals = NULL;
release_firmware(dev->fw.initvals_band);
dev->fw.initvals_band = NULL;
}
static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
{
b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/"
"Drivers/b43#devicefirmware "
"and download the correct firmware (version 3).\n");
}
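/* Request one firmware image and sanity-check its container. Each file
* starts with a struct b43legacy_fw_header whose fields, as used below,
* are a type byte (ucode/PCM/initvals), a version byte that must be 1,
* and a big-endian payload size that must match the file size for
* ucode and PCM images.
*/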
static int do_request_fw(struct b43legacy_wldev *dev,
const char *name,
const struct firmware **fw)
{
char path[sizeof(modparam_fwpostfix) + 32];
struct b43legacy_fw_header *hdr;
u32 size;
int err;
if (!name)
return 0;
snprintf(path, ARRAY_SIZE(path),
"b43legacy%s/%s.fw",
modparam_fwpostfix, name);
err = request_firmware(fw, path, dev->dev->dev);
if (err) {
b43legacyerr(dev->wl, "Firmware file \"%s\" not found "
"or load failed.\n", path);
return err;
}
if ((*fw)->size < sizeof(struct b43legacy_fw_header))
goto err_format;
hdr = (struct b43legacy_fw_header *)((*fw)->data);
switch (hdr->type) {
case B43legacy_FW_TYPE_UCODE:
case B43legacy_FW_TYPE_PCM:
size = be32_to_cpu(hdr->size);
if (size != (*fw)->size - sizeof(struct b43legacy_fw_header))
goto err_format;
/* fallthrough */
case B43legacy_FW_TYPE_IV:
if (hdr->ver != 1)
goto err_format;
break;
default:
goto err_format;
}
return err;
err_format:
b43legacyerr(dev->wl, "Firmware file \"%s\" format error.\n", path);
return -EPROTO;
}
static int b43legacy_one_core_attach(struct ssb_device *dev,
struct b43legacy_wl *wl);
static void b43legacy_one_core_detach(struct ssb_device *dev);
static void b43legacy_request_firmware(struct work_struct *work)
{
struct b43legacy_wl *wl = container_of(work,
struct b43legacy_wl, firmware_load);
struct b43legacy_wldev *dev = wl->current_dev;
struct b43legacy_firmware *fw = &dev->fw;
const u8 rev = dev->dev->id.revision;
const char *filename;
int err;
/* do dummy read */
ssb_read32(dev->dev, SSB_TMSHIGH);
if (!fw->ucode) {
if (rev == 2)
filename = "ucode2";
else if (rev == 4)
filename = "ucode4";
else
filename = "ucode5";
err = do_request_fw(dev, filename, &fw->ucode);
if (err)
goto err_load;
}
if (!fw->pcm) {
if (rev < 5)
filename = "pcm4";
else
filename = "pcm5";
err = do_request_fw(dev, filename, &fw->pcm);
if (err)
goto err_load;
}
if (!fw->initvals) {
switch (dev->phy.type) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
if ((rev >= 5) && (rev <= 10))
filename = "b0g0initvals5";
else if (rev == 2 || rev == 4)
filename = "b0g0initvals2";
else
goto err_no_initvals;
break;
default:
goto err_no_initvals;
}
err = do_request_fw(dev, filename, &fw->initvals);
if (err)
goto err_load;
}
if (!fw->initvals_band) {
switch (dev->phy.type) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
if ((rev >= 5) && (rev <= 10))
filename = "b0g0bsinitvals5";
else if (rev >= 11)
filename = NULL;
else if (rev == 2 || rev == 4)
filename = NULL;
else
goto err_no_initvals;
break;
default:
goto err_no_initvals;
}
err = do_request_fw(dev, filename, &fw->initvals_band);
if (err)
goto err_load;
}
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
return;
err_one_core_detach:
b43legacy_one_core_detach(dev->dev);
goto error;
err_load:
b43legacy_print_fw_helptext(dev->wl);
goto error;
err_no_initvals:
err = -ENODEV;
b43legacyerr(dev->wl, "No Initial Values firmware file for PHY %u, "
"core rev %u\n", dev->phy.type, rev);
goto error;
error:
b43legacy_release_firmware(dev);
return;
}
static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
{
struct wiphy *wiphy = dev->wl->hw->wiphy;
const size_t hdr_len = sizeof(struct b43legacy_fw_header);
const __be32 *data;
unsigned int i;
unsigned int len;
u16 fwrev;
u16 fwpatch;
u16 fwdate;
u16 fwtime;
u32 tmp, macctl;
int err = 0;
/* Jump the microcode PSM to offset 0 */
macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
B43legacy_WARN_ON(macctl & B43legacy_MACCTL_PSM_RUN);
macctl |= B43legacy_MACCTL_PSM_JMP0;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
/* Zero out all microcode PSM registers and shared memory. */
for (i = 0; i < 64; i++)
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, i, 0);
for (i = 0; i < 4096; i += 2)
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, i, 0);
/* Upload Microcode. */
data = (__be32 *) (dev->fw.ucode->data + hdr_len);
len = (dev->fw.ucode->size - hdr_len) / sizeof(__be32);
b43legacy_shm_control_word(dev,
B43legacy_SHM_UCODE |
B43legacy_SHM_AUTOINC_W,
0x0000);
for (i = 0; i < len; i++) {
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA,
be32_to_cpu(data[i]));
udelay(10);
}
if (dev->fw.pcm) {
/* Upload PCM data. */
data = (__be32 *) (dev->fw.pcm->data + hdr_len);
len = (dev->fw.pcm->size - hdr_len) / sizeof(__be32);
b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EA);
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, 0x00004000);
/* No need for autoinc bit in SHM_HW */
b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EB);
for (i = 0; i < len; i++) {
b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA,
be32_to_cpu(data[i]));
udelay(10);
}
}
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON,
B43legacy_IRQ_ALL);
/* Start the microcode PSM */
macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
macctl &= ~B43legacy_MACCTL_PSM_JMP0;
macctl |= B43legacy_MACCTL_PSM_RUN;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
/* Wait for the microcode to load and respond */
i = 0;
while (1) {
tmp = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
if (tmp == B43legacy_IRQ_MAC_SUSPENDED)
break;
i++;
if (i >= B43legacy_IRQWAIT_MAX_RETRIES) {
b43legacyerr(dev->wl, "Microcode not responding\n");
b43legacy_print_fw_helptext(dev->wl);
err = -ENODEV;
goto error;
}
msleep_interruptible(50);
if (signal_pending(current)) {
err = -EINTR;
goto error;
}
}
/* dummy read follows */
b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
/* Get and check the revisions. */
fwrev = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_UCODEREV);
fwpatch = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_UCODEPATCH);
fwdate = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_UCODEDATE);
fwtime = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_UCODETIME);
if (fwrev > 0x128) {
b43legacyerr(dev->wl, "YOU ARE TRYING TO LOAD V4 FIRMWARE."
" Only firmware from binary drivers version 3.x"
" is supported. You must change your firmware"
" files.\n");
b43legacy_print_fw_helptext(dev->wl);
err = -EOPNOTSUPP;
goto error;
}
b43legacyinfo(dev->wl, "Loading firmware version 0x%X, patch level %u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch,
(fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
(fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F,
fwtime & 0x1F);
dev->fw.rev = fwrev;
dev->fw.patch = fwpatch;
snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
dev->fw.rev, dev->fw.patch);
wiphy->hw_version = dev->dev->id.coreid;
return 0;
error:
macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
macctl &= ~B43legacy_MACCTL_PSM_RUN;
macctl |= B43legacy_MACCTL_PSM_JMP0;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
return err;
}
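/* Parse and apply an "initial values" image. Each record starts with a
* big-endian 16-bit offset_size word: the B43legacy_IV_32BIT flag bit
* selects a 32-bit payload (6-byte record) over a 16-bit payload
* (4-byte record), and the remaining bits are the MMIO offset, which
* must stay below 0x1000.
*/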
static int b43legacy_write_initvals(struct b43legacy_wldev *dev,
const struct b43legacy_iv *ivals,
size_t count,
size_t array_size)
{
const struct b43legacy_iv *iv;
u16 offset;
size_t i;
bool bit32;
BUILD_BUG_ON(sizeof(struct b43legacy_iv) != 6);
iv = ivals;
for (i = 0; i < count; i++) {
if (array_size < sizeof(iv->offset_size))
goto err_format;
array_size -= sizeof(iv->offset_size);
offset = be16_to_cpu(iv->offset_size);
bit32 = !!(offset & B43legacy_IV_32BIT);
offset &= B43legacy_IV_OFFSET_MASK;
if (offset >= 0x1000)
goto err_format;
if (bit32) {
u32 value;
if (array_size < sizeof(iv->data.d32))
goto err_format;
array_size -= sizeof(iv->data.d32);
value = get_unaligned_be32(&iv->data.d32);
b43legacy_write32(dev, offset, value);
iv = (const struct b43legacy_iv *)((const uint8_t *)iv +
sizeof(__be16) +
sizeof(__be32));
} else {
u16 value;
if (array_size < sizeof(iv->data.d16))
goto err_format;
array_size -= sizeof(iv->data.d16);
value = be16_to_cpu(iv->data.d16);
b43legacy_write16(dev, offset, value);
iv = (const struct b43legacy_iv *)((const uint8_t *)iv +
sizeof(__be16) +
sizeof(__be16));
}
}
if (array_size)
goto err_format;
return 0;
err_format:
b43legacyerr(dev->wl, "Initial Values Firmware file-format error.\n");
b43legacy_print_fw_helptext(dev->wl);
return -EPROTO;
}
static int b43legacy_upload_initvals(struct b43legacy_wldev *dev)
{
const size_t hdr_len = sizeof(struct b43legacy_fw_header);
const struct b43legacy_fw_header *hdr;
struct b43legacy_firmware *fw = &dev->fw;
const struct b43legacy_iv *ivals;
size_t count;
int err;
hdr = (const struct b43legacy_fw_header *)(fw->initvals->data);
ivals = (const struct b43legacy_iv *)(fw->initvals->data + hdr_len);
count = be32_to_cpu(hdr->size);
err = b43legacy_write_initvals(dev, ivals, count,
fw->initvals->size - hdr_len);
if (err)
goto out;
if (fw->initvals_band) {
hdr = (const struct b43legacy_fw_header *)
(fw->initvals_band->data);
ivals = (const struct b43legacy_iv *)(fw->initvals_band->data
+ hdr_len);
count = be32_to_cpu(hdr->size);
err = b43legacy_write_initvals(dev, ivals, count,
fw->initvals_band->size - hdr_len);
if (err)
goto out;
}
out:
return err;
}
/* Initialize the GPIOs
* http://bcm-specs.sipsolutions.net/GPIO
*/
static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
{
struct ssb_bus *bus = dev->dev->bus;
struct ssb_device *gpiodev, *pcidev = NULL;
u32 mask;
u32 set;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL,
b43legacy_read32(dev,
B43legacy_MMIO_MACCTL)
& 0xFFFF3FFF);
b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK,
b43legacy_read16(dev,
B43legacy_MMIO_GPIO_MASK)
| 0x000F);
mask = 0x0000001F;
set = 0x0000000F;
if (dev->dev->bus->chip_id == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
}
if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_PACTRL) {
b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK,
b43legacy_read16(dev,
B43legacy_MMIO_GPIO_MASK)
| 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
if (dev->dev->id.revision >= 2)
mask |= 0x0010; /* FIXME: This is redundant. */
#ifdef CONFIG_SSB_DRIVER_PCICORE
pcidev = bus->pcicore.dev;
#endif
gpiodev = bus->chipco.dev ? : pcidev;
if (!gpiodev)
return 0;
ssb_write32(gpiodev, B43legacy_GPIO_CONTROL,
(ssb_read32(gpiodev, B43legacy_GPIO_CONTROL)
& mask) | set);
return 0;
}
/* Turn off all GPIO stuff. Call this on module unload, for example. */
static void b43legacy_gpio_cleanup(struct b43legacy_wldev *dev)
{
struct ssb_bus *bus = dev->dev->bus;
struct ssb_device *gpiodev, *pcidev = NULL;
#ifdef CONFIG_SSB_DRIVER_PCICORE
pcidev = bus->pcicore.dev;
#endif
gpiodev = bus->chipco.dev ? : pcidev;
if (!gpiodev)
return;
ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, 0);
}
/* http://bcm-specs.sipsolutions.net/EnableMac */
void b43legacy_mac_enable(struct b43legacy_wldev *dev)
{
dev->mac_suspended--;
B43legacy_WARN_ON(dev->mac_suspended < 0);
B43legacy_WARN_ON(irqs_disabled());
if (dev->mac_suspended == 0) {
b43legacy_write32(dev, B43legacy_MMIO_MACCTL,
b43legacy_read32(dev,
B43legacy_MMIO_MACCTL)
| B43legacy_MACCTL_ENABLED);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON,
B43legacy_IRQ_MAC_SUSPENDED);
/* the next two are dummy reads */
b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
b43legacy_power_saving_ctl_bits(dev, -1, -1);
/* Re-enable IRQs. */
spin_lock_irq(&dev->wl->irq_lock);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK,
dev->irq_mask);
spin_unlock_irq(&dev->wl->irq_lock);
}
}
/* http://bcm-specs.sipsolutions.net/SuspendMAC */
void b43legacy_mac_suspend(struct b43legacy_wldev *dev)
{
int i;
u32 tmp;
might_sleep();
B43legacy_WARN_ON(irqs_disabled());
B43legacy_WARN_ON(dev->mac_suspended < 0);
if (dev->mac_suspended == 0) {
/* Mask IRQs before suspending MAC. Otherwise
* the MAC stays busy and won't suspend. */
spin_lock_irq(&dev->wl->irq_lock);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
spin_unlock_irq(&dev->wl->irq_lock);
b43legacy_synchronize_irq(dev);
b43legacy_power_saving_ctl_bits(dev, -1, 1);
b43legacy_write32(dev, B43legacy_MMIO_MACCTL,
b43legacy_read32(dev,
B43legacy_MMIO_MACCTL)
& ~B43legacy_MACCTL_ENABLED);
b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
for (i = 40; i; i--) {
tmp = b43legacy_read32(dev,
B43legacy_MMIO_GEN_IRQ_REASON);
if (tmp & B43legacy_IRQ_MAC_SUSPENDED)
goto out;
msleep(1);
}
b43legacyerr(dev->wl, "MAC suspend failed\n");
}
out:
dev->mac_suspended++;
}
static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
u32 ctl;
u16 cfp_pretbtt;
ctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
/* Reset status to STA infrastructure mode. */
ctl &= ~B43legacy_MACCTL_AP;
ctl &= ~B43legacy_MACCTL_KEEP_CTL;
ctl &= ~B43legacy_MACCTL_KEEP_BADPLCP;
ctl &= ~B43legacy_MACCTL_KEEP_BAD;
ctl &= ~B43legacy_MACCTL_PROMISC;
ctl &= ~B43legacy_MACCTL_BEACPROMISC;
ctl |= B43legacy_MACCTL_INFRA;
if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
ctl |= B43legacy_MACCTL_AP;
else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC))
ctl &= ~B43legacy_MACCTL_INFRA;
if (wl->filter_flags & FIF_CONTROL)
ctl |= B43legacy_MACCTL_KEEP_CTL;
if (wl->filter_flags & FIF_FCSFAIL)
ctl |= B43legacy_MACCTL_KEEP_BAD;
if (wl->filter_flags & FIF_PLCPFAIL)
ctl |= B43legacy_MACCTL_KEEP_BADPLCP;
if (wl->filter_flags & FIF_PROMISC_IN_BSS)
ctl |= B43legacy_MACCTL_PROMISC;
if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
ctl |= B43legacy_MACCTL_BEACPROMISC;
/* Workaround: On old hardware the HW-MAC-address-filter
* doesn't work properly, so always run promisc and filter
* it in software. */
if (dev->dev->id.revision <= 4)
ctl |= B43legacy_MACCTL_PROMISC;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, ctl);
cfp_pretbtt = 2;
if ((ctl & B43legacy_MACCTL_INFRA) &&
!(ctl & B43legacy_MACCTL_AP)) {
if (dev->dev->bus->chip_id == 0x4306 &&
dev->dev->bus->chip_rev == 3)
cfp_pretbtt = 100;
else
cfp_pretbtt = 50;
}
b43legacy_write16(dev, 0x612, cfp_pretbtt);
}
static void b43legacy_rate_memory_write(struct b43legacy_wldev *dev,
u16 rate,
int is_ofdm)
{
u16 offset;
if (is_ofdm) {
offset = 0x480;
offset += (b43legacy_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2;
} else {
offset = 0x4C0;
offset += (b43legacy_plcp_get_ratecode_cck(rate) & 0x000F) * 2;
}
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, offset + 0x20,
b43legacy_shm_read16(dev,
B43legacy_SHM_SHARED, offset));
}
static void b43legacy_rate_memory_init(struct b43legacy_wldev *dev)
{
switch (dev->phy.type) {
case B43legacy_PHYTYPE_G:
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_6MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_12MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_18MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_24MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_36MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_48MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_54MB, 1);
/* fallthrough */
case B43legacy_PHYTYPE_B:
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_1MB, 0);
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_2MB, 0);
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_5MB, 0);
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_11MB, 0);
break;
default:
B43legacy_BUG_ON(1);
}
}
/* Set the TX-Antenna for management frames sent by firmware. */
static void b43legacy_mgmtframe_txantenna(struct b43legacy_wldev *dev,
int antenna)
{
u16 ant = 0;
u16 tmp;
switch (antenna) {
case B43legacy_ANTENNA0:
ant |= B43legacy_TX4_PHY_ANT0;
break;
case B43legacy_ANTENNA1:
ant |= B43legacy_TX4_PHY_ANT1;
break;
case B43legacy_ANTENNA_AUTO:
ant |= B43legacy_TX4_PHY_ANTLAST;
break;
default:
B43legacy_BUG_ON(1);
}
/* FIXME We also need to set the other flags of the PHY control
* field somewhere. */
/* For Beacons */
tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_BEACPHYCTL);
tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant;
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_BEACPHYCTL, tmp);
/* For ACK/CTS */
tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_ACKCTSPHYCTL);
tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant;
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_ACKCTSPHYCTL, tmp);
/* For Probe Responses */
tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_PRPHYCTL);
tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant;
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_PRPHYCTL, tmp);
}
/* This is the opposite of b43legacy_chip_init() */
static void b43legacy_chip_exit(struct b43legacy_wldev *dev)
{
b43legacy_radio_turn_off(dev, 1);
b43legacy_gpio_cleanup(dev);
/* firmware is released later */
}
/* Initialize the chip
* http://bcm-specs.sipsolutions.net/ChipInit
*/
static int b43legacy_chip_init(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
int err;
int tmp;
u32 value32, macctl;
u16 value16;
/* Initialize the MAC control */
macctl = B43legacy_MACCTL_IHR_ENABLED | B43legacy_MACCTL_SHM_ENABLED;
if (dev->phy.gmode)
macctl |= B43legacy_MACCTL_GMODE;
macctl |= B43legacy_MACCTL_INFRA;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
err = b43legacy_upload_microcode(dev);
if (err)
goto out; /* firmware is released later */
err = b43legacy_gpio_init(dev);
if (err)
goto out; /* firmware is released later */
err = b43legacy_upload_initvals(dev);
if (err)
goto err_gpio_clean;
b43legacy_radio_turn_on(dev);
b43legacy_write16(dev, 0x03E6, 0x0000);
err = b43legacy_phy_init(dev);
if (err)
goto err_radio_off;
/* Select initial Interference Mitigation. */
tmp = phy->interfmode;
phy->interfmode = B43legacy_INTERFMODE_NONE;
b43legacy_radio_set_interference_mitigation(dev, tmp);
b43legacy_phy_set_antenna_diversity(dev);
b43legacy_mgmtframe_txantenna(dev, B43legacy_ANTENNA_DEFAULT);
if (phy->type == B43legacy_PHYTYPE_B) {
value16 = b43legacy_read16(dev, 0x005E);
value16 |= 0x0004;
b43legacy_write16(dev, 0x005E, value16);
}
b43legacy_write32(dev, 0x0100, 0x01000000);
if (dev->dev->id.revision < 5)
b43legacy_write32(dev, 0x010C, 0x01000000);
value32 = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
value32 &= ~B43legacy_MACCTL_INFRA;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value32);
value32 = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
value32 |= B43legacy_MACCTL_INFRA;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value32);
if (b43legacy_using_pio(dev)) {
b43legacy_write32(dev, 0x0210, 0x00000100);
b43legacy_write32(dev, 0x0230, 0x00000100);
b43legacy_write32(dev, 0x0250, 0x00000100);
b43legacy_write32(dev, 0x0270, 0x00000100);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0034,
0x0000);
}
/* Probe Response Timeout value */
/* FIXME: Default to 0, has to be set by ioctl probably... :-/ */
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0074, 0x0000);
/* Initially set the wireless operation mode. */
b43legacy_adjust_opmode(dev);
if (dev->dev->id.revision < 3) {
b43legacy_write16(dev, 0x060E, 0x0000);
b43legacy_write16(dev, 0x0610, 0x8000);
b43legacy_write16(dev, 0x0604, 0x0000);
b43legacy_write16(dev, 0x0606, 0x0200);
} else {
b43legacy_write32(dev, 0x0188, 0x80000000);
b43legacy_write32(dev, 0x018C, 0x02000000);
}
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, 0x00004000);
b43legacy_write32(dev, B43legacy_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
b43legacy_write32(dev, B43legacy_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
b43legacy_write32(dev, B43legacy_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
b43legacy_write32(dev, B43legacy_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
b43legacy_write32(dev, B43legacy_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
b43legacy_write32(dev, B43legacy_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
value32 = ssb_read32(dev->dev, SSB_TMSLOW);
value32 |= B43legacy_TMSLOW_MACPHYCLKEN;
ssb_write32(dev->dev, SSB_TMSLOW, value32);
b43legacy_write16(dev, B43legacy_MMIO_POWERUP_DELAY,
dev->dev->bus->chipco.fast_pwrup_delay);
/* PHY TX errors counter. */
atomic_set(&phy->txerr_cnt, B43legacy_PHY_TX_BADNESS_LIMIT);
B43legacy_WARN_ON(err != 0);
b43legacydbg(dev->wl, "Chip initialized\n");
out:
return err;
err_radio_off:
b43legacy_radio_turn_off(dev, 1);
err_gpio_clean:
b43legacy_gpio_cleanup(dev);
goto out;
}
static void b43legacy_periodic_every120sec(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
if (phy->type != B43legacy_PHYTYPE_G || phy->rev < 2)
return;
b43legacy_mac_suspend(dev);
b43legacy_phy_lo_g_measure(dev);
b43legacy_mac_enable(dev);
}
static void b43legacy_periodic_every60sec(struct b43legacy_wldev *dev)
{
b43legacy_phy_lo_mark_all_unused(dev);
if (dev->dev->bus->sprom.boardflags_lo & B43legacy_BFL_RSSI) {
b43legacy_mac_suspend(dev);
b43legacy_calc_nrssi_slope(dev);
b43legacy_mac_enable(dev);
}
}
static void b43legacy_periodic_every30sec(struct b43legacy_wldev *dev)
{
/* Update device statistics. */
b43legacy_calculate_link_quality(dev);
}
static void b43legacy_periodic_every15sec(struct b43legacy_wldev *dev)
{
b43legacy_phy_xmitpower(dev); /* FIXME: unless scanning? */
atomic_set(&dev->phy.txerr_cnt, B43legacy_PHY_TX_BADNESS_LIMIT);
wmb();
}
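/* The periodic work runs every 15 seconds and increments
* dev->periodic_state, so the modulo checks below fire every 30s
* (state % 2), 60s (state % 4) and 120s (state % 8).
*/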
static void do_periodic_work(struct b43legacy_wldev *dev)
{
unsigned int state;
state = dev->periodic_state;
if (state % 8 == 0)
b43legacy_periodic_every120sec(dev);
if (state % 4 == 0)
b43legacy_periodic_every60sec(dev);
if (state % 2 == 0)
b43legacy_periodic_every30sec(dev);
b43legacy_periodic_every15sec(dev);
}
/* Periodic work locking policy:
* The whole periodic work handler is protected by
* wl->mutex. If another lock is needed somewhere in the
* pwork callchain, it's acquired in-place, where it's needed.
*/
static void b43legacy_periodic_work_handler(struct work_struct *work)
{
struct b43legacy_wldev *dev = container_of(work, struct b43legacy_wldev,
periodic_work.work);
struct b43legacy_wl *wl = dev->wl;
unsigned long delay;
mutex_lock(&wl->mutex);
if (unlikely(b43legacy_status(dev) != B43legacy_STAT_STARTED))
goto out;
if (b43legacy_debug(dev, B43legacy_DBG_PWORK_STOP))
goto out_requeue;
do_periodic_work(dev);
dev->periodic_state++;
out_requeue:
if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST))
delay = msecs_to_jiffies(50);
else
delay = round_jiffies_relative(HZ * 15);
ieee80211_queue_delayed_work(wl->hw, &dev->periodic_work, delay);
out:
mutex_unlock(&wl->mutex);
}
static void b43legacy_periodic_tasks_setup(struct b43legacy_wldev *dev)
{
struct delayed_work *work = &dev->periodic_work;
dev->periodic_state = 0;
INIT_DELAYED_WORK(work, b43legacy_periodic_work_handler);
ieee80211_queue_delayed_work(dev->wl->hw, work, 0);
}
/* Validate access to the chip (SHM) */
static int b43legacy_validate_chipaccess(struct b43legacy_wldev *dev)
{
u32 value;
u32 shm_backup;
shm_backup = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0);
b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0xAA5555AA);
if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) !=
0xAA5555AA)
goto error;
b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0x55AAAA55);
if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) !=
0x55AAAA55)
goto error;
b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, shm_backup);
value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
if ((value | B43legacy_MACCTL_GMODE) !=
(B43legacy_MACCTL_GMODE | B43legacy_MACCTL_IHR_ENABLED))
goto error;
value = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON);
if (value)
goto error;
return 0;
error:
b43legacyerr(dev->wl, "Failed to validate the chipaccess\n");
return -ENODEV;
}
static void b43legacy_security_init(struct b43legacy_wldev *dev)
{
dev->max_nr_keys = (dev->dev->id.revision >= 5) ? 58 : 20;
B43legacy_WARN_ON(dev->max_nr_keys > ARRAY_SIZE(dev->key));
dev->ktp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
0x0056);
/* KTP is a word address, but we address SHM bytewise.
* So multiply by two.
*/
dev->ktp *= 2;
if (dev->dev->id.revision >= 5)
/* Number of RCMTA address slots */
b43legacy_write16(dev, B43legacy_MMIO_RCMTA_COUNT,
dev->max_nr_keys - 8);
}
#ifdef CONFIG_B43LEGACY_HWRNG
static int b43legacy_rng_read(struct hwrng *rng, u32 *data)
{
struct b43legacy_wl *wl = (struct b43legacy_wl *)rng->priv;
unsigned long flags;
/* Don't take wl->mutex here, as it could deadlock with
* hwrng internal locking. It's not needed to take
* wl->mutex here, anyway. */
spin_lock_irqsave(&wl->irq_lock, flags);
*data = b43legacy_read16(wl->current_dev, B43legacy_MMIO_RNG);
spin_unlock_irqrestore(&wl->irq_lock, flags);
return (sizeof(u16));
}
#endif
static void b43legacy_rng_exit(struct b43legacy_wl *wl)
{
#ifdef CONFIG_B43LEGACY_HWRNG
if (wl->rng_initialized)
hwrng_unregister(&wl->rng);
#endif
}
static int b43legacy_rng_init(struct b43legacy_wl *wl)
{
int err = 0;
#ifdef CONFIG_B43LEGACY_HWRNG
snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name),
"%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy));
wl->rng.name = wl->rng_name;
wl->rng.data_read = b43legacy_rng_read;
wl->rng.priv = (unsigned long)wl;
wl->rng_initialized = 1;
err = hwrng_register(&wl->rng);
if (err) {
wl->rng_initialized = 0;
b43legacyerr(wl, "Failed to register the random "
"number generator (%d)\n", err);
}
#endif
return err;
}
static void b43legacy_tx_work(struct work_struct *work)
{
struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
tx_work);
struct b43legacy_wldev *dev;
struct sk_buff *skb;
int queue_num;
int err = 0;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) {
mutex_unlock(&wl->mutex);
return;
}
for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
while (skb_queue_len(&wl->tx_queue[queue_num])) {
skb = skb_dequeue(&wl->tx_queue[queue_num]);
if (b43legacy_using_pio(dev))
err = b43legacy_pio_tx(dev, skb);
else
err = b43legacy_dma_tx(dev, skb);
if (err == -ENOSPC) {
wl->tx_queue_stopped[queue_num] = 1;
ieee80211_stop_queue(wl->hw, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
if (unlikely(err))
dev_kfree_skb(skb); /* Drop it */
err = 0;
}
if (!err)
wl->tx_queue_stopped[queue_num] = 0;
}
mutex_unlock(&wl->mutex);
}
static void b43legacy_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
return;
}
B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags);
skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
if (!wl->tx_queue_stopped[skb->queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
else
ieee80211_stop_queue(wl->hw, skb->queue_mapping);
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
return 0;
}
static int b43legacy_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
unsigned long flags;
spin_lock_irqsave(&wl->irq_lock, flags);
memcpy(stats, &wl->ieee_stats, sizeof(*stats));
spin_unlock_irqrestore(&wl->irq_lock, flags);
return 0;
}
static const char *phymode_to_string(unsigned int phymode)
{
switch (phymode) {
case B43legacy_PHYMODE_B:
return "B";
case B43legacy_PHYMODE_G:
return "G";
default:
B43legacy_BUG_ON(1);
}
return "";
}
static int find_wldev_for_phymode(struct b43legacy_wl *wl,
unsigned int phymode,
struct b43legacy_wldev **dev,
bool *gmode)
{
struct b43legacy_wldev *d;
list_for_each_entry(d, &wl->devlist, list) {
if (d->phy.possible_phymodes & phymode) {
/* Ok, this device supports the PHY-mode.
* Set the gmode bit. */
*gmode = true;
*dev = d;
return 0;
}
}
return -ESRCH;
}
static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev)
{
struct ssb_device *sdev = dev->dev;
u32 tmslow;
tmslow = ssb_read32(sdev, SSB_TMSLOW);
tmslow &= ~B43legacy_TMSLOW_GMODE;
tmslow |= B43legacy_TMSLOW_PHYRESET;
tmslow |= SSB_TMSLOW_FGC;
ssb_write32(sdev, SSB_TMSLOW, tmslow);
msleep(1);
tmslow = ssb_read32(sdev, SSB_TMSLOW);
tmslow &= ~SSB_TMSLOW_FGC;
tmslow |= B43legacy_TMSLOW_PHYRESET;
ssb_write32(sdev, SSB_TMSLOW, tmslow);
msleep(1);
}
/* Expects wl->mutex locked */
static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
unsigned int new_mode)
{
struct b43legacy_wldev *uninitialized_var(up_dev);
struct b43legacy_wldev *down_dev;
int err;
bool gmode = false;
int prev_status;
err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
if (err) {
b43legacyerr(wl, "Could not find a device for %s-PHY mode\n",
phymode_to_string(new_mode));
return err;
}
if ((up_dev == wl->current_dev) &&
(!!wl->current_dev->phy.gmode == !!gmode))
/* This device is already running. */
return 0;
b43legacydbg(wl, "Reconfiguring PHYmode to %s-PHY\n",
phymode_to_string(new_mode));
down_dev = wl->current_dev;
prev_status = b43legacy_status(down_dev);
/* Shutdown the currently running core. */
if (prev_status >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(down_dev);
if (prev_status >= B43legacy_STAT_INITIALIZED)
b43legacy_wireless_core_exit(down_dev);
if (down_dev != up_dev)
/* We switch to a different core, so we put PHY into
* RESET on the old core. */
b43legacy_put_phy_into_reset(down_dev);
/* Now start the new core. */
up_dev->phy.gmode = gmode;
if (prev_status >= B43legacy_STAT_INITIALIZED) {
err = b43legacy_wireless_core_init(up_dev);
if (err) {
b43legacyerr(wl, "Fatal: Could not initialize device"
" for newly selected %s-PHY mode\n",
phymode_to_string(new_mode));
goto init_failure;
}
}
if (prev_status >= B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(up_dev);
if (err) {
b43legacyerr(wl, "Fatal: Coult not start device for "
"newly selected %s-PHY mode\n",
phymode_to_string(new_mode));
b43legacy_wireless_core_exit(up_dev);
goto init_failure;
}
}
B43legacy_WARN_ON(b43legacy_status(up_dev) != prev_status);
b43legacy_shm_write32(up_dev, B43legacy_SHM_SHARED, 0x003E, 0);
wl->current_dev = up_dev;
return 0;
init_failure:
/* Whoops, failed to init the new core. No core is operating now. */
wl->current_dev = NULL;
return err;
}
/* Write the short and long frame retry limit values. */
static void b43legacy_set_retry_limits(struct b43legacy_wldev *dev,
unsigned int short_retry,
unsigned int long_retry)
{
/* The retry limit is a 4-bit counter. Enforce this to avoid overflowing
* the chip-internal counter. */
short_retry = min(short_retry, (unsigned int)0xF);
long_retry = min(long_retry, (unsigned int)0xF);
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0006, short_retry);
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry);
}
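/* Worked example (illustrative): because the SHM retry counters are
* 4 bits wide, b43legacy_set_retry_limits(dev, 7, 100) writes 7 and
* 0xF: the long-retry request is clamped to the hardware maximum of
* 15. */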
static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
u32 changed)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
struct b43legacy_phy *phy;
struct ieee80211_conf *conf = &hw->conf;
unsigned long flags;
unsigned int new_phymode = 0xFFFF;
int antenna_tx;
int err = 0;
antenna_tx = B43legacy_ANTENNA_DEFAULT;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
phy = &dev->phy;
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
b43legacy_set_retry_limits(dev,
conf->short_frame_max_tx_count,
conf->long_frame_max_tx_count);
changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS;
if (!changed)
goto out_unlock_mutex;
/* Switch the PHY mode (if necessary). */
switch (conf->channel->band) {
case IEEE80211_BAND_2GHZ:
if (phy->type == B43legacy_PHYTYPE_B)
new_phymode = B43legacy_PHYMODE_B;
else
new_phymode = B43legacy_PHYMODE_G;
break;
default:
B43legacy_WARN_ON(1);
}
err = b43legacy_switch_phymode(wl, new_phymode);
if (err)
goto out_unlock_mutex;
/* Disable IRQs while reconfiguring the device.
* This makes it possible to drop the spinlock throughout
* the reconfiguration process. */
spin_lock_irqsave(&wl->irq_lock, flags);
if (b43legacy_status(dev) < B43legacy_STAT_STARTED) {
spin_unlock_irqrestore(&wl->irq_lock, flags);
goto out_unlock_mutex;
}
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43legacy_synchronize_irq(dev);
/* Switch to the requested channel.
* The firmware takes care of races with the TX handler. */
if (conf->channel->hw_value != phy->channel)
b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0);
dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
/* Adjust the desired TX power level. */
if (conf->power_level != 0) {
if (conf->power_level != phy->power_level) {
phy->power_level = conf->power_level;
b43legacy_phy_xmitpower(dev);
}
}
/* Antennas for RX and management frame TX. */
b43legacy_mgmtframe_txantenna(dev, antenna_tx);
if (wl->radio_enabled != phy->radio_on) {
if (wl->radio_enabled) {
b43legacy_radio_turn_on(dev);
b43legacyinfo(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable)
b43legacyinfo(dev->wl, "The hardware RF-kill"
" button still turns the radio"
" physically off. Press the"
" button to turn it on.\n");
} else {
b43legacy_radio_turn_off(dev, 0);
b43legacyinfo(dev->wl, "Radio turned off by"
" software\n");
}
}
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
return err;
}
static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
{
struct ieee80211_supported_band *sband =
dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
struct ieee80211_rate *rate;
int i;
u16 basic, direct, offset, basic_offset, rateptr;
for (i = 0; i < sband->n_bitrates; i++) {
rate = &sband->bitrates[i];
if (b43legacy_is_cck_rate(rate->hw_value)) {
direct = B43legacy_SHM_SH_CCKDIRECT;
basic = B43legacy_SHM_SH_CCKBASIC;
offset = b43legacy_plcp_get_ratecode_cck(rate->hw_value);
offset &= 0xF;
} else {
direct = B43legacy_SHM_SH_OFDMDIRECT;
basic = B43legacy_SHM_SH_OFDMBASIC;
offset = b43legacy_plcp_get_ratecode_ofdm(rate->hw_value);
offset &= 0xF;
}
rate = ieee80211_get_response_rate(sband, brates, rate->bitrate);
if (b43legacy_is_cck_rate(rate->hw_value)) {
basic_offset = b43legacy_plcp_get_ratecode_cck(rate->hw_value);
basic_offset &= 0xF;
} else {
basic_offset = b43legacy_plcp_get_ratecode_ofdm(rate->hw_value);
basic_offset &= 0xF;
}
/*
* Get the pointer that we need to point to
* from the direct map
*/
rateptr = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
direct + 2 * basic_offset);
/* and write it to the basic map */
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
basic + 2 * offset, rateptr);
}
}
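/* Worked example (illustrative, rates assumed): if only 1 and 2 Mbps
* are basic rates, ieee80211_get_response_rate() resolves the 11 Mbps
* CCK rate to 2 Mbps. The loop above then reads the 2 Mbps entry from
* the CCK direct map and writes it into the 11 Mbps slot of the CCK
* basic map, so the firmware answers 11 Mbps frames at 2 Mbps. */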
static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
u32 changed)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
unsigned long flags;
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(wl->vif != vif);
dev = wl->current_dev;
/* Disable IRQs while reconfiguring the device.
* This makes it possible to drop the spinlock throughout
* the reconfiguration process. */
spin_lock_irqsave(&wl->irq_lock, flags);
if (b43legacy_status(dev) < B43legacy_STAT_STARTED) {
spin_unlock_irqrestore(&wl->irq_lock, flags);
goto out_unlock_mutex;
}
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
if (changed & BSS_CHANGED_BSSID) {
b43legacy_synchronize_irq(dev);
if (conf->bssid)
memcpy(wl->bssid, conf->bssid, ETH_ALEN);
else
memset(wl->bssid, 0, ETH_ALEN);
}
if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
if (changed & BSS_CHANGED_BEACON &&
(b43legacy_is_mode(wl, NL80211_IFTYPE_AP) ||
b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)))
b43legacy_update_templates(wl);
if (changed & BSS_CHANGED_BSSID)
b43legacy_write_mac_bssid_templates(dev);
}
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43legacy_mac_suspend(dev);
if (changed & BSS_CHANGED_BEACON_INT &&
(b43legacy_is_mode(wl, NL80211_IFTYPE_AP) ||
b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)))
b43legacy_set_beacon_int(dev, conf->beacon_int);
if (changed & BSS_CHANGED_BASIC_RATES)
b43legacy_update_basic_rates(dev, conf->basic_rates);
if (changed & BSS_CHANGED_ERP_SLOT) {
if (conf->use_short_slot)
b43legacy_short_slot_timing_enable(dev);
else
b43legacy_short_slot_timing_disable(dev);
}
b43legacy_mac_enable(dev);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* XXX: why? */
mmiowb();
spin_unlock_irqrestore(&wl->irq_lock, flags);
out_unlock_mutex:
mutex_unlock(&wl->mutex);
}
static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *fflags, u64 multicast)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
unsigned long flags;
if (!dev) {
*fflags = 0;
return;
}
spin_lock_irqsave(&wl->irq_lock, flags);
*fflags &= FIF_PROMISC_IN_BSS |
FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC;
changed &= FIF_PROMISC_IN_BSS |
FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC;
wl->filter_flags = *fflags;
if (changed && b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED)
b43legacy_adjust_opmode(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
}
/* Locking: wl->mutex */
static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
unsigned long flags;
int queue_num;
if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
return;
/* Disable and sync interrupts. We must do this before
* setting the status to INITIALIZED, as the interrupt handler
* won't care about IRQs then. */
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, 0);
b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); /* flush */
spin_unlock_irqrestore(&wl->irq_lock, flags);
b43legacy_synchronize_irq(dev);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
mutex_unlock(&wl->mutex);
/* Must unlock as it would otherwise deadlock. No races here.
* Cancel the possibly running self-rearming periodic work. */
cancel_delayed_work_sync(&dev->periodic_work);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
/* Drain all TX queues. */
for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
while (skb_queue_len(&wl->tx_queue[queue_num]))
dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
}
b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
/* Locking: wl->mutex */
static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
{
int err;
B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_INITIALIZED);
drain_txstatus_queue(dev);
err = request_irq(dev->dev->irq, b43legacy_interrupt_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
b43legacyerr(dev->wl, "Cannot request IRQ-%d\n",
dev->dev->irq);
goto out;
}
/* We are ready to run. */
ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_STARTED);
/* Start data flow (TX/RX) */
b43legacy_mac_enable(dev);
b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, dev->irq_mask);
/* Start maintenance work */
b43legacy_periodic_tasks_setup(dev);
b43legacydbg(dev->wl, "Wireless interface started\n");
out:
return err;
}
/* Get PHY and RADIO versioning numbers */
static int b43legacy_phy_versioning(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
u32 tmp;
u8 analog_type;
u8 phy_type;
u8 phy_rev;
u16 radio_manuf;
u16 radio_ver;
u16 radio_rev;
int unsupported = 0;
/* Get PHY versioning */
tmp = b43legacy_read16(dev, B43legacy_MMIO_PHY_VER);
analog_type = (tmp & B43legacy_PHYVER_ANALOG)
>> B43legacy_PHYVER_ANALOG_SHIFT;
phy_type = (tmp & B43legacy_PHYVER_TYPE) >> B43legacy_PHYVER_TYPE_SHIFT;
phy_rev = (tmp & B43legacy_PHYVER_VERSION);
switch (phy_type) {
case B43legacy_PHYTYPE_B:
if (phy_rev != 2 && phy_rev != 4
&& phy_rev != 6 && phy_rev != 7)
unsupported = 1;
break;
case B43legacy_PHYTYPE_G:
if (phy_rev > 8)
unsupported = 1;
break;
default:
unsupported = 1;
}
if (unsupported) {
b43legacyerr(dev->wl, "FOUND UNSUPPORTED PHY "
"(Analog %u, Type %u, Revision %u)\n",
analog_type, phy_type, phy_rev);
return -EOPNOTSUPP;
}
b43legacydbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n",
analog_type, phy_type, phy_rev);
/* Get RADIO versioning */
if (dev->dev->bus->chip_id == 0x4317) {
if (dev->dev->bus->chip_rev == 0)
tmp = 0x3205017F;
else if (dev->dev->bus->chip_rev == 1)
tmp = 0x4205017F;
else
tmp = 0x5205017F;
} else {
b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL,
B43legacy_RADIOCTL_ID);
tmp = b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_HIGH);
tmp <<= 16;
b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL,
B43legacy_RADIOCTL_ID);
tmp |= b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW);
}
radio_manuf = (tmp & 0x00000FFF);
radio_ver = (tmp & 0x0FFFF000) >> 12;
radio_rev = (tmp & 0xF0000000) >> 28;
switch (phy_type) {
case B43legacy_PHYTYPE_B:
if ((radio_ver & 0xFFF0) != 0x2050)
unsupported = 1;
break;
case B43legacy_PHYTYPE_G:
if (radio_ver != 0x2050)
unsupported = 1;
break;
default:
B43legacy_BUG_ON(1);
}
if (unsupported) {
b43legacyerr(dev->wl, "FOUND UNSUPPORTED RADIO "
"(Manuf 0x%X, Version 0x%X, Revision %u)\n",
radio_manuf, radio_ver, radio_rev);
return -EOPNOTSUPP;
}
b43legacydbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X,"
" Revision %u\n", radio_manuf, radio_ver, radio_rev);
phy->radio_manuf = radio_manuf;
phy->radio_ver = radio_ver;
phy->radio_rev = radio_rev;
phy->analog = analog_type;
phy->type = phy_type;
phy->rev = phy_rev;
return 0;
}
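/* Worked example (illustrative): for a 0x4317 chip with chip_rev >= 2
* the code above synthesizes tmp = 0x5205017F, which decodes as
* radio_manuf = 0x5205017F & 0x00000FFF = 0x17F
* radio_ver = (0x5205017F & 0x0FFFF000) >> 12 = 0x2050
* radio_rev = (0x5205017F & 0xF0000000) >> 28 = 5
* i.e. a 2050 radio, revision 5, which passes the B/G-PHY checks. */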
static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
struct b43legacy_phy *phy)
{
struct b43legacy_lopair *lo;
int i;
memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
dev->radio_hw_enable = true;
phy->savedpctlreg = 0xFFFF;
phy->aci_enable = false;
phy->aci_wlan_automatic = false;
phy->aci_hw_rssi = false;
lo = phy->_lo_pairs;
if (lo)
memset(lo, 0, sizeof(struct b43legacy_lopair) *
B43legacy_LO_COUNT);
phy->max_lb_gain = 0;
phy->trsw_rx_gain = 0;
/* Set default attenuation values. */
phy->bbatt = b43legacy_default_baseband_attenuation(dev);
phy->rfatt = b43legacy_default_radio_attenuation(dev);
phy->txctl1 = b43legacy_default_txctl1(dev);
phy->txpwr_offset = 0;
/* NRSSI */
phy->nrssislope = 0;
for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++)
phy->nrssi[i] = -1000;
for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++)
phy->nrssi_lt[i] = i;
phy->lofcal = 0xFFFF;
phy->initval = 0xFFFF;
phy->interfmode = B43legacy_INTERFMODE_NONE;
phy->channel = 0xFF;
}
static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
{
/* Flags */
dev->dfq_valid = false;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
setup_struct_phy_for_init(dev, &dev->phy);
/* IRQ related flags */
dev->irq_reason = 0;
memset(dev->dma_reason, 0, sizeof(dev->dma_reason));
dev->irq_mask = B43legacy_IRQ_MASKTEMPLATE;
dev->mac_suspended = 1;
/* Noise calculation context */
memset(&dev->noisecalc, 0, sizeof(dev->noisecalc));
}
static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev,
bool idle) {
u16 pu_delay = 1050;
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
pu_delay = 500;
if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
pu_delay = max(pu_delay, (u16)2400);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_SPUWKUP, pu_delay);
}
/* Set the TSF CFP pre-TargetBeaconTransmissionTime. */
static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev)
{
u16 pretbtt;
/* The time value is in microseconds. */
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
pretbtt = 2;
else
pretbtt = 250;
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_PRETBTT, pretbtt);
b43legacy_write16(dev, B43legacy_MMIO_TSF_CFP_PRETBTT, pretbtt);
}
/* Shutdown a wireless core */
/* Locking: wl->mutex */
static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
u32 macctl;
B43legacy_WARN_ON(b43legacy_status(dev) > B43legacy_STAT_INITIALIZED);
if (b43legacy_status(dev) != B43legacy_STAT_INITIALIZED)
return;
b43legacy_set_status(dev, B43legacy_STAT_UNINIT);
/* Stop the microcode PSM. */
macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
macctl &= ~B43legacy_MACCTL_PSM_RUN;
macctl |= B43legacy_MACCTL_PSM_JMP0;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
b43legacy_leds_exit(dev);
b43legacy_rng_exit(dev->wl);
b43legacy_pio_free(dev);
b43legacy_dma_free(dev);
b43legacy_chip_exit(dev);
b43legacy_radio_turn_off(dev, 1);
b43legacy_switch_analog(dev, 0);
if (phy->dyn_tssi_tbl)
kfree(phy->tssi2dbm);
kfree(phy->lo_control);
phy->lo_control = NULL;
if (dev->wl->current_beacon) {
dev_kfree_skb_any(dev->wl->current_beacon);
dev->wl->current_beacon = NULL;
}
ssb_device_disable(dev->dev, 0);
ssb_bus_may_powerdown(dev->dev->bus);
}
static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
int i;
/* Set default attenuation values. */
phy->bbatt = b43legacy_default_baseband_attenuation(dev);
phy->rfatt = b43legacy_default_radio_attenuation(dev);
phy->txctl1 = b43legacy_default_txctl1(dev);
phy->txctl2 = 0xFFFF;
phy->txpwr_offset = 0;
/* NRSSI */
phy->nrssislope = 0;
for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++)
phy->nrssi[i] = -1000;
for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++)
phy->nrssi_lt[i] = i;
phy->lofcal = 0xFFFF;
phy->initval = 0xFFFF;
phy->aci_enable = false;
phy->aci_wlan_automatic = false;
phy->aci_hw_rssi = false;
phy->antenna_diversity = 0xFFFF;
memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
/* Flags */
phy->calibrated = 0;
if (phy->_lo_pairs)
memset(phy->_lo_pairs, 0,
sizeof(struct b43legacy_lopair) * B43legacy_LO_COUNT);
memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain));
}
/* Initialize a wireless core */
static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
struct ssb_bus *bus = dev->dev->bus;
struct b43legacy_phy *phy = &dev->phy;
struct ssb_sprom *sprom = &dev->dev->bus->sprom;
int err;
u32 hf;
u32 tmp;
B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT);
err = ssb_bus_powerup(bus, 0);
if (err)
goto out;
if (!ssb_device_is_enabled(dev->dev)) {
tmp = phy->gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
}
if ((phy->type == B43legacy_PHYTYPE_B) ||
(phy->type == B43legacy_PHYTYPE_G)) {
phy->_lo_pairs = kzalloc(sizeof(struct b43legacy_lopair)
* B43legacy_LO_COUNT,
GFP_KERNEL);
if (!phy->_lo_pairs)
return -ENOMEM;
}
setup_struct_wldev_for_init(dev);
err = b43legacy_phy_init_tssi2dbm_table(dev);
if (err)
goto err_kfree_lo_control;
/* Enable IRQ routing to this device. */
ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev);
prepare_phy_data_for_init(dev);
b43legacy_phy_calibrate(dev);
err = b43legacy_chip_init(dev);
if (err)
goto err_kfree_tssitbl;
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_WLCOREREV,
dev->dev->id.revision);
hf = b43legacy_hf_read(dev);
if (phy->type == B43legacy_PHYTYPE_G) {
hf |= B43legacy_HF_SYMW;
if (phy->rev == 1)
hf |= B43legacy_HF_GDCW;
if (sprom->boardflags_lo & B43legacy_BFL_PACTRL)
hf |= B43legacy_HF_OFDMPABOOST;
} else if (phy->type == B43legacy_PHYTYPE_B) {
hf |= B43legacy_HF_SYMW;
if (phy->rev >= 2 && phy->radio_ver == 0x2050)
hf &= ~B43legacy_HF_GDCW;
}
b43legacy_hf_write(dev, hf);
b43legacy_set_retry_limits(dev,
B43legacy_DEFAULT_SHORT_RETRY_LIMIT,
B43legacy_DEFAULT_LONG_RETRY_LIMIT);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
0x0044, 3);
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
0x0046, 2);
/* Disable sending probe responses from firmware.
* Setting the MaxTime to one usec will always trigger
* a timeout, so we never send any probe resp.
* A timeout of zero is infinite. */
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
B43legacy_SHM_SH_PRMAXTIME, 1);
b43legacy_rate_memory_init(dev);
/* Minimum Contention Window */
if (phy->type == B43legacy_PHYTYPE_B)
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS,
0x0003, 31);
else
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS,
0x0003, 15);
/* Maximum Contention Window */
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS,
0x0004, 1023);
do {
if (b43legacy_using_pio(dev))
err = b43legacy_pio_init(dev);
else {
err = b43legacy_dma_init(dev);
if (!err)
b43legacy_qos_init(dev);
}
} while (err == -EAGAIN);
if (err)
goto err_chip_exit;
b43legacy_set_synth_pu_delay(dev, 1);
ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
b43legacy_upload_card_macaddress(dev);
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
b43legacy_leds_init(dev);
out:
return err;
err_chip_exit:
b43legacy_chip_exit(dev);
err_kfree_tssitbl:
if (phy->dyn_tssi_tbl)
kfree(phy->tssi2dbm);
err_kfree_lo_control:
kfree(phy->lo_control);
phy->lo_control = NULL;
ssb_bus_may_powerdown(bus);
B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT);
return err;
}
static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
unsigned long flags;
int err = -EOPNOTSUPP;
/* TODO: allow WDS/AP devices to coexist */
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
b43legacy_set_pretbtt(dev);
b43legacy_set_synth_pu_delay(dev, 0);
b43legacy_upload_card_macaddress(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
err = 0;
out_mutex_unlock:
mutex_unlock(&wl->mutex);
return err;
}
static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
unsigned long flags;
b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(!wl->operating);
B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = false;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
memset(wl->mac_addr, 0, ETH_ALEN);
b43legacy_upload_card_macaddress(dev);
spin_unlock_irqrestore(&wl->irq_lock, flags);
mutex_unlock(&wl->mutex);
}
static int b43legacy_op_start(struct ieee80211_hw *hw)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
int did_init = 0;
int err = 0;
/* Kill all old instance specific information to make sure
* the card won't use it in the short timeframe between start
* and mac80211 reconfiguring it. */
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
wl->beacon0_uploaded = false;
wl->beacon1_uploaded = false;
wl->beacon_templates_virgin = true;
wl->radio_enabled = true;
mutex_lock(&wl->mutex);
if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
err = b43legacy_wireless_core_init(dev);
if (err)
goto out_mutex_unlock;
did_init = 1;
}
if (b43legacy_status(dev) < B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(dev);
if (err) {
if (did_init)
b43legacy_wireless_core_exit(dev);
goto out_mutex_unlock;
}
}
wiphy_rfkill_start_polling(hw->wiphy);
out_mutex_unlock:
mutex_unlock(&wl->mutex);
return err;
}
static void b43legacy_op_stop(struct ieee80211_hw *hw)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
cancel_work_sync(&(wl->beacon_update_trigger));
mutex_lock(&wl->mutex);
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
wl->radio_enabled = false;
mutex_unlock(&wl->mutex);
}
static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, bool set)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
unsigned long flags;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_update_templates(wl);
spin_unlock_irqrestore(&wl->irq_lock, flags);
return 0;
}
static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->channel;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = dev->stats.link_noise;
return 0;
}
static const struct ieee80211_ops b43legacy_hw_ops = {
.tx = b43legacy_op_tx,
.conf_tx = b43legacy_op_conf_tx,
.add_interface = b43legacy_op_add_interface,
.remove_interface = b43legacy_op_remove_interface,
.config = b43legacy_op_dev_config,
.bss_info_changed = b43legacy_op_bss_info_changed,
.configure_filter = b43legacy_op_configure_filter,
.get_stats = b43legacy_op_get_stats,
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
.get_survey = b43legacy_op_get_survey,
.rfkill_poll = b43legacy_rfkill_poll,
};
/* Hard-reset the chip. Do not call this directly.
* Use b43legacy_controller_restart()
*/
static void b43legacy_chip_reset(struct work_struct *work)
{
struct b43legacy_wldev *dev =
container_of(work, struct b43legacy_wldev, restart_work);
struct b43legacy_wl *wl = dev->wl;
int err = 0;
int prev_status;
mutex_lock(&wl->mutex);
prev_status = b43legacy_status(dev);
/* Bring the device down... */
if (prev_status >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
if (prev_status >= B43legacy_STAT_INITIALIZED)
b43legacy_wireless_core_exit(dev);
/* ...and up again. */
if (prev_status >= B43legacy_STAT_INITIALIZED) {
err = b43legacy_wireless_core_init(dev);
if (err)
goto out;
}
if (prev_status >= B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(dev);
if (err) {
b43legacy_wireless_core_exit(dev);
goto out;
}
}
out:
if (err)
wl->current_dev = NULL; /* Failed to init the dev. */
mutex_unlock(&wl->mutex);
if (err)
b43legacyerr(wl, "Controller restart FAILED\n");
else
b43legacyinfo(wl, "Controller restarted\n");
}
static int b43legacy_setup_modes(struct b43legacy_wldev *dev,
int have_bphy,
int have_gphy)
{
struct ieee80211_hw *hw = dev->wl->hw;
struct b43legacy_phy *phy = &dev->phy;
phy->possible_phymodes = 0;
if (have_bphy) {
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&b43legacy_band_2GHz_BPHY;
phy->possible_phymodes |= B43legacy_PHYMODE_B;
}
if (have_gphy) {
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&b43legacy_band_2GHz_GPHY;
phy->possible_phymodes |= B43legacy_PHYMODE_G;
}
return 0;
}
static void b43legacy_wireless_core_detach(struct b43legacy_wldev *dev)
{
/* We release the firmware this late so that we do not have to
* re-request it every time the core is reinitialized. */
b43legacy_release_firmware(dev);
}
static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
struct ssb_bus *bus = dev->dev->bus;
struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
int err;
int have_bphy = 0;
int have_gphy = 0;
u32 tmp;
/* Do NOT do any device initialization here.
* Do it in wireless_core_init() instead.
* This function is for gathering basic information about the HW, only.
* Also some structs may be set up here. But most likely you want to
* have that in core_init(), too.
*/
err = ssb_bus_powerup(bus, 0);
if (err) {
b43legacyerr(wl, "Bus powerup failed\n");
goto out;
}
/* Get the PHY type. */
if (dev->dev->id.revision >= 5) {
u32 tmshigh;
tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH);
have_gphy = !!(tmshigh & B43legacy_TMSHIGH_GPHY);
if (!have_gphy)
have_bphy = 1;
} else if (dev->dev->id.revision == 4)
have_gphy = 1;
else
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
dev->phy.radio_on = true;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
err = b43legacy_phy_versioning(dev);
if (err)
goto err_powerdown;
/* Check if this device supports multiband. */
if (!pdev ||
(pdev->device != 0x4312 &&
pdev->device != 0x4319 &&
pdev->device != 0x4324)) {
/* No multiband support. */
have_bphy = 0;
have_gphy = 0;
switch (dev->phy.type) {
case B43legacy_PHYTYPE_B:
have_bphy = 1;
break;
case B43legacy_PHYTYPE_G:
have_gphy = 1;
break;
default:
B43legacy_BUG_ON(1);
}
}
dev->phy.gmode = (have_gphy || have_bphy);
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
err = b43legacy_validate_chipaccess(dev);
if (err)
goto err_powerdown;
err = b43legacy_setup_modes(dev, have_bphy, have_gphy);
if (err)
goto err_powerdown;
/* Now set some default "current_dev" */
if (!wl->current_dev)
wl->current_dev = dev;
INIT_WORK(&dev->restart_work, b43legacy_chip_reset);
b43legacy_radio_turn_off(dev, 1);
b43legacy_switch_analog(dev, 0);
ssb_device_disable(dev->dev, 0);
ssb_bus_may_powerdown(bus);
out:
return err;
err_powerdown:
ssb_bus_may_powerdown(bus);
return err;
}
static void b43legacy_one_core_detach(struct ssb_device *dev)
{
struct b43legacy_wldev *wldev;
struct b43legacy_wl *wl;
/* Do not cancel ieee80211-workqueue based work here.
* See comment in b43legacy_remove(). */
wldev = ssb_get_drvdata(dev);
wl = wldev->wl;
b43legacy_debugfs_remove_device(wldev);
b43legacy_wireless_core_detach(wldev);
list_del(&wldev->list);
wl->nr_devs--;
ssb_set_drvdata(dev, NULL);
kfree(wldev);
}
static int b43legacy_one_core_attach(struct ssb_device *dev,
struct b43legacy_wl *wl)
{
struct b43legacy_wldev *wldev;
int err = -ENOMEM;
wldev = kzalloc(sizeof(*wldev), GFP_KERNEL);
if (!wldev)
goto out;
wldev->dev = dev;
wldev->wl = wl;
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
tasklet_init(&wldev->isr_tasklet,
(void (*)(unsigned long))b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
err = b43legacy_wireless_core_attach(wldev);
if (err)
goto err_kfree_wldev;
list_add(&wldev->list, &wl->devlist);
wl->nr_devs++;
ssb_set_drvdata(dev, wldev);
b43legacy_debugfs_add_device(wldev);
out:
return err;
err_kfree_wldev:
kfree(wldev);
return err;
}
static void b43legacy_sprom_fixup(struct ssb_bus *bus)
{
/* boardflags workarounds */
if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
bus->boardinfo.type == 0x4E &&
bus->boardinfo.rev > 0x40)
bus->sprom.boardflags_lo |= B43legacy_BFL_PACTRL;
}
static void b43legacy_wireless_exit(struct ssb_device *dev,
struct b43legacy_wl *wl)
{
struct ieee80211_hw *hw = wl->hw;
ssb_set_devtypedata(dev, NULL);
ieee80211_free_hw(hw);
}
static int b43legacy_wireless_init(struct ssb_device *dev)
{
struct ssb_sprom *sprom = &dev->bus->sprom;
struct ieee80211_hw *hw;
struct b43legacy_wl *wl;
int err = -ENOMEM;
int queue_num;
b43legacy_sprom_fixup(dev->bus);
hw = ieee80211_alloc_hw(sizeof(*wl), &b43legacy_hw_ops);
if (!hw) {
b43legacyerr(NULL, "Could not allocate ieee80211 device\n");
goto out;
}
/* fill hw info */
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SIGNAL_DBM;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_ADHOC);
hw->queues = 1; /* FIXME: hardware has more queues */
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
if (is_valid_ether_addr(sprom->et1mac))
SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac);
else
SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac);
/* Get and initialize struct b43legacy_wl */
wl = hw_to_b43legacy_wl(hw);
memset(wl, 0, sizeof(*wl));
wl->hw = hw;
spin_lock_init(&wl->irq_lock);
spin_lock_init(&wl->leds_lock);
mutex_init(&wl->mutex);
INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
INIT_WORK(&wl->tx_work, b43legacy_tx_work);
/* Initialize queues and flags. */
for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
skb_queue_head_init(&wl->tx_queue[queue_num]);
wl->tx_queue_stopped[queue_num] = 0;
}
ssb_set_devtypedata(dev, wl);
b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n",
dev->bus->chip_id, dev->id.revision);
err = 0;
out:
return err;
}
static int b43legacy_probe(struct ssb_device *dev,
const struct ssb_device_id *id)
{
struct b43legacy_wl *wl;
int err;
int first = 0;
wl = ssb_get_devtypedata(dev);
if (!wl) {
/* Probing the first core - setup common struct b43legacy_wl */
first = 1;
err = b43legacy_wireless_init(dev);
if (err)
goto out;
wl = ssb_get_devtypedata(dev);
B43legacy_WARN_ON(!wl);
}
err = b43legacy_one_core_attach(dev, wl);
if (err)
goto err_wireless_exit;
/* setup and start work to load firmware */
INIT_WORK(&wl->firmware_load, b43legacy_request_firmware);
schedule_work(&wl->firmware_load);
out:
return err;
err_wireless_exit:
if (first)
b43legacy_wireless_exit(dev, wl);
return err;
}
static void b43legacy_remove(struct ssb_device *dev)
{
struct b43legacy_wl *wl = ssb_get_devtypedata(dev);
struct b43legacy_wldev *wldev = ssb_get_drvdata(dev);
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
cancel_work_sync(&wl->firmware_load);
B43legacy_WARN_ON(!wl);
if (wl->current_dev == wldev)
ieee80211_unregister_hw(wl->hw);
b43legacy_one_core_detach(dev);
if (list_empty(&wl->devlist))
/* Last core on the chip unregistered.
* We can destroy common struct b43legacy_wl.
*/
b43legacy_wireless_exit(dev, wl);
}
/* Perform a hardware reset. This can be called from any context. */
void b43legacy_controller_restart(struct b43legacy_wldev *dev,
const char *reason)
{
/* Must avoid requeueing, if we are in shutdown. */
if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)
return;
b43legacyinfo(dev->wl, "Controller RESET (%s) ...\n", reason);
ieee80211_queue_work(dev->wl->hw, &dev->restart_work);
}
#ifdef CONFIG_PM
static int b43legacy_suspend(struct ssb_device *dev, pm_message_t state)
{
struct b43legacy_wldev *wldev = ssb_get_drvdata(dev);
struct b43legacy_wl *wl = wldev->wl;
b43legacydbg(wl, "Suspending...\n");
mutex_lock(&wl->mutex);
wldev->suspend_init_status = b43legacy_status(wldev);
if (wldev->suspend_init_status >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(wldev);
if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED)
b43legacy_wireless_core_exit(wldev);
mutex_unlock(&wl->mutex);
b43legacydbg(wl, "Device suspended.\n");
return 0;
}
static int b43legacy_resume(struct ssb_device *dev)
{
struct b43legacy_wldev *wldev = ssb_get_drvdata(dev);
struct b43legacy_wl *wl = wldev->wl;
int err = 0;
b43legacydbg(wl, "Resuming...\n");
mutex_lock(&wl->mutex);
if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) {
err = b43legacy_wireless_core_init(wldev);
if (err) {
b43legacyerr(wl, "Resume failed at core init\n");
goto out;
}
}
if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(wldev);
if (err) {
b43legacy_wireless_core_exit(wldev);
b43legacyerr(wl, "Resume failed at core start\n");
goto out;
}
}
b43legacydbg(wl, "Device resumed.\n");
out:
mutex_unlock(&wl->mutex);
return err;
}
#else /* CONFIG_PM */
# define b43legacy_suspend NULL
# define b43legacy_resume NULL
#endif /* CONFIG_PM */
static struct ssb_driver b43legacy_ssb_driver = {
.name = KBUILD_MODNAME,
.id_table = b43legacy_ssb_tbl,
.probe = b43legacy_probe,
.remove = b43legacy_remove,
.suspend = b43legacy_suspend,
.resume = b43legacy_resume,
};
static void b43legacy_print_driverinfo(void)
{
const char *feat_pci = "", *feat_leds = "",
*feat_pio = "", *feat_dma = "";
#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
feat_pci = "P";
#endif
#ifdef CONFIG_B43LEGACY_LEDS
feat_leds = "L";
#endif
#ifdef CONFIG_B43LEGACY_PIO
feat_pio = "I";
#endif
#ifdef CONFIG_B43LEGACY_DMA
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
"[ Features: %s%s%s%s ]\n",
feat_pci, feat_leds, feat_pio, feat_dma);
}
static int __init b43legacy_init(void)
{
int err;
b43legacy_debugfs_init();
err = ssb_driver_register(&b43legacy_ssb_driver);
if (err)
goto err_dfs_exit;
b43legacy_print_driverinfo();
return err;
err_dfs_exit:
b43legacy_debugfs_exit();
return err;
}
static void __exit b43legacy_exit(void)
{
ssb_driver_unregister(&b43legacy_ssb_driver);
b43legacy_debugfs_exit();
}
module_init(b43legacy_init)
module_exit(b43legacy_exit)
| gpl-2.0 |
OUDhs/android_kernel_motorola_asanti | arch/arm/mach-sa1100/h3600.c | 2972 | 3395 | /*
* Support for Compaq iPAQ H3600 handheld computer
*
* Copyright (c) 2000,1 Compaq Computer Corporation. (Author: Jamey Hicks)
* Copyright (c) 2009 Dmitry Artamonow <mad_soft@inbox.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/irda.h>
#include <mach/h3xxx.h>
#include "generic.h"
/*
* helper for sa1100fb
*/
static void h3600_lcd_power(int enable)
{
if (gpio_request(H3XXX_EGPIO_LCD_ON, "LCD power")) {
pr_err("%s: can't request H3XXX_EGPIO_LCD_ON\n", __func__);
goto err1;
}
if (gpio_request(H3600_EGPIO_LCD_PCI, "LCD control")) {
pr_err("%s: can't request H3XXX_EGPIO_LCD_PCI\n", __func__);
goto err2;
}
if (gpio_request(H3600_EGPIO_LCD_5V_ON, "LCD 5v")) {
pr_err("%s: can't request H3XXX_EGPIO_LCD_5V_ON\n", __func__);
goto err3;
}
if (gpio_request(H3600_EGPIO_LVDD_ON, "LCD 9v/-6.5v")) {
pr_err("%s: can't request H3600_EGPIO_LVDD_ON\n", __func__);
goto err4;
}
gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable);
gpio_direction_output(H3600_EGPIO_LCD_PCI, enable);
gpio_direction_output(H3600_EGPIO_LCD_5V_ON, enable);
gpio_direction_output(H3600_EGPIO_LVDD_ON, enable);
gpio_free(H3600_EGPIO_LVDD_ON);
err4: gpio_free(H3600_EGPIO_LCD_5V_ON);
err3: gpio_free(H3600_EGPIO_LCD_PCI);
err2: gpio_free(H3XXX_EGPIO_LCD_ON);
err1: return;
}
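/* Illustrative sketch (hypothetical, not board code): the function
* above uses the classic reverse-order goto ladder, and its success
* path deliberately falls through the error labels so all four GPIOs
* are released again once their levels are programmed. The bare
* pattern, reduced to two GPIOs: */
#if 0
static int example_claim_two(unsigned first, unsigned second)
{
int err;
err = gpio_request(first, "first");
if (err)
goto err1;
err = gpio_request(second, "second");
if (err)
goto err2;
return 0; /* caller now owns both and frees them in reverse order */
err2: gpio_free(first);
err1: return err;
}
#endif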
static void __init h3600_map_io(void)
{
h3xxx_map_io();
sa1100fb_lcd_power = h3600_lcd_power;
}
/*
* This turns the IRDA power on or off on the Compaq H3600
*/
static int h3600_irda_set_power(struct device *dev, unsigned int state)
{
gpio_set_value(H3600_EGPIO_IR_ON, state);
return 0;
}
static void h3600_irda_set_speed(struct device *dev, unsigned int speed)
{
gpio_set_value(H3600_EGPIO_IR_FSEL, !(speed < 4000000));
}
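/* Worked example (illustrative): !(speed < 4000000) selects the 4 Mbps
* FIR mode, so set_speed(dev, 115200) drives FSEL low (SIR) while
* set_speed(dev, 4000000) drives it high (FIR). */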
static int h3600_irda_startup(struct device *dev)
{
int err = gpio_request(H3600_EGPIO_IR_ON, "IrDA power");
if (err)
goto err1;
err = gpio_direction_output(H3600_EGPIO_IR_ON, 0);
if (err)
goto err2;
err = gpio_request(H3600_EGPIO_IR_FSEL, "IrDA fsel");
if (err)
goto err2;
err = gpio_direction_output(H3600_EGPIO_IR_FSEL, 0);
if (err)
goto err3;
return 0;
err3: gpio_free(H3600_EGPIO_IR_FSEL);
err2: gpio_free(H3600_EGPIO_IR_ON);
err1: return err;
}
static void h3600_irda_shutdown(struct device *dev)
{
gpio_free(H3600_EGPIO_IR_ON);
gpio_free(H3600_EGPIO_IR_FSEL);
}
static struct irda_platform_data h3600_irda_data = {
.set_power = h3600_irda_set_power,
.set_speed = h3600_irda_set_speed,
.startup = h3600_irda_startup,
.shutdown = h3600_irda_shutdown,
};
static struct gpio_default_state h3600_default_gpio[] = {
{ H3XXX_GPIO_COM_DCD, GPIO_MODE_IN, "COM DCD" },
{ H3XXX_GPIO_COM_CTS, GPIO_MODE_IN, "COM CTS" },
{ H3XXX_GPIO_COM_RTS, GPIO_MODE_OUT0, "COM RTS" },
};
static void __init h3600_mach_init(void)
{
h3xxx_init_gpio(h3600_default_gpio, ARRAY_SIZE(h3600_default_gpio));
h3xxx_mach_init();
sa11x0_register_irda(&h3600_irda_data);
}
MACHINE_START(H3600, "Compaq iPAQ H3600")
.boot_params = 0xc0000100,
.map_io = h3600_map_io,
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3600_mach_init,
MACHINE_END
| gpl-2.0 |
yinquan529/pandaboard | drivers/acpi/acpica/dsobject.c | 3228 | 24843 | /******************************************************************************
*
* Module Name: dsobject - Dispatcher object management routines
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
/* Local prototypes */
static acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr);
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ds_build_internal_object
*
* PARAMETERS: walk_state - Current walk state
* Op - Parser object to be translated
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Translate a parser Op object to the equivalent namespace object
* Simple objects are any objects other than a package object!
*
******************************************************************************/
static acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *obj_desc;
acpi_status status;
acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is a named object reference. If this name was
* previously looked up in the namespace, it was stored in this op.
* Otherwise, go ahead and look it up now
*/
if (!op->common.node) {
status = acpi_ns_lookup(walk_state->scope_info,
op->common.value.string,
ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, NULL,
ACPI_CAST_INDIRECT_PTR(struct
acpi_namespace_node,
&(op->
common.
node)));
if (ACPI_FAILURE(status)) {
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
&& (acpi_gbl_enable_interpreter_slack)
&&
((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP))) {
/*
* We didn't find the target and we are populating elements
* of a package - ignore if slack enabled. Some ASL code
* contains dangling invalid references in packages and
* expects that no exception will be issued. Leave the
* element as a null element. It cannot be used, but it
* can be overwritten by subsequent ASL code - this is
* typically the case.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Ignoring unresolved reference in package [%4.4s]\n",
walk_state->
scope_info->scope.
node->name.ascii));
return_ACPI_STATUS(AE_OK);
} else {
ACPI_ERROR_NAMESPACE(op->common.value.
string, status);
}
return_ACPI_STATUS(status);
}
}
/* Special object resolution for elements of a package */
if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
(op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP)) {
/*
* Attempt to resolve the node to a value before we insert it into
* the package. If this is a reference to a common data type,
* resolve it immediately. According to the ACPI spec, package
* elements can only be "data objects" or method references.
* Attempt to resolve to an Integer, Buffer, String or Package.
* If cannot, return the named reference (for things like Devices,
* Methods, etc.) Buffer Fields and Fields will resolve to simple
* objects (int/buf/str/pkg).
*
* NOTE: References to things like Devices, Methods, Mutexes, etc.
* will remain as named references. This behavior is not described
* in the ACPI spec, but it appears to be an oversight.
*/
obj_desc =
ACPI_CAST_PTR(union acpi_operand_object,
op->common.node);
status =
acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&obj_desc),
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Special handling for Alias objects. We need to setup the type
* and the Op->Common.Node to point to the Alias target. Note,
* Alias has at most one level of indirection internally.
*/
type = op->common.node->type;
if (type == ACPI_TYPE_LOCAL_ALIAS) {
type = obj_desc->common.type;
op->common.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
op->common.node->object);
}
switch (type) {
/*
* For these types, we need the actual node, not the subobject.
* However, the subobject did not get an extra reference count above.
*
* TBD: should ex_resolve_node_to_value be changed to fix this?
*/
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
acpi_ut_add_reference(op->common.node->object);
/*lint -fallthrough */
/*
* For these types, we need the actual node, not the subobject.
* The subobject got an extra reference count in ex_resolve_node_to_value.
*/
case ACPI_TYPE_MUTEX:
case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
/* We will create a reference object for these types below */
break;
default:
/*
* All other types - the node was resolved to an actual
* object, we are done.
*/
goto exit;
}
}
}
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
(op->common.aml_opcode))->
object_type);
if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
status =
acpi_ds_init_object_from_op(walk_state, op, op->common.aml_opcode,
&obj_desc);
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
exit:
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
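/* Worked example (illustrative, names hypothetical): inside
* Package () { INT0, DEV0 } an element naming an Integer object is
* resolved to its value by the code above, while one naming a Device
* falls into the reference cases of the switch and remains a named
* reference, as described in the comments. */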
/*******************************************************************************
*
* FUNCTION: acpi_ds_build_internal_buffer_obj
*
* PARAMETERS: walk_state - Current walk state
* Op - Parser object to be translated
* buffer_length - Length of the buffer
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Translate a parser Op package object to the equivalent
* namespace object
*
******************************************************************************/
acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 buffer_length,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_parse_object *arg;
union acpi_operand_object *obj_desc;
union acpi_parse_object *byte_list;
u32 byte_list_length = 0;
ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)",
* the buffer object already exists (from the NS node); otherwise it must
* be created.
*/
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
*obj_desc_ptr = obj_desc;
if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
}
/*
* Second arg is the buffer data (optional). The byte_list can be either
* individual bytes or a string initializer. In either case, a
* byte_list appears in the AML.
*/
arg = op->common.value.arg; /* skip first arg */
byte_list = arg->named.next;
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_ERROR((AE_INFO,
"Expecting bytelist, found AML opcode 0x%X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
return (AE_TYPE);
}
byte_list_length = (u32) byte_list->common.value.integer;
}
/*
* The buffer length (number of bytes) will be the larger of:
* 1) The specified buffer length and
* 2) The length of the initializer byte list
*/
obj_desc->buffer.length = buffer_length;
if (byte_list_length > buffer_length) {
obj_desc->buffer.length = byte_list_length;
}
/* Allocate the buffer */
if (obj_desc->buffer.length == 0) {
obj_desc->buffer.pointer = NULL;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Buffer defined with zero length in AML, creating\n"));
} else {
obj_desc->buffer.pointer =
ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
if (!obj_desc->buffer.pointer) {
acpi_ut_delete_object_desc(obj_desc);
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Initialize buffer from the byte_list (if present) */
if (byte_list) {
ACPI_MEMCPY(obj_desc->buffer.pointer,
byte_list->named.data, byte_list_length);
}
}
obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
return_ACPI_STATUS(AE_OK);
}
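/* Worked example (illustrative): Buffer (4) {0x01, 0x02} allocates
* four zeroed bytes and copies the two initializer bytes, while
* Buffer (2) {0x01, 0x02, 0x03} grows the allocation to three bytes,
* so the initializer byte list always fits. */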
/*******************************************************************************
*
* FUNCTION: acpi_ds_build_internal_package_obj
*
* PARAMETERS: walk_state - Current walk state
* Op - Parser object to be translated
* element_count - Number of elements in the package - this is
* the num_elements argument to Package()
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
*
* DESCRIPTION: Translate a parser Op package object to the equivalent
* namespace object
*
* NOTE: The number of elements in the package will always be the num_elements
* count, regardless of the number of elements in the package list. If
* num_elements is smaller, only that many package list elements are used.
* if num_elements is larger, the Package object is padded out with
* objects of type Uninitialized (as per ACPI spec.)
*
* Even though the ASL compilers do not allow num_elements to be smaller
* than the Package list length (for the fixed length package opcode), some
* BIOS code modifies the AML on the fly to adjust the num_elements, and
* this code compensates for that. This also provides compatibility with
* other AML interpreters.
*
******************************************************************************/
acpi_status
acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 element_count,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_parse_object *arg;
union acpi_parse_object *parent;
union acpi_operand_object *obj_desc = NULL;
acpi_status status = AE_OK;
unsigned i;
u16 index;
u16 reference_count;
ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
/* Find the parent of a possibly nested package */
parent = op->common.parent;
while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
(parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
parent = parent->common.parent;
}
/*
* If we are evaluating a Named package object "Name (xxxx, Package)",
* the package object already exists, otherwise it must be created.
*/
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
*obj_desc_ptr = obj_desc;
if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
obj_desc->package.node = parent->common.node;
}
/*
* Allocate the element array (array of pointers to the individual
* objects) based on the num_elements parameter. Add an extra pointer slot
* so that the list is always null terminated.
*/
obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
element_count +
1) * sizeof(void *));
if (!obj_desc->package.elements) {
acpi_ut_delete_object_desc(obj_desc);
return_ACPI_STATUS(AE_NO_MEMORY);
}
obj_desc->package.count = element_count;
/*
* Initialize the elements of the package, up to the num_elements count.
* Package is automatically padded with uninitialized (NULL) elements
* if num_elements is greater than the package list length. Likewise,
* Package is truncated if num_elements is less than the list length.
*/
arg = op->common.value.arg;
arg = arg->common.next;
for (i = 0; arg && (i < element_count); i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
if (arg->common.node->type == ACPI_TYPE_METHOD) {
/*
* A method reference "looks" to the parser to be a method
* invocation, so we special case it here
*/
arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
status =
acpi_ds_build_internal_object(walk_state,
arg,
&obj_desc->
package.
elements[i]);
} else {
/* This package element is already built, just get it */
obj_desc->package.elements[i] =
ACPI_CAST_PTR(union acpi_operand_object,
arg->common.node);
}
} else {
status = acpi_ds_build_internal_object(walk_state, arg,
&obj_desc->
package.
elements[i]);
}
if (*obj_desc_ptr) {
/* Existing package, get existing reference count */
reference_count =
(*obj_desc_ptr)->common.reference_count;
if (reference_count > 1) {
/* Make new element ref count match original ref count */
for (index = 0; index < (reference_count - 1);
index++) {
acpi_ut_add_reference((obj_desc->
package.
elements[i]));
}
}
}
arg = arg->common.next;
}
/* Check for match between num_elements and actual length of package_list */
if (arg) {
/*
* num_elements was exhausted, but there are remaining elements in the
* package_list. Truncate the package to num_elements.
*
* Note: technically, this is an error, from ACPI spec: "It is an error
* for NumElements to be less than the number of elements in the
* PackageList". However, we just print a message and
* no exception is returned. This provides Windows compatibility. Some
* BIOSs will alter the num_elements on the fly, creating this type
* of ill-formed package object.
*/
while (arg) {
/*
* We must delete any package elements that were created earlier
* and are not going to be used because of the package truncation.
*/
if (arg->common.node) {
acpi_ut_remove_reference(ACPI_CAST_PTR
(union
acpi_operand_object,
arg->common.node));
arg->common.node = NULL;
}
/* Find out how many elements there really are */
i++;
arg = arg->common.next;
}
ACPI_INFO((AE_INFO,
"Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
i, element_count));
} else if (i < element_count) {
/*
* Arg list (elements) was exhausted, but we did not reach num_elements count.
* Note: this is not an error, the package is padded out with NULLs.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
i, element_count));
}
obj_desc->package.flags |= AOPOBJ_DATA_VALID;
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
return_ACPI_STATUS(status);
}
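/*
* Illustrative standalone sketch (not part of this file) of the
* pad/truncate rule described in the NOTE above: the built package
* always has num_elements slots. If the source list is longer, the
* tail is dropped; if it is shorter, the remaining slots stay NULL
* (ACPI "Uninitialized"). All names below are hypothetical.
*/
#include <stdlib.h>
static const char **build_package(const char **list, size_t list_len,
size_t num_elements)
{
/* one extra slot so the array is always null terminated */
const char **pkg = calloc(num_elements + 1, sizeof(*pkg));
size_t i;
if (!pkg)
return NULL;
for (i = 0; i < num_elements && i < list_len; i++)
pkg[i] = list[i]; /* copy only up to num_elements */
return pkg; /* slots [i..num_elements-1] remain NULL */
}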
/*******************************************************************************
*
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
* Node - NS Node to be initialized
* Op - Parser object to be translated
*
* RETURN: Status
*
* DESCRIPTION: Create the object to be associated with a namespace node
*
******************************************************************************/
acpi_status
acpi_ds_create_node(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node,
union acpi_parse_object *op)
{
acpi_status status;
union acpi_operand_object *obj_desc;
ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
/*
* Because of the execution pass through the non-control-method
* parts of the table, we can arrive here twice. Only init
* the named object node the first time through
*/
if (acpi_ns_get_attached_object(node)) {
return_ACPI_STATUS(AE_OK);
}
if (!op->common.value.arg) {
/* No arguments, there is nothing to do */
return_ACPI_STATUS(AE_OK);
}
/* Build an internal object for the argument(s) */
status = acpi_ds_build_internal_object(walk_state, op->common.value.arg,
&obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Re-type the object according to its argument */
node->type = obj_desc->common.type;
/* Attach obj to node */
status = acpi_ns_attach_object(node, obj_desc, node->type);
/* Remove local reference to the object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
#endif /* ACPI_NO_METHOD_EXECUTION */
/*******************************************************************************
*
* FUNCTION: acpi_ds_init_object_from_op
*
* PARAMETERS: walk_state - Current walk state
* Op - Parser op used to init the internal object
* Opcode - AML opcode associated with the object
* ret_obj_desc - Namespace object to be initialized
*
* RETURN: Status
*
* DESCRIPTION: Initialize a namespace object from a parser Op and its
* associated arguments. The namespace object is a more compact
* representation of the Op and its arguments.
*
******************************************************************************/
acpi_status
acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u16 opcode,
union acpi_operand_object **ret_obj_desc)
{
const struct acpi_opcode_info *op_info;
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ds_init_object_from_op);
obj_desc = *ret_obj_desc;
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
/* Unknown opcode */
return_ACPI_STATUS(AE_TYPE);
}
/* Perform per-object initialization */
switch (obj_desc->common.type) {
case ACPI_TYPE_BUFFER:
/*
* Defer evaluation of Buffer term_arg operand
*/
obj_desc->buffer.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
obj_desc->buffer.aml_start = op->named.data;
obj_desc->buffer.aml_length = op->named.length;
break;
case ACPI_TYPE_PACKAGE:
/*
* Defer evaluation of Package term_arg operand
*/
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
case ACPI_TYPE_INTEGER:
switch (op_info->type) {
case AML_TYPE_CONSTANT:
/*
* Resolve AML Constants here - AND ONLY HERE!
* All constants are integers.
* We mark the integer with a flag that indicates that it started
* life as a constant -- so that stores to constants will perform
* as expected (noop). zero_op is used as a placeholder for optional
* target operands.
*/
obj_desc->common.flags = AOPOBJ_AML_CONSTANT;
switch (opcode) {
case AML_ZERO_OP:
obj_desc->integer.value = 0;
break;
case AML_ONE_OP:
obj_desc->integer.value = 1;
break;
case AML_ONES_OP:
obj_desc->integer.value = ACPI_UINT64_MAX;
/* Truncate value if we are executing from a 32-bit ACPI table */
#ifndef ACPI_NO_METHOD_EXECUTION
acpi_ex_truncate_for32bit_table(obj_desc);
#endif
break;
case AML_REVISION_OP:
obj_desc->integer.value = ACPI_CA_VERSION;
break;
default:
ACPI_ERROR((AE_INFO,
"Unknown constant opcode 0x%X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
}
break;
case AML_TYPE_LITERAL:
obj_desc->integer.value = op->common.value.integer;
#ifndef ACPI_NO_METHOD_EXECUTION
acpi_ex_truncate_for32bit_table(obj_desc);
#endif
break;
default:
ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
}
break;
case ACPI_TYPE_STRING:
obj_desc->string.pointer = op->common.value.string;
obj_desc->string.length =
(u32) ACPI_STRLEN(op->common.value.string);
/*
* The string is contained in the ACPI table, don't ever try
* to delete it
*/
obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
break;
case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_LOCAL_REFERENCE:
switch (op_info->type) {
case AML_TYPE_LOCAL_VARIABLE:
/* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
obj_desc->reference.value =
((u32)opcode) - AML_LOCAL_OP;
obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
#ifndef ACPI_NO_METHOD_EXECUTION
status =
acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
obj_desc->reference.
value, walk_state,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&obj_desc->reference.
object));
#endif
break;
case AML_TYPE_METHOD_ARGUMENT:
/* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP;
obj_desc->reference.class = ACPI_REFCLASS_ARG;
#ifndef ACPI_NO_METHOD_EXECUTION
status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
obj_desc->
reference.value,
walk_state,
ACPI_CAST_INDIRECT_PTR
(struct
acpi_namespace_node,
&obj_desc->
reference.
object));
#endif
break;
default: /* Object name or Debug object */
switch (op->common.aml_opcode) {
case AML_INT_NAMEPATH_OP:
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
obj_desc->reference.object =
op->common.node->object;
obj_desc->reference.class = ACPI_REFCLASS_NAME;
break;
case AML_DEBUG_OP:
obj_desc->reference.class = ACPI_REFCLASS_DEBUG;
break;
default:
ACPI_ERROR((AE_INFO,
"Unimplemented reference type for AML opcode: 0x%4.4X",
opcode));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
break;
}
break;
default:
ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
break;
}
return_ACPI_STATUS(status);
}
| gpl-2.0 |
hiikezoe/android_kernel_fujitsu_f11d | drivers/acpi/acpica/utdelete.c | 3228 | 18803 | /*******************************************************************************
*
* Module Name: utdelete - object deletion and reference count utilities
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acevents.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdelete")
/* Local prototypes */
static void acpi_ut_delete_internal_obj(union acpi_operand_object *object);
static void
acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_internal_obj
*
* PARAMETERS: Object - Object to be deleted
*
* RETURN: None
*
* DESCRIPTION: Low level object deletion, after reference counts have been
* updated (All reference counts, including sub-objects!)
*
******************************************************************************/
static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
{
void *obj_pointer = NULL;
union acpi_operand_object *handler_desc;
union acpi_operand_object *second_desc;
union acpi_operand_object *next_desc;
union acpi_operand_object **last_obj_ptr;
ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
if (!object) {
return_VOID;
}
/*
* Must delete or free any pointers within the object that are not
* actual ACPI objects (for example, a raw buffer pointer).
*/
switch (object->common.type) {
case ACPI_TYPE_STRING:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"**** String %p, ptr %p\n", object,
object->string.pointer));
/* Free the actual string buffer */
if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
/* But only if it is NOT a pointer into an ACPI table */
obj_pointer = object->string.pointer;
}
break;
case ACPI_TYPE_BUFFER:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"**** Buffer %p, ptr %p\n", object,
object->buffer.pointer));
/* Free the actual buffer */
if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
/* But only if it is NOT a pointer into an ACPI table */
obj_pointer = object->buffer.pointer;
}
break;
case ACPI_TYPE_PACKAGE:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
" **** Package of count %X\n",
object->package.count));
/*
* Elements of the package are not handled here, they are deleted
* separately
*/
/* Free the (variable length) element pointer array */
obj_pointer = object->package.elements;
break;
/*
* These objects have a possible list of notify handlers.
* Device object also may have a GPE block.
*/
case ACPI_TYPE_DEVICE:
if (object->device.gpe_block) {
(void)acpi_ev_delete_gpe_block(object->device.
gpe_block);
}
/*lint -fallthrough */
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
/* Walk the notify handler list for this object */
handler_desc = object->common_notify.handler;
while (handler_desc) {
next_desc = handler_desc->address_space.next;
acpi_ut_remove_reference(handler_desc);
handler_desc = next_desc;
}
break;
case ACPI_TYPE_MUTEX:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Mutex %p, OS Mutex %p\n",
object, object->mutex.os_mutex));
if (object == acpi_gbl_global_lock_mutex) {
/* Global Lock has extra semaphore */
(void)
acpi_os_delete_semaphore
(acpi_gbl_global_lock_semaphore);
acpi_gbl_global_lock_semaphore = NULL;
acpi_os_delete_mutex(object->mutex.os_mutex);
acpi_gbl_global_lock_mutex = NULL;
} else {
acpi_ex_unlink_mutex(object);
acpi_os_delete_mutex(object->mutex.os_mutex);
}
break;
case ACPI_TYPE_EVENT:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Event %p, OS Semaphore %p\n",
object, object->event.os_semaphore));
(void)acpi_os_delete_semaphore(object->event.os_semaphore);
object->event.os_semaphore = NULL;
break;
case ACPI_TYPE_METHOD:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Method %p\n", object));
/* Delete the method mutex if it exists */
if (object->method.mutex) {
acpi_os_delete_mutex(object->method.mutex->mutex.
os_mutex);
acpi_ut_delete_object_desc(object->method.mutex);
object->method.mutex = NULL;
}
break;
case ACPI_TYPE_REGION:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Region %p\n", object));
/* Invalidate the region address/length via the host OS */
acpi_os_invalidate_address(object->region.space_id,
object->region.address,
(acpi_size) object->region.length);
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
/*
* Free the region_context if and only if the handler is one of the
* default handlers -- and therefore, we created the context object
* locally, it was not created by an external caller.
*/
handler_desc = object->region.handler;
if (handler_desc) {
next_desc =
handler_desc->address_space.region_list;
last_obj_ptr =
&handler_desc->address_space.region_list;
/* Remove the region object from the handler's list */
while (next_desc) {
if (next_desc == object) {
*last_obj_ptr =
next_desc->region.next;
break;
}
/* Walk the linked list of handlers */
last_obj_ptr = &next_desc->region.next;
next_desc = next_desc->region.next;
}
if (handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
/* Deactivate region and free region context */
if (handler_desc->address_space.setup) {
(void)handler_desc->
address_space.setup(object,
ACPI_REGION_DEACTIVATE,
handler_desc->
address_space.
context,
&second_desc->
extra.
region_context);
}
}
acpi_ut_remove_reference(handler_desc);
}
/* Now we can free the Extra object */
acpi_ut_delete_object_desc(second_desc);
}
break;
case ACPI_TYPE_BUFFER_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Buffer Field %p\n", object));
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
acpi_ut_delete_object_desc(second_desc);
}
break;
case ACPI_TYPE_LOCAL_BANK_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Bank Field %p\n", object));
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
acpi_ut_delete_object_desc(second_desc);
}
break;
default:
break;
}
/* Free any allocated memory (pointer within the object) found above */
if (obj_pointer) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Deleting Object Subptr %p\n", obj_pointer));
ACPI_FREE(obj_pointer);
}
/* Now the object can be safely deleted */
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Deleting Object %p [%s]\n",
object, acpi_ut_get_object_type_name(object)));
acpi_ut_delete_object_desc(object);
return_VOID;
}
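/*
* Standalone sketch (not part of this file) of the pointer-to-pointer
* unlink idiom used above to drop a region from the handler's
* region_list: walking with the address of the previous "next" link
* removes the need for a special case at the list head. The types
* below are hypothetical.
*/
#include <stddef.h>
struct node { struct node *next; };
static void unlink_node(struct node **head, struct node *victim)
{
struct node **last = head; /* address of the link to patch */
struct node *cur = *head;
while (cur) {
if (cur == victim) {
*last = cur->next; /* splice the victim out */
break;
}
last = &cur->next;
cur = cur->next;
}
}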
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_internal_object_list
*
* PARAMETERS: obj_list - Pointer to the list to be deleted
*
* RETURN: None
*
* DESCRIPTION: This function deletes an internal object list, including both
* simple objects and package objects
*
******************************************************************************/
void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
{
union acpi_operand_object **internal_obj;
ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
/* Walk the null-terminated internal list */
for (internal_obj = obj_list; *internal_obj; internal_obj++) {
acpi_ut_remove_reference(*internal_obj);
}
/* Free the combined parameter pointer list and object array */
ACPI_FREE(obj_list);
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_update_ref_count
*
* PARAMETERS: Object - Object whose ref count is to be updated
* Action - What to do
*
* RETURN: New ref count
*
* DESCRIPTION: Modify the ref count and return it.
*
******************************************************************************/
static void
acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
{
u16 count;
u16 new_count;
ACPI_FUNCTION_NAME(ut_update_ref_count);
if (!object) {
return;
}
count = object->common.reference_count;
new_count = count;
/*
* Perform the reference count action (increment, decrement, force delete)
*/
switch (action) {
case REF_INCREMENT:
new_count++;
object->common.reference_count = new_count;
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Obj %p Refs=%X, [Incremented]\n",
object, new_count));
break;
case REF_DECREMENT:
if (count < 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Obj %p Refs=%X, can't decrement! (Set to 0)\n",
object, new_count));
new_count = 0;
} else {
new_count--;
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Obj %p Refs=%X, [Decremented]\n",
object, new_count));
}
if (object->common.type == ACPI_TYPE_METHOD) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Method Obj %p Refs=%X, [Decremented]\n",
object, new_count));
}
object->common.reference_count = new_count;
if (new_count == 0) {
acpi_ut_delete_internal_obj(object);
}
break;
case REF_FORCE_DELETE:
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Obj %p Refs=%X, Force delete! (Set to 0)\n",
object, count));
new_count = 0;
object->common.reference_count = new_count;
acpi_ut_delete_internal_obj(object);
break;
default:
ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
break;
}
/*
* Sanity check the reference count, for debug purposes only.
* (A deleted object will have a huge reference count)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
"Large Reference Count (0x%X) in object %p",
count, object));
}
}
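/*
* Standalone sketch (not part of this file) of the three reference
* count actions handled above: increment, a decrement that clamps at
* zero instead of wrapping, and force-delete. The object type and the
* destroy() callback are hypothetical stand-ins.
*/
#include <stdint.h>
enum ref_action { REF_INC, REF_DEC, REF_FORCE };
struct obj { uint16_t refs; };
static void destroy(struct obj *o) { /* release resources here */ }
static void update_ref(struct obj *o, enum ref_action a)
{
switch (a) {
case REF_INC:
o->refs++;
break;
case REF_DEC:
if (o->refs) /* never wrap below zero */
o->refs--;
if (!o->refs)
destroy(o);
break;
case REF_FORCE:
o->refs = 0;
destroy(o);
break;
}
}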
/*******************************************************************************
*
* FUNCTION: acpi_ut_update_object_reference
*
* PARAMETERS: Object - Increment ref count for this object
* and all sub-objects
* Action - Either REF_INCREMENT or REF_DECREMENT or
* REF_FORCE_DELETE
*
* RETURN: Status
*
* DESCRIPTION: Increment the object reference count
*
* Object references are incremented when:
* 1) An object is attached to a Node (namespace object)
* 2) An object is copied (all subobjects must be incremented)
*
* Object references are decremented when:
* 1) An object is detached from an Node
*
******************************************************************************/
acpi_status
acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
{
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
union acpi_operand_object *next_object = NULL;
union acpi_generic_state *state;
u32 i;
ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
while (object) {
/* Make sure that this isn't a namespace handle */
if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Object %p is NS handle\n", object));
return_ACPI_STATUS(AE_OK);
}
/*
* All sub-objects must have their reference count incremented also.
* Different object types have different subobjects.
*/
switch (object->common.type) {
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_POWER:
case ACPI_TYPE_THERMAL:
/* Update the notify objects for these types (if present) */
acpi_ut_update_ref_count(object->common_notify.
system_notify, action);
acpi_ut_update_ref_count(object->common_notify.
device_notify, action);
break;
case ACPI_TYPE_PACKAGE:
/*
* We must update all the sub-objects of the package,
* each of which may have its own sub-objects.
*/
for (i = 0; i < object->package.count; i++) {
/*
* Push each element onto the stack for later processing.
* Note: There can be null elements within the package,
* these are simply ignored
*/
status =
acpi_ut_create_update_state_and_push
(object->package.elements[i], action,
&state_list);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
}
break;
case ACPI_TYPE_BUFFER_FIELD:
next_object = object->buffer_field.buffer_obj;
break;
case ACPI_TYPE_LOCAL_REGION_FIELD:
next_object = object->field.region_obj;
break;
case ACPI_TYPE_LOCAL_BANK_FIELD:
next_object = object->bank_field.bank_obj;
status =
acpi_ut_create_update_state_and_push(object->
bank_field.
region_obj,
action,
&state_list);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
break;
case ACPI_TYPE_LOCAL_INDEX_FIELD:
next_object = object->index_field.index_obj;
status =
acpi_ut_create_update_state_and_push(object->
index_field.
data_obj,
action,
&state_list);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
/*
* The target of an Index (a package, string, or buffer) or a named
* reference must track changes to the ref count of the index or
* target object.
*/
if ((object->reference.class == ACPI_REFCLASS_INDEX) ||
(object->reference.class == ACPI_REFCLASS_NAME)) {
next_object = object->reference.object;
}
break;
case ACPI_TYPE_REGION:
default:
break; /* No subobjects for all other types */
}
/*
* Now we can update the count in the main object. This can only
* happen after we update the sub-objects in case this causes the
* main object to be deleted.
*/
acpi_ut_update_ref_count(object, action);
object = NULL;
/* Move on to the next object to be updated */
if (next_object) {
object = next_object;
next_object = NULL;
} else if (state_list) {
state = acpi_ut_pop_generic_state(&state_list);
object = state->update.object;
acpi_ut_delete_generic_state(state);
}
}
return_ACPI_STATUS(AE_OK);
error_exit:
ACPI_EXCEPTION((AE_INFO, status,
"Could not update object reference count"));
/* Free any stacked Update State objects */
while (state_list) {
state = acpi_ut_pop_generic_state(&state_list);
acpi_ut_delete_generic_state(state);
}
return_ACPI_STATUS(status);
}
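/*
* Standalone sketch (not part of this file) of the traversal pattern
* used above: rather than recursing into package sub-objects, each
* child is pushed onto an explicit stack and the outer loop runs until
* the stack drains, which bounds stack usage on deeply nested
* packages. All types are hypothetical; allocation-failure handling
* is elided for brevity.
*/
#include <stdlib.h>
struct tnode { struct tnode **children; unsigned int count; };
struct stack_item { struct tnode *node; struct stack_item *next; };
static void push(struct stack_item **top, struct tnode *n)
{
struct stack_item *it = malloc(sizeof(*it));
it->node = n;
it->next = *top;
*top = it;
}
static void visit_all(struct tnode *root, void (*visit)(struct tnode *))
{
struct tnode *node = root;
struct stack_item *top = NULL;
while (node) {
unsigned int i;
for (i = 0; i < node->count; i++)
if (node->children[i]) /* NULL elements are skipped */
push(&top, node->children[i]);
visit(node); /* parent handled only after children are queued */
node = NULL;
if (top) { /* pop the next pending node */
struct stack_item *it = top;
top = it->next;
node = it->node;
free(it);
}
}
}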
/*******************************************************************************
*
* FUNCTION: acpi_ut_add_reference
*
* PARAMETERS: Object - Object whose reference count is to be
* incremented
*
* RETURN: None
*
* DESCRIPTION: Add one reference to an ACPI object
*
******************************************************************************/
void acpi_ut_add_reference(union acpi_operand_object *object)
{
ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
/* Ensure that we have a valid object */
if (!acpi_ut_valid_internal_object(object)) {
return_VOID;
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Obj %p Current Refs=%X [To Be Incremented]\n",
object, object->common.reference_count));
/* Increment the reference count */
(void)acpi_ut_update_object_reference(object, REF_INCREMENT);
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_remove_reference
*
* PARAMETERS: Object - Object whose ref count will be decremented
*
* RETURN: None
*
* DESCRIPTION: Decrement the reference count of an ACPI internal object
*
******************************************************************************/
void acpi_ut_remove_reference(union acpi_operand_object *object)
{
ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
/*
* Allow a NULL pointer to be passed in, just ignore it. This saves
* each caller from having to check. Also, ignore NS nodes.
*
*/
if (!object ||
(ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
return_VOID;
}
/* Ensure that we have a valid object */
if (!acpi_ut_valid_internal_object(object)) {
return_VOID;
}
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Obj %p Current Refs=%X [To Be Decremented]\n",
object, object->common.reference_count));
/*
* Decrement the reference count, and only actually delete the object
* if the reference count becomes 0. (Must also decrement the ref count
* of all subobjects!)
*/
(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
return_VOID;
}
| gpl-2.0 |
sakuramilk/linux-3.0.y | arch/sh/kernel/hw_breakpoint.c | 3996 | 8797 | /*
* arch/sh/kernel/hw_breakpoint.c
*
* Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
*
* Copyright (C) 2009 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for each cpu
*/
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
/*
* A dummy placeholder for early accesses until the CPUs get a chance to
* register their UBCs later in the boot process.
*/
static struct sh_ubc ubc_dummy = { .num_events = 0 };
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
/*
* Install a perf counter breakpoint.
*
* We seek a free UBC channel and use it for this breakpoint.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int i;
for (i = 0; i < sh_ubc->num_events; i++) {
struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
if (!*slot) {
*slot = bp;
break;
}
}
if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
return -EBUSY;
clk_enable(sh_ubc->clk);
sh_ubc->enable(info, i);
return 0;
}
/*
* Uninstall the breakpoint contained in the given counter.
*
* First we search the debug address register it uses and then we disable
* it.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int i;
for (i = 0; i < sh_ubc->num_events; i++) {
struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
if (*slot == bp) {
*slot = NULL;
break;
}
}
if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
return;
sh_ubc->disable(info, i);
clk_disable(sh_ubc->clk);
}
static int get_hbp_len(u16 hbp_len)
{
unsigned int len_in_bytes = 0;
switch (hbp_len) {
case SH_BREAKPOINT_LEN_1:
len_in_bytes = 1;
break;
case SH_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
case SH_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
case SH_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
}
return len_in_bytes;
}
/*
* Check for virtual address in kernel space.
*/
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
unsigned int len;
unsigned long va;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
va = info->address;
len = get_hbp_len(info->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
int arch_bp_generic_fields(int sh_len, int sh_type,
int *gen_len, int *gen_type)
{
/* Len */
switch (sh_len) {
case SH_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case SH_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
case SH_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
case SH_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
case SH_BREAKPOINT_RW:
*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
break;
default:
return -EINVAL;
}
return 0;
}
static int arch_build_bp_info(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
info->address = bp->attr.bp_addr;
/* Len */
switch (bp->attr.bp_len) {
case HW_BREAKPOINT_LEN_1:
info->len = SH_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
info->len = SH_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
info->len = SH_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
info->len = SH_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
switch (bp->attr.bp_type) {
case HW_BREAKPOINT_R:
info->type = SH_BREAKPOINT_READ;
break;
case HW_BREAKPOINT_W:
info->type = SH_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
info->type = SH_BREAKPOINT_RW;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Validate the arch-specific HW Breakpoint register settings
*/
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
unsigned int align;
int ret;
ret = arch_build_bp_info(bp);
if (ret)
return ret;
ret = -EINVAL;
switch (info->len) {
case SH_BREAKPOINT_LEN_1:
align = 0;
break;
case SH_BREAKPOINT_LEN_2:
align = 1;
break;
case SH_BREAKPOINT_LEN_4:
align = 3;
break;
case SH_BREAKPOINT_LEN_8:
align = 7;
break;
default:
return ret;
}
/*
* For kernel-addresses, either the address or symbol name can be
* specified.
*/
if (info->name)
info->address = (unsigned long)kallsyms_lookup_name(info->name);
/*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
if (info->address & align)
return -EINVAL;
return 0;
}
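/*
* Standalone sketch (not part of this file) of the alignment test used
* above: for a power-of-two length, (len - 1) is a mask of the
* low-order address bits, all of which must be clear for the address
* to be suitably aligned.
*/
#include <stdbool.h>
static bool addr_aligned(unsigned long va, unsigned int len)
{
unsigned int align = len - 1; /* e.g. len 8 -> mask 0x7 */
return (va & align) == 0;
}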
/*
* Release the user breakpoints used by ptrace
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < sh_ubc->num_events; i++) {
unregister_hw_breakpoint(t->ptrace_bps[i]);
t->ptrace_bps[i] = NULL;
}
}
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
int cpu, i, rc = NOTIFY_STOP;
struct perf_event *bp;
unsigned int cmf, resume_mask;
/*
* Do an early return if none of the channels triggered.
*/
cmf = sh_ubc->triggered_mask();
if (unlikely(!cmf))
return NOTIFY_DONE;
/*
* By default, resume all of the active channels.
*/
resume_mask = sh_ubc->active_mask();
/*
* Disable breakpoints during exception handling.
*/
sh_ubc->disable_all();
cpu = get_cpu();
for (i = 0; i < sh_ubc->num_events; i++) {
unsigned long event_mask = (1 << i);
if (likely(!(cmf & event_mask)))
continue;
/*
* The counter may be concurrently released but that can only
* occur from a call_rcu() path. We can then safely fetch
* the breakpoint, use its callback, touch its counter
* while we are in an rcu_read_lock() path.
*/
rcu_read_lock();
bp = per_cpu(bp_per_reg[i], cpu);
if (bp)
rc = NOTIFY_DONE;
/*
* Reset the condition match flag to denote completion of
* exception handling.
*/
sh_ubc->clear_triggered_mask(event_mask);
/*
* bp can be NULL due to concurrent perf counter
* removal.
*/
if (!bp) {
rcu_read_unlock();
break;
}
/*
* Don't restore the channel if the breakpoint is from
* ptrace, as it always operates in one-shot mode.
*/
if (bp->overflow_handler == ptrace_triggered)
resume_mask &= ~(1 << i);
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
if (!arch_check_bp_in_kernelspace(bp)) {
siginfo_t info;
info.si_signo = args->signr;
info.si_errno = notifier_to_errno(rc);
info.si_code = TRAP_HWBKPT;
force_sig_info(args->signr, &info, current);
}
rcu_read_unlock();
}
if (cmf == 0)
rc = NOTIFY_DONE;
sh_ubc->enable_all(resume_mask);
put_cpu();
return rc;
}
BUILD_TRAP_HANDLER(breakpoint)
{
unsigned long ex = lookup_exception_vector();
TRAP_HANDLER_DECL;
notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
/*
* Handle debug exception notifications.
*/
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
struct die_args *args = data;
if (val != DIE_BREAKPOINT)
return NOTIFY_DONE;
/*
* If the breakpoint hasn't been triggered by the UBC, it's
* probably from a debugger, so don't do anything more here.
*
* This also permits the UBC interface clock to remain off for
* non-UBC breakpoints, as we don't need to check the triggered
* or active channel masks.
*/
if (args->trapnr != sh_ubc->trap_nr)
return NOTIFY_DONE;
return hw_breakpoint_handler(data);
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
int register_sh_ubc(struct sh_ubc *ubc)
{
/* Bail if it's already assigned */
if (sh_ubc != &ubc_dummy)
return -EBUSY;
sh_ubc = ubc;
pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
WARN_ON(ubc->num_events > HBP_NUM);
return 0;
}
| gpl-2.0 |
cile381/sxe_sensation_kernel | drivers/video/via/via_modesetting.c | 3996 | 3419 | /*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* basic modesetting functions
*/
#include <linux/kernel.h>
#include <linux/via-core.h>
#include "via_modesetting.h"
#include "share.h"
#include "debug.h"
void via_set_primary_address(u32 addr)
{
DEBUG_MSG(KERN_DEBUG "via_set_primary_address(0x%08X)\n", addr);
via_write_reg(VIACR, 0x0D, addr & 0xFF);
via_write_reg(VIACR, 0x0C, (addr >> 8) & 0xFF);
via_write_reg(VIACR, 0x34, (addr >> 16) & 0xFF);
via_write_reg_mask(VIACR, 0x48, (addr >> 24) & 0x1F, 0x1F);
}
void via_set_secondary_address(u32 addr)
{
DEBUG_MSG(KERN_DEBUG "via_set_secondary_address(0x%08X)\n", addr);
/* secondary display supports only quadword aligned memory */
via_write_reg_mask(VIACR, 0x62, (addr >> 2) & 0xFE, 0xFE);
via_write_reg(VIACR, 0x63, (addr >> 10) & 0xFF);
via_write_reg(VIACR, 0x64, (addr >> 18) & 0xFF);
via_write_reg_mask(VIACR, 0xA3, (addr >> 26) & 0x07, 0x07);
}
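/*
* Standalone sketch (not part of this file) showing how the 32-bit
* primary address written above is scattered across CR0D, CR0C, CR34
* and the low 5 bits of CR48, and how it could be reassembled. The
* 8+8+8+5 bit split caps the usable address at 29 bits. The register
* variables are hypothetical.
*/
#include <stdint.h>
static uint32_t primary_address_from_regs(uint8_t cr0d, uint8_t cr0c,
uint8_t cr34, uint8_t cr48)
{
return (uint32_t)cr0d |
((uint32_t)cr0c << 8) |
((uint32_t)cr34 << 16) |
(((uint32_t)cr48 & 0x1F) << 24);
}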
void via_set_primary_pitch(u32 pitch)
{
DEBUG_MSG(KERN_DEBUG "via_set_primary_pitch(0x%08X)\n", pitch);
/* the spec does not say that the first adapter skips 3 bits, but the
* old code did it and it seems reasonable by analogy with the 2nd adapter
*/
pitch = pitch >> 3;
via_write_reg(VIACR, 0x13, pitch & 0xFF);
via_write_reg_mask(VIACR, 0x35, (pitch >> (8 - 5)) & 0xE0, 0xE0);
}
void via_set_secondary_pitch(u32 pitch)
{
DEBUG_MSG(KERN_DEBUG "via_set_secondary_pitch(0x%08X)\n", pitch);
pitch = pitch >> 3;
via_write_reg(VIACR, 0x66, pitch & 0xFF);
via_write_reg_mask(VIACR, 0x67, (pitch >> 8) & 0x03, 0x03);
via_write_reg_mask(VIACR, 0x71, (pitch >> (10 - 7)) & 0x80, 0x80);
}
void via_set_primary_color_depth(u8 depth)
{
u8 value;
DEBUG_MSG(KERN_DEBUG "via_set_primary_color_depth(%d)\n", depth);
switch (depth) {
case 8:
value = 0x00;
break;
case 15:
value = 0x04;
break;
case 16:
value = 0x14;
break;
case 24:
value = 0x0C;
break;
case 30:
value = 0x08;
break;
default:
printk(KERN_WARNING "via_set_primary_color_depth: "
"Unsupported depth: %d\n", depth);
return;
}
via_write_reg_mask(VIASR, 0x15, value, 0x1C);
}
void via_set_secondary_color_depth(u8 depth)
{
u8 value;
DEBUG_MSG(KERN_DEBUG "via_set_secondary_color_depth(%d)\n", depth);
switch (depth) {
case 8:
value = 0x00;
break;
case 16:
value = 0x40;
break;
case 24:
value = 0xC0;
break;
case 30:
value = 0x80;
break;
default:
printk(KERN_WARNING "via_set_secondary_color_depth: "
"Unsupported depth: %d\n", depth);
return;
}
via_write_reg_mask(VIACR, 0x67, value, 0xC0);
}
| gpl-2.0 |
Surge1223/kernel_samsung-jfltevzw-VRUGOC1 | kernel/irq/autoprobe.c | 10908 | 4595 | /*
* linux/kernel/irq/autoprobe.c
*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
*
* This file contains the interrupt probing code and driver APIs.
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/async.h>
#include "internals.h"
/*
* Autodetection depends on the fact that any interrupt that
* comes in on to an unassigned handler will get stuck with
* "IRQS_WAITING" cleared and the interrupt disabled.
*/
static DEFINE_MUTEX(probing_active);
/**
* probe_irq_on - begin an interrupt autodetect
*
* Commence probing for an interrupt. The interrupts are scanned
* and a mask of potential interrupt lines is returned.
*
*/
unsigned long probe_irq_on(void)
{
struct irq_desc *desc;
unsigned long mask = 0;
int i;
/*
* quiesce the kernel, or at least the asynchronous portion
*/
async_synchronize_full();
mutex_lock(&probing_active);
/*
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && irq_settings_can_probe(desc)) {
/*
* Some chips need to know about probing in
* progress:
*/
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
irq_startup(desc, false);
}
raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
msleep(20);
/*
* enable any unassigned irqs
* (we must startup again here because if a longstanding irq
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && irq_settings_can_probe(desc)) {
desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
if (irq_startup(desc, false))
desc->istate |= IRQS_PENDING;
}
raw_spin_unlock_irq(&desc->lock);
}
/*
* Wait for spurious interrupts to trigger
*/
msleep(100);
/*
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (desc->istate & IRQS_AUTODETECT) {
/* It triggered already - consider it spurious. */
if (!(desc->istate & IRQS_WAITING)) {
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown(desc);
} else
if (i < 32)
mask |= 1 << i;
}
raw_spin_unlock_irq(&desc->lock);
}
return mask;
}
EXPORT_SYMBOL(probe_irq_on);
/**
* probe_irq_mask - scan a bitmap of interrupt lines
* @val: mask of interrupts to consider
*
* Scan the interrupt lines and return a bitmap of active
* autodetect interrupts. The interrupt probe logic state
* is then returned to its previous value.
*
* Note: we need to scan all the irq's even though we will
* only return autodetect irq numbers - just so that we reset
* them all to a known state.
*/
unsigned int probe_irq_mask(unsigned long val)
{
unsigned int mask = 0;
struct irq_desc *desc;
int i;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (desc->istate & IRQS_AUTODETECT) {
if (i < 16 && !(desc->istate & IRQS_WAITING))
mask |= 1 << i;
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
return mask & val;
}
EXPORT_SYMBOL(probe_irq_mask);
/**
* probe_irq_off - end an interrupt autodetect
* @val: mask of potential interrupts (unused)
*
* Scans the unused interrupt lines and returns the line which
* appears to have triggered the interrupt. If no interrupt was
* found then zero is returned. If more than one interrupt is
* found then minus the first candidate is returned to indicate
* their is doubt.
*
* The interrupt probe logic state is returned to its previous
* value.
*
* BUGS: When used in a module (which arguably shouldn't happen)
* nothing prevents two IRQ probe callers from overlapping. The
* results of this are non-optimal.
*/
int probe_irq_off(unsigned long val)
{
int i, irq_found = 0, nr_of_irqs = 0;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
raw_spin_lock_irq(&desc->lock);
if (desc->istate & IRQS_AUTODETECT) {
if (!(desc->istate & IRQS_WAITING)) {
if (!nr_of_irqs)
irq_found = i;
nr_of_irqs++;
}
desc->istate &= ~IRQS_AUTODETECT;
irq_shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
if (nr_of_irqs > 1)
irq_found = -irq_found;
return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
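/*
* Standalone sketch (not part of this file) of how a driver typically
* uses this autoprobe API; struct my_dev and dev_trigger_interrupt()
* are hypothetical stand-ins for the device being probed.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
struct my_dev { int irq; };
static void dev_trigger_interrupt(struct my_dev *dev); /* hypothetical */
static int my_dev_probe_irq(struct my_dev *dev)
{
unsigned long mask = probe_irq_on(); /* arm all probe-able lines */
dev_trigger_interrupt(dev); /* make the device raise its IRQ */
mdelay(10); /* give the interrupt time to arrive */
dev->irq = probe_irq_off(mask); /* >0: irq, 0: none, <0: ambiguous */
return dev->irq > 0 ? 0 : -ENODEV;
}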
| gpl-2.0 |
gpillusion/Samsung_Kernel_Source_SM-G710K | drivers/sensorhub/stm_hestia/adpd142.c | 157 | 70875 | /**
*\mainpage
* ADPD142 driver
* (c) copyright 2014 Analog Devices Inc.
* Licensed under the GPL version 3 or later.
* \date April 2014
* \version 1.1 Driver
* \version Linux 3.4.0
*/
#include "adpd142.h"
#ifdef CONFIG_OF
#include <linux/of_gpio.h>
#endif
static int dbg_enable;
module_param(dbg_enable, int, S_IRUGO | S_IWUSR);
/* ADPD142 driver version*/
#define ADPD142_VERSION "1.2"
/* ADPD142 release date*/
#define ADPD142_RELEASE_DATE "Tue Apr 22 12:11:42 IST 2014"
#define ADPD_DEV_NAME "adpd142"
/*INPUT DEVICE NAME LIST*/
#define MODULE_NAME_HRM "hrm_sensor"
#define CHIP_NAME "ADPD142RI"
#define VENDOR "ADI"
#define MAX_EOL_RESULT 132
/*I2C RELATED*/
#define I2C_RETRY_DELAY 5
#define I2C_RETRIES 5
/*ADPD142 DRIVER RELATED*/
#define MAX_BUFFER 128
#define GLOBAL_OP_MODE_OFF 0
#define GLOBAL_OP_MODE_IDLE 1
#define GLOBAL_OP_MODE_GO 2
#define SET_GLOBAL_OP_MODE(val, cmd) ((val & 0xFC) | cmd)
#define FRM_FILE 0
#define FRM_ARR 1
#define EN_FIFO_CLK 0x3B5B
#define DS_FIFO_CLK 0x3050
/******************************************************************/
/**************** ADPD142 USER OPERATING MODE ********************/
/*
Operating modes exposed to the user
*/
#define IDLE_MODE 0x00
#define SAMPLE_XY_A_MODE 0x30
#define SAMPLE_XY_AB_MODE 0x31
#define SAMPLE_XY_B_MODE 0x32
#define GET_USR_MODE(usr_val) ((usr_val & 0xF0) >> 4)
#define GET_USR_SUB_MODE(usr_val) (usr_val & 0xF)
/******************************************************************/
/**************** ADPD142 OPERATING MODE & DATA MODE *************/
#define R_RDATA_FLAG_EN (0x1 << 15)
#define R_PACK_START_EN (0x0 << 14)
#define R_RDOUT_MODE (0x0 << 13)
#define R_FIFO_PREVENT_EN (0x1 << 12)
#define R_SAMP_OUT_MODE (0x0 << 9)
#define DEFAULT_OP_MODE_CFG_VAL(cfg_x) (R_RDATA_FLAG_EN + R_PACK_START_EN +\
R_RDOUT_MODE + R_FIFO_PREVENT_EN+\
R_SAMP_OUT_MODE)
#define SET_R_OP_MODE_A(val) ((val & 0xF000) >> 12)
#define SET_R_DATAMODE_A(val) ((val & 0x0F00) >> 8)
#define SET_R_OP_MODE_B(val) ((val & 0x00F0) >> 4)
#define SET_R_DATAMODE_B(val) ((val & 0x000F))
/**
@brief Used to get the value need to be set for slot-A, B operating mode
and data out mode.
*/
#define SET_MODE_VALUE(val) (SET_R_OP_MODE_A(val) |\
(SET_R_DATAMODE_A(val) << 2) |\
(SET_R_OP_MODE_B(val) << 5) |\
(SET_R_DATAMODE_B(val) << 6))
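/*
* Standalone sketch (not part of this file) of what the macros above
* compute: the 16-bit user mode value packs OP_mode_A, DATA_mode_A,
* OP_mode_B and DATA_mode_B as four nibbles, and SET_MODE_VALUE
* re-packs them into the narrower bit fields of the OP_MODE_CFG
* register.
*/
#include <stdint.h>
static uint16_t set_mode_value(uint16_t v)
{
uint16_t op_a = (v & 0xF000) >> 12;
uint16_t data_a = (v & 0x0F00) >> 8;
uint16_t op_b = (v & 0x00F0) >> 4;
uint16_t data_b = (v & 0x000F);
/* e.g. SAMPLE_XY_A (0x1400): op_a=1, data_a=4 -> 0x0011 */
return op_a | (data_a << 2) | (op_b << 5) | (data_b << 6);
}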
/******************************************************************/
/**************** ADPD142 INTERRUPT MASK *************************/
#define INTR_MASK_IDLE_USR 0x0000
#define INTR_MASK_SAMPLE_USR 0x0020
#define INTR_MASK_S_SAMP_XY_A_USR 0x0020
#define INTR_MASK_S_SAMP_XY_AB_USR 0x0040
#define INTR_MASK_S_SAMP_XY_B_USR 0x0040
#define SET_INTR_MASK(usr_val) ((~(INTR_MASK_##usr_val)) & 0x1FF)
/******************************************************************/
/***************** ADPD142 PRINT *********************************/
#ifdef ADPD_DBG
#define ADPD142_dbg(format, arg...) \
printk(KERN_DEBUG "ADPD142 : "format, ##arg);
#else
#define ADPD142_dbg(format, arg...) {if (dbg_enable)\
printk(KERN_DEBUG "ADPD142 : "format, ##arg);\
}
#endif
#ifdef ADPD_INFO
#define ADPD142_info(format, arg...) \
printk(KERN_INFO "ADPD142 : "format, ##arg);
#else
#define ADPD142_info(format, arg...) {if (0)\
; }
#endif
/******************************************************************/
/***************** ADPD142 DIFFERENT MODE ************************/
/*
Main mode
*/
#define IDLE_USR 0
#define SAMPLE_USR 3
/*
Sub mode
*/
#define S_SAMP_XY_A 0
#define S_SAMP_XY_AB 1
#define S_SAMP_XY_B 2
/*
OPERATING MODE ==> MAIN_mode[20:16] |
OP_mode_A[15:12] |
DATA_mode_A[11:8]|
OP_mode_B[7:4] |
DATA_mode_B[3:0]
*/
#define IDLE_OFF ((IDLE_USR << 16) | (0x0000))
#define SAMPLE_XY_A ((SAMPLE_USR << 16) | (0x1400))
#define SAMPLE_XY_AB ((SAMPLE_USR << 16) | (0x1414))
#define SAMPLE_XY_B ((SAMPLE_USR << 16) | (0x0014))
#define MAX_MODE 4
#define MAX_IDLE_MODE 1
#define MAX_SAMPLE_MODE 3
#define MODE(val) __arr_mode_##val
#define MODE_SIZE(val) MAX_##val##_MODE
#define MAX_LIB_VER 20
#define OSC_TRIM_ADDR26_REG 0x26
#define OSC_TRIM_ADDR28_REG 0x28
#define OSC_TRIM_ADDR29_REG 0x29
#define OSC_TRIM_32K_REG 0x4B
#define OSC_TRIM_32M_REG 0x4D
#define OSC_DEFAULT_32K_SET 0x2695
#define OSC_DEFAULT_32M_SET 0x4272
#define OSCILLATOR_TRIM_FILE_PATH "/efs/osc_trim"
/**
mode mapping structure used to hold the number mode actually supported by
driver
*/
struct mode_mapping {
char *name;
int *mode_code;
unsigned int size;
};
int __arr_mode_IDLE[MAX_IDLE_MODE] = { IDLE_OFF };
int __arr_mode_SAMPLE[MAX_SAMPLE_MODE] = { SAMPLE_XY_A,
SAMPLE_XY_AB,
SAMPLE_XY_B };
struct mode_mapping __mode_recv_frm_usr[MAX_MODE] = {
{
.name = "IDLE",
.mode_code = MODE(IDLE),
.size = MODE_SIZE(IDLE)
},
{ .name = "UNSUPPORTED1",
.mode_code = 0,
.size = 0,
},
{ .name = "UNSUPPORTED2",
.mode_code = 0,
.size = 0,
},
{
.name = "SAMPLE",
.mode_code = MODE(SAMPLE),
.size = MODE_SIZE(SAMPLE)
}
};
/**
general data buffer that is required to store the recieved buffer
*/
unsigned short data_buffer[MAX_BUFFER] = { 0 };
struct wrk_q_timestamp {
struct timeval interrupt_trigger;
};
struct adpd142_stats {
atomic_t interrupts;
atomic_t fifo_requires_sync;
atomic_t fifo_bytes[4];
atomic_t wq_pending;
atomic_t wq_schedule_time_peak_usec;
atomic_t wq_schedule_time_last_usec;
struct ewma wq_schedule_time_avg_usec;
atomic_t data_process_time_peak_usec;
atomic_t data_process_time_last_usec;
struct ewma data_process_time_avg_usec;
struct wrk_q_timestamp stamp;
};
/**
structure hold adpd142 chip structure, and parameter used
to control the adpd142 software part.
*/
struct adpd142_data {
struct i2c_client *client;
struct mutex mutex;/*for chip structure*/
struct device *dev;
struct input_dev *ptr_sample_inputdev;
struct adpd_platform_data *ptr_config;
struct work_struct work;
struct workqueue_struct *ptr_adpd142_wq_st;
int irq;
int hrm_int;
unsigned short *ptr_data_buffer;
struct adpd142_stats stats;
unsigned short intr_mask_val;
unsigned short intr_status_val;
unsigned short fifo_size;
/*sysfs register read write */
unsigned short sysfs_I2C_regaddr;
unsigned short sysfs_I2C_regval;
unsigned short sysfslastreadval;
atomic_t sample_enabled;
atomic_t adpd_mode;
int (*read)(struct adpd142_data *, u8 reg_addr, int len, u16 *buf);
int (*write)(struct adpd142_data *, u8 reg_addr, int len, u16 data);
u8 eol_test_is_enable;
u8 eol_test_status;
char *eol_test_result;
char *eol_lib_ver;
char *elf_lib_ver;
int osc_trim_32K_value;
int osc_trim_32M_value;
int osc_trim_addr26_value;
int osc_trim_addr28_value;
int osc_trim_addr29_value;
u8 osc_trim_open_enable;
struct regulator *reg_adpd;
struct regulator *vdd_led;
};
/**
structure hold adpd142 configuration data to be written during probe.
*/
static struct adpd_platform_data adpd142_config_data = {
.config_size = 53,
.config_data = {
0x00100001,
0x000100ff,
0x00020004,
0x00060000,
0x00111011,
0x0012000A,
0x00130320,
0x00140449,
0x00150333,
0x001C4040,
0x001D4040,
0x00181400,
0x00191400,
0x001a1650,
0x001b1000,
0x001e1ff0,
0x001f1Fc0,
0x00201ad0,
0x00211470,
0x00231032,
0x00241039,
0x002502CC,
0x00340000,
0x00260000,
0x00270800,
0x00280000,
0x00294003,
0x00300330,
0x00310213,
0x00421d37,
0x0043ada5,
0x003924D2,
0x00350330,
0x00360813,
0x00441d37,
0x0045ada5,
0x003b24D2,
0x00590808,
0x00320320,
0x00330113,
0x003a22d4,
0x003c0006,
0x00401010,
0x0041004c,
0x004c0004,
0x004f2090,
0x00500000,
0x00523000,
0x0053e400,
0x005b1831,
0x005d1b31,
0x005e2808,
0x004e0040,
},
};
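/*
* Standalone sketch (not part of this file) of the packing used in
* config_data above (and produced by parse_data() below): each 32-bit
* entry carries the 8-bit register address in bits [23:16] and the
* 16-bit register value in bits [15:0].
*/
#include <stdint.h>
static void decode_config_entry(uint32_t entry, uint8_t *reg, uint16_t *val)
{
*reg = (entry >> 16) & 0xFF;
*val = entry & 0xFFFF;
}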
/**
This function is used to parse data from the string "0xXXX 1 0xYYY"
@param buf is a pointer to a constant string
@param cnt the number of values to parse
@param data parsed values are stored here
@return void
*/
static void
cmd_parsing(const char *buf, unsigned short cnt, unsigned short *data)
{
char **bp = (char **)&buf;
char *token, minus, parsing_cnt = 0;
int val;
int pos;
while ((token = strsep(bp, " "))) {
pos = 0;
minus = false;
if ((char)token[pos] == '-') {
minus = true;
pos++;
}
if ((token[pos] == '0') &&
(token[pos + 1] == 'x' || token[pos + 1] == 'X')) {
if (kstrtoul(&token[pos + 2], 16,
(long unsigned int *)&val))
val = 0;
if (minus)
val *= (-1);
} else {
if (kstrtoul(&token[pos], 10,
(long unsigned int *)&val))
val = 0;
if (minus)
val *= (-1);
}
if (parsing_cnt < cnt)
*(data + parsing_cnt) = val;
else
break;
parsing_cnt++;
}
}
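/*
* Standalone sketch (not part of this file) of the input this parser
* accepts: space-separated tokens, each decimal or 0x-prefixed hex,
* with an optional leading '-'. The call below is illustrative only.
*/
#if 0
unsigned short regs[3] = { 0 };
cmd_parsing("0x12 1 0xFF", 3, regs);
/* regs[0] == 0x0012, regs[1] == 1, regs[2] == 0x00FF */
#endif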
/**
This function is an inline function mapped to i2c read functionality
@param pst_adpd the ADPD142 data structure
@param reg is the address from which the value needs to be read out
@return u16 value present in the register.
*/
static inline u16
reg_read(struct adpd142_data *pst_adpd, u16 reg)
{
u16 value;
if (!pst_adpd->read(pst_adpd, reg, 1, &value))
return value;
return 0xFFFF;
}
/**
This function is an inline function mapped to i2c read functionality
@param pst_adpd the ADPD142 data structure
@param reg is the address from which the values need to be read out
@param len number of words to be read out
@param buf is a pointer to store the values read from the registers.
@return u16 status of i2c read.
*/
static inline u16
multi_reg_read(struct adpd142_data *pst_adpd, u16 reg, u16 len, u16 *buf)
{
return pst_adpd->read(pst_adpd, reg, len, buf);
}
/**
This function is an inline function mapped to i2c write functionality
@param pst_adpd the ADPD142 data structure
@param reg is the address to which the data need to be written
@param value is the data to be written.
@return u16 status of i2c write.
*/
static inline u16
reg_write(struct adpd142_data *pst_adpd, u16 reg, u16 value)
{
return pst_adpd->write(pst_adpd, reg, 1, value);
}
/**
This function is used for I2C read from the sysfs
file system of the ADPD142 Chip
@param pst_adpd the ADPD142 data structure
@return u16 the value read from the register.
*/
static u16
adpd142_sysfs_I2C_read(struct adpd142_data *pst_adpd)
{
return reg_read(pst_adpd, pst_adpd->sysfs_I2C_regaddr);
}
/**
This function is used for I2C write from the sysfs
file system of the ADPD142 Chip
@param pst_adpd the ADPD142 data structure
@return u16 the written register value, or 0xFFFF on error.
*/
static u16
adpd142_sysfs_I2C_write(struct adpd142_data *pst_adpd)
{
u16 err;
err = reg_write(pst_adpd, pst_adpd->sysfs_I2C_regaddr,
pst_adpd->sysfs_I2C_regval);
if (err)
return 0xFFFF;
else
return pst_adpd->sysfs_I2C_regval;
}
/**
This function is used to parse a string
@param recv_buf pointer to a string
@return int
*/
static int
parse_data(char *recv_buf)
{
char **bp = (char **)&recv_buf;
char *token, parsing_cnt = 0;
long val;
int test_data;
unsigned int data = 0;
int ret = 0;
char test[10] = {'\0'};
while ((token = strsep(bp, " \t"))) {
memset(test, '\0', 10);
memcpy(test, token, strlen(token));
memmove(test+2, test, strlen(test));
test[0] = '0';
test[1] = 'x';
ret = kstrtol(test, 0, &val);
if (ret) {
if (ret == -ERANGE) {
ADPD142_info("out of range\n");
val = 0;
}
if (ret == -EINVAL) {
ADPD142_info("parsing error\n");
sscanf(test, "%x", &test_data);
val = test_data;
}
}
if (parsing_cnt == 0) {
data = (int) (0xFF & val);
if (data == 0)
return -1;
}
if (parsing_cnt == 1) {
data = data << 16;
data |= (0xFFFF & val);
}
parsing_cnt++;
if (parsing_cnt > 2)
break;
}
return data;
}
/**
This function is used for reading configuration file
@param pst_adpd the ADPD142 data structure
@return int status of the configuration file.
*/
static int
adpd142_read_config_file(struct adpd142_data *pst_adpd)
{
mm_segment_t old_fs;
struct file *fpt_adpd = NULL;
struct adpd_platform_data *ptr_config = NULL;
int line_cnt = 0;
int start = 0;
int cnt = 0;
int ret = 0;
int i = 0;
char get_char;
char *recieved_buf = NULL;
loff_t pos = 0;
ptr_config = pst_adpd->ptr_config;
old_fs = get_fs();
set_fs(KERNEL_DS);
fpt_adpd = filp_open("/data/misc/adpd142_config.dcfg", O_RDONLY, 0666);
if (IS_ERR(fpt_adpd)) {
ADPD142_dbg("unable to find de file %ld\n", PTR_ERR(fpt_adpd));
set_fs(old_fs);
ret = /*PTR_ERR(fpt_adpd_filp); */ -1;
ptr_config->config_size = 0;
goto err_filp_open;
}
recieved_buf = kzalloc(sizeof(char) * 15, GFP_KERNEL);
memset(recieved_buf, '\0', sizeof(char) * 15);
while (vfs_read(fpt_adpd, &get_char, sizeof(char), &pos)) {
if (get_char == '\n') {
line_cnt++;
if (cnt > 1) {
ret = parse_data(recieved_buf);
if (ret == -1) {
ADPD142_dbg("Error in reading dcfg\n");
break;
}
ADPD142_info("0x%08x\n", ret);
ptr_config->config_data[i] = ret;
i++;
}
memset(recieved_buf, '\0', sizeof(char) * 15);
start = 0;
cnt = 0;
ret = 0;
} else {
if (get_char != '#') {
if (start != 0xF) {
recieved_buf[cnt] = get_char;
cnt++;
}
} else {
start = 0xF;
}
}
}
filp_close(fpt_adpd, NULL);
set_fs(old_fs);
kfree(recieved_buf);
ptr_config->config_size = i;
return 0;
err_filp_open:
return -1;
}
/**
This function is used for reading interrupt and FIFO status
@param pst_adpd the ADPD142 data structure
@return unsigned short interrupt status value
*/
static unsigned short
adpd142_rd_intr_fifo(struct adpd142_data *pst_adpd)
{
unsigned short regval = 0;
unsigned short fifo_size = 0;
unsigned short usr_mode = 0;
regval = reg_read(pst_adpd, ADPD_INT_STATUS_ADDR);
fifo_size = ((regval & 0xFF00) >> 8);
pst_adpd->fifo_size = ((fifo_size / 2) + (fifo_size & 1));
pst_adpd->intr_status_val = (regval & 0xFF);
ADPD142_info("Intr_status 0x%x, FIFO_SIZE 0x%x\n",
pst_adpd->intr_status_val, pst_adpd->fifo_size);
if (fifo_size <= 16)
atomic_inc(&pst_adpd->stats.fifo_bytes[0]);
else if (fifo_size <= 32)
atomic_inc(&pst_adpd->stats.fifo_bytes[1]);
else if (fifo_size <= 64)
atomic_inc(&pst_adpd->stats.fifo_bytes[2]);
else
atomic_inc(&pst_adpd->stats.fifo_bytes[3]);
usr_mode = atomic_read(&pst_adpd->adpd_mode);
if (0 != usr_mode) {
unsigned int mask = 0;
unsigned int mod = 0;
switch (GET_USR_SUB_MODE(usr_mode)) {
case S_SAMP_XY_AB:
mask = 0x07;
break;
default:
mask = 0x03;
break;
};
mod = pst_adpd->fifo_size & mask;
if (mod) {
ADPD142_info("Keeping %d samples in FIFO from %d\n",
mod, pst_adpd->fifo_size);
pst_adpd->fifo_size &= ~mask;
atomic_inc(&pst_adpd->stats.fifo_requires_sync);
}
}
return pst_adpd->intr_status_val;
}
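/*
* Standalone sketch (not part of this file) of the INT_STATUS word
* layout decoded above: the low byte holds the interrupt flags and the
* high byte the FIFO fill level in bytes, which is converted to a
* 16-bit-word count by halving and rounding up.
*/
#include <stdint.h>
static void decode_int_status(uint16_t regval, uint16_t *status,
uint16_t *fifo_words)
{
uint16_t fifo_bytes = (regval & 0xFF00) >> 8;
*status = regval & 0x00FF;
*fifo_words = (fifo_bytes / 2) + (fifo_bytes & 1); /* round up */
}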
/**
This function is used for clearing the Interrupt as well as FIFO
@param pst_adpd the ADPD142 data structure
@return void
*/
static void
adpd142_clr_intr_fifo(struct adpd142_data *pst_adpd)
{
/*below code is added for verification */
unsigned short regval;
regval = reg_read(pst_adpd, ADPD_INT_STATUS_ADDR);
ADPD142_info("fifo_size: 0x%04x, Status: 0x%x\n",
((regval & 0xFF00) >> 8), (regval & 0xFF));
reg_write(pst_adpd, ADPD_INT_STATUS_ADDR, 0x80FF);
regval = reg_read(pst_adpd, ADPD_INT_STATUS_ADDR);
ADPD142_info("After clear - fifo_size: 0x%04x, Status: 0x%x\n",
((regval & 0xFF00) >> 8), (regval & 0xFF));
}
/**
This function is used for clearing the Interrupt status
@param pst_adpd the ADPD142 data structure
@param mode operating mode of ADPD142
@return void
*/
static void
adpd142_clr_intr_status(struct adpd142_data *pst_adpd, unsigned short mode)
{
reg_write(pst_adpd, ADPD_INT_STATUS_ADDR,
pst_adpd->intr_status_val);
}
/**
This function is used to read out FIFO data from FIFO
@param pst_adpd the ADPD142 data structure
@return unsigned int FIFO size
*/
static unsigned int
adpd142_rd_fifo_data(struct adpd142_data *pst_adpd)
{
int loop_cnt = 0;
if (!pst_adpd->fifo_size)
goto err_rd_fifo_data;
if (0 != atomic_read(&pst_adpd->adpd_mode) &&
pst_adpd->fifo_size & 0x3) {
pr_err("Unexpected FIFO_SIZE=%d,\
should be multiple of four (channels)!\n",
pst_adpd->fifo_size);
}
/* reg_write(pst_adpd, ADPD_ACCESS_CTRL_ADDR, 0x0001);*/
reg_write(pst_adpd, ADPD_TEST_PD_ADDR, EN_FIFO_CLK);
multi_reg_read(pst_adpd, ADPD_DATA_BUFFER_ADDR,
pst_adpd->fifo_size, pst_adpd->ptr_data_buffer);
/* reg_write(pst_adpd, ADPD_ACCESS_CTRL_ADDR, 0x0000);*/
reg_write(pst_adpd, ADPD_TEST_PD_ADDR, DS_FIFO_CLK);
for (; loop_cnt < pst_adpd->fifo_size; loop_cnt++) {
ADPD142_info("[0x%04x] 0x%04x\n", loop_cnt,
pst_adpd->ptr_data_buffer[loop_cnt]);
}
return pst_adpd->fifo_size;
err_rd_fifo_data:
return 0;
}
/**
This function configures a prerequisite before register changes:
the global operating mode
@param pst_adpd the ADPD142 data structure
@param global_mode global mode: one of OFF, IDLE, or GO
@return void
*/
static void
adpd142_config_prerequisite(struct adpd142_data *pst_adpd, u32 global_mode)
{
unsigned short regval = 0;
regval = reg_read(pst_adpd, ADPD_OP_MODE_ADDR);
ADPD142_info("reg 0x%04x,\tafter set 0x%04x\n",
regval, SET_GLOBAL_OP_MODE(regval, global_mode));
regval = SET_GLOBAL_OP_MODE(regval, global_mode);
reg_write(pst_adpd, ADPD_OP_MODE_ADDR, regval);
}
/**
This function is used for switching the adpd142 mode
@param pst_adpd the ADPD142 data structure
@param usr_mode user mode
@return void
*/
static void
adpd142_mode_switching(struct adpd142_data *pst_adpd, u32 usr_mode)
{
unsigned int opmode_val = 0;
unsigned short mode_val = 0;
unsigned short intr_mask_val = 0;
unsigned short mode = GET_USR_MODE(usr_mode);
unsigned short sub_mode = GET_USR_SUB_MODE(usr_mode);
/* disable the IRQ to avoid further interrupts from triggering */
disable_irq(pst_adpd->irq);
/* stop the pending work; cancel_work_sync() guarantees the work is
neither pending nor executing on any CPU when it returns */
cancel_work_sync(&pst_adpd->work);
atomic_set(&pst_adpd->adpd_mode, 0);
mutex_lock(&pst_adpd->mutex);
/* depending on the mode, get the value to write to the operating mode register */
opmode_val = *(__mode_recv_frm_usr[mode].mode_code + (sub_mode));
adpd142_config_prerequisite(pst_adpd, GLOBAL_OP_MODE_IDLE);
/*switch to IDLE mode */
mode_val = DEFAULT_OP_MODE_CFG_VAL(pst_adpd);
mode_val += SET_MODE_VALUE(IDLE_OFF);
intr_mask_val = SET_INTR_MASK(IDLE_USR);
reg_write(pst_adpd, ADPD_OP_MODE_CFG_ADDR, mode_val);
reg_write(pst_adpd, ADPD_INT_MASK_ADDR, intr_mask_val);
/*clear FIFO and flush buffer */
adpd142_clr_intr_fifo(pst_adpd);
adpd142_rd_intr_fifo(pst_adpd);
if (pst_adpd->fifo_size != 0) {
adpd142_clr_intr_status(pst_adpd, IDLE_USR);
ADPD142_info("Flushing FIFO\n");
adpd142_rd_fifo_data(pst_adpd);
}
adpd142_config_prerequisite(pst_adpd, GLOBAL_OP_MODE_GO);
msleep(20);
adpd142_config_prerequisite(pst_adpd, GLOBAL_OP_MODE_IDLE);
/*Find Interrupt mask value */
switch (mode) {
case IDLE_USR:
ADPD142_info("IDLE MODE\n");
intr_mask_val = SET_INTR_MASK(IDLE_USR);
atomic_set(&pst_adpd->sample_enabled, 0);
break;
case SAMPLE_USR:
ADPD142_info("SAMPLE MODE\n");
/*enable interrupt only when data written to FIFO */
switch (sub_mode) {
case S_SAMP_XY_A:
intr_mask_val = SET_INTR_MASK(S_SAMP_XY_A_USR);
break;
case S_SAMP_XY_AB:
intr_mask_val = SET_INTR_MASK(S_SAMP_XY_AB_USR);
break;
case S_SAMP_XY_B:
intr_mask_val = SET_INTR_MASK(S_SAMP_XY_B_USR);
break;
default:
intr_mask_val = SET_INTR_MASK(SAMPLE_USR);
break;
};
intr_mask_val |= 0xC000;
atomic_set(&pst_adpd->sample_enabled, 1);
break;
default:
/*This case won't occur */
ADPD142_dbg("invalid mode\n");
break;
};
ADPD142_info("INT_MASK_ADDR[0x%04x] = 0x%04x\n", ADPD_INT_MASK_ADDR,
intr_mask_val);
pst_adpd->intr_mask_val = intr_mask_val;
/*Fetch Default opmode configuration other than OP mode
and DATA_OUT mode */
mode_val = DEFAULT_OP_MODE_CFG_VAL(pst_adpd);
/* Get the mode value from the opmode value;
to hardcode it, change the SET_MODE_VALUE macro and the
corresponding mode macro
*/
mode_val += SET_MODE_VALUE(opmode_val);
ADPD142_info("OP_MODE_CFG[0x%04x] = 0x%04x\n",
ADPD_OP_MODE_CFG_ADDR, mode_val);
atomic_set(&pst_adpd->adpd_mode, usr_mode);
reg_write(pst_adpd, ADPD_INT_MASK_ADDR, pst_adpd->intr_mask_val);
reg_write(pst_adpd, ADPD_OP_MODE_CFG_ADDR, mode_val);
mutex_unlock(&pst_adpd->mutex);
enable_irq(pst_adpd->irq);
if (mode != IDLE_USR)
adpd142_config_prerequisite(pst_adpd, GLOBAL_OP_MODE_GO);
else
adpd142_config_prerequisite(pst_adpd, GLOBAL_OP_MODE_OFF);
}
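/*
 * Note (assumption): GET_USR_MODE()/GET_USR_SUB_MODE() are defined in the
 * header and appear to unpack a mode field and a sub-mode index from the
 * user-supplied value; e.g. the "0x31" written by sample_attr_set_enable()
 * below selects SAMPLE_USR mode with one of the S_SAMP_XY_* sub-modes.
 */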
/**
This function is used for sending sample input events depending on
the sample sub-mode
@param pst_adpd the ADPD142 data structure
@return void
*/
static void
adpd142_sample_event(struct adpd142_data *pst_adpd)
{
unsigned short usr_mode = 0;
unsigned short sub_mode = 0;
unsigned short cnt = 0;
ADPD142_info("%s\n", __func__);
usr_mode = atomic_read(&pst_adpd->adpd_mode);
sub_mode = GET_USR_SUB_MODE(usr_mode);
switch (sub_mode) {
case S_SAMP_XY_A:
if (pst_adpd->fifo_size < 4 || (pst_adpd->fifo_size & 0x3)) {
pr_err("Unexpected FIFO_SIZE=%d\n",
pst_adpd->fifo_size);
break;
}
for (cnt = 0; cnt < pst_adpd->fifo_size; cnt += 4) {
input_event(pst_adpd->ptr_sample_inputdev, EV_REL,
REL_Z, sub_mode + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW, pst_adpd->ptr_data_buffer[cnt] + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW, pst_adpd->ptr_data_buffer[cnt + 1] + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW, pst_adpd->ptr_data_buffer[cnt + 2] + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW, pst_adpd->ptr_data_buffer[cnt + 3] + 1);
input_sync(pst_adpd->ptr_sample_inputdev);
}
break;
case S_SAMP_XY_AB:
if (pst_adpd->fifo_size < 8 || (pst_adpd->fifo_size & 0x7)) {
pr_err("Unexpected FIFO_SIZE=%d\n",
pst_adpd->fifo_size);
break;
}
for (cnt = 0; cnt < pst_adpd->fifo_size; cnt += 8) {
unsigned int channel;
unsigned int sum_slot_a = 0;
unsigned int sum_slot_b = 0;
input_event(pst_adpd->ptr_sample_inputdev,
EV_REL, REL_Z, sub_mode + 1);
for (channel = 0; channel < 4; ++channel) {
sum_slot_a +=
pst_adpd->ptr_data_buffer[cnt+channel];
input_event(pst_adpd->ptr_sample_inputdev,
EV_MSC, MSC_RAW, pst_adpd->ptr_data_buffer[cnt+channel] + 1);
}
input_event(pst_adpd->ptr_sample_inputdev,
EV_REL, REL_X, sum_slot_a + 1);
for (channel = 4; channel < 8; ++channel) {
sum_slot_b +=
pst_adpd->ptr_data_buffer[cnt+channel];
input_event(pst_adpd->ptr_sample_inputdev,
EV_MSC, MSC_RAW, pst_adpd->ptr_data_buffer[cnt+channel] + 1);
}
input_event(pst_adpd->ptr_sample_inputdev,
EV_REL, REL_Y, sum_slot_b + 1);
input_sync(pst_adpd->ptr_sample_inputdev);
}
break;
case S_SAMP_XY_B:
if (pst_adpd->fifo_size < 4 || (pst_adpd->fifo_size & 0x3)) {
pr_err("Unexpected FIFO_SIZE=%d\n",
pst_adpd->fifo_size);
break;
}
for (cnt = 0; cnt < pst_adpd->fifo_size; cnt += 4) {
input_event(pst_adpd->ptr_sample_inputdev, EV_REL,
REL_Z,
sub_mode + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW,
pst_adpd->ptr_data_buffer[cnt] + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW,
pst_adpd->ptr_data_buffer[cnt + 1] + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW,
pst_adpd->ptr_data_buffer[cnt + 2] + 1);
input_event(pst_adpd->ptr_sample_inputdev, EV_MSC,
MSC_RAW,
pst_adpd->ptr_data_buffer[cnt + 3] + 1);
input_sync(pst_adpd->ptr_sample_inputdev);
}
break;
default:
break;
};
}
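/*
 * Note: every value reported above is offset by +1 (REL_Z carries
 * sub_mode + 1, MSC_RAW carries sample + 1). This appears to work around
 * the input core suppressing zero-valued EV_REL events, with MSC_RAW
 * offset the same way for symmetry; a userspace reader must subtract 1
 * to recover the raw FIFO sample.
 */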
/**
This function is used for handling the FIFO when an interrupt occurs
@param pst_adpd the ADPD142 data structure
@return void
*/
static void
adpd142_data_handler(struct adpd142_data *pst_adpd)
{
unsigned short usr_mode = 0;
unsigned short mode = 0;
unsigned short sub_mode = 0;
mutex_lock(&pst_adpd->mutex);
usr_mode = atomic_read(&pst_adpd->adpd_mode);
mode = GET_USR_MODE(usr_mode);
sub_mode = GET_USR_SUB_MODE(usr_mode);
ADPD142_info("mode - 0x%x,\t sub_mode - 0x%x\n", mode, sub_mode);
adpd142_rd_intr_fifo(pst_adpd);
if (pst_adpd->intr_status_val == 0) {
mutex_unlock(&pst_adpd->mutex);
return;
}
adpd142_clr_intr_status(pst_adpd, mode);
switch (mode) {
case IDLE_USR:
ADPD142_dbg("IDLE_MODE\n");
adpd142_rd_fifo_data(pst_adpd);
break;
case SAMPLE_USR:
ADPD142_info("SAMPLE MODE\n");
adpd142_rd_fifo_data(pst_adpd);
adpd142_sample_event(pst_adpd);
break;
default:
ADPD142_info("DEFAULT MODE\n");
adpd142_rd_fifo_data(pst_adpd);
break;
};
mutex_unlock(&pst_adpd->mutex);
}
/**
This function is a handler for WorkQueue
@param ptr_work linux work structure
@return void
*/
static void
adpd142_wq_handler(struct work_struct *ptr_work)
{
struct adpd142_data *pst_adpd = container_of(ptr_work,
struct adpd142_data, work);
struct timeval wkq_start;
struct timeval wkq_comp;
int diff_usec = 0;
do_gettimeofday(&wkq_start);
diff_usec = timeval_compare(&wkq_start,
&pst_adpd->stats.stamp.interrupt_trigger);
if (diff_usec > 1) {
if (diff_usec >
atomic_read(
&pst_adpd->stats.wq_schedule_time_peak_usec))
atomic_set(
&pst_adpd->stats.wq_schedule_time_peak_usec,
diff_usec);
atomic_set(&pst_adpd->stats.wq_schedule_time_last_usec,
diff_usec);
ewma_add(&pst_adpd->stats.wq_schedule_time_avg_usec, diff_usec);
}
adpd142_data_handler(pst_adpd);
do_gettimeofday(&wkq_comp);
diff_usec = timeval_compare(&wkq_comp, &wkq_start);
if (diff_usec > 1) {
if (diff_usec >
atomic_read(&pst_adpd->stats.data_process_time_peak_usec))
atomic_set(
&pst_adpd->stats.data_process_time_peak_usec,
diff_usec);
atomic_set(&pst_adpd->stats.data_process_time_last_usec,
diff_usec);
ewma_add(&pst_adpd->stats.data_process_time_avg_usec,
diff_usec);
}
}
/**
This function is used for handling Interrupt from ADPD142
@param irq is Interrupt number
@param dev_id is pointer point to ADPD142 data structure
@return irqreturn_t interrupt handling status
*/
static irqreturn_t
adpd142_isr_handler(int irq, void *dev_id)
{
struct adpd142_data *pst_adpd = dev_id;
atomic_inc(&pst_adpd->stats.interrupts);
if (!work_pending(&pst_adpd->work)) {
do_gettimeofday(&pst_adpd->stats.stamp.interrupt_trigger);
ADPD142_info("%s\n", __func__);
if (!queue_work(pst_adpd->ptr_adpd142_wq_st, &pst_adpd->work))
atomic_inc(&pst_adpd->stats.wq_pending);
} else {
atomic_inc(&pst_adpd->stats.wq_pending);
ADPD142_info("work_pending !!\n");
}
return IRQ_HANDLED;
}
/**
This function is used for updating the ADPD142 structure after configuration
@param pst_adpd the ADPD142 data structure
@return void
*/
static void
adpd142_update_config(struct adpd142_data *pst_adpd)
{
return;
}
/**
This function is used for loading the configuration data to ADPD142 chip
0 - From file "/data/misc/adpd142_configuration.dcfg"
1 - From Static defined Array
@param pst_adpd the ADPD142 data structure
@param config configuration command
@return int status
*/
static int
adpd142_configuration(struct adpd142_data *pst_adpd, unsigned char config)
{
struct adpd_platform_data *ptr_config;
unsigned short addr;
unsigned short data;
unsigned short cnt = 0;
int ret = 0;
adpd142_mode_switching(pst_adpd, 0);
if (config == FRM_FILE) {
ret = adpd142_read_config_file(pst_adpd);
ADPD142_info("ARRAY_SIZE - %d\n", size);
} else {
ret = FRM_ARR;
}
if (ret == 0)
ptr_config = pst_adpd->ptr_config;
else
ptr_config = &adpd142_config_data;
for (cnt = 0; cnt < ptr_config->config_size; cnt++) {
addr = (unsigned short) ((0xFFFF0000 &
ptr_config->config_data[cnt]) >> 16);
data = (unsigned short) (0x0000FFFF &
ptr_config->config_data[cnt]);
ADPD142_dbg("addr[0x%04x] = 0x%04x\n", addr, data);
reg_write(pst_adpd, addr, data);
}
adpd142_update_config(pst_adpd);
return 0;
}
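/*
 * Worked example: a packed config word of 0x00120050 is split into
 * addr = (0xFFFF0000 & 0x00120050) >> 16 = 0x0012 and
 * data = 0x0000FFFF & 0x00120050 = 0x0050, i.e. reg_write(0x0012, 0x0050).
 */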
/* read & write efs for hrm sensor */
static int osc_trim_efs_register_open(struct adpd142_data *pst_adpd)
{
struct file *osc_filp = NULL;
char buffer[60] = {0, };
int err = 0;
int osc_trim_32k, osc_trim_32m, osc_trim_addr26, osc_trim_addr28, osc_trim_addr29;
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
osc_filp = filp_open(OSCILLATOR_TRIM_FILE_PATH, O_RDONLY, 0666);
if (IS_ERR(osc_filp)) {
err = PTR_ERR(osc_filp);
if (err != -ENOENT)
pr_err("adpd142_%s: Can't open oscillator trim file\n", __func__);
set_fs(old_fs);
return err;
}
err = osc_filp->f_op->read(osc_filp,
(char *)buffer,
60 * sizeof(char), &osc_filp->f_pos);
if (err != (60 * sizeof(char))) {
pr_err("adpd142_%s: Can't read the oscillator trim data from file\n", __func__);
filp_close(osc_filp, current->files);
set_fs(old_fs);
return -EIO;
}
sscanf(buffer, "%d,%d,%d,%d,%d",
&osc_trim_32k, &osc_trim_32m, &osc_trim_addr26, &osc_trim_addr28, &osc_trim_addr29);
pr_info("adpd142_%s = (%d,%d,%d,%d,%d)\n",
__func__, osc_trim_32k, osc_trim_32m, osc_trim_addr26, osc_trim_addr28, osc_trim_addr29);
pst_adpd->osc_trim_32K_value = osc_trim_32k;
pst_adpd->osc_trim_32M_value = osc_trim_32m;
pst_adpd->osc_trim_addr26_value = osc_trim_addr26;
pst_adpd->osc_trim_addr28_value = osc_trim_addr28;
pst_adpd->osc_trim_addr29_value = osc_trim_addr29;
filp_close(osc_filp, current->files);
set_fs(old_fs);
return err;
}
static int osc_trim_efs_register_save(struct adpd142_data *pst_adpd)
{
struct file *osc_filp = NULL;
mm_segment_t old_fs;
char buffer[60] = {0, };
int osc_trim_32k, osc_trim_32m, osc_trim_addr26, osc_trim_addr28, osc_trim_addr29;
int err = 0;
old_fs = get_fs();
set_fs(KERNEL_DS);
osc_filp = filp_open(OSCILLATOR_TRIM_FILE_PATH,
O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (IS_ERR(osc_filp)) {
pr_err("adpd142_%s: Can't open oscillator trim file\n", __func__);
set_fs(old_fs);
err = PTR_ERR(osc_filp);
return err;
}
osc_trim_32k = reg_read(pst_adpd, OSC_TRIM_32K_REG);
osc_trim_32m = reg_read(pst_adpd, OSC_TRIM_32M_REG);
osc_trim_addr26 = reg_read(pst_adpd, OSC_TRIM_ADDR26_REG);
osc_trim_addr28 = reg_read(pst_adpd, OSC_TRIM_ADDR28_REG);
osc_trim_addr29 = reg_read(pst_adpd, OSC_TRIM_ADDR29_REG);
sprintf(buffer, "%d,%d,%d,%d,%d",
osc_trim_32k, osc_trim_32m, osc_trim_addr26, osc_trim_addr28, osc_trim_addr29);
pr_info("adpd142_%s = (%d,%d,%d,%d,%d)\n",
__func__, osc_trim_32k, osc_trim_32m, osc_trim_addr26, osc_trim_addr28, osc_trim_addr29);
err = osc_filp->f_op->write(osc_filp,
(char *)buffer,
60 * sizeof(char), &osc_filp->f_pos);
if (err != (60 * sizeof(char))) {
pr_err("adpd142_%s: Can't write the oscillator trim data to file\n", __func__);
err = -EIO;
}
pst_adpd->osc_trim_32K_value = osc_trim_32k;
pst_adpd->osc_trim_32M_value = osc_trim_32m;
pst_adpd->osc_trim_addr26_value = osc_trim_addr26;
pst_adpd->osc_trim_addr28_value = osc_trim_addr28;
pst_adpd->osc_trim_addr29_value = osc_trim_addr29;
filp_close(osc_filp, current->files);
set_fs(old_fs);
return err;
}
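/*
 * Example (hypothetical values): the EFS file holds the five trim values
 * as comma-separated decimals, e.g. a buffer of "18,102,0,4,7" would
 * restore 0x12 to OSC_TRIM_32K_REG, 0x66 to OSC_TRIM_32M_REG, and so on
 * when read back by osc_trim_efs_register_open().
 */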
/**
This function clears all the statistic counters.
@param pst_adpd the ADPD142 data structure
@return void
*/
static void
adpd_stat_reset(struct adpd142_data *pst_adpd)
{
atomic_set(&pst_adpd->stats.interrupts, 0);
atomic_set(&pst_adpd->stats.wq_pending, 0);
atomic_set(&pst_adpd->stats.wq_schedule_time_peak_usec, 0);
atomic_set(&pst_adpd->stats.wq_schedule_time_last_usec, 0);
atomic_set(&pst_adpd->stats.data_process_time_peak_usec, 0);
atomic_set(&pst_adpd->stats.data_process_time_last_usec, 0);
atomic_set(&pst_adpd->stats.fifo_requires_sync, 0);
atomic_set(&pst_adpd->stats.fifo_bytes[0], 0);
atomic_set(&pst_adpd->stats.fifo_bytes[1], 0);
atomic_set(&pst_adpd->stats.fifo_bytes[2], 0);
atomic_set(&pst_adpd->stats.fifo_bytes[3], 0);
ewma_init(&pst_adpd->stats.wq_schedule_time_avg_usec, 2048, 128);
ewma_init(&pst_adpd->stats.data_process_time_avg_usec, 2048, 128);
}
/* SAMPLE - SYSFS ATTRIBUTE*/
/**
This function is used for getting the status of the sample enable bit
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@return ssize_t size of data present in the buffer
*/
static ssize_t
sample_attr_get_enable(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
int val = atomic_read(&pst_adpd->sample_enabled);
return sprintf(buf, "%d\n", val);
}
/**
This function is used for enabling the Sample mode
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@param size buffer size
@return ssize_t size of data written to the buffer
*/
static ssize_t
sample_attr_set_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned short parse_data[2];
unsigned short mode = 0;
unsigned short osc_trim_32k, osc_trim_32m;
unsigned short osc_trim_addr26 = 0, osc_trim_addr28 = 0, osc_trim_addr29 = 0;
int err;
int val;
if (sscanf(buf, "%d", &val) != 1)
return -EINVAL;
memset(parse_data, 0, sizeof(parse_data));
if (pst_adpd->osc_trim_open_enable == 1) {
err = osc_trim_efs_register_open(pst_adpd);
if (err < 0) {
pr_err("adpd142_%s: osc_trim_efs_register_open() empty(%d)\n", __func__, err);
osc_trim_32k = OSC_DEFAULT_32K_SET;
osc_trim_32m = OSC_DEFAULT_32M_SET;
} else {
osc_trim_32k = (unsigned short)pst_adpd->osc_trim_32K_value;
osc_trim_32m = (unsigned short)pst_adpd->osc_trim_32M_value;
osc_trim_addr26 = (unsigned short)pst_adpd->osc_trim_addr26_value;
osc_trim_addr28 = (unsigned short)pst_adpd->osc_trim_addr28_value;
osc_trim_addr29 = (unsigned short)pst_adpd->osc_trim_addr29_value;
reg_write(pst_adpd, OSC_TRIM_ADDR26_REG, osc_trim_addr26);
reg_write(pst_adpd, OSC_TRIM_ADDR28_REG, osc_trim_addr28);
reg_write(pst_adpd, OSC_TRIM_ADDR29_REG, osc_trim_addr29);
}
pr_info("adpd142_%s = 32K[%02x], 32M[%02x], addr26[%02x], addr28[%02x], addr29[%02x]\n",
__func__, osc_trim_32k, osc_trim_32m, osc_trim_addr26, osc_trim_addr28, osc_trim_addr29);
reg_write(pst_adpd, OSC_TRIM_32K_REG, osc_trim_32k);
reg_write(pst_adpd, OSC_TRIM_32M_REG, osc_trim_32m);
pst_adpd->osc_trim_open_enable = 0;
}
if (val == 1) {
pr_info("adpd142_%s_enable.\n", __func__);
if (!IS_ERR(pst_adpd->vdd_led))
regulator_enable(pst_adpd->vdd_led);
cmd_parsing("0x31", 1, parse_data);
atomic_set(&pst_adpd->sample_enabled, 1);
} else {
if (!IS_ERR(pst_adpd->vdd_led))
regulator_disable(pst_adpd->vdd_led);
cmd_parsing("0x0", 1, parse_data);
atomic_set(&pst_adpd->sample_enabled, 0);
}
mode = GET_USR_MODE(parse_data[0]);
if (mode < MAX_MODE) {
if ((GET_USR_SUB_MODE(parse_data[0])) <
__mode_recv_frm_usr[mode].size) {
adpd142_mode_switching(pst_adpd, parse_data[0]);
} else {
ADPD142_dbg("Sub mode Out of bound\n");
adpd142_mode_switching(pst_adpd, 0);
}
} else {
ADPD142_dbg("Mode out of bound\n");
adpd142_mode_switching(pst_adpd, 0);
}
return size;
}
/* GENERAL - SYSFS ATTRIBUTE*/
/**
This function is used for getting the status of the adpd_mode
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@return ssize_t size of data present in the buffer
*/
static ssize_t
attr_get_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
int val = atomic_read(&pst_adpd->adpd_mode);
return sprintf(buf, "%d\n", val);
}
/**
This function is used for switching ADPD142 mode
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@param size buffer size
@return ssize_t size of data written to the buffer
*/
static ssize_t
attr_set_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned short parse_data[2];
unsigned short mode = 0;
memset(parse_data, 0, sizeof(parse_data));
cmd_parsing(buf, 1, parse_data);
ADPD142_info("Mode requested 0x%02x\n", parse_data[0]);
pr_info("adpd142_%s = data0[%02x], data1[%02x]\n", __func__, parse_data[0], parse_data[1]);
mode = GET_USR_MODE(parse_data[0]);
if (mode < MAX_MODE) {
if ((GET_USR_SUB_MODE(parse_data[0])) <
__mode_recv_frm_usr[mode].size) {
adpd142_mode_switching(pst_adpd, parse_data[0]);
} else {
ADPD142_dbg("Sub mode Out of bound\n");
adpd142_mode_switching(pst_adpd, 0);
}
} else {
ADPD142_dbg("Mode out of bound\n");
adpd142_mode_switching(pst_adpd, 0);
}
return size;
}
/**
This function is used for reading the register value
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@return ssize_t size of data present in the buffer
*/
static ssize_t
attr_reg_read_get(struct device *dev, struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
ADPD142_dbg("Regval: 0x%4x\n", pst_adpd->sysfslastreadval);
return sprintf(buf, "0x%04x\n", pst_adpd->sysfslastreadval);
}
/**
This function is used for writing the register for reading back
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@param size buffer size
@return ssize_t size of data written to the buffer
*/
static ssize_t
attr_reg_read_set(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned short addr, cnt;
unsigned short parse_data[4];
unsigned short ret;
memset(parse_data, 0, sizeof(unsigned short) * 4);
cmd_parsing(buf, 2, parse_data);
addr = parse_data[0];
cnt = parse_data[1];
mutex_lock(&pst_adpd->mutex);
pst_adpd->sysfs_I2C_regaddr = addr;
ret = adpd142_sysfs_I2C_read(pst_adpd);
if (ret != (-1)) {
ADPD142_dbg("RegRead_Store: addr = 0x%04X,value = 0x%04X\n",
addr, ret);
pst_adpd->sysfslastreadval = ret;
} else {
ADPD142_dbg("%s Error\n", __func__);
pst_adpd->sysfslastreadval = (unsigned short) -1;
}
mutex_unlock(&pst_adpd->mutex);
return size;
}
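/*
 * Usage sketch (assuming the cmd_parsing() token format used above:
 * "<addr> <count>"):
 *
 *   echo "0x08 1" > adpd_reg_read   # latch register 0x08
 *   cat adpd_reg_read               # prints e.g. "0x1234"
 */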
void adpd_power_enable(struct adpd142_data *info, bool onoff)
{
int ret;
pr_info("%s: %s\n", __func__, onoff ? "on" : "off");
if (!info->vdd_led) {
info->vdd_led = regulator_get(&info->client->dev, "adpd_led");
if (IS_ERR(info->vdd_led)) {
dev_err(&info->client->dev, "Regulator(adpd_led) get failed rc = %ld\n", PTR_ERR(info->vdd_led));
return;
}
ret = regulator_set_voltage(info->vdd_led, 3300000, 3300000);
if (ret) {
dev_err(&info->client->dev,
"regulator(vdd_led) set_vtg failed rc=%d\n", ret);
return;
}
}
if (!info->reg_adpd) {
info->reg_adpd = regulator_get(&info->client->dev, "adpd_vreg");
if (IS_ERR(info->reg_adpd)) {
dev_err(&info->client->dev, "Regulator(adpd_vreg) get failed rc = %ld\n", PTR_ERR(info->reg_adpd));
return;
}
}
if (onoff) {
if (!regulator_is_enabled(info->vdd_led)) {
ret = regulator_enable(info->vdd_led);
if (ret) {
pr_err("%s: enable vdd_led failed, rc=%d\n",__func__, ret);
return;
}
pr_info("%sHRM_LED 3.3V[vdd_led] on is finished.\n",__func__);
} else
pr_info("%sHRM_LED 3.3V[vdd_led] is already on.\n",__func__);
if (!regulator_is_enabled(info->reg_adpd)) {
ret = regulator_enable(info->reg_adpd);
if (ret) {
pr_err("%s: enable adpd_vreg failed, rc=%d\n",__func__, ret);
return;
}
pr_info("%sHRM_adpd_vreg 1.8V[adpd_vreg] on is finished.\n",__func__);
} else
pr_info("%sHRM_adpd_vreg 1.8V[adpd_vreg] is already on.\n",__func__);
} else {
if (regulator_is_enabled(info->vdd_led)) {
ret = regulator_disable(info->vdd_led);
if (ret) {
pr_err("%s: disable vdd_led failed, rc=%d\n",
__func__, ret);
return;
}
pr_info("%sHRM_LED 3.3V[vdd_led] off is finished.\n",__func__);
} else
pr_info("%sHRM_LED 3.3V[vdd_led] is already off.\n",__func__);
if (regulator_is_enabled(info->reg_adpd)) {
ret = regulator_disable(info->reg_adpd);
if (ret) {
pr_err("%s: disable adpd_vreg failed, rc=%d\n",
__func__, ret);
return;
}
pr_info("%sHRM_adpd_vreg 1.8V[adpd_vreg] off is finished.\n",__func__);
} else
pr_info("%sHRM_adpd_vreg 1.8V[adpd_vreg] is already off.\n",__func__);
}
}
static int adpd_parse_dt(struct adpd142_data *data, struct device *dev)
{
struct device_node *this_node = dev->of_node;
enum of_gpio_flags flags;
int rc;
if (this_node == NULL)
return -ENODEV;
data->hrm_int = of_get_named_gpio_flags(this_node, "adpd142,irq_gpio", 0, &flags);
pr_info("%s: got hrm_int GPIO (%d)\n", __func__, data->hrm_int);
if (data->hrm_int < 0) {
pr_err("%s : get hrm_int(%d) error\n", __func__, data->hrm_int);
return -ENODEV;
}
rc = gpio_request(data->hrm_int, "hrm_int");
if (rc) {
pr_err("%s - failed to request hrm_int\n", __func__);
goto err_int_gpio_req;
}
rc = gpio_direction_input(data->hrm_int);
if (rc) {
pr_err("%s - failed to set hrm_int as input\n", __func__);
goto err_int_gpio_direction_input;
}
return 0;
err_int_gpio_direction_input:
gpio_free(data->hrm_int);
err_int_gpio_req:
return rc;
}
/**
This function is used for writing a particular data to the register
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@param size buffer size
@return ssize_t size of data written to the buffer
*/
static ssize_t
attr_reg_write_set(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned short cnt;
unsigned short parse_data[4];
unsigned short ret;
memset(parse_data, 0, sizeof(unsigned short) * 4);
cmd_parsing(buf, 3, parse_data);
if (parse_data[1] != 1) {
ADPD142_dbg("few many argument!!\n");
goto err_reg_write_argument;
}
pst_adpd->sysfs_I2C_regaddr = parse_data[0];
cnt = parse_data[1];
pst_adpd->sysfs_I2C_regval = parse_data[2];
mutex_lock(&pst_adpd->mutex);
ret = adpd142_sysfs_I2C_write(pst_adpd);
if (ret == pst_adpd->sysfs_I2C_regval) {
ADPD142_dbg("Reg[0x%04x] = 0x%04x\n",
pst_adpd->sysfs_I2C_regaddr,
pst_adpd->sysfs_I2C_regval);
} else {
ADPD142_dbg("Reg write error!!\n");
}
adpd142_update_config(pst_adpd);
mutex_unlock(&pst_adpd->mutex);
err_reg_write_argument:
return size;
}
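/*
 * Usage sketch (assuming the same cmd_parsing() token format): the middle
 * token is the word count and must be 1, e.g.
 *
 *   echo "0x08 1 0x1234" > adpd_reg_write
 */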
/**
This function is used for getting the status of configuration
TBD
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@return ssize_t size of data present in the buffer
*/
static ssize_t
attr_config_get(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "status of config thread\n");
}
/**
This function is used for writing the configuration value to the register
0 - write the configuration data present in file to ADPD142
1 - write the configuration data present in Array to ADPD142
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@param size buffer size
@return ssize_t size of data written to the buffer
*/
static ssize_t
attr_config_set(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned short parse_data[1];
memset(parse_data, 0, sizeof(unsigned short) * 1);
cmd_parsing(buf, 1, parse_data);
if (parse_data[0] == FRM_ARR)
adpd142_configuration(pst_adpd, FRM_ARR);
else if (parse_data[0] == FRM_FILE)
adpd142_configuration(pst_adpd, FRM_FILE);
else
ADPD142_dbg("set 1 to config\n");
return size;
}
/**
This function is used for getting the status of adpd142 and driver
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@return ssize_t size of data present in the buffer
*/
static ssize_t
attr_stat_get(struct device *dev, struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned int interrupts = atomic_read(&pst_adpd->stats.interrupts);
unsigned int wq_pending = atomic_read(&pst_adpd->stats.wq_pending);
unsigned int wq_schedule_time_peak_usec =
atomic_read(&pst_adpd->stats.wq_schedule_time_peak_usec);
unsigned int wq_schedule_time_last_usec =
atomic_read(&pst_adpd->stats.wq_schedule_time_last_usec);
unsigned int data_process_time_peak_usec =
atomic_read(&pst_adpd->stats.data_process_time_peak_usec);
unsigned int data_process_time_last_usec =
atomic_read(&pst_adpd->stats.data_process_time_last_usec);
return sprintf(buf, "\
interrupts : %u\n\
wq_pending : %u\n\
wq_schedule_time_peak_usec : %u\n\
wq_schedule_time_avg_usec : %d\n\
wq_schedule_time_last_usec : %u\n\
data_process_time_peak_usec : %u\n\
data_process_time_avg_usec : %d\n\
data_process_time_last_usec : %u\n\
fifo_requires_sync : %d\n\
fifo bytes history : [%d %d %d %d]\n\
ADPD142 driver version : %s\n\
ADPD142 Release date : %s\n",
interrupts, wq_pending,
wq_schedule_time_peak_usec,
(int)ewma_read(&pst_adpd->stats.wq_schedule_time_avg_usec),
wq_schedule_time_last_usec,
data_process_time_peak_usec,
(int)ewma_read(&pst_adpd->stats.data_process_time_avg_usec),
data_process_time_last_usec,
atomic_read(&pst_adpd->stats.fifo_requires_sync),
atomic_read(&pst_adpd->stats.fifo_bytes[0]),
atomic_read(&pst_adpd->stats.fifo_bytes[1]),
atomic_read(&pst_adpd->stats.fifo_bytes[2]),
atomic_read(&pst_adpd->stats.fifo_bytes[3]),
ADPD142_VERSION,
ADPD142_RELEASE_DATE);
}
#define ADPD142_STAT_RESET 1
/**
This function is used for writing the adpd stat values to zero
@param dev linux device structure
@param attr pointer point linux device_attribute structure
@param buf pointer point the buffer
@param size buffer size
@return ssize_t size of data written to the buffer
*/
static ssize_t
attr_stat_set(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned short parse_data[1];
memset(parse_data, 0, sizeof(unsigned short) * 1);
cmd_parsing(buf, 1, parse_data);
if (parse_data[0] == ADPD142_STAT_RESET) {
ADPD142_dbg("Resetting statistics\n");
adpd_stat_reset(pst_adpd);
}
return size;
}
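/*
 * Usage sketch: writing ADPD142_STAT_RESET clears the counters reported
 * through the adpd_stat attribute:
 *
 *   echo 1 > adpd_stat
 *   cat adpd_stat
 */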
/**
array of sample attributes
*/
static struct device_attribute sample_attributes[] = {
__ATTR(enable, 0664, sample_attr_get_enable, sample_attr_set_enable),
};
/**
This function is used for creating sysfs attribute for sample
@param dev linux device structure
@return int status of attribute creation
*/
static int
create_sysfs_interfaces_sample(struct device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(sample_attributes); i++)
if (device_create_file(dev, sample_attributes + i))
goto err_sysfs_interface_sample;
return 0;
err_sysfs_interface_sample:
for (; i >= 0; i--)
device_remove_file(dev, sample_attributes + i);
dev_err(dev, "%s:Unable to create interface\n", __func__);
return -1;
}
/**
This function is used for removing sysfs attribute for sample
@param dev linux device structure
@return void
*/
static void
remove_sysfs_interfaces_sample(struct device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(sample_attributes); i++)
device_remove_file(dev, sample_attributes + i);
}
/**
This function is used for creating sysfs attribute
@param dev linux device structure
@return int status of attribute creation
*/
static int
create_sysfs_interfaces(struct device *dev)
{
return 0;
}
/**
This function is used for removing sysfs attribute
@param dev linux device structure
@return void
*/
static void
remove_sysfs_interfaces(struct device *dev)
{
}
/**
This function is used for registering input event for Sample
@param pst_adpd pointer point ADPD142 data structure
@return s32 status of this function
*/
static s32
adpd142_input_init_sample(struct adpd142_data *pst_adpd)
{
int err;
pst_adpd->ptr_sample_inputdev = input_allocate_device();
if (!pst_adpd->ptr_sample_inputdev) {
err = -ENOMEM;
dev_err(&pst_adpd->client->dev, "input dev allocation fail\n");
goto err_sample_allocate;
}
pst_adpd->ptr_sample_inputdev->name = MODULE_NAME_HRM;
input_set_drvdata(pst_adpd->ptr_sample_inputdev, pst_adpd);
__set_bit(EV_MSC, pst_adpd->ptr_sample_inputdev->evbit);
__set_bit(EV_REL, pst_adpd->ptr_sample_inputdev->evbit);
__set_bit(MSC_RAW, pst_adpd->ptr_sample_inputdev->mscbit);
__set_bit(REL_X, pst_adpd->ptr_sample_inputdev->relbit);
__set_bit(REL_Y, pst_adpd->ptr_sample_inputdev->relbit);
__set_bit(REL_Z, pst_adpd->ptr_sample_inputdev->relbit);
err = input_register_device(pst_adpd->ptr_sample_inputdev);
if (err) {
dev_err(&pst_adpd->client->dev,
"unable to register input dev %s\n",
pst_adpd->ptr_sample_inputdev->name);
goto err_sample_register_failed;
}
err = sensors_create_symlink(&pst_adpd->ptr_sample_inputdev->dev.kobj,
pst_adpd->ptr_sample_inputdev->name);
if (err < 0) {
pr_err("[HRM] %s: create_symlink error\n", __func__);
goto err_sample_register_failed;
}
return 0;
err_sample_register_failed:
input_free_device(pst_adpd->ptr_sample_inputdev);
err_sample_allocate:
return err;
}
/**
This function is used for unregistering input event for Sample
@param pst_adpd pointer point ADPD142 data structure
@return void
*/
static void
adpd142_input_cleanup_sample(struct adpd142_data *pst_adpd)
{
input_set_drvdata(pst_adpd->ptr_sample_inputdev, NULL);
input_unregister_device(pst_adpd->ptr_sample_inputdev);
input_free_device(pst_adpd->ptr_sample_inputdev);
}
/**
This function is used for registering input event for ADPD142
@param pst_adpd pointer point ADPD142 data structure
@return s32 status of the function
*/
static s32
adpd142_input_init(struct adpd142_data *pst_adpd)
{
return adpd142_input_init_sample(pst_adpd);
}
/**
This function is used for unregistering input event done for ADPD142
@param pst_adpd pointer point ADPD142 data structure
@return void
*/
static void
adpd142_input_cleanup(struct adpd142_data *pst_adpd)
{
adpd142_input_cleanup_sample(pst_adpd);
}
/**
This function is used for registering sysfs attribute for ADPD142
@param pst_adpd pointer point ADPD142 data structure
@return s32 status of the called function
*/
static s32
adpd142_sysfs_init(struct adpd142_data *pst_adpd)
{
if (create_sysfs_interfaces(&pst_adpd->client->dev))
goto err_sysfs_create_gen;
if (create_sysfs_interfaces_sample(&pst_adpd->ptr_sample_inputdev->dev))
goto err_sysfs_create_sample;
return 0;
err_sysfs_create_sample:
remove_sysfs_interfaces(&pst_adpd->client->dev);
err_sysfs_create_gen:
return -1;
}
/**
This function is used for unregistering sysfs attribute for ADPD142
@param pst_adpd pointer point ADPD142 data structure
@return void
*/
static void
adpd142_sysfs_cleanup(struct adpd142_data *pst_adpd)
{
remove_sysfs_interfaces(&pst_adpd->client->dev);
remove_sysfs_interfaces_sample(&pst_adpd->ptr_sample_inputdev->dev);
}
/**
This function is used for assigning initial assignment value to
ADPD142 data structure
@param pst_adpd pointer point ADPD142 data structure
@return void
*/
static void
adpd142_struct_assign(struct adpd142_data *pst_adpd)
{
pst_adpd->ptr_data_buffer = data_buffer;
}
/**
This function is used for initializing ADPD142
@param pst_adpd pointer point ADPD142 data structure
@param id pointer point i2c device id
@return s32 status of the called function
*/
static s32
adpd142_initialization(struct adpd142_data *pst_adpd,
const struct i2c_device_id *id)
{
int err = 0;
if (adpd142_input_init(pst_adpd)) {
err = -1;
pr_err("%s err line %d\n", __func__, __LINE__);
goto err_input_init;
}
if (adpd142_sysfs_init(pst_adpd)) {
pr_err("%s err line %d\n", __func__, __LINE__);
err = -1;
goto err_sysfs_init;
}
adpd142_struct_assign(pst_adpd);
memset(&pst_adpd->stats, 0, sizeof(pst_adpd->stats));
adpd_stat_reset(pst_adpd);
INIT_WORK(&pst_adpd->work, adpd142_wq_handler);
pst_adpd->ptr_adpd142_wq_st =
create_workqueue("adpd142_wq");
if (!pst_adpd->ptr_adpd142_wq_st) {
err = -ENOMEM;
pr_err("%s %d\n", __func__, __LINE__);
goto err_wq_creation_init;
}
if (!pst_adpd->client->irq) {
err = -ENODEV;
pr_err("%s %d\n", __func__, __LINE__);
goto err_work_queue_init;
}
pst_adpd->hrm_int = pst_adpd->client->irq;
pst_adpd->irq = pst_adpd->hrm_int;
irq_set_irq_type(pst_adpd->irq, IRQ_TYPE_EDGE_RISING);
err = request_irq(pst_adpd->irq, adpd142_isr_handler,
IRQF_TRIGGER_RISING, dev_name(&pst_adpd->client->dev),
pst_adpd);
if (err) {
ADPD142_dbg("irq %d busy?\n", pst_adpd->irq);
goto err_work_queue_init;
}
disable_irq_nosync(pst_adpd->irq);
pst_adpd->ptr_config = kzalloc(sizeof(struct adpd_platform_data),
GFP_KERNEL);
if (pst_adpd->ptr_config == NULL) {
err = -ENOMEM;
pr_err("%s %d\n", __func__, __LINE__);
goto err_work_queue_init;
}
enable_irq(pst_adpd->irq);
adpd142_configuration(pst_adpd, 1);
adpd142_mode_switching(pst_adpd, 0); /* turn off : idle mode, temp*/
pst_adpd->osc_trim_open_enable = 1;
return 0;
//err_gpio_direction_input:
//gpio_free(pst_adpd->hrm_int);
err_work_queue_init:
destroy_workqueue(pst_adpd->ptr_adpd142_wq_st);
err_wq_creation_init:
err_sysfs_init:
adpd142_input_cleanup(pst_adpd);
err_input_init:
return err;
}
/**
This function is used for cleanup ADPD142
@param pst_adpd pointer point ADPD142 data structure
@return void
*/
static void
adpd142_initialization_cleanup(struct adpd142_data *pst_adpd)
{
adpd142_mode_switching(pst_adpd, 0);
free_irq(pst_adpd->irq, pst_adpd);
destroy_workqueue(pst_adpd->ptr_adpd142_wq_st);
adpd142_sysfs_cleanup(pst_adpd);
adpd142_input_cleanup(pst_adpd);
kobject_uevent(&pst_adpd->client->dev.kobj, KOBJ_OFFLINE);
}
#ifdef CONFIG_PM
static s32
adpd142_i2c_suspend(struct device *dev)
{
return 0;
}
static s32
adpd142_i2c_resume(struct device *dev)
{
return 0;
}
#else
#define adpd142_i2c_resume NULL
#define adpd142_i2c_suspend NULL
#endif /* CONFIG_PM */
/**
This function is used for i2c read communication between ADPD142 and AP
@param pst_adpd pointer point ADPD142 data structure
@param reg_addr register address to fetch
@param len number of 16-bit words to read
@param buf pointer to store the read-out data
@return s32 status of the called function
*/
static int
adpd142_i2c_read(struct adpd142_data *pst_adpd, u8 reg_addr, int len,
u16 *buf)
{
int err;
int tries = 0;
int icnt = 0;
struct i2c_msg msgs[] = {
{
.addr = pst_adpd->client->addr,
.flags = pst_adpd->client->flags & I2C_M_TEN,
.len = 1,
.buf = (s8 *)&reg_addr,
},
{
.addr = pst_adpd->client->addr,
.flags = (pst_adpd->client->flags & I2C_M_TEN) |
I2C_M_RD,
.len = len * sizeof(unsigned short),
.buf = (s8 *)buf,
},
};
do {
err = i2c_transfer(pst_adpd->client->adapter, msgs, 2);
if (err != 2)
msleep_interruptible(I2C_RETRY_DELAY);
} while ((err != 2) && (++tries < I2C_RETRIES));
if (err != 2) {
dev_err(&pst_adpd->client->dev, "read transfer error\n");
err = -EIO;
} else {
err = 0;
}
for (icnt = 0; icnt < len; icnt++) {
/*convert big endian to CPU format*/
buf[icnt] = be16_to_cpu(buf[icnt]);
}
return err;
}
/**
This function is used for i2c write communication between ADPD142 and AP
@param pst_adpd pointer point ADPD142 data structure
@param reg_addr register address to write
@param len number of 16-bit words to write
@param data value to be written to the register.
@return s32 status of the called function
*/
static int
adpd142_i2c_write(struct adpd142_data *pst_adpd, u8 reg_addr, int len,
u16 data)
{
struct i2c_msg msgs[1];
int err;
int tries = 0;
unsigned short data_to_write = cpu_to_be16(data);
char buf[4];
buf[0] = (s8) reg_addr;
memcpy(buf + 1, &data_to_write, sizeof(unsigned short));
msgs[0].addr = pst_adpd->client->addr;
msgs[0].flags = pst_adpd->client->flags & I2C_M_TEN;
msgs[0].len = 1 + (1 * sizeof(unsigned short));
msgs[0].buf = buf;
do {
err = i2c_transfer(pst_adpd->client->adapter, msgs, 1);
if (err != 1)
msleep_interruptible(I2C_RETRY_DELAY);
} while ((err != 1) && (++tries < I2C_RETRIES));
if (err != 1) {
dev_err(&pst_adpd->client->dev, "write transfer error\n");
err = -EIO;
} else {
err = 0;
}
return err;
}
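/*
 * Wire format sketch: each write puts a register address byte followed by
 * the 16-bit value in big-endian order on the bus:
 *
 *   buf[0] = reg_addr, buf[1] = data >> 8, buf[2] = data & 0xff
 *
 * which mirrors the be16_to_cpu() conversion in the read path above.
 */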
/* set sysfs for hrm sensor */
static ssize_t adpd142_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
pr_info("%s = %s \n", __func__, CHIP_NAME);
return snprintf(buf, PAGE_SIZE, "%s\n", CHIP_NAME);
}
static ssize_t adpd142_vendor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
pr_info("%s = %s \n", __func__, VENDOR);
return snprintf(buf, PAGE_SIZE, "%s\n", VENDOR);
}
static ssize_t eol_test_result_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size);
static ssize_t eol_test_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
if (sysfs_streq(buf, "1")) /* eol_test start */
pst_adpd->eol_test_is_enable = 1;
else if (sysfs_streq(buf, "0")) /* eol_test stop */
pst_adpd->eol_test_is_enable = 0;
else {
pr_debug("adpd142_%s: invalid value %d\n", __func__, *buf);
return -EINVAL;
}
pst_adpd->eol_test_status = 0;
pr_info("adpd142_%s = (%c), value(%u) \n", __func__, *buf, pst_adpd->eol_test_is_enable);
return size;
}
static ssize_t eol_test_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", pst_adpd->eol_test_is_enable);
}
static ssize_t eol_test_result_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
int err;
unsigned int buf_len;
buf_len = strlen(buf) + 1;
if (buf_len > MAX_EOL_RESULT)
buf_len = MAX_EOL_RESULT;
if (pst_adpd->eol_test_result != NULL)
kfree(pst_adpd->eol_test_result);
pst_adpd->eol_test_result = kzalloc(sizeof(char) * buf_len, GFP_KERNEL);
if (pst_adpd->eol_test_result == NULL) {
pr_err("adpd142_%s - couldn't allocate memory\n", __func__);
return -ENOMEM;
}
strncpy(pst_adpd->eol_test_result, buf, buf_len);
pst_adpd->eol_test_result[buf_len - 1] = '\0';
pr_info("adpd142_%s - result = %s, buf_len(%u)\n", __func__, pst_adpd->eol_test_result, buf_len);
pst_adpd->eol_test_status = 1;
err = osc_trim_efs_register_save(pst_adpd);
if (err < 0) {
pr_err("adpd142_%s: osc_trim_efs_register_save() empty(%d)\n", __func__, err);
}
return size;
}
static ssize_t eol_test_result_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
if (pst_adpd->eol_test_result == NULL) {
pr_info("adpd142_%s - data->eol_test_result is NULL\n", __func__);
pst_adpd->eol_test_status = 0;
return snprintf(buf, PAGE_SIZE, "%s\n", "NO_EOL_TEST");
}
pr_info("adpd142_%s - result = %s\n", __func__, pst_adpd->eol_test_result);
pst_adpd->eol_test_status = 0;
return snprintf(buf, PAGE_SIZE, "%s\n", pst_adpd->eol_test_result);
}
static ssize_t eol_test_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", pst_adpd->eol_test_status);
}
static ssize_t adpd142_eol_lib_ver_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned int buf_len;
buf_len = strlen(buf) + 1;
if (buf_len > MAX_LIB_VER)
buf_len = MAX_LIB_VER;
if (pst_adpd->eol_lib_ver != NULL)
kfree(pst_adpd->eol_lib_ver);
pst_adpd->eol_lib_ver = kzalloc(sizeof(char) * buf_len, GFP_KERNEL);
if (pst_adpd->eol_lib_ver == NULL) {
pr_err("%s - couldn't allocate memory\n", __func__);
return -ENOMEM;
}
strncpy(pst_adpd->eol_lib_ver, buf, buf_len);
pst_adpd->eol_lib_ver[buf_len - 1] = '\0';
pr_info("%s - eol_lib_ver = %s\n", __func__, pst_adpd->eol_lib_ver);
return size;
}
static ssize_t adpd142_eol_lib_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
if (pst_adpd->eol_lib_ver == NULL) {
pr_info("%s - data->eol_lib_ver is NULL\n", __func__);
return snprintf(buf, PAGE_SIZE, "%s\n", "NULL");
}
pr_info("%s - eol_lib_ver = %s\n", __func__, pst_adpd->eol_lib_ver);
return snprintf(buf, PAGE_SIZE, "%s\n", pst_adpd->eol_lib_ver);
}
static ssize_t adpd142_elf_lib_ver_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
unsigned int buf_len;
buf_len = strlen(buf) + 1;
if (buf_len > MAX_LIB_VER)
buf_len = MAX_LIB_VER;
if (pst_adpd->elf_lib_ver != NULL)
kfree(pst_adpd->elf_lib_ver);
pst_adpd->elf_lib_ver = kzalloc(sizeof(char) * buf_len, GFP_KERNEL);
if (pst_adpd->elf_lib_ver == NULL) {
pr_err("%s - couldn't allocate memory\n", __func__);
return -ENOMEM;
}
strncpy(pst_adpd->elf_lib_ver, buf, buf_len);
pst_adpd->elf_lib_ver[buf_len - 1] = '\0';
return size;
}
static ssize_t adpd142_elf_lib_ver_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adpd142_data *pst_adpd = dev_get_drvdata(dev);
if (pst_adpd->elf_lib_ver == NULL) {
pr_info("%s - data->elf_lib_ver is NULL\n", __func__);
return snprintf(buf, PAGE_SIZE, "%s\n", "NULL");
}
pr_info("%s - elf_lib_ver = %s\n", __func__, pst_adpd->elf_lib_ver);
return snprintf(buf, PAGE_SIZE, "%s\n", pst_adpd->elf_lib_ver);
}
static DEVICE_ATTR(name, S_IRUGO, adpd142_name_show, NULL);
static DEVICE_ATTR(vendor, S_IRUGO, adpd142_vendor_show, NULL);
static DEVICE_ATTR(eol_test, S_IRUGO | S_IWUSR | S_IWGRP,
eol_test_show, eol_test_store);
static DEVICE_ATTR(eol_test_result, S_IRUGO | S_IWUSR | S_IWGRP,
eol_test_result_show, eol_test_result_store);
static DEVICE_ATTR(eol_test_status, S_IRUGO, eol_test_status_show, NULL);
static DEVICE_ATTR(eol_lib_ver, S_IRUGO | S_IWUSR | S_IWGRP,
adpd142_eol_lib_ver_show, adpd142_eol_lib_ver_store);
static DEVICE_ATTR(elf_lib_ver, S_IRUGO | S_IWUSR | S_IWGRP,
adpd142_elf_lib_ver_show, adpd142_elf_lib_ver_store);
static DEVICE_ATTR(adpd_mode, S_IRUGO | S_IWUSR | S_IWGRP,
attr_get_mode, attr_set_mode);
static DEVICE_ATTR(adpd_reg_read, S_IRUGO | S_IWUSR | S_IWGRP,
attr_reg_read_get, attr_reg_read_set);
static DEVICE_ATTR(adpd_reg_write, S_IRUGO | S_IWUSR | S_IWGRP,
NULL, attr_reg_write_set);
static DEVICE_ATTR(adpd_configuration, S_IRUGO | S_IWUSR | S_IWGRP,
attr_config_get, attr_config_set);
static DEVICE_ATTR(adpd_stat, S_IRUGO | S_IWUSR | S_IWGRP,
attr_stat_get, attr_stat_set);
static struct device_attribute *hrm_sensor_attrs[] = {
&dev_attr_name,
&dev_attr_vendor,
&dev_attr_eol_test,
&dev_attr_eol_test_result,
&dev_attr_eol_test_status,
&dev_attr_eol_lib_ver,
&dev_attr_elf_lib_ver,
&dev_attr_adpd_mode,
&dev_attr_adpd_reg_read,
&dev_attr_adpd_reg_write,
&dev_attr_adpd_configuration,
&dev_attr_adpd_stat,
NULL,
};
/**
This function is used for ADPD142 probe function
@param client pointer point to the linux i2c client structure
@param id pointer point to the linux i2c device id
@return s32 status of the probe function
*/
static s32
adpd_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct adpd142_data *pst_adpd = NULL;
struct adpd_platform_data *pdata = NULL;
int err = 0;
unsigned short u16_regval = 0;
pr_err("%s, is called Success\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "client not i2c capable\n");
err = -ENODEV;
pr_err("[SENSOR] %s - exit_check_functionality.\n", __func__);
goto exit_check_functionality_failed;
}
pst_adpd = kzalloc(sizeof(struct adpd142_data), GFP_KERNEL);
if (pst_adpd == NULL) {
err = -ENOMEM;
pr_err("[SENSOR] %s - exit_mem_allocate.\n", __func__);
goto exit_mem_allocate_failed;
}
if (client->dev.of_node) {
pdata = devm_kzalloc(&client->dev,
sizeof(struct adpd_platform_data), GFP_KERNEL);
if (!pdata) {
dev_err(&client->dev, "Failed to allocate memory\n");
kfree(pst_adpd);
return -ENOMEM;
}
} else
pdata = client->dev.platform_data;
if (!pdata) {
pr_err("%s: missing pdata!\n", __func__);
kfree(pst_adpd);
return -ENODEV;
}
err = adpd_parse_dt(pst_adpd, &client->dev);
if (err) {
pr_err("%s, parse dt fail\n", __func__);
goto adpd_parse_dt_err;
}
mutex_init(&pst_adpd->mutex);
pst_adpd->client = client;
pst_adpd->ptr_config = pdata;
pst_adpd->read = adpd142_i2c_read;
pst_adpd->write = adpd142_i2c_write;
/*Need to allocate and assign data then use the below function */
i2c_set_clientdata(client, (struct adpd142_data *)pst_adpd);
adpd_power_enable(pst_adpd, 1);
msleep(20);
/*chip ID verification */
u16_regval = reg_read(pst_adpd, ADPD_CHIPID_ADDR);
switch (u16_regval) {
case ADPD_CHIPID(0):
case ADPD_CHIPID(1):
case ADPD_CHIPID(2):
err = 0;
ADPD142_dbg("chipID value = 0x%x\n", u16_regval);
break;
default:
err = -ENODEV;
break;
};
if (err) {
ADPD142_dbg("chipID value = 0x%x\n", u16_regval);
pr_err("[SENSOR] %s - exit_chipid_verification.\n", __func__);
goto exit_chipid_verification;
}
ADPD142_info("chipID value = 0x%x\n", u16_regval);
//pst_adpd->dev = &client->dev;
pr_info("%s - start \n", __func__);
if (adpd142_initialization(pst_adpd, id)) {
err = -ENODEV;
pr_err("[SENSOR] %s - exit_adpd142_init_exit.\n", __func__);
goto exit_adpd142_init;
}
/* set sysfs for hrm sensor */
err = sensors_register(pst_adpd->dev, pst_adpd, hrm_sensor_attrs,
"hrm_sensor");
if (err) {
pr_err("[SENSOR] %s - cound not register hrm_sensor(%d).\n",
__func__, err);
goto hrm_sensor_register_failed;
}
pr_info("%s success\n", __func__);
goto done;
hrm_sensor_register_failed:
exit_adpd142_init:
exit_chipid_verification:
mutex_destroy(&pst_adpd->mutex);
adpd_parse_dt_err:
exit_mem_allocate_failed:
kfree(pst_adpd);
exit_check_functionality_failed:
dev_err(&client->dev, "%s: Driver Init failed\n", ADPD_DEV_NAME);
pr_err("%s failed\n", __func__);
done:
return err;
}
/**
This function is used for ADPD142 remove function
@param client pointer point to the linux i2c client structure
@return s32 status of the remove function
*/
static s32
adpd_i2c_remove(struct i2c_client *client)
{
struct adpd142_data *pst_adpd = i2c_get_clientdata(client);
ADPD142_dbg("%s\n", __func__);
adpd142_initialization_cleanup(pst_adpd);
gpio_free(pst_adpd->hrm_int);
kfree(pst_adpd->ptr_config);
kfree(pst_adpd);
i2c_set_clientdata(client, NULL);
return 0;
}
#ifdef CONFIG_PM
/**
device power management operation structure
*/
static const struct dev_pm_ops adpd_pm_ops = {
.resume = adpd142_i2c_resume,
.suspend = adpd142_i2c_suspend,
};
#endif
/**
This table tells the I2C core which devices are supported
@brief the name has to match the board configuration file setup
*/
/*static struct i2c_device_id adpd_id[] = { {ADPD_DEV_NAME, 0}, {} };
MODULE_DEVICE_TABLE(i2c, adpd_id);*/
static struct of_device_id adpd_match_table[] = {
{ .compatible = "adpd142",},
{},
};
MODULE_DEVICE_TABLE(i2c, adpd_match_table);
static const struct i2c_device_id adpd142_device_id[] = {
{ "adpd142", 0 },
{ }
};
/**
i2c operation structure
*/
struct i2c_driver adpd142_i2c_driver = {
.driver = {
.name = ADPD_DEV_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &adpd_pm_ops,
#endif
.of_match_table = adpd_match_table,
},
.probe = adpd_i2c_probe,
.remove = adpd_i2c_remove,
.id_table = adpd142_device_id,
};
static struct i2c_client *i2c_client;
#ifdef ADPD_AUTO_PROBE
static int
i2c_check_dev_attach(char *slave_name, unsigned short *slave_addrs,
unsigned short cnt)
{
struct i2c_board_info info;
struct i2c_adapter *i2c_adapter = NULL;
int ret = 0;
unsigned short *scan_device = NULL;
unsigned short count = 0;
/*need to check whether we need to free the memory */
scan_device = kzalloc(sizeof(unsigned short) * (cnt + 2), GFP_KERNEL);
if (!scan_device) {
ret = -ENOMEM; /* out of memory */
goto i2c_check_attach_mem_fail;
}
memset(scan_device, '\0', sizeof(unsigned short) * (cnt + 2));
for (count = 0; count < cnt; count++) {
*(scan_device + count) = *(slave_addrs + count);
ADPD142_info("list of slave addr = 0x%x\n",
*(scan_device + count));
}
*(scan_device + count) = I2C_CLIENT_END;
count = 0;
do {
i2c_adapter = i2c_get_adapter(count);
if (i2c_adapter != NULL) {
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, slave_name /*"adpd142" */ ,
I2C_NAME_SIZE);
/*need to check i2c_new_device instead of
i2c_new_probed_device*/
i2c_client =
i2c_new_probed_device(i2c_adapter, &info,
(const unsigned short *)
scan_device, NULL);
if (i2c_client != NULL) {
ADPD142_dbg("I2C busnum - %d\n", count);
ADPD142_dbg(
"device is attached to the bus i2c-%d\n",
count);
}
i2c_put_adapter(i2c_adapter);
} else {
ADPD142_info("Not valid adapter\n");
}
count++;
} while (i2c_client == NULL && count < 20);
kfree(scan_device);
if (i2c_client == NULL) {
/*No such device or address */
return -ENXIO;
} else {
return 0;
}
i2c_check_attach_mem_fail:
return ret;
}
#endif
/**
This function gets called when the module is inserted
@return int status of adpd142_multisensor_init
*/
static int __init
adpd142_multisensor_init(void)
{
#ifdef ADPD_AUTO_PROBE
unsigned short addr[] = { ADPD142_SLAVE_ADDR };
ADPD142_dbg("%s\n", __func__);
if (!i2c_check_dev_attach(ADPD_DEV_NAME, addr, 1)) {
return i2c_add_driver(&adpd142_i2c_driver);
} else {
pr_err("i2c bus connect error\n");
return -1;
}
#else
ADPD142_dbg("%s\n", __func__);
return i2c_add_driver(&adpd142_i2c_driver);
#endif
}
/**
This function gets called when the module is removed
@return void
*/
static void __exit
adpd142_multisensor_exit(void)
{
ADPD142_dbg("%s\n", __func__);
i2c_del_driver(&adpd142_i2c_driver);
if (i2c_client)
i2c_unregister_device(i2c_client);
}
module_init(adpd142_multisensor_init);
module_exit(adpd142_multisensor_exit);
MODULE_DESCRIPTION("ADPD142 HRM sensor driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("");
| gpl-2.0 |
OneOfMany07/tty-ng | drivers/rtc/rtc-r9701.c | 157 | 4776 | /*
* Driver for Epson RTC-9701JE
*
* Copyright (C) 2008 Magnus Damm
*
* Based on rtc-max6902.c
*
* Copyright (C) 2006 8D Technologies inc.
* Copyright (C) 2004 Compulab Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#define RSECCNT 0x00 /* Second Counter */
#define RMINCNT 0x01 /* Minute Counter */
#define RHRCNT 0x02 /* Hour Counter */
#define RWKCNT 0x03 /* Week Counter */
#define RDAYCNT 0x04 /* Day Counter */
#define RMONCNT 0x05 /* Month Counter */
#define RYRCNT 0x06 /* Year Counter */
#define R100CNT 0x07 /* Y100 Counter */
#define RMINAR 0x08 /* Minute Alarm */
#define RHRAR 0x09 /* Hour Alarm */
#define RWKAR 0x0a /* Week/Day Alarm */
#define RTIMCNT 0x0c /* Interval Timer */
#define REXT 0x0d /* Extension Register */
#define RFLAG 0x0e /* RTC Flag Register */
#define RCR 0x0f /* RTC Control Register */
static int write_reg(struct device *dev, int address, unsigned char data)
{
struct spi_device *spi = to_spi_device(dev);
unsigned char buf[2];
buf[0] = address & 0x7f;
buf[1] = data;
return spi_write(spi, buf, ARRAY_SIZE(buf));
}
static int read_regs(struct device *dev, unsigned char *regs, int no_regs)
{
struct spi_device *spi = to_spi_device(dev);
u8 txbuf[1], rxbuf[1];
int k, ret;
ret = 0;
for (k = 0; ret == 0 && k < no_regs; k++) {
txbuf[0] = 0x80 | regs[k];
ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
regs[k] = rxbuf[0];
}
return ret;
}
static int r9701_get_datetime(struct device *dev, struct rtc_time *dt)
{
int ret;
unsigned char buf[] = { RSECCNT, RMINCNT, RHRCNT,
RDAYCNT, RMONCNT, RYRCNT };
ret = read_regs(dev, buf, ARRAY_SIZE(buf));
if (ret)
return ret;
memset(dt, 0, sizeof(*dt));
dt->tm_sec = bcd2bin(buf[0]); /* RSECCNT */
dt->tm_min = bcd2bin(buf[1]); /* RMINCNT */
dt->tm_hour = bcd2bin(buf[2]); /* RHRCNT */
dt->tm_mday = bcd2bin(buf[3]); /* RDAYCNT */
dt->tm_mon = bcd2bin(buf[4]) - 1; /* RMONCNT */
dt->tm_year = bcd2bin(buf[5]) + 100; /* RYRCNT */
/* the rtc device may contain illegal values on power up
* according to the data sheet. make sure they are valid.
*/
return rtc_valid_tm(dt);
}
static int r9701_set_datetime(struct device *dev, struct rtc_time *dt)
{
int ret, year;
year = dt->tm_year + 1900;
if (year >= 2100 || year < 2000)
return -EINVAL;
ret = write_reg(dev, RHRCNT, bin2bcd(dt->tm_hour));
ret = ret ? ret : write_reg(dev, RMINCNT, bin2bcd(dt->tm_min));
ret = ret ? ret : write_reg(dev, RSECCNT, bin2bcd(dt->tm_sec));
ret = ret ? ret : write_reg(dev, RDAYCNT, bin2bcd(dt->tm_mday));
ret = ret ? ret : write_reg(dev, RMONCNT, bin2bcd(dt->tm_mon + 1));
ret = ret ? ret : write_reg(dev, RYRCNT, bin2bcd(dt->tm_year - 100));
ret = ret ? ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday);
return ret;
}
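/*
 * Design note: the `ret = ret ? ret : write_reg(...)` chain above
 * short-circuits after the first failing register write, so the function
 * returns the first error code while keeping the happy path branch-free.
 */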
static const struct rtc_class_ops r9701_rtc_ops = {
.read_time = r9701_get_datetime,
.set_time = r9701_set_datetime,
};
static int r9701_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
struct rtc_time dt;
unsigned char tmp;
int res;
tmp = R100CNT;
res = read_regs(&spi->dev, &tmp, 1);
if (res || tmp != 0x20) {
dev_err(&spi->dev, "cannot read RTC register\n");
return -ENODEV;
}
/*
* The device seems to be present. Now check if the registers
* contain invalid values. If so, try to write a default date:
* 2000/1/1 00:00:00
*/
if (r9701_get_datetime(&spi->dev, &dt)) {
dev_info(&spi->dev, "trying to repair invalid date/time\n");
dt.tm_sec = 0;
dt.tm_min = 0;
dt.tm_hour = 0;
dt.tm_mday = 1;
dt.tm_mon = 0;
dt.tm_year = 100;
if (r9701_set_datetime(&spi->dev, &dt) ||
r9701_get_datetime(&spi->dev, &dt)) {
dev_err(&spi->dev, "cannot repair RTC register\n");
return -ENODEV;
}
}
rtc = rtc_device_register("r9701",
&spi->dev, &r9701_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
dev_set_drvdata(&spi->dev, rtc);
return 0;
}
static int r9701_remove(struct spi_device *spi)
{
struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
rtc_device_unregister(rtc);
return 0;
}
static struct spi_driver r9701_driver = {
.driver = {
.name = "rtc-r9701",
.owner = THIS_MODULE,
},
.probe = r9701_probe,
.remove = r9701_remove,
};
module_spi_driver(r9701_driver);
MODULE_DESCRIPTION("r9701 spi RTC driver");
MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:rtc-r9701");
| gpl-2.0 |
lvming/linux | Documentation/accounting/getdelays.c | 1437 | 13554 | /* getdelays.c
*
* Utility to get per-pid and per-tgid delay accounting statistics
* Also illustrates usage of the taskstats interface
*
* Copyright (C) Shailabh Nagar, IBM Corp. 2005
* Copyright (C) Balbir Singh, IBM Corp. 2006
* Copyright (c) Jay Lan, SGI. 2006
*
* Compile with
* gcc -I/usr/src/linux/include getdelays.c -o getdelays
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <linux/genetlink.h>
#include <linux/taskstats.h>
#include <linux/cgroupstats.h>
/*
* Generic macros for dealing with netlink sockets. Might be duplicated
* elsewhere. It is recommended that commercial grade applications use
* libnl or libnetlink and use the interfaces provided by the library
*/
#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
#define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN))
#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
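/*
 * Layout assumed by the macros above (standard genetlink framing):
 *
 * struct nlmsghdr | struct genlmsghdr | nlattr | payload | nlattr | ...
 *
 * GENLMSG_DATA() skips both headers to reach the first attribute;
 * GENLMSG_PAYLOAD() is the size of the attribute area behind them.
 */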
#define err(code, fmt, arg...) \
do { \
fprintf(stderr, fmt, ##arg); \
exit(code); \
} while (0)
int done;
int rcvbufsz;
char name[100];
int dbg;
int print_delays;
int print_io_accounting;
int print_task_context_switch_counts;
#define PRINTF(fmt, arg...) { \
if (dbg) { \
printf(fmt, ##arg); \
} \
}
/* Maximum size of response requested or message sent */
#define MAX_MSG_SIZE 1024
/* Maximum number of cpus expected to be specified in a cpumask */
#define MAX_CPUS 32
struct msgtemplate {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[MAX_MSG_SIZE];
};
char cpumask[100+6*MAX_CPUS];
static void usage(void)
{
fprintf(stderr, "getdelays [-dilv] [-w logfile] [-r bufsize] "
"[-m cpumask] [-t tgid] [-p pid]\n");
fprintf(stderr, " -d: print delayacct stats\n");
fprintf(stderr, " -i: print IO accounting (works only with -p)\n");
fprintf(stderr, " -l: listen forever\n");
fprintf(stderr, " -v: debug on\n");
fprintf(stderr, " -C: container path\n");
}
/*
* Create a raw netlink socket and bind
*/
static int create_nl_socket(int protocol)
{
int fd;
struct sockaddr_nl local;
fd = socket(AF_NETLINK, SOCK_RAW, protocol);
if (fd < 0)
return -1;
if (rcvbufsz)
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
&rcvbufsz, sizeof(rcvbufsz)) < 0) {
fprintf(stderr, "Unable to set socket rcv buf size to %d\n",
rcvbufsz);
goto error;
}
memset(&local, 0, sizeof(local));
local.nl_family = AF_NETLINK;
if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
goto error;
return fd;
error:
close(fd);
return -1;
}
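/*
 * Note: local.nl_pid is left zero in the bind() above, so the kernel
 * picks a unique port id for the socket; the pid the callers place in
 * the netlink message header is independent of this.
 */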
static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, __u16 nla_type,
void *nla_data, int nla_len)
{
struct nlattr *na;
struct sockaddr_nl nladdr;
int r, buflen;
char *buf;
struct msgtemplate msg;
msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
msg.n.nlmsg_type = nlmsg_type;
msg.n.nlmsg_flags = NLM_F_REQUEST;
msg.n.nlmsg_seq = 0;
msg.n.nlmsg_pid = nlmsg_pid;
msg.g.cmd = genl_cmd;
msg.g.version = 0x1;
na = (struct nlattr *) GENLMSG_DATA(&msg);
na->nla_type = nla_type;
na->nla_len = nla_len + 1 + NLA_HDRLEN;
memcpy(NLA_DATA(na), nla_data, nla_len);
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
buf = (char *) &msg;
buflen = msg.n.nlmsg_len;
memset(&nladdr, 0, sizeof(nladdr));
nladdr.nl_family = AF_NETLINK;
while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
sizeof(nladdr))) < buflen) {
if (r > 0) {
buf += r;
buflen -= r;
} else if (errno != EAGAIN)
return -1;
}
return 0;
}
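/*
 * Usage sketch (mirroring the real calls in main() below): request the
 * per-pid taskstats of a single task.
 *
 * __u32 pid = 1234;
 * send_cmd(nl_sd, family_id, getpid(), TASKSTATS_CMD_GET,
 * TASKSTATS_CMD_ATTR_PID, &pid, sizeof(pid));
 */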
/*
* Probe the controller in genetlink to find the family id
* for the TASKSTATS family
*/
static int get_family_id(int sd)
{
struct {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[256];
} ans;
int id = 0, rc;
struct nlattr *na;
int rep_len;
strcpy(name, TASKSTATS_GENL_NAME);
rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
CTRL_ATTR_FAMILY_NAME, (void *)name,
strlen(TASKSTATS_GENL_NAME)+1);
if (rc < 0)
return 0; /* sendto() failure? */
rep_len = recv(sd, &ans, sizeof(ans), 0);
if (ans.n.nlmsg_type == NLMSG_ERROR ||
(rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
return 0;
na = (struct nlattr *) GENLMSG_DATA(&ans);
na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
if (na->nla_type == CTRL_ATTR_FAMILY_ID) {
id = *(__u16 *) NLA_DATA(na);
}
return id;
}
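/* taskstats delay totals are in nanoseconds; convert to milliseconds
 * and guard the divisor so a zero count cannot divide by zero */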
#define average_ms(t, c) (((t) / 1000000ULL) / ((c) ? (c) : 1))
static void print_delayacct(struct taskstats *t)
{
printf("\n\nCPU %15s%15s%15s%15s%15s\n"
" %15llu%15llu%15llu%15llu%15.3fms\n"
"IO %15s%15s%15s\n"
" %15llu%15llu%15llums\n"
"SWAP %15s%15s%15s\n"
" %15llu%15llu%15llums\n"
"RECLAIM %12s%15s%15s\n"
" %15llu%15llu%15llums\n",
"count", "real total", "virtual total",
"delay total", "delay average",
(unsigned long long)t->cpu_count,
(unsigned long long)t->cpu_run_real_total,
(unsigned long long)t->cpu_run_virtual_total,
(unsigned long long)t->cpu_delay_total,
average_ms((double)t->cpu_delay_total, t->cpu_count),
"count", "delay total", "delay average",
(unsigned long long)t->blkio_count,
(unsigned long long)t->blkio_delay_total,
average_ms(t->blkio_delay_total, t->blkio_count),
"count", "delay total", "delay average",
(unsigned long long)t->swapin_count,
(unsigned long long)t->swapin_delay_total,
average_ms(t->swapin_delay_total, t->swapin_count),
"count", "delay total", "delay average",
(unsigned long long)t->freepages_count,
(unsigned long long)t->freepages_delay_total,
average_ms(t->freepages_delay_total, t->freepages_count));
}
static void task_context_switch_counts(struct taskstats *t)
{
printf("\n\nTask %15s%15s\n"
" %15llu%15llu\n",
"voluntary", "nonvoluntary",
(unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw);
}
static void print_cgroupstats(struct cgroupstats *c)
{
printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, "
"uninterruptible %llu\n", (unsigned long long)c->nr_sleeping,
(unsigned long long)c->nr_io_wait,
(unsigned long long)c->nr_running,
(unsigned long long)c->nr_stopped,
(unsigned long long)c->nr_uninterruptible);
}
static void print_ioacct(struct taskstats *t)
{
printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n",
t->ac_comm,
(unsigned long long)t->read_bytes,
(unsigned long long)t->write_bytes,
(unsigned long long)t->cancelled_write_bytes);
}
int main(int argc, char *argv[])
{
int c, rc, rep_len, aggr_len, len2;
int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC;
__u16 id;
__u32 mypid;
struct nlattr *na;
int nl_sd = -1;
int len = 0;
pid_t tid = 0;
pid_t rtid = 0;
int fd = 0;
int count = 0;
int write_file = 0;
int maskset = 0;
char *logfile = NULL;
int loop = 0;
int containerset = 0;
char *containerpath = NULL;
int cfd = 0;
int forking = 0;
sigset_t sigset;
struct msgtemplate msg;
while (!forking) {
c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:");
if (c < 0)
break;
switch (c) {
case 'd':
printf("print delayacct stats ON\n");
print_delays = 1;
break;
case 'i':
printf("printing IO accounting\n");
print_io_accounting = 1;
break;
case 'q':
printf("printing task/process context switch rates\n");
print_task_context_switch_counts = 1;
break;
case 'C':
containerset = 1;
containerpath = optarg;
break;
case 'w':
logfile = strdup(optarg);
printf("write to file %s\n", logfile);
write_file = 1;
break;
case 'r':
rcvbufsz = atoi(optarg);
printf("receive buf size %d\n", rcvbufsz);
if (rcvbufsz < 0)
err(1, "Invalid rcv buf size\n");
break;
case 'm':
strncpy(cpumask, optarg, sizeof(cpumask));
cpumask[sizeof(cpumask) - 1] = '\0';
maskset = 1;
printf("cpumask %s maskset %d\n", cpumask, maskset);
break;
case 't':
tid = atoi(optarg);
if (!tid)
err(1, "Invalid tgid\n");
cmd_type = TASKSTATS_CMD_ATTR_TGID;
break;
case 'p':
tid = atoi(optarg);
if (!tid)
err(1, "Invalid pid\n");
cmd_type = TASKSTATS_CMD_ATTR_PID;
break;
case 'c':
/* Block SIGCHLD for sigwait() later */
if (sigemptyset(&sigset) == -1)
err(1, "Failed to empty sigset");
if (sigaddset(&sigset, SIGCHLD))
err(1, "Failed to set sigchld in sigset");
sigprocmask(SIG_BLOCK, &sigset, NULL);
/* fork/exec a child */
tid = fork();
if (tid < 0)
err(1, "Fork failed\n");
if (tid == 0)
if (execvp(argv[optind - 1],
&argv[optind - 1]) < 0)
exit(-1);
/* Set the command type and avoid further processing */
cmd_type = TASKSTATS_CMD_ATTR_PID;
forking = 1;
break;
case 'v':
printf("debug on\n");
dbg = 1;
break;
case 'l':
printf("listen forever\n");
loop = 1;
break;
default:
usage();
exit(-1);
}
}
if (write_file) {
fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd == -1) {
perror("Cannot open output file\n");
exit(1);
}
}
if ((nl_sd = create_nl_socket(NETLINK_GENERIC)) < 0)
err(1, "error creating Netlink socket\n");
mypid = getpid();
id = get_family_id(nl_sd);
if (!id) {
fprintf(stderr, "Error getting family id, errno %d\n", errno);
goto err;
}
PRINTF("family id %d\n", id);
if (maskset) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
&cpumask, strlen(cpumask) + 1);
PRINTF("Sent register cpumask, retval %d\n", rc);
if (rc < 0) {
fprintf(stderr, "error sending register cpumask\n");
goto err;
}
}
if (tid && containerset) {
fprintf(stderr, "Select either -t or -C, not both\n");
goto err;
}
/*
* If we forked a child, wait for it to exit. Cannot use waitpid()
* as all the delicious data would be reaped as part of the wait
*/
if (tid && forking) {
int sig_received;
sigwait(&sigset, &sig_received);
}
if (tid) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
cmd_type, &tid, sizeof(__u32));
PRINTF("Sent pid/tgid, retval %d\n", rc);
if (rc < 0) {
fprintf(stderr, "error sending tid/tgid cmd\n");
goto done;
}
}
if (containerset) {
cfd = open(containerpath, O_RDONLY);
if (cfd < 0) {
perror("error opening container file");
goto err;
}
rc = send_cmd(nl_sd, id, mypid, CGROUPSTATS_CMD_GET,
CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32));
if (rc < 0) {
perror("error sending cgroupstats command");
goto err;
}
}
if (!maskset && !tid && !containerset) {
usage();
goto err;
}
do {
rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
PRINTF("received %d bytes\n", rep_len);
if (rep_len < 0) {
fprintf(stderr, "nonfatal reply error: errno %d\n",
errno);
continue;
}
if (msg.n.nlmsg_type == NLMSG_ERROR ||
!NLMSG_OK((&msg.n), rep_len)) {
struct nlmsgerr *err = NLMSG_DATA(&msg);
fprintf(stderr, "fatal reply error, errno %d\n",
err->error);
goto done;
}
PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n",
sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
rep_len = GENLMSG_PAYLOAD(&msg.n);
na = (struct nlattr *) GENLMSG_DATA(&msg);
len = 0;
while (len < rep_len) {
len += NLA_ALIGN(na->nla_len);
switch (na->nla_type) {
case TASKSTATS_TYPE_AGGR_TGID:
/* Fall through */
case TASKSTATS_TYPE_AGGR_PID:
aggr_len = NLA_PAYLOAD(na->nla_len);
len2 = 0;
/* For nested attributes, na follows: the aggregate wraps a
 * PID or TGID attribute and then the taskstats payload */
na = (struct nlattr *) NLA_DATA(na);
done = 0;
while (len2 < aggr_len) {
switch (na->nla_type) {
case TASKSTATS_TYPE_PID:
rtid = *(int *) NLA_DATA(na);
if (print_delays)
printf("PID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_TGID:
rtid = *(int *) NLA_DATA(na);
if (print_delays)
printf("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
count++;
if (print_delays)
print_delayacct((struct taskstats *) NLA_DATA(na));
if (print_io_accounting)
print_ioacct((struct taskstats *) NLA_DATA(na));
if (print_task_context_switch_counts)
task_context_switch_counts((struct taskstats *) NLA_DATA(na));
if (fd) {
if (write(fd, NLA_DATA(na), na->nla_len) < 0) {
err(1,"write error\n");
}
}
if (!loop)
goto done;
break;
default:
fprintf(stderr, "Unknown nested"
" nla_type %d\n",
na->nla_type);
break;
}
/* advance by this attribute's aligned length, not by the
 * running total, so later siblings are not skipped */
len2 += NLA_ALIGN(na->nla_len);
na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
}
break;
case CGROUPSTATS_TYPE_CGROUP_STATS:
print_cgroupstats(NLA_DATA(na));
break;
default:
fprintf(stderr, "Unknown nla_type %d\n",
na->nla_type);
case TASKSTATS_TYPE_NULL:
break;
}
na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
}
} while (loop);
done:
if (maskset) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
&cpumask, strlen(cpumask) + 1);
printf("Sent deregister mask, retval %d\n", rc);
if (rc < 0)
err(rc, "error sending deregister cpumask\n");
}
err:
close(nl_sd);
if (fd)
close(fd);
if (cfd)
close(cfd);
return 0;
}
| gpl-2.0 |
ShaguptaS/linux | net/netfilter/xt_ipcomp.c | 1693 | 2854 | /* Kernel module to match IPComp parameters for IPv4 and IPv6
*
* Copyright (C) 2013 WindRiver
*
* Author:
* Fan Du <fan.du@windriver.com>
*
* Based on:
* net/netfilter/xt_esp.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netfilter/xt_ipcomp.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
/* Returns true if the spi is matched by the range, false otherwise */
static inline bool
spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)
{
bool r;
pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
invert ? '!' : ' ', min, spi, max);
r = (spi >= min && spi <= max) ^ invert;
pr_debug(" result %s\n", r ? "PASS" : "FAILED");
return r;
}
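/*
 * Example: spi_match(0x100, 0x200, 0x150, false) is true; the same
 * range with invert == true yields false.
 */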
static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct ip_comp_hdr _comphdr;
const struct ip_comp_hdr *chdr;
const struct xt_ipcomp *compinfo = par->matchinfo;
/* Must not be a fragment. */
if (par->fragoff != 0)
return false;
chdr = skb_header_pointer(skb, par->thoff, sizeof(_comphdr), &_comphdr);
if (chdr == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
*/
pr_debug("Dropping evil IPComp tinygram.\n");
par->hotdrop = true;
return false;
}
return spi_match(compinfo->spis[0], compinfo->spis[1],
ntohs(chdr->cpi),
!!(compinfo->invflags & XT_IPCOMP_INV_SPI));
}
static int comp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_ipcomp *compinfo = par->matchinfo;
/* Must specify no unknown invflags */
if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
pr_err("unknown flags %X\n", compinfo->invflags);
return -EINVAL;
}
return 0;
}
static struct xt_match comp_mt_reg[] __read_mostly = {
{
.name = "ipcomp",
.family = NFPROTO_IPV4,
.match = comp_mt,
.matchsize = sizeof(struct xt_ipcomp),
.proto = IPPROTO_COMP,
.checkentry = comp_mt_check,
.me = THIS_MODULE,
},
{
.name = "ipcomp",
.family = NFPROTO_IPV6,
.match = comp_mt,
.matchsize = sizeof(struct xt_ipcomp),
.proto = IPPROTO_COMP,
.checkentry = comp_mt_check,
.me = THIS_MODULE,
},
};
static int __init comp_mt_init(void)
{
return xt_register_matches(comp_mt_reg, ARRAY_SIZE(comp_mt_reg));
}
static void __exit comp_mt_exit(void)
{
xt_unregister_matches(comp_mt_reg, ARRAY_SIZE(comp_mt_reg));
}
module_init(comp_mt_init);
module_exit(comp_mt_exit);
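/*
 * Userspace sketch (assuming the matching iptables "ipcomp" extension
 * from iptables-extensions(8) is installed):
 *
 * iptables -A INPUT -p ipcomp -m ipcomp --ipcompspi 0x18 -j ACCEPT
 */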
| gpl-2.0 |