repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
openwrt/linux | drivers/staging/rts5139/xd.c | 2727 | 57791 | /* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
* Maintainer:
* Edwin Rong (edwin_rong@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "trace.h"
#include "rts51x.h"
#include "rts51x_transport.h"
#include "rts51x_scsi.h"
#include "rts51x_card.h"
#include "xd.h"
static int xd_build_l2p_tbl(struct rts51x_chip *chip, int zone_no);
static int xd_init_page(struct rts51x_chip *chip, u32 phy_blk, u16 logoff,
u8 start_page, u8 end_page);
static inline void xd_set_err_code(struct rts51x_chip *chip, u8 err_code)
{
struct xd_info *xd_card = &(chip->xd_card);
xd_card->err_code = err_code;
}
static int xd_set_init_para(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval;
if (chip->asic_code)
xd_card->xd_clock = 47;
else
xd_card->xd_clock = CLK_50;
retval = switch_clock(chip, xd_card->xd_clock);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
return STATUS_SUCCESS;
}
static int xd_switch_clock(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval;
retval = rts51x_select_card(chip, XD_CARD);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = switch_clock(chip, xd_card->xd_clock);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
return STATUS_SUCCESS;
}
static int xd_read_id(struct rts51x_chip *chip, u8 id_cmd, u8 *id_buf,
u8 buf_len)
{
int retval, i;
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_ID);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
for (i = 0; i < 4; i++) {
rts51x_add_cmd(chip, READ_REG_CMD, (u16) (XD_ADDRESS1 + i), 0,
0);
}
retval = rts51x_send_cmd(chip, MODE_CR, 20);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 5, 20);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
if (id_buf && buf_len) {
if (buf_len > 4)
buf_len = 4;
rts51x_read_rsp_buf(chip, 1, id_buf, buf_len);
}
return STATUS_SUCCESS;
}
static void xd_assign_phy_addr(struct rts51x_chip *chip, u32 addr, u8 mode)
{
struct xd_info *xd_card = &(chip->xd_card);
switch (mode) {
case XD_RW_ADDR:
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF,
(u8) addr);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, 0xFF,
(u8) (addr >> 8));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3, 0xFF,
(u8) (addr >> 16));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
xd_card->addr_cycle | XD_CALC_ECC |
XD_BA_NO_TRANSFORM);
break;
case XD_ERASE_ADDR:
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF,
(u8) addr);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF,
(u8) (addr >> 8));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2, 0xFF,
(u8) (addr >> 16));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
(xd_card->addr_cycle - 1) |
XD_CALC_ECC | XD_BA_NO_TRANSFORM);
break;
default:
break;
}
}
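/*
 * Illustration (values assumed, not from the datasheet): in XD_RW_ADDR
 * mode the linear page address is split LSB-first across the byte-wide
 * address registers, e.g. for addr = 0x012345:
 *
 *   XD_ADDRESS0 = 0x00    column address, fixed to 0 here
 *   XD_ADDRESS1 = 0x45    (u8) addr
 *   XD_ADDRESS2 = 0x23    (u8) (addr >> 8)
 *   XD_ADDRESS3 = 0x01    (u8) (addr >> 16)
 *
 * XD_ERASE_ADDR mode omits the column byte and starts the block address
 * at XD_ADDRESS0, which is why it programs one address cycle less
 * (addr_cycle - 1).
 */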
static int xd_read_redundant(struct rts51x_chip *chip, u32 page_addr, u8 *buf,
int buf_len)
{
int retval, i;
rts51x_init_cmd(chip);
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_REDUNDANT);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
for (i = 0; i < 6; i++) {
rts51x_add_cmd(chip, READ_REG_CMD, (u16) (XD_PAGE_STATUS + i),
0, 0);
}
for (i = 0; i < 4; i++) {
rts51x_add_cmd(chip, READ_REG_CMD, (u16) (XD_RESERVED0 + i), 0,
0);
}
rts51x_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 11, 500);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
if (buf && buf_len) {
if (buf_len > 11)
buf_len = 11;
rts51x_read_rsp_buf(chip, 1, buf, buf_len);
}
return STATUS_SUCCESS;
}
static int xd_read_data_from_ppb(struct rts51x_chip *chip, int offset, u8 *buf,
int buf_len)
{
int retval, i;
if (!buf || (buf_len <= 0))
TRACE_RET(chip, STATUS_FAIL);
rts51x_init_cmd(chip);
for (i = 0; i < buf_len; i++) {
rts51x_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i, 0,
0);
}
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, buf_len, 200);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
rts51x_read_rsp_buf(chip, 0, buf, buf_len);
return STATUS_SUCCESS;
}
static int xd_read_cis(struct rts51x_chip *chip, u32 page_addr, u8 *buf,
int buf_len)
{
int retval;
u8 reg;
if (!buf || (buf_len < 10))
TRACE_RET(chip, STATUS_FAIL);
rts51x_init_cmd(chip);
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
PINGPONG_BUFFER);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_PAGES);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
XD_TRANSFER_END | XD_PPB_EMPTY,
XD_TRANSFER_END | XD_PPB_EMPTY);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 1, 500);
if (retval == STATUS_TIMEDOUT) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
RTS51X_READ_REG(chip, XD_PAGE_STATUS, &reg);
if (reg != XD_GPG) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, STATUS_FAIL);
}
RTS51X_READ_REG(chip, XD_CTL, &reg);
if (!(reg & XD_ECC1_ERROR) || !(reg & XD_ECC1_UNCORRECTABLE)) {
retval = xd_read_data_from_ppb(chip, 0, buf, buf_len);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
if (reg & XD_ECC1_ERROR) { /* correctable error */
u8 ecc_bit, ecc_byte;
RTS51X_READ_REG(chip, XD_ECC_BIT1, &ecc_bit);
RTS51X_READ_REG(chip, XD_ECC_BYTE1, &ecc_byte);
RTS51X_DEBUGP("ECC_BIT1 = 0x%x, ECC_BYTE1 = 0x%x\n",
ecc_bit, ecc_byte);
if (ecc_byte < buf_len) {
RTS51X_DEBUGP("Before correct: 0x%x\n",
buf[ecc_byte]);
buf[ecc_byte] ^= (1 << ecc_bit);
RTS51X_DEBUGP("After correct: 0x%x\n",
buf[ecc_byte]);
}
}
} else if (!(reg & XD_ECC2_ERROR) || !(reg & XD_ECC2_UNCORRECTABLE)) {
RTS51X_WRITE_REG(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
XD_STOP | XD_CLR_ERR);
retval = xd_read_data_from_ppb(chip, 256, buf, buf_len);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
if (reg & XD_ECC2_ERROR) {
u8 ecc_bit, ecc_byte;
RTS51X_READ_REG(chip, XD_ECC_BIT2, &ecc_bit);
RTS51X_READ_REG(chip, XD_ECC_BYTE2, &ecc_byte);
RTS51X_DEBUGP("ECC_BIT2 = 0x%x, ECC_BYTE2 = 0x%x\n",
ecc_bit, ecc_byte);
if (ecc_byte < buf_len) {
RTS51X_DEBUGP("Before correct: 0x%x\n",
buf[ecc_byte]);
buf[ecc_byte] ^= (1 << ecc_bit);
RTS51X_DEBUGP("After correct: 0x%x\n",
buf[ecc_byte]);
}
}
} else {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, STATUS_FAIL);
}
return STATUS_SUCCESS;
}
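/*
 * Note on the ECC path above: a correctable (single-bit) error is
 * signalled by XD_ECCx_ERROR without XD_ECCx_UNCORRECTABLE, and the
 * failing position is latched in XD_ECC_BYTEx/XD_ECC_BITx.  The repair
 * is a single bit flip in software:
 *
 *   buf[ecc_byte] ^= (1 << ecc_bit);
 *
 * For example (hypothetical values), ecc_byte = 2 and ecc_bit = 4 turn
 * a corrupted buf[2] of 0x08 back into 0x18.
 */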
static void xd_pull_ctl_disable(struct rts51x_chip *chip)
{
if (CHECK_PKG(chip, LQFP48)) {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
} else {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
}
}
static void xd_pull_ctl_enable(struct rts51x_chip *chip)
{
if (CHECK_PKG(chip, LQFP48)) {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5);
} else {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x59);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59);
}
}
static int reset_xd(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval, i, j;
u8 id_buf[4], redunt[11];
retval = rts51x_select_card(chip, XD_CARD);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
XD_PGSTS_NOT_FF);
if (chip->asic_code)
xd_pull_ctl_disable(chip);
else
rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
(FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3));
if (!chip->option.FT2_fast_mode) {
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_INIT, XD_NO_AUTO_PWR_OFF,
0);
if (CHECK_PKG(chip, LQFP48) ||
chip->option.rts5129_D3318_off_enable) {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL,
DV3318_AUTO_PWR_OFF,
DV3318_AUTO_PWR_OFF);
}
}
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
if (!chip->option.FT2_fast_mode) {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
POWER_OFF);
}
retval = rts51x_send_cmd(chip, MODE_C, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
if (!chip->option.FT2_fast_mode) {
#ifdef SD_XD_IO_FOLLOW_PWR
if (CHECK_PKG(chip, LQFP48)
|| chip->option.rts5129_D3318_off_enable) {
rts51x_write_register(chip, CARD_PWR_CTL,
LDO_OFF, LDO_OFF);
}
#endif
wait_timeout(250);
#ifdef SD_XD_IO_FOLLOW_PWR
if (CHECK_PKG(chip, LQFP48)
|| chip->option.rts5129_D3318_off_enable) {
rts51x_init_cmd(chip);
if (chip->asic_code) {
xd_pull_ctl_enable(chip);
} else {
rts51x_add_cmd(chip, WRITE_REG_CMD,
FPGA_PULL_CTL, 0xFF,
(FPGA_XD_PULL_CTL_EN1 &
FPGA_XD_PULL_CTL_EN2));
}
retval = rts51x_send_cmd(chip, MODE_C, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
}
#endif
retval = rts51x_card_power_on(chip, XD_CARD);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
#ifdef SUPPORT_OCP
wait_timeout(50);
rts51x_get_card_status(chip, &(chip->card_status));
chip->ocp_stat = (chip->card_status >> 4) & 0x03;
if (chip->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
RTS51X_DEBUGP("Over current, OCPSTAT is 0x%x\n",
chip->ocp_stat);
TRACE_RET(chip, STATUS_FAIL);
}
#endif
}
rts51x_init_cmd(chip);
if (chip->asic_code)
xd_pull_ctl_enable(chip);
else
rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
(FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2));
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN,
XD_OUTPUT_EN);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CTL, XD_CE_DISEN, XD_CE_DISEN);
retval = rts51x_send_cmd(chip, MODE_C, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
if (!chip->option.FT2_fast_mode)
wait_timeout(200);
retval = xd_set_init_para(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
/* Read ID to check if the timing setting is right */
for (i = 0; i < 4; i++) {
u8 xd_dat, xd_ctl;
if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST)
TRACE_RET(chip, STATUS_FAIL);
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP *
(2 + i + chip->option.rts51x_xd_rw_step)
+ XD_TIME_RWN_STEP * (i + chip->option.rts51x_xd_rwn_step));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 +
i) + XD_TIME_RWN_STEP * (3 + i));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_RESET);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
XD_TRANSFER_END, XD_TRANSFER_END);
rts51x_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
rts51x_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
retval = rts51x_get_rsp(chip, 3, 100);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
xd_dat = chip->rsp_buf[1];
xd_ctl = chip->rsp_buf[2];
RTS51X_DEBUGP("XD_DAT: 0x%x, XD_CTL: 0x%x\n", xd_dat, xd_ctl);
if (((xd_dat & READY_FLAG) != READY_STATE)
|| !(xd_ctl & XD_RDY))
continue;
retval = xd_read_id(chip, READ_ID, id_buf, 4);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
RTS51X_DEBUGP("READ_ID: 0x%x 0x%x 0x%x 0x%x\n",
id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
xd_card->device_code = id_buf[1];
switch (xd_card->device_code) {
case XD_4M_X8_512_1:
case XD_4M_X8_512_2:
xd_card->block_shift = 4; /* 16 pages per block */
xd_card->page_off = 0x0F;
xd_card->addr_cycle = 3;
xd_card->zone_cnt = 1;
xd_card->capacity = 8000; /* 500 * 2 ^ 4 */
XD_SET_4MB(xd_card);
break;
case XD_8M_X8_512:
xd_card->block_shift = 4;
xd_card->page_off = 0x0F;
xd_card->addr_cycle = 3;
xd_card->zone_cnt = 1;
xd_card->capacity = 16000; /* 1000 * 2 ^ 4 */
break;
case XD_16M_X8_512:
XD_PAGE_512(xd_card); /* 32 pages per block */
xd_card->addr_cycle = 3;
xd_card->zone_cnt = 1;
xd_card->capacity = 32000; /* 1000 * 2 ^ 5 */
break;
case XD_32M_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 3;
xd_card->zone_cnt = 2;
xd_card->capacity = 64000; /* 2000 * 2 ^ 5 */
break;
case XD_64M_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 4;
xd_card->capacity = 128000; /* 4000 * 2 ^ 5 */
break;
case XD_128M_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 8;
xd_card->capacity = 256000; /* 8000 * 2 ^ 5 */
break;
case XD_256M_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 16;
xd_card->capacity = 512000; /* 16000 * 2 ^ 5 */
break;
case XD_512M_X8:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 32;
xd_card->capacity = 1024000; /* 32000 * 2 ^ 5 */
break;
case xD_1G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 64;
xd_card->capacity = 2048000; /* 64000 * 2 ^ 5 */
break;
case xD_2G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 128;
xd_card->capacity = 4096000; /* 128000 * 2 ^ 5 */
break;
default:
continue;
}
/* Confirm timing setting */
for (j = 0; j < 10; j++) {
retval = xd_read_id(chip, READ_ID, id_buf, 4);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
if (id_buf[1] != xd_card->device_code)
break;
}
/* Current timing setting passed the check */
if (j == 10)
break;
}
if (i == 4) {
xd_card->block_shift = 0;
xd_card->page_off = 0;
xd_card->addr_cycle = 0;
xd_card->capacity = 0;
TRACE_RET(chip, STATUS_FAIL);
}
retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
RTS51X_DEBUGP("READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
if (id_buf[2] != XD_ID_CODE)
TRACE_RET(chip, STATUS_FAIL);
/* Search CIS block */
for (i = 0; i < 24; i++) {
u32 page_addr;
if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST)
TRACE_RET(chip, STATUS_FAIL);
page_addr = (u32) i << xd_card->block_shift;
for (j = 0; j < 3; j++) {
retval = xd_read_redundant(chip, page_addr, redunt, 11);
if (retval == STATUS_SUCCESS)
break;
}
if (j == 3)
continue;
if (redunt[BLOCK_STATUS] != XD_GBLK)
continue;
j = 0;
/* Check page status */
if (redunt[PAGE_STATUS] != XD_GPG) {
for (j = 1; j <= 8; j++) {
retval =
xd_read_redundant(chip, page_addr + j,
redunt, 11);
if (retval == STATUS_SUCCESS) {
if (redunt[PAGE_STATUS] == XD_GPG)
break;
}
}
if (j == 9)
break;
}
if ((redunt[BLOCK_STATUS] == XD_GBLK)
&& (redunt[PARITY] & XD_BA1_ALL0)) {
u8 buf[10];
page_addr += j;
retval = xd_read_cis(chip, page_addr, buf, 10);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
if ((buf[0] == 0x01) && (buf[1] == 0x03)
&& (buf[2] == 0xD9)
&& (buf[3] == 0x01) && (buf[4] == 0xFF)
&& (buf[5] == 0x18) && (buf[6] == 0x02)
&& (buf[7] == 0xDF) && (buf[8] == 0x01)
&& (buf[9] == 0x20)) {
xd_card->cis_block = (u16) i;
}
}
break;
}
RTS51X_DEBUGP("CIS block: 0x%x\n", xd_card->cis_block);
if (xd_card->cis_block == 0xFFFF)
TRACE_RET(chip, STATUS_FAIL);
chip->capacity[chip->card2lun[XD_CARD]] = xd_card->capacity;
return STATUS_SUCCESS;
}
static int xd_check_data_blank(u8 *redunt)
{
int i;
for (i = 0; i < 6; i++) {
if (redunt[PAGE_STATUS + i] != 0xFF)
return 0;
}
if ((redunt[PARITY] & (XD_ECC1_ALL1 | XD_ECC2_ALL1)) !=
(XD_ECC1_ALL1 | XD_ECC2_ALL1))
return 0;
for (i = 0; i < 4; i++) {
if (redunt[RESERVED0 + i] != 0xFF)
return 0;
}
return 1;
}
static u16 xd_load_log_block_addr(u8 *redunt)
{
u16 addr = 0xFFFF;
if (redunt[PARITY] & XD_BA1_BA2_EQL)
addr =
((u16) redunt[BLOCK_ADDR1_H] << 8) | redunt[BLOCK_ADDR1_L];
else if (redunt[PARITY] & XD_BA1_VALID)
addr =
((u16) redunt[BLOCK_ADDR1_H] << 8) | redunt[BLOCK_ADDR1_L];
else if (redunt[PARITY] & XD_BA2_VALID)
addr =
((u16) redunt[BLOCK_ADDR2_H] << 8) | redunt[BLOCK_ADDR2_L];
return addr;
}
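/*
 * The redundant area carries the logical block address twice
 * (BLOCK_ADDR1/BLOCK_ADDR2); the parity flags indicate which copy can
 * be trusted.  Worked example (values assumed): BLOCK_ADDR1_H = 0x03
 * and BLOCK_ADDR1_L = 0xE7 yield ((u16) 0x03 << 8) | 0xE7 = 0x03E7,
 * i.e. logical offset 999; 0xFFFF means no valid address was found.
 */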
static int xd_init_l2p_tbl(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int size, i;
RTS51X_DEBUGP("xd_init_l2p_tbl: zone_cnt = %d\n", xd_card->zone_cnt);
if (xd_card->zone_cnt < 1)
TRACE_RET(chip, STATUS_FAIL);
size = xd_card->zone_cnt * sizeof(struct zone_entry);
RTS51X_DEBUGP("Buffer size for l2p table is %d\n", size);
xd_card->zone = vmalloc(size);
if (!xd_card->zone)
TRACE_RET(chip, STATUS_NOMEM);
for (i = 0; i < xd_card->zone_cnt; i++) {
xd_card->zone[i].build_flag = 0;
xd_card->zone[i].l2p_table = NULL;
xd_card->zone[i].free_table = NULL;
xd_card->zone[i].get_index = 0;
xd_card->zone[i].set_index = 0;
xd_card->zone[i].unused_blk_cnt = 0;
}
return STATUS_SUCCESS;
}
static inline void free_zone(struct zone_entry *zone)
{
RTS51X_DEBUGP("free_zone\n");
if (!zone)
return;
zone->build_flag = 0;
zone->set_index = 0;
zone->get_index = 0;
zone->unused_blk_cnt = 0;
if (zone->l2p_table) {
vfree(zone->l2p_table);
zone->l2p_table = NULL;
}
if (zone->free_table) {
vfree(zone->free_table);
zone->free_table = NULL;
}
}
static void xd_set_unused_block(struct rts51x_chip *chip, u32 phy_blk)
{
struct xd_info *xd_card = &(chip->xd_card);
struct zone_entry *zone;
int zone_no;
zone_no = (int)phy_blk >> 10;
if (zone_no >= xd_card->zone_cnt) {
RTS51X_DEBUGP("Set unused block to invalid zone"
"(zone_no = %d, zone_cnt = %d)\n",
zone_no, xd_card->zone_cnt);
return;
}
zone = &(xd_card->zone[zone_no]);
if (zone->free_table == NULL) {
if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
return;
}
if ((zone->set_index >= XD_FREE_TABLE_CNT)
|| (zone->set_index < 0)) {
free_zone(zone);
RTS51X_DEBUGP("Set unused block fail, invalid set_index\n");
return;
}
RTS51X_DEBUGP("Set unused block to index %d\n", zone->set_index);
zone->free_table[zone->set_index++] = (u16) (phy_blk & 0x3ff);
if (zone->set_index >= XD_FREE_TABLE_CNT)
zone->set_index = 0;
zone->unused_blk_cnt++;
}
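/*
 * free_table is a fixed-size circular FIFO of 10-bit in-zone block
 * numbers: xd_set_unused_block() above pushes at set_index, and
 * xd_get_unused_block() below pops at get_index, both wrapping at
 * XD_FREE_TABLE_CNT.  A minimal sketch of the same scheme (names
 * assumed, not part of the driver):
 *
 *   u16 fifo[XD_FREE_TABLE_CNT];
 *   int set_i = 0, get_i = 0, cnt = 0;
 *
 *   void push(u16 blk)
 *   {
 *       fifo[set_i++] = blk & 0x3ff;
 *       if (set_i >= XD_FREE_TABLE_CNT)
 *           set_i = 0;
 *       cnt++;
 *   }
 *
 *   u16 pop(void)
 *   {
 *       u16 blk = fifo[get_i];
 *       fifo[get_i++] = 0xFFFF;
 *       if (get_i >= XD_FREE_TABLE_CNT)
 *           get_i = 0;
 *       cnt--;
 *       return blk;
 *   }
 */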
static u32 xd_get_unused_block(struct rts51x_chip *chip, int zone_no)
{
struct xd_info *xd_card = &(chip->xd_card);
struct zone_entry *zone;
u32 phy_blk;
if (zone_no >= xd_card->zone_cnt) {
RTS51X_DEBUGP("Get unused block from invalid zone"
"(zone_no = %d, zone_cnt = %d)\n",
zone_no, xd_card->zone_cnt);
TRACE_RET(chip, BLK_NOT_FOUND);
}
zone = &(xd_card->zone[zone_no]);
if ((zone->unused_blk_cnt == 0) ||
(zone->set_index == zone->get_index)) {
free_zone(zone);
RTS51X_DEBUGP("Get unused block fail,"
"no unused block available\n");
TRACE_RET(chip, BLK_NOT_FOUND);
}
if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
free_zone(zone);
RTS51X_DEBUGP("Get unused block fail, invalid get_index\n");
TRACE_RET(chip, BLK_NOT_FOUND);
}
RTS51X_DEBUGP("Get unused block from index %d\n", zone->get_index);
phy_blk = zone->free_table[zone->get_index];
zone->free_table[zone->get_index++] = 0xFFFF;
if (zone->get_index >= XD_FREE_TABLE_CNT)
zone->get_index = 0;
zone->unused_blk_cnt--;
phy_blk += ((u32) (zone_no) << 10);
return phy_blk;
}
static void xd_set_l2p_tbl(struct rts51x_chip *chip, int zone_no, u16 log_off,
u16 phy_off)
{
struct xd_info *xd_card = &(chip->xd_card);
struct zone_entry *zone;
zone = &(xd_card->zone[zone_no]);
zone->l2p_table[log_off] = phy_off;
}
static int xd_delay_write(struct rts51x_chip *chip);
static u32 xd_get_l2p_tbl(struct rts51x_chip *chip, int zone_no, u16 log_off)
{
struct xd_info *xd_card = &(chip->xd_card);
struct zone_entry *zone;
int retval;
zone = &(xd_card->zone[zone_no]);
if (zone->l2p_table[log_off] == 0xFFFF) {
u32 phy_blk = 0;
int i;
retval = xd_delay_write(chip);
if (retval != STATUS_SUCCESS) {
RTS51X_DEBUGP("In xd_get_l2p_tbl,"
"delay write fail!\n");
TRACE_RET(chip, BLK_NOT_FOUND);
}
if (zone->unused_blk_cnt <= 0) {
RTS51X_DEBUGP("No unused block!\n");
TRACE_RET(chip, BLK_NOT_FOUND);
}
for (i = 0; i < zone->unused_blk_cnt; i++) {
phy_blk = xd_get_unused_block(chip, zone_no);
if (phy_blk == BLK_NOT_FOUND) {
RTS51X_DEBUGP("No unused block available!\n");
TRACE_RET(chip, BLK_NOT_FOUND);
}
retval =
xd_init_page(chip, phy_blk, log_off, 0,
xd_card->page_off + 1);
if (retval == STATUS_SUCCESS)
break;
}
if (i >= zone->unused_blk_cnt) {
RTS51X_DEBUGP("No good unused block available!\n");
TRACE_RET(chip, BLK_NOT_FOUND);
}
xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (phy_blk & 0x3FF));
return phy_blk;
}
return (u32) zone->l2p_table[log_off] + ((u32) (zone_no) << 10);
}
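/*
 * Address arithmetic used throughout the driver: each zone spans 1024
 * physical blocks but only 1000 logical blocks, so
 *
 *   zone_no = log_blk / 1000;
 *   log_off = log_blk % 1000;
 *   phy_blk = l2p_table[log_off] + ((u32) zone_no << 10);
 *
 * Worked example (values assumed): log_blk = 2345 maps to zone 2,
 * offset 345; with l2p_table[345] = 0x123 the physical block is
 * 0x123 + (2 << 10) = 0x923.
 */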
int rts51x_reset_xd_card(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval;
memset(xd_card, 0, sizeof(struct xd_info));
xd_card->block_shift = 0;
xd_card->page_off = 0;
xd_card->addr_cycle = 0;
xd_card->capacity = 0;
xd_card->zone_cnt = 0;
xd_card->cis_block = 0xFFFF;
xd_card->delay_write.delay_write_flag = 0;
rts51x_enable_card_clock(chip, XD_CARD);
retval = reset_xd(chip);
if (retval != STATUS_SUCCESS) {
if (chip->option.reset_or_rw_fail_set_pad_drive) {
rts51x_write_register(chip, CARD_DRIVE_SEL,
SD20_DRIVE_MASK, DRIVE_8mA);
}
TRACE_RET(chip, retval);
}
retval = xd_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
return STATUS_SUCCESS;
}
static int xd_mark_bad_block(struct rts51x_chip *chip, u32 phy_blk)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval;
u32 page_addr;
u8 reg = 0;
RTS51X_DEBUGP("mark block 0x%x as bad block\n", phy_blk);
if (phy_blk == BLK_NOT_FOUND)
TRACE_RET(chip, STATUS_FAIL);
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF,
XD_LATER_BBLK);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_H, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR2_L, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED0, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED1, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED2, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_RESERVED3, 0xFF, 0xFF);
page_addr = phy_blk << xd_card->block_shift;
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
/* Specify page count */
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
xd_card->page_off + 1);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_REDUNDANT);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
retval = rts51x_get_rsp(chip, 1, 100);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
rts51x_ep0_read_register(chip, XD_DAT, &reg);
if (reg & PROGRAM_ERROR)
xd_set_err_code(chip, XD_PRG_ERROR);
else
xd_set_err_code(chip, XD_TO_ERROR);
TRACE_RET(chip, STATUS_FAIL);
}
return STATUS_SUCCESS;
}
static int xd_init_page(struct rts51x_chip *chip, u32 phy_blk, u16 logoff,
u8 start_page, u8 end_page)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval;
u32 page_addr;
u8 reg = 0;
RTS51X_DEBUGP("Init block 0x%x\n", phy_blk);
if (start_page > end_page)
TRACE_RET(chip, STATUS_FAIL);
if (phy_blk == BLK_NOT_FOUND)
TRACE_RET(chip, STATUS_FAIL);
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF,
(u8) (logoff >> 8));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF,
(u8) logoff);
page_addr = (phy_blk << xd_card->block_shift) + start_page;
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
XD_BA_TRANSFORM);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
(end_page - start_page));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_REDUNDANT);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, STATUS_FAIL);
retval = rts51x_get_rsp(chip, 1, 500);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
rts51x_ep0_read_register(chip, XD_DAT, &reg);
if (reg & PROGRAM_ERROR) {
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_PRG_ERROR);
} else {
xd_set_err_code(chip, XD_TO_ERROR);
}
TRACE_RET(chip, STATUS_FAIL);
}
return STATUS_SUCCESS;
}
static int xd_copy_page(struct rts51x_chip *chip,
u32 old_blk, u32 new_blk, u8 start_page, u8 end_page)
{
struct xd_info *xd_card = &(chip->xd_card);
u32 old_page, new_page;
u8 i, reg = 0;
int retval;
RTS51X_DEBUGP("Copy page from block 0x%x to block 0x%x\n", old_blk,
new_blk);
if (start_page > end_page)
TRACE_RET(chip, STATUS_FAIL);
if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
TRACE_RET(chip, STATUS_FAIL);
old_page = (old_blk << xd_card->block_shift) + start_page;
new_page = (new_blk << xd_card->block_shift) + start_page;
XD_CLR_BAD_NEWBLK(xd_card);
RTS51X_WRITE_REG(chip, CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
for (i = start_page; i < end_page; i++) {
if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
RTS51X_WRITE_REG(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
XD_STOP | XD_CLR_ERR);
xd_set_err_code(chip, XD_NO_CARD);
TRACE_RET(chip, STATUS_FAIL);
}
rts51x_init_cmd(chip);
xd_assign_phy_addr(chip, old_page, XD_RW_ADDR);
/* Single page read */
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
XD_AUTO_CHK_DATA_STATUS, 0);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_PAGES);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
XD_TRANSFER_END, XD_TRANSFER_END);
retval = rts51x_send_cmd(chip, MODE_CR | STAGE_XD_STATUS, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 4, 500);
if ((retval != STATUS_SUCCESS) ||
(chip->rsp_buf[2] & (XD_ECC1_ERROR | XD_ECC2_ERROR))) {
rts51x_clear_xd_error(chip);
reg = 0;
rts51x_ep0_read_register(chip, XD_CTL, &reg);
if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
wait_timeout(100);
if (monitor_card_cd(chip, XD_CARD) ==
CD_NOT_EXIST) {
xd_set_err_code(chip, XD_NO_CARD);
TRACE_RET(chip, STATUS_FAIL);
}
if (((reg &
(XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
== (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
|| ((reg & (XD_ECC2_ERROR |
XD_ECC2_UNCORRECTABLE)) ==
(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
RTS51X_WRITE_REG(chip, XD_PAGE_STATUS,
0xFF, XD_BPG);
RTS51X_WRITE_REG(chip, XD_BLOCK_STATUS,
0xFF, XD_GBLK);
XD_SET_BAD_OLDBLK(xd_card);
RTS51X_DEBUGP("old block 0x%x"
"ecc error\n", old_blk);
}
} else {
xd_set_err_code(chip, XD_TO_ERROR);
TRACE_RET(chip, STATUS_FAIL);
}
}
if (XD_CHK_BAD_OLDBLK(xd_card))
rts51x_clear_xd_error(chip);
rts51x_init_cmd(chip);
xd_assign_phy_addr(chip, new_page, XD_RW_ADDR);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_PAGES);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
XD_TRANSFER_END, XD_TRANSFER_END);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 1, 300);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
reg = 0;
rts51x_ep0_read_register(chip, XD_DAT, &reg);
if (reg & PROGRAM_ERROR) {
xd_mark_bad_block(chip, new_blk);
xd_set_err_code(chip, XD_PRG_ERROR);
XD_SET_BAD_NEWBLK(xd_card);
} else {
xd_set_err_code(chip, XD_TO_ERROR);
}
TRACE_RET(chip, retval);
}
old_page++;
new_page++;
}
return STATUS_SUCCESS;
}
static int xd_reset_cmd(struct rts51x_chip *chip)
{
int retval;
u8 xd_dat, xd_ctl;
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_RESET);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
rts51x_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
rts51x_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 3, 100);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
xd_dat = chip->rsp_buf[1];
xd_ctl = chip->rsp_buf[2];
if (((xd_dat & READY_FLAG) == READY_STATE) && (xd_ctl & XD_RDY))
return STATUS_SUCCESS;
TRACE_RET(chip, STATUS_FAIL);
}
static int xd_erase_block(struct rts51x_chip *chip, u32 phy_blk)
{
struct xd_info *xd_card = &(chip->xd_card);
u32 page_addr;
u8 reg = 0, xd_dat;
int i, retval;
if (phy_blk == BLK_NOT_FOUND)
TRACE_RET(chip, STATUS_FAIL);
page_addr = phy_blk << xd_card->block_shift;
for (i = 0; i < 3; i++) {
rts51x_init_cmd(chip);
xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_ERASE);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
XD_TRANSFER_END, XD_TRANSFER_END);
rts51x_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 2, 300);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
rts51x_ep0_read_register(chip, XD_DAT, &reg);
if (reg & PROGRAM_ERROR) {
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_PRG_ERROR);
TRACE_RET(chip, STATUS_FAIL);
} else {
xd_set_err_code(chip, XD_ERASE_FAIL);
}
retval = xd_reset_cmd(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
continue;
}
xd_dat = chip->rsp_buf[1];
if (xd_dat & PROGRAM_ERROR) {
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_PRG_ERROR);
TRACE_RET(chip, STATUS_FAIL);
}
return STATUS_SUCCESS;
}
xd_mark_bad_block(chip, phy_blk);
xd_set_err_code(chip, XD_ERASE_FAIL);
TRACE_RET(chip, STATUS_FAIL);
}
static int xd_build_l2p_tbl(struct rts51x_chip *chip, int zone_no)
{
struct xd_info *xd_card = &(chip->xd_card);
struct zone_entry *zone;
int retval;
u32 start, end, i;
u16 max_logoff, cur_fst_page_logoff, cur_lst_page_logoff,
ent_lst_page_logoff;
u8 redunt[11];
RTS51X_DEBUGP("xd_build_l2p_tbl: %d\n", zone_no);
if (xd_card->zone == NULL) {
retval = xd_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
}
if (xd_card->zone[zone_no].build_flag) {
RTS51X_DEBUGP("l2p table of zone %d has been built\n",
zone_no);
return STATUS_SUCCESS;
}
zone = &(xd_card->zone[zone_no]);
if (zone->l2p_table == NULL) {
zone->l2p_table = vmalloc(2000);
if (zone->l2p_table == NULL)
TRACE_GOTO(chip, Build_Fail);
}
memset((u8 *) (zone->l2p_table), 0xff, 2000);
if (zone->free_table == NULL) {
zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
if (zone->free_table == NULL)
TRACE_GOTO(chip, Build_Fail);
}
memset((u8 *) (zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
if (zone_no == 0) {
if (xd_card->cis_block == 0xFFFF)
start = 0;
else
start = xd_card->cis_block + 1;
if (XD_CHK_4MB(xd_card)) {
end = 0x200;
max_logoff = 499;
} else {
end = 0x400;
max_logoff = 999;
}
} else {
start = (u32) (zone_no) << 10;
end = (u32) (zone_no + 1) << 10;
max_logoff = 999;
}
RTS51X_DEBUGP("start block 0x%x, end block 0x%x\n", start, end);
zone->set_index = zone->get_index = 0;
zone->unused_blk_cnt = 0;
for (i = start; i < end; i++) {
u32 page_addr = i << xd_card->block_shift;
u32 phy_block;
retval = xd_read_redundant(chip, page_addr, redunt, 11);
if (retval != STATUS_SUCCESS)
continue;
if (redunt[BLOCK_STATUS] != 0xFF) {
RTS51X_DEBUGP("bad block\n");
continue;
}
if (xd_check_data_blank(redunt)) {
RTS51X_DEBUGP("blank block\n");
xd_set_unused_block(chip, i);
continue;
}
cur_fst_page_logoff = xd_load_log_block_addr(redunt);
if ((cur_fst_page_logoff == 0xFFFF)
|| (cur_fst_page_logoff > max_logoff)) {
retval = xd_erase_block(chip, i);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, i);
continue;
}
if ((zone_no == 0) && (cur_fst_page_logoff == 0)
&& (redunt[PAGE_STATUS] != XD_GPG))
XD_SET_MBR_FAIL(xd_card);
if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
zone->l2p_table[cur_fst_page_logoff] =
(u16) (i & 0x3FF);
continue;
}
phy_block =
zone->l2p_table[cur_fst_page_logoff] +
((u32) ((zone_no) << 10));
page_addr = ((i + 1) << xd_card->block_shift) - 1;
retval = xd_read_redundant(chip, page_addr, redunt, 11);
if (retval != STATUS_SUCCESS)
continue;
cur_lst_page_logoff = xd_load_log_block_addr(redunt);
if (cur_lst_page_logoff == cur_fst_page_logoff) {
int m;
page_addr =
((phy_block + 1) << xd_card->block_shift) - 1;
for (m = 0; m < 3; m++) {
retval =
xd_read_redundant(chip, page_addr, redunt,
11);
if (retval == STATUS_SUCCESS)
break;
}
if (m == 3) {
zone->l2p_table[cur_fst_page_logoff] =
(u16) (i & 0x3FF);
retval = xd_erase_block(chip, phy_block);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, phy_block);
continue;
}
ent_lst_page_logoff = xd_load_log_block_addr(redunt);
if (ent_lst_page_logoff != cur_fst_page_logoff) {
zone->l2p_table[cur_fst_page_logoff] =
(u16) (i & 0x3FF);
retval = xd_erase_block(chip, phy_block);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, phy_block);
continue;
} else {
retval = xd_erase_block(chip, i);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, i);
}
} else {
retval = xd_erase_block(chip, i);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, i);
}
}
if (XD_CHK_4MB(xd_card))
end = 500;
else
end = 1000;
i = 0;
for (start = 0; start < end; start++) {
if (zone->l2p_table[start] == 0xFFFF)
i++;
}
RTS51X_DEBUGP("Block count %d, invalid L2P entry %d\n", end, i);
RTS51X_DEBUGP("Total unused block: %d\n", zone->unused_blk_cnt);
if ((zone->unused_blk_cnt - i) < 1)
chip->card_wp |= XD_CARD;
zone->build_flag = 1;
return STATUS_SUCCESS;
Build_Fail:
if (zone->l2p_table) {
vfree(zone->l2p_table);
zone->l2p_table = NULL;
}
if (zone->free_table) {
vfree(zone->free_table);
zone->free_table = NULL;
}
return STATUS_FAIL;
}
static int xd_send_cmd(struct rts51x_chip *chip, u8 cmd)
{
int retval;
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_SET_CMD);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
retval = rts51x_send_cmd(chip, MODE_CR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval = rts51x_get_rsp(chip, 1, 200);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
TRACE_RET(chip, retval);
}
return STATUS_SUCCESS;
}
static int xd_read_multiple_pages(struct rts51x_chip *chip, u32 phy_blk,
u32 log_blk, u8 start_page, u8 end_page,
u8 *buf, void **ptr, unsigned int *offset)
{
struct xd_info *xd_card = &(chip->xd_card);
u32 page_addr, new_blk;
u16 log_off;
u8 reg_val, page_cnt;
int zone_no, retval, i;
if (start_page > end_page)
TRACE_RET(chip, STATUS_FAIL);
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
log_off = (u16) (log_blk % 1000);
if ((phy_blk & 0x3FF) == 0x3FF) {
for (i = 0; i < 256; i++) {
page_addr = ((u32) i) << xd_card->block_shift;
retval = xd_read_redundant(chip, page_addr, NULL, 0);
if (retval == STATUS_SUCCESS)
break;
if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
xd_set_err_code(chip, XD_NO_CARD);
TRACE_RET(chip, STATUS_FAIL);
}
}
}
page_addr = (phy_blk << xd_card->block_shift) + start_page;
rts51x_init_cmd(chip);
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_PPB_TO_SIE,
XD_PPB_TO_SIE);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
RING_BUFFER);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
DMA_512);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_READ_PAGES);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
XD_TRANSFER_END | XD_PPB_EMPTY,
XD_TRANSFER_END | XD_PPB_EMPTY);
retval = rts51x_send_cmd(chip, MODE_CDIR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval =
rts51x_transfer_data_partial(chip, RCV_BULK_PIPE(chip), (void *)buf,
ptr, offset, page_cnt * 512,
scsi_sg_count(chip->srb), NULL, 2000);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
if (retval == STATUS_TIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
TRACE_RET(chip, retval);
} else {
TRACE_GOTO(chip, Fail);
}
}
retval = rts51x_get_rsp(chip, 1, 200);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
if (retval == STATUS_TIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
TRACE_RET(chip, retval);
} else {
TRACE_GOTO(chip, Fail);
}
}
return STATUS_SUCCESS;
Fail:
rts51x_ep0_read_register(chip, XD_PAGE_STATUS, &reg_val);
RTS51X_DEBUGP("XD_PAGE_STATUS: 0x%x\n", reg_val);
if (reg_val != XD_GPG)
xd_set_err_code(chip, XD_PRG_ERROR);
rts51x_ep0_read_register(chip, XD_CTL, &reg_val);
RTS51X_DEBUGP("XD_CTL: 0x%x\n", reg_val);
/* Handle uncorrectable ECC error */
if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
== (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
|| ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
== (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
wait_timeout(100);
if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
xd_set_err_code(chip, XD_NO_CARD);
TRACE_RET(chip, STATUS_FAIL);
}
xd_set_err_code(chip, XD_ECC_ERROR);
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == NO_NEW_BLK) {
XD_CLR_BAD_OLDBLK(xd_card);
TRACE_RET(chip, STATUS_FAIL);
}
retval =
xd_copy_page(chip, phy_blk, new_blk, 0,
xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
if (!XD_CHK_BAD_NEWBLK(xd_card)) {
retval = xd_erase_block(chip, new_blk);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, new_blk);
} else {
XD_CLR_BAD_NEWBLK(xd_card);
}
XD_CLR_BAD_OLDBLK(xd_card);
TRACE_RET(chip, STATUS_FAIL);
}
xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (new_blk & 0x3FF));
xd_erase_block(chip, phy_blk);
xd_mark_bad_block(chip, phy_blk);
XD_CLR_BAD_OLDBLK(xd_card);
}
TRACE_RET(chip, STATUS_FAIL);
}
static int xd_finish_write(struct rts51x_chip *chip,
u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval, zone_no;
u16 log_off;
RTS51X_DEBUGP("xd_finish_write, old_blk = 0x%x, new_blk = 0x%x,"
"log_blk = 0x%x\n", old_blk, new_blk, log_blk);
if (page_off > xd_card->page_off)
TRACE_RET(chip, STATUS_FAIL);
zone_no = (int)(log_blk / 1000);
log_off = (u16) (log_blk % 1000);
if (old_blk == BLK_NOT_FOUND) {
retval = xd_init_page(chip, new_blk, log_off,
page_off, xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
retval = xd_erase_block(chip, new_blk);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, new_blk);
TRACE_RET(chip, STATUS_FAIL);
}
} else {
retval = xd_copy_page(chip, old_blk, new_blk,
page_off, xd_card->page_off + 1);
if (retval != STATUS_SUCCESS) {
if (!XD_CHK_BAD_NEWBLK(xd_card)) {
retval = xd_erase_block(chip, new_blk);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, new_blk);
}
XD_CLR_BAD_NEWBLK(xd_card);
TRACE_RET(chip, STATUS_FAIL);
}
retval = xd_erase_block(chip, old_blk);
if (retval == STATUS_SUCCESS) {
if (XD_CHK_BAD_OLDBLK(xd_card)) {
xd_mark_bad_block(chip, old_blk);
XD_CLR_BAD_OLDBLK(xd_card);
} else {
/* Add source block to unused block */
xd_set_unused_block(chip, old_blk);
}
} else {
xd_set_err_code(chip, XD_NO_ERROR);
XD_CLR_BAD_OLDBLK(xd_card);
}
}
/* Add target block to L2P table */
xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (new_blk & 0x3FF));
return STATUS_SUCCESS;
}
static int xd_prepare_write(struct rts51x_chip *chip,
u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
{
int retval;
RTS51X_DEBUGP("xd_prepare_write, old_blk = 0x%x, new_blk = 0x%x,"
"log_blk = 0x%x, page_off = %d\n",
old_blk, new_blk, log_blk, (int)page_off);
if (page_off) {
retval = xd_copy_page(chip, old_blk, new_blk, 0, page_off);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
}
return STATUS_SUCCESS;
}
static int xd_write_multiple_pages(struct rts51x_chip *chip, u32 old_blk,
u32 new_blk, u32 log_blk, u8 start_page,
u8 end_page, u8 *buf, void **ptr,
unsigned int *offset)
{
struct xd_info *xd_card = &(chip->xd_card);
u32 page_addr;
int zone_no, retval;
u16 log_off;
u8 page_cnt, reg_val;
RTS51X_DEBUGP("xd_write_multiple_pages, old_blk = 0x%x,"
"new_blk = 0x%x, log_blk = 0x%x\n",
old_blk, new_blk, log_blk);
if (start_page > end_page)
TRACE_RET(chip, STATUS_FAIL);
page_cnt = end_page - start_page;
zone_no = (int)(log_blk / 1000);
log_off = (u16) (log_blk % 1000);
page_addr = (new_blk << xd_card->block_shift) + start_page;
/* Send index command */
retval = xd_send_cmd(chip, READ1_1);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
rts51x_init_cmd(chip);
/* Prepare redundant field */
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H, 0xFF,
(u8) (log_off >> 8));
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF,
(u8) log_off);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
/* Transform the block address by hardware */
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
XD_BA_TRANSFORM);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
RING_BUFFER);
rts51x_trans_dma_enable(chip->srb->sc_data_direction, chip, page_cnt * 512,
DMA_512);
rts51x_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
XD_TRANSFER_START | XD_WRITE_PAGES);
rts51x_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
XD_TRANSFER_END);
retval = rts51x_send_cmd(chip, MODE_CDOR, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
retval =
rts51x_transfer_data_partial(chip, SND_BULK_PIPE(chip), (void *)buf,
ptr, offset, page_cnt * 512,
scsi_sg_count(chip->srb), NULL, 2000);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
if (retval == STATUS_TIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
TRACE_RET(chip, retval);
} else {
TRACE_GOTO(chip, Fail);
}
}
retval = rts51x_get_rsp(chip, 1, 200);
if (retval != STATUS_SUCCESS) {
rts51x_clear_xd_error(chip);
if (retval == STATUS_TIMEDOUT) {
xd_set_err_code(chip, XD_TO_ERROR);
TRACE_RET(chip, retval);
} else {
TRACE_GOTO(chip, Fail);
}
}
if (end_page == (xd_card->page_off + 1)) {
xd_card->delay_write.delay_write_flag = 0;
if (old_blk != BLK_NOT_FOUND) {
retval = xd_erase_block(chip, old_blk);
if (retval == STATUS_SUCCESS) {
if (XD_CHK_BAD_OLDBLK(xd_card)) {
xd_mark_bad_block(chip, old_blk);
XD_CLR_BAD_OLDBLK(xd_card);
} else {
xd_set_unused_block(chip, old_blk);
}
} else {
xd_set_err_code(chip, XD_NO_ERROR);
XD_CLR_BAD_OLDBLK(xd_card);
}
}
xd_set_l2p_tbl(chip, zone_no, log_off, (u16) (new_blk & 0x3FF));
}
return STATUS_SUCCESS;
Fail:
rts51x_ep0_read_register(chip, XD_DAT, &reg_val);
RTS51X_DEBUGP("XD_DAT: 0x%x\n", reg_val);
if (reg_val & PROGRAM_ERROR) {
xd_set_err_code(chip, XD_PRG_ERROR);
xd_mark_bad_block(chip, new_blk);
}
TRACE_RET(chip, STATUS_FAIL);
}
static int xd_delay_write(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
int retval;
if (delay_write->delay_write_flag) {
RTS51X_DEBUGP("xd_delay_write\n");
retval = xd_switch_clock(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
delay_write->delay_write_flag = 0;
retval = xd_finish_write(chip,
delay_write->old_phyblock,
delay_write->new_phyblock,
delay_write->logblock,
delay_write->pageoff);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
}
return STATUS_SUCCESS;
}
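/*
 * Rationale: a partial-block write leaves the tail pages uncopied and
 * the old block unerased.  That state is parked in delay_write in the
 * hope that the next request continues sequentially within the same
 * logical block and can reuse it; any other access pattern must call
 * xd_delay_write() first, as the read/write path below does, to flush
 * the pending copy/erase before touching the L2P table.
 */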
int rts51x_xd_rw(struct scsi_cmnd *srb, struct rts51x_chip *chip, u32 start_sector,
u16 sector_cnt)
{
struct xd_info *xd_card = &(chip->xd_card);
unsigned int lun = SCSI_LUN(srb);
struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
int retval, zone_no;
u32 log_blk, old_blk = 0, new_blk = 0;
u16 log_off, total_sec_cnt = sector_cnt;
u8 start_page, end_page = 0, page_cnt;
u8 *buf;
void *ptr = NULL;
unsigned int offset = 0;
xd_set_err_code(chip, XD_NO_ERROR);
xd_card->counter = 0;
RTS51X_DEBUGP("rts51x_xd_rw: scsi_bufflen = %d, scsi_sg_count = %d\n",
scsi_bufflen(srb), scsi_sg_count(srb));
RTS51X_DEBUGP("Data direction: %s\n",
(srb->sc_data_direction ==
DMA_TO_DEVICE) ? "write" : "read");
buf = (u8 *) scsi_sglist(srb);
retval = xd_switch_clock(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
log_blk = start_sector >> xd_card->block_shift;
start_page = (u8) start_sector & xd_card->page_off;
zone_no = (int)(log_blk / 1000);
log_off = (u16) (log_blk % 1000);
RTS51X_DEBUGP("log_blk = 0x%x\n", log_blk);
if (xd_card->zone[zone_no].build_flag == 0) {
retval = xd_build_l2p_tbl(chip, zone_no);
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, retval);
}
}
if (srb->sc_data_direction == DMA_TO_DEVICE) {
if (delay_write->delay_write_flag &&
(delay_write->logblock == log_blk) &&
(start_page > delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
if (delay_write->old_phyblock != BLK_NOT_FOUND) {
retval = xd_copy_page(chip,
delay_write->old_phyblock,
delay_write->new_phyblock,
delay_write->pageoff,
start_page);
if (retval != STATUS_SUCCESS) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, retval);
}
}
old_blk = delay_write->old_phyblock;
new_blk = delay_write->new_phyblock;
} else if (delay_write->delay_write_flag &&
(delay_write->logblock == log_blk) &&
(start_page == delay_write->pageoff)) {
delay_write->delay_write_flag = 0;
old_blk = delay_write->old_phyblock;
new_blk = delay_write->new_phyblock;
} else {
retval = xd_delay_write(chip);
if (retval != STATUS_SUCCESS) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, retval);
}
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
new_blk = xd_get_unused_block(chip, zone_no);
if ((old_blk == BLK_NOT_FOUND)
|| (new_blk == BLK_NOT_FOUND)) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, retval);
}
retval =
xd_prepare_write(chip, old_blk, new_blk, log_blk,
start_page);
if (retval != STATUS_SUCCESS) {
if (monitor_card_cd(chip, XD_CARD) ==
CD_NOT_EXIST) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, STATUS_FAIL);
}
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, retval);
}
}
} else {
retval = xd_delay_write(chip);
if (retval != STATUS_SUCCESS) {
if (monitor_card_cd(chip, XD_CARD) == CD_NOT_EXIST) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, STATUS_FAIL);
}
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
TRACE_RET(chip, retval);
}
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
if (old_blk == BLK_NOT_FOUND) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
TRACE_RET(chip, STATUS_FAIL);
}
}
RTS51X_DEBUGP("old_blk = 0x%x\n", old_blk);
if (srb->sc_data_direction == DMA_TO_DEVICE)
RTS51X_DEBUGP("new_blk = 0x%x\n", new_blk);
while (total_sec_cnt) {
if ((start_page + total_sec_cnt) > (xd_card->page_off + 1))
end_page = xd_card->page_off + 1;
else
end_page = start_page + (u8) total_sec_cnt;
page_cnt = end_page - start_page;
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
retval = xd_read_multiple_pages(chip, old_blk, log_blk,
start_page, end_page,
buf, &ptr, &offset);
if (retval != STATUS_SUCCESS) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
TRACE_RET(chip, STATUS_FAIL);
}
} else {
retval =
xd_write_multiple_pages(chip, old_blk, new_blk,
log_blk, start_page,
end_page, buf, &ptr,
&offset);
if (retval != STATUS_SUCCESS) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, STATUS_FAIL);
}
}
total_sec_cnt -= page_cnt;
if (total_sec_cnt == 0)
break;
log_blk++;
zone_no = (int)(log_blk / 1000);
log_off = (u16) (log_blk % 1000);
if (xd_card->zone[zone_no].build_flag == 0) {
retval = xd_build_l2p_tbl(chip, zone_no);
if (retval != STATUS_SUCCESS) {
chip->card_fail |= XD_CARD;
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_NOT_PRESENT);
TRACE_RET(chip, retval);
}
}
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
if (old_blk == BLK_NOT_FOUND) {
if (srb->sc_data_direction == DMA_FROM_DEVICE) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
} else {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
}
TRACE_RET(chip, STATUS_FAIL);
}
if (srb->sc_data_direction == DMA_TO_DEVICE) {
new_blk = xd_get_unused_block(chip, zone_no);
if (new_blk == BLK_NOT_FOUND) {
rts51x_set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
TRACE_RET(chip, STATUS_FAIL);
}
}
start_page = 0;
}
if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
(end_page != (xd_card->page_off + 1))) {
delay_write->delay_write_flag = 1;
delay_write->old_phyblock = old_blk;
delay_write->new_phyblock = new_blk;
delay_write->logblock = log_blk;
delay_write->pageoff = end_page;
}
scsi_set_resid(srb, 0);
return STATUS_SUCCESS;
}
void rts51x_xd_free_l2p_tbl(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int i = 0;
if (xd_card->zone != NULL) {
for (i = 0; i < xd_card->zone_cnt; i++) {
if (xd_card->zone[i].l2p_table != NULL) {
vfree(xd_card->zone[i].l2p_table);
xd_card->zone[i].l2p_table = NULL;
}
if (xd_card->zone[i].free_table != NULL) {
vfree(xd_card->zone[i].free_table);
xd_card->zone[i].free_table = NULL;
}
}
vfree(xd_card->zone);
xd_card->zone = NULL;
}
}
void rts51x_xd_cleanup_work(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
if (xd_card->delay_write.delay_write_flag) {
RTS51X_DEBUGP("xD: delay write\n");
xd_delay_write(chip);
xd_card->counter = 0;
}
}
static int xd_power_off_card3v3(struct rts51x_chip *chip)
{
int retval;
rts51x_init_cmd(chip);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_CLK_EN, XD_CLK_EN, 0);
if (chip->asic_code)
xd_pull_ctl_disable(chip);
else
rts51x_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF, 0xDF);
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
if (!chip->option.FT2_fast_mode) {
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK,
POWER_OFF);
if (CHECK_PKG(chip, LQFP48)
|| chip->option.rts5129_D3318_off_enable)
rts51x_add_cmd(chip, WRITE_REG_CMD, CARD_PWR_CTL,
DV3318_AUTO_PWR_OFF, 0);
}
retval = rts51x_send_cmd(chip, MODE_C, 100);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
return STATUS_SUCCESS;
}
int rts51x_release_xd_card(struct rts51x_chip *chip)
{
struct xd_info *xd_card = &(chip->xd_card);
int retval;
RTS51X_DEBUGP("rts51x_release_xd_card\n");
chip->card_ready &= ~XD_CARD;
chip->card_fail &= ~XD_CARD;
chip->card_wp &= ~XD_CARD;
xd_card->delay_write.delay_write_flag = 0;
rts51x_xd_free_l2p_tbl(chip);
rts51x_write_register(chip, SFSM_ED, HW_CMD_STOP, HW_CMD_STOP);
retval = xd_power_off_card3v3(chip);
if (retval != STATUS_SUCCESS)
TRACE_RET(chip, retval);
if (chip->asic_code && CHECK_PKG(chip, QFN24))
wait_timeout(20);
return STATUS_SUCCESS;
}
| gpl-2.0 |
crewrktablets/rk29_kernel_308 | arch/arm/mach-h720x/common.c | 2983 | 5856 | /*
* linux/arch/arm/mach-h720x/common.c
*
* Copyright (C) 2003 Thomas Gleixner <tglx@linutronix.de>
* 2003 Robert Schwebel <r.schwebel@pengutronix.de>
* 2004 Sascha Hauer <s.hauer@pengutronix.de>
*
* common stuff for Hynix h720x processors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <mach/irqs.h>
#include <asm/mach/dma.h>
#if 0
#define IRQDBG(args...) printk(args)
#else
#define IRQDBG(args...) do {} while(0)
#endif
void __init arch_dma_init(dma_t *dma)
{
}
/*
* Return usecs since last timer reload
* (timercount * (usecs per jiffie)) / (ticks per jiffie)
*/
unsigned long h720x_gettimeoffset(void)
{
return (CPU_REG (TIMER_VIRT, TM0_COUNT) * tick_usec) / LATCH;
}
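/*
 * Worked example (numbers assumed): with tick_usec = 10000 usec per
 * jiffie and a TM0_COUNT of LATCH / 4, the function returns
 * ((LATCH / 4) * 10000) / LATCH = 2500 usec since the last reload.
 */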
/*
* mask Global irq's
*/
static void mask_global_irq(struct irq_data *d)
{
CPU_REG (IRQC_VIRT, IRQC_IER) &= ~(1 << d->irq);
}
/*
* unmask Global irq's
*/
static void unmask_global_irq(struct irq_data *d)
{
CPU_REG (IRQC_VIRT, IRQC_IER) |= (1 << d->irq);
}
/*
* ack GPIO irq's
* Ack is only valid for edge-triggered interrupts
*/
static void inline ack_gpio_irq(struct irq_data *d)
{
u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
u32 bit = IRQ_TO_BIT(d->irq);
if ( (CPU_REG (reg_base, GPIO_EDGE) & bit))
CPU_REG (reg_base, GPIO_CLR) = bit;
}
/*
* mask GPIO irq's
*/
static void inline mask_gpio_irq(struct irq_data *d)
{
u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
u32 bit = IRQ_TO_BIT(d->irq);
CPU_REG (reg_base, GPIO_MASK) &= ~bit;
}
/*
* unmask GPIO irq's
*/
static void inline unmask_gpio_irq(struct irq_data *d)
{
u32 reg_base = GPIO_VIRT(IRQ_TO_REGNO(d->irq));
u32 bit = IRQ_TO_BIT(d->irq);
CPU_REG (reg_base, GPIO_MASK) |= bit;
}
static void
h720x_gpio_handler(unsigned int mask, unsigned int irq,
struct irq_desc *desc)
{
IRQDBG("%s irq: %d\n", __func__, irq);
while (mask) {
if (mask & 1) {
IRQDBG("handling irq %d\n", irq);
generic_handle_irq(irq);
}
irq++;
mask >>= 1;
}
}
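/*
 * The walk above dispatches one virtual IRQ per set mask bit, starting
 * from the bank's base IRQ.  Example (hypothetical numbers): mask =
 * 0b0101 with base irq = 64 calls generic_handle_irq(64) and
 * generic_handle_irq(66).
 */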
static void
h720x_gpioa_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
unsigned int mask, irq;
mask = CPU_REG(GPIO_A_VIRT,GPIO_STAT);
irq = IRQ_CHAINED_GPIOA(0);
IRQDBG("%s mask: 0x%08x irq: %d\n", __func__, mask,irq);
h720x_gpio_handler(mask, irq, desc);
}
static void
h720x_gpiob_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
unsigned int mask, irq;
mask = CPU_REG(GPIO_B_VIRT,GPIO_STAT);
irq = IRQ_CHAINED_GPIOB(0);
IRQDBG("%s mask: 0x%08x irq: %d\n", __func__, mask,irq);
h720x_gpio_handler(mask, irq, desc);
}
static void
h720x_gpioc_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
unsigned int mask, irq;
mask = CPU_REG(GPIO_C_VIRT,GPIO_STAT);
irq = IRQ_CHAINED_GPIOC(0);
IRQDBG("%s mask: 0x%08x irq: %d\n", __func__, mask,irq);
h720x_gpio_handler(mask, irq, desc);
}
static void
h720x_gpiod_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
unsigned int mask, irq;
mask = CPU_REG(GPIO_D_VIRT,GPIO_STAT);
irq = IRQ_CHAINED_GPIOD(0);
IRQDBG("%s mask: 0x%08x irq: %d\n", __func__, mask,irq);
h720x_gpio_handler(mask, irq, desc);
}
#ifdef CONFIG_CPU_H7202
static void
h720x_gpioe_demux_handler(unsigned int irq_unused, struct irq_desc *desc)
{
unsigned int mask, irq;
mask = CPU_REG(GPIO_E_VIRT,GPIO_STAT);
irq = IRQ_CHAINED_GPIOE(0);
IRQDBG("%s mask: 0x%08x irq: %d\n", __func__, mask,irq);
h720x_gpio_handler(mask, irq, desc);
}
#endif
static struct irq_chip h720x_global_chip = {
.irq_ack = mask_global_irq,
.irq_mask = mask_global_irq,
.irq_unmask = unmask_global_irq,
};
static struct irq_chip h720x_gpio_chip = {
.irq_ack = ack_gpio_irq,
.irq_mask = mask_gpio_irq,
.irq_unmask = unmask_gpio_irq,
};
/*
* Initialize IRQ's, mask all, enable multiplexed irq's
*/
void __init h720x_init_irq (void)
{
int irq;
/* Mask global irq's */
CPU_REG (IRQC_VIRT, IRQC_IER) = 0x0;
/* Mask all multiplexed irq's */
CPU_REG (GPIO_A_VIRT, GPIO_MASK) = 0x0;
CPU_REG (GPIO_B_VIRT, GPIO_MASK) = 0x0;
CPU_REG (GPIO_C_VIRT, GPIO_MASK) = 0x0;
CPU_REG (GPIO_D_VIRT, GPIO_MASK) = 0x0;
/* Initialize global IRQ's, fast path */
for (irq = 0; irq < NR_GLBL_IRQS; irq++) {
irq_set_chip_and_handler(irq, &h720x_global_chip,
handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
/* Initialize multiplexed IRQ's, slow path */
for (irq = IRQ_CHAINED_GPIOA(0); irq <= IRQ_CHAINED_GPIOD(31); irq++) {
	irq_set_chip_and_handler(irq, &h720x_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID);
}
irq_set_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler);
irq_set_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler);
irq_set_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler);
irq_set_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler);
#ifdef CONFIG_CPU_H7202
for (irq = IRQ_CHAINED_GPIOE(0); irq <= IRQ_CHAINED_GPIOE(31); irq++) {
	irq_set_chip_and_handler(irq, &h720x_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID);
}
irq_set_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler);
#endif
/* Enable multiplexed irq's */
CPU_REG (IRQC_VIRT, IRQC_IER) = IRQ_ENA_MUX;
}
static struct map_desc h720x_io_desc[] __initdata = {
{
.virtual = IO_VIRT,
.pfn = __phys_to_pfn(IO_PHYS),
.length = IO_SIZE,
.type = MT_DEVICE
},
};
/* Initialize io tables */
void __init h720x_map_io(void)
{
iotable_init(h720x_io_desc, ARRAY_SIZE(h720x_io_desc));
}
| gpl-2.0 |
xdabbeb/g2-kk-kernel | sound/core/hwdep.c | 3239 | 13466 | /*
* Hardware dependent layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/major.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/minors.h>
#include <sound/hwdep.h>
#include <sound/info.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Hardware dependent layer");
MODULE_LICENSE("GPL");
static LIST_HEAD(snd_hwdep_devices);
static DEFINE_MUTEX(register_mutex);
static int snd_hwdep_free(struct snd_hwdep *hwdep);
static int snd_hwdep_dev_free(struct snd_device *device);
static int snd_hwdep_dev_register(struct snd_device *device);
static int snd_hwdep_dev_disconnect(struct snd_device *device);
static struct snd_hwdep *snd_hwdep_search(struct snd_card *card, int device)
{
struct snd_hwdep *hwdep;
list_for_each_entry(hwdep, &snd_hwdep_devices, list)
if (hwdep->card == card && hwdep->device == device)
return hwdep;
return NULL;
}
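/*
 * The file operations below simply delegate to the optional hwdep
 * callbacks supplied by the card driver; when a callback is absent,
 * -ENXIO is returned (or 0 in the case of poll).
 */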
static loff_t snd_hwdep_llseek(struct file * file, loff_t offset, int orig)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.llseek)
return hw->ops.llseek(hw, file, offset, orig);
return -ENXIO;
}
static ssize_t snd_hwdep_read(struct file * file, char __user *buf,
size_t count, loff_t *offset)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.read)
return hw->ops.read(hw, buf, count, offset);
return -ENXIO;
}
static ssize_t snd_hwdep_write(struct file * file, const char __user *buf,
size_t count, loff_t *offset)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.write)
return hw->ops.write(hw, buf, count, offset);
return -ENXIO;
}
static int snd_hwdep_open(struct inode *inode, struct file * file)
{
int major = imajor(inode);
struct snd_hwdep *hw;
int err;
wait_queue_t wait;
if (major == snd_major) {
hw = snd_lookup_minor_data(iminor(inode),
SNDRV_DEVICE_TYPE_HWDEP);
#ifdef CONFIG_SND_OSSEMUL
} else if (major == SOUND_MAJOR) {
hw = snd_lookup_oss_minor_data(iminor(inode),
SNDRV_OSS_DEVICE_TYPE_DMFM);
#endif
} else
return -ENXIO;
if (hw == NULL)
return -ENODEV;
if (!try_module_get(hw->card->module))
return -EFAULT;
init_waitqueue_entry(&wait, current);
add_wait_queue(&hw->open_wait, &wait);
mutex_lock(&hw->open_mutex);
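	/*
	 * Wait for the device to become available: exclusive devices
	 * allow only one opener, and -EAGAIN from the driver's open()
	 * callback means "busy, retry". Unless O_NONBLOCK is set, we
	 * sleep on open_wait and try again.
	 */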
while (1) {
if (hw->exclusive && hw->used > 0) {
err = -EBUSY;
break;
}
if (!hw->ops.open) {
err = 0;
break;
}
err = hw->ops.open(hw, file);
if (err >= 0)
break;
if (err == -EAGAIN) {
if (file->f_flags & O_NONBLOCK) {
err = -EBUSY;
break;
}
} else
break;
set_current_state(TASK_INTERRUPTIBLE);
mutex_unlock(&hw->open_mutex);
schedule();
mutex_lock(&hw->open_mutex);
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
}
}
remove_wait_queue(&hw->open_wait, &wait);
if (err >= 0) {
err = snd_card_file_add(hw->card, file);
if (err >= 0) {
file->private_data = hw;
hw->used++;
} else {
if (hw->ops.release)
hw->ops.release(hw, file);
}
}
mutex_unlock(&hw->open_mutex);
if (err < 0)
module_put(hw->card->module);
return err;
}
static int snd_hwdep_release(struct inode *inode, struct file * file)
{
int err = 0;
struct snd_hwdep *hw = file->private_data;
struct module *mod = hw->card->module;
mutex_lock(&hw->open_mutex);
if (hw->ops.release)
err = hw->ops.release(hw, file);
if (hw->used > 0)
hw->used--;
mutex_unlock(&hw->open_mutex);
wake_up(&hw->open_wait);
snd_card_file_remove(hw->card, file);
module_put(mod);
return err;
}
static unsigned int snd_hwdep_poll(struct file * file, poll_table * wait)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.poll)
return hw->ops.poll(hw, file, wait);
return 0;
}
static int snd_hwdep_info(struct snd_hwdep *hw,
struct snd_hwdep_info __user *_info)
{
struct snd_hwdep_info info;
memset(&info, 0, sizeof(info));
info.card = hw->card->number;
strlcpy(info.id, hw->id, sizeof(info.id));
strlcpy(info.name, hw->name, sizeof(info.name));
info.iface = hw->iface;
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int snd_hwdep_dsp_status(struct snd_hwdep *hw,
struct snd_hwdep_dsp_status __user *_info)
{
struct snd_hwdep_dsp_status info;
int err;
if (! hw->ops.dsp_status)
return -ENXIO;
memset(&info, 0, sizeof(info));
info.dsp_loaded = hw->dsp_loaded;
if ((err = hw->ops.dsp_status(hw, &info)) < 0)
return err;
if (copy_to_user(_info, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
struct snd_hwdep_dsp_image __user *_info)
{
struct snd_hwdep_dsp_image info;
int err;
if (! hw->ops.dsp_load)
return -ENXIO;
memset(&info, 0, sizeof(info));
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
/* check whether the dsp was already loaded */
if (hw->dsp_loaded & (1 << info.index))
return -EBUSY;
if (!access_ok(VERIFY_READ, info.image, info.length))
return -EFAULT;
err = hw->ops.dsp_load(hw, &info);
if (err < 0)
return err;
hw->dsp_loaded |= (1 << info.index);
return 0;
}
static long snd_hwdep_ioctl(struct file * file, unsigned int cmd,
unsigned long arg)
{
struct snd_hwdep *hw = file->private_data;
void __user *argp = (void __user *)arg;
switch (cmd) {
case SNDRV_HWDEP_IOCTL_PVERSION:
return put_user(SNDRV_HWDEP_VERSION, (int __user *)argp);
case SNDRV_HWDEP_IOCTL_INFO:
return snd_hwdep_info(hw, argp);
case SNDRV_HWDEP_IOCTL_DSP_STATUS:
return snd_hwdep_dsp_status(hw, argp);
case SNDRV_HWDEP_IOCTL_DSP_LOAD:
return snd_hwdep_dsp_load(hw, argp);
}
if (hw->ops.ioctl)
return hw->ops.ioctl(hw, file, cmd, arg);
return -ENOTTY;
}
static int snd_hwdep_mmap(struct file * file, struct vm_area_struct * vma)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.mmap)
return hw->ops.mmap(hw, file, vma);
return -ENXIO;
}
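/*
 * Card-level control ioctls: SNDRV_CTL_IOCTL_HWDEP_NEXT_DEVICE iterates
 * over the card's hwdep devices (writing -1 back when no further device
 * exists), and SNDRV_CTL_IOCTL_HWDEP_INFO queries a specific device.
 */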
static int snd_hwdep_control_ioctl(struct snd_card *card,
struct snd_ctl_file * control,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SNDRV_CTL_IOCTL_HWDEP_NEXT_DEVICE:
{
int device;
if (get_user(device, (int __user *)arg))
return -EFAULT;
mutex_lock(&register_mutex);
if (device < 0)
device = 0;
else if (device < SNDRV_MINOR_HWDEPS)
device++;
else
device = SNDRV_MINOR_HWDEPS;
while (device < SNDRV_MINOR_HWDEPS) {
if (snd_hwdep_search(card, device))
break;
device++;
}
if (device >= SNDRV_MINOR_HWDEPS)
device = -1;
mutex_unlock(&register_mutex);
if (put_user(device, (int __user *)arg))
return -EFAULT;
return 0;
}
case SNDRV_CTL_IOCTL_HWDEP_INFO:
{
struct snd_hwdep_info __user *info = (struct snd_hwdep_info __user *)arg;
int device, err;
struct snd_hwdep *hwdep;
if (get_user(device, &info->device))
return -EFAULT;
mutex_lock(&register_mutex);
hwdep = snd_hwdep_search(card, device);
if (hwdep)
err = snd_hwdep_info(hwdep, info);
else
err = -ENXIO;
mutex_unlock(&register_mutex);
return err;
}
}
return -ENOIOCTLCMD;
}
#ifdef CONFIG_COMPAT
#include "hwdep_compat.c"
#else
#define snd_hwdep_ioctl_compat NULL
#endif
/*
*/
static const struct file_operations snd_hwdep_f_ops =
{
.owner = THIS_MODULE,
.llseek = snd_hwdep_llseek,
.read = snd_hwdep_read,
.write = snd_hwdep_write,
.open = snd_hwdep_open,
.release = snd_hwdep_release,
.poll = snd_hwdep_poll,
.unlocked_ioctl = snd_hwdep_ioctl,
.compat_ioctl = snd_hwdep_ioctl_compat,
.mmap = snd_hwdep_mmap,
};
/**
* snd_hwdep_new - create a new hwdep instance
* @card: the card instance
* @id: the id string
* @device: the device index (zero-based)
* @rhwdep: the pointer to store the new hwdep instance
*
* Creates a new hwdep instance with the given index on the card.
* The callbacks (hwdep->ops) must be set on the returned instance
* after this call manually by the caller.
*
* Returns zero if successful, or a negative error code on failure.
*/
int snd_hwdep_new(struct snd_card *card, char *id, int device,
struct snd_hwdep **rhwdep)
{
struct snd_hwdep *hwdep;
int err;
static struct snd_device_ops ops = {
.dev_free = snd_hwdep_dev_free,
.dev_register = snd_hwdep_dev_register,
.dev_disconnect = snd_hwdep_dev_disconnect,
};
if (snd_BUG_ON(!card))
return -ENXIO;
if (rhwdep)
*rhwdep = NULL;
hwdep = kzalloc(sizeof(*hwdep), GFP_KERNEL);
if (hwdep == NULL) {
snd_printk(KERN_ERR "hwdep: cannot allocate\n");
return -ENOMEM;
}
hwdep->card = card;
hwdep->device = device;
if (id)
strlcpy(hwdep->id, id, sizeof(hwdep->id));
#ifdef CONFIG_SND_OSSEMUL
hwdep->oss_type = -1;
#endif
if ((err = snd_device_new(card, SNDRV_DEV_HWDEP, hwdep, &ops)) < 0) {
snd_hwdep_free(hwdep);
return err;
}
init_waitqueue_head(&hwdep->open_wait);
mutex_init(&hwdep->open_mutex);
if (rhwdep)
*rhwdep = hwdep;
return 0;
}
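/*
 * Typical use by a card driver (an illustrative sketch only; the names
 * below are hypothetical). Note that the callbacks are set manually on
 * the returned instance, as described above:
 *
 *	struct snd_hwdep *hw;
 *	int err = snd_hwdep_new(card, "MYDRV", 0, &hw);
 *	if (err < 0)
 *		return err;
 *	hw->private_data = chip;
 *	hw->ops.open = mydrv_hwdep_open;
 *	hw->ops.ioctl = mydrv_hwdep_ioctl;
 */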
static int snd_hwdep_free(struct snd_hwdep *hwdep)
{
if (!hwdep)
return 0;
if (hwdep->private_free)
hwdep->private_free(hwdep);
kfree(hwdep);
return 0;
}
static int snd_hwdep_dev_free(struct snd_device *device)
{
struct snd_hwdep *hwdep = device->device_data;
return snd_hwdep_free(hwdep);
}
static int snd_hwdep_dev_register(struct snd_device *device)
{
struct snd_hwdep *hwdep = device->device_data;
int err;
char name[32];
mutex_lock(&register_mutex);
if (snd_hwdep_search(hwdep->card, hwdep->device)) {
mutex_unlock(&register_mutex);
return -EBUSY;
}
list_add_tail(&hwdep->list, &snd_hwdep_devices);
sprintf(name, "hwC%iD%i", hwdep->card->number, hwdep->device);
if ((err = snd_register_device(SNDRV_DEVICE_TYPE_HWDEP,
hwdep->card, hwdep->device,
&snd_hwdep_f_ops, hwdep, name)) < 0) {
snd_printk(KERN_ERR "unable to register hardware dependent device %i:%i\n",
hwdep->card->number, hwdep->device);
list_del(&hwdep->list);
mutex_unlock(&register_mutex);
return err;
}
#ifdef CONFIG_SND_OSSEMUL
hwdep->ossreg = 0;
if (hwdep->oss_type >= 0) {
if ((hwdep->oss_type == SNDRV_OSS_DEVICE_TYPE_DMFM) && (hwdep->device != 0)) {
snd_printk (KERN_WARNING "only hwdep device 0 can be registered as OSS direct FM device!\n");
} else {
if (snd_register_oss_device(hwdep->oss_type,
hwdep->card, hwdep->device,
&snd_hwdep_f_ops, hwdep,
hwdep->oss_dev) < 0) {
snd_printk(KERN_ERR "unable to register OSS compatibility device %i:%i\n",
hwdep->card->number, hwdep->device);
} else
hwdep->ossreg = 1;
}
}
#endif
mutex_unlock(&register_mutex);
return 0;
}
static int snd_hwdep_dev_disconnect(struct snd_device *device)
{
struct snd_hwdep *hwdep = device->device_data;
if (snd_BUG_ON(!hwdep))
return -ENXIO;
mutex_lock(&register_mutex);
if (snd_hwdep_search(hwdep->card, hwdep->device) != hwdep) {
mutex_unlock(&register_mutex);
return -EINVAL;
}
#ifdef CONFIG_SND_OSSEMUL
if (hwdep->ossreg)
snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device);
#endif
snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device);
list_del_init(&hwdep->list);
mutex_unlock(&register_mutex);
return 0;
}
#ifdef CONFIG_PROC_FS
/*
* Info interface
*/
static void snd_hwdep_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_hwdep *hwdep;
mutex_lock(&register_mutex);
list_for_each_entry(hwdep, &snd_hwdep_devices, list)
snd_iprintf(buffer, "%02i-%02i: %s\n",
hwdep->card->number, hwdep->device, hwdep->name);
mutex_unlock(&register_mutex);
}
static struct snd_info_entry *snd_hwdep_proc_entry;
static void __init snd_hwdep_proc_init(void)
{
struct snd_info_entry *entry;
if ((entry = snd_info_create_module_entry(THIS_MODULE, "hwdep", NULL)) != NULL) {
entry->c.text.read = snd_hwdep_proc_read;
if (snd_info_register(entry) < 0) {
snd_info_free_entry(entry);
entry = NULL;
}
}
snd_hwdep_proc_entry = entry;
}
static void __exit snd_hwdep_proc_done(void)
{
snd_info_free_entry(snd_hwdep_proc_entry);
}
#else /* !CONFIG_PROC_FS */
#define snd_hwdep_proc_init()
#define snd_hwdep_proc_done()
#endif /* CONFIG_PROC_FS */
/*
* ENTRY functions
*/
static int __init alsa_hwdep_init(void)
{
snd_hwdep_proc_init();
snd_ctl_register_ioctl(snd_hwdep_control_ioctl);
snd_ctl_register_ioctl_compat(snd_hwdep_control_ioctl);
return 0;
}
static void __exit alsa_hwdep_exit(void)
{
snd_ctl_unregister_ioctl(snd_hwdep_control_ioctl);
snd_ctl_unregister_ioctl_compat(snd_hwdep_control_ioctl);
snd_hwdep_proc_done();
}
module_init(alsa_hwdep_init)
module_exit(alsa_hwdep_exit)
EXPORT_SYMBOL(snd_hwdep_new);
| gpl-2.0 |
drowningchild/lgog_old | drivers/net/ethernet/davicom/dm9000.c | 3495 | 39386 | /*
* Davicom DM9000 Fast Ethernet driver for Linux.
* Copyright (C) 1997 Sten Wang
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
*
* Additional updates, Copyright:
* Ben Dooks <ben@simtec.co.uk>
* Sascha Hauer <s.hauer@pengutronix.de>
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dm9000.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "dm9000.h"
/* Board/System/Debug information/definition ---------------- */
#define DM9000_PHY 0x40 /* PHY address 0x01 */
#define CARDNAME "dm9000"
#define DRV_VERSION "1.31"
/*
* Transmit timeout, default 5 seconds.
*/
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
/*
* Debug messages level
*/
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)");
/* DM9000 register address locking.
*
* The DM9000 uses an address register to control where data written
* to the data register goes. This means that the address register
* must be preserved over interrupts or similar calls.
*
* During interrupt and other critical calls, a spinlock is used to
* protect the system, but the calls themselves save the address
* in the address register in case they are interrupting another
* access to the device.
*
* For general accesses a lock is provided so that calls which are
* allowed to sleep are serialised so that the address register does
* not need to be saved. This lock also serves to serialise access
* to the EEPROM and PHY access registers which are shared between
* these two devices.
*/
/* The driver supports the original DM9000E, and now the two newer
* devices, DM9000A and DM9000B.
*/
enum dm9000_type {
TYPE_DM9000E, /* original DM9000 */
TYPE_DM9000A,
TYPE_DM9000B
};
/* Structure/enum declaration ------------------------------- */
typedef struct board_info {
void __iomem *io_addr; /* Register I/O base address */
void __iomem *io_data; /* Data I/O address */
u16 irq; /* IRQ */
u16 tx_pkt_cnt;
u16 queue_pkt_len;
u16 queue_start_addr;
u16 queue_ip_summed;
u16 dbug_cnt;
u8 io_mode; /* 0:word, 2:byte */
u8 phy_addr;
u8 imr_all;
unsigned int flags;
unsigned int in_suspend :1;
unsigned int wake_supported :1;
enum dm9000_type type;
void (*inblk)(void __iomem *port, void *data, int length);
void (*outblk)(void __iomem *port, void *data, int length);
void (*dumpblk)(void __iomem *port, int length);
struct device *dev; /* parent device */
struct resource *addr_res; /* resources found */
struct resource *data_res;
struct resource *addr_req; /* resources requested */
struct resource *data_req;
struct resource *irq_res;
int irq_wake;
struct mutex addr_lock; /* phy and eeprom access lock */
struct delayed_work phy_poll;
struct net_device *ndev;
spinlock_t lock;
struct mii_if_info mii;
u32 msg_enable;
u32 wake_state;
int ip_summed;
} board_info_t;
/* debug code */
#define dm9000_dbg(db, lev, msg...) do { \
if ((lev) < debug) { \
dev_dbg(db->dev, msg); \
} \
} while (0)
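/*
 * Note: a message of level N is emitted only when N is strictly below
 * the "debug" module parameter, so e.g. debug=1 enables only level-0
 * messages.
 */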
static inline board_info_t *to_dm9000_board(struct net_device *dev)
{
return netdev_priv(dev);
}
/* DM9000 network board routine ---------------------------- */
static void
dm9000_reset(board_info_t * db)
{
dev_dbg(db->dev, "resetting device\n");
/* RESET device */
writeb(DM9000_NCR, db->io_addr);
udelay(200);
writeb(NCR_RST, db->io_data);
udelay(200);
}
/*
* Read a byte from I/O port
*/
static u8
ior(board_info_t * db, int reg)
{
writeb(reg, db->io_addr);
return readb(db->io_data);
}
/*
* Write a byte to I/O port
*/
static void
iow(board_info_t * db, int reg, int value)
{
writeb(reg, db->io_addr);
writeb(value, db->io_data);
}
/* routines for sending block to chip */
static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
{
writesb(reg, data, count);
}
static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
{
writesw(reg, data, (count+1) >> 1);
}
static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
{
writesl(reg, data, (count+3) >> 2);
}
/* input block from chip to memory */
static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
{
readsb(reg, data, count);
}
static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
{
readsw(reg, data, (count+1) >> 1);
}
static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
{
readsl(reg, data, (count+3) >> 2);
}
/* dump block from chip to null */
static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
int i;
int tmp;
for (i = 0; i < count; i++)
tmp = readb(reg);
}
static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
int i;
int tmp;
count = (count + 1) >> 1;
for (i = 0; i < count; i++)
tmp = readw(reg);
}
static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
int i;
int tmp;
count = (count + 3) >> 2;
for (i = 0; i < count; i++)
tmp = readl(reg);
}
/* dm9000_set_io
*
* select the specified set of io routines to use with the
* device
*/
static void dm9000_set_io(struct board_info *db, int byte_width)
{
/* use the size of the data resource to work out what IO
* routines we want to use
*/
switch (byte_width) {
case 1:
db->dumpblk = dm9000_dumpblk_8bit;
db->outblk = dm9000_outblk_8bit;
db->inblk = dm9000_inblk_8bit;
break;
case 3:
dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
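		/* fall through */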
case 2:
db->dumpblk = dm9000_dumpblk_16bit;
db->outblk = dm9000_outblk_16bit;
db->inblk = dm9000_inblk_16bit;
break;
case 4:
default:
db->dumpblk = dm9000_dumpblk_32bit;
db->outblk = dm9000_outblk_32bit;
db->inblk = dm9000_inblk_32bit;
break;
}
}
static void dm9000_schedule_poll(board_info_t *db)
{
if (db->type == TYPE_DM9000E)
schedule_delayed_work(&db->phy_poll, HZ * 2);
}
static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
board_info_t *dm = to_dm9000_board(dev);
if (!netif_running(dev))
return -EINVAL;
return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
}
static unsigned int
dm9000_read_locked(board_info_t *db, int reg)
{
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&db->lock, flags);
ret = ior(db, reg);
spin_unlock_irqrestore(&db->lock, flags);
return ret;
}
static int dm9000_wait_eeprom(board_info_t *db)
{
unsigned int status;
int timeout = 8; /* wait max 8msec */
/* The DM9000 data sheets say we should be able to
* poll the ERRE bit in EPCR to wait for the EEPROM
* operation. From testing several chips, this bit
* does not seem to work.
*
* We attempt to use the bit, but fall back to the
* timeout (which is why we do not return an error
* on expiry) to say that the EEPROM operation has
* completed.
*/
while (1) {
status = dm9000_read_locked(db, DM9000_EPCR);
if ((status & EPCR_ERRE) == 0)
break;
msleep(1);
if (timeout-- < 0) {
dev_dbg(db->dev, "timeout waiting EEPROM\n");
break;
}
}
return 0;
}
/*
 * Read a word of data from the EEPROM
*/
static void
dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
{
unsigned long flags;
if (db->flags & DM9000_PLATF_NO_EEPROM) {
to[0] = 0xff;
to[1] = 0xff;
return;
}
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPAR, offset);
iow(db, DM9000_EPCR, EPCR_ERPRR);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_wait_eeprom(db);
/* delay for at least 150us */
msleep(1);
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPCR, 0x0);
to[0] = ior(db, DM9000_EPDRL);
to[1] = ior(db, DM9000_EPDRH);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/*
 * Write a word of data to the EEPROM
*/
static void
dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
{
unsigned long flags;
if (db->flags & DM9000_PLATF_NO_EEPROM)
return;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPAR, offset);
iow(db, DM9000_EPDRH, data[1]);
iow(db, DM9000_EPDRL, data[0]);
iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_wait_eeprom(db);
mdelay(1); /* wait at least 150uS to clear */
spin_lock_irqsave(&db->lock, flags);
iow(db, DM9000_EPCR, 0);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/* ethtool ops */
static void dm9000_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
board_info_t *dm = to_dm9000_board(dev);
strcpy(info->driver, CARDNAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, to_platform_device(dm->dev)->name);
}
static u32 dm9000_get_msglevel(struct net_device *dev)
{
board_info_t *dm = to_dm9000_board(dev);
return dm->msg_enable;
}
static void dm9000_set_msglevel(struct net_device *dev, u32 value)
{
board_info_t *dm = to_dm9000_board(dev);
dm->msg_enable = value;
}
static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
board_info_t *dm = to_dm9000_board(dev);
mii_ethtool_gset(&dm->mii, cmd);
return 0;
}
static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
board_info_t *dm = to_dm9000_board(dev);
return mii_ethtool_sset(&dm->mii, cmd);
}
static int dm9000_nway_reset(struct net_device *dev)
{
board_info_t *dm = to_dm9000_board(dev);
return mii_nway_restart(&dm->mii);
}
static int dm9000_set_features(struct net_device *dev,
netdev_features_t features)
{
board_info_t *dm = to_dm9000_board(dev);
netdev_features_t changed = dev->features ^ features;
unsigned long flags;
if (!(changed & NETIF_F_RXCSUM))
return 0;
spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
spin_unlock_irqrestore(&dm->lock, flags);
return 0;
}
static u32 dm9000_get_link(struct net_device *dev)
{
board_info_t *dm = to_dm9000_board(dev);
u32 ret;
if (dm->flags & DM9000_PLATF_EXT_PHY)
ret = mii_link_ok(&dm->mii);
else
ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
return ret;
}
#define DM_EEPROM_MAGIC (0x444D394B)
static int dm9000_get_eeprom_len(struct net_device *dev)
{
return 128;
}
static int dm9000_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee, u8 *data)
{
board_info_t *dm = to_dm9000_board(dev);
int offset = ee->offset;
int len = ee->len;
int i;
/* EEPROM access is aligned to two bytes */
if ((len & 1) != 0 || (offset & 1) != 0)
return -EINVAL;
if (dm->flags & DM9000_PLATF_NO_EEPROM)
return -ENOENT;
ee->magic = DM_EEPROM_MAGIC;
for (i = 0; i < len; i += 2)
dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
return 0;
}
static int dm9000_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee, u8 *data)
{
board_info_t *dm = to_dm9000_board(dev);
int offset = ee->offset;
int len = ee->len;
int done;
/* EEPROM access is aligned to two bytes */
if (dm->flags & DM9000_PLATF_NO_EEPROM)
return -ENOENT;
if (ee->magic != DM_EEPROM_MAGIC)
return -EINVAL;
while (len > 0) {
if (len & 1 || offset & 1) {
int which = offset & 1;
u8 tmp[2];
dm9000_read_eeprom(dm, offset / 2, tmp);
tmp[which] = *data;
dm9000_write_eeprom(dm, offset / 2, tmp);
done = 1;
} else {
dm9000_write_eeprom(dm, offset / 2, data);
done = 2;
}
data += done;
offset += done;
len -= done;
}
return 0;
}
static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
board_info_t *dm = to_dm9000_board(dev);
memset(w, 0, sizeof(struct ethtool_wolinfo));
/* note, we could probably support wake-phy too */
w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
w->wolopts = dm->wake_state;
}
static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
board_info_t *dm = to_dm9000_board(dev);
unsigned long flags;
u32 opts = w->wolopts;
u32 wcr = 0;
if (!dm->wake_supported)
return -EOPNOTSUPP;
if (opts & ~WAKE_MAGIC)
return -EINVAL;
if (opts & WAKE_MAGIC)
wcr |= WCR_MAGICEN;
mutex_lock(&dm->addr_lock);
spin_lock_irqsave(&dm->lock, flags);
iow(dm, DM9000_WCR, wcr);
spin_unlock_irqrestore(&dm->lock, flags);
mutex_unlock(&dm->addr_lock);
if (dm->wake_state != opts) {
/* change in wol state, update IRQ state */
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
dm->wake_state = opts;
return 0;
}
static const struct ethtool_ops dm9000_ethtool_ops = {
.get_drvinfo = dm9000_get_drvinfo,
.get_settings = dm9000_get_settings,
.set_settings = dm9000_set_settings,
.get_msglevel = dm9000_get_msglevel,
.set_msglevel = dm9000_set_msglevel,
.nway_reset = dm9000_nway_reset,
.get_link = dm9000_get_link,
.get_wol = dm9000_get_wol,
.set_wol = dm9000_set_wol,
.get_eeprom_len = dm9000_get_eeprom_len,
.get_eeprom = dm9000_get_eeprom,
.set_eeprom = dm9000_set_eeprom,
};
static void dm9000_show_carrier(board_info_t *db,
unsigned carrier, unsigned nsr)
{
struct net_device *ndev = db->ndev;
unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
if (carrier)
dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
(ncr & NCR_FDX) ? "full" : "half");
else
dev_info(db->dev, "%s: link down\n", ndev->name);
}
static void
dm9000_poll_work(struct work_struct *w)
{
struct delayed_work *dw = to_delayed_work(w);
board_info_t *db = container_of(dw, board_info_t, phy_poll);
struct net_device *ndev = db->ndev;
if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
!(db->flags & DM9000_PLATF_EXT_PHY)) {
unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
unsigned new_carrier;
new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
if (old_carrier != new_carrier) {
if (netif_msg_link(db))
dm9000_show_carrier(db, new_carrier, nsr);
if (!new_carrier)
netif_carrier_off(ndev);
else
netif_carrier_on(ndev);
}
} else
mii_check_media(&db->mii, netif_msg_link(db), 0);
if (netif_running(ndev))
dm9000_schedule_poll(db);
}
/* dm9000_release_board
*
* release a board, and any mapped resources
*/
static void
dm9000_release_board(struct platform_device *pdev, struct board_info *db)
{
/* unmap our resources */
iounmap(db->io_addr);
iounmap(db->io_data);
/* release the resources */
release_resource(db->data_req);
kfree(db->data_req);
release_resource(db->addr_req);
kfree(db->addr_req);
}
static unsigned char dm9000_type_to_char(enum dm9000_type type)
{
switch (type) {
case TYPE_DM9000E: return 'e';
case TYPE_DM9000A: return 'a';
case TYPE_DM9000B: return 'b';
}
return '?';
}
/*
* Set DM9000 multicast address
*/
static void
dm9000_hash_table_unlocked(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct netdev_hw_addr *ha;
int i, oft;
u32 hash_val;
u16 hash_table[4];
u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
dm9000_dbg(db, 1, "entering %s\n", __func__);
for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
iow(db, oft, dev->dev_addr[i]);
/* Clear Hash Table */
for (i = 0; i < 4; i++)
hash_table[i] = 0x0;
/* broadcast address */
hash_table[3] = 0x8000;
if (dev->flags & IFF_PROMISC)
rcr |= RCR_PRMSC;
if (dev->flags & IFF_ALLMULTI)
rcr |= RCR_ALL;
/* the multicast addresses in the Hash Table: 64 bits */
netdev_for_each_mc_addr(ha, dev) {
hash_val = ether_crc_le(6, ha->addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
/* Write the hash table to MAC MD table */
for (i = 0, oft = DM9000_MAR; i < 4; i++) {
iow(db, oft++, hash_table[i]);
iow(db, oft++, hash_table[i] >> 8);
}
iow(db, DM9000_RCR, rcr);
}
static void
dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&db->lock, flags);
dm9000_hash_table_unlocked(dev);
spin_unlock_irqrestore(&db->lock, flags);
}
/*
* Initialize dm9000 board
*/
static void
dm9000_init_dm9000(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
unsigned int imr;
unsigned int ncr;
dm9000_dbg(db, 1, "entering %s\n", __func__);
/* I/O mode */
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bits 7:6 hold the I/O mode */
/* Checksum mode */
if (dev->hw_features & NETIF_F_RXCSUM)
iow(db, DM9000_RCSR,
(dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* if wol is needed, then always set NCR_WAKEEN otherwise we end
* up dumping the wake events if we disable this. There is already
* a wake-mask in DM9000_WCR */
if (db->wake_supported)
ncr |= NCR_WAKEEN;
iow(db, DM9000_NCR, ncr);
/* Program operating register */
iow(db, DM9000_TCR, 0); /* TX Polling clear */
iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
iow(db, DM9000_FCR, 0xff); /* Flow Control */
iow(db, DM9000_SMCR, 0); /* Special Mode */
/* clear TX status */
iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
/* Set address filter table */
dm9000_hash_table_unlocked(dev);
imr = IMR_PAR | IMR_PTM | IMR_PRM;
if (db->type != TYPE_DM9000E)
imr |= IMR_LNKCHNG;
db->imr_all = imr;
/* Enable TX/RX interrupt mask */
iow(db, DM9000_IMR, imr);
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
dev->trans_start = jiffies;
}
/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
u8 reg_save;
unsigned long flags;
/* Save previous register address */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
dm9000_init_dm9000(dev);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
}
static void dm9000_send_packet(struct net_device *dev,
int ip_summed,
u16 pkt_len)
{
board_info_t *dm = to_dm9000_board(dev);
/* The DM9000 is not smart enough to leave fragmented packets alone. */
if (dm->ip_summed != ip_summed) {
if (ip_summed == CHECKSUM_NONE)
iow(dm, DM9000_TCCR, 0);
else
iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
dm->ip_summed = ip_summed;
}
/* Set TX length to DM9000 */
iow(dm, DM9000_TXPLL, pkt_len);
iow(dm, DM9000_TXPLH, pkt_len >> 8);
/* Issue TX polling command */
iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
}
/*
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
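/*
 * The DM9000 TX SRAM holds up to two packets: the first is transmitted
 * immediately, the second is queued in the chip and the net queue is
 * stopped until the TX-done interrupt sends it (see dm9000_tx_done()).
 */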
static int
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
board_info_t *db = netdev_priv(dev);
dm9000_dbg(db, 3, "%s:\n", __func__);
if (db->tx_pkt_cnt > 1)
return NETDEV_TX_BUSY;
spin_lock_irqsave(&db->lock, flags);
/* Move data to DM9000 TX RAM */
writeb(DM9000_MWCMD, db->io_addr);
(db->outblk)(db->io_data, skb->data, skb->len);
dev->stats.tx_bytes += skb->len;
db->tx_pkt_cnt++;
/* TX control: First packet immediately send, second packet queue */
if (db->tx_pkt_cnt == 1) {
dm9000_send_packet(dev, skb->ip_summed, skb->len);
} else {
/* Second packet */
db->queue_pkt_len = skb->len;
db->queue_ip_summed = skb->ip_summed;
netif_stop_queue(dev);
}
spin_unlock_irqrestore(&db->lock, flags);
/* free this SKB */
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/*
* DM9000 interrupt handler
* receive the packet to upper layer, free the transmitted packet
*/
static void dm9000_tx_done(struct net_device *dev, board_info_t *db)
{
int tx_status = ior(db, DM9000_NSR); /* Got TX status */
if (tx_status & (NSR_TX2END | NSR_TX1END)) {
/* One packet sent complete */
db->tx_pkt_cnt--;
dev->stats.tx_packets++;
if (netif_msg_tx_done(db))
dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
/* Queue packet check & send */
if (db->tx_pkt_cnt > 0)
dm9000_send_packet(dev, db->queue_ip_summed,
db->queue_pkt_len);
netif_wake_queue(dev);
}
}
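/* 4-byte status header the DM9000 prepends to each frame in RX SRAM */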
struct dm9000_rxhdr {
u8 RxPktReady;
u8 RxStatus;
__le16 RxLen;
} __packed;
/*
* Received a packet and pass to upper layer
*/
static void
dm9000_rx(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct dm9000_rxhdr rxhdr;
struct sk_buff *skb;
u8 rxbyte, *rdptr;
bool GoodPacket;
int RxLen;
/* Check packet ready or not */
do {
ior(db, DM9000_MRCMDX); /* Dummy read */
/* Get most updated data */
rxbyte = readb(db->io_data);
/* Status check: this byte must be 0 or 1 */
if (rxbyte & DM9000_PKT_ERR) {
dev_warn(db->dev, "status check fail: %d\n", rxbyte);
iow(db, DM9000_RCR, 0x00); /* Stop Device */
iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
return;
}
if (!(rxbyte & DM9000_PKT_RDY))
return;
/* A packet ready now & Get status/length */
GoodPacket = true;
writeb(DM9000_MRCMD, db->io_addr);
(db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
RxLen = le16_to_cpu(rxhdr.RxLen);
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RX: status %02x, length %04x\n",
rxhdr.RxStatus, RxLen);
/* Packet Status check */
if (RxLen < 0x40) {
GoodPacket = false;
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
}
if (RxLen > DM9000_PKT_MAX) {
dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
}
/* rxhdr.RxStatus is identical to RSR register. */
if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
RSR_PLE | RSR_RWTO |
RSR_LCS | RSR_RF)) {
GoodPacket = false;
if (rxhdr.RxStatus & RSR_FOE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "fifo error\n");
dev->stats.rx_fifo_errors++;
}
if (rxhdr.RxStatus & RSR_CE) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "crc error\n");
dev->stats.rx_crc_errors++;
}
if (rxhdr.RxStatus & RSR_RF) {
if (netif_msg_rx_err(db))
dev_dbg(db->dev, "length error\n");
dev->stats.rx_length_errors++;
}
}
/* Move data from DM9000 */
if (GoodPacket &&
((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
skb_reserve(skb, 2);
rdptr = (u8 *) skb_put(skb, RxLen - 4);
/* Read received packet from RX SRAM */
(db->inblk)(db->io_data, rdptr, RxLen);
dev->stats.rx_bytes += RxLen;
/* Pass to upper layer */
skb->protocol = eth_type_trans(skb, dev);
if (dev->features & NETIF_F_RXCSUM) {
if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
}
netif_rx(skb);
dev->stats.rx_packets++;
} else {
/* need to dump the packet's data */
(db->dumpblk)(db->io_data, RxLen);
}
} while (rxbyte & DM9000_PKT_RDY);
}
static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
board_info_t *db = netdev_priv(dev);
int int_status;
unsigned long flags;
u8 reg_save;
dm9000_dbg(db, 3, "entering %s\n", __func__);
/* A real interrupt coming */
/* holders of db->lock must always block IRQs */
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Disable all interrupts */
iow(db, DM9000_IMR, IMR_PAR);
/* Got DM9000 interrupt status */
int_status = ior(db, DM9000_ISR); /* Got ISR */
iow(db, DM9000_ISR, int_status); /* Clear ISR status */
if (netif_msg_intr(db))
dev_dbg(db->dev, "interrupt status %02x\n", int_status);
/* Received the coming packet */
if (int_status & ISR_PRS)
dm9000_rx(dev);
/* Transmit interrupt check */
if (int_status & ISR_PTS)
dm9000_tx_done(dev, db);
if (db->type != TYPE_DM9000E) {
if (int_status & ISR_LNKCHNG) {
/* fire a link-change request */
schedule_delayed_work(&db->phy_poll, 1);
}
}
/* Re-enable interrupt mask */
iow(db, DM9000_IMR, db->imr_all);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned nsr, wcr;
spin_lock_irqsave(&db->lock, flags);
nsr = ior(db, DM9000_NSR);
wcr = ior(db, DM9000_WCR);
dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
if (nsr & NSR_WAKEST) {
/* clear the wake status so it is not handled again */
iow(db, DM9000_NSR, NSR_WAKEST);
if (wcr & WCR_LINKST)
dev_info(db->dev, "wake by link status change\n");
if (wcr & WCR_SAMPLEST)
dev_info(db->dev, "wake by sample packet\n");
if (wcr & WCR_MAGICST)
dev_info(db->dev, "wake by magic packet\n");
if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
dev_err(db->dev, "wake signalled with no reason? "
"NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
}
spin_unlock_irqrestore(&db->lock, flags);
return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Used by netconsole
*/
static void dm9000_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
dm9000_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
/*
* Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
*/
static int
dm9000_open(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
/* If there is no IRQ type specified, default to something that
* may work, and tell the user that this is a problem */
if (irqflags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irqflags |= IRQF_SHARED;
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needed by DM9000B */
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
/* Init driver variable */
db->dbug_cnt = 0;
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
dm9000_schedule_poll(db);
return 0;
}
/*
* Sleep, either by using msleep() or if we are suspending, then
* use mdelay() to sleep.
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
if (db->in_suspend)
mdelay(ms);
else
msleep(ms);
}
/*
* Read a word from phyxcer
*/
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned int reg_save;
int ret;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait read complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
/* The read data keeps on REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
/*
* Write a word to phyxcer
*/
static void
dm9000_phy_write(struct net_device *dev,
int phyaddr_unused, int reg, int value)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Fill the written data into REG_0D & REG_0E */
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait write complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
static void
dm9000_shutdown(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
/* RESET device */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
iow(db, DM9000_RCR, 0x00); /* Disable RX */
}
/*
* Stop the interface.
 * The interface is stopped when it is brought down.
*/
static int
dm9000_stop(struct net_device *ndev)
{
board_info_t *db = netdev_priv(ndev);
if (netif_msg_ifdown(db))
dev_dbg(db->dev, "shutting down %s\n", ndev->name);
cancel_delayed_work_sync(&db->phy_poll);
netif_stop_queue(ndev);
netif_carrier_off(ndev);
/* free interrupt */
free_irq(ndev->irq, ndev);
dm9000_shutdown(ndev);
return 0;
}
static const struct net_device_ops dm9000_netdev_ops = {
.ndo_open = dm9000_open,
.ndo_stop = dm9000_stop,
.ndo_start_xmit = dm9000_start_xmit,
.ndo_tx_timeout = dm9000_timeout,
.ndo_set_rx_mode = dm9000_hash_table,
.ndo_do_ioctl = dm9000_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_features = dm9000_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = dm9000_poll_controller,
#endif
};
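/*
 * A board file registers this driver as a platform device with two
 * memory resources (the address port, then the data port) and one IRQ
 * resource, plus optional dm9000_plat_data; the IRQ resource's trigger
 * flags, when set, are used by dm9000_open(). A minimal sketch (all
 * addresses, sizes and the IRQ number below are hypothetical):
 *
 *	static struct resource dm9000_resources[] = {
 *		DEFINE_RES_MEM(0x20000000, 4),
 *		DEFINE_RES_MEM(0x20000004, 4),
 *		DEFINE_RES_IRQ(IRQ_BOARD_ETH),
 *	};
 *	static struct dm9000_plat_data dm9000_pdata = {
 *		.flags = DM9000_PLATF_16BITONLY,
 *	};
 *	static struct platform_device dm9000_device = {
 *		.name		= "dm9000",
 *		.id		= 0,
 *		.num_resources	= ARRAY_SIZE(dm9000_resources),
 *		.resource	= dm9000_resources,
 *		.dev		= {
 *			.platform_data = &dm9000_pdata,
 *		},
 *	};
 */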
/*
* Search DM9000 board, allocate space and register it
*/
static int __devinit
dm9000_probe(struct platform_device *pdev)
{
struct dm9000_plat_data *pdata = pdev->dev.platform_data;
struct board_info *db; /* Point a board information structure */
struct net_device *ndev;
const unsigned char *mac_src;
int ret = 0;
int iosize;
int i;
u32 id_val;
/* Init network device */
ndev = alloc_etherdev(sizeof(struct board_info));
if (!ndev)
return -ENOMEM;
SET_NETDEV_DEV(ndev, &pdev->dev);
dev_dbg(&pdev->dev, "dm9000_probe()\n");
/* setup board info structure */
db = netdev_priv(ndev);
db->dev = &pdev->dev;
db->ndev = ndev;
spin_lock_init(&db->lock);
mutex_init(&db->addr_lock);
INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (db->addr_res == NULL || db->data_res == NULL ||
db->irq_res == NULL) {
dev_err(db->dev, "insufficient resources\n");
ret = -ENOENT;
goto out;
}
db->irq_wake = platform_get_irq(pdev, 1);
if (db->irq_wake >= 0) {
dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
IRQF_SHARED, dev_name(db->dev), ndev);
if (ret) {
dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
} else {
/* test to see if irq is really wakeup capable */
ret = irq_set_irq_wake(db->irq_wake, 1);
if (ret) {
dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
db->irq_wake, ret);
ret = 0;
} else {
irq_set_irq_wake(db->irq_wake, 0);
db->wake_supported = 1;
}
}
}
iosize = resource_size(db->addr_res);
db->addr_req = request_mem_region(db->addr_res->start, iosize,
pdev->name);
if (db->addr_req == NULL) {
dev_err(db->dev, "cannot claim address reg area\n");
ret = -EIO;
goto out;
}
db->io_addr = ioremap(db->addr_res->start, iosize);
if (db->io_addr == NULL) {
dev_err(db->dev, "failed to ioremap address reg\n");
ret = -EINVAL;
goto out;
}
iosize = resource_size(db->data_res);
db->data_req = request_mem_region(db->data_res->start, iosize,
pdev->name);
if (db->data_req == NULL) {
dev_err(db->dev, "cannot claim data reg area\n");
ret = -EIO;
goto out;
}
db->io_data = ioremap(db->data_res->start, iosize);
if (db->io_data == NULL) {
dev_err(db->dev, "failed to ioremap data reg\n");
ret = -EINVAL;
goto out;
}
/* fill in parameters for net-dev structure */
ndev->base_addr = (unsigned long)db->io_addr;
ndev->irq = db->irq_res->start;
/* ensure at least we have a default set of IO routines */
dm9000_set_io(db, iosize);
/* check to see if anything is being over-ridden */
if (pdata != NULL) {
/* check to see if the driver wants to over-ride the
* default IO width */
if (pdata->flags & DM9000_PLATF_8BITONLY)
dm9000_set_io(db, 1);
if (pdata->flags & DM9000_PLATF_16BITONLY)
dm9000_set_io(db, 2);
if (pdata->flags & DM9000_PLATF_32BITONLY)
dm9000_set_io(db, 4);
/* check to see if there are any IO routine
* over-rides */
if (pdata->inblk != NULL)
db->inblk = pdata->inblk;
if (pdata->outblk != NULL)
db->outblk = pdata->outblk;
if (pdata->dumpblk != NULL)
db->dumpblk = pdata->dumpblk;
db->flags = pdata->flags;
}
#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
dm9000_reset(db);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
id_val = ior(db, DM9000_VIDL);
id_val |= (u32)ior(db, DM9000_VIDH) << 8;
id_val |= (u32)ior(db, DM9000_PIDL) << 16;
id_val |= (u32)ior(db, DM9000_PIDH) << 24;
if (id_val == DM9000_ID)
break;
dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
}
if (id_val != DM9000_ID) {
dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
ret = -ENODEV;
goto out;
}
/* Identify what type of DM9000 we are working on */
id_val = ior(db, DM9000_CHIPR);
dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
switch (id_val) {
case CHIPR_DM9000A:
db->type = TYPE_DM9000A;
break;
case CHIPR_DM9000B:
db->type = TYPE_DM9000B;
break;
default:
dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
db->type = TYPE_DM9000E;
}
/* dm9000a/b are capable of hardware checksum offload */
if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
ndev->features |= ndev->hw_features;
}
/* from this point we assume that we have found a DM9000 */
/* driver system function */
ether_setup(ndev);
ndev->netdev_ops = &dm9000_netdev_ops;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
ndev->ethtool_ops = &dm9000_ethtool_ops;
db->msg_enable = NETIF_MSG_LINK;
db->mii.phy_id_mask = 0x1f;
db->mii.reg_num_mask = 0x1f;
db->mii.force_media = 0;
db->mii.full_duplex = 0;
db->mii.dev = ndev;
db->mii.mdio_read = dm9000_phy_read;
db->mii.mdio_write = dm9000_phy_write;
mac_src = "eeprom";
/* try reading the node address from the attached EEPROM */
for (i = 0; i < 6; i += 2)
dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
memcpy(ndev->dev_addr, pdata->dev_addr, 6);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
mac_src = "chip";
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
eth_hw_addr_random(ndev);
mac_src = "random";
}
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
if (ret == 0)
printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
ndev->name, dm9000_type_to_char(db->type),
db->io_addr, db->io_data, ndev->irq,
ndev->dev_addr, mac_src);
return 0;
out:
dev_err(db->dev, "not found (%d).\n", ret);
dm9000_release_board(pdev, db);
free_netdev(ndev);
return ret;
}
static int
dm9000_drv_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
board_info_t *db;
if (ndev) {
db = netdev_priv(ndev);
db->in_suspend = 1;
if (!netif_running(ndev))
return 0;
netif_device_detach(ndev);
/* only shutdown if not using WoL */
if (!db->wake_state)
dm9000_shutdown(ndev);
}
return 0;
}
static int
dm9000_drv_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
board_info_t *db = netdev_priv(ndev);
if (ndev) {
if (netif_running(ndev)) {
/* reset if we were not in wake mode to ensure if
* the device was powered off it is in a known state */
if (!db->wake_state) {
dm9000_reset(db);
dm9000_init_dm9000(ndev);
}
netif_device_attach(ndev);
}
db->in_suspend = 0;
}
return 0;
}
static const struct dev_pm_ops dm9000_drv_pm_ops = {
.suspend = dm9000_drv_suspend,
.resume = dm9000_drv_resume,
};
static int __devexit
dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
dm9000_release_board(pdev, netdev_priv(ndev));
free_netdev(ndev); /* free device structure */
dev_dbg(&pdev->dev, "released and freed device\n");
return 0;
}
static struct platform_driver dm9000_driver = {
.driver = {
.name = "dm9000",
.owner = THIS_MODULE,
.pm = &dm9000_drv_pm_ops,
},
.probe = dm9000_probe,
.remove = __devexit_p(dm9000_drv_remove),
};
static int __init
dm9000_init(void)
{
printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
return platform_driver_register(&dm9000_driver);
}
static void __exit
dm9000_cleanup(void)
{
platform_driver_unregister(&dm9000_driver);
}
module_init(dm9000_init);
module_exit(dm9000_cleanup);
MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dm9000");
| gpl-2.0 |
MoKee/android_kernel_zte_x9180 | drivers/input/joystick/tdisc_vtd518_shinetsu.c | 3495 | 13818 | /* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/input/tdisc_shinetsu.h>
#if defined(CONFIG_HAS_EARLYSUSPEND)
#include <linux/earlysuspend.h>
/* Early-suspend level */
#define TDISC_SUSPEND_LEVEL 1
#endif
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Shinetsu Touchdisc driver");
MODULE_ALIAS("platform:tdisc-shinetsu");
#define TDISC_BLK_READ_CMD 0x00
#define TDISC_READ_DELAY msecs_to_jiffies(25)
#define X_MAX (32)
#define X_MIN (-32)
#define Y_MAX (32)
#define Y_MIN (-32)
#define PRESSURE_MAX (32)
#define PRESSURE_MIN (0)
#define TDISC_USER_ACTIVE_MASK 0x40
#define TDISC_NORTH_SWITCH_MASK 0x20
#define TDISC_SOUTH_SWITCH_MASK 0x10
#define TDISC_EAST_SWITCH_MASK 0x08
#define TDISC_WEST_SWITCH_MASK 0x04
#define TDISC_CENTER_SWITCH 0x01
#define TDISC_BUTTON_PRESS_MASK 0x3F
#define DRIVER_NAME "tdisc-shinetsu"
#define DEVICE_NAME "vtd518"
#define TDISC_NAME "tdisc_shinetsu"
#define TDISC_INT "tdisc_interrupt"
struct tdisc_data {
struct input_dev *tdisc_device;
struct i2c_client *clientp;
struct tdisc_platform_data *pdata;
struct delayed_work tdisc_work;
#if defined(CONFIG_HAS_EARLYSUSPEND)
struct early_suspend tdisc_early_suspend;
#endif
};
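/*
 * Layout of the 8-byte block read from the device (as consumed by
 * process_tdisc_data() below): bytes 0/1 are signed relative X/Y,
 * bytes 2-4 absolute X, Y and pressure, byte 6 a signed wheel delta,
 * and byte 7 the switch/status bits defined above.
 */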
static void process_tdisc_data(struct tdisc_data *dd, u8 *data)
{
int i;
static bool button_press;
s8 x, y;
/* Check if the user is actively navigating */
if (!(data[7] & TDISC_USER_ACTIVE_MASK)) {
pr_debug(" TDISC ! No Data to report ! False positive \n");
return;
}
for (i = 0; i < 8 ; i++)
pr_debug(" Data[%d] = %x\n", i, data[i]);
/* Check if there is a button press */
if (dd->pdata->tdisc_report_keys)
if ((data[7] & TDISC_BUTTON_PRESS_MASK) || button_press) {
input_report_key(dd->tdisc_device, KEY_UP,
(data[7] & TDISC_NORTH_SWITCH_MASK));
input_report_key(dd->tdisc_device, KEY_DOWN,
(data[7] & TDISC_SOUTH_SWITCH_MASK));
input_report_key(dd->tdisc_device, KEY_RIGHT,
(data[7] & TDISC_EAST_SWITCH_MASK));
input_report_key(dd->tdisc_device, KEY_LEFT,
(data[7] & TDISC_WEST_SWITCH_MASK));
input_report_key(dd->tdisc_device, KEY_ENTER,
(data[7] & TDISC_CENTER_SWITCH));
if (data[7] & TDISC_BUTTON_PRESS_MASK)
button_press = true;
else
button_press = false;
}
if (dd->pdata->tdisc_report_relative) {
/* Report relative motion values */
x = (s8) data[0];
y = (s8) data[1];
if (dd->pdata->tdisc_reverse_x)
x *= -1;
if (dd->pdata->tdisc_reverse_y)
y *= -1;
input_report_rel(dd->tdisc_device, REL_X, x);
input_report_rel(dd->tdisc_device, REL_Y, y);
}
if (dd->pdata->tdisc_report_absolute) {
input_report_abs(dd->tdisc_device, ABS_X, data[2]);
input_report_abs(dd->tdisc_device, ABS_Y, data[3]);
input_report_abs(dd->tdisc_device, ABS_PRESSURE, data[4]);
}
if (dd->pdata->tdisc_report_wheel)
input_report_rel(dd->tdisc_device, REL_WHEEL, (s8) data[6]);
input_sync(dd->tdisc_device);
}
static void tdisc_work_f(struct work_struct *work)
{
int rc;
u8 data[8];
struct tdisc_data *dd =
container_of(work, struct tdisc_data, tdisc_work.work);
/*
* Read the value of the interrupt pin. If low, perform
* an I2C read of 8 bytes to get the touch values and then
* reschedule the work after 25ms. If pin is high, exit
* and wait for next interrupt.
*/
rc = gpio_get_value_cansleep(dd->pdata->tdisc_gpio);
if (rc < 0) {
rc = pm_runtime_put_sync(&dd->clientp->dev);
if (rc < 0)
dev_dbg(&dd->clientp->dev, "%s: pm_runtime_put_sync"
" failed\n", __func__);
enable_irq(dd->clientp->irq);
return;
}
pr_debug("%s: TDISC gpio_get_value = %d\n", __func__, rc);
if (rc == 0) {
/* We have data to read */
rc = i2c_smbus_read_i2c_block_data(dd->clientp,
TDISC_BLK_READ_CMD, 8, data);
if (rc < 0) {
pr_debug("%s:I2C read failed,trying again\n", __func__);
rc = i2c_smbus_read_i2c_block_data(dd->clientp,
TDISC_BLK_READ_CMD, 8, data);
if (rc < 0) {
pr_err("%s:I2C read failed again, exiting\n",
__func__);
goto fail_i2c_read;
}
}
pr_debug("%s: TDISC: I2C read success\n", __func__);
process_tdisc_data(dd, data);
} else {
/*
* We have no data to read.
* Enable the IRQ to receive further interrupts.
*/
enable_irq(dd->clientp->irq);
rc = pm_runtime_put_sync(&dd->clientp->dev);
if (rc < 0)
dev_dbg(&dd->clientp->dev, "%s: pm_runtime_put_sync"
" failed\n", __func__);
return;
}
fail_i2c_read:
schedule_delayed_work(&dd->tdisc_work, TDISC_READ_DELAY);
}
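/*
 * Worked example: TDISC_READ_DELAY is msecs_to_jiffies(25), so with
 * HZ=100 the 25 ms poll rounds up to 3 jiffies (30 ms); with HZ=1000
 * it is exactly 25 jiffies. The disc is therefore re-read roughly
 * every 25-30 ms for as long as the interrupt line stays low.
 */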
static irqreturn_t tdisc_interrupt(int irq, void *dev_id)
{
/*
* The touch disc initially generates an interrupt on any
* touch. The interrupt line is pulled low and remains low
* while touch operations are being performed. Once there are
* no further touch operations, the line goes high. The same
* process repeats the next time the disc is touched.
*
* We do the following operations once we receive an interrupt:
* 1. Disable the IRQ for any further interrupts.
* 2. Schedule work every 25 ms while the GPIO is still low.
* 3. In the work queue, do an I2C read to get the touch data.
* 4. If the GPIO is pulled high, enable the IRQ and cancel the work.
*/
struct tdisc_data *dd = dev_id;
int rc;
rc = pm_runtime_get(&dd->clientp->dev);
if (rc < 0)
dev_dbg(&dd->clientp->dev, "%s: pm_runtime_get"
" failed\n", __func__);
pr_debug("%s: TDISC IRQ ! :-)\n", __func__);
/* Schedule the work immediately */
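/* nosync variant: we are in this IRQ's own handler and must not wait for it */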
disable_irq_nosync(dd->clientp->irq);
schedule_delayed_work(&dd->tdisc_work, 0);
return IRQ_HANDLED;
}
static int tdisc_open(struct input_dev *dev)
{
int rc;
struct tdisc_data *dd = input_get_drvdata(dev);
if (!dd->clientp) {
/* Check if a valid i2c client is present */
pr_err("%s: no i2c adapter present \n", __func__);
return -ENODEV;
}
/* Enable the device */
if (dd->pdata->tdisc_enable != NULL) {
rc = dd->pdata->tdisc_enable();
if (rc)
goto fail_open;
}
rc = request_any_context_irq(dd->clientp->irq, tdisc_interrupt,
IRQF_TRIGGER_FALLING, TDISC_INT, dd);
if (rc < 0) {
pr_err("%s: request IRQ failed\n", __func__);
goto fail_irq_open;
}
return 0;
fail_irq_open:
if (dd->pdata->tdisc_disable != NULL)
dd->pdata->tdisc_disable();
fail_open:
return rc;
}
static void tdisc_close(struct input_dev *dev)
{
struct tdisc_data *dd = input_get_drvdata(dev);
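/*
 * Note the ordering below: the IRQ is freed before the delayed work
 * is cancelled, so the handler cannot requeue work behind
 * cancel_delayed_work_sync()'s back.
 */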
free_irq(dd->clientp->irq, dd);
cancel_delayed_work_sync(&dd->tdisc_work);
if (dd->pdata->tdisc_disable != NULL)
dd->pdata->tdisc_disable();
}
static int __devexit tdisc_remove(struct i2c_client *client)
{
struct tdisc_data *dd;
pm_runtime_disable(&client->dev);
dd = i2c_get_clientdata(client);
#ifdef CONFIG_HAS_EARLYSUSPEND
unregister_early_suspend(&dd->tdisc_early_suspend);
#endif
input_unregister_device(dd->tdisc_device);
if (dd->pdata->tdisc_release != NULL)
dd->pdata->tdisc_release();
i2c_set_clientdata(client, NULL);
kfree(dd);
return 0;
}
#ifdef CONFIG_PM
static int tdisc_suspend(struct device *dev)
{
int rc;
struct tdisc_data *dd;
dd = dev_get_drvdata(dev);
if (device_may_wakeup(&dd->clientp->dev))
enable_irq_wake(dd->clientp->irq);
else {
disable_irq(dd->clientp->irq);
if (cancel_delayed_work_sync(&dd->tdisc_work))
enable_irq(dd->clientp->irq);
if (dd->pdata->tdisc_disable) {
rc = dd->pdata->tdisc_disable();
if (rc) {
pr_err("%s: Suspend failed\n", __func__);
return rc;
}
}
}
return 0;
}
static int tdisc_resume(struct device *dev)
{
int rc;
struct tdisc_data *dd;
dd = dev_get_drvdata(dev);
if (device_may_wakeup(&dd->clientp->dev))
disable_irq_wake(dd->clientp->irq);
else {
if (dd->pdata->tdisc_enable) {
rc = dd->pdata->tdisc_enable();
if (rc) {
pr_err("%s: Resume failed\n", __func__);
return rc;
}
}
enable_irq(dd->clientp->irq);
}
return 0;
}
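/*
 * Whether the disc may wake the system is decided at probe time by
 * device_init_wakeup() from pdata->tdisc_wakeup; the suspend/resume
 * pair above then either arms the IRQ as a wake source
 * (enable_irq_wake) or powers the device down completely.
 */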
#ifdef CONFIG_HAS_EARLYSUSPEND
static void tdisc_early_suspend(struct early_suspend *h)
{
struct tdisc_data *dd = container_of(h, struct tdisc_data,
tdisc_early_suspend);
tdisc_suspend(&dd->clientp->dev);
}
static void tdisc_late_resume(struct early_suspend *h)
{
struct tdisc_data *dd = container_of(h, struct tdisc_data,
tdisc_early_suspend);
tdisc_resume(&dd->clientp->dev);
}
#endif
static struct dev_pm_ops tdisc_pm_ops = {
#ifndef CONFIG_HAS_EARLYSUSPEND
.suspend = tdisc_suspend,
.resume = tdisc_resume,
#endif
};
#endif
static const struct i2c_device_id tdisc_id[] = {
{ DEVICE_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tdisc_id);
static int __devinit tdisc_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int rc = -1;
int x_max, x_min, y_max, y_min, pressure_min, pressure_max;
struct tdisc_platform_data *pd;
struct tdisc_data *dd;
/* Check if the I2C adapter supports the BLOCK READ functionality */
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
/* Enable runtime PM ops, start in ACTIVE mode */
rc = pm_runtime_set_active(&client->dev);
if (rc < 0)
dev_dbg(&client->dev, "unable to set runtime pm state\n");
pm_runtime_enable(&client->dev);
dd = kzalloc(sizeof *dd, GFP_KERNEL);
if (!dd) {
rc = -ENOMEM;
goto probe_exit;
}
i2c_set_clientdata(client, dd);
dd->clientp = client;
pd = client->dev.platform_data;
if (!pd) {
pr_err("%s: platform data not set \n", __func__);
rc = -EFAULT;
goto probe_free_exit;
}
dd->pdata = pd;
dd->tdisc_device = input_allocate_device();
if (!dd->tdisc_device) {
rc = -ENOMEM;
goto probe_free_exit;
}
input_set_drvdata(dd->tdisc_device, dd);
dd->tdisc_device->open = tdisc_open;
dd->tdisc_device->close = tdisc_close;
dd->tdisc_device->name = TDISC_NAME;
dd->tdisc_device->id.bustype = BUS_I2C;
dd->tdisc_device->id.product = 1;
dd->tdisc_device->id.version = 1;
if (pd->tdisc_abs) {
x_max = pd->tdisc_abs->x_max;
x_min = pd->tdisc_abs->x_min;
y_max = pd->tdisc_abs->y_max;
y_min = pd->tdisc_abs->y_min;
pressure_max = pd->tdisc_abs->pressure_max;
pressure_min = pd->tdisc_abs->pressure_min;
} else {
x_max = X_MAX;
x_min = X_MIN;
y_max = Y_MAX;
y_min = Y_MIN;
pressure_max = PRESSURE_MAX;
pressure_min = PRESSURE_MIN;
}
/* Device capabilities for relative motion */
input_set_capability(dd->tdisc_device, EV_REL, REL_X);
input_set_capability(dd->tdisc_device, EV_REL, REL_Y);
input_set_capability(dd->tdisc_device, EV_KEY, BTN_MOUSE);
/* Device capabilities for absolute motion */
input_set_capability(dd->tdisc_device, EV_ABS, ABS_X);
input_set_capability(dd->tdisc_device, EV_ABS, ABS_Y);
input_set_capability(dd->tdisc_device, EV_ABS, ABS_PRESSURE);
input_set_abs_params(dd->tdisc_device, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dd->tdisc_device, ABS_Y, y_min, y_max, 0, 0);
input_set_abs_params(dd->tdisc_device, ABS_PRESSURE, pressure_min,
pressure_max, 0, 0);
/* Device capabilities for scroll and buttons */
input_set_capability(dd->tdisc_device, EV_REL, REL_WHEEL);
input_set_capability(dd->tdisc_device, EV_KEY, KEY_LEFT);
input_set_capability(dd->tdisc_device, EV_KEY, KEY_RIGHT);
input_set_capability(dd->tdisc_device, EV_KEY, KEY_UP);
input_set_capability(dd->tdisc_device, EV_KEY, KEY_DOWN);
input_set_capability(dd->tdisc_device, EV_KEY, KEY_ENTER);
/* Setup the device for operation */
if (dd->pdata->tdisc_setup != NULL) {
rc = dd->pdata->tdisc_setup();
if (rc) {
pr_err("%s: Setup failed \n", __func__);
goto probe_unreg_free_exit;
}
}
/* Setup wakeup capability */
device_init_wakeup(&dd->clientp->dev, dd->pdata->tdisc_wakeup);
INIT_DELAYED_WORK(&dd->tdisc_work, tdisc_work_f);
rc = input_register_device(dd->tdisc_device);
if (rc) {
pr_err("%s: input register device failed \n", __func__);
rc = -EINVAL;
goto probe_register_fail;
}
pm_runtime_set_suspended(&client->dev);
#ifdef CONFIG_HAS_EARLYSUSPEND
dd->tdisc_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
TDISC_SUSPEND_LEVEL;
dd->tdisc_early_suspend.suspend = tdisc_early_suspend;
dd->tdisc_early_suspend.resume = tdisc_late_resume;
register_early_suspend(&dd->tdisc_early_suspend);
#endif
return 0;
probe_register_fail:
if (dd->pdata->tdisc_release != NULL)
dd->pdata->tdisc_release();
probe_unreg_free_exit:
input_free_device(dd->tdisc_device);
probe_free_exit:
i2c_set_clientdata(client, NULL);
kfree(dd);
probe_exit:
pm_runtime_set_suspended(&client->dev);
pm_runtime_disable(&client->dev);
return rc;
}
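/*
 * A hypothetical board-file binding (an illustrative sketch, not part
 * of the original driver): the field names mirror how tdisc_probe()
 * dereferences its platform data, but the authoritative struct lives
 * in <linux/input/tdisc_shinetsu.h>, and the helper names, GPIO
 * number and I2C address below are invented for illustration.
 */
#if 0 /* illustration only, never built */
static struct tdisc_platform_data demo_tdisc_pdata = {
.tdisc_enable = demo_tdisc_power_on, /* hypothetical board helpers */
.tdisc_disable = demo_tdisc_power_off,
.tdisc_wakeup = 1, /* may wake the system from suspend */
.tdisc_gpio = 42, /* interrupt line, low while the disc has data */
.tdisc_report_relative = true,
.tdisc_report_wheel = true,
};
static struct i2c_board_info demo_i2c_devs[] __initdata = {
{
I2C_BOARD_INFO(DEVICE_NAME, 0x67), /* address is hypothetical */
.platform_data = &demo_tdisc_pdata,
},
};
#endif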
static struct i2c_driver tdisc_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &tdisc_pm_ops,
#endif
},
.probe = tdisc_probe,
.remove = __devexit_p(tdisc_remove),
.id_table = tdisc_id,
};
static int __init tdisc_init(void)
{
int rc;
rc = i2c_add_driver(&tdisc_driver);
if (rc)
pr_err("%s: i2c add driver failed \n", __func__);
return rc;
}
static void __exit tdisc_exit(void)
{
i2c_del_driver(&tdisc_driver);
}
module_init(tdisc_init);
module_exit(tdisc_exit);
| gpl-2.0 |
jamison904/Nexus7_kernel | drivers/gpu/drm/ttm/ttm_bo_util.c | 4519 | 16906 | /**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
bool evict, bool no_wait_reserve,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
}
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
if (unlikely(ret != 0))
return ret;
if (new_mem->mem_type != TTM_PL_SYSTEM) {
ret = ttm_tt_bind(ttm, new_mem);
if (unlikely(ret != 0))
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
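/*
 * Note: this is the copy-free move path. Leaving a non-system
 * placement just unbinds the TTM and frees the old node, and entering
 * one rebinds the TTM at the new placement after its caching state
 * has been fixed up.
 */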
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
if (likely(man->io_reserve_fastpath))
return 0;
if (interruptible)
return mutex_lock_interruptible(&man->io_reserve_mutex);
mutex_lock(&man->io_reserve_mutex);
return 0;
}
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
if (likely(man->io_reserve_fastpath))
return;
mutex_unlock(&man->io_reserve_mutex);
}
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
struct ttm_buffer_object *bo;
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
return -EAGAIN;
bo = list_first_entry(&man->io_reserve_lru,
struct ttm_buffer_object,
io_reserve_lru);
list_del_init(&bo->io_reserve_lru);
ttm_bo_unmap_virtual_locked(bo);
return 0;
}
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
if (!bdev->driver->io_mem_reserve)
return 0;
if (likely(man->io_reserve_fastpath))
return bdev->driver->io_mem_reserve(bdev, mem);
if (bdev->driver->io_mem_reserve &&
mem->bus.io_reserved_count++ == 0) {
retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
if (ret == -EAGAIN) {
ret = ttm_mem_io_evict(man);
if (ret == 0)
goto retry;
}
}
return ret;
}
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
if (likely(man->io_reserve_fastpath))
return;
if (bdev->driver->io_mem_reserve &&
--mem->bus.io_reserved_count == 0 &&
bdev->driver->io_mem_free)
bdev->driver->io_mem_free(bdev, mem);
}
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
int ret;
if (!mem->bus.io_reserved_vm) {
struct ttm_mem_type_manager *man =
&bo->bdev->man[mem->mem_type];
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
mem->bus.io_reserved_vm = true;
if (man->use_io_reserve_lru)
list_add_tail(&bo->io_reserve_lru,
&man->io_reserve_lru);
}
return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
if (mem->bus.io_reserved_vm) {
mem->bus.io_reserved_vm = false;
list_del_init(&bo->io_reserve_lru);
ttm_mem_io_free(bo->bdev, mem);
}
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
if (mem->bus.addr) {
addr = mem->bus.addr;
} else {
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
*virtual = addr;
return 0;
}
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
man = &bdev->man[mem->mem_type];
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
iowrite32(ioread32(srcP++), dstP++);
return 0;
}
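/*
 * The helper above copies a whole page as 32-bit words through
 * ioread32()/iowrite32(), keeping the access size well-defined when
 * either side is MMIO; it relies on PAGE_SIZE being a multiple of
 * sizeof(uint32_t), which holds on all supported architectures.
 */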
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
struct page *d = ttm->pages[page];
void *dst;
if (!d)
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
dst = kmap_atomic_prot(d, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
else
dst = kmap(d);
#endif
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
kunmap_atomic(dst);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst);
else
kunmap(d);
#endif
return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
struct page *s = ttm->pages[page];
void *src;
if (!s)
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
src = kmap_atomic_prot(s, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
else
src = kmap(s);
#endif
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
#ifdef CONFIG_X86
kunmap_atomic(src);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src);
else
kunmap(s);
#endif
return 0;
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool evict, bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
if (ret)
goto out;
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
if (old_iomap == NULL && ttm == NULL)
goto out2;
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
goto out1;
}
add = 0;
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->start < old_mem->start + old_mem->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL) {
pgprot_t prot = ttm_io_prot(old_mem->placement,
PAGE_KERNEL);
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
prot);
} else if (new_iomap == NULL) {
pgprot_t prot = ttm_io_prot(new_mem->placement,
PAGE_KERNEL);
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
if (ret)
goto out1;
}
mb();
out2:
old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
ttm_tt_unbind(ttm);
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
out1:
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
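/*
 * A standalone model of the dir/add index arithmetic above (a sketch,
 * not part of the original source): when both placements share a
 * memory type and the new range begins below the end of the old one,
 * the pages are copied back-to-front, memmove-style, so that source
 * pages are read before the copy can overwrite them.
 */
static inline unsigned long ttm_demo_copy_index(unsigned long i,
int dir, unsigned long add)
{
/* dir=1, add=0 visits 0..n-1; dir=-1, add=n-1 visits n-1..0 */
return i * dir + add;
}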
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
kfree(bo);
}
/**
* ttm_buffer_object_transfer
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
* holding the data of @bo with the old placement.
*
* This is a utility function that may be called after an accelerated move
* has been scheduled. A new buffer object is created as a placeholder for
* the old data while it's being copied. When that buffer object is idle,
* it can be destroyed, releasing the space of the old placement.
* Returns:
* !0: Failure.
*/
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
*fbo = *bo;
/**
* Fix up members that we shouldn't copy directly:
* TODO: Explicit member copy would probably be better here.
*/
init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
*new_obj = fbo;
return 0;
}
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#elif defined(__powerpc__)
if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (caching_flags & TTM_PL_FLAG_UNCACHED)
pgprot_val(tmp) |= _PAGE_GUARDED;
}
#endif
#if defined(__ia64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
if (!(caching_flags & TTM_PL_FLAG_CACHED))
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
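/*
 * Summary of the #ifdef ladder above: x86 picks write-combining for
 * TTM_PL_FLAG_WC and uncached otherwise (386-class CPUs are left
 * untouched); powerpc sets _PAGE_NO_CACHE and adds _PAGE_GUARDED for
 * strictly uncached placements; ia64 chooses write-combining or
 * uncached; sparc uncaches anything not marked cached.
 */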
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long offset,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long start_page,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem;
pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
int ret;
BUG_ON(!ttm);
if (ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
}
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
*/
map->bo_kmap_type = ttm_bo_map_kmap;
map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
PAGE_KERNEL :
ttm_io_prot(mem->placement, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
}
return (!map->virtual) ? -ENOMEM : 0;
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
size = num_pages << PAGE_SHIFT;
return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
struct ttm_buffer_object *bo = map->bo;
struct ttm_mem_type_manager *man =
&bo->bdev->man[bo->mem.mem_type];
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
break;
case ttm_bo_map_kmap:
kunmap(map->page);
break;
case ttm_bo_map_premapped:
break;
default:
BUG();
}
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
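/*
 * Typical pairing of the helpers above (an illustrative sketch, not
 * part of the original source): map the first page of a reserved,
 * populated buffer object, clear it through the returned kernel
 * address, then unmap. ttm_kmap_obj_virtual() is assumed available
 * from ttm_bo_api.h and reports whether the mapping is MMIO.
 */
static int ttm_demo_clear_first_page(struct ttm_buffer_object *bo)
{
struct ttm_bo_kmap_obj map;
bool is_iomem;
void *virt;
int ret;
ret = ttm_bo_kmap(bo, 0, 1, &map);
if (ret)
return ret;
virt = ttm_kmap_obj_virtual(&map, &is_iomem);
if (is_iomem)
memset_io((void __iomem *)virt, 0, PAGE_SIZE);
else
memset(virt, 0, PAGE_SIZE);
ttm_bo_kunmap(&map);
return 0;
}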
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
bool evict, bool no_wait_reserve,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
(bo->ttm != NULL)) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise, hang it on the ghost
* bo to be unbound and destroyed.
*/
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
ttm_bo_unreserve(ghost_obj);
ttm_bo_unref(&ghost_obj);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
| gpl-2.0 |
txuki2005/TaUrUs_Kernel | drivers/i2c/busses/i2c-nuc900.c | 5031 | 14726 | /*
* linux/drivers/i2c/busses/i2c-nuc900.c
*
* Copyright (c) 2010 Nuvoton technology corporation.
*
* This driver based on S3C2410 I2C driver of Ben Dooks <ben-Y5A6D6n0/KfQXOPxS62xeg@public.gmane.org>.
* Written by Wan ZongShun <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation;version 2 of the License.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/mfp.h>
#include <mach/i2c.h>
/* nuc900 i2c registers offset */
#define CSR 0x00
#define DIVIDER 0x04
#define CMDR 0x08
#define SWR 0x0C
#define RXR 0x10
#define TXR 0x14
/* nuc900 i2c CSR register bits */
#define IRQEN 0x003
#define I2CBUSY 0x400
#define I2CSTART 0x018
#define IRQFLAG 0x004
#define ARBIT_LOST 0x200
#define SLAVE_ACK 0x800
/* nuc900 i2c CMDR register bits */
#define I2C_CMD_START 0x10
#define I2C_CMD_STOP 0x08
#define I2C_CMD_READ 0x04
#define I2C_CMD_WRITE 0x02
#define I2C_CMD_NACK 0x01
/* i2c controller state */
enum nuc900_i2c_state {
STATE_IDLE,
STATE_START,
STATE_READ,
STATE_WRITE,
STATE_STOP
};
/* i2c controller private data */
struct nuc900_i2c {
spinlock_t lock;
wait_queue_head_t wait;
struct i2c_msg *msg;
unsigned int msg_num;
unsigned int msg_idx;
unsigned int msg_ptr;
unsigned int irq;
enum nuc900_i2c_state state;
void __iomem *regs;
struct clk *clk;
struct device *dev;
struct resource *ioarea;
struct i2c_adapter adap;
};
/* nuc900_i2c_master_complete
*
* complete the message and wake up the caller, using the given return code,
* or zero to mean ok.
*/
static inline void nuc900_i2c_master_complete(struct nuc900_i2c *i2c, int ret)
{
dev_dbg(i2c->dev, "master_complete %d\n", ret);
i2c->msg_ptr = 0;
i2c->msg = NULL;
i2c->msg_idx++;
i2c->msg_num = 0;
if (ret)
i2c->msg_idx = ret;
wake_up(&i2c->wait);
}
/* irq enable/disable functions */
static inline void nuc900_i2c_disable_irq(struct nuc900_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + CSR);
writel(tmp & ~IRQEN, i2c->regs + CSR);
}
static inline void nuc900_i2c_enable_irq(struct nuc900_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + CSR);
writel(tmp | IRQEN, i2c->regs + CSR);
}
/* nuc900_i2c_message_start
*
* put the start of a message onto the bus
*/
static void nuc900_i2c_message_start(struct nuc900_i2c *i2c,
struct i2c_msg *msg)
{
unsigned int addr = (msg->addr & 0x7f) << 1;
if (msg->flags & I2C_M_RD)
addr |= 0x1;
writel(addr & 0xff, i2c->regs + TXR);
writel(I2C_CMD_START | I2C_CMD_WRITE, i2c->regs + CMDR);
}
static inline void nuc900_i2c_stop(struct nuc900_i2c *i2c, int ret)
{
dev_dbg(i2c->dev, "STOP\n");
/* stop the transfer */
i2c->state = STATE_STOP;
writel(I2C_CMD_STOP, i2c->regs + CMDR);
nuc900_i2c_master_complete(i2c, ret);
nuc900_i2c_disable_irq(i2c);
}
/* helper functions to determine the current state in the set of
* messages we are sending
*/
/* is_lastmsg()
*
* returns TRUE if the current message is the last in the set
*/
static inline int is_lastmsg(struct nuc900_i2c *i2c)
{
return i2c->msg_idx >= (i2c->msg_num - 1);
}
/* is_msglast
*
* returns TRUE if this is the last byte in the current message
*/
static inline int is_msglast(struct nuc900_i2c *i2c)
{
return i2c->msg_ptr == i2c->msg->len-1;
}
/* is_msgend
*
* returns TRUE if we reached the end of the current message
*/
static inline int is_msgend(struct nuc900_i2c *i2c)
{
return i2c->msg_ptr >= i2c->msg->len;
}
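/*
 * Note on the three helpers: is_lastmsg() is about the message array,
 * while is_msglast() fires on the final byte of the current message
 * (needed to NACK the last byte of a read in time) and is_msgend()
 * fires once the byte pointer has run past the buffer.
 */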
/* i2c_nuc900_irq_nextbyte
*
* process an interrupt and work out what to do
*/
static void i2c_nuc900_irq_nextbyte(struct nuc900_i2c *i2c,
unsigned long iicstat)
{
unsigned char byte;
switch (i2c->state) {
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
break;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
nuc900_i2c_disable_irq(i2c);
break;
case STATE_START:
/* last thing we did was send a start condition on the
* bus, or started a new i2c message
*/
if (iicstat & SLAVE_ACK &&
!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
/* ack was not received... */
dev_dbg(i2c->dev, "ack was not received\n");
nuc900_i2c_stop(i2c, -ENXIO);
break;
}
if (i2c->msg->flags & I2C_M_RD)
i2c->state = STATE_READ;
else
i2c->state = STATE_WRITE;
/* terminate the transfer if there is nothing to do
* as this is used by the i2c probe to find devices.
*/
if (is_lastmsg(i2c) && i2c->msg->len == 0) {
nuc900_i2c_stop(i2c, 0);
break;
}
if (i2c->state == STATE_READ)
goto prepare_read;
/* fall through to the write state, as we will need to
* send a byte as well
*/
case STATE_WRITE:
/* we are writing data to the device... check for the
* end of the message, and if so, work out what to do
*/
if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
if (iicstat & SLAVE_ACK) {
dev_dbg(i2c->dev, "WRITE: No Ack\n");
nuc900_i2c_stop(i2c, -ECONNREFUSED);
break;
}
}
retry_write:
if (!is_msgend(i2c)) {
byte = i2c->msg->buf[i2c->msg_ptr++];
writeb(byte, i2c->regs + TXR);
writel(I2C_CMD_WRITE, i2c->regs + CMDR);
} else if (!is_lastmsg(i2c)) {
/* we need to go to the next i2c message */
dev_dbg(i2c->dev, "WRITE: Next Message\n");
i2c->msg_ptr = 0;
i2c->msg_idx++;
i2c->msg++;
/* check to see if we need to do another message */
if (i2c->msg->flags & I2C_M_NOSTART) {
if (i2c->msg->flags & I2C_M_RD) {
/* cannot do this, the controller
* forces us to send a new START
* when we change direction
*/
nuc900_i2c_stop(i2c, -EINVAL);
}
goto retry_write;
} else {
/* send the new start */
nuc900_i2c_message_start(i2c, i2c->msg);
i2c->state = STATE_START;
}
} else {
/* send stop */
nuc900_i2c_stop(i2c, 0);
}
break;
case STATE_READ:
/* we have a byte of data in the data register, do
* something with it, and then work out whether we are
* going to do any more read/write
*/
byte = readb(i2c->regs + RXR);
i2c->msg->buf[i2c->msg_ptr++] = byte;
prepare_read:
if (is_msglast(i2c)) {
/* last byte of buffer */
if (is_lastmsg(i2c))
writel(I2C_CMD_READ | I2C_CMD_NACK,
i2c->regs + CMDR);
} else if (is_msgend(i2c)) {
/* ok, we've read the entire buffer, see if there
* is anything else we need to do
*/
if (is_lastmsg(i2c)) {
/* last message, send stop and complete */
dev_dbg(i2c->dev, "READ: Send Stop\n");
nuc900_i2c_stop(i2c, 0);
} else {
/* go to the next transfer */
dev_dbg(i2c->dev, "READ: Next Transfer\n");
i2c->msg_ptr = 0;
i2c->msg_idx++;
i2c->msg++;
writel(I2C_CMD_READ, i2c->regs + CMDR);
}
} else {
writel(I2C_CMD_READ, i2c->regs + CMDR);
}
break;
}
}
/* nuc900_i2c_irq
*
* top level IRQ servicing routine
*/
static irqreturn_t nuc900_i2c_irq(int irqno, void *dev_id)
{
struct nuc900_i2c *i2c = dev_id;
unsigned long status;
status = readl(i2c->regs + CSR);
writel(status | IRQFLAG, i2c->regs + CSR);
if (status & ARBIT_LOST) {
/* deal with arbitration loss */
dev_err(i2c->dev, "deal with arbitration loss\n");
goto out;
}
if (i2c->state == STATE_IDLE) {
dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");
goto out;
}
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent
*/
i2c_nuc900_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
}
/* nuc900_i2c_set_master
*
* get the i2c bus for a master transaction
*/
static int nuc900_i2c_set_master(struct nuc900_i2c *i2c)
{
int timeout = 400;
while (timeout-- > 0) {
if (((readl(i2c->regs + SWR) & I2CSTART) == I2CSTART) &&
((readl(i2c->regs + CSR) & I2CBUSY) == 0)) {
return 0;
}
msleep(1);
}
return -ETIMEDOUT;
}
/* nuc900_i2c_doxfer
*
* this starts an i2c transfer
*/
static int nuc900_i2c_doxfer(struct nuc900_i2c *i2c,
struct i2c_msg *msgs, int num)
{
unsigned long iicstat, timeout;
int spins = 20;
int ret;
ret = nuc900_i2c_set_master(i2c);
if (ret != 0) {
dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
ret = -EAGAIN;
goto out;
}
spin_lock_irq(&i2c->lock);
i2c->msg = msgs;
i2c->msg_num = num;
i2c->msg_ptr = 0;
i2c->msg_idx = 0;
i2c->state = STATE_START;
nuc900_i2c_message_start(i2c, msgs);
spin_unlock_irq(&i2c->lock);
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
ret = i2c->msg_idx;
/* having these next two as dev_err() makes life very
* noisy when doing an i2cdetect
*/
if (timeout == 0)
dev_dbg(i2c->dev, "timeout\n");
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
/* ensure the stop has been through the bus */
dev_dbg(i2c->dev, "waiting for bus idle\n");
/* first, try busy waiting briefly */
do {
iicstat = readl(i2c->regs + CSR);
} while ((iicstat & I2CBUSY) && --spins);
/* if that timed out sleep */
if (!spins) {
msleep(1);
iicstat = readl(i2c->regs + CSR);
}
if (iicstat & I2CBUSY)
dev_warn(i2c->dev, "timeout waiting for bus idle\n");
out:
return ret;
}
/* nuc900_i2c_xfer
*
* first port of call from the i2c bus code when a message needs
* transferring across the i2c bus.
*/
static int nuc900_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct nuc900_i2c *i2c = (struct nuc900_i2c *)adap->algo_data;
int retry;
int ret;
nuc900_i2c_enable_irq(i2c);
for (retry = 0; retry < adap->retries; retry++) {
ret = nuc900_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN)
return ret;
dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);
udelay(100);
}
return -EREMOTEIO;
}
/* declare our i2c functionality */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
static const struct i2c_algorithm nuc900_i2c_algorithm = {
.master_xfer = nuc900_i2c_xfer,
.functionality = nuc900_i2c_func,
};
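/*
 * An illustrative sketch (not part of the original driver) of how a
 * client driver would exercise the master_xfer path above: a one-byte
 * register read, i.e. a write of the register address followed by a
 * repeated-start read. "client" is a hypothetical bound i2c_client.
 */
static int nuc900_demo_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
struct i2c_msg msgs[] = {
{
.addr = client->addr,
.flags = 0, /* write */
.len = 1,
.buf = &reg,
},
{
.addr = client->addr,
.flags = I2C_M_RD, /* read */
.len = 1,
.buf = val,
},
};
/* i2c_transfer() returns the number of messages transferred */
return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
}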
/* nuc900_i2c_probe
*
* called by the bus driver when a suitable device is found
*/
static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
{
struct nuc900_i2c *i2c;
struct nuc900_platform_i2c *pdata;
struct resource *res;
int ret;
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
}
i2c = kzalloc(sizeof(struct nuc900_i2c), GFP_KERNEL);
if (!i2c) {
dev_err(&pdev->dev, "no memory for state\n");
return -ENOMEM;
}
strlcpy(i2c->adap.name, "nuc900-i2c0", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &nuc900_i2c_algorithm;
i2c->adap.retries = 2;
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
i2c->dev = &pdev->dev;
i2c->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = -ENOENT;
goto err_noclk;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
clk_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
ret = -ENOENT;
goto err_clk;
}
i2c->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (i2c->ioarea == NULL) {
dev_err(&pdev->dev, "cannot request IO\n");
ret = -ENXIO;
goto err_clk;
}
i2c->regs = ioremap(res->start, resource_size(res));
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
goto err_ioarea;
}
dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
i2c->regs, i2c->ioarea, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
mfp_set_groupg(&pdev->dev, NULL);
ret = (clk_get_rate(i2c->clk) / (pdata->bus_freq * 5)) - 1;
writel(ret & 0xffff, i2c->regs + DIVIDER);
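/*
 * Worked example for the line above (illustrative numbers): with a
 * 15 MHz APB clock and a 100 kHz bus_freq this writes
 * 15000000 / (100000 * 5) - 1 = 29 to DIVIDER; the factor of five
 * presumably reflects the controller's internal oversampling of SCL.
 */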
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs are pending)
*/
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
if (ret == 0)
ret = -ENOENT; /* don't report success for a missing IRQ */
goto err_iomap;
}
ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_SHARED,
dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
goto err_iomap;
}
/* Note, previous versions of the driver used i2c_add_adapter()
* to add the bus at any number. We now pass the bus number via
* the platform data, so if unset it will now default to always
* being bus 0.
*/
i2c->adap.nr = pdata->bus_num;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
goto err_irq;
}
platform_set_drvdata(pdev, i2c);
dev_info(&pdev->dev, "%s: NUC900 I2C adapter\n",
dev_name(&i2c->adap.dev));
return 0;
err_irq:
free_irq(i2c->irq, i2c);
err_iomap:
iounmap(i2c->regs);
err_ioarea:
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
err_clk:
clk_disable(i2c->clk);
clk_put(i2c->clk);
err_noclk:
kfree(i2c);
return ret;
}
/* nuc900_i2c_remove
*
* called when device is removed from the bus
*/
static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
{
struct nuc900_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
free_irq(i2c->irq, i2c);
clk_disable(i2c->clk);
clk_put(i2c->clk);
iounmap(i2c->regs);
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
kfree(i2c);
return 0;
}
static struct platform_driver nuc900_i2c_driver = {
.probe = nuc900_i2c_probe,
.remove = __devexit_p(nuc900_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = "nuc900-i2c0",
},
};
static int __init i2c_adap_nuc900_init(void)
{
return platform_driver_register(&nuc900_i2c_driver);
}
static void __exit i2c_adap_nuc900_exit(void)
{
platform_driver_unregister(&nuc900_i2c_driver);
}
subsys_initcall(i2c_adap_nuc900_init);
module_exit(i2c_adap_nuc900_exit);
MODULE_DESCRIPTION("NUC900 I2C Bus driver");
MODULE_AUTHOR("Wan ZongShun, <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-i2c0");
| gpl-2.0 |
paul-chambers/netgear-r7800 | git_home/linux.git/sourcecode/drivers/media/video/cx88/cx88-dvb.c | 5031 | 48367 | /*
*
* device driver for Conexant 2388x based TV cards
* MPEG Transport Stream (DVB) routines
*
* (c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
* (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/suspend.h>
#include "cx88.h"
#include "dvb-pll.h"
#include <media/v4l2-common.h>
#include "mt352.h"
#include "mt352_priv.h"
#include "cx88-vp3054-i2c.h"
#include "zl10353.h"
#include "cx22702.h"
#include "or51132.h"
#include "lgdt330x.h"
#include "s5h1409.h"
#include "xc4000.h"
#include "xc5000.h"
#include "nxt200x.h"
#include "cx24123.h"
#include "isl6421.h"
#include "tuner-simple.h"
#include "tda9887.h"
#include "s5h1411.h"
#include "stv0299.h"
#include "z0194a.h"
#include "stv0288.h"
#include "stb6000.h"
#include "cx24116.h"
#include "stv0900.h"
#include "stb6100.h"
#include "stb6100_proc.h"
#include "mb86a16.h"
#include "ds3000.h"
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable debug messages [dvb]");
static unsigned int dvb_buf_tscnt = 32;
module_param(dvb_buf_tscnt, int, 0644);
MODULE_PARM_DESC(dvb_buf_tscnt, "DVB Buffer TS count [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define dprintk(level,fmt, arg...) if (debug >= level) \
printk(KERN_DEBUG "%s/2-dvb: " fmt, core->name, ## arg)
/* ------------------------------------------------------------------ */
static int dvb_buf_setup(struct videobuf_queue *q,
unsigned int *count, unsigned int *size)
{
struct cx8802_dev *dev = q->priv_data;
dev->ts_packet_size = 188 * 4;
dev->ts_packet_count = dvb_buf_tscnt;
*size = dev->ts_packet_size * dev->ts_packet_count;
*count = dvb_buf_tscnt;
return 0;
}
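/*
 * Worked example: each DVB buffer holds ts_packet_count chunks of
 * ts_packet_size bytes, so with the default dvb_buf_tscnt of 32 this
 * is 32 * (188 * 4) = 24064 bytes per buffer, and 32 such buffers are
 * requested from videobuf.
 */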
static int dvb_buf_prepare(struct videobuf_queue *q,
struct videobuf_buffer *vb, enum v4l2_field field)
{
struct cx8802_dev *dev = q->priv_data;
return cx8802_buf_prepare(q, dev, (struct cx88_buffer*)vb,field);
}
static void dvb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
struct cx8802_dev *dev = q->priv_data;
cx8802_buf_queue(dev, (struct cx88_buffer*)vb);
}
static void dvb_buf_release(struct videobuf_queue *q,
struct videobuf_buffer *vb)
{
cx88_free_buffer(q, (struct cx88_buffer*)vb);
}
static const struct videobuf_queue_ops dvb_qops = {
.buf_setup = dvb_buf_setup,
.buf_prepare = dvb_buf_prepare,
.buf_queue = dvb_buf_queue,
.buf_release = dvb_buf_release,
};
/* ------------------------------------------------------------------ */
static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
{
struct cx8802_dev *dev= fe->dvb->priv;
struct cx8802_driver *drv = NULL;
int ret = 0;
int fe_id;
fe_id = videobuf_dvb_find_frontend(&dev->frontends, fe);
if (!fe_id) {
printk(KERN_ERR "%s() No frontend found\n", __func__);
return -EINVAL;
}
mutex_lock(&dev->core->lock);
drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
if (drv) {
if (acquire){
dev->frontends.active_fe_id = fe_id;
ret = drv->request_acquire(drv);
} else {
ret = drv->request_release(drv);
dev->frontends.active_fe_id = 0;
}
}
mutex_unlock(&dev->core->lock);
return ret;
}
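/*
 * Note: this callback brokers exclusive access to the shared MPEG
 * port. Acquiring records which frontend is active and asks the
 * cx8802 core driver for the bus; releasing hands the bus back and
 * clears active_fe_id, all under the core lock.
 */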
static void cx88_dvb_gate_ctrl(struct cx88_core *core, int open)
{
struct videobuf_dvb_frontends *f;
struct videobuf_dvb_frontend *fe;
if (!core->dvbdev)
return;
f = &core->dvbdev->frontends;
if (!f)
return;
if (f->gate <= 1) /* undefined or fe0 */
fe = videobuf_dvb_get_frontend(f, 1);
else
fe = videobuf_dvb_get_frontend(f, f->gate);
if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
}
/* ------------------------------------------------------------------ */
static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
{
static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
static const u8 reset [] = { RESET, 0x80 };
static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
static const u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
mt352_write(fe, reset, sizeof(reset));
mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
mt352_write(fe, agc_cfg, sizeof(agc_cfg));
mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg));
mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
return 0;
}
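/*
 * Note on the blob format (as used by the mt352 demod core): each
 * static array above is a register address followed by one or more
 * data bytes, written by mt352_write() as a single I2C burst with the
 * register address auto-incrementing, e.g. { CLOCK_CTL, 0x38, 0x39 }
 * programs 0x38 and 0x39 into two consecutive clock registers.
 */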
static int dvico_dual_demod_init(struct dvb_frontend *fe)
{
static const u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
static const u8 reset [] = { RESET, 0x80 };
static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
static const u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
static const u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
mt352_write(fe, reset, sizeof(reset));
mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
mt352_write(fe, agc_cfg, sizeof(agc_cfg));
mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg));
mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
return 0;
}
static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
{
static const u8 clock_config [] = { 0x89, 0x38, 0x39 };
static const u8 reset [] = { 0x50, 0x80 };
static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
static const u8 agc_cfg [] = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
static const u8 dntv_extra[] = { 0xB5, 0x7A };
static const u8 capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(2000);
mt352_write(fe, reset, sizeof(reset));
mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
mt352_write(fe, agc_cfg, sizeof(agc_cfg));
udelay(2000);
mt352_write(fe, dntv_extra, sizeof(dntv_extra));
mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
return 0;
}
static const struct mt352_config dvico_fusionhdtv = {
.demod_address = 0x0f,
.demod_init = dvico_fusionhdtv_demod_init,
};
static const struct mt352_config dntv_live_dvbt_config = {
.demod_address = 0x0f,
.demod_init = dntv_live_dvbt_demod_init,
};
static const struct mt352_config dvico_fusionhdtv_dual = {
.demod_address = 0x0f,
.demod_init = dvico_dual_demod_init,
};
static const struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
};
static struct mb86a16_config twinhan_vp1027 = {
.demod_address = 0x08,
};
#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
{
static const u8 clock_config [] = { 0x89, 0x38, 0x38 };
static const u8 reset [] = { 0x50, 0x80 };
static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
static const u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
0x00, 0xFF, 0x00, 0x40, 0x40 };
static const u8 dntv_extra[] = { 0xB5, 0x7A };
static const u8 capt_range_cfg[] = { 0x75, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(2000);
mt352_write(fe, reset, sizeof(reset));
mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
mt352_write(fe, agc_cfg, sizeof(agc_cfg));
udelay(2000);
mt352_write(fe, dntv_extra, sizeof(dntv_extra));
mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
return 0;
}
static const struct mt352_config dntv_live_dvbt_pro_config = {
.demod_address = 0x0f,
.no_tuner = 1,
.demod_init = dntv_live_dvbt_pro_demod_init,
};
#endif
static const struct zl10353_config dvico_fusionhdtv_hybrid = {
.demod_address = 0x0f,
.no_tuner = 1,
};
static const struct zl10353_config dvico_fusionhdtv_xc3028 = {
.demod_address = 0x0f,
.if2 = 45600,
.no_tuner = 1,
};
static const struct mt352_config dvico_fusionhdtv_mt352_xc3028 = {
.demod_address = 0x0f,
.if2 = 4560,
.no_tuner = 1,
.demod_init = dvico_fusionhdtv_demod_init,
};
static const struct zl10353_config dvico_fusionhdtv_plus_v1_1 = {
.demod_address = 0x0f,
};
static const struct cx22702_config connexant_refboard_config = {
.demod_address = 0x43,
.output_mode = CX22702_SERIAL_OUTPUT,
};
static const struct cx22702_config hauppauge_hvr_config = {
.demod_address = 0x63,
.output_mode = CX22702_SERIAL_OUTPUT,
};
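/*
* ->set_ts_params callbacks only cache a value in dev->ts_gen_cntrl;
* the cx8802 core programs it into the chip's TS port control
* register when the port is (re)configured, so the bit values here
* follow the cx2388x TS port layout (serial vs. parallel, etc.).
*/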
static int or51132_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
return 0;
}
static const struct or51132_config pchdtv_hd3000 = {
.demod_address = 0x15,
.set_ts_params = or51132_set_ts_param,
};
static int lgdt330x_pll_rf_set(struct dvb_frontend *fe, int index)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
dprintk(1, "%s: index = %d\n", __func__, index);
if (index == 0)
cx_clear(MO_GP0_IO, 8);
else
cx_set(MO_GP0_IO, 8);
return 0;
}
static int lgdt330x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
if (is_punctured)
dev->ts_gen_cntrl |= 0x04;
else
dev->ts_gen_cntrl &= ~0x04;
return 0;
}
static struct lgdt330x_config fusionhdtv_3_gold = {
.demod_address = 0x0e,
.demod_chip = LGDT3302,
.serial_mpeg = 0x04, /* TPSERIAL for 3302 in TOP_CONTROL */
.set_ts_params = lgdt330x_set_ts_param,
};
static const struct lgdt330x_config fusionhdtv_5_gold = {
.demod_address = 0x0e,
.demod_chip = LGDT3303,
.serial_mpeg = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
.set_ts_params = lgdt330x_set_ts_param,
};
static const struct lgdt330x_config pchdtv_hd5500 = {
.demod_address = 0x59,
.demod_chip = LGDT3303,
.serial_mpeg = 0x40, /* TPSERIAL for 3303 in TOP_CONTROL */
.set_ts_params = lgdt330x_set_ts_param,
};
static int nxt200x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
return 0;
}
static const struct nxt200x_config ati_hdtvwonder = {
.demod_address = 0x0a,
.set_ts_params = nxt200x_set_ts_param,
};
static int cx24123_set_ts_param(struct dvb_frontend *fe,
int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
dev->ts_gen_cntrl = 0x02;
return 0;
}
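/*
* LNB supply control. These boards switch the LNB regulator through
* cx2388x GPIOs (MO_GP0_IO), so each board needs its own bit pattern.
* The demod's original ->set_voltage is saved in
* core->prev_set_voltage and chained at the end so the demod driver
* still sees the request.
*/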
static int kworld_dvbs_100_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
if (voltage == SEC_VOLTAGE_OFF)
cx_write(MO_GP0_IO, 0x000006fb);
else
cx_write(MO_GP0_IO, 0x000006f9);
if (core->prev_set_voltage)
return core->prev_set_voltage(fe, voltage);
return 0;
}
static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
if (voltage == SEC_VOLTAGE_OFF) {
dprintk(1, "LNB Voltage OFF\n");
cx_write(MO_GP0_IO, 0x0000efff);
}
if (core->prev_set_voltage)
return core->prev_set_voltage(fe, voltage);
return 0;
}
static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
cx_set(MO_GP0_IO, 0x6040);
switch (voltage) {
case SEC_VOLTAGE_13:
cx_clear(MO_GP0_IO, 0x20);
break;
case SEC_VOLTAGE_18:
cx_set(MO_GP0_IO, 0x20);
break;
case SEC_VOLTAGE_OFF:
cx_clear(MO_GP0_IO, 0x20);
break;
}
if (core->prev_set_voltage)
return core->prev_set_voltage(fe, voltage);
return 0;
}
static int vp1027_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
switch (voltage) {
case SEC_VOLTAGE_13:
dprintk(1, "LNB SEC Voltage=13\n");
cx_write(MO_GP0_IO, 0x00001220);
break;
case SEC_VOLTAGE_18:
dprintk(1, "LNB SEC Voltage=18\n");
cx_write(MO_GP0_IO, 0x00001222);
break;
case SEC_VOLTAGE_OFF:
dprintk(1, "LNB Voltage OFF\n");
cx_write(MO_GP0_IO, 0x00001230);
break;
}
if (core->prev_set_voltage)
return core->prev_set_voltage(fe, voltage);
return 0;
}
static const struct cx24123_config geniatech_dvbs_config = {
.demod_address = 0x55,
.set_ts_params = cx24123_set_ts_param,
};
static const struct cx24123_config hauppauge_novas_config = {
.demod_address = 0x55,
.set_ts_params = cx24123_set_ts_param,
};
static const struct cx24123_config kworld_dvbs_100_config = {
.demod_address = 0x15,
.set_ts_params = cx24123_set_ts_param,
.lnb_polarity = 1,
};
static const struct s5h1409_config pinnacle_pctv_hd_800i_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_PARALLEL_OUTPUT,
.gpio = S5H1409_GPIO_ON,
.qam_if = 44000,
.inversion = S5H1409_INVERSION_OFF,
.status_mode = S5H1409_DEMODLOCKING,
.mpeg_timing = S5H1409_MPEGTIMING_NONCONTINOUS_NONINVERTING_CLOCK,
};
static const struct s5h1409_config dvico_hdtv5_pci_nano_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_SERIAL_OUTPUT,
.gpio = S5H1409_GPIO_OFF,
.inversion = S5H1409_INVERSION_OFF,
.status_mode = S5H1409_DEMODLOCKING,
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
static const struct s5h1409_config kworld_atsc_120_config = {
.demod_address = 0x32 >> 1,
.output_mode = S5H1409_SERIAL_OUTPUT,
.gpio = S5H1409_GPIO_OFF,
.inversion = S5H1409_INVERSION_OFF,
.status_mode = S5H1409_DEMODLOCKING,
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};
static const struct xc5000_config pinnacle_pctv_hd_800i_tuner_config = {
.i2c_address = 0x64,
.if_khz = 5380,
};
static const struct zl10353_config cx88_pinnacle_hybrid_pctv = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.if2 = 45600,
};
static const struct zl10353_config cx88_geniatech_x8000_mt = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
.disable_i2c_gate_ctrl = 1,
};
static const struct s5h1411_config dvico_fusionhdtv7_config = {
.output_mode = S5H1411_SERIAL_OUTPUT,
.gpio = S5H1411_GPIO_ON,
.mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
.qam_if = S5H1411_IF_44000,
.vsb_if = S5H1411_IF_44000,
.inversion = S5H1411_INVERSION_OFF,
.status_mode = S5H1411_DEMODLOCKING
};
static const struct xc5000_config dvico_fusionhdtv7_tuner_config = {
.i2c_address = 0xc2 >> 1,
.if_khz = 5380,
};
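/*
* Shared tuner-attach helpers: look up frontend 1, refuse to run if
* no demod was attached, then hang the tuner off it via dvb_attach(),
* unwinding the demod again if the tuner probe fails.
*/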
static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
{
struct dvb_frontend *fe;
struct videobuf_dvb_frontend *fe0 = NULL;
struct xc2028_ctrl ctl;
struct xc2028_config cfg = {
.i2c_adap = &dev->core->i2c_adap,
.i2c_addr = addr,
.ctrl = &ctl,
};
/* Get the first frontend */
fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
if (!fe0)
return -EINVAL;
if (!fe0->dvb.frontend) {
printk(KERN_ERR "%s/2: dvb frontend not attached. "
"Can't attach xc3028\n",
dev->core->name);
return -EINVAL;
}
/*
* Some xc3028 devices may be hidden by an I2C gate. This is known
* to happen with some s5h1409-based devices.
* Now that the I2C gate is open, set up the xc3028 configuration.
*/
cx88_setup_xc3028(dev->core, &ctl);
fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
if (!fe) {
printk(KERN_ERR "%s/2: xc3028 attach failed\n",
dev->core->name);
dvb_unregister_frontend(fe0->dvb.frontend);
dvb_frontend_detach(fe0->dvb.frontend);
fe0->dvb.frontend = NULL;
return -EINVAL;
}
printk(KERN_INFO "%s/2: xc3028 attached\n",
dev->core->name);
return 0;
}
static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
{
struct dvb_frontend *fe;
struct videobuf_dvb_frontend *fe0 = NULL;
/* Get the first frontend */
fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
if (!fe0)
return -EINVAL;
if (!fe0->dvb.frontend) {
printk(KERN_ERR "%s/2: dvb frontend not attached. "
"Can't attach xc4000\n",
dev->core->name);
return -EINVAL;
}
fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
cfg);
if (!fe) {
printk(KERN_ERR "%s/2: xc4000 attach failed\n",
dev->core->name);
dvb_unregister_frontend(fe0->dvb.frontend);
dvb_frontend_detach(fe0->dvb.frontend);
fe0->dvb.frontend = NULL;
return -EINVAL;
}
printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
return 0;
}
static int cx24116_set_ts_param(struct dvb_frontend *fe,
int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
dev->ts_gen_cntrl = 0x2;
return 0;
}
static int stv0900_set_ts_param(struct dvb_frontend *fe,
int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
dev->ts_gen_cntrl = 0;
return 0;
}
static int cx24116_reset_device(struct dvb_frontend *fe)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
/* Reset the part */
/* Put the cx24116 into reset */
cx_write(MO_SRST_IO, 0);
msleep(10);
/* Take the cx24116 out of reset */
cx_write(MO_SRST_IO, 1);
msleep(10);
return 0;
}
static const struct cx24116_config hauppauge_hvr4000_config = {
.demod_address = 0x05,
.set_ts_params = cx24116_set_ts_param,
.reset_device = cx24116_reset_device,
};
static const struct cx24116_config tevii_s460_config = {
.demod_address = 0x55,
.set_ts_params = cx24116_set_ts_param,
.reset_device = cx24116_reset_device,
};
static int ds3000_set_ts_param(struct dvb_frontend *fe,
int is_punctured)
{
struct cx8802_dev *dev = fe->dvb->priv;
dev->ts_gen_cntrl = 4;
return 0;
}
static struct ds3000_config tevii_ds3000_config = {
.demod_address = 0x68,
.set_ts_params = ds3000_set_ts_param,
};
static const struct stv0900_config prof_7301_stv0900_config = {
.demod_address = 0x6a,
/* demod_mode = 0, */
.xtal = 27000000,
.clkmode = 3, /* 0-CLKI, 2-XTALI, else AUTO */
.diseqc_mode = 2, /* 2/3 PWM */
.tun1_maddress = 0, /* 0x60 */
.tun1_adc = 0, /* 2 Vpp */
.path1_mode = 3,
.set_ts_params = stv0900_set_ts_param,
};
static const struct stb6100_config prof_7301_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
static const struct stv0299_config tevii_tuner_sharp_config = {
.demod_address = 0x68,
.inittab = sharp_z0194a_inittab,
.mclk = 88000000UL,
.invert = 1,
.skip_reinit = 0,
.lock_output = 1,
.volt13_op0_op1 = STV0299_VOLT13_OP1,
.min_delay_ms = 100,
.set_symbol_rate = sharp_z0194a_set_symbol_rate,
.set_ts_params = cx24116_set_ts_param,
};
static const struct stv0288_config tevii_tuner_earda_config = {
.demod_address = 0x68,
.min_delay_ms = 100,
.set_ts_params = cx24116_set_ts_param,
};
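/*
* Allocate one videobuf_dvb_frontend per frontend declared in the
* board profile; ids are 1-based to match the
* videobuf_dvb_get_frontend() calls in dvb_register() below.
*/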
static int cx8802_alloc_frontends(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
struct videobuf_dvb_frontend *fe = NULL;
int i;
mutex_init(&dev->frontends.lock);
INIT_LIST_HEAD(&dev->frontends.felist);
if (!core->board.num_frontends)
return -ENODEV;
printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
core->board.num_frontends);
for (i = 1; i <= core->board.num_frontends; i++) {
fe = videobuf_dvb_alloc_frontend(&dev->frontends, i);
if (!fe) {
printk(KERN_ERR "%s() failed to alloc\n", __func__);
videobuf_dvb_dealloc_frontends(&dev->frontends);
return -ENOMEM;
}
}
return 0;
}
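/*
* stv0299 register init table for the SMT 7020: flat register/value
* pairs, terminated by the 0xff/0xff sentinel the stv0299 driver's
* inittab loader expects.
*/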
static const u8 samsung_smt_7020_inittab[] = {
0x01, 0x15,
0x02, 0x00,
0x03, 0x00,
0x04, 0x7D,
0x05, 0x0F,
0x06, 0x02,
0x07, 0x00,
0x08, 0x60,
0x0A, 0xC2,
0x0B, 0x00,
0x0C, 0x01,
0x0D, 0x81,
0x0E, 0x44,
0x0F, 0x09,
0x10, 0x3C,
0x11, 0x84,
0x12, 0xDA,
0x13, 0x99,
0x14, 0x8D,
0x15, 0xCE,
0x16, 0xE8,
0x17, 0x43,
0x18, 0x1C,
0x19, 0x1B,
0x1A, 0x1D,
0x1C, 0x12,
0x1D, 0x00,
0x1E, 0x00,
0x1F, 0x00,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x02,
0x29, 0x28,
0x2A, 0x14,
0x2B, 0x0F,
0x2C, 0x09,
0x2D, 0x05,
0x31, 0x1F,
0x32, 0x19,
0x33, 0xFC,
0x34, 0x13,
0xff, 0xff,
};
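/*
* PLL programming for the SMT 7020 tuner. A sketch of the math,
* assuming the usual DVB-S convention of c->frequency in kHz: with a
* 125 kHz tuning step the divider is frequency / 125, sent MSB first,
* followed by the charge-pump/band-select bits in buf[2] and buf[3].
*/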
static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cx8802_dev *dev = fe->dvb->priv;
u8 buf[4];
u32 div;
struct i2c_msg msg = {
.addr = 0x61,
.flags = 0,
.buf = buf,
.len = sizeof(buf) };
div = c->frequency / 125;
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x84; /* 0xC4 */
buf[3] = 0x00;
if (c->frequency < 1500000)
buf[3] |= 0x10;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&dev->core->i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static int samsung_smt_7020_set_tone(struct dvb_frontend *fe,
fe_sec_tone_mode_t tone)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
cx_set(MO_GP0_IO, 0x0800);
switch (tone) {
case SEC_TONE_ON:
cx_set(MO_GP0_IO, 0x08);
break;
case SEC_TONE_OFF:
cx_clear(MO_GP0_IO, 0x08);
break;
default:
return -EINVAL;
}
return 0;
}
static int samsung_smt_7020_set_voltage(struct dvb_frontend *fe,
fe_sec_voltage_t voltage)
{
struct cx8802_dev *dev = fe->dvb->priv;
struct cx88_core *core = dev->core;
u8 data;
struct i2c_msg msg = {
.addr = 8,
.flags = 0,
.buf = &data,
.len = sizeof(data) };
cx_set(MO_GP0_IO, 0x8000);
switch (voltage) {
case SEC_VOLTAGE_OFF:
break;
case SEC_VOLTAGE_13:
data = ISL6421_EN1 | ISL6421_LLC1;
cx_clear(MO_GP0_IO, 0x80);
break;
case SEC_VOLTAGE_18:
data = ISL6421_EN1 | ISL6421_LLC1 | ISL6421_VSEL1;
cx_clear(MO_GP0_IO, 0x80);
break;
default:
return -EINVAL;
}
return (i2c_transfer(&dev->core->i2c_adap, &msg, 1) == 1) ? 0 : -EIO;
}
static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
u32 srate, u32 ratio)
{
u8 aclk = 0;
u8 bclk = 0;
if (srate < 1500000) {
aclk = 0xb7;
bclk = 0x47;
} else if (srate < 3000000) {
aclk = 0xb7;
bclk = 0x4b;
} else if (srate < 7000000) {
aclk = 0xb7;
bclk = 0x4f;
} else if (srate < 14000000) {
aclk = 0xb7;
bclk = 0x53;
} else if (srate < 30000000) {
aclk = 0xb6;
bclk = 0x53;
} else if (srate < 45000000) {
aclk = 0xb4;
bclk = 0x51;
}
stv0299_writereg(fe, 0x13, aclk);
stv0299_writereg(fe, 0x14, bclk);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, ratio & 0xf0);
return 0;
}
static const struct stv0299_config samsung_stv0299_config = {
.demod_address = 0x68,
.inittab = samsung_smt_7020_inittab,
.mclk = 88000000UL,
.invert = 0,
.skip_reinit = 0,
.lock_output = STV0299_LOCKOUTPUT_LK,
.volt13_op0_op1 = STV0299_VOLT13_OP1,
.min_delay_ms = 100,
.set_symbol_rate = samsung_smt_7020_stv0299_set_symbol_rate,
};
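/*
* dvb_register() is the board dispatch: fetch frontend 1 (and 2 on
* the MFE boards), attach the demod/tuner pair selected by
* core->boardnr, hook up the bus-control and gate callbacks, then
* hand everything to videobuf_dvb_register_bus(). All failures funnel
* through frontend_detach, which also clears the gate_ctrl hook.
*/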
static int dvb_register(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
int mfe_shared = 0; /* bus not shared by default */
int res = -EINVAL;
if (core->i2c_rc) {
printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
goto frontend_detach;
}
/* Get the first frontend */
fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
if (!fe0)
goto frontend_detach;
/* multi-frontend gate control is undefined or defaults to fe0 */
dev->frontends.gate = 0;
/* Sets the gate control callback to be used by i2c command calls */
core->gate_ctrl = cx88_dvb_gate_ctrl;
/* init frontend(s) */
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_DVB_T1:
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, &core->i2c_adap,
DVB_PLL_THOMSON_DTT759X))
goto frontend_detach;
}
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
case CX88_BOARD_CONEXANT_DVB_T1:
case CX88_BOARD_KWORLD_DVB_T_CX22702:
case CX88_BOARD_WINFAST_DTV1000:
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, &core->i2c_adap,
DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
}
break;
case CX88_BOARD_WINFAST_DTV2000H:
case CX88_BOARD_HAUPPAUGE_HVR1100:
case CX88_BOARD_HAUPPAUGE_HVR1100LP:
case CX88_BOARD_HAUPPAUGE_HVR1300:
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
break;
case CX88_BOARD_WINFAST_DTV2000H_J:
fe0->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216MEX_MK3))
goto frontend_detach;
}
break;
case CX88_BOARD_HAUPPAUGE_HVR3000:
/* MFE frontend 1 */
mfe_shared = 1;
dev->frontends.gate = 2;
/* DVB-S init */
fe0->dvb.frontend = dvb_attach(cx24123_attach,
&hauppauge_novas_config,
&dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
&dev->core->i2c_adap,
0x08, ISL6421_DCL, 0x00))
goto frontend_detach;
}
/* MFE frontend 2 */
fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
if (!fe1)
goto frontend_detach;
/* DVB-T init */
fe1->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&dev->core->i2c_adap);
if (fe1->dvb.frontend) {
fe1->dvb.frontend->id = 1;
if (!dvb_attach(simple_tuner_attach,
fe1->dvb.frontend,
&dev->core->i2c_adap,
0x61, TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
break;
}
/* ZL10353 replaces MT352 on later cards */
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x60, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
/* The tin box says DEE1601, but it seems to be DTT7579
* compatible, with a slightly different MT352 AGC gain. */
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_dual,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
break;
}
/* ZL10353 replaces MT352 on later cards */
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_THOMSON_DTT7579))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_LG_Z201))
goto frontend_detach;
}
break;
case CX88_BOARD_KWORLD_DVB_T:
case CX88_BOARD_DNTV_LIVE_DVB_T:
case CX88_BOARD_ADSTECH_DVB_T_PCI:
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dntv_live_dvbt_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
0x61, NULL, DVB_PLL_UNKNOWN_1))
goto frontend_detach;
}
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
/* MT352 is on a secondary I2C bus made from some GPIO lines */
fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
&dev->vp3054->adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
#else
printk(KERN_ERR "%s/2: built without vp3054 support\n",
core->name);
#endif
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_hybrid,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_FE6600))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_xc3028,
&core->i2c_adap);
if (fe0->dvb.frontend == NULL)
fe0->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_mt352_xc3028,
&core->i2c_adap);
/*
* On this board, the demod provides the I2C bus pullup.
* We must not permit gate_ctrl to be performed, or
* the xc3028 cannot communicate on the bus.
*/
if (fe0->dvb.frontend)
fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
break;
case CX88_BOARD_PCHDTV_HD3000:
fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_DTT761X))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
dev->ts_gen_cntrl = 0x08;
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 1);
mdelay(200);
/* Select RF connector callback */
fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_MICROTUNE_4042FI5))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T:
dev->ts_gen_cntrl = 0x08;
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 9);
mdelay(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_THOMSON_DTT761X))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
dev->ts_gen_cntrl = 0x08;
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 1);
mdelay(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_gold,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF))
goto frontend_detach;
if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x43))
goto frontend_detach;
}
break;
case CX88_BOARD_PCHDTV_HD5500:
dev->ts_gen_cntrl = 0x08;
/* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 1);
mdelay(200);
fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
&pchdtv_hd5500,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_LG_TDVS_H06XF))
goto frontend_detach;
if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x43))
goto frontend_detach;
}
break;
case CX88_BOARD_ATI_HDTVWONDER:
fe0->dvb.frontend = dvb_attach(nxt200x_attach,
&ati_hdtvwonder,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x61,
TUNER_PHILIPS_TUV1236D))
goto frontend_detach;
}
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
fe0->dvb.frontend = dvb_attach(cx24123_attach,
&hauppauge_novas_config,
&core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
&core->i2c_adap, 0x08, ISL6421_DCL, 0x00))
goto frontend_detach;
}
break;
case CX88_BOARD_KWORLD_DVBS_100:
fe0->dvb.frontend = dvb_attach(cx24123_attach,
&kworld_dvbs_100_config,
&core->i2c_adap);
if (fe0->dvb.frontend) {
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage = kworld_dvbs_100_set_voltage;
}
break;
case CX88_BOARD_GENIATECH_DVBS:
fe0->dvb.frontend = dvb_attach(cx24123_attach,
&geniatech_dvbs_config,
&core->i2c_adap);
if (fe0->dvb.frontend) {
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage;
}
break;
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&pinnacle_pctv_hd_800i_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
&core->i2c_adap,
&pinnacle_pctv_hd_800i_tuner_config))
goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&dvico_hdtv5_pci_nano_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
struct dvb_frontend *fe;
struct xc2028_config cfg = {
.i2c_adap = &core->i2c_adap,
.i2c_addr = 0x61,
};
static struct xc2028_ctrl ctl = {
.fname = XC2028_DEFAULT_FIRMWARE,
.max_len = 64,
.scode_table = XC3028_FE_OREN538,
};
fe = dvb_attach(xc2028_attach,
fe0->dvb.frontend, &cfg);
if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_pinnacle_hybrid_pctv,
&core->i2c_adap);
if (fe0->dvb.frontend) {
fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
}
break;
case CX88_BOARD_WINFAST_DTV1800H_XC4000:
case CX88_BOARD_WINFAST_DTV2000H_PLUS:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_pinnacle_hybrid_pctv,
&core->i2c_adap);
if (fe0->dvb.frontend) {
struct xc4000_config cfg = {
.i2c_address = 0x61,
.default_pm = 0,
.dvb_amplitude = 134,
.set_smoothedcvbs = 1,
.if_khz = 4560
};
fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
if (attach_xc4000(dev, &cfg) < 0)
goto frontend_detach;
}
break;
case CX88_BOARD_GENIATECH_X8000_MT:
dev->ts_gen_cntrl = 0x00;
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_geniatech_x8000_mt,
&core->i2c_adap);
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
break;
case CX88_BOARD_KWORLD_ATSC_120:
fe0->dvb.frontend = dvb_attach(s5h1409_attach,
&kworld_atsc_120_config,
&core->i2c_adap);
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
break;
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&dvico_fusionhdtv7_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
&core->i2c_adap,
&dvico_fusionhdtv7_tuner_config))
goto frontend_detach;
}
break;
case CX88_BOARD_HAUPPAUGE_HVR4000:
/* MFE frontend 1 */
mfe_shared = 1;
dev->frontends.gate = 2;
/* DVB-S/S2 Init */
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&hauppauge_hvr4000_config,
&dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
&dev->core->i2c_adap,
0x08, ISL6421_DCL, 0x00))
goto frontend_detach;
}
/* MFE frontend 2 */
fe1 = videobuf_dvb_get_frontend(&dev->frontends, 2);
if (!fe1)
goto frontend_detach;
/* DVB-T Init */
fe1->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
&dev->core->i2c_adap);
if (fe1->dvb.frontend) {
fe1->dvb.frontend->id = 1;
if (!dvb_attach(simple_tuner_attach,
fe1->dvb.frontend,
&dev->core->i2c_adap,
0x61, TUNER_PHILIPS_FMD1216ME_MK3))
goto frontend_detach;
}
break;
case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&hauppauge_hvr4000_config,
&dev->core->i2c_adap);
if (fe0->dvb.frontend) {
if (!dvb_attach(isl6421_attach,
fe0->dvb.frontend,
&dev->core->i2c_adap,
0x08, ISL6421_DCL, 0x00))
goto frontend_detach;
}
break;
case CX88_BOARD_PROF_6200:
case CX88_BOARD_TBS_8910:
case CX88_BOARD_TEVII_S420:
fe0->dvb.frontend = dvb_attach(stv0299_attach,
&tevii_tuner_sharp_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
&core->i2c_adap, DVB_PLL_OPERA1))
goto frontend_detach;
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
} else {
fe0->dvb.frontend = dvb_attach(stv0288_attach,
&tevii_tuner_earda_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
&core->i2c_adap))
goto frontend_detach;
core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
}
}
break;
case CX88_BOARD_TEVII_S460:
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&tevii_s460_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL)
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
break;
case CX88_BOARD_TEVII_S464:
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL)
fe0->dvb.frontend->ops.set_voltage =
tevii_dvbs_set_voltage;
break;
case CX88_BOARD_OMICOM_SS4_PCI:
case CX88_BOARD_TBS_8920:
case CX88_BOARD_PROF_7300:
case CX88_BOARD_SATTRADE_ST4200:
fe0->dvb.frontend = dvb_attach(cx24116_attach,
&hauppauge_hvr4000_config,
&core->i2c_adap);
if (fe0->dvb.frontend != NULL)
fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
break;
case CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_terratec_cinergy_ht_pci_mkii_config,
&core->i2c_adap);
if (fe0->dvb.frontend) {
fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
if (attach_xc3028(0x61, dev) < 0)
goto frontend_detach;
}
break;
case CX88_BOARD_PROF_7301: {
struct dvb_tuner_ops *tuner_ops = NULL;
fe0->dvb.frontend = dvb_attach(stv0900_attach,
&prof_7301_stv0900_config,
&core->i2c_adap, 0);
if (fe0->dvb.frontend != NULL) {
if (!dvb_attach(stb6100_attach, fe0->dvb.frontend,
&prof_7301_stb6100_config,
&core->i2c_adap))
goto frontend_detach;
tuner_ops = &fe0->dvb.frontend->ops.tuner_ops;
tuner_ops->set_frequency = stb6100_set_freq;
tuner_ops->get_frequency = stb6100_get_freq;
tuner_ops->set_bandwidth = stb6100_set_bandw;
tuner_ops->get_bandwidth = stb6100_get_bandw;
core->prev_set_voltage =
fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage =
tevii_dvbs_set_voltage;
}
break;
}
case CX88_BOARD_SAMSUNG_SMT_7020:
dev->ts_gen_cntrl = 0x08;
cx_set(MO_GP0_IO, 0x0101);
cx_clear(MO_GP0_IO, 0x01);
mdelay(100);
cx_set(MO_GP0_IO, 0x01);
mdelay(200);
fe0->dvb.frontend = dvb_attach(stv0299_attach,
&samsung_stv0299_config,
&dev->core->i2c_adap);
if (fe0->dvb.frontend) {
fe0->dvb.frontend->ops.tuner_ops.set_params =
samsung_smt_7020_tuner_set_params;
fe0->dvb.frontend->tuner_priv =
&dev->core->i2c_adap;
fe0->dvb.frontend->ops.set_voltage =
samsung_smt_7020_set_voltage;
fe0->dvb.frontend->ops.set_tone =
samsung_smt_7020_set_tone;
}
break;
case CX88_BOARD_TWINHAN_VP1027_DVBS:
dev->ts_gen_cntrl = 0x00;
fe0->dvb.frontend = dvb_attach(mb86a16_attach,
&twinhan_vp1027,
&core->i2c_adap);
if (fe0->dvb.frontend) {
core->prev_set_voltage =
fe0->dvb.frontend->ops.set_voltage;
fe0->dvb.frontend->ops.set_voltage =
vp1027_set_voltage;
}
break;
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
core->name);
break;
}
if (!fe0->dvb.frontend || (fe1 && !fe1->dvb.frontend)) {
printk(KERN_ERR
"%s/2: frontend initialization failed\n",
core->name);
goto frontend_detach;
}
/* define general-purpose callback pointer */
fe0->dvb.frontend->callback = cx88_tuner_callback;
/* Ensure all frontends negotiate bus access */
fe0->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
if (fe1)
fe1->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
/* Put the analog decoder in standby to keep it quiet */
call_all(core, core, s_power, 0);
/* register everything */
res = videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
&dev->pci->dev, adapter_nr, mfe_shared, NULL);
if (res)
goto frontend_detach;
return res;
frontend_detach:
core->gate_ctrl = NULL;
videobuf_dvb_dealloc_frontends(&dev->frontends);
return res;
}
/* ----------------------------------------------------------- */
/* CX8802 MPEG -> mini driver - We have been given the hardware */
static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
dprintk(1, "%s\n", __func__);
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
/* We arrive here with either the cx23416 or the cx22702
* on the bus. Take the bus from the cx23416 and enable the
* cx22702 demod
*/
/* Toggle reset on cx22702 leaving i2c active */
cx_set(MO_GP0_IO, 0x00000080);
udelay(1000);
cx_clear(MO_GP0_IO, 0x00000080);
udelay(50);
cx_set(MO_GP0_IO, 0x00000080);
udelay(1000);
/* enable the cx22702 pins */
cx_clear(MO_GP0_IO, 0x00000004);
udelay(1000);
break;
case CX88_BOARD_HAUPPAUGE_HVR3000:
case CX88_BOARD_HAUPPAUGE_HVR4000:
/* Toggle reset on cx22702 leaving i2c active */
cx_set(MO_GP0_IO, 0x00000080);
udelay(1000);
cx_clear(MO_GP0_IO, 0x00000080);
udelay(50);
cx_set(MO_GP0_IO, 0x00000080);
udelay(1000);
switch (core->dvbdev->frontends.active_fe_id) {
case 1: /* DVB-S/S2 Enabled */
/* tri-state the cx22702 pins */
cx_set(MO_GP0_IO, 0x00000004);
/* Take the cx24116/cx24123 out of reset */
cx_write(MO_SRST_IO, 1);
core->dvbdev->ts_gen_cntrl = 0x02; /* Parallel IO */
break;
case 2: /* DVB-T Enabled */
/* Put the cx24116/cx24123 into reset */
cx_write(MO_SRST_IO, 0);
/* enable the cx22702 pins */
cx_clear(MO_GP0_IO, 0x00000004);
core->dvbdev->ts_gen_cntrl = 0x0c; /* Serial IO */
break;
}
udelay(1000);
break;
case CX88_BOARD_WINFAST_DTV2000H_PLUS:
/* set RF input to AIR for DVB-T (GPIO 16) */
cx_write(MO_GP2_IO, 0x0101);
break;
default:
err = -ENODEV;
}
return err;
}
/* CX8802 MPEG -> mini driver - We no longer have the hardware */
static int cx8802_dvb_advise_release(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
int err = 0;
dprintk(1, "%s\n", __func__);
switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_HVR1300:
/* Do Nothing, leave the cx22702 on the bus. */
break;
case CX88_BOARD_HAUPPAUGE_HVR3000:
case CX88_BOARD_HAUPPAUGE_HVR4000:
break;
default:
err = -ENODEV;
}
return err;
}
static int cx8802_dvb_probe(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = drv->core->dvbdev;
int err;
struct videobuf_dvb_frontend *fe;
int i;
dprintk(1, "%s\n", __func__);
dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
core->boardnr,
core->name,
core->pci_bus,
core->pci_slot);
err = -ENODEV;
if (!(core->board.mpeg & CX88_MPEG_DVB))
goto fail_core;
/* If vp3054 isn't enabled, a stub will just return 0 */
err = vp3054_i2c_probe(dev);
if (err)
goto fail_core;
/* dvb stuff */
printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name);
dev->ts_gen_cntrl = 0x0c;
err = cx8802_alloc_frontends(dev);
if (err)
goto fail_core;
err = -ENODEV;
for (i = 1; i <= core->board.num_frontends; i++) {
fe = videobuf_dvb_get_frontend(&core->dvbdev->frontends, i);
if (fe == NULL) {
printk(KERN_ERR "%s() failed to get frontend(%d)\n",
__func__, i);
goto fail_probe;
}
videobuf_queue_sg_init(&fe->dvb.dvbq, &dvb_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_TOP,
sizeof(struct cx88_buffer),
dev, NULL);
/* init struct videobuf_dvb */
fe->dvb.name = dev->core->name;
}
err = dvb_register(dev);
if (err)
/* frontends/adapter de-allocated in dvb_register */
printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n",
core->name, err);
return err;
fail_probe:
videobuf_dvb_dealloc_frontends(&core->dvbdev->frontends);
fail_core:
return err;
}
static int cx8802_dvb_remove(struct cx8802_driver *drv)
{
struct cx88_core *core = drv->core;
struct cx8802_dev *dev = drv->core->dvbdev;
dprintk(1, "%s\n", __func__);
videobuf_dvb_unregister_bus(&dev->frontends);
vp3054_i2c_remove(dev);
core->gate_ctrl = NULL;
return 0;
}
static struct cx8802_driver cx8802_dvb_driver = {
.type_id = CX88_MPEG_DVB,
.hw_access = CX8802_DRVCTL_SHARED,
.probe = cx8802_dvb_probe,
.remove = cx8802_dvb_remove,
.advise_acquire = cx8802_dvb_advise_acquire,
.advise_release = cx8802_dvb_advise_release,
};
static int __init dvb_init(void)
{
printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
CX88_VERSION);
return cx8802_register_driver(&cx8802_dvb_driver);
}
static void __exit dvb_fini(void)
{
cx8802_unregister_driver(&cx8802_dvb_driver);
}
module_init(dvb_init);
module_exit(dvb_fini);
| gpl-2.0 |
haoyangw/android_kernel_xiaomi_dior_WIP | drivers/i2c/busses/i2c-nuc900.c | 5031 | 14726 | /*
* linux/drivers/i2c/busses/i2c-nuc900.c
*
* Copyright (c) 2010 Nuvoton technology corporation.
*
* This driver is based on the S3C2410 I2C driver of Ben Dooks <ben-Y5A6D6n0/KfQXOPxS62xeg@public.gmane.org>.
* Written by Wan ZongShun <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/mfp.h>
#include <mach/i2c.h>
/* nuc900 i2c registers offset */
#define CSR 0x00
#define DIVIDER 0x04
#define CMDR 0x08
#define SWR 0x0C
#define RXR 0x10
#define TXR 0x14
/* nuc900 i2c CSR register bits */
#define IRQEN 0x003
#define I2CBUSY 0x400
#define I2CSTART 0x018
#define IRQFLAG 0x004
#define ARBIT_LOST 0x200
#define SLAVE_ACK 0x800
/* nuc900 i2c CMDR register bits */
#define I2C_CMD_START 0x10
#define I2C_CMD_STOP 0x08
#define I2C_CMD_READ 0x04
#define I2C_CMD_WRITE 0x02
#define I2C_CMD_NACK 0x01
/* i2c controller state */
enum nuc900_i2c_state {
STATE_IDLE,
STATE_START,
STATE_READ,
STATE_WRITE,
STATE_STOP
};
/* i2c controller private data */
struct nuc900_i2c {
spinlock_t lock;
wait_queue_head_t wait;
struct i2c_msg *msg;
unsigned int msg_num;
unsigned int msg_idx;
unsigned int msg_ptr;
unsigned int irq;
enum nuc900_i2c_state state;
void __iomem *regs;
struct clk *clk;
struct device *dev;
struct resource *ioarea;
struct i2c_adapter adap;
};
/* nuc900_i2c_master_complete
*
* complete the message and wake up the caller, using the given return code,
* or zero to mean ok.
*/
static inline void nuc900_i2c_master_complete(struct nuc900_i2c *i2c, int ret)
{
dev_dbg(i2c->dev, "master_complete %d\n", ret);
i2c->msg_ptr = 0;
i2c->msg = NULL;
i2c->msg_idx++;
i2c->msg_num = 0;
if (ret)
i2c->msg_idx = ret;
wake_up(&i2c->wait);
}
/* irq enable/disable functions */
static inline void nuc900_i2c_disable_irq(struct nuc900_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + CSR);
writel(tmp & ~IRQEN, i2c->regs + CSR);
}
static inline void nuc900_i2c_enable_irq(struct nuc900_i2c *i2c)
{
unsigned long tmp;
tmp = readl(i2c->regs + CSR);
writel(tmp | IRQEN, i2c->regs + CSR);
}
/* nuc900_i2c_message_start
*
* put the start of a message onto the bus
*/
static void nuc900_i2c_message_start(struct nuc900_i2c *i2c,
struct i2c_msg *msg)
{
unsigned int addr = (msg->addr & 0x7f) << 1;
if (msg->flags & I2C_M_RD)
addr |= 0x1;
writel(addr & 0xff, i2c->regs + TXR);
writel(I2C_CMD_START | I2C_CMD_WRITE, i2c->regs + CMDR);
}
static inline void nuc900_i2c_stop(struct nuc900_i2c *i2c, int ret)
{
dev_dbg(i2c->dev, "STOP\n");
/* stop the transfer */
i2c->state = STATE_STOP;
writel(I2C_CMD_STOP, i2c->regs + CMDR);
nuc900_i2c_master_complete(i2c, ret);
nuc900_i2c_disable_irq(i2c);
}
/* helper functions to determine the current state in the set of
* messages we are sending
*/
/* is_lastmsg()
*
* returns TRUE if the current message is the last in the set
*/
static inline int is_lastmsg(struct nuc900_i2c *i2c)
{
return i2c->msg_idx >= (i2c->msg_num - 1);
}
/* is_msglast
*
* returns TRUE if this is the last byte in the current message
*/
static inline int is_msglast(struct nuc900_i2c *i2c)
{
return i2c->msg_ptr == i2c->msg->len - 1;
}
/* is_msgend
*
* returns TRUE if we reached the end of the current message
*/
static inline int is_msgend(struct nuc900_i2c *i2c)
{
return i2c->msg_ptr >= i2c->msg->len;
}
/* i2c_nuc900_irq_nextbyte
*
* process an interrupt and work out what to do
*/
static void i2c_nuc900_irq_nextbyte(struct nuc900_i2c *i2c,
unsigned long iicstat)
{
unsigned char byte;
switch (i2c->state) {
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
break;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
nuc900_i2c_disable_irq(i2c);
break;
case STATE_START:
/* last thing we did was send a start condition on the
* bus, or started a new i2c message
*/
if (iicstat & SLAVE_ACK &&
!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
/* ack was not received... */
dev_dbg(i2c->dev, "ack was not received\n");
nuc900_i2c_stop(i2c, -ENXIO);
break;
}
if (i2c->msg->flags & I2C_M_RD)
i2c->state = STATE_READ;
else
i2c->state = STATE_WRITE;
/* terminate the transfer if there is nothing to do
* as this is used by the i2c probe to find devices.
*/
if (is_lastmsg(i2c) && i2c->msg->len == 0) {
nuc900_i2c_stop(i2c, 0);
break;
}
if (i2c->state == STATE_READ)
goto prepare_read;
/* fall through to the write state, as we will need to
* send a byte as well
*/
case STATE_WRITE:
/* we are writing data to the device... check for the
* end of the message, and if so, work out what to do
*/
if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
if (iicstat & SLAVE_ACK) {
dev_dbg(i2c->dev, "WRITE: No Ack\n");
nuc900_i2c_stop(i2c, -ECONNREFUSED);
break;
}
}
retry_write:
if (!is_msgend(i2c)) {
byte = i2c->msg->buf[i2c->msg_ptr++];
writeb(byte, i2c->regs + TXR);
writel(I2C_CMD_WRITE, i2c->regs + CMDR);
} else if (!is_lastmsg(i2c)) {
/* we need to go to the next i2c message */
dev_dbg(i2c->dev, "WRITE: Next Message\n");
i2c->msg_ptr = 0;
i2c->msg_idx++;
i2c->msg++;
/* check to see if we need to do another message */
if (i2c->msg->flags & I2C_M_NOSTART) {
if (i2c->msg->flags & I2C_M_RD) {
/* cannot do this, the controller
* forces us to send a new START
* when we change direction
*/
nuc900_i2c_stop(i2c, -EINVAL);
}
goto retry_write;
} else {
/* send the new start */
nuc900_i2c_message_start(i2c, i2c->msg);
i2c->state = STATE_START;
}
} else {
/* send stop */
nuc900_i2c_stop(i2c, 0);
}
break;
case STATE_READ:
/* we have a byte of data in the data register, do
* something with it, and then work out whether we are
* going to do any more read/write
*/
byte = readb(i2c->regs + RXR);
i2c->msg->buf[i2c->msg_ptr++] = byte;
prepare_read:
if (is_msglast(i2c)) {
/* last byte of buffer */
if (is_lastmsg(i2c))
writel(I2C_CMD_READ | I2C_CMD_NACK,
i2c->regs + CMDR);
} else if (is_msgend(i2c)) {
/* ok, we've read the entire buffer, see if there
* is anything else we need to do
*/
if (is_lastmsg(i2c)) {
/* last message, send stop and complete */
dev_dbg(i2c->dev, "READ: Send Stop\n");
nuc900_i2c_stop(i2c, 0);
} else {
/* go to the next transfer */
dev_dbg(i2c->dev, "READ: Next Transfer\n");
i2c->msg_ptr = 0;
i2c->msg_idx++;
i2c->msg++;
writel(I2C_CMD_READ, i2c->regs + CMDR);
}
} else {
writel(I2C_CMD_READ, i2c->regs + CMDR);
}
break;
}
}
/* nuc900_i2c_irq
*
* top level IRQ servicing routine
*/
static irqreturn_t nuc900_i2c_irq(int irqno, void *dev_id)
{
struct nuc900_i2c *i2c = dev_id;
unsigned long status;
status = readl(i2c->regs + CSR);
writel(status | IRQFLAG, i2c->regs + CSR);
if (status & ARBIT_LOST) {
/* deal with arbitration loss */
dev_err(i2c->dev, "deal with arbitration loss\n");
goto out;
}
if (i2c->state == STATE_IDLE) {
dev_dbg(i2c->dev, "IRQ: error i2c->state == IDLE\n");
goto out;
}
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent
*/
i2c_nuc900_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
}
/* nuc900_i2c_set_master
*
* get the i2c bus for a master transaction
*/
static int nuc900_i2c_set_master(struct nuc900_i2c *i2c)
{
int timeout = 400;
while (timeout-- > 0) {
if (((readl(i2c->regs + SWR) & I2CSTART) == I2CSTART) &&
((readl(i2c->regs + CSR) & I2CBUSY) == 0)) {
return 0;
}
msleep(1);
}
return -ETIMEDOUT;
}
/* nuc900_i2c_doxfer
*
* this starts an i2c transfer
*/
static int nuc900_i2c_doxfer(struct nuc900_i2c *i2c,
struct i2c_msg *msgs, int num)
{
unsigned long iicstat, timeout;
int spins = 20;
int ret;
ret = nuc900_i2c_set_master(i2c);
if (ret != 0) {
dev_err(i2c->dev, "cannot get bus (error %d)\n", ret);
ret = -EAGAIN;
goto out;
}
spin_lock_irq(&i2c->lock);
i2c->msg = msgs;
i2c->msg_num = num;
i2c->msg_ptr = 0;
i2c->msg_idx = 0;
i2c->state = STATE_START;
nuc900_i2c_message_start(i2c, msgs);
spin_unlock_irq(&i2c->lock);
timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5);
ret = i2c->msg_idx;
/* having these next two as dev_err() makes life very
* noisy when doing an i2cdetect
*/
if (timeout == 0)
dev_dbg(i2c->dev, "timeout\n");
else if (ret != num)
dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret);
/* ensure the stop has been through the bus */
dev_dbg(i2c->dev, "waiting for bus idle\n");
/* first, try busy waiting briefly */
do {
iicstat = readl(i2c->regs + CSR);
} while ((iicstat & I2CBUSY) && --spins);
/* if that timed out sleep */
if (!spins) {
msleep(1);
iicstat = readl(i2c->regs + CSR);
}
if (iicstat & I2CBUSY)
dev_warn(i2c->dev, "timeout waiting for bus idle\n");
out:
return ret;
}
/* nuc900_i2c_xfer
*
* first port of call from the i2c bus code when a message needs
* transferring across the i2c bus.
*/
static int nuc900_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
struct nuc900_i2c *i2c = (struct nuc900_i2c *)adap->algo_data;
int retry;
int ret;
nuc900_i2c_enable_irq(i2c);
for (retry = 0; retry < adap->retries; retry++) {
ret = nuc900_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN)
return ret;
dev_dbg(i2c->dev, "Retrying transmission (%d)\n", retry);
udelay(100);
}
return -EREMOTEIO;
}
/* declare our i2c functionality */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
static const struct i2c_algorithm nuc900_i2c_algorithm = {
.master_xfer = nuc900_i2c_xfer,
.functionality = nuc900_i2c_func,
};
/* nuc900_i2c_probe
*
* called by the bus driver when a suitable device is found
*/
static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
{
struct nuc900_i2c *i2c;
struct nuc900_platform_i2c *pdata;
struct resource *res;
int ret;
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
}
i2c = kzalloc(sizeof(struct nuc900_i2c), GFP_KERNEL);
if (!i2c) {
dev_err(&pdev->dev, "no memory for state\n");
return -ENOMEM;
}
strlcpy(i2c->adap.name, "nuc900-i2c0", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
i2c->adap.algo = &nuc900_i2c_algorithm;
i2c->adap.retries = 2;
i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
i2c->dev = &pdev->dev;
i2c->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = -ENOENT;
goto err_noclk;
}
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
clk_enable(i2c->clk);
/* map the registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "cannot find IO resource\n");
ret = -ENOENT;
goto err_clk;
}
i2c->ioarea = request_mem_region(res->start, resource_size(res),
pdev->name);
if (i2c->ioarea == NULL) {
dev_err(&pdev->dev, "cannot request IO\n");
ret = -ENXIO;
goto err_clk;
}
i2c->regs = ioremap(res->start, resource_size(res));
if (i2c->regs == NULL) {
dev_err(&pdev->dev, "cannot map IO\n");
ret = -ENXIO;
goto err_ioarea;
}
dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
i2c->regs, i2c->ioarea, res);
/* setup info block for the i2c core */
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
mfp_set_groupg(&pdev->dev, NULL);
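/*
* Program the bus clock divider from the source clock: divider =
* src_clk / (5 * bus_freq) - 1, per the platform data contract
* (presumably matching the NUC900 prescaler convention; check the
* datasheet if the factor of five differs on your part).
*/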
ret = (clk_get_rate(i2c->clk) / (pdata->bus_freq * 5)) - 1;
writel(ret & 0xffff, i2c->regs + DIVIDER);
/* find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending)
*/
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
goto err_iomap;
}
ret = request_irq(i2c->irq, nuc900_i2c_irq, IRQF_SHARED,
dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
goto err_iomap;
}
/* Note, previous versions of the driver used i2c_add_adapter()
* to add the bus at any number. We now pass the bus number via
* the platform data, so if unset it will now default to always
* being bus 0.
*/
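/*
* A board file would provide something like this (hypothetical
* values; the struct comes from <mach/i2c.h>):
*
*	static struct nuc900_platform_i2c nuc900_i2c0_data = {
*		.bus_num	= 0,
*		.bus_freq	= 100000,	/* 100 kHz */
*	};
*/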
i2c->adap.nr = pdata->bus_num;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
goto err_irq;
}
platform_set_drvdata(pdev, i2c);
dev_info(&pdev->dev, "%s: NUC900 I2C adapter\n",
dev_name(&i2c->adap.dev));
return 0;
err_irq:
free_irq(i2c->irq, i2c);
err_iomap:
iounmap(i2c->regs);
err_ioarea:
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
err_clk:
clk_disable(i2c->clk);
clk_put(i2c->clk);
err_noclk:
kfree(i2c);
return ret;
}
/* nuc900_i2c_remove
*
* called when device is removed from the bus
*/
static int __devexit nuc900_i2c_remove(struct platform_device *pdev)
{
struct nuc900_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adap);
free_irq(i2c->irq, i2c);
clk_disable(i2c->clk);
clk_put(i2c->clk);
iounmap(i2c->regs);
release_resource(i2c->ioarea);
kfree(i2c->ioarea);
kfree(i2c);
return 0;
}
static struct platform_driver nuc900_i2c_driver = {
.probe = nuc900_i2c_probe,
.remove = __devexit_p(nuc900_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = "nuc900-i2c0",
},
};
static int __init i2c_adap_nuc900_init(void)
{
return platform_driver_register(&nuc900_i2c_driver);
}
static void __exit i2c_adap_nuc900_exit(void)
{
platform_driver_unregister(&nuc900_i2c_driver);
}
subsys_initcall(i2c_adap_nuc900_init);
module_exit(i2c_adap_nuc900_exit);
MODULE_DESCRIPTION("NUC900 I2C Bus driver");
MODULE_AUTHOR("Wan ZongShun, <mcuos.com-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-i2c0");
| gpl-2.0 |
TeamGlade-Devices/kernel_xiaomi_cancro | drivers/media/dvb/frontends/stv0299.c | 5031 | 18805 | /*
Driver for ST STV0299 demodulator
Copyright (C) 2001-2002 Convergence Integrated Media GmbH
<ralph@convergence.de>,
<holger@convergence.de>,
<js@convergence.de>
Philips SU1278/SH
Copyright (C) 2002 by Peter Schildmann <peter.schildmann@web.de>
LG TDQF-S001F
Copyright (C) 2002 Felix Domke <tmbinc@elitedvb.net>
& Andreas Oberritter <obi@linuxtv.org>
Support for Samsung TBMU24112IMB used on Technisat SkyStar2 rev. 2.6B
Copyright (C) 2003 Vadim Catana <skystar@moldova.cc>:
Support for Philips SU1278 on Technotrend hardware
Copyright (C) 2004 Andrew de Quincey <adq_dvb@lidskialf.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
#include "stv0299.h"
struct stv0299_state {
struct i2c_adapter* i2c;
const struct stv0299_config* config;
struct dvb_frontend frontend;
u8 initialised:1;
u32 tuner_frequency;
u32 symbol_rate;
fe_code_rate_t fec_inner;
int errmode;
u32 ucblocks;
u8 mcr_reg;
};
#define STATUS_BER 0
#define STATUS_UCBLOCKS 1
static int debug;
static int debug_legacy_dish_switch;
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "stv0299: " args); \
} while (0)
static int stv0299_writeregI (struct stv0299_state* state, u8 reg, u8 data)
{
int ret;
u8 buf [] = { reg, data };
struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
ret = i2c_transfer (state->i2c, &msg, 1);
if (ret != 1)
dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
"ret == %i)\n", __func__, reg, data, ret);
return (ret != 1) ? -EREMOTEIO : 0;
}
static int stv0299_write(struct dvb_frontend* fe, const u8 buf[], int len)
{
struct stv0299_state* state = fe->demodulator_priv;
if (len != 2)
return -EINVAL;
return stv0299_writeregI(state, buf[0], buf[1]);
}
static u8 stv0299_readreg (struct stv0299_state* state, u8 reg)
{
int ret;
u8 b0 [] = { reg };
u8 b1 [] = { 0 };
struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2)
dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n",
__func__, reg, ret);
return b1[0];
}
static int stv0299_readregs (struct stv0299_state* state, u8 reg1, u8 *b, u8 len)
{
int ret;
struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = ®1, .len = 1 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b, .len = len } };
ret = i2c_transfer (state->i2c, msg, 2);
if (ret != 2)
dprintk("%s: readreg error (ret == %i)\n", __func__, ret);
return ret == 2 ? 0 : ret;
}
static int stv0299_set_FEC (struct stv0299_state* state, fe_code_rate_t fec)
{
dprintk ("%s\n", __func__);
switch (fec) {
case FEC_AUTO:
return stv0299_writeregI (state, 0x31, 0x1f);
case FEC_1_2:
return stv0299_writeregI (state, 0x31, 0x01);
case FEC_2_3:
return stv0299_writeregI (state, 0x31, 0x02);
case FEC_3_4:
return stv0299_writeregI (state, 0x31, 0x04);
case FEC_5_6:
return stv0299_writeregI (state, 0x31, 0x08);
case FEC_7_8:
return stv0299_writeregI (state, 0x31, 0x10);
default:
return -EINVAL;
}
}
static fe_code_rate_t stv0299_get_fec (struct stv0299_state* state)
{
static fe_code_rate_t fec_tab [] = { FEC_2_3, FEC_3_4, FEC_5_6,
FEC_7_8, FEC_1_2 };
u8 index;
dprintk ("%s\n", __func__);
index = stv0299_readreg (state, 0x1b);
index &= 0x7;
if (index > 4)
return FEC_AUTO;
return fec_tab [index];
}
static int stv0299_wait_diseqc_fifo (struct stv0299_state* state, int timeout)
{
unsigned long start = jiffies;
dprintk ("%s\n", __func__);
while (stv0299_readreg(state, 0x0a) & 1) {
if (jiffies - start > timeout) {
dprintk ("%s: timeout!!\n", __func__);
return -ETIMEDOUT;
}
msleep(10);
}
return 0;
}
static int stv0299_wait_diseqc_idle (struct stv0299_state* state, int timeout)
{
unsigned long start = jiffies;
dprintk ("%s\n", __func__);
while ((stv0299_readreg(state, 0x0a) & 3) != 2 ) {
if (jiffies - start > timeout) {
dprintk ("%s: timeout!!\n", __func__);
return -ETIMEDOUT;
}
msleep(10);
}
return 0;
}
static int stv0299_set_symbolrate (struct dvb_frontend* fe, u32 srate)
{
struct stv0299_state* state = fe->demodulator_priv;
u64 big = srate;
u32 ratio;
/* check rate is within limits */
if ((srate < 1000000) || (srate > 45000000)) return -EINVAL;
/* calculate value to program */
big = big << 20;
big += (state->config->mclk-1); /* round correctly */
do_div(big, state->config->mclk);
ratio = big << 4;
return state->config->set_symbol_rate(fe, srate, ratio);
}
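/*
 * Worked example (values assumed, not from this driver): with the common
 * mclk of 88 MHz and srate = 27500000 (27.5 Msym/s),
 * big = 27500000 << 20 = 28835840000000; the rounded division by 88000000
 * gives 327680, and ratio = 327680 << 4 = 0x500000. The result is the
 * srate/mclk ratio scaled by 2^24 (computed as << 20 then << 4 to keep
 * 32-bit headroom), which the board's set_symbol_rate() callback programs.
 */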
static int stv0299_get_symbolrate (struct stv0299_state* state)
{
u32 Mclk = state->config->mclk / 4096L;
u32 srate;
s32 offset;
u8 sfr[3];
s8 rtf;
dprintk ("%s\n", __func__);
stv0299_readregs (state, 0x1f, sfr, 3);
stv0299_readregs (state, 0x1a, (u8 *)&rtf, 1);
srate = (sfr[0] << 8) | sfr[1];
srate *= Mclk;
srate /= 16;
srate += (sfr[2] >> 4) * Mclk / 256;
offset = (s32) rtf * (srate / 4096L);
offset /= 128;
dprintk ("%s : srate = %i\n", __func__, srate);
dprintk ("%s : ofset = %i\n", __func__, offset);
srate += offset;
srate += 1000;
srate /= 2000;
srate *= 2000;
return srate;
}
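/*
 * The final three statements round to the nearest multiple of 2000 Sym/s;
 * e.g. an (assumed) computed srate of 27499300 becomes
 * (27499300 + 1000) / 2000 * 2000 = 27500000.
 */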
static int stv0299_send_diseqc_msg (struct dvb_frontend* fe,
struct dvb_diseqc_master_cmd *m)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 val;
int i;
dprintk ("%s\n", __func__);
if (stv0299_wait_diseqc_idle (state, 100) < 0)
return -ETIMEDOUT;
val = stv0299_readreg (state, 0x08);
if (stv0299_writeregI (state, 0x08, (val & ~0x7) | 0x6)) /* DiSEqC mode */
return -EREMOTEIO;
for (i=0; i<m->msg_len; i++) {
if (stv0299_wait_diseqc_fifo (state, 100) < 0)
return -ETIMEDOUT;
if (stv0299_writeregI (state, 0x09, m->msg[i]))
return -EREMOTEIO;
}
if (stv0299_wait_diseqc_idle (state, 100) < 0)
return -ETIMEDOUT;
return 0;
}
static int stv0299_send_diseqc_burst (struct dvb_frontend* fe, fe_sec_mini_cmd_t burst)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 val;
dprintk ("%s\n", __func__);
if (stv0299_wait_diseqc_idle (state, 100) < 0)
return -ETIMEDOUT;
val = stv0299_readreg (state, 0x08);
if (stv0299_writeregI (state, 0x08, (val & ~0x7) | 0x2)) /* burst mode */
return -EREMOTEIO;
if (stv0299_writeregI (state, 0x09, burst == SEC_MINI_A ? 0x00 : 0xff))
return -EREMOTEIO;
if (stv0299_wait_diseqc_idle (state, 100) < 0)
return -ETIMEDOUT;
if (stv0299_writeregI (state, 0x08, val))
return -EREMOTEIO;
return 0;
}
static int stv0299_set_tone (struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 val;
if (stv0299_wait_diseqc_idle (state, 100) < 0)
return -ETIMEDOUT;
val = stv0299_readreg (state, 0x08);
switch (tone) {
case SEC_TONE_ON:
return stv0299_writeregI (state, 0x08, val | 0x3);
case SEC_TONE_OFF:
return stv0299_writeregI (state, 0x08, (val & ~0x3) | 0x02);
default:
return -EINVAL;
}
}
static int stv0299_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltage)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 reg0x08;
u8 reg0x0c;
dprintk("%s: %s\n", __func__,
voltage == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
voltage == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");
reg0x08 = stv0299_readreg (state, 0x08);
reg0x0c = stv0299_readreg (state, 0x0c);
/*
 * H/V switching is done over OP0/OP1; these outputs also serve as the
 * LNB power enable bits.
 */
reg0x0c &= 0x0f;
reg0x08 = (reg0x08 & 0x3f) | (state->config->lock_output << 6);
switch (voltage) {
case SEC_VOLTAGE_13:
if (state->config->volt13_op0_op1 == STV0299_VOLT13_OP0)
reg0x0c |= 0x10; /* OP1 off, OP0 on */
else
reg0x0c |= 0x40; /* OP1 on, OP0 off */
break;
case SEC_VOLTAGE_18:
reg0x0c |= 0x50; /* OP1 on, OP0 on */
break;
case SEC_VOLTAGE_OFF:
/* LNB power off! */
reg0x08 = 0x00;
reg0x0c = 0x00;
break;
default:
return -EINVAL;
}
if (state->config->op0_off)
reg0x0c &= ~0x10;
stv0299_writeregI(state, 0x08, reg0x08);
return stv0299_writeregI(state, 0x0c, reg0x0c);
}
static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long cmd)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 reg0x08;
u8 reg0x0c;
u8 lv_mask = 0x40;
u8 last = 1;
int i;
struct timeval nexttime;
struct timeval tv[10];
reg0x08 = stv0299_readreg (state, 0x08);
reg0x0c = stv0299_readreg (state, 0x0c);
reg0x0c &= 0x0f;
stv0299_writeregI (state, 0x08, (reg0x08 & 0x3f) | (state->config->lock_output << 6));
if (state->config->volt13_op0_op1 == STV0299_VOLT13_OP0)
lv_mask = 0x10;
cmd = cmd << 1;
if (debug_legacy_dish_switch)
printk ("%s switch command: 0x%04lx\n",__func__, cmd);
do_gettimeofday (&nexttime);
if (debug_legacy_dish_switch)
memcpy (&tv[0], &nexttime, sizeof (struct timeval));
stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */
dvb_frontend_sleep_until(&nexttime, 32000);
for (i=0; i<9; i++) {
if (debug_legacy_dish_switch)
do_gettimeofday (&tv[i+1]);
if((cmd & 0x01) != last) {
/* set voltage to (last ? 13V : 18V) */
stv0299_writeregI (state, 0x0c, reg0x0c | (last ? lv_mask : 0x50));
last = (last) ? 0 : 1;
}
cmd = cmd >> 1;
if (i != 8)
dvb_frontend_sleep_until(&nexttime, 8000);
}
if (debug_legacy_dish_switch) {
printk ("%s(%d): switch delay (should be 32k followed by all 8k\n",
__func__, fe->dvb->num);
for (i = 1; i < 10; i++)
printk ("%d: %d\n", i, timeval_usec_diff(tv[i-1] , tv[i]));
}
return 0;
}
static int stv0299_init (struct dvb_frontend* fe)
{
struct stv0299_state* state = fe->demodulator_priv;
int i;
u8 reg;
u8 val;
dprintk("stv0299: init chip\n");
stv0299_writeregI(state, 0x02, 0x30 | state->mcr_reg);
msleep(50);
for (i = 0; ; i += 2) {
reg = state->config->inittab[i];
val = state->config->inittab[i+1];
if (reg == 0xff && val == 0xff)
break;
if (reg == 0x0c && state->config->op0_off)
val &= ~0x10;
if (reg == 0x2)
state->mcr_reg = val & 0xf;
stv0299_writeregI(state, reg, val);
}
return 0;
}
static int stv0299_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 signal = 0xff - stv0299_readreg (state, 0x18);
u8 sync = stv0299_readreg (state, 0x1b);
dprintk ("%s : FE_READ_STATUS : VSTATUS: 0x%02x\n", __func__, sync);
*status = 0;
if (signal > 10)
*status |= FE_HAS_SIGNAL;
if (sync & 0x80)
*status |= FE_HAS_CARRIER;
if (sync & 0x10)
*status |= FE_HAS_VITERBI;
if (sync & 0x08)
*status |= FE_HAS_SYNC;
if ((sync & 0x98) == 0x98)
*status |= FE_HAS_LOCK;
return 0;
}
static int stv0299_read_ber(struct dvb_frontend* fe, u32* ber)
{
struct stv0299_state* state = fe->demodulator_priv;
if (state->errmode != STATUS_BER)
return -ENOSYS;
*ber = stv0299_readreg(state, 0x1e) | (stv0299_readreg(state, 0x1d) << 8);
return 0;
}
static int stv0299_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
struct stv0299_state* state = fe->demodulator_priv;
s32 signal = 0xffff - ((stv0299_readreg (state, 0x18) << 8)
| stv0299_readreg (state, 0x19));
dprintk ("%s : FE_READ_SIGNAL_STRENGTH : AGC2I: 0x%02x%02x, signal=0x%04x\n", __func__,
stv0299_readreg (state, 0x18),
stv0299_readreg (state, 0x19), (int) signal);
signal = signal * 5 / 4;
*strength = (signal > 0xffff) ? 0xffff : (signal < 0) ? 0 : signal;
return 0;
}
static int stv0299_read_snr(struct dvb_frontend* fe, u16* snr)
{
struct stv0299_state* state = fe->demodulator_priv;
s32 xsnr = 0xffff - ((stv0299_readreg (state, 0x24) << 8)
| stv0299_readreg (state, 0x25));
xsnr = 3 * (xsnr - 0xa100);
*snr = (xsnr > 0xffff) ? 0xffff : (xsnr < 0) ? 0 : xsnr;
return 0;
}
static int stv0299_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
struct stv0299_state* state = fe->demodulator_priv;
if (state->errmode != STATUS_UCBLOCKS)
return -ENOSYS;
state->ucblocks += stv0299_readreg(state, 0x1e);
state->ucblocks += (stv0299_readreg(state, 0x1d) << 8);
*ucblocks = state->ucblocks;
return 0;
}
static int stv0299_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0299_state* state = fe->demodulator_priv;
int invval = 0;
dprintk ("%s : FE_SET_FRONTEND\n", __func__);
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
/* set the inversion */
if (p->inversion == INVERSION_OFF) invval = 0;
else if (p->inversion == INVERSION_ON) invval = 1;
else {
printk("stv0299 does not support auto-inversion\n");
return -EINVAL;
}
if (state->config->invert) invval = (~invval) & 1;
stv0299_writeregI(state, 0x0c, (stv0299_readreg(state, 0x0c) & 0xfe) | invval);
if (fe->ops.tuner_ops.set_params) {
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
stv0299_set_FEC(state, p->fec_inner);
stv0299_set_symbolrate(fe, p->symbol_rate);
stv0299_writeregI(state, 0x22, 0x00);
stv0299_writeregI(state, 0x23, 0x00);
state->tuner_frequency = p->frequency;
state->fec_inner = p->fec_inner;
state->symbol_rate = p->symbol_rate;
return 0;
}
static int stv0299_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0299_state* state = fe->demodulator_priv;
s32 derot_freq;
int invval;
derot_freq = (s32)(s16) ((stv0299_readreg (state, 0x22) << 8)
| stv0299_readreg (state, 0x23));
derot_freq *= (state->config->mclk >> 16);
derot_freq += 500;
derot_freq /= 1000;
p->frequency += derot_freq;
invval = stv0299_readreg (state, 0x0c) & 1;
if (state->config->invert) invval = (~invval) & 1;
p->inversion = invval ? INVERSION_ON : INVERSION_OFF;
p->fec_inner = stv0299_get_fec(state);
p->symbol_rate = stv0299_get_symbolrate(state);
return 0;
}
static int stv0299_sleep(struct dvb_frontend* fe)
{
struct stv0299_state* state = fe->demodulator_priv;
stv0299_writeregI(state, 0x02, 0xb0 | state->mcr_reg);
state->initialised = 0;
return 0;
}
static int stv0299_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
struct stv0299_state* state = fe->demodulator_priv;
if (enable) {
stv0299_writeregI(state, 0x05, 0xb5);
} else {
stv0299_writeregI(state, 0x05, 0x35);
}
udelay(1);
return 0;
}
static int stv0299_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
struct stv0299_state* state = fe->demodulator_priv;
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
fesettings->min_delay_ms = state->config->min_delay_ms;
if (p->symbol_rate < 10000000) {
fesettings->step_size = p->symbol_rate / 32000;
fesettings->max_drift = 5000;
} else {
fesettings->step_size = p->symbol_rate / 16000;
fesettings->max_drift = p->symbol_rate / 2000;
}
return 0;
}
static void stv0299_release(struct dvb_frontend* fe)
{
struct stv0299_state* state = fe->demodulator_priv;
kfree(state);
}
static struct dvb_frontend_ops stv0299_ops;
struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
struct i2c_adapter* i2c)
{
struct stv0299_state* state = NULL;
int id;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct stv0299_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
state->config = config;
state->i2c = i2c;
state->initialised = 0;
state->tuner_frequency = 0;
state->symbol_rate = 0;
state->fec_inner = 0;
state->errmode = STATUS_BER;
/* check if the demod is there */
stv0299_writeregI(state, 0x02, 0x30); /* standby off */
msleep(200);
id = stv0299_readreg(state, 0x00);
/* register 0x00 contains 0xa1 for STV0299 and STV0299B */
/* register 0x00 might contain 0x80 when returning from standby */
if (id != 0xa1 && id != 0x80) goto error;
/* create dvb_frontend */
memcpy(&state->frontend.ops, &stv0299_ops, sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
error:
kfree(state);
return NULL;
}
static struct dvb_frontend_ops stv0299_ops = {
.delsys = { SYS_DVBS },
.info = {
.name = "ST STV0299 DVB-S",
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125, /* kHz for QPSK frontends */
.frequency_tolerance = 0,
.symbol_rate_min = 1000000,
.symbol_rate_max = 45000000,
.symbol_rate_tolerance = 500, /* ppm */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_QPSK |
FE_CAN_FEC_AUTO
},
.release = stv0299_release,
.init = stv0299_init,
.sleep = stv0299_sleep,
.write = stv0299_write,
.i2c_gate_ctrl = stv0299_i2c_gate_ctrl,
.set_frontend = stv0299_set_frontend,
.get_frontend = stv0299_get_frontend,
.get_tune_settings = stv0299_get_tune_settings,
.read_status = stv0299_read_status,
.read_ber = stv0299_read_ber,
.read_signal_strength = stv0299_read_signal_strength,
.read_snr = stv0299_read_snr,
.read_ucblocks = stv0299_read_ucblocks,
.diseqc_send_master_cmd = stv0299_send_diseqc_msg,
.diseqc_send_burst = stv0299_send_diseqc_burst,
.set_tone = stv0299_set_tone,
.set_voltage = stv0299_set_voltage,
.dishnetwork_send_legacy_command = stv0299_send_legacy_dish_cmd,
};
module_param(debug_legacy_dish_switch, int, 0444);
MODULE_PARM_DESC(debug_legacy_dish_switch, "Enable timing analysis for Dish Network legacy switches");
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, "
"Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(stv0299_attach);
| gpl-2.0 |
ibladesi/TF101-HighOC-3P2 | fs/notify/dnotify/dnotify.c | 8359 | 11681 | /*
* Directory notifications for Linux.
*
* Copyright (C) 2000,2001,2002 Stephen Rothwell
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* dnotify was largely rewritten to use the new fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dnotify.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
int dir_notify_enable __read_mostly = 1;
static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;
static DEFINE_MUTEX(dnotify_mark_mutex);
/*
* dnotify will attach one of these to each inode (i_fsnotify_marks) which
* is being watched by dnotify. If multiple userspace applications are watching
* the same directory with dnotify their information is chained in dn
*/
struct dnotify_mark {
struct fsnotify_mark fsn_mark;
struct dnotify_struct *dn;
};
/*
* When a process starts or stops watching an inode the set of events which
* dnotify cares about for that inode may change. This function runs the
* list of everything receiving dnotify events about this directory and calculates
* the set of all those events. After it updates what dnotify is interested in
* it calls the fsnotify function so it can update the set of all events relevant
* to this inode.
*/
static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
__u32 new_mask, old_mask;
struct dnotify_struct *dn;
struct dnotify_mark *dn_mark = container_of(fsn_mark,
struct dnotify_mark,
fsn_mark);
assert_spin_locked(&fsn_mark->lock);
old_mask = fsn_mark->mask;
new_mask = 0;
for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
fsnotify_set_mark_mask_locked(fsn_mark, new_mask);
if (old_mask == new_mask)
return;
if (fsn_mark->i.inode)
fsnotify_recalc_inode_mask(fsn_mark->i.inode);
}
/*
* Main fsnotify call where events are delivered to dnotify.
* Find the dnotify mark on the relevant inode, run the list of dnotify structs
* on that mark and determine which of them has expressed interest in receiving
* events of this type. When one is found, send its process the signal and
* destroy the dnotify struct if it was not registered to receive multiple
* events.
*/
static int dnotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
struct fsnotify_event *event)
{
struct dnotify_mark *dn_mark;
struct inode *to_tell;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct fown_struct *fown;
__u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
BUG_ON(vfsmount_mark);
to_tell = event->to_tell;
dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);
spin_lock(&inode_mark->lock);
prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_mask & test_mask) == 0) {
prev = &dn->dn_next;
continue;
}
fown = &dn->dn_filp->f_owner;
send_sigio(fown, dn->dn_fd, POLL_MSG);
if (dn->dn_mask & FS_DN_MULTISHOT)
prev = &dn->dn_next;
else {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
dnotify_recalc_inode_mask(inode_mark);
}
}
spin_unlock(&inode_mark->lock);
return 0;
}
/*
* Given an inode and mask determine if dnotify would be interested in sending
* userspace notification for that pair.
*/
static bool dnotify_should_send_event(struct fsnotify_group *group,
struct inode *inode,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
__u32 mask, void *data, int data_type)
{
/* not a dir, dnotify doesn't care */
if (!S_ISDIR(inode->i_mode))
return false;
return true;
}
static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
struct dnotify_mark *dn_mark = container_of(fsn_mark,
struct dnotify_mark,
fsn_mark);
BUG_ON(dn_mark->dn);
kmem_cache_free(dnotify_mark_cache, dn_mark);
}
static struct fsnotify_ops dnotify_fsnotify_ops = {
.handle_event = dnotify_handle_event,
.should_send_event = dnotify_should_send_event,
.free_group_priv = NULL,
.freeing_mark = NULL,
.free_event_priv = NULL,
};
/*
* Called every time a file is closed. Looks first for a dnotify mark on the
* inode. If one is found run all of the ->dn structures attached to that
* mark for one relevant to this process closing the file and remove that
* dnotify_struct. If that was the last dnotify_struct also remove the
* fsnotify_mark.
*/
void dnotify_flush(struct file *filp, fl_owner_t id)
{
struct fsnotify_mark *fsn_mark;
struct dnotify_mark *dn_mark;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct inode *inode;
inode = filp->f_path.dentry->d_inode;
if (!S_ISDIR(inode->i_mode))
return;
fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
if (!fsn_mark)
return;
dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
mutex_lock(&dnotify_mark_mutex);
spin_lock(&fsn_mark->lock);
prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
dnotify_recalc_inode_mask(fsn_mark);
break;
}
prev = &dn->dn_next;
}
spin_unlock(&fsn_mark->lock);
/* nothing else could have found us thanks to the dnotify_mark_mutex */
if (dn_mark->dn == NULL)
fsnotify_destroy_mark(fsn_mark);
mutex_unlock(&dnotify_mark_mutex);
fsnotify_put_mark(fsn_mark);
}
/* this conversion is done only at watch creation */
static __u32 convert_arg(unsigned long arg)
{
__u32 new_mask = FS_EVENT_ON_CHILD;
if (arg & DN_MULTISHOT)
new_mask |= FS_DN_MULTISHOT;
if (arg & DN_DELETE)
new_mask |= (FS_DELETE | FS_MOVED_FROM);
if (arg & DN_MODIFY)
new_mask |= FS_MODIFY;
if (arg & DN_ACCESS)
new_mask |= FS_ACCESS;
if (arg & DN_ATTRIB)
new_mask |= FS_ATTRIB;
if (arg & DN_RENAME)
new_mask |= FS_DN_RENAME;
if (arg & DN_CREATE)
new_mask |= (FS_CREATE | FS_MOVED_TO);
return new_mask;
}
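/*
 * For reference, a minimal (hypothetical) userspace sequence that ends up
 * in this conversion; the DN_* flags come from <fcntl.h> and are the ones
 * translated here to FS_* event bits:
 *
 *	fcntl(dir_fd, F_SETSIG, SIGRTMIN);
 *	fcntl(dir_fd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_MULTISHOT);
 *
 * With that arg, convert_arg() returns FS_EVENT_ON_CHILD | FS_DN_MULTISHOT
 * | FS_CREATE | FS_MOVED_TO | FS_DELETE | FS_MOVED_FROM.
 */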
/*
* If multiple processes watch the same inode with dnotify there is only one
* dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
* onto that mark. This function either attaches the new dnotify_struct onto
* that list, or it ORs (|=) the mask onto an existing dnotify_struct.
*/
static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
struct dnotify_struct *odn;
odn = dn_mark->dn;
while (odn != NULL) {
/* adding more events to an existing dnotify_struct? */
if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
odn->dn_fd = fd;
odn->dn_mask |= mask;
return -EEXIST;
}
odn = odn->dn_next;
}
dn->dn_mask = mask;
dn->dn_fd = fd;
dn->dn_filp = filp;
dn->dn_owner = id;
dn->dn_next = dn_mark->dn;
dn_mark->dn = dn;
return 0;
}
/*
* When a process calls fcntl to attach a dnotify watch to a directory it ends
* up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be
* attached to the fsnotify_mark.
*/
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
struct dnotify_mark *new_dn_mark, *dn_mark;
struct fsnotify_mark *new_fsn_mark, *fsn_mark;
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
struct file *f;
int destroy = 0, error = 0;
__u32 mask;
/* we use these to tell if we need to kfree */
new_fsn_mark = NULL;
dn = NULL;
if (!dir_notify_enable) {
error = -EINVAL;
goto out_err;
}
/* a 0 mask means we are explicitly removing the watch */
if ((arg & ~DN_MULTISHOT) == 0) {
dnotify_flush(filp, id);
error = 0;
goto out_err;
}
/* dnotify only works on directories */
inode = filp->f_path.dentry->d_inode;
if (!S_ISDIR(inode->i_mode)) {
error = -ENOTDIR;
goto out_err;
}
/* expect most fcntl to add new rather than augment old */
dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL);
if (!dn) {
error = -ENOMEM;
goto out_err;
}
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
if (!new_dn_mark) {
error = -ENOMEM;
goto out_err;
}
/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
mask = convert_arg(arg);
/* set up the new_fsn_mark and new_dn_mark */
new_fsn_mark = &new_dn_mark->fsn_mark;
fsnotify_init_mark(new_fsn_mark, dnotify_free_mark);
new_fsn_mark->mask = mask;
new_dn_mark->dn = NULL;
/* this is needed to prevent the fcntl/close race described below */
mutex_lock(&dnotify_mark_mutex);
/* add the new_fsn_mark or find an old one. */
fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
if (fsn_mark) {
dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
} else {
fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0);
spin_lock(&new_fsn_mark->lock);
fsn_mark = new_fsn_mark;
dn_mark = new_dn_mark;
/* we used new_fsn_mark, so don't free it */
new_fsn_mark = NULL;
}
rcu_read_lock();
f = fcheck(fd);
rcu_read_unlock();
/* if (f != filp) means that we lost a race and another task/thread
* actually closed the fd we are still playing with before we grabbed
* the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the
* only time we clean up the marks we need to get our mark off
* the list. */
if (f != filp) {
/* if we added ourselves, shoot ourselves, it's possible that
* the flush actually did shoot this fsn_mark. That's fine too
* since multiple calls to destroy_mark is perfectly safe, if
* we found a dn_mark already attached to the inode, just sod
* off silently as the flush at close time dealt with it.
*/
if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
if (error) {
/* if we added, we must shoot */
if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
error = attach_dn(dn, dn_mark, id, fd, filp, mask);
/* !error means that we attached the dn to the dn_mark, so don't free it */
if (!error)
dn = NULL;
/* -EEXIST means that we didn't add this new dn and used an old one.
* that isn't an error (and the unused dn should be freed) */
else if (error == -EEXIST)
error = 0;
dnotify_recalc_inode_mask(fsn_mark);
out:
spin_unlock(&fsn_mark->lock);
if (destroy)
fsnotify_destroy_mark(fsn_mark);
mutex_unlock(&dnotify_mark_mutex);
fsnotify_put_mark(fsn_mark);
out_err:
if (new_fsn_mark)
fsnotify_put_mark(new_fsn_mark);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
return error;
}
static int __init dnotify_init(void)
{
dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
return 0;
}
module_init(dnotify_init)
| gpl-2.0 |
xiaognol/android_kernel_zte_nx503a-4.2 | arch/alpha/kernel/irq_pyxis.c | 11943 | 2264 | /*
* linux/arch/alpha/kernel/irq_pyxis.c
*
* Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
*
* IRQ Code common to all PYXIS core logic chips.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/core_cia.h>
#include "proto.h"
#include "irq_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
static inline void
pyxis_update_irq_hw(unsigned long mask)
{
*(vulp)PYXIS_INT_MASK = mask;
mb();
*(vulp)PYXIS_INT_MASK;
}
static inline void
pyxis_enable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
pyxis_disable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
pyxis_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit = 1UL << (d->irq - 16);
unsigned long mask = cached_irq_mask &= ~bit;
/* Disable the interrupt. */
*(vulp)PYXIS_INT_MASK = mask;
wmb();
/* Ack PYXIS PCI interrupt. */
*(vulp)PYXIS_INT_REQ = bit;
mb();
/* Re-read to force both writes. */
*(vulp)PYXIS_INT_MASK;
}
static struct irq_chip pyxis_irq_type = {
.name = "PYXIS",
.irq_mask_ack = pyxis_mask_and_ack_irq,
.irq_mask = pyxis_disable_irq,
.irq_unmask = pyxis_enable_irq,
};
void
pyxis_device_interrupt(unsigned long vector)
{
unsigned long pld;
unsigned int i;
/* Read the interrupt summary register of PYXIS */
pld = *(vulp)PYXIS_INT_REQ;
pld &= cached_irq_mask;
/*
* Now for every possible bit set, work through them and call
* the appropriate interrupt handler.
*/
while (pld) {
i = ffz(~pld);
pld &= pld - 1; /* clear least bit set */
if (i == 7)
isa_device_interrupt(vector);
else
handle_irq(16+i);
}
}
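/*
 * Worked example (assumed summary value): pld = 0x90 means bits 4 and 7
 * are pending. First pass: i = 4, so handle_irq(20) runs; pld is then
 * 0x80. Second pass: i = 7, the cascaded ISA interrupt, so
 * isa_device_interrupt(vector) runs instead; pld is now 0 and the loop
 * exits.
 */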
void __init
init_pyxis_irqs(unsigned long ignore_mask)
{
long i;
*(vulp)PYXIS_INT_MASK = 0; /* disable all */
*(vulp)PYXIS_INT_REQ = -1; /* flush all */
mb();
/* Send -INTA pulses to clear any pending interrupts ...*/
*(vuip) CIA_IACK_SC;
for (i = 16; i < 48; ++i) {
if ((ignore_mask >> i) & 1)
continue;
irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
setup_irq(16+7, &isa_cascade_irqaction);
}
| gpl-2.0 |
boa19861105/android_kernel_htc_b3uhl-JP | sound/drivers/opl4/opl4_mixer.c | 15015 | 2867 | /*
* OPL4 mixer functions
* Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "opl4_local.h"
#include <sound/control.h>
static int snd_opl4_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 2;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 7;
return 0;
}
static int snd_opl4_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
unsigned long flags;
u8 reg = kcontrol->private_value;
u8 value;
spin_lock_irqsave(&opl4->reg_lock, flags);
value = snd_opl4_read(opl4, reg);
spin_unlock_irqrestore(&opl4->reg_lock, flags);
ucontrol->value.integer.value[0] = 7 - (value & 7);
ucontrol->value.integer.value[1] = 7 - ((value >> 3) & 7);
return 0;
}
static int snd_opl4_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl4 *opl4 = snd_kcontrol_chip(kcontrol);
unsigned long flags;
u8 reg = kcontrol->private_value;
u8 value, old_value;
value = (7 - (ucontrol->value.integer.value[0] & 7)) |
((7 - (ucontrol->value.integer.value[1] & 7)) << 3);
spin_lock_irqsave(&opl4->reg_lock, flags);
old_value = snd_opl4_read(opl4, reg);
snd_opl4_write(opl4, reg, value);
spin_unlock_irqrestore(&opl4->reg_lock, flags);
return value != old_value;
}
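/*
 * Encoding example (illustrative): the register holds attenuation while
 * the mixer exposes volume, hence the "7 -" in both directions. A write
 * of [7, 7] (maximum volume, both channels) stores 0x00; [0, 0]
 * (minimum) stores 0x3f; [7, 0] stores 0x38.
 */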
static struct snd_kcontrol_new snd_opl4_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "FM Playback Volume",
.info = snd_opl4_ctl_info,
.get = snd_opl4_ctl_get,
.put = snd_opl4_ctl_put,
.private_value = OPL4_REG_MIX_CONTROL_FM
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Wavetable Playback Volume",
.info = snd_opl4_ctl_info,
.get = snd_opl4_ctl_get,
.put = snd_opl4_ctl_put,
.private_value = OPL4_REG_MIX_CONTROL_PCM
}
};
int snd_opl4_create_mixer(struct snd_opl4 *opl4)
{
struct snd_card *card = opl4->card;
int i, err;
strcat(card->mixername, ",OPL4");
for (i = 0; i < 2; ++i) {
err = snd_ctl_add(card, snd_ctl_new1(&snd_opl4_controls[i], opl4));
if (err < 0)
return err;
}
return 0;
}
| gpl-2.0 |
msfkonsole/android_kernel_xiaomi_dior | net/mac80211/michael.c | 15015 | 2237 | /*
* Michael MIC implementation - optimized for TKIP MIC operations
* Copyright 2002-2003, Instant802 Networks, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/ieee80211.h>
#include <asm/unaligned.h>
#include "michael.h"
static void michael_block(struct michael_mic_ctx *mctx, u32 val)
{
mctx->l ^= val;
mctx->r ^= rol32(mctx->l, 17);
mctx->l += mctx->r;
mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) |
((mctx->l & 0x00ff00ff) << 8);
mctx->l += mctx->r;
mctx->r ^= rol32(mctx->l, 3);
mctx->l += mctx->r;
mctx->r ^= ror32(mctx->l, 2);
mctx->l += mctx->r;
}
static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key,
struct ieee80211_hdr *hdr)
{
u8 *da, *sa, tid;
da = ieee80211_get_DA(hdr);
sa = ieee80211_get_SA(hdr);
if (ieee80211_is_data_qos(hdr->frame_control))
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
else
tid = 0;
mctx->l = get_unaligned_le32(key);
mctx->r = get_unaligned_le32(key + 4);
/*
* A pseudo header (DA, SA, Priority, 0, 0, 0) is used in Michael MIC
* calculation, but it is _not_ transmitted
*/
michael_block(mctx, get_unaligned_le32(da));
michael_block(mctx, get_unaligned_le16(&da[4]) |
(get_unaligned_le16(sa) << 16));
michael_block(mctx, get_unaligned_le32(&sa[2]));
michael_block(mctx, tid);
}
void michael_mic(const u8 *key, struct ieee80211_hdr *hdr,
const u8 *data, size_t data_len, u8 *mic)
{
u32 val;
size_t block, blocks, left;
struct michael_mic_ctx mctx;
michael_mic_hdr(&mctx, key, hdr);
/* Real data */
blocks = data_len / 4;
left = data_len % 4;
for (block = 0; block < blocks; block++)
michael_block(&mctx, get_unaligned_le32(&data[block * 4]));
/* Partial block of 0..3 bytes and padding: 0x5a + 4..7 zeros to make
* total length a multiple of 4. */
val = 0x5a;
while (left > 0) {
val <<= 8;
left--;
val |= data[blocks * 4 + left];
}
michael_block(&mctx, val);
michael_block(&mctx, 0);
put_unaligned_le32(mctx.l, mic);
put_unaligned_le32(mctx.r, mic + 4);
}
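/*
 * Padding example (assumed 6-byte payload): blocks = 1 and left = 2, so
 * the loop builds val = (0x5a << 16) | (data[5] << 8) | data[4] -- the
 * trailing bytes in little-endian order below the 0x5a pad byte -- and
 * the final michael_block(&mctx, 0) supplies the all-zero block that
 * terminates the MIC per the Michael specification.
 */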
| gpl-2.0 |
nnamon/linux | block/blk-mq-tag.c | 168 | 16755 | /*
* Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
* over multiple cachelines to avoid ping-pong between multiple submitters
* or submitter and completer. Uses rolling wakeups to avoid falling off
* the scaling cliff when we run out of tags and have to start putting
* submitters to sleep.
*
* Uses active queue tracking to support fairer distribution of tags
* between multiple submitters when a shared tag map is used.
*
* Copyright (C) 2013-2014 Jens Axboe
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
int i;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
int ret;
ret = find_first_zero_bit(&bm->word, bm->depth);
if (ret < bm->depth)
return true;
}
return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
if (!tags)
return true;
return bt_has_free_tags(&tags->bitmap_tags);
}
static inline int bt_index_inc(int index)
{
return (index + 1) & (BT_WAIT_QUEUES - 1);
}
static inline void bt_index_atomic_inc(atomic_t *index)
{
int old = atomic_read(index);
int new = bt_index_inc(old);
atomic_cmpxchg(index, old, new);
}
/*
* If a previously inactive queue goes active, bump the active user count.
*/
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
!test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
atomic_inc(&hctx->tags->active_queues);
return true;
}
/*
* Wakeup all potentially sleeping on tags
*/
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
bt = &tags->bitmap_tags;
wake_index = atomic_read(&bt->wake_index);
for (i = 0; i < BT_WAIT_QUEUES; i++) {
struct bt_wait_state *bs = &bt->bs[wake_index];
if (waitqueue_active(&bs->wait))
wake_up(&bs->wait);
wake_index = bt_index_inc(wake_index);
}
if (include_reserve) {
bt = &tags->breserved_tags;
if (waitqueue_active(&bt->bs[0].wait))
wake_up(&bt->bs[0].wait);
}
}
/*
* If a previously busy queue goes inactive, potential waiters could now
* be allowed to queue. Wake them up and check.
*/
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return;
atomic_dec(&tags->active_queues);
blk_mq_tag_wakeup_all(tags, false);
}
/*
* For shared tag users, we track the number of currently active users
* and attempt to provide a fair share of the tag depth for each of them.
*/
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
struct blk_mq_bitmap_tags *bt)
{
unsigned int depth, users;
if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
return true;
if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return true;
/*
* Don't try dividing an ant
*/
if (bt->depth == 1)
return true;
users = atomic_read(&hctx->tags->active_queues);
if (!users)
return true;
/*
* Allow at least some tags
*/
depth = max((bt->depth + users - 1) / users, 4U);
return atomic_read(&hctx->nr_active) < depth;
}
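/*
 * Worked example (assumed numbers): with bt->depth = 256 and 3 active
 * shared users, depth = max(ceil(256 / 3), 4) = 86, so each active queue
 * may hold at most 86 tags before new allocations must wait.
 */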
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
bool nowrap)
{
int tag, org_last_tag = last_tag;
while (1) {
tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
if (unlikely(tag >= bm->depth)) {
/*
* We started with an offset, and we didn't reset the
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
if (org_last_tag && last_tag && !nowrap) {
last_tag = org_last_tag = 0;
continue;
}
return -1;
}
if (!test_and_set_bit(tag, &bm->word))
break;
last_tag = tag + 1;
if (last_tag >= bm->depth - 1)
last_tag = 0;
}
return tag;
}
#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
/*
* Straightforward bitmap tag implementation, where each bit is a tag
* (cleared == free, and set == busy). The small twist is using per-cpu
* last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
* contexts. This enables us to drastically limit the space searched,
* without dirtying an extra shared cacheline like we would if we stored
* the cache value inside the shared blk_mq_bitmap_tags structure. On top
* of that, each word of tags is in a separate cacheline. This means that
* multiple users will tend to stick to different cachelines, at least
* until the map is exhausted.
*/
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
unsigned int *tag_cache, struct blk_mq_tags *tags)
{
unsigned int last_tag, org_last_tag;
int index, i, tag;
if (!hctx_may_queue(hctx, bt))
return -1;
last_tag = org_last_tag = *tag_cache;
index = TAG_TO_INDEX(bt, last_tag);
for (i = 0; i < bt->map_nr; i++) {
tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
BT_ALLOC_RR(tags));
if (tag != -1) {
tag += (index << bt->bits_per_word);
goto done;
}
/*
* Jump to next index, and reset the last tag to be the
* first tag of that index
*/
index++;
last_tag = (index << bt->bits_per_word);
if (index >= bt->map_nr) {
index = 0;
last_tag = 0;
}
}
*tag_cache = 0;
return -1;
/*
* Only update the cache from the allocation path, if we ended
* up using the specific cached tag.
*/
done:
if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
last_tag = tag + 1;
if (last_tag >= bt->depth - 1)
last_tag = 0;
*tag_cache = last_tag;
}
return tag;
}
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx)
{
struct bt_wait_state *bs;
int wait_index;
if (!hctx)
return &bt->bs[0];
wait_index = atomic_read(&hctx->wait_index);
bs = &bt->bs[wait_index];
bt_index_atomic_inc(&hctx->wait_index);
return bs;
}
static int bt_get(struct blk_mq_alloc_data *data,
struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx,
unsigned int *last_tag, struct blk_mq_tags *tags)
{
struct bt_wait_state *bs;
DEFINE_WAIT(wait);
int tag;
tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
return tag;
if (!(data->gfp & __GFP_WAIT))
return -1;
bs = bt_wait_ptr(bt, hctx);
do {
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
* some to complete. Note that hctx can be NULL here for
* reserved tag allocation.
*/
if (hctx)
blk_mq_run_hw_queue(hctx, false);
/*
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
tag = __bt_get(hctx, bt, last_tag, tags);
if (tag != -1)
break;
blk_mq_put_ctx(data->ctx);
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
data->hctx = data->q->mq_ops->map_queue(data->q,
data->ctx->cpu);
if (data->reserved) {
bt = &data->hctx->tags->breserved_tags;
} else {
last_tag = &data->ctx->last_tag;
hctx = data->hctx;
bt = &hctx->tags->bitmap_tags;
}
finish_wait(&bs->wait, &wait);
bs = bt_wait_ptr(bt, hctx);
} while (1);
finish_wait(&bs->wait, &wait);
return tag;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
&data->ctx->last_tag, data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
int tag, zero = 0;
if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
data->hctx->tags);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
if (!data->reserved)
return __blk_mq_get_tag(data);
return __blk_mq_get_reserved_tag(data);
}
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
int i, wake_index;
wake_index = atomic_read(&bt->wake_index);
for (i = 0; i < BT_WAIT_QUEUES; i++) {
struct bt_wait_state *bs = &bt->bs[wake_index];
if (waitqueue_active(&bs->wait)) {
int o = atomic_read(&bt->wake_index);
if (wake_index != o)
atomic_cmpxchg(&bt->wake_index, o, wake_index);
return bs;
}
wake_index = bt_index_inc(wake_index);
}
return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
const int index = TAG_TO_INDEX(bt, tag);
struct bt_wait_state *bs;
int wait_cnt;
clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
/* Ensure that the wait list checks occur after clear_bit(). */
smp_mb();
bs = bt_wake_ptr(bt);
if (!bs)
return;
wait_cnt = atomic_dec_return(&bs->wait_cnt);
if (unlikely(wait_cnt < 0))
wait_cnt = atomic_inc_return(&bs->wait_cnt);
if (wait_cnt == 0) {
atomic_add(bt->wake_cnt, &bs->wait_cnt);
bt_index_atomic_inc(&bt->wake_index);
wake_up(&bs->wait);
}
}
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
unsigned int *last_tag)
{
struct blk_mq_tags *tags = hctx->tags;
if (tag >= tags->nr_reserved_tags) {
const int real_tag = tag - tags->nr_reserved_tags;
BUG_ON(real_tag >= tags->nr_tags);
bt_clear_tag(&tags->bitmap_tags, real_tag);
if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
*last_tag = real_tag;
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
bt_clear_tag(&tags->breserved_tags, tag);
}
}
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
struct blk_mq_bitmap_tags *bt, unsigned int off,
busy_iter_fn *fn, void *data, bool reserved)
{
struct request *rq;
int bit, i;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
for (bit = find_first_bit(&bm->word, bm->depth);
bit < bm->depth;
bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
if (rq->q == hctx->queue)
fn(hctx, rq, data, reserved);
}
off += (1 << bt->bits_per_word);
}
}
static void bt_tags_for_each(struct blk_mq_tags *tags,
struct blk_mq_bitmap_tags *bt, unsigned int off,
busy_tag_iter_fn *fn, void *data, bool reserved)
{
struct request *rq;
int bit, i;
if (!tags->rqs)
return;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
for (bit = find_first_bit(&bm->word, bm->depth);
bit < bm->depth;
bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
rq = blk_mq_tag_to_rq(tags, off + bit);
fn(rq, data, reserved);
}
off += (1 << bt->bits_per_word);
}
}
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv)
{
if (tags->nr_reserved_tags)
bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
false);
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv)
{
struct blk_mq_tags *tags = hctx->tags;
if (tags->nr_reserved_tags)
bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
false);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
unsigned int i, used;
for (i = 0, used = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
used += bitmap_weight(&bm->word, bm->depth);
}
return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
unsigned int depth)
{
unsigned int tags_per_word = 1U << bt->bits_per_word;
unsigned int map_depth = depth;
if (depth) {
int i;
for (i = 0; i < bt->map_nr; i++) {
bt->map[i].depth = min(map_depth, tags_per_word);
map_depth -= bt->map[i].depth;
}
}
bt->wake_cnt = BT_WAIT_BATCH;
if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
bt->depth = depth;
}
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
int node, bool reserved)
{
int i;
bt->bits_per_word = ilog2(BITS_PER_LONG);
/*
* Depth can be zero for reserved tags, that's not a failure
* condition.
*/
if (depth) {
unsigned int nr, tags_per_word;
tags_per_word = (1 << bt->bits_per_word);
/*
* If the tag space is small, shrink the number of tags
* per word so we spread over a few cachelines, at least.
* If less than 4 tags, just forget about it, it's not
* going to work optimally anyway.
*/
if (depth >= 4) {
while (tags_per_word * 4 > depth) {
bt->bits_per_word--;
tags_per_word = (1 << bt->bits_per_word);
}
}
nr = ALIGN(depth, tags_per_word) / tags_per_word;
bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
GFP_KERNEL, node);
if (!bt->map)
return -ENOMEM;
bt->map_nr = nr;
}
bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
if (!bt->bs) {
kfree(bt->map);
bt->map = NULL;
return -ENOMEM;
}
bt_update_count(bt, depth);
for (i = 0; i < BT_WAIT_QUEUES; i++) {
init_waitqueue_head(&bt->bs[i].wait);
atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
}
return 0;
}
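/*
 * Sizing example (assumed BITS_PER_LONG = 64, depth = 32): bits_per_word
 * starts at ilog2(64) = 6 (64 tags per word) and is shrunk while
 * 4 * tags_per_word > 32, ending at 3 (8 tags per word); then
 * nr = ALIGN(32, 8) / 8 = 4 cacheline-aligned maps, spreading the 32
 * tags over several cachelines to reduce contention.
 */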
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
kfree(bt->map);
kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
tags->alloc_policy = alloc_policy;
if (bt_alloc(&tags->bitmap_tags, depth, node, false))
goto enomem;
if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
goto enomem;
return tags;
enomem:
bt_free(&tags->bitmap_tags);
kfree(tags);
return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
unsigned int reserved_tags,
int node, int alloc_policy)
{
struct blk_mq_tags *tags;
if (total_tags > BLK_MQ_TAG_MAX) {
pr_err("blk-mq: tag depth too large\n");
return NULL;
}
tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
if (!tags)
return NULL;
if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
kfree(tags);
return NULL;
}
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
bt_free(&tags->bitmap_tags);
bt_free(&tags->breserved_tags);
free_cpumask_var(tags->cpumask);
kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
*tag = prandom_u32() % depth;
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
tdepth -= tags->nr_reserved_tags;
if (tdepth > tags->nr_tags)
return -EINVAL;
/*
* Don't need (or can't) update reserved tags here; they remain
* static and should never need resizing.
*/
bt_update_count(&tags->bitmap_tags, tdepth);
blk_mq_tag_wakeup_all(tags, false);
return 0;
}
/**
* blk_mq_unique_tag() - return a tag that is unique queue-wide
* @rq: request for which to compute a unique tag
*
* The tag field in struct request is unique per hardware queue but not over
* all hardware queues. Hence this function, which returns a tag with the
* hardware context index in the upper bits and the per-hardware-queue tag in
* the lower bits (a decoding sketch follows the function body).
*
* Note: When called for a request that is queued on a non-multiqueue request
* queue, the hardware context index is set to zero.
*/
u32 blk_mq_unique_tag(struct request *rq)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
int hwq = 0;
if (q->mq_ops) {
hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}
return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
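/*
* Decoding sketch (not part of this file): a consumer such as a SCSI LLD
* can split the value back apart with the blk_mq_unique_tag_to_hwq() and
* blk_mq_unique_tag_to_tag() helpers from blk-mq.h, which reduce to:
*
*	hwq = unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
*	tag = unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
*/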
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
unsigned int free, res;
if (!tags)
return 0;
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
"bits_per_word=%u\n",
tags->nr_tags, tags->nr_reserved_tags,
tags->bitmap_tags.bits_per_word);
free = bt_unused_tags(&tags->bitmap_tags);
res = bt_unused_tags(&tags->breserved_tags);
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
return page - orig_page;
}
| gpl-2.0 |
xsynergy510x/android_kernel_samsung_jf | drivers/net/wireless/ipsecdrvtl/ai.c | 168 | 8112 | /*
'hmac.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Wed Dec 18 14:11:10 2013
*/
#include"cobf.h"
#ifdef _WIN32
#if defined( UNDER_CE) && defined( bb344) || ! defined( bb338)
#define bb340 1
#define bb336 1
#else
#define bb351 bb357
#define bb330 1
#define bb352 1
#endif
#define bb353 1
#include"uncobf.h"
#include<ndis.h>
#include"cobf.h"
#ifdef UNDER_CE
#include"uncobf.h"
#include<ndiswan.h>
#include"cobf.h"
#endif
#include"uncobf.h"
#include<stdio.h>
#include<basetsd.h>
#include"cobf.h"
bba bbs bbl bbf, *bb1;bba bbs bbe bbq, *bb94;bba bb135 bb124, *bb337;
bba bbs bbl bb39, *bb72;bba bbs bb135 bbk, *bb59;bba bbe bbu, *bb133;
bba bbh bbf*bb89;
#ifdef bb308
bba bbd bb60, *bb122;
#endif
#else
#include"uncobf.h"
#include<linux/module.h>
#include<linux/ctype.h>
#include<linux/time.h>
#include<linux/slab.h>
#include"cobf.h"
#ifndef bb116
#define bb116
#ifdef _WIN32
#include"uncobf.h"
#include<wtypes.h>
#include"cobf.h"
#else
#ifdef bb113
#include"uncobf.h"
#include<linux/types.h>
#include"cobf.h"
#else
#include"uncobf.h"
#include<stddef.h>
#include<sys/types.h>
#include"cobf.h"
#endif
#endif
#ifdef _WIN32
bba bb111 bb255;
#else
bba bbe bbu, *bb133, *bb279;
#define bb201 1
#define bb202 0
bba bb271 bb228, *bb217, *bb230;bba bbe bb237, *bb250, *bb286;bba bbs
bbq, *bb94, *bb288;bba bb6 bb223, *bb284;bba bbs bb6 bb227, *bb258;
bba bb6 bb117, *bb240;bba bbs bb6 bb63, *bb241;bba bb63 bb257, *bb229
;bba bb63 bb276, *bb291;bba bb117 bb111, *bb249;bba bb289 bb262;bba
bb209 bb124;bba bb270 bb82;bba bb115 bb114;bba bb115 bb274;
#ifdef bb226
bba bb236 bb39, *bb72;bba bb254 bbk, *bb59;bba bb252 bbd, *bb29;bba
bb269 bb56, *bb119;
#else
bba bb264 bb39, *bb72;bba bb256 bbk, *bb59;bba bb278 bbd, *bb29;bba
bb207 bb56, *bb119;
#endif
bba bb39 bbf, *bb1, *bb224;bba bbk bb244, *bb214, *bb221;bba bbk bb275
, *bb210, *bb247;bba bbd bb60, *bb122, *bb205;bba bb82 bb37, *bb266, *
bb242;bba bbd bb235, *bb211, *bb222;bba bb114 bb251, *bb268, *bb232;
bba bb56 bb225, *bb280, *bb273;
#define bb141 bbb
bba bbb*bb212, *bb77;bba bbh bbb*bb231;bba bbl bb208;bba bbl*bb233;
bba bbh bbl*bb83;
#if defined( bb113)
bba bbe bb112;
#endif
bba bb112 bb19;bba bb19*bb234;bba bbh bb19*bb188;
#if defined( bb283) || defined( bb238)
bba bb19 bb36;bba bb19 bb120;
#else
bba bbl bb36;bba bbs bbl bb120;
#endif
bba bbh bb36*bb261;bba bb36*bb267;bba bb60 bb265, *bb216;bba bbb*
bb107;bba bb107*bb239;
#define bb215( bb35) bbi bb35##__ { bbe bb219; }; bba bbi bb35##__ * \
bb35
bba bbi{bb37 bb190,bb246,bb243,bb245;}bb272, *bb281, *bb260;bba bbi{
bb37 bb8,bb193;}bb292, *bb263, *bb277;bba bbi{bb37 bb218,bb248;}bb220
, *bb213, *bb259;
#endif
bba bbh bbf*bb89;
#endif
bba bbf bb101;
#define IN
#define OUT
#ifdef _DEBUG
#define bb145( bbc) bb32( bbc)
#else
#define bb145( bbc) ( bbb)( bbc)
#endif
bba bbe bb161, *bb173;
#define bb287 0
#define bb312 1
#define bb296 2
#define bb323 3
#define bb343 4
bba bbe bb349;bba bbb*bb121;
#endif
#ifdef _WIN32
#ifndef UNDER_CE
#define bb31 bb341
#define bb43 bb346
bba bbs bb6 bb31;bba bb6 bb43;
#endif
#else
#endif
#ifdef _WIN32
bbb*bb128(bb31 bb47);bbb bb108(bbb* );bbb*bb137(bb31 bb159,bb31 bb47);
#else
#define bb128( bbc) bb147(1, bbc, bb140)
#define bb108( bbc) bb331( bbc)
#define bb137( bbc, bbn) bb147( bbc, bbn, bb140)
#endif
#ifdef _WIN32
#define bb32( bbc) bb339( bbc)
#else
#ifdef _DEBUG
bbe bb144(bbh bbl*bb95,bbh bbl*bb25,bbs bb285);
#define bb32( bbc) ( bbb)(( bbc) || ( bb144(# bbc, __FILE__, __LINE__ \
)))
#else
#define bb32( bbc) (( bbb)0)
#endif
#endif
bb43 bb302(bb43*bb324);
#ifndef _WIN32
bbe bb328(bbh bbl*bbg);bbe bb321(bbh bbl*bb20,...);
#endif
#ifdef _WIN32
bba bb342 bb96;
#define bb139( bbc) bb354( bbc)
#define bb143( bbc) bb329( bbc)
#define bb134( bbc) bb348( bbc)
#define bb132( bbc) bb332( bbc)
#else
bba bb334 bb96;
#define bb139( bbc) ( bbb)( * bbc = bb356( bbc))
#define bb143( bbc) (( bbb)0)
#define bb134( bbc) bb333( bbc)
#define bb132( bbc) bb358( bbc)
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbi{bbd bb9;bbd bb23[4 ];bbf bb102[64 ];}bb530;bbb bb1816(bb530*bbj
);bbb bb1361(bb530*bbj,bbh bbb*bb498,bbq bb9);bbb bb1819(bb530*bbj,
bbb*bb14);bbb bb1852(bbb*bb14,bbh bbb*bb5,bbq bb9);bbb bb1964(bbb*
bb14,bb83 bb5);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbi{bbd bb9;bbd bb23[5 ];bbf bb102[64 ];}bb524;bbb bb1794(bb524*bbj
);bbb bb1290(bb524*bbj,bbh bbb*bb5,bbq bb9);bbb bb1802(bb524*bbj,bbb*
bb14);bba bbi{bbd bb9;bbd bb23[8 ];bbf bb102[64 ];}bb529;bbb bb1818(
bb529*bbj);bbb bb1292(bb529*bbj,bbh bbb*bb5,bbq bb9);bbb bb1814(bb529
*bbj,bbb*bb14);bba bbi{bbd bb9;bb56 bb23[8 ];bbf bb102[128 ];}bb463;
bbb bb1808(bb463*bbj);bbb bb1228(bb463*bbj,bbh bbb*bb5,bbq bb9);bbb
bb1835(bb463*bbj,bbb*bb14);bba bb463 bb926;bbb bb1797(bb926*bbj);bbb
bb1810(bb926*bbj,bbb*bb14);bbb bb1902(bbb*bb14,bbh bbb*bb5,bbq bb9);
bbb bb1866(bbb*bb14,bbh bbb*bb5,bbq bb9);bbb bb1849(bbb*bb14,bbh bbb*
bb5,bbq bb9);bbb bb1929(bbb*bb14,bbh bbb*bb5,bbq bb9);bbb bb2016(bbb*
bb14,bb83 bb5);bbb bb1965(bbb*bb14,bb83 bb5);bbb bb2025(bbb*bb14,bb83
bb5);bbb bb2022(bbb*bb14,bb83 bb5);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbi{bbd bb9;bbd bb23[5 ];bbf bb102[64 ];}bb525;bbb bb1801(bb525*bbj
);bbb bb1295(bb525*bbj,bbh bbb*bb498,bbq bb9);bbb bb1795(bb525*bbj,
bbb*bb14);bbb bb1924(bbb*bb14,bbh bbb*bb5,bbq bb9);bbb bb1973(bbb*
bb14,bb83 bb5);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbi{bbd bb9;bbd bb23[5 ];bbf bb102[64 ];}bb532;bbb bb1804(bb532*bbj
);bbb bb1356(bb532*bbj,bbh bbb*bb498,bbq bb9);bbb bb1838(bb532*bbj,
bbb*bb14);bbb bb1875(bbb*bb14,bbh bbb*bb5,bbq bb9);bbb bb2002(bbb*
bb14,bb83 bb5);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbb( *bb1059)(bbb*bbj);bba bbb( *bb839)(bbb*bbj,bbh bbb*bb5,bbq
bb9);bba bbb( *bb784)(bbb*bbj,bbb*bb14);bba bbi{bbe bb129;bbq bb38;
bbq bb393;bb1059 bb887;bb839 bb180;bb784 bb746;}bb448;bbb bb1858(
bb448*bbj,bbe bb129);bba bbi{bb448 bbn;bbf bbt[256 -bb12(bb448)];}
bb454;bbb bb1984(bb454*bbj,bbe bb129);bbb bb1991(bb454*bbj);bbb bb2020
(bb454*bbj,bbe bb129);bbb bb1983(bb454*bbj,bbh bbb*bb5,bbq bb9);bbb
bb1976(bb454*bbj,bbb*bb14);bbb bb1989(bbe bb129,bbb*bb14,bbh bbb*bb5,
bbq bb9);bbb bb2049(bbe bb129,bbb*bb14,bb83 bb5);bb83 bb1970(bbe bb129
);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
bbr"\x43"{
#endif
bba bbi{bb448 bbn;bbf bb543[(512 -bb12(bb448))/2 ];bbf bb1341[(512 -bb12
(bb448))/2 ];}bb494;bbb bb1961(bb494*bbj,bbe bb595);bbb bb2005(bb494*
bbj,bbh bbb*bb71,bbq bb142);bbb bb2108(bb494*bbj,bbe bb595,bbh bbb*
bb71,bbq bb142);bbb bb1988(bb494*bbj,bbh bbb*bb5,bbq bb9);bbb bb2007(
bb494*bbj,bbb*bb14);bbb bb2107(bbe bb595,bbh bbb*bb71,bbq bb142,bbb*
bb14,bbh bbb*bb5,bbq bb9);bbb bb2190(bbe bb595,bb83 bb71,bbb*bb14,
bb83 bb5);
#ifdef __cplusplus
}
#endif
bbb bb1961(bb494*bbj,bbe bb595){bb1858(&bbj->bbn,bb595);}bbb bb2005(
bb494*bbj,bbh bbb*bb1303,bbq bb142){bb448 bbn=bbj->bbn;bb1 bb543=bbj
->bb543,bb1341=bbj->bb1341;bbh bbf*bb71=(bbh bbf* )bb1303;bbf bb2272[
256 ],bb1551[256 ];bbn.bb887(bb543);bbm(bb142>bbn.bb38){bbn.bb180(bb543
,bb71,bb142);bb32(bbn.bb393<=bb12(bb2272));bbn.bb746(bb543,bb2272);
bb71=bb2272;bb142=bbn.bb393;bb32(bb142<=bbn.bb38);}{bbq bbz;bb32(bbn.
bb38<=bb12(bb1551));bb90(bbz=0 ;bbz<bbn.bb38;bbz++)bb1551[bbz]=0x36 ^(
bbz<bb142?bb71[bbz]:0 );bbn.bb887(bb543);bbn.bb180(bb543,bb1551,bbn.
bb38);}{bbq bbz;bb90(bbz=0 ;bbz<bbn.bb38;bbz++)bb1551[bbz]=0x5c ^(bbz<
bb142?bb71[bbz]:0 );bbn.bb887(bb1341);bbn.bb180(bb1341,bb1551,bbn.bb38
);}}bbb bb2108(bb494*bbj,bbe bb595,bbh bbb*bb71,bbq bb142){bb1961(bbj
,bb595);bb2005(bbj,bb71,bb142);}bbb bb1988(bb494*bbj,bbh bbb*bb5,bbq
bb9){bbj->bbn.bb180(bbj->bb543,bb5,bb9);}bbb bb2007(bb494*bbj,bbb*
bb14){bb448 bbn=bbj->bbn;bb1 bb543=bbj->bb543,bb1341=bbj->bb1341;bbn.
bb746(bb543,bb14);bbn.bb180(bb1341,bb14,bbn.bb393);bbn.bb746(bb1341,
bb14);}bbb bb2107(bbe bb595,bbh bbb*bb71,bbq bb142,bbb*bb14,bbh bbb*
bb5,bbq bb9){bb494 bb97;bb2108(&bb97,bb595,bb71,bb142);bb1988(&bb97,
bb5,bb9);bb2007(&bb97,bb14);}bbb bb2190(bbe bb595,bb83 bb71,bbb*bb14,
bb83 bb5){bb2107(bb595,bb71,(bbq)bb1304(bb71),bb14,bb5,(bbq)bb1304(
bb5));}
| gpl-2.0 |
blackb1rd/android_kernel_samsung_d2 | arch/arm/mach-msm/board-8064-gpu.c | 424 | 6471 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/kgsl.h>
#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/msm_dcvs.h>
#include <mach/socinfo.h>
#include "devices.h"
#include "board-8064.h"
#ifdef CONFIG_MSM_DCVS
static struct msm_dcvs_freq_entry grp3d_freq[] = {
{0, 900, 0, 0, 0},
{0, 950, 0, 0, 0},
{0, 950, 0, 0, 0},
{0, 1200, 1, 100, 100},
};
static struct msm_dcvs_core_info grp3d_core_info = {
.freq_tbl = &grp3d_freq[0],
.num_cores = 1,
.sensors = (int[]){0},
.thermal_poll_ms = 60000,
.core_param = {
.core_type = MSM_DCVS_CORE_TYPE_GPU,
},
.algo_param = {
.disable_pc_threshold = 0,
.em_win_size_min_us = 100000,
.em_win_size_max_us = 300000,
.em_max_util_pct = 97,
.group_id = 0,
.max_freq_chg_time_us = 100000,
.slack_mode_dynamic = 0,
.slack_time_min_us = 39000,
.slack_time_max_us = 39000,
.ss_win_size_min_us = 1000000,
.ss_win_size_max_us = 1000000,
.ss_util_pct = 95,
.ss_no_corr_below_freq = 0,
},
.energy_coeffs = {
.leakage_coeff_a = -17720,
.leakage_coeff_b = 37,
.leakage_coeff_c = 3329,
.leakage_coeff_d = -277,
.active_coeff_a = 2492,
.active_coeff_b = 0,
.active_coeff_c = 0
},
.power_param = {
.current_temp = 25,
.num_freq = ARRAY_SIZE(grp3d_freq),
}
};
#endif /* CONFIG_MSM_DCVS */
#ifdef CONFIG_MSM_BUS_SCALING
static struct msm_bus_vectors grp3d_init_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
{
.src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
static struct msm_bus_vectors grp3d_low_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(1000),
},
{
.src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(1000),
},
};
static struct msm_bus_vectors grp3d_nominal_low_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2000),
},
{
.src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2000),
},
};
static struct msm_bus_vectors grp3d_nominal_high_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2656),
},
{
.src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(2656),
},
};
static struct msm_bus_vectors grp3d_max_vectors[] = {
{
.src = MSM_BUS_MASTER_GRAPHICS_3D,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(4264),
},
{
.src = MSM_BUS_MASTER_GRAPHICS_3D_PORT1,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = KGSL_CONVERT_TO_MBPS(4264),
},
};
static struct msm_bus_paths grp3d_bus_scale_usecases[] = {
{
ARRAY_SIZE(grp3d_init_vectors),
grp3d_init_vectors,
},
{
ARRAY_SIZE(grp3d_low_vectors),
grp3d_low_vectors,
},
{
ARRAY_SIZE(grp3d_nominal_low_vectors),
grp3d_nominal_low_vectors,
},
{
ARRAY_SIZE(grp3d_nominal_high_vectors),
grp3d_nominal_high_vectors,
},
{
ARRAY_SIZE(grp3d_max_vectors),
grp3d_max_vectors,
},
};
static struct msm_bus_scale_pdata grp3d_bus_scale_pdata = {
grp3d_bus_scale_usecases,
ARRAY_SIZE(grp3d_bus_scale_usecases),
.name = "grp3d",
};
#endif
static struct resource kgsl_3d0_resources[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0x04300000, /* GFX3D address */
.end = 0x0431ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_IRQ,
.start = GFX3D_IRQ,
.end = GFX3D_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static const struct kgsl_iommu_ctx kgsl_3d0_iommu0_ctxs[] = {
{ "gfx3d_user", 0 },
{ "gfx3d_priv", 1 },
};
static const struct kgsl_iommu_ctx kgsl_3d0_iommu1_ctxs[] = {
{ "gfx3d1_user", 0 },
{ "gfx3d1_priv", 1 },
};
static struct kgsl_device_iommu_data kgsl_3d0_iommu_data[] = {
{
.iommu_ctxs = kgsl_3d0_iommu0_ctxs,
.iommu_ctx_count = ARRAY_SIZE(kgsl_3d0_iommu0_ctxs),
.physstart = 0x07C00000,
.physend = 0x07C00000 + SZ_1M - 1,
},
{
.iommu_ctxs = kgsl_3d0_iommu1_ctxs,
.iommu_ctx_count = ARRAY_SIZE(kgsl_3d0_iommu1_ctxs),
.physstart = 0x07D00000,
.physend = 0x07D00000 + SZ_1M - 1,
},
};
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwrlevel = {
{
.gpu_freq = 400000000,
.bus_freq = 4,
.io_fraction = 0,
},
{
.gpu_freq = 320000000,
.bus_freq = 3,
.io_fraction = 33,
},
{
.gpu_freq = 200000000,
.bus_freq = 2,
.io_fraction = 100,
},
{
.gpu_freq = 128000000,
.bus_freq = 1,
.io_fraction = 100,
},
{
.gpu_freq = 27000000,
.bus_freq = 0,
},
},
.init_level = 1,
.num_levels = 5,
.set_grp_async = NULL,
.idle_timeout = HZ/10,
.nap_allowed = true,
.strtstp_sleepwake = true,
.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
.bus_scale_table = &grp3d_bus_scale_pdata,
#endif
.iommu_data = kgsl_3d0_iommu_data,
.iommu_count = ARRAY_SIZE(kgsl_3d0_iommu_data),
#ifdef CONFIG_MSM_DCVS
.core_info = &grp3d_core_info,
#endif
};
struct platform_device device_kgsl_3d0 = {
.name = "kgsl-3d0",
.id = 0,
.num_resources = ARRAY_SIZE(kgsl_3d0_resources),
.resource = kgsl_3d0_resources,
.dev = {
.platform_data = &kgsl_3d0_pdata,
},
};
void __init apq8064_init_gpu(void)
{
unsigned int version = socinfo_get_version();
if (cpu_is_apq8064ab())
kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 450000000;
if (SOCINFO_VERSION_MAJOR(version) == 2) {
kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 2, 0, 2);
} else {
if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
(SOCINFO_VERSION_MINOR(version) == 1))
kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 2, 0, 1);
else
kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 2, 0, 0);
}
platform_device_register(&device_kgsl_3d0);
}
| gpl-2.0 |
xb446909/personalprojects | crosstool/source/linux-4.1.2/drivers/rtc/class.c | 424 | 9479 | /*
* RTC subsystem, base class
*
* Copyright (C) 2005 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* class skeleton from drivers/hwmon/hwmon.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/kdev_t.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "rtc-core.h"
static DEFINE_IDA(rtc_ida);
struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
ida_simple_remove(&rtc_ida, rtc->id);
kfree(rtc);
}
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
/* Result of the last attempt to set the system clock from the RTC. */
int rtc_hctosys_ret = -ENODEV;
#endif
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/*
* On suspend(), measure the delta between one RTC and the
* system's wall clock; restore it on resume().
*/
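/*
* Bookkeeping sketch: old_rtc/old_system snapshot both clocks at suspend,
* and old_delta remembers their last (system - rtc) offset so repeated
* suspend cycles can be drift-compensated below.
*/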
static struct timespec64 old_rtc, old_system, old_delta;
static int rtc_suspend(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
struct timespec64 delta, delta_delta;
int err;
if (timekeeping_rtc_skipsuspend())
return 0;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
/* snapshot the current RTC and system time at suspend*/
err = rtc_read_time(rtc, &tm);
if (err < 0) {
pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
return 0;
}
getnstimeofday64(&old_system);
old_rtc.tv_sec = rtc_tm_to_time64(&tm);
/*
* To avoid drift caused by repeated suspend/resumes,
* which each can add ~1 second drift error,
* try to compensate so the difference in system time
* and rtc time stays close to constant.
*/
delta = timespec64_sub(old_system, old_rtc);
delta_delta = timespec64_sub(delta, old_delta);
if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* If delta_delta is too large, assume a time correction
* has occurred and set old_delta to the current delta.
*/
old_delta = delta;
} else {
/* Otherwise try to adjust old_system to compensate */
old_system = timespec64_sub(old_system, delta_delta);
}
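/*
* Illustrative numbers (assumed): if old_delta was 5.0s and this suspend
* measures delta = 5.3s, delta_delta is 0.3s, well inside the +/-2s window,
* so old_system is nudged back by 0.3s and the sleep-time math keeps using
* a constant system-to-RTC offset. A 10s jump would instead be treated as
* a deliberate clock correction and simply become the new old_delta.
*/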
return 0;
}
static int rtc_resume(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
struct timespec64 new_system, new_rtc;
struct timespec64 sleep_time;
int err;
if (timekeeping_rtc_skipresume())
return 0;
rtc_hctosys_ret = -ENODEV;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
/* snapshot the current rtc and system time at resume */
getnstimeofday64(&new_system);
err = rtc_read_time(rtc, &tm);
if (err < 0) {
pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
return 0;
}
new_rtc.tv_sec = rtc_tm_to_time64(&tm);
new_rtc.tv_nsec = 0;
if (new_rtc.tv_sec < old_rtc.tv_sec) {
pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
/* calculate the RTC time delta (sleep time)*/
sleep_time = timespec64_sub(new_rtc, old_rtc);
/*
* Since these RTC suspend/resume handlers are not called
* at the very end of suspend or the start of resume,
* some run-time may pass on either sides of the sleep time
* so subtract kernel run-time between rtc_suspend to rtc_resume
* to keep things accurate.
*/
sleep_time = timespec64_sub(sleep_time,
timespec64_sub(new_system, old_system));
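/*
* Arithmetic sketch: if the RTC advanced 100s across suspend but 3s of
* that elapsed while the kernel was still running (new_system - old_system),
* only 97s are injected as sleep time.
*/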
if (sleep_time.tv_sec >= 0)
timekeeping_inject_sleeptime64(&sleep_time);
rtc_hctosys_ret = 0;
return 0;
}
static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
#define RTC_CLASS_DEV_PM_OPS (&rtc_class_dev_pm_ops)
#else
#define RTC_CLASS_DEV_PM_OPS NULL
#endif
/**
* rtc_device_register - register w/ RTC class
* @name: the name of the device
* @dev: the device to register
* @ops: the rtc operations structure
* @owner: the module owner
*
* rtc_device_unregister() must be called when the class device is no
* longer needed.
*
* Returns the pointer to the new struct class device.
*/
struct rtc_device *rtc_device_register(const char *name, struct device *dev,
const struct rtc_class_ops *ops,
struct module *owner)
{
struct rtc_device *rtc;
struct rtc_wkalrm alrm;
int of_id = -1, id = -1, err;
if (dev->of_node)
of_id = of_alias_get_id(dev->of_node, "rtc");
else if (dev->parent && dev->parent->of_node)
of_id = of_alias_get_id(dev->parent->of_node, "rtc");
if (of_id >= 0) {
id = ida_simple_get(&rtc_ida, of_id, of_id + 1,
GFP_KERNEL);
if (id < 0)
dev_warn(dev, "/aliases ID %d not available\n",
of_id);
}
if (id < 0) {
id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
err = id;
goto exit;
}
}
rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL);
if (rtc == NULL) {
err = -ENOMEM;
goto exit_ida;
}
rtc->id = id;
rtc->ops = ops;
rtc->owner = owner;
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
rtc->dev.parent = dev;
rtc->dev.class = rtc_class;
rtc->dev.release = rtc_device_release;
mutex_init(&rtc->ops_lock);
spin_lock_init(&rtc->irq_lock);
spin_lock_init(&rtc->irq_task_lock);
init_waitqueue_head(&rtc->irq_queue);
/* Init timerqueue */
timerqueue_init_head(&rtc->timerqueue);
INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
/* Init aie timer */
rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
/* Init uie timer */
rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
/* Init pie timer */
hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
dev_set_name(&rtc->dev, "rtc%d", id);
/* Check to see if there is an ALARM already set in hw */
err = __rtc_read_alarm(rtc, &alrm);
if (!err && !rtc_valid_tm(&alrm.time))
rtc_initialize_alarm(rtc, &alrm);
rtc_dev_prepare(rtc);
err = device_register(&rtc->dev);
if (err) {
put_device(&rtc->dev);
goto exit_kfree;
}
rtc_dev_add_device(rtc);
rtc_sysfs_add_device(rtc);
rtc_proc_add_device(rtc);
dev_info(dev, "rtc core: registered %s as %s\n",
rtc->name, dev_name(&rtc->dev));
return rtc;
exit_kfree:
kfree(rtc);
exit_ida:
ida_simple_remove(&rtc_ida, id);
exit:
dev_err(dev, "rtc core: unable to register %s, err = %d\n",
name, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rtc_device_register);
/**
* rtc_device_unregister - removes the previously registered RTC class device
*
* @rtc: the RTC class device to destroy
*/
void rtc_device_unregister(struct rtc_device *rtc)
{
if (get_device(&rtc->dev) != NULL) {
mutex_lock(&rtc->ops_lock);
/* remove innards of this RTC, then disable it, before
* letting any rtc_class_open() users access it again
*/
rtc_sysfs_del_device(rtc);
rtc_dev_del_device(rtc);
rtc_proc_del_device(rtc);
device_unregister(&rtc->dev);
rtc->ops = NULL;
mutex_unlock(&rtc->ops_lock);
put_device(&rtc->dev);
}
}
EXPORT_SYMBOL_GPL(rtc_device_unregister);
static void devm_rtc_device_release(struct device *dev, void *res)
{
struct rtc_device *rtc = *(struct rtc_device **)res;
rtc_device_unregister(rtc);
}
static int devm_rtc_device_match(struct device *dev, void *res, void *data)
{
struct rtc_device **r = res;
return *r == data;
}
/**
* devm_rtc_device_register - resource managed rtc_device_register()
* @dev: the device to register
* @name: the name of the device
* @ops: the rtc operations structure
* @owner: the module owner
*
* Returns a struct rtc_device on success, or an ERR_PTR on error.
*
* Managed rtc_device_register(). The rtc_device returned by this function
* is automatically freed on driver detach (a usage sketch follows the
* function body). See rtc_device_register() for more information.
*/
struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
const struct rtc_class_ops *ops,
struct module *owner)
{
struct rtc_device **ptr, *rtc;
ptr = devres_alloc(devm_rtc_device_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
rtc = rtc_device_register(name, dev, ops, owner);
if (!IS_ERR(rtc)) {
*ptr = rtc;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return rtc;
}
EXPORT_SYMBOL_GPL(devm_rtc_device_register);
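/*
* Minimal usage sketch (hypothetical driver; names are illustrative):
*
*	static int foo_rtc_probe(struct platform_device *pdev)
*	{
*		struct rtc_device *rtc;
*
*		rtc = devm_rtc_device_register(&pdev->dev, "foo-rtc",
*					       &foo_rtc_ops, THIS_MODULE);
*		return PTR_ERR_OR_ZERO(rtc);
*	}
*
* No remove-side call is needed; devres unregisters the rtc on detach.
*/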
/**
* devm_rtc_device_unregister - resource managed devm_rtc_device_unregister()
* @dev: the device to unregister
* @rtc: the RTC class device to unregister
*
* Deallocates an rtc_device allocated with devm_rtc_device_register().
* Normally this function does not need to be called; the resource-management
* code ensures that the resource is freed.
*/
void devm_rtc_device_unregister(struct device *dev, struct rtc_device *rtc)
{
int rc;
rc = devres_release(dev, devm_rtc_device_release,
devm_rtc_device_match, rtc);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_rtc_device_unregister);
static int __init rtc_init(void)
{
rtc_class = class_create(THIS_MODULE, "rtc");
if (IS_ERR(rtc_class)) {
pr_err("couldn't create class\n");
return PTR_ERR(rtc_class);
}
rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
rtc_dev_init();
rtc_sysfs_init(rtc_class);
return 0;
}
static void __exit rtc_exit(void)
{
rtc_dev_exit();
class_destroy(rtc_class);
ida_destroy(&rtc_ida);
}
subsys_initcall(rtc_init);
module_exit(rtc_exit);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("RTC class support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
m-creations/openwrt | target/linux/generic/files/crypto/ocf/ocf-bench.c | 424 | 13227 | /*
* A loadable module that benchmarks the OCF crypto speed from kernel space.
*
* Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com>
*
* LICENSE TERMS
*
* The free distribution and use of this software in both source and binary
* form is allowed (with or without changes) provided that:
*
* 1. distributions of this source code include the above copyright
* notice, this list of conditions and the following disclaimer;
*
* 2. distributions in binary form include the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other associated materials;
*
* 3. the copyright holder's name is not used to endorse products
* built using this software without specific written permission.
*
* ALTERNATIVELY, provided that this notice is retained in full, this product
* may be distributed under the terms of the GNU General Public License (GPL),
* in which case the provisions of the GPL apply INSTEAD OF those given above.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
*/
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <cryptodev.h>
#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
#define BENCH_IXP_ACCESS_LIB 1
#endif
#ifdef BENCH_IXP_ACCESS_LIB
#include <IxTypes.h>
#include <IxOsBuffMgt.h>
#include <IxNpeDl.h>
#include <IxCryptoAcc.h>
#include <IxQMgr.h>
#include <IxOsServices.h>
#include <IxOsCacheMMU.h>
#endif
/*
* support for access lib version 1.4
*/
#ifndef IX_MBUF_PRIV
#define IX_MBUF_PRIV(x) ((x)->priv)
#endif
/*
* the number of simultaneously active requests
*/
static int request_q_len = 40;
module_param(request_q_len, int, 0);
MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
/*
* how many requests we want to have processed
*/
static int request_num = 1024;
module_param(request_num, int, 0);
MODULE_PARM_DESC(request_num, "run for at least this many requests");
/*
* the size of each request
*/
static int request_size = 1488;
module_param(request_size, int, 0);
MODULE_PARM_DESC(request_size, "size of each request");
/*
* OCF batching of requests
*/
static int request_batch = 1;
module_param(request_batch, int, 0);
MODULE_PARM_DESC(request_batch, "enable OCF request batching");
/*
* OCF immediate callback on completion
*/
static int request_cbimm = 1;
module_param(request_cbimm, int, 0);
MODULE_PARM_DESC(request_cbimm, "enable OCF immediate callback on completion");
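/*
* Example invocation (assumed module name; adjust to your build):
*
*	insmod ocf-bench.ko request_size=4096 request_q_len=64 request_num=8192
*
* Note the module deliberately "fails" to load (see ocfbench_init) so the
* benchmark can be re-run without an rmmod.
*/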
/*
* a structure for each request
*/
typedef struct {
struct work_struct work;
#ifdef BENCH_IXP_ACCESS_LIB
IX_MBUF mbuf;
#endif
unsigned char *buffer;
} request_t;
static request_t *requests;
static spinlock_t ocfbench_counter_lock;
static int outstanding;
static int total;
/*************************************************************************/
/*
* OCF benchmark routines
*/
static uint64_t ocf_cryptoid;
static unsigned long jstart, jstop;
static int ocf_init(void);
static int ocf_cb(struct cryptop *crp);
static void ocf_request(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ocf_request_wq(struct work_struct *work);
#endif
static int
ocf_init(void)
{
int error;
struct cryptoini crie, cria;
struct cryptodesc crda, crde;
memset(&crie, 0, sizeof(crie));
memset(&cria, 0, sizeof(cria));
memset(&crde, 0, sizeof(crde));
memset(&crda, 0, sizeof(crda));
cria.cri_alg = CRYPTO_SHA1_HMAC;
cria.cri_klen = 20 * 8;
cria.cri_key = "0123456789abcdefghij";
//crie.cri_alg = CRYPTO_3DES_CBC;
crie.cri_alg = CRYPTO_AES_CBC;
crie.cri_klen = 24 * 8;
crie.cri_key = "0123456789abcdefghijklmn";
crie.cri_next = &cria;
error = crypto_newsession(&ocf_cryptoid, &crie,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
if (error) {
printk("crypto_newsession failed %d\n", error);
return -1;
}
return 0;
}
static int
ocf_cb(struct cryptop *crp)
{
request_t *r = (request_t *) crp->crp_opaque;
unsigned long flags;
if (crp->crp_etype)
printk("Error in OCF processing: %d\n", crp->crp_etype);
crypto_freereq(crp);
crp = NULL;
/* do all requests but take at least 1 second */
spin_lock_irqsave(&ocfbench_counter_lock, flags);
total++;
if (total > request_num && jstart + HZ < jiffies) {
outstanding--;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
return 0;
}
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
schedule_work(&r->work);
return 0;
}
static void
ocf_request(void *arg)
{
request_t *r = arg;
struct cryptop *crp = crypto_getreq(2);
struct cryptodesc *crde, *crda;
unsigned long flags;
if (!crp) {
spin_lock_irqsave(&ocfbench_counter_lock, flags);
outstanding--;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
return;
}
crde = crp->crp_desc;
crda = crde->crd_next;
crda->crd_skip = 0;
crda->crd_flags = 0;
crda->crd_len = request_size;
crda->crd_inject = request_size;
crda->crd_alg = CRYPTO_SHA1_HMAC;
crda->crd_key = "0123456789abcdefghij";
crda->crd_klen = 20 * 8;
crde->crd_skip = 0;
crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
crde->crd_len = request_size;
crde->crd_inject = request_size;
//crde->crd_alg = CRYPTO_3DES_CBC;
crde->crd_alg = CRYPTO_AES_CBC;
crde->crd_key = "0123456789abcdefghijklmn";
crde->crd_klen = 24 * 8;
crp->crp_ilen = request_size + 64;
crp->crp_flags = 0;
if (request_batch)
crp->crp_flags |= CRYPTO_F_BATCH;
if (request_cbimm)
crp->crp_flags |= CRYPTO_F_CBIMM;
crp->crp_buf = (caddr_t) r->buffer;
crp->crp_callback = ocf_cb;
crp->crp_sid = ocf_cryptoid;
crp->crp_opaque = (caddr_t) r;
crypto_dispatch(crp);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ocf_request_wq(struct work_struct *work)
{
request_t *r = container_of(work, request_t, work);
ocf_request(r);
}
#endif
static void
ocf_done(void)
{
crypto_freesession(ocf_cryptoid);
}
/*************************************************************************/
#ifdef BENCH_IXP_ACCESS_LIB
/*************************************************************************/
/*
* CryptoAcc benchmark routines
*/
static IxCryptoAccCtx ixp_ctx;
static UINT32 ixp_ctx_id;
static IX_MBUF ixp_pri;
static IX_MBUF ixp_sec;
static int ixp_registered = 0;
static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
IxCryptoAccStatus status);
static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
IxCryptoAccStatus status);
static void ixp_request(void *arg);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void ixp_request_wq(struct work_struct *work);
#endif
static int
ixp_init(void)
{
IxCryptoAccStatus status;
ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
ixp_ctx.cipherCtx.cipherKeyLen = 24;
ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
ixp_ctx.authCtx.authDigestLen = 12;
ixp_ctx.authCtx.aadLen = 0;
ixp_ctx.authCtx.authKeyLen = 20;
memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
ixp_ctx.useDifferentSrcAndDestMbufs = 0;
ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
while (!ixp_registered)
schedule();
return ixp_registered < 0 ? -1 : 0;
}
printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
return -1;
}
static void
ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
{
if (bufp) {
IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
kfree(IX_MBUF_MDATA(bufp));
IX_MBUF_MDATA(bufp) = NULL;
}
if (IX_CRYPTO_ACC_STATUS_WAIT == status)
return;
if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
ixp_registered = 1;
else
ixp_registered = -1;
}
static void
ixp_perform_cb(
UINT32 ctx_id,
IX_MBUF *sbufp,
IX_MBUF *dbufp,
IxCryptoAccStatus status)
{
request_t *r = NULL;
unsigned long flags;
/* do all requests but take at least 1 second */
spin_lock_irqsave(&ocfbench_counter_lock, flags);
total++;
if (total > request_num && jstart + HZ < jiffies) {
outstanding--;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
return;
}
if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
printk("crappo %p %p\n", sbufp, r);
outstanding--;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
return;
}
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
schedule_work(&r->work);
}
static void
ixp_request(void *arg)
{
request_t *r = arg;
IxCryptoAccStatus status;
unsigned long flags;
memset(&r->mbuf, 0, sizeof(r->mbuf));
IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
IX_MBUF_MDATA(&r->mbuf) = r->buffer;
IX_MBUF_PRIV(&r->mbuf) = r;
status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
0, request_size, 0, request_size, request_size, r->buffer);
if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
printk("status1 = %d\n", status);
spin_lock_irqsave(&ocfbench_counter_lock, flags);
outstanding--;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
return;
}
return;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
ixp_request_wq(struct work_struct *work)
{
request_t *r = container_of(work, request_t, work);
ixp_request(r);
}
#endif
static void
ixp_done(void)
{
/* FIXME: the crypto context registered in ixp_init() is never unregistered */
}
/*************************************************************************/
#endif /* BENCH_IXP_ACCESS_LIB */
/*************************************************************************/
int
ocfbench_init(void)
{
int i;
unsigned long mbps;
unsigned long flags;
printk("Crypto Speed tests\n");
requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
if (!requests) {
printk("malloc failed\n");
return -EINVAL;
}
for (i = 0; i < request_q_len; i++) {
/* +64 for return data */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
INIT_WORK(&requests[i].work, ocf_request_wq);
#else
INIT_WORK(&requests[i].work, ocf_request, &requests[i]);
#endif
requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
if (!requests[i].buffer) {
printk("malloc failed\n");
return -EINVAL;
}
memset(requests[i].buffer, '0' + i, request_size + 128);
}
/*
* OCF benchmark
*/
printk("OCF: testing ...\n");
if (ocf_init() == -1)
return -EINVAL;
spin_lock_init(&ocfbench_counter_lock);
total = outstanding = 0;
jstart = jiffies;
for (i = 0; i < request_q_len; i++) {
spin_lock_irqsave(&ocfbench_counter_lock, flags);
outstanding++;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
ocf_request(&requests[i]);
}
while (outstanding > 0)
schedule();
jstop = jiffies;
mbps = 0;
if (jstop > jstart) {
mbps = (unsigned long) total * (unsigned long) request_size * 8;
mbps /= ((jstop - jstart) * 1000) / HZ;
}
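/*
* Worked example (illustrative): 1024 requests of 1488 bytes is 12,189,696
* bits; over 250 jiffies at HZ=100 (2500 ms) the division yields 4875,
* i.e. kbit/s, which the printk below formats as "4.875 Mbps".
*/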
printk("OCF: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
total, request_size, (int)(jstop - jstart),
((int)mbps) / 1000, ((int)mbps) % 1000);
ocf_done();
#ifdef BENCH_IXP_ACCESS_LIB
/*
* IXP benchmark
*/
printk("IXP: testing ...\n");
ixp_init();
total = outstanding = 0;
jstart = jiffies;
for (i = 0; i < request_q_len; i++) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
INIT_WORK(&requests[i].work, ixp_request_wq);
#else
INIT_WORK(&requests[i].work, ixp_request, &requests[i]);
#endif
spin_lock_irqsave(&ocfbench_counter_lock, flags);
outstanding++;
spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
ixp_request(&requests[i]);
}
while (outstanding > 0)
schedule();
jstop = jiffies;
mbps = 0;
if (jstop > jstart) {
mbps = (unsigned long) total * (unsigned long) request_size * 8;
mbps /= ((jstop - jstart) * 1000) / HZ;
}
printk("IXP: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
total, request_size, jstop - jstart,
((int)mbps) / 1000, ((int)mbps) % 1000);
ixp_done();
#endif /* BENCH_IXP_ACCESS_LIB */
for (i = 0; i < request_q_len; i++)
kfree(requests[i].buffer);
kfree(requests);
return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
}
static void __exit ocfbench_exit(void)
{
}
module_init(ocfbench_init);
module_exit(ocfbench_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
| gpl-2.0 |
pawitp/android_kernel_samsung_i9082 | net/caif/cfctrl.c | 424 | 16320 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfctrl.h>
#define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
#define UTILITY_NAME_LENGTH 16
#define CFPKT_CTRL_PKT_LEN 20
#ifdef CAIF_NO_LOOP
static int handle_loop(struct cfctrl *ctrl,
int cmd, struct cfpkt *pkt){
return -1;
}
#else
static int handle_loop(struct cfctrl *ctrl,
int cmd, struct cfpkt *pkt);
#endif
static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid);
struct cflayer *cfctrl_create(void)
{
struct dev_info dev_info;
struct cfctrl *this =
kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
if (!this) {
pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
memset(&dev_info, 0, sizeof(dev_info));
dev_info.id = 0xff;
memset(this, 0, sizeof(*this));
cfsrvl_init(&this->serv, 0, &dev_info, false);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
this->serv.layer.receive = cfctrl_recv;
sprintf(this->serv.layer.name, "ctrl");
this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
#ifndef CAIF_NO_LOOP
spin_lock_init(&this->loop_linkid_lock);
this->loop_linkid = 1;
#endif
spin_lock_init(&this->info_list_lock);
INIT_LIST_HEAD(&this->list);
return &this->serv.layer;
}
void cfctrl_remove(struct cflayer *layer)
{
struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layer);
spin_lock_bh(&ctrl->info_list_lock);
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
list_del(&p->list);
kfree(p);
}
spin_unlock_bh(&ctrl->info_list_lock);
kfree(layer);
}
static bool param_eq(const struct cfctrl_link_param *p1,
const struct cfctrl_link_param *p2)
{
bool eq =
p1->linktype == p2->linktype &&
p1->priority == p2->priority &&
p1->phyid == p2->phyid &&
p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
if (!eq)
return false;
switch (p1->linktype) {
case CFCTRL_SRV_VEI:
return true;
case CFCTRL_SRV_DATAGRAM:
return p1->u.datagram.connid == p2->u.datagram.connid;
case CFCTRL_SRV_RFM:
return
p1->u.rfm.connid == p2->u.rfm.connid &&
strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
case CFCTRL_SRV_UTIL:
return
p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
&& p1->u.utility.fifosize_bufs ==
p2->u.utility.fifosize_bufs
&& strcmp(p1->u.utility.name, p2->u.utility.name) == 0
&& p1->u.utility.paramlen == p2->u.utility.paramlen
&& memcmp(p1->u.utility.params, p2->u.utility.params,
p1->u.utility.paramlen) == 0;
case CFCTRL_SRV_VIDEO:
return p1->u.video.connid == p2->u.video.connid;
case CFCTRL_SRV_DBG:
return true;
case CFCTRL_SRV_DECM:
return false;
default:
return false;
}
return false;
}
static bool cfctrl_req_eq(const struct cfctrl_request_info *r1,
const struct cfctrl_request_info *r2)
{
if (r1->cmd != r2->cmd)
return false;
if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
return param_eq(&r1->param, &r2->param);
else
return r1->channel_id == r2->channel_id;
}
/* Insert request at the end */
static void cfctrl_insert_req(struct cfctrl *ctrl,
struct cfctrl_request_info *req)
{
spin_lock_bh(&ctrl->info_list_lock);
atomic_inc(&ctrl->req_seq_no);
req->sequence_no = atomic_read(&ctrl->req_seq_no);
list_add_tail(&req->list, &ctrl->list);
spin_unlock_bh(&ctrl->info_list_lock);
}
/* Compare and remove request */
static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
struct cfctrl_request_info *req)
{
struct cfctrl_request_info *p, *tmp, *first;
first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (cfctrl_req_eq(req, p)) {
if (p != first)
pr_warn("Requests are not received in order\n");
atomic_set(&ctrl->rsp_seq_no,
p->sequence_no);
list_del(&p->list);
goto out;
}
}
p = NULL;
out:
return p;
}
struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
{
struct cfctrl *this = container_obj(layer);
return &this->res;
}
static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
{
info->hdr_len = 0;
info->channel_id = cfctrl->serv.layer.id;
info->dev_info = &cfctrl->serv.dev_info;
}
void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
{
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
if (!pkt) {
pr_warn("Out of memory\n");
return;
}
if (!dn) {
pr_debug("not able to send enum request\n");
return;
}
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
init_info(cfpkt_info(pkt), cfctrl);
cfpkt_info(pkt)->dev_info->id = physlinkid;
cfctrl->serv.dev_info.id = physlinkid;
cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
cfpkt_addbdy(pkt, physlinkid);
dn->transmit(dn, pkt);
}
int cfctrl_linkup_request(struct cflayer *layer,
struct cfctrl_link_param *param,
struct cflayer *user_layer)
{
struct cfctrl *cfctrl = container_obj(layer);
u32 tmp32;
u16 tmp16;
u8 tmp8;
struct cfctrl_request_info *req;
int ret;
char utility_name[16];
struct cfpkt *pkt;
struct cflayer *dn = cfctrl->serv.layer.dn;
if (!dn) {
pr_debug("not able to send linkup request\n");
return -ENODEV;
}
if (cfctrl_cancel_req(layer, user_layer) > 0) {
/* Slight paranoia: check if this client is already connecting */
pr_err("Duplicate connect request for same client\n");
WARN_ON(1);
return -EALREADY;
}
pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
pr_warn("Out of memory\n");
return -ENOMEM;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
cfpkt_addbdy(pkt, param->endpoint & 0x03);
switch (param->linktype) {
case CFCTRL_SRV_VEI:
break;
case CFCTRL_SRV_VIDEO:
cfpkt_addbdy(pkt, (u8) param->u.video.connid);
break;
case CFCTRL_SRV_DBG:
break;
case CFCTRL_SRV_DATAGRAM:
tmp32 = cpu_to_le32(param->u.datagram.connid);
cfpkt_add_body(pkt, &tmp32, 4);
break;
case CFCTRL_SRV_RFM:
/* Convert the DatagramConnectionID to its little-endian
* wire format and copy it out...
*/
tmp32 = cpu_to_le32(param->u.rfm.connid);
cfpkt_add_body(pkt, &tmp32, 4);
/* Add volume name, including zero termination... */
cfpkt_add_body(pkt, param->u.rfm.volume,
strlen(param->u.rfm.volume) + 1);
break;
case CFCTRL_SRV_UTIL:
tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
cfpkt_add_body(pkt, &tmp16, 2);
tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
cfpkt_add_body(pkt, &tmp16, 2);
memset(utility_name, 0, sizeof(utility_name));
strncpy(utility_name, param->u.utility.name,
UTILITY_NAME_LENGTH - 1);
cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
tmp8 = param->u.utility.paramlen;
cfpkt_add_body(pkt, &tmp8, 1);
cfpkt_add_body(pkt, param->u.utility.params,
param->u.utility.paramlen);
break;
default:
pr_warn("Request setup of bad link type = %d\n",
param->linktype);
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
pr_warn("Out of memory\n");
return -ENOMEM;
}
req->client_layer = user_layer;
req->cmd = CFCTRL_CMD_LINK_SETUP;
req->param = *param;
cfctrl_insert_req(cfctrl, req);
init_info(cfpkt_info(pkt), cfctrl);
/*
* NOTE: Always send link-up and link-down requests on the same
* device as the payload. Otherwise old queued-up payload
* might arrive with the newly allocated channel ID.
*/
cfpkt_info(pkt)->dev_info->id = param->phyid;
ret =
dn->transmit(dn, pkt);
if (ret < 0) {
int count;
count = cfctrl_cancel_req(&cfctrl->serv.layer,
user_layer);
if (count != 1) {
pr_err("Could not remove request (%d)", count);
return -ENODEV;
}
}
return 0;
}
int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cflayer *client)
{
int ret;
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
if (!pkt) {
pr_warn("Out of memory\n");
return -ENOMEM;
}
if (!dn) {
pr_debug("not able to send link-down request\n");
return -ENODEV;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
cfpkt_addbdy(pkt, channelid);
init_info(cfpkt_info(pkt), cfctrl);
ret =
dn->transmit(dn, pkt);
#ifndef CAIF_NO_LOOP
cfctrl->loop_linkused[channelid] = 0;
#endif
return ret;
}
int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
{
struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layr);
int found = 0;
spin_lock_bh(&ctrl->info_list_lock);
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (p->client_layer == adap_layer) {
list_del(&p->list);
kfree(p);
found++;
}
}
spin_unlock_bh(&ctrl->info_list_lock);
return found;
}
static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
{
u8 cmdrsp;
u8 cmd;
int ret = -1;
u16 tmp16;
u8 len;
u8 param[255];
u8 linkid;
struct cfctrl *cfctrl = container_obj(layer);
struct cfctrl_request_info rsp, *req;
cfpkt_extr_head(pkt, &cmdrsp, 1);
cmd = cmdrsp & CFCTRL_CMD_MASK;
if (cmd != CFCTRL_CMD_LINK_ERR
&& CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
&& CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) {
if (handle_loop(cfctrl, cmd, pkt) != 0)
cmdrsp |= CFCTRL_ERR_BIT;
}
switch (cmd) {
case CFCTRL_CMD_LINK_SETUP:
{
enum cfctrl_srv serv;
enum cfctrl_srv servtype;
u8 endpoint;
u8 physlinkid;
u8 prio;
u8 tmp;
u32 tmp32;
u8 *cp;
int i;
struct cfctrl_link_param linkparam;
memset(&linkparam, 0, sizeof(linkparam));
cfpkt_extr_head(pkt, &tmp, 1);
serv = tmp & CFCTRL_SRV_MASK;
linkparam.linktype = serv;
servtype = tmp >> 4;
linkparam.chtype = servtype;
cfpkt_extr_head(pkt, &tmp, 1);
physlinkid = tmp & 0x07;
prio = tmp >> 3;
linkparam.priority = prio;
linkparam.phyid = physlinkid;
cfpkt_extr_head(pkt, &endpoint, 1);
linkparam.endpoint = endpoint & 0x03;
switch (serv) {
case CFCTRL_SRV_VEI:
case CFCTRL_SRV_DBG:
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
break;
case CFCTRL_SRV_VIDEO:
cfpkt_extr_head(pkt, &tmp, 1);
linkparam.u.video.connid = tmp;
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
break;
case CFCTRL_SRV_DATAGRAM:
cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.datagram.connid =
le32_to_cpu(tmp32);
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
break;
case CFCTRL_SRV_RFM:
/* Extract the DatagramConnectionID
* (little-endian on the wire) followed by
* the NUL-terminated volume name...
*/
cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.rfm.connid =
le32_to_cpu(tmp32);
cp = (u8 *) linkparam.u.rfm.volume;
for (cfpkt_extr_head(pkt, &tmp, 1);
cfpkt_more(pkt) && tmp != '\0';
cfpkt_extr_head(pkt, &tmp, 1))
*cp++ = tmp;
*cp = '\0';
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
break;
case CFCTRL_SRV_UTIL:
/* Extract the utility-service parameters:
* fifo sizes, service name and the
* variable-length parameter block...
*/
/* Fifosize KB */
cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_kb =
le16_to_cpu(tmp16);
/* Fifosize bufs */
cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_bufs =
le16_to_cpu(tmp16);
/* name */
cp = (u8 *) linkparam.u.utility.name;
caif_assert(sizeof(linkparam.u.utility.name)
>= UTILITY_NAME_LENGTH);
for (i = 0;
i < UTILITY_NAME_LENGTH
&& cfpkt_more(pkt); i++) {
cfpkt_extr_head(pkt, &tmp, 1);
*cp++ = tmp;
}
/* Length */
cfpkt_extr_head(pkt, &len, 1);
linkparam.u.utility.paramlen = len;
/* Param Data */
cp = linkparam.u.utility.params;
while (cfpkt_more(pkt) && len--) {
cfpkt_extr_head(pkt, &tmp, 1);
*cp++ = tmp;
}
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
/* Length */
cfpkt_extr_head(pkt, &len, 1);
/* Param Data */
cfpkt_extr_head(pkt, ¶m, len);
break;
default:
pr_warn("Request setup, invalid type (%d)\n",
serv);
goto error;
}
rsp.cmd = cmd;
rsp.param = linkparam;
spin_lock_bh(&cfctrl->info_list_lock);
req = cfctrl_remove_req(cfctrl, &rsp);
if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
cfpkt_erroneous(pkt)) {
pr_err("Invalid O/E bit or parse error "
"on CAIF control channel\n");
cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
0,
req ? req->client_layer
: NULL);
} else {
cfctrl->res.linksetup_rsp(cfctrl->serv.
layer.up, linkid,
serv, physlinkid,
req ? req->
client_layer : NULL);
}
kfree(req);
spin_unlock_bh(&cfctrl->info_list_lock);
}
break;
case CFCTRL_CMD_LINK_DESTROY:
cfpkt_extr_head(pkt, &linkid, 1);
cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
break;
case CFCTRL_CMD_LINK_ERR:
pr_err("Frame Error Indication received\n");
cfctrl->res.linkerror_ind();
break;
case CFCTRL_CMD_ENUM:
cfctrl->res.enum_rsp();
break;
case CFCTRL_CMD_SLEEP:
cfctrl->res.sleep_rsp();
break;
case CFCTRL_CMD_WAKE:
cfctrl->res.wake_rsp();
break;
case CFCTRL_CMD_LINK_RECONF:
cfctrl->res.restart_rsp();
break;
case CFCTRL_CMD_RADIO_SET:
cfctrl->res.radioset_rsp();
break;
default:
pr_err("Unrecognized Control Frame\n");
goto error;
}
ret = 0;
error:
cfpkt_destroy(pkt);
return ret;
}
static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfctrl *this = container_obj(layr);
switch (ctrl) {
case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
case CAIF_CTRLCMD_FLOW_OFF_IND:
spin_lock_bh(&this->info_list_lock);
if (!list_empty(&this->list))
pr_debug("Received flow off in control layer\n");
spin_unlock_bh(&this->info_list_lock);
break;
case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
struct cfctrl_request_info *p, *tmp;
/* Find all connect requests and report failure */
spin_lock_bh(&this->info_list_lock);
list_for_each_entry_safe(p, tmp, &this->list, list) {
if (p->param.phyid == phyid) {
list_del(&p->list);
p->client_layer->ctrlcmd(p->client_layer,
CAIF_CTRLCMD_INIT_FAIL_RSP,
phyid);
kfree(p);
}
}
spin_unlock_bh(&this->info_list_lock);
break;
}
default:
break;
}
}
#ifndef CAIF_NO_LOOP
static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
{
static int last_linkid;
static int dec;
u8 linkid, linktype, tmp;
switch (cmd) {
case CFCTRL_CMD_LINK_SETUP:
spin_lock_bh(&ctrl->loop_linkid_lock);
if (!dec) {
for (linkid = last_linkid + 1; linkid < 254; linkid++)
if (!ctrl->loop_linkused[linkid])
goto found;
}
dec = 1;
for (linkid = last_linkid - 1; linkid > 1; linkid--)
if (!ctrl->loop_linkused[linkid])
goto found;
spin_unlock_bh(&ctrl->loop_linkid_lock);
return -1;
found:
if (linkid < 10)
dec = 0;
if (!ctrl->loop_linkused[linkid])
ctrl->loop_linkused[linkid] = 1;
last_linkid = linkid;
cfpkt_add_trail(pkt, &linkid, 1);
spin_unlock_bh(&ctrl->loop_linkid_lock);
cfpkt_peek_head(pkt, &linktype, 1);
if (linktype == CFCTRL_SRV_UTIL) {
tmp = 0x01;
cfpkt_add_trail(pkt, &tmp, 1);
cfpkt_add_trail(pkt, &tmp, 1);
}
break;
case CFCTRL_CMD_LINK_DESTROY:
spin_lock_bh(&ctrl->loop_linkid_lock);
cfpkt_peek_head(pkt, &linkid, 1);
ctrl->loop_linkused[linkid] = 0;
spin_unlock_bh(&ctrl->loop_linkid_lock);
break;
default:
break;
}
return 0;
}
#endif
| gpl-2.0 |
pichina/linux-bcache | drivers/mfd/menelaus.c | 680 | 30451 | /*
* Copyright (C) 2004 Texas Instruments, Inc.
*
* Some parts based tps65010.c:
* Copyright (C) 2004 Texas Instruments and
* Copyright (C) 2004-2005 David Brownell
*
* Some parts based on tlv320aic24.c:
* Copyright (C) by Kai Svahn <kai.svahn@nokia.com>
*
* Changes for interrupt handling and clean-up by
* Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com>
* Cleanup and generalized support for voltage setting by
* Juha Yrjola
* Added support for controlling VCORE and regulator sleep states,
* Amit Kucheria <amit.kucheria@nokia.com>
* Copyright (C) 2005, 2006 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <asm/mach/irq.h>
#include <mach/gpio.h>
#include <mach/menelaus.h>
#define DRIVER_NAME "menelaus"
#define MENELAUS_I2C_ADDRESS 0x72
#define MENELAUS_REV 0x01
#define MENELAUS_VCORE_CTRL1 0x02
#define MENELAUS_VCORE_CTRL2 0x03
#define MENELAUS_VCORE_CTRL3 0x04
#define MENELAUS_VCORE_CTRL4 0x05
#define MENELAUS_VCORE_CTRL5 0x06
#define MENELAUS_DCDC_CTRL1 0x07
#define MENELAUS_DCDC_CTRL2 0x08
#define MENELAUS_DCDC_CTRL3 0x09
#define MENELAUS_LDO_CTRL1 0x0A
#define MENELAUS_LDO_CTRL2 0x0B
#define MENELAUS_LDO_CTRL3 0x0C
#define MENELAUS_LDO_CTRL4 0x0D
#define MENELAUS_LDO_CTRL5 0x0E
#define MENELAUS_LDO_CTRL6 0x0F
#define MENELAUS_LDO_CTRL7 0x10
#define MENELAUS_LDO_CTRL8 0x11
#define MENELAUS_SLEEP_CTRL1 0x12
#define MENELAUS_SLEEP_CTRL2 0x13
#define MENELAUS_DEVICE_OFF 0x14
#define MENELAUS_OSC_CTRL 0x15
#define MENELAUS_DETECT_CTRL 0x16
#define MENELAUS_INT_MASK1 0x17
#define MENELAUS_INT_MASK2 0x18
#define MENELAUS_INT_STATUS1 0x19
#define MENELAUS_INT_STATUS2 0x1A
#define MENELAUS_INT_ACK1 0x1B
#define MENELAUS_INT_ACK2 0x1C
#define MENELAUS_GPIO_CTRL 0x1D
#define MENELAUS_GPIO_IN 0x1E
#define MENELAUS_GPIO_OUT 0x1F
#define MENELAUS_BBSMS 0x20
#define MENELAUS_RTC_CTRL 0x21
#define MENELAUS_RTC_UPDATE 0x22
#define MENELAUS_RTC_SEC 0x23
#define MENELAUS_RTC_MIN 0x24
#define MENELAUS_RTC_HR 0x25
#define MENELAUS_RTC_DAY 0x26
#define MENELAUS_RTC_MON 0x27
#define MENELAUS_RTC_YR 0x28
#define MENELAUS_RTC_WKDAY 0x29
#define MENELAUS_RTC_AL_SEC 0x2A
#define MENELAUS_RTC_AL_MIN 0x2B
#define MENELAUS_RTC_AL_HR 0x2C
#define MENELAUS_RTC_AL_DAY 0x2D
#define MENELAUS_RTC_AL_MON 0x2E
#define MENELAUS_RTC_AL_YR 0x2F
#define MENELAUS_RTC_COMP_MSB 0x30
#define MENELAUS_RTC_COMP_LSB 0x31
#define MENELAUS_S1_PULL_EN 0x32
#define MENELAUS_S1_PULL_DIR 0x33
#define MENELAUS_S2_PULL_EN 0x34
#define MENELAUS_S2_PULL_DIR 0x35
#define MENELAUS_MCT_CTRL1 0x36
#define MENELAUS_MCT_CTRL2 0x37
#define MENELAUS_MCT_CTRL3 0x38
#define MENELAUS_MCT_PIN_ST 0x39
#define MENELAUS_DEBOUNCE1 0x3A
#define IH_MENELAUS_IRQS 12
#define MENELAUS_MMC_S1CD_IRQ 0 /* MMC slot 1 card change */
#define MENELAUS_MMC_S2CD_IRQ 1 /* MMC slot 2 card change */
#define MENELAUS_MMC_S1D1_IRQ 2 /* MMC DAT1 low in slot 1 */
#define MENELAUS_MMC_S2D1_IRQ 3 /* MMC DAT1 low in slot 2 */
#define MENELAUS_LOWBAT_IRQ 4 /* Low battery */
#define MENELAUS_HOTDIE_IRQ 5 /* Hot die detect */
#define MENELAUS_UVLO_IRQ 6 /* UVLO detect */
#define MENELAUS_TSHUT_IRQ 7 /* Thermal shutdown */
#define MENELAUS_RTCTMR_IRQ 8 /* RTC timer */
#define MENELAUS_RTCALM_IRQ 9 /* RTC alarm */
#define MENELAUS_RTCERR_IRQ 10 /* RTC error */
#define MENELAUS_PSHBTN_IRQ 11 /* Push button */
#define MENELAUS_RESERVED12_IRQ 12 /* Reserved */
#define MENELAUS_RESERVED13_IRQ 13 /* Reserved */
#define MENELAUS_RESERVED14_IRQ 14 /* Reserved */
#define MENELAUS_RESERVED15_IRQ 15 /* Reserved */
static void menelaus_work(struct work_struct *_menelaus);
struct menelaus_chip {
struct mutex lock;
struct i2c_client *client;
struct work_struct work;
#ifdef CONFIG_RTC_DRV_TWL92330
struct rtc_device *rtc;
u8 rtc_control;
unsigned uie:1;
#endif
unsigned vcore_hw_mode:1;
u8 mask1, mask2;
void (*handlers[16])(struct menelaus_chip *);
void (*mmc_callback)(void *data, u8 mask);
void *mmc_callback_data;
};
static struct menelaus_chip *the_menelaus;
static int menelaus_write_reg(int reg, u8 value)
{
int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value);
if (val < 0) {
pr_err(DRIVER_NAME ": write error");
return val;
}
return 0;
}
static int menelaus_read_reg(int reg)
{
int val = i2c_smbus_read_byte_data(the_menelaus->client, reg);
if (val < 0)
pr_err(DRIVER_NAME ": read error");
return val;
}
static int menelaus_enable_irq(int irq)
{
if (irq > 7) {
irq -= 8;
the_menelaus->mask2 &= ~(1 << irq);
return menelaus_write_reg(MENELAUS_INT_MASK2,
the_menelaus->mask2);
} else {
the_menelaus->mask1 &= ~(1 << irq);
return menelaus_write_reg(MENELAUS_INT_MASK1,
the_menelaus->mask1);
}
}
static int menelaus_disable_irq(int irq)
{
if (irq > 7) {
irq -= 8;
the_menelaus->mask2 |= (1 << irq);
return menelaus_write_reg(MENELAUS_INT_MASK2,
the_menelaus->mask2);
} else {
the_menelaus->mask1 |= (1 << irq);
return menelaus_write_reg(MENELAUS_INT_MASK1,
the_menelaus->mask1);
}
}
static int menelaus_ack_irq(int irq)
{
if (irq > 7)
return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8));
else
return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq);
}
/* Adds a handler for an interrupt. Does not run in interrupt context */
static int menelaus_add_irq_work(int irq,
void (*handler)(struct menelaus_chip *))
{
int ret = 0;
mutex_lock(&the_menelaus->lock);
the_menelaus->handlers[irq] = handler;
ret = menelaus_enable_irq(irq);
mutex_unlock(&the_menelaus->lock);
return ret;
}
/* Removes handler for an interrupt */
static int menelaus_remove_irq_work(int irq)
{
int ret = 0;
mutex_lock(&the_menelaus->lock);
ret = menelaus_disable_irq(irq);
the_menelaus->handlers[irq] = NULL;
mutex_unlock(&the_menelaus->lock);
return ret;
}
/*
* Gets scheduled when a card detect interrupt happens. Note that in some cases
 * this line is wired to the card cover switch rather than the card detect
 * switch in each slot, in which case the cards are not seen by Menelaus.
* FIXME: Add handling for D1 too
*/
static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
{
int reg;
unsigned char card_mask = 0;
reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST);
if (reg < 0)
return;
if (!(reg & 0x1))
card_mask |= (1 << 0);
if (!(reg & 0x2))
card_mask |= (1 << 1);
if (menelaus_hw->mmc_callback)
menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
card_mask);
}
/*
* Toggles the MMC slots between open-drain and push-pull mode.
*/
int menelaus_set_mmc_opendrain(int slot, int enable)
{
int ret, val;
if (slot != 1 && slot != 2)
return -EINVAL;
mutex_lock(&the_menelaus->lock);
ret = menelaus_read_reg(MENELAUS_MCT_CTRL1);
if (ret < 0) {
mutex_unlock(&the_menelaus->lock);
return ret;
}
val = ret;
if (slot == 1) {
if (enable)
val |= 1 << 2;
else
val &= ~(1 << 2);
} else {
if (enable)
val |= 1 << 3;
else
val &= ~(1 << 3);
}
ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
mutex_unlock(&the_menelaus->lock);
return ret;
}
EXPORT_SYMBOL(menelaus_set_mmc_opendrain);
int menelaus_set_slot_sel(int enable)
{
int ret;
mutex_lock(&the_menelaus->lock);
ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
if (ret < 0)
goto out;
ret |= 0x02;
if (enable)
ret |= 1 << 5;
else
ret &= ~(1 << 5);
ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
out:
mutex_unlock(&the_menelaus->lock);
return ret;
}
EXPORT_SYMBOL(menelaus_set_slot_sel);
int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
{
int ret, val;
if (slot != 1 && slot != 2)
return -EINVAL;
if (power >= 3)
return -EINVAL;
mutex_lock(&the_menelaus->lock);
ret = menelaus_read_reg(MENELAUS_MCT_CTRL2);
if (ret < 0)
goto out;
val = ret;
if (slot == 1) {
if (cd_en)
val |= (1 << 4) | (1 << 6);
else
val &= ~((1 << 4) | (1 << 6));
} else {
if (cd_en)
val |= (1 << 5) | (1 << 7);
else
val &= ~((1 << 5) | (1 << 7));
}
ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
if (ret < 0)
goto out;
ret = menelaus_read_reg(MENELAUS_MCT_CTRL3);
if (ret < 0)
goto out;
val = ret;
if (slot == 1) {
if (enable)
val |= 1 << 0;
else
val &= ~(1 << 0);
} else {
int b;
		if (enable)
			val |= 1 << 1;
		else
			val &= ~(1 << 1);
		b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
		if (b < 0) {
			ret = b;
			goto out;
		}
		b &= ~0x03;
		b |= power;
ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
if (ret < 0)
goto out;
}
/* Disable autonomous shutdown */
val &= ~(0x03 << 2);
ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
out:
mutex_unlock(&the_menelaus->lock);
return ret;
}
EXPORT_SYMBOL(menelaus_set_mmc_slot);
int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
void *data)
{
int ret = 0;
the_menelaus->mmc_callback_data = data;
the_menelaus->mmc_callback = callback;
ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ,
menelaus_mmc_cd_work);
if (ret < 0)
return ret;
ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ,
menelaus_mmc_cd_work);
if (ret < 0)
return ret;
ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ,
menelaus_mmc_cd_work);
if (ret < 0)
return ret;
ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ,
menelaus_mmc_cd_work);
return ret;
}
EXPORT_SYMBOL(menelaus_register_mmc_callback);
void menelaus_unregister_mmc_callback(void)
{
menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ);
menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ);
menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ);
menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
the_menelaus->mmc_callback = NULL;
	the_menelaus->mmc_callback_data = NULL;
}
EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
struct menelaus_vtg {
const char *name;
u8 vtg_reg;
u8 vtg_shift;
u8 vtg_bits;
u8 mode_reg;
};
struct menelaus_vtg_value {
u16 vtg;
u16 val;
};
static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
int vtg_val, int mode)
{
int val, ret;
struct i2c_client *c = the_menelaus->client;
mutex_lock(&the_menelaus->lock);
	if (!vtg) {
		/* the mode write below requires vtg->mode_reg */
		ret = -EINVAL;
		goto out;
	}
ret = menelaus_read_reg(vtg->vtg_reg);
if (ret < 0)
goto out;
val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift);
val |= vtg_val << vtg->vtg_shift;
dev_dbg(&c->dev, "Setting voltage '%s'"
"to %d mV (reg 0x%02x, val 0x%02x)\n",
vtg->name, mV, vtg->vtg_reg, val);
ret = menelaus_write_reg(vtg->vtg_reg, val);
if (ret < 0)
goto out;
ret = menelaus_write_reg(vtg->mode_reg, mode);
out:
mutex_unlock(&the_menelaus->lock);
if (ret == 0) {
/* Wait for voltage to stabilize */
msleep(1);
}
return ret;
}
static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
int n)
{
int i;
for (i = 0; i < n; i++, tbl++)
if (tbl->vtg == vtg)
return tbl->val;
return -EINVAL;
}
/*
* Vcore can be programmed in two ways:
* SW-controlled: Required voltage is programmed into VCORE_CTRL1
* HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3
* and VCORE_CTRL4
*
* Call correct 'set' function accordingly
*/
static const struct menelaus_vtg_value vcore_values[] = {
{ 1000, 0 },
{ 1025, 1 },
{ 1050, 2 },
{ 1075, 3 },
{ 1100, 4 },
{ 1125, 5 },
{ 1150, 6 },
{ 1175, 7 },
{ 1200, 8 },
{ 1225, 9 },
{ 1250, 10 },
{ 1275, 11 },
{ 1300, 12 },
{ 1325, 13 },
{ 1350, 14 },
{ 1375, 15 },
{ 1400, 16 },
{ 1425, 17 },
{ 1450, 18 },
};
int menelaus_set_vcore_sw(unsigned int mV)
{
int val, ret;
struct i2c_client *c = the_menelaus->client;
val = menelaus_get_vtg_value(mV, vcore_values,
ARRAY_SIZE(vcore_values));
if (val < 0)
return -EINVAL;
dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
/* Set SW mode and the voltage in one go. */
mutex_lock(&the_menelaus->lock);
ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
if (ret == 0)
the_menelaus->vcore_hw_mode = 0;
mutex_unlock(&the_menelaus->lock);
msleep(1);
return ret;
}
int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
{
int fval, rval, val, ret;
struct i2c_client *c = the_menelaus->client;
rval = menelaus_get_vtg_value(roof_mV, vcore_values,
ARRAY_SIZE(vcore_values));
if (rval < 0)
return -EINVAL;
fval = menelaus_get_vtg_value(floor_mV, vcore_values,
ARRAY_SIZE(vcore_values));
if (fval < 0)
return -EINVAL;
dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n",
floor_mV, roof_mV);
mutex_lock(&the_menelaus->lock);
ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval);
if (ret < 0)
goto out;
ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval);
if (ret < 0)
goto out;
	if (!the_menelaus->vcore_hw_mode) {
		val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
		if (val < 0) {
			ret = val;
			goto out;
		}
		/* HW mode, turn OFF byte comparator */
		val |= ((1 << 7) | (1 << 5));
		ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
		the_menelaus->vcore_hw_mode = 1;
	}
msleep(1);
out:
mutex_unlock(&the_menelaus->lock);
return ret;
}
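/*
 * Illustrative sketch, not part of the original driver: how board code
 * might use the two Vcore programming modes described above. The
 * 1200/1050/1400 mV figures are hypothetical examples; any value passed
 * in must appear in vcore_values[] or the calls return -EINVAL.
 */
static int __maybe_unused menelaus_vcore_example(void)
{
	int err;

	/* SW-controlled: one fixed voltage programmed into VCORE_CTRL1 */
	err = menelaus_set_vcore_sw(1200);
	if (err)
		return err;

	/* HW-controlled: roof/floor window via VCORE_CTRL3 and VCORE_CTRL4 */
	return menelaus_set_vcore_hw(1400, 1050);
}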
static const struct menelaus_vtg vmem_vtg = {
.name = "VMEM",
.vtg_reg = MENELAUS_LDO_CTRL1,
.vtg_shift = 0,
.vtg_bits = 2,
.mode_reg = MENELAUS_LDO_CTRL3,
};
static const struct menelaus_vtg_value vmem_values[] = {
{ 1500, 0 },
{ 1800, 1 },
{ 1900, 2 },
{ 2500, 3 },
};
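/*
 * Worked example (illustrative): menelaus_get_vtg_value(1800, vmem_values,
 * ARRAY_SIZE(vmem_values)) returns 1, the register code for 1.8 V; an
 * unsupported value such as 2000 mV would return -EINVAL.
 */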
int menelaus_set_vmem(unsigned int mV)
{
int val;
if (mV == 0)
return menelaus_set_voltage(&vmem_vtg, 0, 0, 0);
val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values));
if (val < 0)
return -EINVAL;
return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02);
}
EXPORT_SYMBOL(menelaus_set_vmem);
static const struct menelaus_vtg vio_vtg = {
.name = "VIO",
.vtg_reg = MENELAUS_LDO_CTRL1,
.vtg_shift = 2,
.vtg_bits = 2,
.mode_reg = MENELAUS_LDO_CTRL4,
};
static const struct menelaus_vtg_value vio_values[] = {
{ 1500, 0 },
{ 1800, 1 },
{ 2500, 2 },
{ 2800, 3 },
};
int menelaus_set_vio(unsigned int mV)
{
int val;
if (mV == 0)
return menelaus_set_voltage(&vio_vtg, 0, 0, 0);
val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values));
if (val < 0)
return -EINVAL;
return menelaus_set_voltage(&vio_vtg, mV, val, 0x02);
}
EXPORT_SYMBOL(menelaus_set_vio);
static const struct menelaus_vtg_value vdcdc_values[] = {
{ 1500, 0 },
{ 1800, 1 },
{ 2000, 2 },
{ 2200, 3 },
{ 2400, 4 },
{ 2800, 5 },
{ 3000, 6 },
{ 3300, 7 },
};
static const struct menelaus_vtg vdcdc2_vtg = {
.name = "VDCDC2",
.vtg_reg = MENELAUS_DCDC_CTRL1,
.vtg_shift = 0,
.vtg_bits = 3,
.mode_reg = MENELAUS_DCDC_CTRL2,
};
static const struct menelaus_vtg vdcdc3_vtg = {
.name = "VDCDC3",
.vtg_reg = MENELAUS_DCDC_CTRL1,
.vtg_shift = 3,
.vtg_bits = 3,
.mode_reg = MENELAUS_DCDC_CTRL3,
};
int menelaus_set_vdcdc(int dcdc, unsigned int mV)
{
const struct menelaus_vtg *vtg;
int val;
if (dcdc != 2 && dcdc != 3)
return -EINVAL;
if (dcdc == 2)
vtg = &vdcdc2_vtg;
else
vtg = &vdcdc3_vtg;
if (mV == 0)
return menelaus_set_voltage(vtg, 0, 0, 0);
val = menelaus_get_vtg_value(mV, vdcdc_values,
ARRAY_SIZE(vdcdc_values));
if (val < 0)
return -EINVAL;
return menelaus_set_voltage(vtg, mV, val, 0x03);
}
static const struct menelaus_vtg_value vmmc_values[] = {
{ 1850, 0 },
{ 2800, 1 },
{ 3000, 2 },
{ 3100, 3 },
};
static const struct menelaus_vtg vmmc_vtg = {
.name = "VMMC",
.vtg_reg = MENELAUS_LDO_CTRL1,
.vtg_shift = 6,
.vtg_bits = 2,
.mode_reg = MENELAUS_LDO_CTRL7,
};
int menelaus_set_vmmc(unsigned int mV)
{
int val;
if (mV == 0)
return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0);
val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values));
if (val < 0)
return -EINVAL;
return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02);
}
EXPORT_SYMBOL(menelaus_set_vmmc);
static const struct menelaus_vtg_value vaux_values[] = {
{ 1500, 0 },
{ 1800, 1 },
{ 2500, 2 },
{ 2800, 3 },
};
static const struct menelaus_vtg vaux_vtg = {
.name = "VAUX",
.vtg_reg = MENELAUS_LDO_CTRL1,
.vtg_shift = 4,
.vtg_bits = 2,
.mode_reg = MENELAUS_LDO_CTRL6,
};
int menelaus_set_vaux(unsigned int mV)
{
int val;
if (mV == 0)
return menelaus_set_voltage(&vaux_vtg, 0, 0, 0);
val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values));
if (val < 0)
return -EINVAL;
return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02);
}
EXPORT_SYMBOL(menelaus_set_vaux);
int menelaus_get_slot_pin_states(void)
{
return menelaus_read_reg(MENELAUS_MCT_PIN_ST);
}
EXPORT_SYMBOL(menelaus_get_slot_pin_states);
int menelaus_set_regulator_sleep(int enable, u32 val)
{
int t, ret;
struct i2c_client *c = the_menelaus->client;
mutex_lock(&the_menelaus->lock);
ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val);
if (ret < 0)
goto out;
dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val);
ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
if (ret < 0)
goto out;
t = ((1 << 6) | 0x04);
if (enable)
ret |= t;
else
ret &= ~t;
ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
out:
mutex_unlock(&the_menelaus->lock);
return ret;
}
/*-----------------------------------------------------------------------*/
/* Handles Menelaus interrupts. Does not run in interrupt context */
static void menelaus_work(struct work_struct *_menelaus)
{
struct menelaus_chip *menelaus =
container_of(_menelaus, struct menelaus_chip, work);
void (*handler)(struct menelaus_chip *menelaus);
while (1) {
unsigned isr;
isr = (menelaus_read_reg(MENELAUS_INT_STATUS2)
& ~menelaus->mask2) << 8;
isr |= menelaus_read_reg(MENELAUS_INT_STATUS1)
& ~menelaus->mask1;
if (!isr)
break;
while (isr) {
int irq = fls(isr) - 1;
isr &= ~(1 << irq);
mutex_lock(&menelaus->lock);
menelaus_disable_irq(irq);
menelaus_ack_irq(irq);
handler = menelaus->handlers[irq];
if (handler)
handler(menelaus);
menelaus_enable_irq(irq);
mutex_unlock(&menelaus->lock);
}
}
enable_irq(menelaus->client->irq);
}
/*
* We cannot use I2C in interrupt context, so we just schedule work.
*/
static irqreturn_t menelaus_irq(int irq, void *_menelaus)
{
struct menelaus_chip *menelaus = _menelaus;
disable_irq_nosync(irq);
(void)schedule_work(&menelaus->work);
return IRQ_HANDLED;
}
/*-----------------------------------------------------------------------*/
/*
* The RTC needs to be set once, then it runs on backup battery power.
* It supports alarms, including system wake alarms (from some modes);
* and 1/second IRQs if requested.
*/
#ifdef CONFIG_RTC_DRV_TWL92330
#define RTC_CTRL_RTC_EN (1 << 0)
#define RTC_CTRL_AL_EN (1 << 1)
#define RTC_CTRL_MODE12 (1 << 2)
#define RTC_CTRL_EVERY_MASK (3 << 3)
#define RTC_CTRL_EVERY_SEC (0 << 3)
#define RTC_CTRL_EVERY_MIN (1 << 3)
#define RTC_CTRL_EVERY_HR (2 << 3)
#define RTC_CTRL_EVERY_DAY (3 << 3)
#define RTC_UPDATE_EVERY 0x08
#define RTC_HR_PM (1 << 7)
static void menelaus_to_time(char *regs, struct rtc_time *t)
{
t->tm_sec = bcd2bin(regs[0]);
t->tm_min = bcd2bin(regs[1]);
if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
t->tm_hour = bcd2bin(regs[2] & 0x1f) - 1;
if (regs[2] & RTC_HR_PM)
t->tm_hour += 12;
} else
t->tm_hour = bcd2bin(regs[2] & 0x3f);
t->tm_mday = bcd2bin(regs[3]);
t->tm_mon = bcd2bin(regs[4]) - 1;
t->tm_year = bcd2bin(regs[5]) + 100;
}
static int time_to_menelaus(struct rtc_time *t, int regnum)
{
int hour, status;
status = menelaus_write_reg(regnum++, bin2bcd(t->tm_sec));
if (status < 0)
goto fail;
status = menelaus_write_reg(regnum++, bin2bcd(t->tm_min));
if (status < 0)
goto fail;
if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
hour = t->tm_hour + 1;
if (hour > 12)
hour = RTC_HR_PM | bin2bcd(hour - 12);
else
hour = bin2bcd(hour);
} else
hour = bin2bcd(t->tm_hour);
status = menelaus_write_reg(regnum++, hour);
if (status < 0)
goto fail;
status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mday));
if (status < 0)
goto fail;
status = menelaus_write_reg(regnum++, bin2bcd(t->tm_mon + 1));
if (status < 0)
goto fail;
status = menelaus_write_reg(regnum++, bin2bcd(t->tm_year - 100));
if (status < 0)
goto fail;
return 0;
fail:
dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n",
--regnum, status);
return status;
}
static int menelaus_read_time(struct device *dev, struct rtc_time *t)
{
struct i2c_msg msg[2];
char regs[7];
int status;
/* block read date and time registers */
regs[0] = MENELAUS_RTC_SEC;
msg[0].addr = MENELAUS_I2C_ADDRESS;
msg[0].flags = 0;
msg[0].len = 1;
msg[0].buf = regs;
msg[1].addr = MENELAUS_I2C_ADDRESS;
msg[1].flags = I2C_M_RD;
msg[1].len = sizeof(regs);
msg[1].buf = regs;
status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
if (status != 2) {
dev_err(dev, "%s error %d\n", "read", status);
return -EIO;
}
menelaus_to_time(regs, t);
t->tm_wday = bcd2bin(regs[6]);
return 0;
}
static int menelaus_set_time(struct device *dev, struct rtc_time *t)
{
int status;
/* write date and time registers */
status = time_to_menelaus(t, MENELAUS_RTC_SEC);
if (status < 0)
return status;
status = menelaus_write_reg(MENELAUS_RTC_WKDAY, bin2bcd(t->tm_wday));
if (status < 0) {
dev_err(&the_menelaus->client->dev, "rtc write reg %02x "
"err %d\n", MENELAUS_RTC_WKDAY, status);
return status;
}
	/* now commit the write */
	status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY);
	if (status < 0) {
		dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n",
			status);
		return status;
	}
	return 0;
}
static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w)
{
struct i2c_msg msg[2];
char regs[6];
int status;
/* block read alarm registers */
regs[0] = MENELAUS_RTC_AL_SEC;
msg[0].addr = MENELAUS_I2C_ADDRESS;
msg[0].flags = 0;
msg[0].len = 1;
msg[0].buf = regs;
msg[1].addr = MENELAUS_I2C_ADDRESS;
msg[1].flags = I2C_M_RD;
msg[1].len = sizeof(regs);
msg[1].buf = regs;
status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
if (status != 2) {
dev_err(dev, "%s error %d\n", "alarm read", status);
return -EIO;
}
menelaus_to_time(regs, &w->time);
w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN);
/* NOTE we *could* check if actually pending... */
w->pending = 0;
return 0;
}
static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w)
{
int status;
if (the_menelaus->client->irq <= 0 && w->enabled)
return -ENODEV;
/* clear previous alarm enable */
if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) {
the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
status = menelaus_write_reg(MENELAUS_RTC_CTRL,
the_menelaus->rtc_control);
if (status < 0)
return status;
}
/* write alarm registers */
status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC);
if (status < 0)
return status;
/* enable alarm if requested */
if (w->enabled) {
the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
status = menelaus_write_reg(MENELAUS_RTC_CTRL,
the_menelaus->rtc_control);
}
return status;
}
#ifdef CONFIG_RTC_INTF_DEV
static void menelaus_rtc_update_work(struct menelaus_chip *m)
{
/* report 1/sec update */
local_irq_disable();
rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF);
local_irq_enable();
}
static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
{
int status;
if (the_menelaus->client->irq <= 0)
return -ENOIOCTLCMD;
switch (cmd) {
/* alarm IRQ */
case RTC_AIE_ON:
if (the_menelaus->rtc_control & RTC_CTRL_AL_EN)
return 0;
the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
break;
case RTC_AIE_OFF:
if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN))
return 0;
the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
break;
/* 1/second "update" IRQ */
case RTC_UIE_ON:
if (the_menelaus->uie)
return 0;
status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ,
menelaus_rtc_update_work);
if (status == 0)
the_menelaus->uie = 1;
return status;
case RTC_UIE_OFF:
if (!the_menelaus->uie)
return 0;
status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
if (status == 0)
the_menelaus->uie = 0;
return status;
default:
return -ENOIOCTLCMD;
}
return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
}
#else
#define menelaus_ioctl NULL
#endif
/* REVISIT no compensation register support ... */
static const struct rtc_class_ops menelaus_rtc_ops = {
.ioctl = menelaus_ioctl,
.read_time = menelaus_read_time,
.set_time = menelaus_set_time,
.read_alarm = menelaus_read_alarm,
.set_alarm = menelaus_set_alarm,
};
static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
{
/* report alarm */
local_irq_disable();
rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF);
local_irq_enable();
/* then disable it; alarms are oneshot */
the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
}
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
int alarm = (m->client->irq > 0);
/* assume 32KDETEN pin is pulled high */
if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
dev_dbg(&m->client->dev, "no 32k oscillator\n");
return;
}
/* support RTC alarm; it can issue wakeups */
if (alarm) {
if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
menelaus_rtc_alarm_work) < 0) {
dev_err(&m->client->dev, "can't handle RTC alarm\n");
return;
}
device_init_wakeup(&m->client->dev, 1);
}
/* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */
m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL);
if (!(m->rtc_control & RTC_CTRL_RTC_EN)
|| (m->rtc_control & RTC_CTRL_AL_EN)
|| (m->rtc_control & RTC_CTRL_EVERY_MASK)) {
if (!(m->rtc_control & RTC_CTRL_RTC_EN)) {
dev_warn(&m->client->dev, "rtc clock needs setting\n");
m->rtc_control |= RTC_CTRL_RTC_EN;
}
m->rtc_control &= ~RTC_CTRL_EVERY_MASK;
m->rtc_control &= ~RTC_CTRL_AL_EN;
menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
}
m->rtc = rtc_device_register(DRIVER_NAME,
&m->client->dev,
&menelaus_rtc_ops, THIS_MODULE);
if (IS_ERR(m->rtc)) {
if (alarm) {
menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
device_init_wakeup(&m->client->dev, 0);
}
dev_err(&m->client->dev, "can't register RTC: %d\n",
(int) PTR_ERR(m->rtc));
the_menelaus->rtc = NULL;
}
}
#else
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
/* nothing */
}
#endif
/*-----------------------------------------------------------------------*/
static struct i2c_driver menelaus_i2c_driver;
static int menelaus_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct menelaus_chip *menelaus;
int rev = 0, val;
int err = 0;
struct menelaus_platform_data *menelaus_pdata =
client->dev.platform_data;
if (the_menelaus) {
dev_dbg(&client->dev, "only one %s for now\n",
DRIVER_NAME);
return -ENODEV;
}
menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
if (!menelaus)
return -ENOMEM;
i2c_set_clientdata(client, menelaus);
the_menelaus = menelaus;
menelaus->client = client;
	/* If doing a true probe, verify the device is present */
rev = menelaus_read_reg(MENELAUS_REV);
if (rev < 0) {
pr_err(DRIVER_NAME ": device not found");
err = -ENODEV;
goto fail1;
}
/* Ack and disable all Menelaus interrupts */
menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
menelaus->mask1 = 0xff;
menelaus->mask2 = 0xff;
/* Set output buffer strengths */
menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
if (client->irq > 0) {
err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
DRIVER_NAME, menelaus);
if (err) {
dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
client->irq, err);
goto fail1;
}
}
mutex_init(&menelaus->lock);
INIT_WORK(&menelaus->work, menelaus_work);
pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);
	val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
	if (val < 0) {
		err = val;
		goto fail2;
	}
if (val & (1 << 7))
menelaus->vcore_hw_mode = 1;
else
menelaus->vcore_hw_mode = 0;
if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
err = menelaus_pdata->late_init(&client->dev);
if (err < 0)
goto fail2;
}
menelaus_rtc_init(menelaus);
return 0;
fail2:
	if (client->irq > 0)
		free_irq(client->irq, menelaus);
	flush_scheduled_work();
fail1:
	kfree(menelaus);
	the_menelaus = NULL;
	return err;
}
static int __exit menelaus_remove(struct i2c_client *client)
{
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
	if (client->irq > 0)
		free_irq(client->irq, menelaus);
	flush_scheduled_work();
	kfree(menelaus);
i2c_set_clientdata(client, NULL);
the_menelaus = NULL;
return 0;
}
static const struct i2c_device_id menelaus_id[] = {
{ "menelaus", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, menelaus_id);
static struct i2c_driver menelaus_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
},
.probe = menelaus_probe,
.remove = __exit_p(menelaus_remove),
.id_table = menelaus_id,
};
static int __init menelaus_init(void)
{
int res;
res = i2c_add_driver(&menelaus_i2c_driver);
if (res < 0) {
pr_err(DRIVER_NAME ": driver registration failed\n");
return res;
}
return 0;
}
static void __exit menelaus_exit(void)
{
i2c_del_driver(&menelaus_i2c_driver);
/* FIXME: Shutdown menelaus parts that can be shut down */
}
MODULE_AUTHOR("Texas Instruments, Inc. (and others)");
MODULE_DESCRIPTION("I2C interface for Menelaus.");
MODULE_LICENSE("GPL");
module_init(menelaus_init);
module_exit(menelaus_exit);
| gpl-2.0 |
WildfireDEV/android_kernel_samsung_s6 | drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 936 | 56383 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/if_ether.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <uapi/linux/nl80211.h>
#include <net/cfg80211.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "dhd.h"
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "dhd_bus.h"
#include "fwil.h"
#include "fwil_types.h"
#include "fweh.h"
#include "fwsignal.h"
#include "p2p.h"
#include "wl_cfg80211.h"
/**
* DOC: Firmware Signalling
*
* Firmware can send signals to host and vice versa, which are passed in the
* data packets using TLV based header. This signalling layer is on top of the
* BDC bus protocol layer.
*/
/*
 * Single definition for firmware-driver flow control TLVs.
 *
 * Each TLV is specified by BRCMF_FWS_TLV_DEF(name, ID, length).
 * A length value of 0 indicates a variable-length TLV.
*/
#define BRCMF_FWS_TLV_DEFLIST \
BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \
BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \
BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \
BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \
BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \
BRCMF_FWS_TLV_DEF(MACDESC_ADD, 6, 8) \
BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \
BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \
BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \
BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \
BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 6) \
BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \
BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \
BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \
BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \
BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \
BRCMF_FWS_TLV_DEF(FILLER, 255, 0)
/*
* enum brcmf_fws_tlv_type - definition of tlv identifiers.
*/
#define BRCMF_FWS_TLV_DEF(name, id, len) \
BRCMF_FWS_TYPE_ ## name = id,
enum brcmf_fws_tlv_type {
BRCMF_FWS_TLV_DEFLIST
BRCMF_FWS_TYPE_INVALID
};
#undef BRCMF_FWS_TLV_DEF
/*
* enum brcmf_fws_tlv_len - definition of tlv lengths.
*/
#define BRCMF_FWS_TLV_DEF(name, id, len) \
BRCMF_FWS_TYPE_ ## name ## _LEN = (len),
enum brcmf_fws_tlv_len {
BRCMF_FWS_TLV_DEFLIST
};
#undef BRCMF_FWS_TLV_DEF
#ifdef DEBUG
/*
* brcmf_fws_tlv_names - array of tlv names.
*/
#define BRCMF_FWS_TLV_DEF(name, id, len) \
{ id, #name },
static struct {
enum brcmf_fws_tlv_type id;
const char *name;
} brcmf_fws_tlv_names[] = {
BRCMF_FWS_TLV_DEFLIST
};
#undef BRCMF_FWS_TLV_DEF
static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
{
int i;
for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++)
if (brcmf_fws_tlv_names[i].id == id)
return brcmf_fws_tlv_names[i].name;
return "INVALID";
}
#else
static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
{
return "NODEBUG";
}
#endif /* DEBUG */
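/*
 * Illustrative sketch, not from the original file: the signalling TLVs
 * are assumed here to be laid out as <type:1><len:1><value:len>, which
 * matches the fixed lengths declared in BRCMF_FWS_TLV_DEFLIST above.
 * A minimal parser walk over such a buffer would look roughly like this.
 */
static inline void brcmf_fws_example_tlv_walk(u8 *signal, u16 siglen)
{
	while (siglen >= 2) {
		u8 type = signal[0];
		u8 len = signal[1];

		if (siglen < (u16)(2 + len))
			break;	/* truncated TLV, stop parsing */
		/* ... dispatch on type here ... */
		(void)type;
		signal += 2 + len;
		siglen -= 2 + len;
	}
}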
/*
* flags used to enable tlv signalling from firmware.
*/
#define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001
#define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002
#define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
#define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
#define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
#define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
#define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040
#define BRCMF_FWS_MAC_DESC_TABLE_SIZE 32
#define BRCMF_FWS_MAC_DESC_ID_INVALID 0xff
#define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF 0
#define BRCMF_FWS_HOSTIF_FLOWSTATE_ON 1
#define BRCMF_FWS_FLOWCONTROL_HIWATER 128
#define BRCMF_FWS_FLOWCONTROL_LOWATER 64
#define BRCMF_FWS_PSQ_PREC_COUNT ((NL80211_NUM_ACS + 1) * 2)
#define BRCMF_FWS_PSQ_LEN 256
#define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01
#define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02
#define BRCMF_FWS_RET_OK_NOSCHEDULE 0
#define BRCMF_FWS_RET_OK_SCHEDULE 1
/**
* enum brcmf_fws_skb_state - indicates processing state of skb.
*
* @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver.
* @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue.
* @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware.
*/
enum brcmf_fws_skb_state {
BRCMF_FWS_SKBSTATE_NEW,
BRCMF_FWS_SKBSTATE_DELAYED,
BRCMF_FWS_SKBSTATE_SUPPRESSED
};
/**
* struct brcmf_skbuff_cb - control buffer associated with skbuff.
*
* @if_flags: holds interface index and packet related flags.
* @htod: host to device packet identifier (used in PKTTAG tlv).
* @state: transmit state of the packet.
* @mac: descriptor related to destination for this packet.
*
* This information is stored in control buffer struct sk_buff::cb, which
* provides 48 bytes of storage so this structure should not exceed that.
*/
struct brcmf_skbuff_cb {
u16 if_flags;
u32 htod;
enum brcmf_fws_skb_state state;
struct brcmf_fws_mac_descriptor *mac;
};
/*
* macro casting skbuff control buffer to struct brcmf_skbuff_cb.
*/
#define brcmf_skbcb(skb) ((struct brcmf_skbuff_cb *)((skb)->cb))
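/*
 * Sketch added for illustration: the kernel-doc above notes that
 * sk_buff::cb provides 48 bytes of storage. A compile-time check like
 * this one (hypothetical, not in the original source) would catch
 * accidental growth of struct brcmf_skbuff_cb.
 */
static inline void brcmf_skbcb_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct brcmf_skbuff_cb) >
		     sizeof(((struct sk_buff *)0)->cb));
}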
/*
* sk_buff control if flags
*
* b[11] - packet sent upon firmware request.
* b[10] - packet only contains signalling data.
* b[9] - packet is a tx packet.
* b[8] - packet uses FIFO credit (non-pspoll).
* b[7] - interface in AP mode.
* b[6:4] - AC FIFO number.
* b[3:0] - interface index.
*/
#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800
#define BRCMF_SKB_IF_FLAGS_REQUESTED_SHIFT 11
#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_MASK 0x0400
#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10
#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200
#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9
#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK 0x0100
#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT 8
#define BRCMF_SKB_IF_FLAGS_IF_AP_MASK 0x0080
#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT 7
#define BRCMF_SKB_IF_FLAGS_FIFO_MASK 0x0070
#define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT 4
#define BRCMF_SKB_IF_FLAGS_INDEX_MASK 0x000f
#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT 0
#define brcmf_skb_if_flags_set_field(skb, field, value) \
brcmu_maskset16(&(brcmf_skbcb(skb)->if_flags), \
BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT, (value))
#define brcmf_skb_if_flags_get_field(skb, field) \
brcmu_maskget16(brcmf_skbcb(skb)->if_flags, \
BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT)
/*
* sk_buff control packet identifier
*
* 32-bit packet identifier used in PKTTAG tlv from host to dongle.
*
* - Generated at the host (e.g. dhd)
* - Seen as a generic sequence number by firmware except for the flags field.
*
* Generation : b[31] => generation number for this packet [host->fw]
* OR, current generation number [fw->host]
* Flags : b[30:27] => command, status flags
* FIFO-AC : b[26:24] => AC-FIFO id
* h-slot : b[23:8] => hanger-slot
* freerun : b[7:0] => A free running counter
*/
#define BRCMF_SKB_HTOD_TAG_GENERATION_MASK 0x80000000
#define BRCMF_SKB_HTOD_TAG_GENERATION_SHIFT 31
#define BRCMF_SKB_HTOD_TAG_FLAGS_MASK 0x78000000
#define BRCMF_SKB_HTOD_TAG_FLAGS_SHIFT 27
#define BRCMF_SKB_HTOD_TAG_FIFO_MASK 0x07000000
#define BRCMF_SKB_HTOD_TAG_FIFO_SHIFT 24
#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK 0x00ffff00
#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT 8
#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK 0x000000ff
#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT 0
#define brcmf_skb_htod_tag_set_field(skb, field, value) \
brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT, (value))
#define brcmf_skb_htod_tag_get_field(skb, field) \
brcmu_maskget32(brcmf_skbcb(skb)->htod, \
BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)
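/*
 * Illustrative sketch, not part of the original driver: filling in the
 * if_flags and the 32-bit PKTTAG described above for a hypothetical
 * outgoing packet. Each setter masks only its own bits, so the order of
 * the calls does not matter.
 */
static inline void brcmf_fws_example_fill_cb(struct sk_buff *skb, u8 ifidx,
					     u8 fifo, u16 hslot, u8 seq)
{
	brcmf_skb_if_flags_set_field(skb, INDEX, ifidx);
	brcmf_skb_if_flags_set_field(skb, FIFO, fifo);
	brcmf_skb_htod_tag_set_field(skb, FIFO, fifo);
	brcmf_skb_htod_tag_set_field(skb, HSLOT, hslot);
	brcmf_skb_htod_tag_set_field(skb, FREERUN, seq);
}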
#define BRCMF_FWS_TXSTAT_GENERATION_MASK 0x80000000
#define BRCMF_FWS_TXSTAT_GENERATION_SHIFT 31
#define BRCMF_FWS_TXSTAT_FLAGS_MASK 0x78000000
#define BRCMF_FWS_TXSTAT_FLAGS_SHIFT 27
#define BRCMF_FWS_TXSTAT_FIFO_MASK 0x07000000
#define BRCMF_FWS_TXSTAT_FIFO_SHIFT 24
#define BRCMF_FWS_TXSTAT_HSLOT_MASK 0x00FFFF00
#define BRCMF_FWS_TXSTAT_HSLOT_SHIFT 8
#define BRCMF_FWS_TXSTAT_PKTID_MASK 0x00FFFFFF
#define BRCMF_FWS_TXSTAT_PKTID_SHIFT 0
#define brcmf_txstatus_get_field(txs, field) \
brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
BRCMF_FWS_TXSTAT_ ## field ## _SHIFT)
/* How long to defer borrowing in jiffies */
#define BRCMF_FWS_BORROW_DEFER_PERIOD (HZ / 10)
/**
* enum brcmf_fws_fifo - fifo indices used by dongle firmware.
*
* @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
* @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
* @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
* @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
* @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
* @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
* @BRCMF_FWS_FIFO_COUNT: number of fifos.
*/
enum brcmf_fws_fifo {
BRCMF_FWS_FIFO_AC_BK,
BRCMF_FWS_FIFO_AC_BE,
BRCMF_FWS_FIFO_AC_VI,
BRCMF_FWS_FIFO_AC_VO,
BRCMF_FWS_FIFO_BCMC,
BRCMF_FWS_FIFO_ATIM,
BRCMF_FWS_FIFO_COUNT
};
/**
* enum brcmf_fws_txstatus - txstatus flag values.
*
* @BRCMF_FWS_TXSTATUS_DISCARD:
* host is free to discard the packet.
* @BRCMF_FWS_TXSTATUS_CORE_SUPPRESS:
* 802.11 core suppressed the packet.
* @BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS:
 * firmware suppressed the packet as the device is already in PS mode.
* @BRCMF_FWS_TXSTATUS_FW_TOSSED:
* firmware tossed the packet.
*/
enum brcmf_fws_txstatus {
BRCMF_FWS_TXSTATUS_DISCARD,
BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
BRCMF_FWS_TXSTATUS_FW_TOSSED
};
enum brcmf_fws_fcmode {
BRCMF_FWS_FCMODE_NONE,
BRCMF_FWS_FCMODE_IMPLIED_CREDIT,
BRCMF_FWS_FCMODE_EXPLICIT_CREDIT
};
enum brcmf_fws_mac_desc_state {
BRCMF_FWS_STATE_OPEN = 1,
BRCMF_FWS_STATE_CLOSE
};
/**
* struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
*
* @occupied: slot is in use.
* @mac_handle: handle for mac entry determined by firmware.
* @interface_id: interface index.
* @state: current state.
* @suppressed: mac entry is suppressed.
* @generation: generation bit.
* @ac_bitmap: ac queue bitmap.
* @requested_credit: credits requested by firmware.
* @ea: ethernet address.
* @seq: per-node free-running sequence.
* @psq: power-save queue.
 * @transit_count: packets in transit to firmware.
*/
struct brcmf_fws_mac_descriptor {
u8 occupied;
u8 mac_handle;
u8 interface_id;
u8 state;
bool suppressed;
u8 generation;
u8 ac_bitmap;
u8 requested_credit;
u8 requested_packet;
u8 ea[ETH_ALEN];
u8 seq[BRCMF_FWS_FIFO_COUNT];
struct pktq psq;
int transit_count;
int suppress_count;
int suppr_transit_count;
bool send_tim_signal;
u8 traffic_pending_bmp;
u8 traffic_lastreported_bmp;
};
#define BRCMF_FWS_HANGER_MAXITEMS 1024
/**
* enum brcmf_fws_hanger_item_state - state of hanger item.
*
* @BRCMF_FWS_HANGER_ITEM_STATE_FREE: item is free for use.
* @BRCMF_FWS_HANGER_ITEM_STATE_INUSE: item is in use.
* @BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED: item was suppressed.
*/
enum brcmf_fws_hanger_item_state {
BRCMF_FWS_HANGER_ITEM_STATE_FREE = 1,
BRCMF_FWS_HANGER_ITEM_STATE_INUSE,
BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED
};
/**
* struct brcmf_fws_hanger_item - single entry for tx pending packet.
*
* @state: entry is either free or occupied.
* @gen: generation.
* @pkt: packet itself.
*/
struct brcmf_fws_hanger_item {
enum brcmf_fws_hanger_item_state state;
u8 gen;
struct sk_buff *pkt;
};
/**
* struct brcmf_fws_hanger - holds packets awaiting firmware txstatus.
*
* @pushed: packets pushed to await txstatus.
* @popped: packets popped upon handling txstatus.
* @failed_to_push: packets that could not be pushed.
* @failed_to_pop: packets that could not be popped.
 * @failed_slotfind: packets for which no free slot could be found.
* @slot_pos: last returned item index for a free entry.
* @items: array of hanger items.
*/
struct brcmf_fws_hanger {
u32 pushed;
u32 popped;
u32 failed_to_push;
u32 failed_to_pop;
u32 failed_slotfind;
u32 slot_pos;
struct brcmf_fws_hanger_item items[BRCMF_FWS_HANGER_MAXITEMS];
};
struct brcmf_fws_macdesc_table {
struct brcmf_fws_mac_descriptor nodes[BRCMF_FWS_MAC_DESC_TABLE_SIZE];
struct brcmf_fws_mac_descriptor iface[BRCMF_MAX_IFS];
struct brcmf_fws_mac_descriptor other;
};
struct brcmf_fws_info {
struct brcmf_pub *drvr;
struct brcmf_fws_stats stats;
struct brcmf_fws_hanger hanger;
enum brcmf_fws_fcmode fcmode;
struct brcmf_fws_macdesc_table desc;
struct workqueue_struct *fws_wq;
struct work_struct fws_dequeue_work;
u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
int fifo_credit[BRCMF_FWS_FIFO_COUNT];
int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
u32 fifo_credit_map;
u32 fifo_delay_map;
unsigned long borrow_defer_timestamp;
};
/*
* brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
*/
static const int brcmf_fws_prio2fifo[] = {
BRCMF_FWS_FIFO_AC_BE,
BRCMF_FWS_FIFO_AC_BK,
BRCMF_FWS_FIFO_AC_BK,
BRCMF_FWS_FIFO_AC_BE,
BRCMF_FWS_FIFO_AC_VI,
BRCMF_FWS_FIFO_AC_VI,
BRCMF_FWS_FIFO_AC_VO,
BRCMF_FWS_FIFO_AC_VO
};
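/*
 * Illustration (added): how the mapping table above is typically
 * consulted. 802.1d priorities run 0..7, so the skb priority is masked
 * to three bits before indexing.
 */
static inline int brcmf_fws_example_prio2fifo(struct sk_buff *skb)
{
	return brcmf_fws_prio2fifo[skb->priority & 0x7];
}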
static int fcmode;
module_param(fcmode, int, S_IRUSR);
MODULE_PARM_DESC(fcmode, "mode of firmware signalled flow control");
#define BRCMF_FWS_TLV_DEF(name, id, len) \
case BRCMF_FWS_TYPE_ ## name: \
return len;
/**
* brcmf_fws_get_tlv_len() - returns defined length for given tlv id.
*
* @fws: firmware-signalling information.
* @id: identifier of the TLV.
*
* Return: the specified length for the given TLV; Otherwise -EINVAL.
*/
static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
enum brcmf_fws_tlv_type id)
{
switch (id) {
BRCMF_FWS_TLV_DEFLIST
default:
fws->stats.tlv_invalid_type++;
break;
}
return -EINVAL;
}
#undef BRCMF_FWS_TLV_DEF
static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
return ifidx == *(int *)arg;
}
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
int ifidx)
{
bool (*matchfn)(struct sk_buff *, void *) = NULL;
struct sk_buff *skb;
int prec;
if (ifidx != -1)
matchfn = brcmf_fws_ifidx_match;
for (prec = 0; prec < q->num_prec; prec++) {
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
while (skb) {
brcmu_pkt_buf_free_skb(skb);
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
}
}
}
static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
{
int i;
brcmf_dbg(TRACE, "enter\n");
memset(hanger, 0, sizeof(*hanger));
for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
}
static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
{
u32 i;
brcmf_dbg(TRACE, "enter\n");
i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
while (i != h->slot_pos) {
if (h->items[i].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
h->slot_pos = i;
goto done;
}
i++;
if (i == BRCMF_FWS_HANGER_MAXITEMS)
i = 0;
}
brcmf_err("all slots occupied\n");
h->failed_slotfind++;
i = BRCMF_FWS_HANGER_MAXITEMS;
done:
brcmf_dbg(TRACE, "exit: %d\n", i);
return i;
}
static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
struct sk_buff *pkt, u32 slot_id)
{
brcmf_dbg(TRACE, "enter\n");
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
brcmf_err("slot is not free\n");
h->failed_to_push++;
return -EINVAL;
}
h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE;
h->items[slot_id].pkt = pkt;
h->pushed++;
return 0;
}
static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
u32 slot_id, struct sk_buff **pktout,
bool remove_item)
{
brcmf_dbg(TRACE, "enter\n");
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
brcmf_err("entry not in use\n");
h->failed_to_pop++;
return -EINVAL;
}
*pktout = h->items[slot_id].pkt;
if (remove_item) {
h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
h->items[slot_id].pkt = NULL;
h->items[slot_id].gen = 0xff;
h->popped++;
}
return 0;
}
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
u32 slot_id, u8 gen)
{
brcmf_dbg(TRACE, "enter\n");
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
h->items[slot_id].gen = gen;
if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) {
brcmf_err("entry not in use\n");
return -EINVAL;
}
h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
return 0;
}
static int brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger,
struct sk_buff *pkt, u32 slot_id,
int *gen)
{
brcmf_dbg(TRACE, "enter\n");
*gen = 0xff;
if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
return -ENOENT;
if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
brcmf_err("slot not in use\n");
return -EINVAL;
}
*gen = hanger->items[slot_id].gen;
return 0;
}
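/*
 * Illustrative sketch, not part of the original driver: the typical
 * hanger life-cycle for a tx packet, combining the helpers above.
 * Error handling is trimmed for brevity.
 */
static inline int brcmf_fws_example_hanger_use(struct brcmf_fws_hanger *h,
					       struct sk_buff *skb)
{
	struct sk_buff *out;
	u32 slot = brcmf_fws_hanger_get_free_slot(h);

	if (slot == BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOMEM;
	brcmf_fws_hanger_pushpkt(h, skb, slot);	/* await txstatus */
	/* ... later, firmware reports txstatus for this slot ... */
	return brcmf_fws_hanger_poppkt(h, slot, &out, true);
}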
static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
bool (*fn)(struct sk_buff *, void *),
int ifidx)
{
struct brcmf_fws_hanger *h = &fws->hanger;
struct sk_buff *skb;
int i;
enum brcmf_fws_hanger_item_state s;
brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
for (i = 0; i < ARRAY_SIZE(h->items); i++) {
s = h->items[i].state;
if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
skb = h->items[i].pkt;
if (fn == NULL || fn(skb, &ifidx)) {
/* suppress packets freed from psq */
if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE)
brcmu_pkt_buf_free_skb(skb);
h->items[i].state =
BRCMF_FWS_HANGER_ITEM_STATE_FREE;
}
}
}
}
static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
u8 *addr, u8 ifidx)
{
brcmf_dbg(TRACE,
"enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
desc->occupied = 1;
desc->state = BRCMF_FWS_STATE_OPEN;
desc->requested_credit = 0;
/* depending on use may need ifp->bssidx instead */
desc->interface_id = ifidx;
desc->ac_bitmap = 0xff; /* update this when handling APSD */
if (addr)
memcpy(&desc->ea[0], addr, ETH_ALEN);
}
static
void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc)
{
brcmf_dbg(TRACE,
"enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
desc->occupied = 0;
desc->state = BRCMF_FWS_STATE_CLOSE;
desc->requested_credit = 0;
}
static struct brcmf_fws_mac_descriptor *
brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
{
struct brcmf_fws_mac_descriptor *entry;
int i;
brcmf_dbg(TRACE, "enter: ea=%pM\n", ea);
if (ea == NULL)
return ERR_PTR(-EINVAL);
entry = &fws->desc.nodes[0];
for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) {
if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))
return entry;
entry++;
}
return ERR_PTR(-ENOENT);
}
static struct brcmf_fws_mac_descriptor*
brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp,
u8 *da)
{
	struct brcmf_fws_mac_descriptor *entry;
bool multicast;
enum nl80211_iftype iftype;
brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
multicast = is_multicast_ether_addr(da);
iftype = brcmf_cfg80211_get_iftype(ifp);
/* Multicast destination and P2P clients get the interface entry.
* STA gets the interface entry if there is no exact match. For
* example, TDLS destinations have their own entry.
*/
entry = NULL;
if ((multicast || iftype == NL80211_IFTYPE_STATION ||
iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc)
entry = ifp->fws_desc;
if (entry != NULL && iftype != NL80211_IFTYPE_STATION)
goto done;
entry = brcmf_fws_mac_descriptor_lookup(fws, da);
if (IS_ERR(entry))
entry = &fws->desc.other;
done:
brcmf_dbg(TRACE, "exit: entry=%p\n", entry);
return entry;
}
static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
struct brcmf_fws_mac_descriptor *entry,
int fifo)
{
struct brcmf_fws_mac_descriptor *if_entry;
bool closed;
/* for unique destination entries the related interface
* may be closed.
*/
if (entry->mac_handle) {
if_entry = &fws->desc.iface[entry->interface_id];
if (if_entry->state == BRCMF_FWS_STATE_CLOSE)
return true;
}
/* an entry is closed when the state is closed and
* the firmware did not request anything.
*/
closed = entry->state == BRCMF_FWS_STATE_CLOSE &&
!entry->requested_credit && !entry->requested_packet;
/* Or firmware does not allow traffic for given fifo */
return closed || !(entry->ac_bitmap & BIT(fifo));
}
static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws,
struct brcmf_fws_mac_descriptor *entry,
int ifidx)
{
brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n",
entry->ea, entry->interface_id, ifidx);
if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n",
ifidx, entry->psq.len);
brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
entry->occupied = !!(entry->psq.len);
}
}
static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
bool (*fn)(struct sk_buff *, void *),
int ifidx)
{
struct brcmf_fws_hanger_item *hi;
struct pktq *txq;
struct sk_buff *skb;
int prec;
u32 hslot;
brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
txq = brcmf_bus_gettxq(fws->drvr->bus_if);
if (IS_ERR(txq)) {
brcmf_dbg(TRACE, "no txq to clean up\n");
return;
}
for (prec = 0; prec < txq->num_prec; prec++) {
skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
while (skb) {
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
hi = &fws->hanger.items[hslot];
WARN_ON(skb != hi->pkt);
hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
brcmu_pkt_buf_free_skb(skb);
skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
}
}
}
static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
{
int i;
struct brcmf_fws_mac_descriptor *table;
bool (*matchfn)(struct sk_buff *, void *) = NULL;
brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
if (fws == NULL)
return;
if (ifidx != -1)
matchfn = brcmf_fws_ifidx_match;
/* cleanup individual nodes */
table = &fws->desc.nodes[0];
for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
brcmf_fws_mac_desc_cleanup(fws, &table[i], ifidx);
brcmf_fws_mac_desc_cleanup(fws, &fws->desc.other, ifidx);
brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
}
static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx,
struct brcmf_fws_mac_descriptor *entry,
int prec)
{
brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea);
if (entry->state == BRCMF_FWS_STATE_CLOSE) {
/* check delayedQ and suppressQ in one call using bitmap */
if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0)
entry->traffic_pending_bmp =
entry->traffic_pending_bmp & ~NBITVAL(prec);
else
entry->traffic_pending_bmp =
entry->traffic_pending_bmp | NBITVAL(prec);
}
/* request a TIM update to firmware at the next piggyback opportunity */
if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
entry->send_tim_signal = true;
}
static void
brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
u8 if_id)
{
struct brcmf_if *ifp = fws->drvr->iflist[if_id];
if (WARN_ON(!ifp))
return;
brcmf_dbg(TRACE,
"enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx);
if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
brcmf_txflowblock_if(ifp,
BRCMF_NETIF_STOP_REASON_FWS_FC, false);
if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER)
brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
}
static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
{
brcmf_dbg(CTL, "rssi %d\n", rssi);
return 0;
}
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry, *existing;
u8 mac_handle;
u8 ifidx;
u8 *addr;
mac_handle = *data++;
ifidx = *data++;
addr = data;
entry = &fws->desc.nodes[mac_handle & 0x1F];
if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx);
if (entry->occupied) {
brcmf_fws_mac_desc_cleanup(fws, entry, -1);
brcmf_fws_clear_mac_descriptor(entry);
} else
fws->stats.mac_update_failed++;
return 0;
}
brcmf_dbg(TRACE,
"add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx);
existing = brcmf_fws_mac_descriptor_lookup(fws, addr);
if (IS_ERR(existing)) {
if (!entry->occupied) {
entry->mac_handle = mac_handle;
brcmf_fws_init_mac_descriptor(entry, addr, ifidx);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
} else {
fws->stats.mac_update_failed++;
}
} else {
if (entry != existing) {
brcmf_dbg(TRACE, "relocate mac\n");
memcpy(entry, existing,
offsetof(struct brcmf_fws_mac_descriptor, psq));
entry->mac_handle = mac_handle;
brcmf_fws_clear_mac_descriptor(existing);
} else {
brcmf_dbg(TRACE, "use existing\n");
WARN_ON(entry->mac_handle != mac_handle);
/* TODO: what should we do here: continue, reinit, .. */
}
}
return 0;
}
static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
u8 mac_handle;
int i;
mac_handle = data[0];
entry = &fws->desc.nodes[mac_handle & 0x1F];
if (!entry->occupied) {
fws->stats.mac_ps_update_failed++;
return -ESRCH;
}
/* a state update should wipe old credits? */
entry->requested_credit = 0;
if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
entry->state = BRCMF_FWS_STATE_OPEN;
return BRCMF_FWS_RET_OK_SCHEDULE;
} else {
entry->state = BRCMF_FWS_STATE_CLOSE;
for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++)
brcmf_fws_tim_update(fws, entry, i);
}
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
u8 ifidx;
int ret;
ifidx = data[0];
brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
if (ifidx >= BRCMF_MAX_IFS) {
ret = -ERANGE;
goto fail;
}
entry = &fws->desc.iface[ifidx];
if (!entry->occupied) {
ret = -ESRCH;
goto fail;
}
switch (type) {
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
entry->state = BRCMF_FWS_STATE_OPEN;
return BRCMF_FWS_RET_OK_SCHEDULE;
case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
entry->state = BRCMF_FWS_STATE_CLOSE;
return BRCMF_FWS_RET_OK_NOSCHEDULE;
default:
ret = -EINVAL;
break;
}
fail:
fws->stats.if_update_failed++;
return ret;
}
static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
entry = &fws->desc.nodes[data[1] & 0x1F];
if (!entry->occupied) {
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
fws->stats.credit_request_failed++;
else
fws->stats.packet_request_failed++;
return -ESRCH;
}
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
entry->requested_credit = data[0];
else
entry->requested_packet = data[0];
entry->ac_bitmap = data[2];
return BRCMF_FWS_RET_OK_SCHEDULE;
}
static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
u8 fifo, u8 credits)
{
int lender_ac;
int *borrowed;
int *fifo_credit;
if (!credits)
return;
if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
(fws->credits_borrowed[0])) {
for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
lender_ac--) {
borrowed = &fws->credits_borrowed[lender_ac];
if (*borrowed) {
fws->fifo_credit_map |= (1 << lender_ac);
fifo_credit = &fws->fifo_credit[lender_ac];
if (*borrowed >= credits) {
*borrowed -= credits;
*fifo_credit += credits;
return;
} else {
credits -= *borrowed;
*fifo_credit += *borrowed;
*borrowed = 0;
}
}
}
}
fws->fifo_credit_map |= 1 << fifo;
fws->fifo_credit[fifo] += credits;
}
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
{
/* only schedule dequeue when there are credits for delayed traffic */
if (fws->fifo_credit_map & fws->fifo_delay_map)
queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}
static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *p)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac;
if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT)
return;
brcmf_fws_return_credits(fws, fifo, 1);
} else {
/*
* if this packet did not count against FIFO credit, it
* must have taken a requested_credit from the destination
* entry (for pspoll etc.)
*/
if (!brcmf_skb_if_flags_get_field(p, REQUESTED))
entry->requested_credit++;
}
brcmf_fws_schedule_deq(fws);
}
static int brcmf_fws_enq(struct brcmf_fws_info *fws,
enum brcmf_fws_skb_state state, int fifo,
struct sk_buff *p)
{
int prec = 2 * fifo;
u32 *qfull_stat = &fws->stats.delayq_full_error;
struct brcmf_fws_mac_descriptor *entry;
entry = brcmf_skbcb(p)->mac;
if (entry == NULL) {
brcmf_err("no mac descriptor found for skb %p\n", p);
return -ENOENT;
}
brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len);
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
prec += 1;
qfull_stat = &fws->stats.supprq_full_error;
}
if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
*qfull_stat += 1;
return -ENFILE;
}
/* increment total enqueued packet count */
fws->fifo_delay_map |= 1 << fifo;
fws->fifo_enqpkt[fifo]++;
/* update the sk_buff state */
brcmf_skbcb(p)->state = state;
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
entry->suppress_count++;
/*
* A packet has been pushed so update traffic
* availability bitmap, if applicable
*/
brcmf_fws_tim_update(fws, entry, fifo);
brcmf_fws_flow_control_check(fws, &entry->psq,
brcmf_skb_if_flags_get_field(p, INDEX));
return 0;
}
static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
{
struct brcmf_fws_mac_descriptor *table;
struct brcmf_fws_mac_descriptor *entry;
struct sk_buff *p;
int use_credit = 1;
int num_nodes;
int node_pos;
int prec_out;
int pmsk;
int i;
table = (struct brcmf_fws_mac_descriptor *)&fws->desc;
num_nodes = sizeof(fws->desc) / sizeof(struct brcmf_fws_mac_descriptor);
node_pos = fws->deq_node_pos[fifo];
for (i = 0; i < num_nodes; i++) {
entry = &table[(node_pos + i) % num_nodes];
if (!entry->occupied ||
brcmf_fws_mac_desc_closed(fws, entry, fifo))
continue;
if (entry->suppressed)
pmsk = 2;
else
pmsk = 3;
p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
if (p == NULL) {
if (entry->suppressed) {
if (entry->suppr_transit_count >
entry->suppress_count)
return NULL;
entry->suppressed = false;
p = brcmu_pktq_mdeq(&entry->psq,
1 << (fifo * 2), &prec_out);
}
}
if (p == NULL)
continue;
/* did the packet come from suppress sub-queue? */
if (entry->requested_credit > 0) {
entry->requested_credit--;
/*
* if the packet was pulled out while destination is in
* closed state but had a non-zero packets requested,
* then this should not count against the FIFO credit.
* That is due to the fact that the firmware will
* most likely hold onto this packet until a suitable
* time later to push it to the appropriate AC FIFO.
*/
if (entry->state == BRCMF_FWS_STATE_CLOSE)
use_credit = 0;
} else if (entry->requested_packet > 0) {
entry->requested_packet--;
brcmf_skb_if_flags_set_field(p, REQUESTED, 1);
if (entry->state == BRCMF_FWS_STATE_CLOSE)
use_credit = 0;
}
brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit);
/* move dequeue position to ensure fair round-robin */
fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
brcmf_fws_flow_control_check(fws, &entry->psq,
brcmf_skb_if_flags_get_field(p,
INDEX)
);
/*
* A packet has been picked up, update traffic
* availability bitmap, if applicable
*/
brcmf_fws_tim_update(fws, entry, fifo);
/*
* decrement total enqueued fifo packets and
* clear delay bitmap if done.
*/
fws->fifo_enqpkt[fifo]--;
if (fws->fifo_enqpkt[fifo] == 0)
fws->fifo_delay_map &= ~(1 << fifo);
goto done;
}
p = NULL;
done:
brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p);
return p;
}
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *skb, u32 genbit)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u32 hslot;
int ret;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
/* this packet was suppressed */
if (!entry->suppressed || entry->generation != genbit) {
entry->suppressed = true;
entry->suppress_count = brcmu_pktq_mlen(&entry->psq,
1 << (fifo * 2 + 1));
entry->suppr_transit_count = entry->transit_count;
}
entry->generation = genbit;
ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
if (ret != 0) {
/* suppress q is full, drop this packet */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
true);
} else {
/*
* Mark suppressed to avoid a double free during
* wlfc cleanup
*/
brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot,
genbit);
entry->suppress_count++;
}
return ret;
}
static int
brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
u32 genbit)
{
u32 fifo;
int ret;
bool remove_from_hanger = true;
struct sk_buff *skb;
struct brcmf_fws_mac_descriptor *entry = NULL;
brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n",
flags, hslot);
if (flags == BRCMF_FWS_TXSTATUS_DISCARD)
fws->stats.txs_discard++;
else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) {
fws->stats.txs_supp_core++;
remove_from_hanger = false;
} else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
fws->stats.txs_supp_ps++;
remove_from_hanger = false;
} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
fws->stats.txs_tossed++;
else
brcmf_err("unexpected txstatus\n");
ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
remove_from_hanger);
if (ret != 0) {
brcmf_err("no packet in hanger slot: hslot=%d\n", hslot);
return ret;
}
entry = brcmf_skbcb(skb)->mac;
if (WARN_ON(!entry)) {
brcmu_pkt_buf_free_skb(skb);
return -EINVAL;
}
/* pick up the implicit credit from this packet */
fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
brcmf_skb_pick_up_credit(fws, fifo, skb);
if (!remove_from_hanger)
ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
if (remove_from_hanger || ret) {
entry->transit_count--;
if (entry->suppressed)
entry->suppr_transit_count--;
brcmf_txfinalize(fws->drvr, skb, true);
}
return 0;
}
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
u8 *data)
{
int i;
if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
brcmf_dbg(INFO, "ignored\n");
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
brcmf_dbg(TRACE, "enter: data %pM\n", data);
for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
brcmf_fws_return_credits(fws, i, data[i]);
brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map,
fws->fifo_delay_map);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
__le32 status_le;
u32 status;
u32 hslot;
u32 genbit;
u8 flags;
fws->stats.txs_indicate++;
memcpy(&status_le, data, sizeof(status_le));
status = le32_to_cpu(status_le);
flags = brcmf_txstatus_get_field(status, FLAGS);
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
return brcmf_fws_txstatus_process(fws, flags, hslot, genbit);
}
static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
{
__le32 timestamp;
memcpy(&timestamp, &data[2], sizeof(timestamp));
brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1],
le32_to_cpu(timestamp));
return 0;
}
/* using macro so sparse checking does not complain
* about locking imbalance.
*/
#define brcmf_fws_lock(drvr, flags) \
do { \
flags = 0; \
spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
} while (0)
/* using macro so sparse checking does not complain
* about locking imbalance.
*/
#define brcmf_fws_unlock(drvr, flags) \
spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
int i;
ulong flags;
u8 *credits = data;
if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
brcmf_err("event payload too small (%d)\n", e->datalen);
return -EINVAL;
}
brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
brcmf_fws_lock(ifp->drvr, flags);
for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
if (*credits)
fws->fifo_credit_map |= 1 << i;
else
fws->fifo_credit_map &= ~(1 << i);
fws->fifo_credit[i] = *credits++;
}
brcmf_fws_schedule_deq(fws);
brcmf_fws_unlock(ifp->drvr, flags);
return 0;
}
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
struct sk_buff *skb)
{
struct brcmf_fws_info *fws = drvr->fws;
ulong flags;
u8 *signal_data;
s16 data_len;
u8 type;
u8 len;
u8 *data;
s32 status;
s32 err;
brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n",
ifidx, skb->len, signal_len);
WARN_ON(signal_len > skb->len);
/* if flow control disabled, skip to packet data and leave */
if (!signal_len || !drvr->fw_signals) {
skb_pull(skb, signal_len);
return 0;
}
/* lock during tlv parsing */
brcmf_fws_lock(drvr, flags);
fws->stats.header_pulls++;
data_len = signal_len;
signal_data = skb->data;
status = BRCMF_FWS_RET_OK_NOSCHEDULE;
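/* signal data is a sequence of TLVs: one type byte, one length
 * byte and 'len' value bytes
 */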
while (data_len > 0) {
/* extract tlv info */
type = signal_data[0];
/* FILLER type is actually not a TLV, but
* a single byte that can be skipped.
*/
if (type == BRCMF_FWS_TYPE_FILLER) {
signal_data += 1;
data_len -= 1;
continue;
}
len = signal_data[1];
data = signal_data + 2;
brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type,
brcmf_fws_get_tlv_name(type), len, *data);
/* abort parsing when length invalid */
if (data_len < len + 2)
break;
if (len != brcmf_fws_get_tlv_len(fws, type))
break;
err = BRCMF_FWS_RET_OK_NOSCHEDULE;
switch (type) {
case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
case BRCMF_FWS_TYPE_COMP_TXSTATUS:
break;
case BRCMF_FWS_TYPE_MACDESC_ADD:
case BRCMF_FWS_TYPE_MACDESC_DEL:
brcmf_fws_macdesc_indicate(fws, type, data);
break;
case BRCMF_FWS_TYPE_MAC_OPEN:
case BRCMF_FWS_TYPE_MAC_CLOSE:
err = brcmf_fws_macdesc_state_indicate(fws, type, data);
break;
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
err = brcmf_fws_interface_state_indicate(fws, type,
data);
break;
case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
err = brcmf_fws_request_indicate(fws, type, data);
break;
case BRCMF_FWS_TYPE_TXSTATUS:
brcmf_fws_txstatus_indicate(fws, data);
break;
case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
err = brcmf_fws_fifocreditback_indicate(fws, data);
break;
case BRCMF_FWS_TYPE_RSSI:
brcmf_fws_rssi_indicate(fws, *data);
break;
case BRCMF_FWS_TYPE_TRANS_ID:
brcmf_fws_dbg_seqnum_check(fws, data);
break;
case BRCMF_FWS_TYPE_PKTTAG:
case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
default:
fws->stats.tlv_invalid_type++;
break;
}
if (err == BRCMF_FWS_RET_OK_SCHEDULE)
status = BRCMF_FWS_RET_OK_SCHEDULE;
signal_data += len + 2;
data_len -= len + 2;
}
if (data_len != 0)
fws->stats.tlv_parse_failed++;
if (status == BRCMF_FWS_RET_OK_SCHEDULE)
brcmf_fws_schedule_deq(fws);
/* signalling processing result does
* not affect the actual ethernet packet.
*/
skb_pull(skb, signal_len);
/* this may be a signal-only packet
*/
if (skb->len == 0)
fws->stats.header_only_pkt++;
brcmf_fws_unlock(drvr, flags);
return 0;
}
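/*
 * Push the signalling header in front of the frame: a PKTTAG TLV,
 * optionally a PENDING_TRAFFIC_BMP TLV when a TIM signal is due,
 * padded with FILLER bytes up to a 4-byte boundary. Assuming the
 * 4-byte pkttag and 2-byte bitmap values copied below, a frame
 * carrying both TLVs needs 6 + 4 = 10 bytes, gets 2 filler bytes
 * and is reported to the protocol layer as 12 / 4 = 3 words.
 */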
static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
u8 *wlh;
u16 data_offset = 0;
u8 fillers;
__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n",
entry->ea, entry->interface_id, le32_to_cpu(pkttag));
if (entry->send_tim_signal)
data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
fillers = round_up(data_offset, 4) - data_offset;
data_offset += fillers;
skb_push(skb, data_offset);
wlh = skb->data;
wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
memcpy(&wlh[2], &pkttag, sizeof(pkttag));
wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
if (entry->send_tim_signal) {
entry->send_tim_signal = 0;
wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
wlh[2] = entry->mac_handle;
wlh[3] = entry->traffic_pending_bmp;
wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
}
if (fillers)
memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
data_offset >> 2, skb);
return 0;
}
static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *p)
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
struct brcmf_fws_mac_descriptor *entry = skcb->mac;
int rc = 0;
bool header_needed;
int hslot = BRCMF_FWS_HANGER_MAXITEMS;
u8 free_ctr;
u8 ifidx;
u8 flags;
header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;
if (header_needed) {
/* obtaining free slot may fail, but that will be caught
* by the hanger push. This assures the packet has a BDC
* header upon return.
*/
hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
free_ctr = entry->seq[fifo];
brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr);
brcmf_skb_htod_tag_set_field(p, GENERATION, 1);
entry->transit_count++;
}
brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) {
/*
 * Indicate that this packet is being sent in response to an
 * explicit request from the firmware side.
 */
flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
}
brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
if (header_needed) {
brcmf_fws_hdrpush(fws, p);
rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
if (rc)
brcmf_err("hanger push failed: rc=%d\n", rc);
} else {
int gen;
/* remove old header */
rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p);
if (rc == 0) {
hslot = brcmf_skb_htod_tag_get_field(p, HSLOT);
brcmf_fws_hanger_get_genbit(&fws->hanger, p,
hslot, &gen);
brcmf_skb_htod_tag_set_field(p, GENERATION, gen);
/* push new header */
brcmf_fws_hdrpush(fws, p);
}
}
return rc;
}
static void
brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
/*
 * put the packet back at the head of the queue:
 * - a suppressed packet goes back to the suppress sub-queue
 * - for a new or delayed packet the header is pulled out first
 * Note: hslot is used only when header removal is done.
 */
struct brcmf_fws_mac_descriptor *entry;
enum brcmf_fws_skb_state state;
struct sk_buff *pktout;
int rc = 0;
int fifo;
int hslot;
u8 ifidx;
fifo = brcmf_skb_if_flags_get_field(skb, FIFO);
state = brcmf_skbcb(skb)->state;
entry = brcmf_skbcb(skb)->mac;
if (entry != NULL) {
if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
/* wl-header is saved for suppressed packets */
pktout = brcmu_pktq_penq_head(&entry->psq, 2 * fifo + 1,
skb);
if (pktout == NULL) {
brcmf_err("suppress queue full\n");
rc = -ENOSPC;
}
} else {
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
/* remove header first */
rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
if (rc) {
brcmf_err("header removal failed\n");
/* free the hanger slot */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
&pktout, true);
rc = -EINVAL;
goto fail;
}
/* delay-q packets are going to delay-q */
pktout = brcmu_pktq_penq_head(&entry->psq,
2 * fifo, skb);
if (pktout == NULL) {
brcmf_err("delay queue full\n");
rc = -ENOSPC;
}
/* free the hanger slot */
brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &pktout,
true);
/* decrement sequence count */
entry->seq[fifo]--;
}
/*
 * if this packet did not count against FIFO credit, it must have
 * taken a requested_credit from the firmware (for pspoll etc.)
 */
if (!(brcmf_skbcb(skb)->if_flags &
BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK))
entry->requested_credit++;
} else {
brcmf_err("no mac entry linked\n");
rc = -ENOENT;
}
fail:
if (rc) {
brcmf_txfinalize(fws->drvr, skb, false);
fws->stats.rollback_failed++;
} else
fws->stats.rollback_success++;
}
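/*
 * Borrow a single credit for best-effort traffic from another
 * access category, trying lender FIFOs in ascending index order up
 * to AC_VO. Borrowing is refused while the defer timestamp set by
 * higher-priority traffic has not yet expired.
 */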
static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
{
int lender_ac;
if (time_after(fws->borrow_defer_timestamp, jiffies))
return -ENAVAIL;
for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
if (fws->fifo_credit[lender_ac]) {
fws->credits_borrowed[lender_ac]++;
fws->fifo_credit[lender_ac]--;
if (fws->fifo_credit[lender_ac] == 0)
fws->fifo_credit_map &= ~(1 << lender_ac);
brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac);
return 0;
}
}
return -ENAVAIL;
}
static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *skb)
{
struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
int *credit = &fws->fifo_credit[fifo];
int use_credit = 1;
brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit);
if (entry->requested_credit > 0) {
/*
 * if the packet was pulled out while the destination is in
 * the closed state but has a non-zero number of packets
 * requested, it should not count against the FIFO credit,
 * because the firmware will most likely hold on to this
 * packet until a suitable time later to push it to the
 * appropriate AC FIFO.
 */
entry->requested_credit--;
if (entry->state == BRCMF_FWS_STATE_CLOSE)
use_credit = 0;
} else if (entry->requested_packet > 0) {
entry->requested_packet--;
brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
if (entry->state == BRCMF_FWS_STATE_CLOSE)
use_credit = 0;
}
brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit);
if (!use_credit) {
brcmf_dbg(TRACE, "exit: no creditcheck set\n");
return 0;
}
if (fifo != BRCMF_FWS_FIFO_AC_BE)
fws->borrow_defer_timestamp = jiffies +
BRCMF_FWS_BORROW_DEFER_PERIOD;
if (!(*credit)) {
/* Try to borrow a credit from other queue */
if (fifo == BRCMF_FWS_FIFO_AC_BE &&
brcmf_fws_borrow_credit(fws) == 0)
return 0;
brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo);
return -ENAVAIL;
}
(*credit)--;
if (!(*credit))
fws->fifo_credit_map &= ~(1 << fifo);
brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit);
return 0;
}
static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
struct sk_buff *skb)
{
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct brcmf_fws_mac_descriptor *entry;
struct brcmf_bus *bus = fws->drvr->bus_if;
int rc;
entry = skcb->mac;
if (IS_ERR(entry))
return PTR_ERR(entry);
rc = brcmf_fws_precommit_skb(fws, fifo, skb);
if (rc < 0) {
fws->stats.generic_error++;
goto rollback;
}
rc = brcmf_bus_txdata(bus, skb);
if (rc < 0)
goto rollback;
entry->seq[fifo]++;
fws->stats.pkt2bus++;
if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
fws->stats.send_pkts[fifo]++;
fws->stats.fifo_credits_sent[fifo]++;
}
return rc;
rollback:
brcmf_fws_rollback_toq(fws, skb);
return rc;
}
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_fws_info *fws = drvr->fws;
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
ulong flags;
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
/* determine the priority */
if (!skb->priority)
skb->priority = cfg80211_classify8021d(skb);
drvr->tx_multicast += !!multicast;
if (ntohs(eh->h_proto) == ETH_P_PAE)
atomic_inc(&ifp->pend_8021x_cnt);
if (!brcmf_fws_fc_active(fws)) {
/* If the protocol uses a data header, apply it */
brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
/* Use bus module to send data frame */
return brcmf_bus_txdata(drvr->bus_if, skb);
}
/* set control buffer information */
skcb->if_flags = 0;
skcb->mac = brcmf_fws_find_mac_desc(fws, ifp, eh->h_dest);
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
if (!multicast)
fifo = brcmf_fws_prio2fifo[skb->priority];
brcmf_skb_if_flags_set_field(skb, FIFO, fifo);
brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest,
multicast, fifo);
brcmf_fws_lock(drvr, flags);
if (skcb->mac->suppressed ||
brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) ||
brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) ||
(!multicast &&
brcmf_fws_consume_credit(fws, fifo, skb) < 0)) {
/* enqueue the packet in delayQ */
drvr->fws->fifo_delay_map |= 1 << fifo;
brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
} else {
if (brcmf_fws_commit_skb(fws, fifo, skb))
if (!multicast)
brcmf_skb_pick_up_credit(fws, fifo, skb);
}
brcmf_fws_unlock(drvr, flags);
return 0;
}
void brcmf_fws_reset_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
if (!entry)
return;
brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
}
void brcmf_fws_add_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_mac_descriptor *entry;
brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n",
ifp->bssidx, ifp->mac_addr);
if (!ifp->ndev || !ifp->drvr->fw_signals)
return;
entry = &fws->desc.iface[ifp->ifidx];
ifp->fws_desc = entry;
brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
}
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
ulong flags;
brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
if (!entry)
return;
brcmf_fws_lock(ifp->drvr, flags);
ifp->fws_desc = NULL;
brcmf_fws_clear_mac_descriptor(entry);
brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
brcmf_fws_unlock(ifp->drvr, flags);
}
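/*
 * Worker that drains the delay queues: for each FIFO, dequeue and
 * commit packets while credits remain, counting only packets that
 * are subject to credit checking. Once the BE FIFO has consumed
 * all of its own credits it keeps borrowing single credits from
 * the other FIFOs, handing a credit back whenever no further
 * packet can be committed.
 */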
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
struct brcmf_fws_info *fws;
struct sk_buff *skb;
ulong flags;
int fifo;
int credit;
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
brcmf_dbg(TRACE, "enter: fws=%p\n", fws);
brcmf_fws_lock(fws->drvr, flags);
for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) {
brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo,
fws->fifo_credit[fifo]);
for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) {
skb = brcmf_fws_deq(fws, fifo);
if (!skb || brcmf_fws_commit_skb(fws, fifo, skb))
break;
if (brcmf_skbcb(skb)->if_flags &
BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)
credit++;
}
if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
(credit == fws->fifo_credit[fifo])) {
fws->fifo_credit[fifo] -= credit;
while (brcmf_fws_borrow_credit(fws) == 0) {
skb = brcmf_fws_deq(fws, fifo);
if (!skb) {
brcmf_fws_return_credits(fws, fifo, 1);
break;
}
if (brcmf_fws_commit_skb(fws, fifo, skb)) {
brcmf_fws_return_credits(fws, fifo, 1);
break;
}
}
} else {
fws->fifo_credit[fifo] -= credit;
}
}
brcmf_fws_unlock(fws->drvr, flags);
}
int brcmf_fws_init(struct brcmf_pub *drvr)
{
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc;
if (!drvr->fw_signals)
return 0;
spin_lock_init(&drvr->fws_spinlock);
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
rc = -ENOMEM;
goto fail;
}
/* set linkage back */
drvr->fws->drvr = drvr;
drvr->fws->fcmode = fcmode;
drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
if (drvr->fws->fws_wq == NULL) {
brcmf_err("workqueue creation failed\n");
rc = -EBADF;
goto fail;
}
INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
/* enable firmware signalling if fcmode active */
if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE)
tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
brcmf_fws_notify_credit_map);
if (rc < 0) {
brcmf_err("register credit map handler failed\n");
goto fail;
}
/* setting the iovar may fail if feature is unsupported
* so leave the rc as is so driver initialization can
* continue.
*/
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
brcmf_err("failed to set bdcv2 tlv signaling\n");
goto fail_event;
}
brcmf_fws_hanger_init(&drvr->fws->hanger);
brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0);
brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
/* create debugfs file for statistics */
brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
/* TODO: remove upon feature delivery */
brcmf_err("%s bdcv2 tlv signaling [%x]\n",
drvr->fw_signals ? "enabled" : "disabled", tlv);
return 0;
fail_event:
brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
fail:
brcmf_fws_deinit(drvr);
return rc;
}
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws = drvr->fws;
ulong flags;
if (!fws)
return;
/* disable firmware signalling entirely
* to avoid using the workqueue.
*/
drvr->fw_signals = false;
if (drvr->fws->fws_wq)
destroy_workqueue(drvr->fws->fws_wq);
/* cleanup */
brcmf_fws_lock(drvr, flags);
brcmf_fws_cleanup(fws, -1);
drvr->fws = NULL;
brcmf_fws_unlock(drvr, flags);
/* free top structure */
kfree(fws);
}
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
if (!fws)
return false;
brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode);
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
ulong flags;
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED,
brcmf_skb_htod_tag_get_field(skb, HSLOT), 0);
/* the packet never reached firmware so reclaim credit */
if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT &&
brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
brcmf_fws_return_credits(fws,
brcmf_skb_htod_tag_get_field(skb,
FIFO),
1);
brcmf_fws_schedule_deq(fws);
}
brcmf_fws_unlock(fws->drvr, flags);
}
| gpl-2.0 |
hisilicon/linaro-kernel | drivers/target/target_core_hba.c | 936 | 4322 | /*******************************************************************************
* Filename: target_core_hba.c
*
* This file contains the TCM HBA Transport related functions.
*
* (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
static u32 hba_id_counter;
static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
INIT_LIST_HEAD(&sub_api->sub_api_list);
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!strcmp(s->name, sub_api->name)) {
pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
return -EEXIST;
}
}
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
EXPORT_SYMBOL(transport_subsystem_register);
void transport_subsystem_release(struct se_subsystem_api *sub_api)
{
mutex_lock(&subsystem_mutex);
list_del(&sub_api->sub_api_list);
mutex_unlock(&subsystem_mutex);
}
EXPORT_SYMBOL(transport_subsystem_release);
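/*
 * Look up a registered backend by name and take a reference on its
 * owning module. The list walk and try_module_get() both happen
 * under subsystem_mutex, so the entry cannot disappear while the
 * reference is taken. Returns NULL if the name is unknown or the
 * module is going away.
 */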
static struct se_subsystem_api *core_get_backend(const char *sub_name)
{
struct se_subsystem_api *s;
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!strcmp(s->name, sub_name))
goto found;
}
mutex_unlock(&subsystem_mutex);
return NULL;
found:
if (s->owner && !try_module_get(s->owner))
s = NULL;
mutex_unlock(&subsystem_mutex);
return s;
}
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
struct se_hba *hba;
int ret = 0;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
goto out_free_hba;
}
ret = hba->transport->attach_hba(hba, plugin_dep_id);
if (ret < 0)
goto out_module_put;
spin_lock(&hba_lock);
hba->hba_id = hba_id_counter++;
list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock);
pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
out_module_put:
if (hba->transport->owner)
module_put(hba->transport->owner);
hba->transport = NULL;
out_free_hba:
kfree(hba);
return ERR_PTR(ret);
}
int
core_delete_hba(struct se_hba *hba)
{
WARN_ON(hba->dev_count);
hba->transport->detach_hba(hba);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
spin_unlock(&hba_lock);
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)
module_put(hba->transport->owner);
hba->transport = NULL;
kfree(hba);
return 0;
}
| gpl-2.0 |
MCherifiOSS/linux | drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c | 936 | 3667 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_RPC
#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_req_layout.h"
#include "ptlrpc_internal.h"
extern spinlock_t ptlrpc_last_xid_lock;
#if RS_DEBUG
extern spinlock_t ptlrpc_rs_debug_lock;
#endif
extern struct mutex pinger_mutex;
extern struct mutex ptlrpcd_mutex;
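/*
 * Initialise the RPC layer in numbered phases; on failure the
 * switch at the cleanup label unwinds only the phases that
 * completed, falling through from the highest phase reached back
 * down to the first.
 */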
__init int ptlrpc_init(void)
{
int rc, cleanup_phase = 0;
lustre_assert_wire_constants();
#if RS_DEBUG
spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
mutex_init(&ptlrpc_all_services_mutex);
mutex_init(&pinger_mutex);
mutex_init(&ptlrpcd_mutex);
ptlrpc_init_xid();
rc = req_layout_init();
if (rc)
return rc;
rc = ptlrpc_hr_init();
if (rc)
return rc;
cleanup_phase = 1;
rc = ptlrpc_request_cache_init();
if (rc)
goto cleanup;
cleanup_phase = 2;
rc = ptlrpc_init_portals();
if (rc)
goto cleanup;
cleanup_phase = 3;
rc = ptlrpc_connection_init();
if (rc)
goto cleanup;
cleanup_phase = 4;
ptlrpc_put_connection_superhack = ptlrpc_connection_put;
rc = ptlrpc_start_pinger();
if (rc)
goto cleanup;
cleanup_phase = 5;
rc = ldlm_init();
if (rc)
goto cleanup;
cleanup_phase = 6;
rc = sptlrpc_init();
if (rc)
goto cleanup;
cleanup_phase = 7;
rc = ptlrpc_nrs_init();
if (rc)
goto cleanup;
cleanup_phase = 8;
rc = tgt_mod_init();
if (rc)
goto cleanup;
return 0;
cleanup:
switch (cleanup_phase) {
case 8:
ptlrpc_nrs_fini();
/* Fall through */
case 7:
sptlrpc_fini();
/* Fall through */
case 6:
ldlm_exit();
/* Fall through */
case 5:
ptlrpc_stop_pinger();
/* Fall through */
case 4:
ptlrpc_connection_fini();
/* Fall through */
case 3:
ptlrpc_exit_portals();
/* Fall through */
case 2:
ptlrpc_request_cache_fini();
/* Fall through */
case 1:
ptlrpc_hr_fini();
req_layout_fini();
/* Fall through */
default: ;
}
return rc;
}
static void __exit ptlrpc_exit(void)
{
tgt_mod_exit();
ptlrpc_nrs_fini();
sptlrpc_fini();
ldlm_exit();
ptlrpc_stop_pinger();
ptlrpc_exit_portals();
ptlrpc_request_cache_fini();
ptlrpc_hr_fini();
ptlrpc_connection_fini();
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
module_init(ptlrpc_init);
module_exit(ptlrpc_exit);
| gpl-2.0 |
longman88/qspinlock-v14 | drivers/cdrom/gdrom.c | 1704 | 23374 | /* GD ROM driver for the SEGA Dreamcast
* copyright Adrian McMenamin, 2007
* With thanks to Marcus Comstedt and Nathan Keynes
* for work in reversing PIO and DMA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/cdrom.h>
#include <linux/genhd.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <scsi/scsi.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/delay.h>
#include <mach/dma.h>
#include <mach/sysasic.h>
#define GDROM_DEV_NAME "gdrom"
#define GD_SESSION_OFFSET 150
/* GD Rom commands */
#define GDROM_COM_SOFTRESET 0x08
#define GDROM_COM_EXECDIAG 0x90
#define GDROM_COM_PACKET 0xA0
#define GDROM_COM_IDDEV 0xA1
/* GD Rom registers */
#define GDROM_BASE_REG 0xA05F7000
#define GDROM_ALTSTATUS_REG (GDROM_BASE_REG + 0x18)
#define GDROM_DATA_REG (GDROM_BASE_REG + 0x80)
#define GDROM_ERROR_REG (GDROM_BASE_REG + 0x84)
#define GDROM_INTSEC_REG (GDROM_BASE_REG + 0x88)
#define GDROM_SECNUM_REG (GDROM_BASE_REG + 0x8C)
#define GDROM_BCL_REG (GDROM_BASE_REG + 0x90)
#define GDROM_BCH_REG (GDROM_BASE_REG + 0x94)
#define GDROM_DSEL_REG (GDROM_BASE_REG + 0x98)
#define GDROM_STATUSCOMMAND_REG (GDROM_BASE_REG + 0x9C)
#define GDROM_RESET_REG (GDROM_BASE_REG + 0x4E4)
#define GDROM_DMA_STARTADDR_REG (GDROM_BASE_REG + 0x404)
#define GDROM_DMA_LENGTH_REG (GDROM_BASE_REG + 0x408)
#define GDROM_DMA_DIRECTION_REG (GDROM_BASE_REG + 0x40C)
#define GDROM_DMA_ENABLE_REG (GDROM_BASE_REG + 0x414)
#define GDROM_DMA_STATUS_REG (GDROM_BASE_REG + 0x418)
#define GDROM_DMA_WAIT_REG (GDROM_BASE_REG + 0x4A0)
#define GDROM_DMA_ACCESS_CTRL_REG (GDROM_BASE_REG + 0x4B8)
#define GDROM_HARD_SECTOR 2048
#define BLOCK_LAYER_SECTOR 512
#define GD_TO_BLK 4
#define GDROM_DEFAULT_TIMEOUT (HZ * 7)
static DEFINE_MUTEX(gdrom_mutex);
static const struct {
int sense_key;
const char * const text;
} sense_texts[] = {
{NO_SENSE, "OK"},
{RECOVERED_ERROR, "Recovered from error"},
{NOT_READY, "Device not ready"},
{MEDIUM_ERROR, "Disk not ready"},
{HARDWARE_ERROR, "Hardware error"},
{ILLEGAL_REQUEST, "Command has failed"},
{UNIT_ATTENTION, "Device needs attention - disk may have been changed"},
{DATA_PROTECT, "Data protection error"},
{ABORTED_COMMAND, "Command aborted"},
};
static struct platform_device *pd;
static int gdrom_major;
static DECLARE_WAIT_QUEUE_HEAD(command_queue);
static DECLARE_WAIT_QUEUE_HEAD(request_queue);
static DEFINE_SPINLOCK(gdrom_lock);
static void gdrom_readdisk_dma(struct work_struct *work);
static DECLARE_WORK(work, gdrom_readdisk_dma);
static LIST_HEAD(gdrom_deferred);
struct gdromtoc {
unsigned int entry[99];
unsigned int first, last;
unsigned int leadout;
};
static struct gdrom_unit {
struct gendisk *disk;
struct cdrom_device_info *cd_info;
int status;
int pending;
int transfer;
char disk_type;
struct gdromtoc *toc;
struct request_queue *gdrom_rq;
} gd;
struct gdrom_id {
char mid;
char modid;
char verid;
char padA[13];
char mname[16];
char modname[16];
char firmver[16];
char padB[16];
};
static int gdrom_getsense(short *bufstring);
static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
struct packet_command *command);
static int gdrom_hardreset(struct cdrom_device_info *cd_info);
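/*
 * Status helpers: bit 7 of the alternate status register is the
 * ATA BSY flag and bit 3 is DRQ, so the drive is ready to transfer
 * packet data when BSY is clear and DRQ is set, i.e.
 * (status & 0x88) == 0x08.
 */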
static bool gdrom_is_busy(void)
{
return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
}
static bool gdrom_data_request(void)
{
return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
}
static bool gdrom_wait_clrbusy(void)
{
unsigned long timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
while ((__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) &&
(time_before(jiffies, timeout)))
cpu_relax();
return time_before(jiffies, timeout + 1);
}
static bool gdrom_wait_busy_sleeps(void)
{
unsigned long timeout;
/* Wait to get busy first */
timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
while (!gdrom_is_busy() && time_before(jiffies, timeout))
cpu_relax();
/* Now wait for busy to clear */
return gdrom_wait_clrbusy();
}
static void gdrom_identifydevice(void *buf)
{
int c;
short *data = buf;
/* If the busy flag won't clear, the device has probably
 * been hit by a serious failure - but we'll
 * try to return a sense key even so */
if (!gdrom_wait_clrbusy()) {
gdrom_getsense(NULL);
return;
}
__raw_writeb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
if (!gdrom_wait_busy_sleeps()) {
gdrom_getsense(NULL);
return;
}
/* now read in the data */
for (c = 0; c < 40; c++)
data[c] = __raw_readw(GDROM_DATA_REG);
}
static void gdrom_spicommand(void *spi_string, int buflen)
{
short *cmd = spi_string;
unsigned long timeout;
/* ensure IRQ_WAIT is set */
__raw_writeb(0x08, GDROM_ALTSTATUS_REG);
/* specify how many bytes we expect back */
__raw_writeb(buflen & 0xFF, GDROM_BCL_REG);
__raw_writeb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
/* other parameters */
__raw_writeb(0, GDROM_INTSEC_REG);
__raw_writeb(0, GDROM_SECNUM_REG);
__raw_writeb(0, GDROM_ERROR_REG);
/* Wait until we can go */
if (!gdrom_wait_clrbusy()) {
gdrom_getsense(NULL);
return;
}
timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
while (!gdrom_data_request() && time_before(jiffies, timeout))
cpu_relax();
if (!time_before(jiffies, timeout + 1)) {
gdrom_getsense(NULL);
return;
}
outsw(GDROM_DATA_REG, cmd, 6);
}
/* gdrom_command_executediagnostic:
* Used to probe for presence of working GDROM
* Restarts GDROM device and then applies standard ATA 3
* Execute Diagnostic Command: a return of '1' indicates device 0
* present and device 1 absent
*/
static char gdrom_execute_diagnostic(void)
{
gdrom_hardreset(gd.cd_info);
if (!gdrom_wait_clrbusy())
return 0;
__raw_writeb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
if (!gdrom_wait_busy_sleeps())
return 0;
return __raw_readb(GDROM_ERROR_REG);
}
/*
* Prepare disk command
* byte 0 = 0x70
* byte 2 = 0x1f
*/
static int gdrom_preparedisk_cmd(void)
{
struct packet_command *spin_command;
spin_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
if (!spin_command)
return -ENOMEM;
spin_command->cmd[0] = 0x70;
spin_command->cmd[2] = 0x1f;
spin_command->buflen = 0;
gd.pending = 1;
gdrom_packetcommand(gd.cd_info, spin_command);
/* 60 second timeout */
wait_event_interruptible_timeout(command_queue, gd.pending == 0,
GDROM_DEFAULT_TIMEOUT);
gd.pending = 0;
kfree(spin_command);
if (gd.status & 0x01) {
/* log an error */
gdrom_getsense(NULL);
return -EIO;
}
return 0;
}
/*
* Read TOC command
* byte 0 = 0x14
* byte 1 = session
* byte 3 = sizeof TOC >> 8 ie upper byte
* byte 4 = sizeof TOC & 0xff ie lower byte
*/
static int gdrom_readtoc_cmd(struct gdromtoc *toc, int session)
{
int tocsize;
struct packet_command *toc_command;
int err = 0;
toc_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
if (!toc_command)
return -ENOMEM;
tocsize = sizeof(struct gdromtoc);
toc_command->cmd[0] = 0x14;
toc_command->cmd[1] = session;
toc_command->cmd[3] = tocsize >> 8;
toc_command->cmd[4] = tocsize & 0xff;
toc_command->buflen = tocsize;
if (gd.pending) {
err = -EBUSY;
goto cleanup_readtoc_final;
}
gd.pending = 1;
gdrom_packetcommand(gd.cd_info, toc_command);
wait_event_interruptible_timeout(command_queue, gd.pending == 0,
GDROM_DEFAULT_TIMEOUT);
if (gd.pending) {
err = -EINVAL;
goto cleanup_readtoc;
}
insw(GDROM_DATA_REG, toc, tocsize/2);
if (gd.status & 0x01)
err = -EINVAL;
cleanup_readtoc:
gd.pending = 0;
cleanup_readtoc_final:
kfree(toc_command);
return err;
}
/* TOC helpers */
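/*
 * Each 32-bit TOC entry packs the track number in bits 8-15 and
 * the q-channel control nibble in bits 4-7; the LBA occupies the
 * remaining bytes and is byte-swapped (via cpu_to_be32) before the
 * 150 sector session offset is subtracted.
 */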
static int get_entry_lba(int track)
{
return (cpu_to_be32(track & 0xffffff00) - GD_SESSION_OFFSET);
}
static int get_entry_q_ctrl(int track)
{
return (track & 0x000000f0) >> 4;
}
static int get_entry_track(int track)
{
return (track & 0x0000ff00) >> 8;
}
static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
struct cdrom_multisession *ms_info)
{
int fentry, lentry, track, data, tocuse, err;
if (!gd.toc)
return -ENOMEM;
tocuse = 1;
/* Check if GD-ROM */
err = gdrom_readtoc_cmd(gd.toc, 1);
/* Not a GD-ROM so check if standard CD-ROM */
if (err) {
tocuse = 0;
err = gdrom_readtoc_cmd(gd.toc, 0);
if (err) {
pr_info("Could not get CD table of contents\n");
return -ENXIO;
}
}
fentry = get_entry_track(gd.toc->first);
lentry = get_entry_track(gd.toc->last);
/* Find the first data track */
track = get_entry_track(gd.toc->last);
do {
data = gd.toc->entry[track - 1];
if (get_entry_q_ctrl(data))
break; /* ie a real data track */
track--;
} while (track >= fentry);
if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
pr_info("No data on the last session of the CD\n");
gdrom_getsense(NULL);
return -ENXIO;
}
ms_info->addr_format = CDROM_LBA;
ms_info->addr.lba = get_entry_lba(data);
ms_info->xa_flag = 1;
return 0;
}
static int gdrom_open(struct cdrom_device_info *cd_info, int purpose)
{
/* spin up the disk */
return gdrom_preparedisk_cmd();
}
/* this function is required even if empty */
static void gdrom_release(struct cdrom_device_info *cd_info)
{
}
static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
{
/* read the sense key */
char sense = __raw_readb(GDROM_ERROR_REG);
sense &= 0xF0;
if (sense == 0)
return CDS_DISC_OK;
if (sense == 0x20)
return CDS_DRIVE_NOT_READY;
/* default */
return CDS_NO_INFO;
}
static unsigned int gdrom_check_events(struct cdrom_device_info *cd_info,
unsigned int clearing, int ignore)
{
/* check the sense key */
return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60 ?
DISK_EVENT_MEDIA_CHANGE : 0;
}
/* reset the G1 bus */
static int gdrom_hardreset(struct cdrom_device_info *cd_info)
{
int count;
__raw_writel(0x1fffff, GDROM_RESET_REG);
for (count = 0xa0000000; count < 0xa0200000; count += 4)
__raw_readl(count);
return 0;
}
/* keep the function looking like the universal
* CD Rom specification - returning int */
static int gdrom_packetcommand(struct cdrom_device_info *cd_info,
struct packet_command *command)
{
gdrom_spicommand(&command->cmd, command->buflen);
return 0;
}
/* Get Sense SPI command
* From Marcus Comstedt
* cmd = 0x13
* cmd + 4 = length of returned buffer
* Returns 5 16 bit words
*/
static int gdrom_getsense(short *bufstring)
{
struct packet_command *sense_command;
short sense[5];
int sense_key;
int err = -EIO;
sense_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
if (!sense_command)
return -ENOMEM;
sense_command->cmd[0] = 0x13;
sense_command->cmd[4] = 10;
sense_command->buflen = 10;
/* even if something is pending try to get
* the sense key if possible */
if (gd.pending && !gdrom_wait_clrbusy()) {
err = -EBUSY;
goto cleanup_sense_final;
}
gd.pending = 1;
gdrom_packetcommand(gd.cd_info, sense_command);
wait_event_interruptible_timeout(command_queue, gd.pending == 0,
GDROM_DEFAULT_TIMEOUT);
if (gd.pending)
goto cleanup_sense;
insw(GDROM_DATA_REG, &sense, sense_command->buflen/2);
if (sense[1] & 40) {
pr_info("Drive not ready - command aborted\n");
goto cleanup_sense;
}
sense_key = sense[1] & 0x0F;
if (sense_key < ARRAY_SIZE(sense_texts))
pr_info("%s\n", sense_texts[sense_key].text);
else
pr_err("Unknown sense key: %d\n", sense_key);
if (bufstring) /* return additional sense data */
memcpy(bufstring, &sense[4], 2);
if (sense_key < 2)
err = 0;
cleanup_sense:
gd.pending = 0;
cleanup_sense_final:
kfree(sense_command);
return err;
}
static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
void *arg)
{
return -EINVAL;
}
static struct cdrom_device_ops gdrom_ops = {
.open = gdrom_open,
.release = gdrom_release,
.drive_status = gdrom_drivestatus,
.check_events = gdrom_check_events,
.get_last_session = gdrom_get_last_session,
.reset = gdrom_hardreset,
.audio_ioctl = gdrom_audio_ioctl,
.capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
.n_minors = 1,
};
static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
{
int ret;
mutex_lock(&gdrom_mutex);
ret = cdrom_open(gd.cd_info, bdev, mode);
mutex_unlock(&gdrom_mutex);
return ret;
}
static void gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
{
mutex_lock(&gdrom_mutex);
cdrom_release(gd.cd_info, mode);
mutex_unlock(&gdrom_mutex);
}
static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
unsigned int clearing)
{
return cdrom_check_events(gd.cd_info, clearing);
}
static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
int ret;
mutex_lock(&gdrom_mutex);
ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
mutex_unlock(&gdrom_mutex);
return ret;
}
static const struct block_device_operations gdrom_bdops = {
.owner = THIS_MODULE,
.open = gdrom_bdops_open,
.release = gdrom_bdops_release,
.check_events = gdrom_bdops_check_events,
.ioctl = gdrom_bdops_ioctl,
};
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
{
gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
if (gd.pending != 1)
return IRQ_HANDLED;
gd.pending = 0;
wake_up_interruptible(&command_queue);
return IRQ_HANDLED;
}
static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
{
gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
if (gd.transfer != 1)
return IRQ_HANDLED;
gd.transfer = 0;
wake_up_interruptible(&request_queue);
return IRQ_HANDLED;
}
static int gdrom_set_interrupt_handlers(void)
{
int err;
err = request_irq(HW_EVENT_GDROM_CMD, gdrom_command_interrupt,
0, "gdrom_command", &gd);
if (err)
return err;
err = request_irq(HW_EVENT_GDROM_DMA, gdrom_dma_interrupt,
0, "gdrom_dma", &gd);
if (err)
free_irq(HW_EVENT_GDROM_CMD, &gd);
return err;
}
/* Implement DMA read using SPI command
* 0 -> 0x30
* 1 -> mode
* 2 -> block >> 16
* 3 -> block >> 8
* 4 -> block
* 8 -> sectors >> 16
* 9 -> sectors >> 8
* 10 -> sectors
*/
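/*
 * The block layer issues requests in 512 byte sectors while the
 * drive reads 2048 byte sectors, hence the GD_TO_BLK divisor of 4,
 * and GD-ROM addressing starts at the 150 sector session offset.
 * E.g. a request at block-layer sector 8 maps to GD sector
 * 8 / 4 + 150 = 152.
 */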
static void gdrom_readdisk_dma(struct work_struct *work)
{
int err, block, block_cnt;
struct packet_command *read_command;
struct list_head *elem, *next;
struct request *req;
unsigned long timeout;
if (list_empty(&gdrom_deferred))
return;
read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
if (!read_command)
return; /* get more memory later? */
read_command->cmd[0] = 0x30;
read_command->cmd[1] = 0x20;
spin_lock(&gdrom_lock);
list_for_each_safe(elem, next, &gdrom_deferred) {
req = list_entry(elem, struct request, queuelist);
spin_unlock(&gdrom_lock);
block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
__raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
__raw_writel(1, GDROM_DMA_DIRECTION_REG);
__raw_writel(1, GDROM_DMA_ENABLE_REG);
read_command->cmd[2] = (block >> 16) & 0xFF;
read_command->cmd[3] = (block >> 8) & 0xFF;
read_command->cmd[4] = block & 0xFF;
read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
read_command->cmd[10] = block_cnt & 0xFF;
/* set for DMA */
__raw_writeb(1, GDROM_ERROR_REG);
/* other registers */
__raw_writeb(0, GDROM_SECNUM_REG);
__raw_writeb(0, GDROM_BCL_REG);
__raw_writeb(0, GDROM_BCH_REG);
__raw_writeb(0, GDROM_DSEL_REG);
__raw_writeb(0, GDROM_INTSEC_REG);
/* Wait for registers to reset after any previous activity */
timeout = jiffies + HZ / 2;
while (gdrom_is_busy() && time_before(jiffies, timeout))
cpu_relax();
__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
timeout = jiffies + HZ / 2;
/* Wait for packet command to finish */
while (gdrom_is_busy() && time_before(jiffies, timeout))
cpu_relax();
gd.pending = 1;
gd.transfer = 1;
outsw(GDROM_DATA_REG, &read_command->cmd, 6);
timeout = jiffies + HZ / 2;
/* Wait for any pending DMA to finish */
while (__raw_readb(GDROM_DMA_STATUS_REG) &&
time_before(jiffies, timeout))
cpu_relax();
/* start transfer */
__raw_writeb(1, GDROM_DMA_STATUS_REG);
wait_event_interruptible_timeout(request_queue,
gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
err = gd.transfer ? -EIO : 0;
gd.transfer = 0;
gd.pending = 0;
/* now seek to take the request spinlock
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
__blk_end_request_all(req, err);
}
spin_unlock(&gdrom_lock);
kfree(read_command);
}
static void gdrom_request(struct request_queue *rq)
{
struct request *req;
while ((req = blk_fetch_request(rq)) != NULL) {
if (req->cmd_type != REQ_TYPE_FS) {
printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
__blk_end_request_all(req, -EIO);
continue;
}
if (rq_data_dir(req) != READ) {
pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO);
continue;
}
/*
* Add to list of deferred work and then schedule
* workqueue.
*/
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
}
}
/* Print string identifying GD ROM device */
static int gdrom_outputversion(void)
{
struct gdrom_id *id;
char *model_name, *manuf_name, *firmw_ver;
int err = -ENOMEM;
/* query device ID */
id = kzalloc(sizeof(struct gdrom_id), GFP_KERNEL);
if (!id)
return err;
gdrom_identifydevice(id);
model_name = kstrndup(id->modname, 16, GFP_KERNEL);
if (!model_name)
goto free_id;
manuf_name = kstrndup(id->mname, 16, GFP_KERNEL);
if (!manuf_name)
goto free_model_name;
firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
if (!firmw_ver)
goto free_manuf_name;
pr_info("%s from %s with firmware %s\n",
model_name, manuf_name, firmw_ver);
err = 0;
kfree(firmw_ver);
free_manuf_name:
kfree(manuf_name);
free_model_name:
kfree(model_name);
free_id:
kfree(id);
return err;
}
/* set the default mode for DMA transfer */
static int gdrom_init_dma_mode(void)
{
__raw_writeb(0x13, GDROM_ERROR_REG);
__raw_writeb(0x22, GDROM_INTSEC_REG);
if (!gdrom_wait_clrbusy())
return -EBUSY;
__raw_writeb(0xEF, GDROM_STATUSCOMMAND_REG);
if (!gdrom_wait_busy_sleeps())
return -EBUSY;
/* Memory protection setting for GDROM DMA
* Bits 31 - 16 security: 0x8843
* Bits 15 and 7 reserved (0)
* Bits 14 - 8 start of transfer range in 1 MB blocks OR'ed with 0x80
* Bits 6 - 0 end of transfer range in 1 MB blocks OR'ed with 0x80
* (0x40 | 0x80) = start range at 0x0C000000
* (0x7F | 0x80) = end range at 0x0FFFFFFF */
__raw_writel(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
__raw_writel(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
return 0;
}
static void probe_gdrom_setupcd(void)
{
gd.cd_info->ops = &gdrom_ops;
gd.cd_info->capacity = 1;
strcpy(gd.cd_info->name, GDROM_DEV_NAME);
gd.cd_info->mask = CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|
CDC_SELECT_DISC;
}
static void probe_gdrom_setupdisk(void)
{
gd.disk->major = gdrom_major;
gd.disk->first_minor = 1;
gd.disk->minors = 1;
strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
}
static int probe_gdrom_setupqueue(void)
{
blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
blk_queue_max_segments(gd.gdrom_rq, 1);
/* set a large max size to get most from DMA */
blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
gd.disk->queue = gd.gdrom_rq;
return gdrom_init_dma_mode();
}
/*
* register this as a block device and as compliant with the
* universal CD Rom driver interface
*/
static int probe_gdrom(struct platform_device *devptr)
{
int err;
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
pr_warning("ATA Probe for GDROM failed\n");
return -ENODEV;
}
/* Print out firmware ID */
if (gdrom_outputversion())
return -ENOMEM;
/* Register GDROM */
gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
if (gdrom_major <= 0)
return gdrom_major;
pr_info("Registered with major number %d\n",
gdrom_major);
/* Specify basic properties of drive */
gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
if (!gd.cd_info) {
err = -ENOMEM;
goto probe_fail_no_mem;
}
probe_gdrom_setupcd();
gd.disk = alloc_disk(1);
if (!gd.disk) {
err = -ENODEV;
goto probe_fail_no_disk;
}
probe_gdrom_setupdisk();
if (register_cdrom(gd.cd_info)) {
err = -ENODEV;
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
goto probe_fail_cmdirq_register;
gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
if (!gd.gdrom_rq)
goto probe_fail_requestq;
err = probe_gdrom_setupqueue();
if (err)
goto probe_fail_toc;
gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
if (!gd.toc)
goto probe_fail_toc;
add_disk(gd.disk);
return 0;
probe_fail_toc:
blk_cleanup_queue(gd.gdrom_rq);
probe_fail_requestq:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
probe_fail_cmdirq_register:
probe_fail_cdrom_register:
del_gendisk(gd.disk);
probe_fail_no_disk:
kfree(gd.cd_info);
probe_fail_no_mem:
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
gdrom_major = 0;
pr_warning("Probe failed - error is 0x%X\n", err);
return err;
}
static int remove_gdrom(struct platform_device *devptr)
{
flush_work(&work);
blk_cleanup_queue(gd.gdrom_rq);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);
del_gendisk(gd.disk);
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
return 0;
}
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
.remove = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
};
static int __init init_gdrom(void)
{
int rc;
gd.toc = NULL;
rc = platform_driver_register(&gdrom_driver);
if (rc)
return rc;
pd = platform_device_register_simple(GDROM_DEV_NAME, -1, NULL, 0);
if (IS_ERR(pd)) {
platform_driver_unregister(&gdrom_driver);
return PTR_ERR(pd);
}
return 0;
}
static void __exit exit_gdrom(void)
{
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
kfree(gd.toc);
}
module_init(init_gdrom);
module_exit(exit_gdrom);
MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("SEGA Dreamcast GD-ROM Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sakindia123/stock_j7_kernel | drivers/hwmon/emc2103.c | 2216 | 21145 | /*
* emc2103.c - Support for SMSC EMC2103
* Copyright (c) 2010 SMSC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* Addresses scanned */
static const unsigned short normal_i2c[] = { 0x2E, I2C_CLIENT_END };
static const u8 REG_TEMP[4] = { 0x00, 0x02, 0x04, 0x06 };
static const u8 REG_TEMP_MIN[4] = { 0x3c, 0x38, 0x39, 0x3a };
static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
#define REG_CONF1 0x20
#define REG_TEMP_MAX_ALARM 0x24
#define REG_TEMP_MIN_ALARM 0x25
#define REG_FAN_CONF1 0x42
#define REG_FAN_TARGET_LO 0x4c
#define REG_FAN_TARGET_HI 0x4d
#define REG_FAN_TACH_HI 0x4e
#define REG_FAN_TACH_LO 0x4f
#define REG_PRODUCT_ID 0xfd
#define REG_MFG_ID 0xfe
/* equation 4 from datasheet: rpm = (3932160 * multiplier) / count */
#define FAN_RPM_FACTOR 3932160
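/* e.g. a tach count of 8192 with multiplier 1 gives 3932160 / 8192 = 480 RPM */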
/*
* 2103-2 and 2103-4's 3rd temperature sensor can be connected to two diodes
* in anti-parallel mode, and in this configuration both can be read
* independently (so we have 4 temperature inputs). The device can't
* detect if it's connected in this mode, so we have to manually enable
* it. Default is to leave the device in the state it's already in (-1).
* This parameter allows APD mode to be optionally forced on or off
*/
static int apd = -1;
module_param(apd, bint, 0);
MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
struct temperature {
s8 degrees;
u8 fraction; /* 0-7 multiples of 0.125 */
};
struct emc2103_data {
struct device *hwmon_dev;
struct mutex update_lock;
bool valid; /* registers are valid */
bool fan_rpm_control;
int temp_count; /* num of temp sensors */
unsigned long last_updated; /* in jiffies */
struct temperature temp[4]; /* internal + 3 external */
s8 temp_min[4]; /* no fractional part */
s8 temp_max[4]; /* no fractional part */
u8 temp_min_alarm;
u8 temp_max_alarm;
u8 fan_multiplier;
u16 fan_tach;
u16 fan_target;
};
static int read_u8_from_i2c(struct i2c_client *client, u8 i2c_reg, u8 *output)
{
int status = i2c_smbus_read_byte_data(client, i2c_reg);
if (status < 0) {
dev_warn(&client->dev, "reg 0x%02x, err %d\n",
i2c_reg, status);
} else {
*output = status;
}
return status;
}
static void read_temp_from_i2c(struct i2c_client *client, u8 i2c_reg,
struct temperature *temp)
{
u8 degrees, fractional;
if (read_u8_from_i2c(client, i2c_reg, &degrees) < 0)
return;
if (read_u8_from_i2c(client, i2c_reg + 1, &fractional) < 0)
return;
temp->degrees = degrees;
temp->fraction = (fractional & 0xe0) >> 5;
}
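/*
 * Tach readings and the fan target are 13-bit values split across
 * two registers: the high register holds bits 12-5 and the top
 * five bits of the low register hold bits 4-0, so reads combine
 * them as (high << 5) | (low >> 3) and writes split them back.
 */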
static void read_fan_from_i2c(struct i2c_client *client, u16 *output,
u8 hi_addr, u8 lo_addr)
{
u8 high_byte, lo_byte;
if (read_u8_from_i2c(client, hi_addr, &high_byte) < 0)
return;
if (read_u8_from_i2c(client, lo_addr, &lo_byte) < 0)
return;
*output = ((u16)high_byte << 5) | (lo_byte >> 3);
}
static void write_fan_target_to_i2c(struct i2c_client *client, u16 new_target)
{
u8 high_byte = (new_target & 0x1fe0) >> 5;
u8 low_byte = (new_target & 0x001f) << 3;
i2c_smbus_write_byte_data(client, REG_FAN_TARGET_LO, low_byte);
i2c_smbus_write_byte_data(client, REG_FAN_TARGET_HI, high_byte);
}
static void read_fan_config_from_i2c(struct i2c_client *client)
{
struct emc2103_data *data = i2c_get_clientdata(client);
u8 conf1;
if (read_u8_from_i2c(client, REG_FAN_CONF1, &conf1) < 0)
return;
data->fan_multiplier = 1 << ((conf1 & 0x60) >> 5);
data->fan_rpm_control = (conf1 & 0x80) != 0;
}
static struct emc2103_data *emc2103_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
int i;
for (i = 0; i < data->temp_count; i++) {
read_temp_from_i2c(client, REG_TEMP[i], &data->temp[i]);
read_u8_from_i2c(client, REG_TEMP_MIN[i],
&data->temp_min[i]);
read_u8_from_i2c(client, REG_TEMP_MAX[i],
&data->temp_max[i]);
}
read_u8_from_i2c(client, REG_TEMP_MIN_ALARM,
&data->temp_min_alarm);
read_u8_from_i2c(client, REG_TEMP_MAX_ALARM,
&data->temp_max_alarm);
read_fan_from_i2c(client, &data->fan_tach,
REG_FAN_TACH_HI, REG_FAN_TACH_LO);
read_fan_from_i2c(client, &data->fan_target,
REG_FAN_TARGET_HI, REG_FAN_TARGET_LO);
read_fan_config_from_i2c(client);
data->last_updated = jiffies;
data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
}
static ssize_t
show_temp(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
int millidegrees = data->temp[nr].degrees * 1000
+ data->temp[nr].fraction * 125;
return sprintf(buf, "%d\n", millidegrees);
}
static ssize_t
show_temp_min(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
int millidegrees = data->temp_min[nr] * 1000;
return sprintf(buf, "%d\n", millidegrees);
}
static ssize_t
show_temp_max(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
int millidegrees = data->temp_max[nr] * 1000;
return sprintf(buf, "%d\n", millidegrees);
}
static ssize_t
show_temp_fault(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
bool fault = (data->temp[nr].degrees == -128);
return sprintf(buf, "%d\n", fault ? 1 : 0);
}
static ssize_t
show_temp_min_alarm(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
bool alarm = data->temp_min_alarm & (1 << nr);
return sprintf(buf, "%d\n", alarm ? 1 : 0);
}
static ssize_t
show_temp_max_alarm(struct device *dev, struct device_attribute *da, char *buf)
{
int nr = to_sensor_dev_attr(da)->index;
struct emc2103_data *data = emc2103_update_device(dev);
bool alarm = data->temp_max_alarm & (1 << nr);
return sprintf(buf, "%d\n", alarm ? 1 : 0);
}
static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(da)->index;
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
val = DIV_ROUND_CLOSEST(val, 1000);
if ((val < -63) || (val > 127))
return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_min[nr] = val;
i2c_smbus_write_byte_data(client, REG_TEMP_MIN[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(da)->index;
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
val = DIV_ROUND_CLOSEST(val, 1000);
if ((val < -63) || (val > 127))
return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_max[nr] = val;
i2c_smbus_write_byte_data(client, REG_TEMP_MAX[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
if (data->fan_tach != 0)
rpm = (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach;
return sprintf(buf, "%d\n", rpm);
}
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int fan_div = 8 / data->fan_multiplier;
return sprintf(buf, "%d\n", fan_div);
}
/*
* Note: we also update the fan target here, because its value is
* determined in part by the fan clock divider. This follows the principle
* of least surprise; the user doesn't expect the fan target to change just
* because the divider changed.
*/
static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
int new_range_bits, old_div = 8 / data->fan_multiplier;
long new_div;
int status = kstrtol(buf, 10, &new_div);
if (status < 0)
return -EINVAL;
if (new_div == old_div) /* No change */
return count;
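/* the divider maps inversely onto the 2-bit RANGE field of REG_FAN_CONF1 */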
switch (new_div) {
case 1:
new_range_bits = 3;
break;
case 2:
new_range_bits = 2;
break;
case 4:
new_range_bits = 1;
break;
case 8:
new_range_bits = 0;
break;
default:
return -EINVAL;
}
mutex_lock(&data->update_lock);
status = i2c_smbus_read_byte_data(client, REG_FAN_CONF1);
if (status < 0) {
dev_dbg(&client->dev, "reg 0x%02x, err %d\n",
REG_FAN_CONF1, status);
mutex_unlock(&data->update_lock);
return -EIO;
}
status &= 0x9F;
status |= (new_range_bits << 5);
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, status);
data->fan_multiplier = 8 / new_div;
/* update fan target if high byte is not disabled */
if ((data->fan_target & 0x1fe0) != 0x1fe0) {
u16 new_target = (data->fan_target * old_div) / new_div;
data->fan_target = min(new_target, (u16)0x1fff);
write_fan_target_to_i2c(client, data->fan_target);
}
/* invalidate data to force re-read from hardware */
data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
/* high byte of 0xff indicates disabled so return 0 */
if ((data->fan_target != 0) && ((data->fan_target & 0x1fe0) != 0x1fe0))
rpm = (FAN_RPM_FACTOR * data->fan_multiplier)
/ data->fan_target;
return sprintf(buf, "%d\n", rpm);
}
static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct emc2103_data *data = emc2103_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
long rpm_target;
int result = kstrtol(buf, 10, &rpm_target);
if (result < 0)
return -EINVAL;
/* Datasheet states 16384 as maximum RPM target (table 3.2) */
if ((rpm_target < 0) || (rpm_target > 16384))
return -EINVAL;
mutex_lock(&data->update_lock);
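/* an RPM target of 0 is stored as 0x1fff, the "disabled" pattern
that show_fan_target() reports back as 0 */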
if (rpm_target == 0)
data->fan_target = 0x1fff;
else
data->fan_target = clamp_val(
(FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
0, 0x1fff);
write_fan_target_to_i2c(client, data->fan_target);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0);
return sprintf(buf, "%d\n", fault ? 1 : 0);
}
static ssize_t
show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
return sprintf(buf, "%d\n", data->fan_rpm_control ? 3 : 0);
}
static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct emc2103_data *data = i2c_get_clientdata(client);
long new_value;
u8 conf_reg;
int result = kstrtol(buf, 10, &new_value);
if (result < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
switch (new_value) {
case 0:
data->fan_rpm_control = false;
break;
case 3:
data->fan_rpm_control = true;
break;
default:
count = -EINVAL;
goto err;
}
result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
if (result < 0) {
count = result;
goto err;
}
if (data->fan_rpm_control)
conf_reg |= 0x80;
else
conf_reg &= ~0x80;
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
err:
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 2);
static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 2);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR, show_temp_min,
set_temp_min, 3);
static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
set_temp_max, 3);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm,
NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm,
NULL, 3);
static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div);
static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target,
set_fan_target);
static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL);
static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
set_pwm_enable);
/* sensors present on all models */
static struct attribute *emc2103_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_div.attr,
&dev_attr_fan1_target.attr,
&dev_attr_fan1_fault.attr,
&dev_attr_pwm1_enable.attr,
NULL
};
/* extra temperature sensors only present on 2103-2 and 2103-4 */
static struct attribute *emc2103_attributes_temp3[] = {
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
NULL
};
/* extra temperature sensors only present on 2103-2 and 2103-4 in APD mode */
static struct attribute *emc2103_attributes_temp4[] = {
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp4_min.dev_attr.attr,
&sensor_dev_attr_temp4_max.dev_attr.attr,
&sensor_dev_attr_temp4_fault.dev_attr.attr,
&sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group emc2103_group = {
.attrs = emc2103_attributes,
};
static const struct attribute_group emc2103_temp3_group = {
.attrs = emc2103_attributes_temp3,
};
static const struct attribute_group emc2103_temp4_group = {
.attrs = emc2103_attributes_temp4,
};
static int
emc2103_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct emc2103_data *data;
int status;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
data = devm_kzalloc(&client->dev, sizeof(struct emc2103_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* 2103-2 and 2103-4 have 3 external diodes, 2103-1 has 1 */
status = i2c_smbus_read_byte_data(client, REG_PRODUCT_ID);
if (status == 0x24) {
/* 2103-1 only has 1 external diode */
data->temp_count = 2;
} else {
/* 2103-2 and 2103-4 have 3 or 4 external diodes */
status = i2c_smbus_read_byte_data(client, REG_CONF1);
if (status < 0) {
dev_dbg(&client->dev, "reg 0x%02x, err %d\n", REG_CONF1,
status);
return status;
}
/* detect current state of hardware */
data->temp_count = (status & 0x01) ? 4 : 3;
/* force APD state if module parameter is set */
if (apd == 0) {
/* force APD mode off */
data->temp_count = 3;
status &= ~(0x01);
i2c_smbus_write_byte_data(client, REG_CONF1, status);
} else if (apd == 1) {
/* force APD mode on */
data->temp_count = 4;
status |= 0x01;
i2c_smbus_write_byte_data(client, REG_CONF1, status);
}
}
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &emc2103_group);
if (status)
return status;
if (data->temp_count >= 3) {
status = sysfs_create_group(&client->dev.kobj,
&emc2103_temp3_group);
if (status)
goto exit_remove;
}
if (data->temp_count == 4) {
status = sysfs_create_group(&client->dev.kobj,
&emc2103_temp4_group);
if (status)
goto exit_remove_temp3;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
status = PTR_ERR(data->hwmon_dev);
goto exit_remove_temp4;
}
dev_info(&client->dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
return 0;
exit_remove_temp4:
if (data->temp_count == 4)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
exit_remove_temp3:
if (data->temp_count >= 3)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
exit_remove:
sysfs_remove_group(&client->dev.kobj, &emc2103_group);
return status;
}
static int emc2103_remove(struct i2c_client *client)
{
struct emc2103_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
if (data->temp_count == 4)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
if (data->temp_count >= 3)
sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
sysfs_remove_group(&client->dev.kobj, &emc2103_group);
return 0;
}
static const struct i2c_device_id emc2103_ids[] = {
{ "emc2103", 0, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, emc2103_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
int manufacturer, product;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
manufacturer = i2c_smbus_read_byte_data(new_client, REG_MFG_ID);
if (manufacturer != 0x5D)
return -ENODEV;
product = i2c_smbus_read_byte_data(new_client, REG_PRODUCT_ID);
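/* 0x24 is the EMC2103-1; 0x26 presumably covers the -2 and -4 variants */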
if ((product != 0x24) && (product != 0x26))
return -ENODEV;
strlcpy(info->type, "emc2103", I2C_NAME_SIZE);
return 0;
}
static struct i2c_driver emc2103_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "emc2103",
},
.probe = emc2103_probe,
.remove = emc2103_remove,
.id_table = emc2103_ids,
.detect = emc2103_detect,
.address_list = normal_i2c,
};
module_i2c_driver(emc2103_driver);
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>");
MODULE_DESCRIPTION("SMSC EMC2103 hwmon driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
visi0nary/android_kernel_elephone_p8000 | arch/um/drivers/ubd_user.c | 2216 | 1217 | /*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Copyright (C) 2001 Ridgerun,Inc (glonnon@ridgerun.com)
* Licensed under the GPL
*/
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <endian.h>
#include <byteswap.h>
#include "ubd.h"
#include <os.h>
void ignore_sigwinch_sig(void)
{
signal(SIGWINCH, SIG_IGN);
}
int start_io_thread(unsigned long sp, int *fd_out)
{
int pid, fds[2], err;
err = os_pipe(fds, 1, 1);
if(err < 0){
printk("start_io_thread - os_pipe failed, err = %d\n", -err);
goto out;
}
kernel_fd = fds[0];
*fd_out = fds[1];
err = os_set_fd_block(*fd_out, 0);
if (err) {
printk("start_io_thread - failed to set nonblocking I/O.\n");
goto out_close;
}
pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL);
if(pid < 0){
err = -errno;
printk("start_io_thread - clone failed : errno = %d\n", errno);
goto out_close;
}
return(pid);
out_close:
os_close_file(fds[0]);
os_close_file(fds[1]);
kernel_fd = -1;
*fd_out = -1;
out:
return err;
}
| gpl-2.0 |
Evervolv/android_kernel_htc_msm7x30-3.0 | fs/cifs/cifsencrypt.c | 2472 | 22040 | /*
* fs/cifs/cifsencrypt.c
*
* Copyright (C) International Business Machines Corp., 2005,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "cifsproto.h"
#include "ntlmssp.h"
#include <linux/ctype.h>
#include <linux/random.h>
/*
* Calculate and return the CIFS signature based on the mac key and SMB PDU.
* The 16 byte signature must be allocated by the caller. Note we only use the
* 1st eight bytes and that the smb header signature field on input contains
* the sequence number before this function is called. Also, this function
* should be called with the server->srv_mutex held.
*/
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
struct TCP_Server_Info *server, char *signature)
{
int rc;
if (cifs_pdu == NULL || signature == NULL || server == NULL)
return -EINVAL;
if (!server->secmech.sdescmd5) {
cERROR(1, "%s: Can't generate signature\n", __func__);
return -1;
}
rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
if (rc) {
cERROR(1, "%s: Oould not init md5\n", __func__);
return rc;
}
crypto_shash_update(&server->secmech.sdescmd5->shash,
server->session_key.response, server->session_key.len);
crypto_shash_update(&server->secmech.sdescmd5->shash,
cifs_pdu->Protocol, be32_to_cpu(cifs_pdu->smb_buf_length));
rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
return rc;
}
/* must be called with server->srv_mutex held */
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
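/* consume two sequence numbers: one for this request and one for
the server's response */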
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
return rc;
}
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
struct TCP_Server_Info *server, char *signature)
{
int i;
int rc;
if (iov == NULL || signature == NULL || server == NULL)
return -EINVAL;
if (!server->secmech.sdescmd5) {
cERROR(1, "%s: Can't generate signature\n", __func__);
return -1;
}
rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
if (rc) {
cERROR(1, "%s: Oould not init md5\n", __func__);
return rc;
}
crypto_shash_update(&server->secmech.sdescmd5->shash,
server->session_key.response, server->session_key.len);
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cERROR(1, "null iovec entry");
return -EIO;
}
/* The first entry includes a length field (which does not get
signed) that occupies the first 4 bytes before the header */
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
crypto_shash_update(&server->secmech.sdescmd5->shash,
iov[i].iov_base + 4, iov[i].iov_len - 4);
} else
crypto_shash_update(&server->secmech.sdescmd5->shash,
iov[i].iov_base, iov[i].iov_len);
}
rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
return rc;
}
/* must be called with server->srv_mutex held */
int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
struct smb_hdr *cifs_pdu = iov[0].iov_base;
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
return rc;
}
int cifs_verify_signature(struct smb_hdr *cifs_pdu,
struct TCP_Server_Info *server,
__u32 expected_sequence_number)
{
int rc;
char server_response_sig[8];
char what_we_think_sig_should_be[20];
if (cifs_pdu == NULL || server == NULL)
return -EINVAL;
if (!server->session_estab)
return 0;
if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
struct smb_com_lock_req *pSMB =
(struct smb_com_lock_req *)cifs_pdu;
if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
return 0;
}
/* BB what if signatures are supposed to be on for session but
server does not send one? BB */
/* Do not need to verify session setups with signature "BSRSPYL " */
if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
cFYI(1, "dummy signature received for smb command 0x%x",
cifs_pdu->Command);
/* save off the original signature so we can modify the smb and check
its signature against what the server sent */
memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
mutex_lock(&server->srv_mutex);
rc = cifs_calculate_signature(cifs_pdu, server,
what_we_think_sig_should_be);
mutex_unlock(&server->srv_mutex);
if (rc)
return rc;
/* cifs_dump_mem("what we think it should be: ",
what_we_think_sig_should_be, 16); */
if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
return -EACCES;
else
return 0;
}
/* first calculate 24 bytes ntlm response and then 16 byte session key */
int setup_ntlm_response(struct cifs_ses *ses)
{
int rc = 0;
unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
char temp_key[CIFS_SESS_KEY_SIZE];
if (!ses)
return -EINVAL;
ses->auth_key.response = kmalloc(temp_len, GFP_KERNEL);
if (!ses->auth_key.response) {
cERROR(1, "NTLM can't allocate (%u bytes) memory", temp_len);
return -ENOMEM;
}
ses->auth_key.len = temp_len;
rc = SMBNTencrypt(ses->password, ses->server->cryptkey,
ses->auth_key.response + CIFS_SESS_KEY_SIZE);
if (rc) {
cFYI(1, "%s Can't generate NTLM response, error: %d",
__func__, rc);
return rc;
}
rc = E_md4hash(ses->password, temp_key);
if (rc) {
cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
return rc;
}
rc = mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
if (rc)
cFYI(1, "%s Can't generate NTLM session key, error: %d",
__func__, rc);
return rc;
}
#ifdef CONFIG_CIFS_WEAK_PW_HASH
int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
char *lnm_session_key)
{
int i;
int rc;
char password_with_pad[CIFS_ENCPWD_SIZE];
memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) {
memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE);
memcpy(lnm_session_key, password_with_pad,
CIFS_ENCPWD_SIZE);
return 0;
}
/* calculate old style session key */
/* Calling toupper is less broken than repeatedly calling nls_toupper
would be, since the latter will never work for UTF8, but neither
handles multibyte code pages. The only alternative would be
converting to UCS-16 (Unicode) (using a routine something like
UniStrupr), uppercasing, and then converting back from Unicode -
which would only be worth doing if we knew the input were utf8.
Basically, utf8 and other multibyte codepages each need their own
strupper function, since working a byte at a time will not work. */
for (i = 0; i < CIFS_ENCPWD_SIZE; i++)
password_with_pad[i] = toupper(password_with_pad[i]);
rc = SMBencrypt(password_with_pad, cryptkey, lnm_session_key);
return rc;
}
#endif /* CIFS_WEAK_PW_HASH */
/* Build a proper attribute value/target info pairs blob.
* Fill in netbios and dns domain name and workstation name,
* and client time (five av pairs in total, plus one end-of-fields
* indicator).
* Allocate domain name which gets freed when session struct is deallocated.
*/
static int
build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int dlen;
unsigned int wlen;
unsigned int size = 6 * sizeof(struct ntlmssp2_name);
__le64 curtime;
char *defdmname = "WORKGROUP";
unsigned char *blobptr;
struct ntlmssp2_name *attrptr;
if (!ses->domainName) {
ses->domainName = kstrdup(defdmname, GFP_KERNEL);
if (!ses->domainName)
return -ENOMEM;
}
dlen = strlen(ses->domainName);
wlen = strlen(ses->server->hostname);
/* The length of this blob is:
* six times the size of a structure which holds name/size +
* two times the unicode length of a domain name +
* two times the unicode length of a server name +
* the size of a timestamp (which is 8 bytes).
*/
ses->auth_key.len = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;
ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
if (!ses->auth_key.response) {
ses->auth_key.len = 0;
cERROR(1, "Challenge target info allocation failure");
return -ENOMEM;
}
blobptr = ses->auth_key.response;
attrptr = (struct ntlmssp2_name *) blobptr;
attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
attrptr->length = cpu_to_le16(2 * dlen);
blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
blobptr += 2 * dlen;
attrptr = (struct ntlmssp2_name *) blobptr;
attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
attrptr->length = cpu_to_le16(2 * wlen);
blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
blobptr += 2 * wlen;
attrptr = (struct ntlmssp2_name *) blobptr;
attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
attrptr->length = cpu_to_le16(2 * dlen);
blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
blobptr += 2 * dlen;
attrptr = (struct ntlmssp2_name *) blobptr;
attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
attrptr->length = cpu_to_le16(2 * wlen);
blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
blobptr += 2 * wlen;
attrptr = (struct ntlmssp2_name *) blobptr;
attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
attrptr->length = cpu_to_le16(sizeof(__le64));
blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
memcpy(blobptr, &curtime, sizeof(__le64));
return 0;
}
/* Server has provided av pairs/target info in the type 2 challenge
* packet and we have plucked it and stored within smb session.
* We parse that blob here to find netbios domain name to be used
* as part of ntlmv2 authentication (in Target String), if not already
* specified on the command line.
* If this function returns without any error but without fetching
* the domain name, authentication may fail against some servers but
* not against others (those that are not very particular about the
* target string, i.e. for some, just the user name might suffice).
*/
static int
find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
unsigned int attrsize;
unsigned int type;
unsigned int onesize = sizeof(struct ntlmssp2_name);
unsigned char *blobptr;
unsigned char *blobend;
struct ntlmssp2_name *attrptr;
if (!ses->auth_key.len || !ses->auth_key.response)
return 0;
blobptr = ses->auth_key.response;
blobend = blobptr + ses->auth_key.len;
while (blobptr + onesize < blobend) {
attrptr = (struct ntlmssp2_name *) blobptr;
type = le16_to_cpu(attrptr->type);
if (type == NTLMSSP_AV_EOL)
break;
blobptr += 2; /* advance attr type */
attrsize = le16_to_cpu(attrptr->length);
blobptr += 2; /* advance attr size */
if (blobptr + attrsize > blobend)
break;
if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
if (!attrsize)
break;
if (!ses->domainName) {
ses->domainName =
kmalloc(attrsize + 1, GFP_KERNEL);
if (!ses->domainName)
return -ENOMEM;
cifs_from_ucs2(ses->domainName,
(__le16 *)blobptr, attrsize, attrsize,
nls_cp, false);
break;
}
}
blobptr += attrsize; /* advance attr value */
}
return 0;
}
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
const struct nls_table *nls_cp)
{
int rc = 0;
int len;
char nt_hash[CIFS_NTHASH_SIZE];
wchar_t *user;
wchar_t *domain;
wchar_t *server;
if (!ses->server->secmech.sdeschmacmd5) {
cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
return -1;
}
/* calculate md4 hash of password */
E_md4hash(ses->password, nt_hash);
crypto_shash_setkey(ses->server->secmech.hmacmd5, nt_hash,
CIFS_NTHASH_SIZE);
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
return rc;
}
/* convert ses->user_name to unicode and uppercase */
len = strlen(ses->user_name);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
rc = -ENOMEM;
goto calc_exit_2;
}
len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
UniStrupr(user);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
(char *)user, 2 * len);
/* convert ses->domainName to unicode and uppercase */
if (ses->domainName) {
len = strlen(ses->domainName);
domain = kmalloc(2 + (len * 2), GFP_KERNEL);
if (domain == NULL) {
cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
rc = -ENOMEM;
goto calc_exit_1;
}
len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
nls_cp);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
(char *)domain, 2 * len);
kfree(domain);
} else if (ses->serverName) {
len = strlen(ses->serverName);
server = kmalloc(2 + (len * 2), GFP_KERNEL);
if (server == NULL) {
cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
rc = -ENOMEM;
goto calc_exit_1;
}
len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
nls_cp);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
(char *)server, 2 * len);
kfree(server);
}
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
ntlmv2_hash);
calc_exit_1:
kfree(user);
calc_exit_2:
return rc;
}
static int
CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
{
int rc;
unsigned int offset = CIFS_SESS_KEY_SIZE + 8;
if (!ses->server->secmech.sdeschmacmd5) {
cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
return -1;
}
crypto_shash_setkey(ses->server->secmech.hmacmd5,
ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
return rc;
}
if (ses->server->secType == RawNTLMSSP)
memcpy(ses->auth_key.response + offset,
ses->ntlmssp->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
else
memcpy(ses->auth_key.response + offset,
ses->server->cryptkey, CIFS_SERVER_CHALLENGE_SIZE);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response + offset, ses->auth_key.len - offset);
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response + CIFS_SESS_KEY_SIZE);
return rc;
}
int
setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
int rc;
int baselen;
unsigned int tilen;
struct ntlmv2_resp *buf;
char ntlmv2_hash[16];
unsigned char *tiblob = NULL; /* target info blob */
if (ses->server->secType == RawNTLMSSP) {
if (!ses->domainName) {
rc = find_domain_name(ses, nls_cp);
if (rc) {
cERROR(1, "error %d finding domain name", rc);
goto setup_ntlmv2_rsp_ret;
}
}
} else {
rc = build_avpair_blob(ses, nls_cp);
if (rc) {
cERROR(1, "error %d building av pair blob", rc);
goto setup_ntlmv2_rsp_ret;
}
}
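/* auth_key.response layout: session key, then the ntlmv2_resp
header, then the target info (AV pairs) appended after it */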
baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
tilen = ses->auth_key.len;
tiblob = ses->auth_key.response;
ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
rc = -ENOMEM;
ses->auth_key.len = 0;
cERROR(1, "%s: Can't allocate auth blob", __func__);
goto setup_ntlmv2_rsp_ret;
}
ses->auth_key.len += baselen;
buf = (struct ntlmv2_resp *)
(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
buf->blob_signature = cpu_to_le32(0x00000101);
buf->reserved = 0;
buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
buf->reserved2 = 0;
memcpy(ses->auth_key.response + baselen, tiblob, tilen);
/* calculate ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
if (rc) {
cERROR(1, "could not get v2 hash rc %d", rc);
goto setup_ntlmv2_rsp_ret;
}
/* calculate first part of the client response (CR1) */
rc = CalcNTLMv2_response(ses, ntlmv2_hash);
if (rc) {
cERROR(1, "Could not calculate CR1 rc: %d", rc);
goto setup_ntlmv2_rsp_ret;
}
/* now calculate the session key for NTLMv2 */
crypto_shash_setkey(ses->server->secmech.hmacmd5,
ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
cERROR(1, "%s: Could not init hmacmd5\n", __func__);
goto setup_ntlmv2_rsp_ret;
}
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response + CIFS_SESS_KEY_SIZE,
CIFS_HMAC_MD5_HASH_SIZE);
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response);
setup_ntlmv2_rsp_ret:
kfree(tiblob);
return rc;
}
int
calc_seckey(struct cifs_ses *ses)
{
int rc;
struct crypto_blkcipher *tfm_arc4;
struct scatterlist sgin, sgout;
struct blkcipher_desc desc;
unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_arc4)) {
rc = PTR_ERR(tfm_arc4);
cERROR(1, "could not allocate crypto API arc4\n");
return rc;
}
desc.tfm = tfm_arc4;
crypto_blkcipher_setkey(tfm_arc4, ses->auth_key.response,
CIFS_SESS_KEY_SIZE);
sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE);
sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
if (rc) {
cERROR(1, "could not encrypt session key rc: %d\n", rc);
crypto_free_blkcipher(tfm_arc4);
return rc;
}
/* make secondary_key/nonce as session key */
memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE);
/* and make len as that of session key only */
ses->auth_key.len = CIFS_SESS_KEY_SIZE;
crypto_free_blkcipher(tfm_arc4);
return 0;
}
void
cifs_crypto_shash_release(struct TCP_Server_Info *server)
{
if (server->secmech.md5)
crypto_free_shash(server->secmech.md5);
if (server->secmech.hmacmd5)
crypto_free_shash(server->secmech.hmacmd5);
kfree(server->secmech.sdeschmacmd5);
kfree(server->secmech.sdescmd5);
}
int
cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
{
int rc;
unsigned int size;
server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
if (IS_ERR(server->secmech.hmacmd5)) {
cERROR(1, "could not allocate crypto hmacmd5\n");
return PTR_ERR(server->secmech.hmacmd5);
}
server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(server->secmech.md5)) {
cERROR(1, "could not allocate crypto md5\n");
rc = PTR_ERR(server->secmech.md5);
goto crypto_allocate_md5_fail;
}
size = sizeof(struct shash_desc) +
crypto_shash_descsize(server->secmech.hmacmd5);
server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdeschmacmd5) {
cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
rc = -ENOMEM;
goto crypto_allocate_hmacmd5_sdesc_fail;
}
server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
server->secmech.sdeschmacmd5->shash.flags = 0x0;
size = sizeof(struct shash_desc) +
crypto_shash_descsize(server->secmech.md5);
server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdescmd5) {
cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
rc = -ENOMEM;
goto crypto_allocate_md5_sdesc_fail;
}
server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
server->secmech.sdescmd5->shash.flags = 0x0;
return 0;
crypto_allocate_md5_sdesc_fail:
kfree(server->secmech.sdeschmacmd5);
crypto_allocate_hmacmd5_sdesc_fail:
crypto_free_shash(server->secmech.md5);
crypto_allocate_md5_fail:
crypto_free_shash(server->secmech.hmacmd5);
return rc;
}
| gpl-2.0 |
balika011/android_kernel_xiaomi_kenzo | drivers/gpu/drm/nouveau/nouveau_volt.c | 2472 | 6245 | /*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_pm.h"
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
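/* DCB GPIO function tags for the voltage ID (VID) bits, least significant bit first */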
static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
int
nouveau_voltage_gpio_get(struct drm_device *dev)
{
struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_gpio *gpio = nouveau_gpio(device);
u8 vid = 0;
int i;
for (i = 0; i < nr_vidtag; i++) {
if (!(volt->vid_mask & (1 << i)))
continue;
vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
}
return nouveau_volt_lvl_lookup(dev, vid);
}
int
nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
{
struct nouveau_device *device = nouveau_dev(dev);
struct nouveau_gpio *gpio = nouveau_gpio(device);
struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
int vid, i;
vid = nouveau_volt_vid_lookup(dev, voltage);
if (vid < 0)
return vid;
for (i = 0; i < nr_vidtag; i++) {
if (!(volt->vid_mask & (1 << i)))
continue;
gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
}
return 0;
}
int
nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
{
struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
int i;
for (i = 0; i < volt->nr_level; i++) {
if (volt->level[i].voltage == voltage)
return volt->level[i].vid;
}
return -ENOENT;
}
int
nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
{
struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
int i;
for (i = 0; i < volt->nr_level; i++) {
if (volt->level[i].vid == vid)
return volt->level[i].voltage;
}
return -ENOENT;
}
void
nouveau_volt_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_pm *pm = nouveau_pm(dev);
struct nouveau_pm_voltage *voltage = &pm->voltage;
struct nvbios *bios = &drm->vbios;
struct dcb_gpio_func func;
struct bit_entry P;
u8 *volt = NULL, *entry;
int i, headerlen, recordlen, entries, vidmask, vidshift;
if (bios->type == NVBIOS_BIT) {
if (bit_table(dev, 'P', &P))
return;
if (P.version == 1)
volt = ROMPTR(dev, P.data[16]);
else
if (P.version == 2)
volt = ROMPTR(dev, P.data[12]);
else {
NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
}
} else {
if (bios->data[bios->offset + 6] < 0x27) {
NV_DEBUG(drm, "BMP version too old for voltage\n");
return;
}
volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
}
if (!volt) {
NV_DEBUG(drm, "voltage table pointer invalid\n");
return;
}
switch (volt[0]) {
case 0x10:
case 0x11:
case 0x12:
headerlen = 5;
recordlen = volt[1];
entries = volt[2];
vidshift = 0;
vidmask = volt[4];
break;
case 0x20:
headerlen = volt[1];
recordlen = volt[3];
entries = volt[2];
vidshift = 0; /* could be vidshift like 0x30? */
vidmask = volt[5];
break;
case 0x30:
headerlen = volt[1];
recordlen = volt[2];
entries = volt[3];
vidmask = volt[4];
/* no longer certain what volt[5] is, if it's related to
* the vid shift then it's definitely not a function of
* how many bits are set.
*
* after looking at a number of nva3+ vbios images, they
* all seem likely to have a static shift of 2.. lets
* go with that for now until proven otherwise.
*/
vidshift = 2;
break;
case 0x40:
headerlen = volt[1];
recordlen = volt[2];
entries = volt[3]; /* not a clue what the entries are for.. */
vidmask = volt[11]; /* guess.. */
vidshift = 0;
break;
default:
NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
return;
}
/* validate vid mask */
voltage->vid_mask = vidmask;
if (!voltage->vid_mask)
return;
i = 0;
while (vidmask) {
if (i >= nr_vidtag) {
NV_DEBUG(drm, "vid bit %d unknown\n", i);
return;
}
if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
return;
}
vidmask >>= 1;
i++;
}
/* parse vbios entries into common format */
voltage->version = volt[0];
if (voltage->version < 0x40) {
voltage->nr_level = entries;
voltage->level =
kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
if (!voltage->level)
return;
entry = volt + headerlen;
for (i = 0; i < entries; i++, entry += recordlen) {
voltage->level[i].voltage = entry[0] * 10000;
voltage->level[i].vid = entry[1] >> vidshift;
}
} else {
u32 volt_uv = ROM32(volt[4]);
s16 step_uv = ROM16(volt[8]);
u8 vid;
voltage->nr_level = voltage->vid_mask + 1;
voltage->level = kcalloc(voltage->nr_level,
sizeof(*voltage->level), GFP_KERNEL);
if (!voltage->level)
return;
for (vid = 0; vid <= voltage->vid_mask; vid++) {
voltage->level[vid].voltage = volt_uv;
voltage->level[vid].vid = vid;
volt_uv += step_uv;
}
}
voltage->supported = true;
}
void
nouveau_volt_fini(struct drm_device *dev)
{
struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
kfree(volt->level);
}
| gpl-2.0 |
glepag1/sultan-kernel-bruce-linaro | net/ipv4/netfilter/nf_nat_proto_icmp.c | 3240 | 2484 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
static bool
icmp_in_range(const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype,
const union nf_conntrack_man_proto *min,
const union nf_conntrack_man_proto *max)
{
return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
}
static void
icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
static u_int16_t id;
unsigned int range_size;
unsigned int i;
range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
/* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
range_size = 0xFFFF;
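/* id is static so successive allocations continue where the last
one stopped; try each id in the range once before giving up */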
for (i = 0; ; ++id) {
tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
(id % range_size));
if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
return;
}
return;
}
static bool
icmp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
struct icmphdr *hdr;
unsigned int hdroff = iphdroff + iph->ihl*4;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct icmphdr *)(skb->data + hdroff);
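/* rewrite the echo id and incrementally fix up the ICMP checksum */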
inet_proto_csum_replace2(&hdr->checksum, skb,
hdr->un.echo.id, tuple->src.u.icmp.id, 0);
hdr->un.echo.id = tuple->src.u.icmp.id;
return true;
}
const struct nf_nat_protocol nf_nat_protocol_icmp = {
.protonum = IPPROTO_ICMP,
.me = THIS_MODULE,
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
| gpl-2.0 |
omnirom/android_kernel_oppo_find5 | drivers/input/touchscreen/st1232.c | 4776 | 6513 | /*
* ST1232 Touchscreen Controller Driver
*
* Copyright (C) 2010 Renesas Solutions Corp.
* Tony SIM <chinyeow.sim.xt@renesas.com>
*
* Using code from:
* - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
* Copyright (C) 2007 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
#define ST1232_TS_NAME "st1232-ts"
#define MIN_X 0x00
#define MIN_Y 0x00
#define MAX_X 0x31f /* (800 - 1) */
#define MAX_Y 0x1df /* (480 - 1) */
#define MAX_AREA 0xff
#define MAX_FINGERS 2
struct st1232_ts_finger {
u16 x;
u16 y;
u8 t;
bool is_valid;
};
struct st1232_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
struct st1232_ts_finger finger[MAX_FINGERS];
struct dev_pm_qos_request low_latency_req;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
{
struct st1232_ts_finger *finger = ts->finger;
struct i2c_client *client = ts->client;
struct i2c_msg msg[2];
int error;
u8 start_reg;
u8 buf[10];
/* read touchscreen data from ST1232 */
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].len = 1;
msg[0].buf = &start_reg;
start_reg = 0x10;
msg[1].addr = ts->client->addr;
msg[1].flags = I2C_M_RD;
msg[1].len = sizeof(buf);
msg[1].buf = buf;
error = i2c_transfer(client->adapter, msg, 2);
if (error < 0)
return error;
/* get "valid" bits */
finger[0].is_valid = buf[2] >> 7;
finger[1].is_valid = buf[5] >> 7;
/* get xy coordinate */
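/* coordinates are 11 bits: X high bits in [6:4] and Y high bits in
[2:0] of the status byte; buf[3]/buf[4] (buf[6]/buf[7] for the
second finger) hold the low bytes */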
if (finger[0].is_valid) {
finger[0].x = ((buf[2] & 0x0070) << 4) | buf[3];
finger[0].y = ((buf[2] & 0x0007) << 8) | buf[4];
finger[0].t = buf[8];
}
if (finger[1].is_valid) {
finger[1].x = ((buf[5] & 0x0070) << 4) | buf[6];
finger[1].y = ((buf[5] & 0x0007) << 8) | buf[7];
finger[1].t = buf[9];
}
return 0;
}
static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
{
struct st1232_ts_data *ts = dev_id;
struct st1232_ts_finger *finger = ts->finger;
struct input_dev *input_dev = ts->input_dev;
int count = 0;
int i, ret;
ret = st1232_ts_read_data(ts);
if (ret < 0)
goto end;
/* multi touch protocol */
for (i = 0; i < MAX_FINGERS; i++) {
if (!finger[i].is_valid)
continue;
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, finger[i].t);
input_report_abs(input_dev, ABS_MT_POSITION_X, finger[i].x);
input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[i].y);
input_mt_sync(input_dev);
count++;
}
/* SYN_MT_REPORT only if no contact */
if (!count) {
input_mt_sync(input_dev);
if (ts->low_latency_req.dev) {
dev_pm_qos_remove_request(&ts->low_latency_req);
ts->low_latency_req.dev = NULL;
}
} else if (!ts->low_latency_req.dev) {
/* First contact, request 100 us latency. */
dev_pm_qos_add_ancestor_request(&ts->client->dev,
&ts->low_latency_req, 100);
}
/* SYN_REPORT */
input_sync(input_dev);
end:
return IRQ_HANDLED;
}
static int __devinit st1232_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
struct input_dev *input_dev;
int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "need I2C_FUNC_I2C\n");
return -EIO;
}
if (!client->irq) {
dev_err(&client->dev, "no IRQ?\n");
return -EINVAL;
}
ts = kzalloc(sizeof(struct st1232_ts_data), GFP_KERNEL);
input_dev = input_allocate_device();
if (!ts || !input_dev) {
error = -ENOMEM;
goto err_free_mem;
}
ts->client = client;
ts->input_dev = input_dev;
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
__set_bit(EV_SYN, input_dev->evbit);
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_ABS, input_dev->evbit);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_X, MIN_X, MAX_X, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_Y, MIN_Y, MAX_Y, 0, 0);
error = request_threaded_irq(client->irq, NULL, st1232_ts_irq_handler,
IRQF_ONESHOT, client->name, ts);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_mem;
}
error = input_register_device(ts->input_dev);
if (error) {
dev_err(&client->dev, "Unable to register %s input device\n",
input_dev->name);
goto err_free_irq;
}
i2c_set_clientdata(client, ts);
device_init_wakeup(&client->dev, 1);
return 0;
err_free_irq:
free_irq(client->irq, ts);
err_free_mem:
input_free_device(input_dev);
kfree(ts);
return error;
}
static int __devexit st1232_ts_remove(struct i2c_client *client)
{
struct st1232_ts_data *ts = i2c_get_clientdata(client);
device_init_wakeup(&client->dev, 0);
free_irq(client->irq, ts);
input_unregister_device(ts->input_dev);
kfree(ts);
return 0;
}
#ifdef CONFIG_PM
static int st1232_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (device_may_wakeup(&client->dev))
enable_irq_wake(client->irq);
else
disable_irq(client->irq);
return 0;
}
static int st1232_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
if (device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
else
enable_irq(client->irq);
return 0;
}
static const struct dev_pm_ops st1232_ts_pm_ops = {
.suspend = st1232_ts_suspend,
.resume = st1232_ts_resume,
};
#endif
static const struct i2c_device_id st1232_ts_id[] = {
{ ST1232_TS_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
.remove = __devexit_p(st1232_ts_remove),
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &st1232_ts_pm_ops,
#endif
},
};
module_i2c_driver(st1232_ts_driver);
MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ktoonsez/SiyahD | drivers/media/common/tuners/qt1010.c | 4776 | 13845 | /*
* Driver for Quantek QT1010 silicon tuner
*
* Copyright (C) 2006 Antti Palosaari <crope@iki.fi>
* Aapo Tahkola <aet@rasterburn.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "qt1010.h"
#include "qt1010_priv.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "QT1010: " args); \
} while (0)
/* read single register */
static int qt1010_readreg(struct qt1010_priv *priv, u8 reg, u8 *val)
{
struct i2c_msg msg[2] = {
{ .addr = priv->cfg->i2c_address,
.flags = 0, .buf = ®, .len = 1 },
{ .addr = priv->cfg->i2c_address,
.flags = I2C_M_RD, .buf = val, .len = 1 },
};
if (i2c_transfer(priv->i2c, msg, 2) != 2) {
printk(KERN_WARNING "qt1010 I2C read failed\n");
return -EREMOTEIO;
}
return 0;
}
/* write single register */
static int qt1010_writereg(struct qt1010_priv *priv, u8 reg, u8 val)
{
u8 buf[2] = { reg, val };
struct i2c_msg msg = { .addr = priv->cfg->i2c_address,
.flags = 0, .buf = buf, .len = 2 };
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "qt1010 I2C write failed\n");
return -EREMOTEIO;
}
return 0;
}
/* dump all registers */
static void qt1010_dump_regs(struct qt1010_priv *priv)
{
u8 reg, val;
for (reg = 0; ; reg++) {
if (reg % 16 == 0) {
if (reg)
printk(KERN_CONT "\n");
printk(KERN_DEBUG "%02x:", reg);
}
if (qt1010_readreg(priv, reg, &val) == 0)
printk(KERN_CONT " %02x", val);
else
printk(KERN_CONT " --");
if (reg == 0x2f)
break;
}
printk(KERN_CONT "\n");
}
static int qt1010_set_params(struct dvb_frontend *fe,
struct dvb_frontend_parameters *params)
{
struct qt1010_priv *priv;
int err;
u32 freq, div, mod1, mod2;
u8 i, tmpval, reg05;
qt1010_i2c_oper_t rd[48] = {
{ QT1010_WR, 0x01, 0x80 },
{ QT1010_WR, 0x02, 0x3f },
{ QT1010_WR, 0x05, 0xff }, /* 02 c write */
{ QT1010_WR, 0x06, 0x44 },
{ QT1010_WR, 0x07, 0xff }, /* 04 c write */
{ QT1010_WR, 0x08, 0x08 },
{ QT1010_WR, 0x09, 0xff }, /* 06 c write */
{ QT1010_WR, 0x0a, 0xff }, /* 07 c write */
{ QT1010_WR, 0x0b, 0xff }, /* 08 c write */
{ QT1010_WR, 0x0c, 0xe1 },
{ QT1010_WR, 0x1a, 0xff }, /* 10 c write */
{ QT1010_WR, 0x1b, 0x00 },
{ QT1010_WR, 0x1c, 0x89 },
{ QT1010_WR, 0x11, 0xff }, /* 13 c write */
{ QT1010_WR, 0x12, 0xff }, /* 14 c write */
{ QT1010_WR, 0x22, 0xff }, /* 15 c write */
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x1e, 0xd0 },
{ QT1010_RD, 0x22, 0xff }, /* 16 c read */
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_RD, 0x05, 0xff }, /* 20 c read */
{ QT1010_RD, 0x22, 0xff }, /* 21 c read */
{ QT1010_WR, 0x23, 0xd0 },
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x1e, 0xe0 },
{ QT1010_RD, 0x23, 0xff }, /* 25 c read */
{ QT1010_RD, 0x23, 0xff }, /* 26 c read */
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x24, 0xd0 },
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x1e, 0xf0 },
{ QT1010_RD, 0x24, 0xff }, /* 31 c read */
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x14, 0x7f },
{ QT1010_WR, 0x15, 0x7f },
{ QT1010_WR, 0x05, 0xff }, /* 35 c write */
{ QT1010_WR, 0x06, 0x00 },
{ QT1010_WR, 0x15, 0x1f },
{ QT1010_WR, 0x16, 0xff },
{ QT1010_WR, 0x18, 0xff },
{ QT1010_WR, 0x1f, 0xff }, /* 40 c write */
{ QT1010_WR, 0x20, 0xff }, /* 41 c write */
{ QT1010_WR, 0x21, 0x53 },
{ QT1010_WR, 0x25, 0xff }, /* 43 c write */
{ QT1010_WR, 0x26, 0x15 },
{ QT1010_WR, 0x00, 0xff }, /* 45 c write */
{ QT1010_WR, 0x02, 0x00 },
{ QT1010_WR, 0x01, 0x00 }
};
#define FREQ1 32000000 /* 32 MHz */
#define FREQ2 4000000 /* 4 MHz Quartz oscillator in the stick? */
priv = fe->tuner_priv;
freq = params->frequency;
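/*
 * Round the requested frequency down onto the synthesizer step grid,
 * then take its remainders against the 32 MHz and 4 MHz references;
 * most of the register values below are derived from these two moduli.
 */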
div = (freq + QT1010_OFFSET) / QT1010_STEP;
freq = (div * QT1010_STEP) - QT1010_OFFSET;
mod1 = (freq + QT1010_OFFSET) % FREQ1;
mod2 = (freq + QT1010_OFFSET) % FREQ2;
priv->bandwidth =
(fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
priv->frequency = freq;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */
/* reg 05 base value */
if (freq < 290000000) reg05 = 0x14; /* 290 MHz */
else if (freq < 610000000) reg05 = 0x34; /* 610 MHz */
else if (freq < 802000000) reg05 = 0x54; /* 802 MHz */
else reg05 = 0x74;
/* 0x5 */
rd[2].val = reg05;
/* 07 - set frequency: 32 MHz scale */
rd[4].val = (freq + QT1010_OFFSET) / FREQ1;
/* 09 - changes every 8/24 MHz */
if (mod1 < 8000000) rd[6].val = 0x1d;
else rd[6].val = 0x1c;
/* 0a - set frequency: 4 MHz scale (max 28 MHz) */
if (mod1 < 1*FREQ2) rd[7].val = 0x09; /* +0 MHz */
else if (mod1 < 2*FREQ2) rd[7].val = 0x08; /* +4 MHz */
else if (mod1 < 3*FREQ2) rd[7].val = 0x0f; /* +8 MHz */
else if (mod1 < 4*FREQ2) rd[7].val = 0x0e; /* +12 MHz */
else if (mod1 < 5*FREQ2) rd[7].val = 0x0d; /* +16 MHz */
else if (mod1 < 6*FREQ2) rd[7].val = 0x0c; /* +20 MHz */
else if (mod1 < 7*FREQ2) rd[7].val = 0x0b; /* +24 MHz */
else rd[7].val = 0x0a; /* +28 MHz */
/* 0b - changes every 2/2 MHz */
if (mod2 < 2000000) rd[8].val = 0x45;
else rd[8].val = 0x44;
/* 1a - set frequency: 125 kHz scale (max 3875 kHz)*/
tmpval = 0x78; /* byte, overflows intentionally */
rd[10].val = tmpval-((mod2/QT1010_STEP)*0x08);
/* 11 */
rd[13].val = 0xfd; /* TODO: correct value calculation */
/* 12 */
rd[14].val = 0x91; /* TODO: correct value calculation */
/* 22 */
if (freq < 450000000) rd[15].val = 0xd0; /* 450 MHz */
else if (freq < 482000000) rd[15].val = 0xd1; /* 482 MHz */
else if (freq < 514000000) rd[15].val = 0xd4; /* 514 MHz */
else if (freq < 546000000) rd[15].val = 0xd7; /* 546 MHz */
else if (freq < 610000000) rd[15].val = 0xda; /* 610 MHz */
else rd[15].val = 0xd0;
/* 05 */
rd[35].val = (reg05 & 0xf0);
/* 1f */
if (mod1 < 8000000) tmpval = 0x00;
else if (mod1 < 12000000) tmpval = 0x01;
else if (mod1 < 16000000) tmpval = 0x02;
else if (mod1 < 24000000) tmpval = 0x03;
else if (mod1 < 28000000) tmpval = 0x04;
else tmpval = 0x05;
rd[40].val = (priv->reg1f_init_val + 0x0e + tmpval);
/* 20 */
if (mod1 < 8000000) tmpval = 0x00;
else if (mod1 < 12000000) tmpval = 0x01;
else if (mod1 < 20000000) tmpval = 0x02;
else if (mod1 < 24000000) tmpval = 0x03;
else if (mod1 < 28000000) tmpval = 0x04;
else tmpval = 0x05;
rd[41].val = (priv->reg20_init_val + 0x0d + tmpval);
/* 25 */
rd[43].val = priv->reg25_init_val;
/* 00 */
rd[45].val = 0x92; /* TODO: correct value calculation */
dprintk("freq:%u 05:%02x 07:%02x 09:%02x 0a:%02x 0b:%02x " \
"1a:%02x 11:%02x 12:%02x 22:%02x 05:%02x 1f:%02x " \
"20:%02x 25:%02x 00:%02x", \
freq, rd[2].val, rd[4].val, rd[6].val, rd[7].val, rd[8].val, \
rd[10].val, rd[13].val, rd[14].val, rd[15].val, rd[35].val, \
rd[40].val, rd[41].val, rd[43].val, rd[45].val);
for (i = 0; i < ARRAY_SIZE(rd); i++) {
if (rd[i].oper == QT1010_WR) {
err = qt1010_writereg(priv, rd[i].reg, rd[i].val);
} else { /* a read is required for proper locking */
err = qt1010_readreg(priv, rd[i].reg, &tmpval);
}
if (err) return err;
}
if (debug)
qt1010_dump_regs(priv);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */
return 0;
}
static int qt1010_init_meas1(struct qt1010_priv *priv,
u8 oper, u8 reg, u8 reg_init_val, u8 *retval)
{
u8 i, val1, val2;
int err;
qt1010_i2c_oper_t i2c_data[] = {
{ QT1010_WR, reg, reg_init_val },
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x1e, oper },
{ QT1010_RD, reg, 0xff }
};
for (i = 0; i < ARRAY_SIZE(i2c_data); i++) {
if (i2c_data[i].oper == QT1010_WR) {
err = qt1010_writereg(priv, i2c_data[i].reg,
i2c_data[i].val);
} else {
err = qt1010_readreg(priv, i2c_data[i].reg, &val2);
}
if (err) return err;
}
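/*
 * The measurement settles asynchronously: keep re-reading the register
 * until two consecutive reads agree before trusting the value.
 */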
do {
val1 = val2;
err = qt1010_readreg(priv, reg, &val2);
if (err) return err;
dprintk("compare reg:%02x %02x %02x", reg, val1, val2);
} while (val1 != val2);
*retval = val1;
return qt1010_writereg(priv, 0x1e, 0x00);
}
static int qt1010_init_meas2(struct qt1010_priv *priv,
u8 reg_init_val, u8 *retval)
{
u8 i, val;
int err;
qt1010_i2c_oper_t i2c_data[] = {
{ QT1010_WR, 0x07, reg_init_val },
{ QT1010_WR, 0x22, 0xd0 },
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x1e, 0xd0 },
{ QT1010_RD, 0x22, 0xff },
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x22, 0xff }
};
for (i = 0; i < ARRAY_SIZE(i2c_data); i++) {
if (i2c_data[i].oper == QT1010_WR) {
err = qt1010_writereg(priv, i2c_data[i].reg,
i2c_data[i].val);
} else {
err = qt1010_readreg(priv, i2c_data[i].reg, &val);
}
if (err) return err;
}
*retval = val;
return 0;
}
static int qt1010_init(struct dvb_frontend *fe)
{
struct qt1010_priv *priv = fe->tuner_priv;
struct dvb_frontend_parameters params = { 0 };
int err = 0;
u8 i, tmpval, *valptr = NULL;
qt1010_i2c_oper_t i2c_data[] = {
{ QT1010_WR, 0x01, 0x80 },
{ QT1010_WR, 0x0d, 0x84 },
{ QT1010_WR, 0x0e, 0xb7 },
{ QT1010_WR, 0x2a, 0x23 },
{ QT1010_WR, 0x2c, 0xdc },
{ QT1010_M1, 0x25, 0x40 }, /* get reg 25 init value */
{ QT1010_M1, 0x81, 0xff }, /* get reg 25 init value */
{ QT1010_WR, 0x2b, 0x70 },
{ QT1010_WR, 0x2a, 0x23 },
{ QT1010_M1, 0x26, 0x08 },
{ QT1010_M1, 0x82, 0xff },
{ QT1010_WR, 0x05, 0x14 },
{ QT1010_WR, 0x06, 0x44 },
{ QT1010_WR, 0x07, 0x28 },
{ QT1010_WR, 0x08, 0x0b },
{ QT1010_WR, 0x11, 0xfd },
{ QT1010_M1, 0x22, 0x0d },
{ QT1010_M1, 0xd0, 0xff },
{ QT1010_WR, 0x06, 0x40 },
{ QT1010_WR, 0x16, 0xf0 },
{ QT1010_WR, 0x02, 0x38 },
{ QT1010_WR, 0x03, 0x18 },
{ QT1010_WR, 0x20, 0xe0 },
{ QT1010_M1, 0x1f, 0x20 }, /* get reg 1f init value */
{ QT1010_M1, 0x84, 0xff }, /* get reg 1f init value */
{ QT1010_RD, 0x20, 0x20 }, /* get reg 20 init value */
{ QT1010_WR, 0x03, 0x19 },
{ QT1010_WR, 0x02, 0x3f },
{ QT1010_WR, 0x21, 0x53 },
{ QT1010_RD, 0x21, 0xff },
{ QT1010_WR, 0x11, 0xfd },
{ QT1010_WR, 0x05, 0x34 },
{ QT1010_WR, 0x06, 0x44 },
{ QT1010_WR, 0x08, 0x08 }
};
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */
for (i = 0; i < ARRAY_SIZE(i2c_data); i++) {
switch (i2c_data[i].oper) {
case QT1010_WR:
err = qt1010_writereg(priv, i2c_data[i].reg,
i2c_data[i].val);
break;
case QT1010_RD:
if (i2c_data[i].reg == 0x20)
valptr = &priv->reg20_init_val;
else
valptr = &tmpval;
err = qt1010_readreg(priv, i2c_data[i].reg, valptr);
break;
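/*
 * QT1010_M1 entries come in pairs: this entry names the register to
 * measure and its init value, while the following entry's reg field
 * holds the mode byte written to reg 0x1e - hence the extra i++ below.
 */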
case QT1010_M1:
if (i2c_data[i].reg == 0x25)
valptr = &priv->reg25_init_val;
else if (i2c_data[i].reg == 0x1f)
valptr = &priv->reg1f_init_val;
else
valptr = &tmpval;
err = qt1010_init_meas1(priv, i2c_data[i+1].reg,
i2c_data[i].reg,
i2c_data[i].val, valptr);
i++;
break;
}
if (err) return err;
}
for (i = 0x31; i < 0x3a; i++) /* 0x31 - 0x39 */
if ((err = qt1010_init_meas2(priv, i, &tmpval)))
return err;
params.frequency = 545000000; /* Sigmatek DVB-110 545000000 */
/* MSI Megasky 580 GL861 533000000 */
return qt1010_set_params(fe, &params);
}
static int qt1010_release(struct dvb_frontend *fe)
{
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
return 0;
}
static int qt1010_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct qt1010_priv *priv = fe->tuner_priv;
*frequency = priv->frequency;
return 0;
}
static int qt1010_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
struct qt1010_priv *priv = fe->tuner_priv;
*bandwidth = priv->bandwidth;
return 0;
}
static const struct dvb_tuner_ops qt1010_tuner_ops = {
.info = {
.name = "Quantek QT1010",
.frequency_min = QT1010_MIN_FREQ,
.frequency_max = QT1010_MAX_FREQ,
.frequency_step = QT1010_STEP,
},
.release = qt1010_release,
.init = qt1010_init,
/* TODO: implement sleep */
.set_params = qt1010_set_params,
.get_frequency = qt1010_get_frequency,
.get_bandwidth = qt1010_get_bandwidth
};
struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
struct qt1010_config *cfg)
{
struct qt1010_priv *priv = NULL;
u8 id;
priv = kzalloc(sizeof(struct qt1010_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
priv->cfg = cfg;
priv->i2c = i2c;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */
/* Try to detect tuner chip. Probably this is not correct register. */
if (qt1010_readreg(priv, 0x29, &id) != 0 || (id != 0x39)) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate on failure too */
kfree(priv);
return NULL;
}
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */
printk(KERN_INFO "Quantek QT1010 successfully identified.\n");
memcpy(&fe->ops.tuner_ops, &qt1010_tuner_ops,
sizeof(struct dvb_tuner_ops));
fe->tuner_priv = priv;
return fe;
}
EXPORT_SYMBOL(qt1010_attach);
MODULE_DESCRIPTION("Quantek QT1010 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CMyst/android_kernel_htc_msm8960 | arch/arm/mach-imx/mach-mx31lilly.c | 4776 | 8984 | /*
* LILLY-1131 module support
*
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
*
* based on code for other MX31 boards,
*
* Copyright 2005-2007 Freescale Semiconductor
* Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
* Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/smsc911x.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/mfd/mc13783.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx3.h>
#include <mach/board-mx31lilly.h>
#include <mach/ulpi.h>
#include "devices-imx31.h"
/*
* This file contains module-specific initialization routines for LILLY-1131.
* Initialization of peripherals found on the baseboard is implemented in the
* appropriate baseboard support code.
*/
/* SMSC ethernet support */
static struct resource smsc91x_resources[] = {
{
.start = MX31_CS4_BASE_ADDR,
.end = MX31_CS4_BASE_ADDR + 0xffff,
.flags = IORESOURCE_MEM,
},
{
.start = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
.end = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
}
};
static struct smsc911x_platform_config smsc911x_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_32BIT |
SMSC911X_SAVE_MAC_ADDRESS |
SMSC911X_FORCE_INTERNAL_PHY,
};
static struct platform_device smsc91x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc91x_resources),
.resource = smsc91x_resources,
.dev = {
.platform_data = &smsc911x_config,
}
};
/* NOR flash */
static struct physmap_flash_data nor_flash_data = {
.width = 2,
};
static struct resource nor_flash_resource = {
.start = 0xa0000000,
.end = 0xa1ffffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device physmap_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &nor_flash_data,
},
.resource = &nor_flash_resource,
.num_resources = 1,
};
/* USB */
#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
static int usbh1_init(struct platform_device *pdev)
{
int pins[] = {
MX31_PIN_CSPI1_MOSI__USBH1_RXDM,
MX31_PIN_CSPI1_MISO__USBH1_RXDP,
MX31_PIN_CSPI1_SS0__USBH1_TXDM,
MX31_PIN_CSPI1_SS1__USBH1_TXDP,
MX31_PIN_CSPI1_SS2__USBH1_RCV,
MX31_PIN_CSPI1_SCLK__USBH1_OEB,
MX31_PIN_CSPI1_SPI_RDY__USBH1_FS,
};
mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB H1");
mxc_iomux_set_pad(MX31_PIN_CSPI1_MOSI, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_CSPI1_MISO, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_CSPI1_SS0, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_CSPI1_SS1, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_CSPI1_SS2, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_CSPI1_SCLK, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG);
mxc_iomux_set_gpr(MUX_PGP_USB_SUSPEND, true);
mdelay(10);
return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED |
MXC_EHCI_INTERFACE_SINGLE_UNI);
}
static int usbh2_init(struct platform_device *pdev)
{
int pins[] = {
MX31_PIN_USBH2_DATA0__USBH2_DATA0,
MX31_PIN_USBH2_DATA1__USBH2_DATA1,
MX31_PIN_USBH2_CLK__USBH2_CLK,
MX31_PIN_USBH2_DIR__USBH2_DIR,
MX31_PIN_USBH2_NXT__USBH2_NXT,
MX31_PIN_USBH2_STP__USBH2_STP,
};
mxc_iomux_setup_multiple_pins(pins, ARRAY_SIZE(pins), "USB H2");
mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG);
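/* ULPI data lines 2..7 are multiplexed onto the audio port pads */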
mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG);
mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG);
mxc_iomux_set_gpr(MUX_PGP_UH2, true);
/* chip select */
mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_DTR_DCE1, IOMUX_CONFIG_GPIO),
"USBH2_CS");
gpio_request(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), "USBH2 CS");
gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1), 0);
mdelay(10);
return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
}
static const struct mxc_usbh_platform_data usbh1_pdata __initconst = {
.init = usbh1_init,
.portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL,
};
static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.init = usbh2_init,
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
};
static void __init lilly1131_usb_init(void)
{
imx31_add_mxc_ehci_hs(1, &usbh1_pdata);
usbh2_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
ULPI_OTG_DRVVBUS_EXT);
if (usbh2_pdata.otg)
imx31_add_mxc_ehci_hs(2, &usbh2_pdata);
}
/* SPI */
static int spi_internal_chipselect[] = {
MXC_SPI_CS(0),
MXC_SPI_CS(1),
MXC_SPI_CS(2),
};
static const struct spi_imx_master spi0_pdata __initconst = {
.chipselect = spi_internal_chipselect,
.num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
};
static const struct spi_imx_master spi1_pdata __initconst = {
.chipselect = spi_internal_chipselect,
.num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
};
static struct mc13xxx_platform_data mc13783_pdata __initdata = {
.flags = MC13XXX_USE_RTC | MC13XXX_USE_TOUCHSCREEN,
};
static struct spi_board_info mc13783_dev __initdata = {
.modalias = "mc13783",
.max_speed_hz = 1000000,
.bus_num = 1,
.chip_select = 0,
.platform_data = &mc13783_pdata,
.irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
};
static struct platform_device *devices[] __initdata = {
&smsc91x_device,
&physmap_flash_device,
};
static int mx31lilly_baseboard;
core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vdd33a", "smsc911x"),
REGULATOR_SUPPLY("vddvario", "smsc911x"),
};
static void __init mx31lilly_board_init(void)
{
imx31_soc_init();
switch (mx31lilly_baseboard) {
case MX31LILLY_NOBOARD:
break;
case MX31LILLY_DB:
mx31lilly_db_init();
break;
default:
printk(KERN_ERR "Illegal mx31lilly_baseboard type %d\n",
mx31lilly_baseboard);
}
mxc_iomux_alloc_pin(MX31_PIN_CS4__CS4, "Ethernet CS");
/* SPI */
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_SCLK__SCLK, "SPI1_CLK");
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_MOSI__MOSI, "SPI1_TX");
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_MISO__MISO, "SPI1_RX");
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_SPI_RDY__SPI_RDY, "SPI1_RDY");
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_SS0__SS0, "SPI1_SS0");
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_SS1__SS1, "SPI1_SS1");
mxc_iomux_alloc_pin(MX31_PIN_CSPI1_SS2__SS2, "SPI1_SS2");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SCLK__SCLK, "SPI2_CLK");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_MOSI__MOSI, "SPI2_TX");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_MISO__MISO, "SPI2_RX");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SPI_RDY__SPI_RDY, "SPI2_RDY");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS0__SS0, "SPI2_SS0");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS1__SS1, "SPI2_SS1");
mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS2__SS2, "SPI2_SS2");
imx31_add_spi_imx0(&spi0_pdata);
imx31_add_spi_imx1(&spi1_pdata);
spi_register_board_info(&mc13783_dev, 1);
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
platform_add_devices(devices, ARRAY_SIZE(devices));
/* USB */
lilly1131_usb_init();
}
static void __init mx31lilly_timer_init(void)
{
mx31_clocks_init(26000000);
}
static struct sys_timer mx31lilly_timer = {
.init = mx31lilly_timer_init,
};
MACHINE_START(LILLY1131, "INCO startec LILLY-1131")
.atag_offset = 0x100,
.map_io = mx31_map_io,
.init_early = imx31_init_early,
.init_irq = mx31_init_irq,
.handle_irq = imx31_handle_irq,
.timer = &mx31lilly_timer,
.init_machine = mx31lilly_board_init,
.restart = mxc_restart,
MACHINE_END
| gpl-2.0 |
StelixROM/android_kernel_htc_msm8960 | sound/soc/davinci/davinci-vcif.c | 5032 | 7328 | /*
* ALSA SoC Voice Codec Interface for TI DAVINCI processor
*
* Copyright (C) 2010 Texas Instruments.
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mfd/davinci_voicecodec.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include "davinci-pcm.h"
#include "davinci-i2s.h"
#define MOD_REG_BIT(val, mask, set) do { \
if (set) { \
val |= mask; \
} else { \
val &= ~mask; \
} \
} while (0)
struct davinci_vcif_dev {
struct davinci_vc *davinci_vc;
struct davinci_pcm_dma_params dma_params[2];
};
static void davinci_vcif_start(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_vcif_dev *davinci_vcif_dev =
snd_soc_dai_get_drvdata(rtd->cpu_dai);
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
u32 w;
/* Start the sample generator and enable transmitter/receiver */
w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 0);
else
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 0);
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
}
static void davinci_vcif_stop(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct davinci_vcif_dev *davinci_vcif_dev =
snd_soc_dai_get_drvdata(rtd->cpu_dai);
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
u32 w;
/* Reset transmitter/receiver and sample rate/frame sync generators */
w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTDAC, 1);
else
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RSTADC, 1);
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
}
static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct davinci_vcif_dev *davinci_vcif_dev = snd_soc_dai_get_drvdata(dai);
struct davinci_vc *davinci_vc = davinci_vcif_dev->davinci_vc;
struct davinci_pcm_dma_params *dma_params =
&davinci_vcif_dev->dma_params[substream->stream];
u32 w;
/* Restart the codec before setup */
davinci_vcif_stop(substream);
davinci_vcif_start(substream);
/* General line settings */
writel(DAVINCI_VC_CTRL_MASK, davinci_vc->base + DAVINCI_VC_CTRL);
writel(DAVINCI_VC_INT_MASK, davinci_vc->base + DAVINCI_VC_INTCLR);
writel(DAVINCI_VC_INT_MASK, davinci_vc->base + DAVINCI_VC_INTEN);
w = readl(davinci_vc->base + DAVINCI_VC_CTRL);
/* Determine xfer data type */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
dma_params->data_type = 0;
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_BITS_8 |
DAVINCI_VC_CTRL_WD_UNSIGNED, 1);
break;
case SNDRV_PCM_FORMAT_S8:
dma_params->data_type = 1;
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_WD_BITS_8, 1);
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
break;
case SNDRV_PCM_FORMAT_S16_LE:
dma_params->data_type = 2;
MOD_REG_BIT(w, DAVINCI_VC_CTRL_RD_BITS_8 |
DAVINCI_VC_CTRL_RD_UNSIGNED |
DAVINCI_VC_CTRL_WD_BITS_8 |
DAVINCI_VC_CTRL_WD_UNSIGNED, 0);
break;
default:
printk(KERN_WARNING "davinci-vcif: unsupported PCM format");
return -EINVAL;
}
dma_params->acnt = dma_params->data_type;
writel(w, davinci_vc->base + DAVINCI_VC_CTRL);
return 0;
}
static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
int ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
davinci_vcif_start(substream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_vcif_stop(substream);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int davinci_vcif_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct davinci_vcif_dev *dev = snd_soc_dai_get_drvdata(dai);
snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
return 0;
}
#define DAVINCI_VCIF_RATES SNDRV_PCM_RATE_8000_48000
static const struct snd_soc_dai_ops davinci_vcif_dai_ops = {
.startup = davinci_vcif_startup,
.trigger = davinci_vcif_trigger,
.hw_params = davinci_vcif_hw_params,
};
static struct snd_soc_dai_driver davinci_vcif_dai = {
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = DAVINCI_VCIF_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = DAVINCI_VCIF_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,},
.ops = &davinci_vcif_dai_ops,
};
static int davinci_vcif_probe(struct platform_device *pdev)
{
struct davinci_vc *davinci_vc = pdev->dev.platform_data;
struct davinci_vcif_dev *davinci_vcif_dev;
int ret;
davinci_vcif_dev = devm_kzalloc(&pdev->dev,
sizeof(struct davinci_vcif_dev),
GFP_KERNEL);
if (!davinci_vcif_dev) {
dev_dbg(&pdev->dev,
"could not allocate memory for private data\n");
return -ENOMEM;
}
/* DMA tx params */
davinci_vcif_dev->davinci_vc = davinci_vc;
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel =
davinci_vc->davinci_vcif.dma_tx_channel;
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
davinci_vc->davinci_vcif.dma_tx_addr;
/* DMA rx params */
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel =
davinci_vc->davinci_vcif.dma_rx_channel;
davinci_vcif_dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
davinci_vc->davinci_vcif.dma_rx_addr;
dev_set_drvdata(&pdev->dev, davinci_vcif_dev);
ret = snd_soc_register_dai(&pdev->dev, &davinci_vcif_dai);
if (ret != 0) {
dev_err(&pdev->dev, "could not register dai\n");
return ret;
}
return 0;
}
static int davinci_vcif_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static struct platform_driver davinci_vcif_driver = {
.probe = davinci_vcif_probe,
.remove = davinci_vcif_remove,
.driver = {
.name = "davinci-vcif",
.owner = THIS_MODULE,
},
};
module_platform_driver(davinci_vcif_driver);
MODULE_AUTHOR("Miguel Aguilar");
MODULE_DESCRIPTION("Texas Instruments DaVinci ASoC Voice Codec Interface");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shankarathi07/linux_lg_lollipop | sound/soc/blackfin/bfin-eval-adau1701.c | 5032 | 3315 | /*
* Machine driver for EVAL-ADAU1701MINIZ on Analog Devices bfin
* evaluation boards.
*
* Copyright 2011 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include "../codecs/adau1701.h"
static const struct snd_soc_dapm_widget bfin_eval_adau1701_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Speaker", NULL),
SND_SOC_DAPM_LINE("Line Out", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
};
static const struct snd_soc_dapm_route bfin_eval_adau1701_dapm_routes[] = {
{ "Speaker", NULL, "OUT0" },
{ "Speaker", NULL, "OUT1" },
{ "Line Out", NULL, "OUT2" },
{ "Line Out", NULL, "OUT3" },
{ "IN0", NULL, "Line In" },
{ "IN1", NULL, "Line In" },
};
static int bfin_eval_adau1701_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret;
ret = snd_soc_dai_set_sysclk(codec_dai, ADAU1701_CLK_SRC_OSC, 12288000,
SND_SOC_CLOCK_IN);
return ret;
}
static struct snd_soc_ops bfin_eval_adau1701_ops = {
.hw_params = bfin_eval_adau1701_hw_params,
};
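/*
 * I2S with the ADAU1701 supplying bit and frame clocks (codec is clock
 * master), normal bit clock and frame polarity.
 */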
#define BFIN_EVAL_ADAU1701_DAI_FMT (SND_SOC_DAIFMT_I2S | \
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM)
static struct snd_soc_dai_link bfin_eval_adau1701_dai[] = {
{
.name = "adau1701",
.stream_name = "adau1701",
.cpu_dai_name = "bfin-i2s.0",
.codec_dai_name = "adau1701",
.platform_name = "bfin-i2s-pcm-audio",
.codec_name = "adau1701.0-0034",
.ops = &bfin_eval_adau1701_ops,
.dai_fmt = BFIN_EVAL_ADAU1701_DAI_FMT,
},
{
.name = "adau1701",
.stream_name = "adau1701",
.cpu_dai_name = "bfin-i2s.1",
.codec_dai_name = "adau1701",
.platform_name = "bfin-i2s-pcm-audio",
.codec_name = "adau1701.0-0034",
.ops = &bfin_eval_adau1701_ops,
.dai_fmt = BFIN_EVAL_ADAU1701_DAI_FMT,
},
};
static struct snd_soc_card bfin_eval_adau1701 = {
.name = "bfin-eval-adau1701",
.owner = THIS_MODULE,
.dai_link = &bfin_eval_adau1701_dai[CONFIG_SND_BF5XX_SPORT_NUM],
.num_links = 1,
.dapm_widgets = bfin_eval_adau1701_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(bfin_eval_adau1701_dapm_widgets),
.dapm_routes = bfin_eval_adau1701_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(bfin_eval_adau1701_dapm_routes),
};
static int bfin_eval_adau1701_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &bfin_eval_adau1701;
card->dev = &pdev->dev;
return snd_soc_register_card(&bfin_eval_adau1701);
}
static int __devexit bfin_eval_adau1701_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
return 0;
}
static struct platform_driver bfin_eval_adau1701_driver = {
.driver = {
.name = "bfin-eval-adau1701",
.owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
},
.probe = bfin_eval_adau1701_probe,
.remove = __devexit_p(bfin_eval_adau1701_remove),
};
module_platform_driver(bfin_eval_adau1701_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("ALSA SoC bfin ADAU1701 driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:bfin-eval-adau1701");
| gpl-2.0 |
cogsy23/linux-hg | drivers/leds/leds-hp6xx.c | 7848 | 2283 | /*
* LED Triggers Core
* For the HP Jornada 620/660/680/690 handhelds
*
* Copyright 2008 Kristoffer Ericson <kristoffer.ericson@gmail.com>
* this driver is based on leds-spitz.c by Richard Purdie.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <asm/hd64461.h>
#include <mach/hp6xx.h>
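/*
 * Both LEDs are wired active-low: the green LED sits on a PKDR bit and
 * the red LED on an HD64461 GPB data bit, so writing 0 turns them on.
 */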
static void hp6xxled_green_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
u8 v8;
v8 = inb(PKDR);
if (value)
outb(v8 & (~PKDR_LED_GREEN), PKDR);
else
outb(v8 | PKDR_LED_GREEN, PKDR);
}
static void hp6xxled_red_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
u16 v16;
v16 = inw(HD64461_GPBDR);
if (value)
outw(v16 & (~HD64461_GPBDR_LED_RED), HD64461_GPBDR);
else
outw(v16 | HD64461_GPBDR_LED_RED, HD64461_GPBDR);
}
static struct led_classdev hp6xx_red_led = {
.name = "hp6xx:red",
.default_trigger = "hp6xx-charge",
.brightness_set = hp6xxled_red_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static struct led_classdev hp6xx_green_led = {
.name = "hp6xx:green",
.default_trigger = "ide-disk",
.brightness_set = hp6xxled_green_set,
.flags = LED_CORE_SUSPENDRESUME,
};
static int hp6xxled_probe(struct platform_device *pdev)
{
int ret;
ret = led_classdev_register(&pdev->dev, &hp6xx_red_led);
if (ret < 0)
return ret;
ret = led_classdev_register(&pdev->dev, &hp6xx_green_led);
if (ret < 0)
led_classdev_unregister(&hp6xx_red_led);
return ret;
}
static int hp6xxled_remove(struct platform_device *pdev)
{
led_classdev_unregister(&hp6xx_red_led);
led_classdev_unregister(&hp6xx_green_led);
return 0;
}
static struct platform_driver hp6xxled_driver = {
.probe = hp6xxled_probe,
.remove = hp6xxled_remove,
.driver = {
.name = "hp6xx-led",
.owner = THIS_MODULE,
},
};
module_platform_driver(hp6xxled_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hp6xx-led");
| gpl-2.0 |
Flyhalf205/android_kernel_htc_t6 | drivers/hwmon/pmbus/adm1275.c | 8872 | 10281 | /*
* Hardware monitoring driver for Analog Devices ADM1275 Hot-Swap Controller
* and Digital Power Monitor
*
* Copyright (c) 2011 Ericsson AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include "pmbus.h"
enum chips { adm1075, adm1275, adm1276 };
#define ADM1275_PEAK_IOUT 0xd0
#define ADM1275_PEAK_VIN 0xd1
#define ADM1275_PEAK_VOUT 0xd2
#define ADM1275_PMON_CONFIG 0xd4
#define ADM1275_VIN_VOUT_SELECT (1 << 6)
#define ADM1275_VRANGE (1 << 5)
#define ADM1075_IRANGE_50 (1 << 4)
#define ADM1075_IRANGE_25 (1 << 3)
#define ADM1075_IRANGE_MASK ((1 << 3) | (1 << 4))
#define ADM1275_IOUT_WARN2_LIMIT 0xd7
#define ADM1275_DEVICE_CONFIG 0xd8
#define ADM1275_IOUT_WARN2_SELECT (1 << 4)
#define ADM1276_PEAK_PIN 0xda
#define ADM1275_MFR_STATUS_IOUT_WARN2 (1 << 0)
#define ADM1075_READ_VAUX 0xdd
#define ADM1075_VAUX_OV_WARN_LIMIT 0xde
#define ADM1075_VAUX_UV_WARN_LIMIT 0xdf
#define ADM1075_VAUX_STATUS 0xf6
#define ADM1075_VAUX_OV_WARN (1<<7)
#define ADM1075_VAUX_UV_WARN (1<<6)
struct adm1275_data {
int id;
bool have_oc_fault;
struct pmbus_driver_info info;
};
#define to_adm1275_data(x) container_of(x, struct adm1275_data, info)
static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
const struct adm1275_data *data = to_adm1275_data(info);
int ret = 0;
if (page)
return -ENXIO;
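/*
 * Map the generic and virtual PMBus registers onto the chip's
 * vendor-specific ones; limits the current configuration cannot
 * provide are reported as -ENXIO.
 */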
switch (reg) {
case PMBUS_IOUT_UC_FAULT_LIMIT:
if (data->have_oc_fault) {
ret = -ENXIO;
break;
}
ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
break;
case PMBUS_IOUT_OC_FAULT_LIMIT:
if (!data->have_oc_fault) {
ret = -ENXIO;
break;
}
ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
break;
case PMBUS_VOUT_OV_WARN_LIMIT:
if (data->id != adm1075) {
ret = -ENODATA;
break;
}
ret = pmbus_read_word_data(client, 0,
ADM1075_VAUX_OV_WARN_LIMIT);
break;
case PMBUS_VOUT_UV_WARN_LIMIT:
if (data->id != adm1075) {
ret = -ENODATA;
break;
}
ret = pmbus_read_word_data(client, 0,
ADM1075_VAUX_UV_WARN_LIMIT);
break;
case PMBUS_READ_VOUT:
if (data->id != adm1075) {
ret = -ENODATA;
break;
}
ret = pmbus_read_word_data(client, 0, ADM1075_READ_VAUX);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT);
break;
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
break;
case PMBUS_VIRT_READ_PIN_MAX:
if (data->id == adm1275) {
ret = -ENXIO;
break;
}
ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_VIN_HISTORY:
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
if (data->id == adm1275)
ret = -ENXIO;
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
u16 word)
{
int ret;
if (page)
return -ENXIO;
switch (reg) {
case PMBUS_IOUT_UC_FAULT_LIMIT:
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_write_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT,
word);
break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
break;
case PMBUS_VIRT_RESET_VOUT_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
break;
case PMBUS_VIRT_RESET_VIN_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VIN, 0);
break;
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_word_data(client, 0, ADM1276_PEAK_PIN, 0);
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
{
const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
const struct adm1275_data *data = to_adm1275_data(info);
int mfr_status, ret;
if (page > 0)
return -ENXIO;
switch (reg) {
case PMBUS_STATUS_IOUT:
ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_IOUT);
if (ret < 0)
break;
mfr_status = pmbus_read_byte_data(client, page,
PMBUS_STATUS_MFR_SPECIFIC);
if (mfr_status < 0) {
ret = mfr_status;
break;
}
if (mfr_status & ADM1275_MFR_STATUS_IOUT_WARN2) {
ret |= data->have_oc_fault ?
PB_IOUT_OC_FAULT : PB_IOUT_UC_FAULT;
}
break;
case PMBUS_STATUS_VOUT:
if (data->id != adm1075) {
ret = -ENODATA;
break;
}
ret = 0;
mfr_status = pmbus_read_byte_data(client, 0,
ADM1075_VAUX_STATUS);
if (mfr_status < 0) {
ret = mfr_status;
break;
}
if (mfr_status & ADM1075_VAUX_OV_WARN)
ret |= PB_VOLTAGE_OV_WARNING;
if (mfr_status & ADM1075_VAUX_UV_WARN)
ret |= PB_VOLTAGE_UV_WARNING;
break;
default:
ret = -ENODATA;
break;
}
return ret;
}
static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1275_id);
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
struct pmbus_driver_info *info;
struct adm1275_data *data;
const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA
| I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, block_buffer);
if (ret < 0) {
dev_err(&client->dev, "Failed to read Manufacturer ID\n");
return ret;
}
if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
dev_err(&client->dev, "Unsupported Manufacturer ID\n");
return -ENODEV;
}
ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, block_buffer);
if (ret < 0) {
dev_err(&client->dev, "Failed to read Manufacturer Model\n");
return ret;
}
for (mid = adm1275_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
if (id->driver_data != mid->driver_data)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
if (config < 0)
return config;
device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
if (device_config < 0)
return device_config;
data = devm_kzalloc(&client->dev, sizeof(struct adm1275_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->id = mid->driver_data;
info = &data->info;
info->pages = 1;
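/*
 * Telemetry uses the PMBus "direct" data format: the real-world value
 * X is recovered from a register word Y as X = (Y * 10^-R - b) / m,
 * with the m/b/R coefficients below taken from the datasheet.
 */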
info->format[PSC_VOLTAGE_IN] = direct;
info->format[PSC_VOLTAGE_OUT] = direct;
info->format[PSC_CURRENT_OUT] = direct;
info->m[PSC_CURRENT_OUT] = 807;
info->b[PSC_CURRENT_OUT] = 20475;
info->R[PSC_CURRENT_OUT] = -1;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
info->read_word_data = adm1275_read_word_data;
info->read_byte_data = adm1275_read_byte_data;
info->write_word_data = adm1275_write_word_data;
if (data->id == adm1075) {
info->m[PSC_VOLTAGE_IN] = 27169;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -1;
info->m[PSC_VOLTAGE_OUT] = 27169;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -1;
} else if (config & ADM1275_VRANGE) {
info->m[PSC_VOLTAGE_IN] = 19199;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -2;
info->m[PSC_VOLTAGE_OUT] = 19199;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -2;
} else {
info->m[PSC_VOLTAGE_IN] = 6720;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -1;
info->m[PSC_VOLTAGE_OUT] = 6720;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -1;
}
if (device_config & ADM1275_IOUT_WARN2_SELECT)
data->have_oc_fault = true;
switch (data->id) {
case adm1075:
info->format[PSC_POWER] = direct;
info->b[PSC_POWER] = 0;
info->R[PSC_POWER] = -1;
switch (config & ADM1075_IRANGE_MASK) {
case ADM1075_IRANGE_25:
info->m[PSC_POWER] = 8549;
info->m[PSC_CURRENT_OUT] = 806;
break;
case ADM1075_IRANGE_50:
info->m[PSC_POWER] = 4279;
info->m[PSC_CURRENT_OUT] = 404;
break;
default:
dev_err(&client->dev, "Invalid input current range");
info->m[PSC_POWER] = 0;
info->m[PSC_CURRENT_OUT] = 0;
break;
}
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
| PMBUS_HAVE_STATUS_INPUT;
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1275:
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
else
info->func[0] |=
PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
break;
case adm1276:
info->format[PSC_POWER] = direct;
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
| PMBUS_HAVE_STATUS_INPUT;
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
if (config & ADM1275_VRANGE) {
info->m[PSC_POWER] = 6043;
info->b[PSC_POWER] = 0;
info->R[PSC_POWER] = -2;
} else {
info->m[PSC_POWER] = 2115;
info->b[PSC_POWER] = 0;
info->R[PSC_POWER] = -1;
}
break;
}
return pmbus_do_probe(client, id, info);
}
static struct i2c_driver adm1275_driver = {
.driver = {
.name = "adm1275",
},
.probe = adm1275_probe,
.remove = pmbus_do_remove,
.id_table = adm1275_id,
};
module_i2c_driver(adm1275_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for Analog Devices ADM1275 and compatibles");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CandyDevices/kernel_htc_msm8974 | arch/powerpc/math-emu/fnmadds.c | 13736 | 1169 | #include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/sfp-machine.h>
#include <math-emu/soft-fp.h>
#include <math-emu/double.h>
#include <math-emu/single.h>
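/*
 * Emulate fnmadds: frD = -((frA * frC) + frB), rounded to single
 * precision; VXIMZ is raised for inf * 0 and VXISI for inf - inf.
 */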
int
fnmadds(void *frD, void *frA, void *frB, void *frC)
{
FP_DECL_D(R);
FP_DECL_D(A);
FP_DECL_D(B);
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
#endif
FP_UNPACK_DP(A, frA);
FP_UNPACK_DP(B, frB);
FP_UNPACK_DP(C, frC);
#ifdef DEBUG
printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c);
printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c);
printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c);
#endif
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
if (R_c != FP_CLS_NAN)
R_s ^= 1;
#ifdef DEBUG
printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c);
#endif
__FP_PACK_DS(frD, R);
return FP_CUR_EXCEPTIONS;
}
| gpl-2.0 |
VM12/android_kernel_oneplus_msm8974 | arch/powerpc/math-emu/fnmadds.c | 13736 | 1169 | #include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/sfp-machine.h>
#include <math-emu/soft-fp.h>
#include <math-emu/double.h>
#include <math-emu/single.h>
int
fnmadds(void *frD, void *frA, void *frB, void *frC)
{
FP_DECL_D(R);
FP_DECL_D(A);
FP_DECL_D(B);
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
#endif
FP_UNPACK_DP(A, frA);
FP_UNPACK_DP(B, frB);
FP_UNPACK_DP(C, frC);
#ifdef DEBUG
printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c);
printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c);
printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c);
#endif
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
if (R_c != FP_CLS_NAN)
R_s ^= 1;
#ifdef DEBUG
printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c);
#endif
__FP_PACK_DS(frD, R);
return FP_CUR_EXCEPTIONS;
}
| gpl-2.0 |
no-cannabis/linux-matrix1 | arch/powerpc/math-emu/fnmsubs.c | 13736 | 1192 | #include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/sfp-machine.h>
#include <math-emu/soft-fp.h>
#include <math-emu/double.h>
#include <math-emu/single.h>
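/*
 * Emulate fnmsubs: frD = -((frA * frC) - frB), rounded to single
 * precision; frB's sign is flipped first so FP_ADD_D performs the
 * subtraction.
 */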
int
fnmsubs(void *frD, void *frA, void *frB, void *frC)
{
FP_DECL_D(R);
FP_DECL_D(A);
FP_DECL_D(B);
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
#endif
FP_UNPACK_DP(A, frA);
FP_UNPACK_DP(B, frB);
FP_UNPACK_DP(C, frC);
#ifdef DEBUG
printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c);
printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c);
printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c);
#endif
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (B_c != FP_CLS_NAN)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
if (R_c != FP_CLS_NAN)
R_s ^= 1;
#ifdef DEBUG
printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c);
#endif
__FP_PACK_DS(frD, R);
return FP_CUR_EXCEPTIONS;
}
| gpl-2.0 |
forumi0721/bananapi_linux-sunxi | arch/powerpc/math-emu/fnmadd.c | 13736 | 1138 | #include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/sfp-machine.h>
#include <math-emu/soft-fp.h>
#include <math-emu/double.h>
int
fnmadd(void *frD, void *frA, void *frB, void *frC)
{
FP_DECL_D(R);
FP_DECL_D(A);
FP_DECL_D(B);
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
#endif
FP_UNPACK_DP(A, frA);
FP_UNPACK_DP(B, frB);
FP_UNPACK_DP(C, frC);
#ifdef DEBUG
printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c);
printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c);
printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c);
#endif
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
if (R_c != FP_CLS_NAN)
R_s ^= 1;
#ifdef DEBUG
printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c);
#endif
__FP_PACK_D(frD, R);
return FP_CUR_EXCEPTIONS;
}
| gpl-2.0 |
Mazout360/kernel-maz | sound/oss/dmasound/dmasound_q40.c | 14504 | 14354 | /*
* linux/sound/oss/dmasound/dmasound_q40.c
*
* Q40 DMA Sound Driver
*
* See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
* - added versioning
* - put in and populated the hardware_afmts field.
* [0.2] - put in SNDCTL_DSP_GETCAPS value.
* [0.3] - put in default hard/soft settings.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/soundcard.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/q40ints.h>
#include <asm/q40_master.h>
#include "dmasound.h"
#define DMASOUND_Q40_REVISION 0
#define DMASOUND_Q40_EDITION 3
static int expand_bal; /* Balance factor for expanding (not volume!) */
static int expand_data; /* Data for expanding */
/*** Low level stuff *********************************************************/
static void *Q40Alloc(unsigned int size, gfp_t flags);
static void Q40Free(void *, unsigned int);
static int Q40IrqInit(void);
#ifdef MODULE
static void Q40IrqCleanUp(void);
#endif
static void Q40Silence(void);
static void Q40Init(void);
static int Q40SetFormat(int format);
static int Q40SetVolume(int volume);
static void Q40PlayNextFrame(int index);
static void Q40Play(void);
static irqreturn_t Q40StereoInterrupt(int irq, void *dummy);
static irqreturn_t Q40MonoInterrupt(int irq, void *dummy);
static void Q40Interrupt(void);
/*** Mid level stuff *********************************************************/
/* userCount, frameUsed, frameLeft == byte counts */
static ssize_t q40_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8: dmasound_alaw2dma8;
ssize_t count, used;
u_char *p = (u_char *) &frame[*frameUsed];
used = count = min_t(size_t, userCount, frameLeft);
if (copy_from_user(p,userPtr,count))
return -EFAULT;
while (count > 0) {
*p = table[*p]+128;
p++;
count--;
}
*frameUsed += used ;
return used;
}
static ssize_t q40_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
ssize_t count, used;
u_char *p = (u_char *) &frame[*frameUsed];
used = count = min_t(size_t, userCount, frameLeft);
if (copy_from_user(p,userPtr,count))
return -EFAULT;
while (count > 0) {
*p = *p + 128;
p++;
count--;
}
*frameUsed += used;
return used;
}
static ssize_t q40_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
ssize_t count, used;
u_char *p = (u_char *) &frame[*frameUsed];
used = count = min_t(size_t, userCount, frameLeft);
if (copy_from_user(p,userPtr,count))
return -EFAULT;
*frameUsed += used;
return used;
}
/* a bit too complicated to optimise right now ..*/
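/*
 * The q40_ctx_* helpers expand (upsample) on the fly: 'bal' is a
 * Bresenham-style error term between the hard and soft rates, and the
 * last input sample is repeated until the term goes negative again.
 */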
static ssize_t q40_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
unsigned char *table = (unsigned char *)
(dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8: dmasound_alaw2dma8);
unsigned int data = expand_data;
u_char *p = (u_char *) &frame[*frameUsed];
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
ftotal = frameLeft;
utotal = userCount;
while (frameLeft) {
u_char c;
if (bal < 0) {
if (userCount == 0)
break;
if (get_user(c, userPtr++))
return -EFAULT;
data = table[c];
data += 0x80;
userCount--;
bal += hSpeed;
}
*p++ = data;
frameLeft--;
bal -= sSpeed;
}
expand_bal = bal;
expand_data = data;
*frameUsed += (ftotal - frameLeft);
utotal -= userCount;
return utotal;
}
static ssize_t q40_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
u_char *p = (u_char *) &frame[*frameUsed];
unsigned int data = expand_data;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
ftotal = frameLeft;
utotal = userCount;
while (frameLeft) {
u_char c;
if (bal < 0) {
if (userCount == 0)
break;
if (get_user(c, userPtr++))
return -EFAULT;
data = c ;
data += 0x80;
userCount--;
bal += hSpeed;
}
*p++ = data;
frameLeft--;
bal -= sSpeed;
}
expand_bal = bal;
expand_data = data;
*frameUsed += (ftotal - frameLeft);
utotal -= userCount;
return utotal;
}
static ssize_t q40_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
u_char *p = (u_char *) &frame[*frameUsed];
unsigned int data = expand_data;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
ftotal = frameLeft;
utotal = userCount;
while (frameLeft) {
u_char c;
if (bal < 0) {
if (userCount == 0)
break;
if (get_user(c, userPtr++))
return -EFAULT;
data = c ;
userCount--;
bal += hSpeed;
}
*p++ = data;
frameLeft--;
bal -= sSpeed;
}
expand_bal = bal;
expand_data = data;
*frameUsed += (ftotal - frameLeft) ;
utotal -= userCount;
return utotal;
}
/* compressing versions */
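/*
 * The q40_ctc_* helpers compress (downsample): several input samples
 * are consumed per output sample, and only the last one inside each
 * output interval is actually fetched from user space.
 */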
static ssize_t q40_ctc_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
unsigned char *table = (unsigned char *)
(dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8: dmasound_alaw2dma8);
unsigned int data = expand_data;
u_char *p = (u_char *) &frame[*frameUsed];
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
ftotal = frameLeft;
utotal = userCount;
while (frameLeft) {
u_char c;
while(bal<0) {
if (userCount == 0)
goto lout;
if (!(bal<(-hSpeed))) {
if (get_user(c, userPtr))
return -EFAULT;
data = 0x80 + table[c];
}
userPtr++;
userCount--;
bal += hSpeed;
}
*p++ = data;
frameLeft--;
bal -= sSpeed;
}
lout:
expand_bal = bal;
expand_data = data;
*frameUsed += (ftotal - frameLeft);
utotal -= userCount;
return utotal;
}
static ssize_t q40_ctc_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
u_char *p = (u_char *) &frame[*frameUsed];
unsigned int data = expand_data;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
ftotal = frameLeft;
utotal = userCount;
while (frameLeft) {
u_char c;
while (bal < 0) {
if (userCount == 0)
goto lout;
if (!(bal<(-hSpeed))) {
if (get_user(c, userPtr))
return -EFAULT;
data = c + 0x80;
}
userPtr++;
userCount--;
bal += hSpeed;
}
*p++ = data;
frameLeft--;
bal -= sSpeed;
}
lout:
expand_bal = bal;
expand_data = data;
*frameUsed += (ftotal - frameLeft);
utotal -= userCount;
return utotal;
}
static ssize_t q40_ctc_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
u_char *p = (u_char *) &frame[*frameUsed];
unsigned int data = expand_data;
int bal = expand_bal;
int hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
int utotal, ftotal;
ftotal = frameLeft;
utotal = userCount;
while (frameLeft) {
u_char c;
while (bal < 0) {
if (userCount == 0)
goto lout;
if (!(bal<(-hSpeed))) {
if (get_user(c, userPtr))
return -EFAULT;
data = c ;
}
userPtr++;
userCount--;
bal += hSpeed;
}
*p++ = data;
frameLeft--;
bal -= sSpeed;
}
lout:
expand_bal = bal;
expand_data = data;
*frameUsed += (ftotal - frameLeft) ;
utotal -= userCount;
return utotal;
}
static TRANS transQ40Normal = {
q40_ct_law, q40_ct_law, q40_ct_s8, q40_ct_u8, NULL, NULL, NULL, NULL
};
static TRANS transQ40Expanding = {
q40_ctx_law, q40_ctx_law, q40_ctx_s8, q40_ctx_u8, NULL, NULL, NULL, NULL
};
static TRANS transQ40Compressing = {
q40_ctc_law, q40_ctc_law, q40_ctc_s8, q40_ctc_u8, NULL, NULL, NULL, NULL
};
/*** Low level stuff *********************************************************/
static void *Q40Alloc(unsigned int size, gfp_t flags)
{
return kmalloc(size, flags); /* change to vmalloc */
}
static void Q40Free(void *ptr, unsigned int size)
{
kfree(ptr);
}
static int __init Q40IrqInit(void)
{
/* Register interrupt handler. */
if (request_irq(Q40_IRQ_SAMPLE, Q40StereoInterrupt, 0,
"DMA sound", Q40Interrupt))
return 0;
return(1);
}
#ifdef MODULE
static void Q40IrqCleanUp(void)
{
master_outb(0,SAMPLE_ENABLE_REG);
free_irq(Q40_IRQ_SAMPLE, Q40Interrupt);
}
#endif /* MODULE */
static void Q40Silence(void)
{
master_outb(0,SAMPLE_ENABLE_REG);
*DAC_LEFT=*DAC_RIGHT=127;
}
static char *q40_pp;
static unsigned int q40_sc;
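/*
 * Playback cursor: q40_pp walks through the active frame while q40_sc
 * counts the bytes still to be fed to the DACs from the sample
 * interrupt (two per tick in stereo, one in mono).
 */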
static void Q40PlayNextFrame(int index)
{
u_char *start;
u_long size;
u_char speed;
int error;
/* Called from Q40Play() once all doubt that there really is
 * something to be played has been removed.
 */
start = write_sq.buffers[write_sq.front];
size = (write_sq.count == index ? write_sq.rear_size : write_sq.block_size);
q40_pp=start;
q40_sc=size;
write_sq.front = (write_sq.front+1) % write_sq.max_count;
write_sq.active++;
speed=(dmasound.hard.speed==10000 ? 0 : 1);
master_outb( 0,SAMPLE_ENABLE_REG);
free_irq(Q40_IRQ_SAMPLE, Q40Interrupt);
if (dmasound.soft.stereo)
error = request_irq(Q40_IRQ_SAMPLE, Q40StereoInterrupt, 0,
"Q40 sound", Q40Interrupt);
else
error = request_irq(Q40_IRQ_SAMPLE, Q40MonoInterrupt, 0,
"Q40 sound", Q40Interrupt);
if (error && printk_ratelimit())
pr_err("Couldn't register sound interrupt\n");
master_outb( speed, SAMPLE_RATE_REG);
master_outb( 1,SAMPLE_CLEAR_REG);
master_outb( 1,SAMPLE_ENABLE_REG);
}
static void Q40Play(void)
{
unsigned long flags;
if (write_sq.active || write_sq.count<=0 ) {
/* There's already a frame loaded */
return;
}
/* nothing in the queue */
if (write_sq.count <= 1 && write_sq.rear_size < write_sq.block_size && !write_sq.syncing) {
/* hmmm, the only existing frame is not
* yet filled and we're not syncing?
*/
return;
}
spin_lock_irqsave(&dmasound.lock, flags);
Q40PlayNextFrame(1);
spin_unlock_irqrestore(&dmasound.lock, flags);
}
static irqreturn_t Q40StereoInterrupt(int irq, void *dummy)
{
spin_lock(&dmasound.lock);
if (q40_sc>1){
*DAC_LEFT=*q40_pp++;
*DAC_RIGHT=*q40_pp++;
q40_sc -=2;
master_outb(1,SAMPLE_CLEAR_REG);
}else Q40Interrupt();
spin_unlock(&dmasound.lock);
return IRQ_HANDLED;
}
static irqreturn_t Q40MonoInterrupt(int irq, void *dummy)
{
spin_lock(&dmasound.lock);
if (q40_sc>0){
*DAC_LEFT=*q40_pp;
*DAC_RIGHT=*q40_pp++;
q40_sc --;
master_outb(1,SAMPLE_CLEAR_REG);
}else Q40Interrupt();
spin_unlock(&dmasound.lock);
return IRQ_HANDLED;
}
static void Q40Interrupt(void)
{
if (!write_sq.active) {
/* playing was interrupted and sq_reset() has already cleared
* the sq variables, so better don't do anything here.
*/
WAKE_UP(write_sq.sync_queue);
master_outb(0,SAMPLE_ENABLE_REG); /* better safe */
goto exit;
} else write_sq.active=0;
write_sq.count--;
Q40Play();
if (q40_sc<2)
{ /* there was nothing to play, disable irq */
master_outb(0,SAMPLE_ENABLE_REG);
*DAC_LEFT=*DAC_RIGHT=127;
}
WAKE_UP(write_sq.action_queue);
exit:
master_outb(1,SAMPLE_CLEAR_REG);
}
static void Q40Init(void)
{
int i, idx;
const int freq[] = {10000, 20000};
/* search a frequency that fits into the allowed error range */
idx = -1;
for (i = 0; i < 2; i++)
if ((100 * abs(dmasound.soft.speed - freq[i]) / freq[i]) <= catchRadius)
idx = i;
dmasound.hard = dmasound.soft;
/*sound.hard.stereo=1;*/ /* no longer true */
dmasound.hard.size=8;
if (idx > -1) {
dmasound.soft.speed = freq[idx];
dmasound.trans_write = &transQ40Normal;
} else
dmasound.trans_write = &transQ40Expanding;
Q40Silence();
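/*
 * The hardware can only clock samples out at 10 or 20 kHz; any other
 * soft rate is reached by expanding or compressing in the copy
 * routines selected in this function.
 */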
if (dmasound.hard.speed > 20200) {
/* squeeze the sound, we do that */
dmasound.hard.speed = 20000;
dmasound.trans_write = &transQ40Compressing;
} else if (dmasound.hard.speed > 10000) {
dmasound.hard.speed = 20000;
} else {
dmasound.hard.speed = 10000;
}
expand_bal = -dmasound.soft.speed;
}
static int Q40SetFormat(int format)
{
/* Q40 sound supports only 8bit modes */
switch (format) {
case AFMT_QUERY:
return(dmasound.soft.format);
case AFMT_MU_LAW:
case AFMT_A_LAW:
case AFMT_S8:
case AFMT_U8:
break;
default:
format = AFMT_S8;
}
dmasound.soft.format = format;
dmasound.soft.size = 8;
if (dmasound.minDev == SND_DEV_DSP) {
dmasound.dsp.format = format;
dmasound.dsp.size = 8;
}
Q40Init();
return format;
}
static int Q40SetVolume(int volume)
{
return 0;
}
/*** Machine definitions *****************************************************/
static SETTINGS def_hard = {
.format = AFMT_U8,
.stereo = 0,
.size = 8,
.speed = 10000
} ;
static SETTINGS def_soft = {
.format = AFMT_U8,
.stereo = 0,
.size = 8,
.speed = 8000
} ;
static MACHINE machQ40 = {
.name = "Q40",
.name2 = "Q40",
.owner = THIS_MODULE,
.dma_alloc = Q40Alloc,
.dma_free = Q40Free,
.irqinit = Q40IrqInit,
#ifdef MODULE
.irqcleanup = Q40IrqCleanUp,
#endif /* MODULE */
.init = Q40Init,
.silence = Q40Silence,
.setFormat = Q40SetFormat,
.setVolume = Q40SetVolume,
.play = Q40Play,
.min_dsp_speed = 10000,
.version = ((DMASOUND_Q40_REVISION<<8) | DMASOUND_Q40_EDITION),
.hardware_afmts = AFMT_U8, /* h'ware-supported formats *only* here */
.capabilities = DSP_CAP_BATCH /* As per SNDCTL_DSP_GETCAPS */
};
/*** Config & Setup **********************************************************/
static int __init dmasound_q40_init(void)
{
if (MACH_IS_Q40) {
dmasound.mach = machQ40;
dmasound.mach.default_hard = def_hard ;
dmasound.mach.default_soft = def_soft ;
return dmasound_init();
} else
return -ENODEV;
}
static void __exit dmasound_q40_cleanup(void)
{
dmasound_deinit();
}
module_init(dmasound_q40_init);
module_exit(dmasound_q40_cleanup);
MODULE_DESCRIPTION("Q40/Q60 sound driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mrwargod/boeffla-kernel-slimversion-bacon | arch/sparc/kernel/us2e_cpufreq.c | 169 | 9883 | /* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*
* Many thanks to Dominik Brodowski for fixing up the cpufreq
* infrastructure in order to make this driver easier to implement.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/asi.h>
#include <asm/timer.h>
static struct cpufreq_driver *cpufreq_us2e_driver;
struct us2e_freq_percpu_info {
struct cpufreq_frequency_table table[6];
};
/* Indexed by cpu number. */
static struct us2e_freq_percpu_info *us2e_freq_table;
#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
* in the ESTAR mode control register.
*/
#define ESTAR_MODE_DIV_1 0x0000000000000000UL
#define ESTAR_MODE_DIV_2 0x0000000000000001UL
#define ESTAR_MODE_DIV_4 0x0000000000000003UL
#define ESTAR_MODE_DIV_6 0x0000000000000002UL
#define ESTAR_MODE_DIV_8 0x0000000000000004UL
#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
#define MCTRL0_REFR_COUNT_SHIFT 8
#define MCTRL0_REFR_INTERVAL 7800
#define MCTRL0_REFR_CLKS_P_CNT 64
static unsigned long read_hbreg(unsigned long addr)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=&r" (ret)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
return ret;
}
static void write_hbreg(unsigned long addr, unsigned long val)
{
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
: "memory");
if (addr == HBIRD_ESTAR_MODE_ADDR) {
/* Need to wait 16 clock cycles for the PLL to lock. */
udelay(1);
}
}
static void self_refresh_ctl(int enable)
{
unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
if (enable)
mctrl |= MCTRL0_SREFRESH_ENAB;
else
mctrl &= ~MCTRL0_SREFRESH_ENAB;
write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
}
static void frob_mem_refresh(int cpu_slowing_down,
unsigned long clock_tick,
unsigned long old_divisor, unsigned long divisor)
{
unsigned long old_refr_count, refr_count, mctrl;
refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
>> MCTRL0_REFR_COUNT_SHIFT;
mctrl &= ~MCTRL0_REFR_COUNT_MASK;
mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
unsigned long usecs;
/* We have to wait for both refresh counts (old
* and new) to go to zero.
*/
usecs = (MCTRL0_REFR_CLKS_P_CNT *
(refr_count + old_refr_count) *
1000000UL *
old_divisor) / clock_tick;
udelay(usecs + 1UL);
}
}
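/*
* Editor's sketch (illustrative, not part of the driver): the refresh
* count computed above, factored out so the arithmetic is easy to check.
* For an assumed 500 MHz clock_tick (500000000UL) at divisor 1 this
* yields 500000000 * 7800 / (64 * 1 * 1000000000) == 60.
*/
static inline unsigned long example_refr_count(unsigned long clock_tick,
unsigned long divisor)
{
return (clock_tick * MCTRL0_REFR_INTERVAL) /
(MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
}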
static void us2e_transition(unsigned long estar, unsigned long new_bits,
unsigned long clock_tick,
unsigned long old_divisor, unsigned long divisor)
{
unsigned long flags;
local_irq_save(flags);
estar &= ~ESTAR_MODE_DIV_MASK;
/* This is based upon the state transition diagram in the IIe manual. */
if (old_divisor == 2 && divisor == 1) {
self_refresh_ctl(0);
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
frob_mem_refresh(0, clock_tick, old_divisor, divisor);
} else if (old_divisor == 1 && divisor == 2) {
frob_mem_refresh(1, clock_tick, old_divisor, divisor);
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
self_refresh_ctl(1);
} else if (old_divisor == 1 && divisor > 2) {
us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
1, 2);
us2e_transition(estar, new_bits, clock_tick,
2, divisor);
} else if (old_divisor > 2 && divisor == 1) {
us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
old_divisor, 2);
us2e_transition(estar, new_bits, clock_tick,
2, divisor);
} else if (old_divisor < divisor) {
frob_mem_refresh(0, clock_tick, old_divisor, divisor);
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
} else if (old_divisor > divisor) {
write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
frob_mem_refresh(1, clock_tick, old_divisor, divisor);
} else {
BUG();
}
local_irq_restore(flags);
}
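/*
* Editor's note (illustrative): a divisor change of 1 -> 6 decomposes
* recursively into us2e_transition(..., 1, 2) followed by
* us2e_transition(..., 2, 6); memory self-refresh is only toggled on
* the 1 <-> 2 legs, matching the manual's transition diagram.
*/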
static unsigned long index_to_estar_mode(unsigned int index)
{
switch (index) {
case 0:
return ESTAR_MODE_DIV_1;
case 1:
return ESTAR_MODE_DIV_2;
case 2:
return ESTAR_MODE_DIV_4;
case 3:
return ESTAR_MODE_DIV_6;
case 4:
return ESTAR_MODE_DIV_8;
default:
BUG();
}
}
static unsigned long index_to_divisor(unsigned int index)
{
switch (index) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 6;
case 4:
return 8;
default:
BUG();
}
}
static unsigned long estar_to_divisor(unsigned long estar)
{
unsigned long ret;
switch (estar & ESTAR_MODE_DIV_MASK) {
case ESTAR_MODE_DIV_1:
ret = 1;
break;
case ESTAR_MODE_DIV_2:
ret = 2;
break;
case ESTAR_MODE_DIV_4:
ret = 4;
break;
case ESTAR_MODE_DIV_6:
ret = 6;
break;
case ESTAR_MODE_DIV_8:
ret = 8;
break;
default:
BUG();
}
return ret;
}
static unsigned int us2e_freq_get(unsigned int cpu)
{
cpumask_t cpus_allowed;
unsigned long clock_tick, estar;
if (!cpu_online(cpu))
return 0;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
set_cpus_allowed_ptr(current, &cpus_allowed);
return clock_tick / estar_to_divisor(estar);
}
static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
unsigned int index)
{
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq;
unsigned long clock_tick, divisor, old_divisor, estar;
cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
if (!cpu_online(cpu))
return;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
new_bits = index_to_estar_mode(index);
divisor = index_to_divisor(index);
new_freq /= divisor;
estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
old_divisor = estar_to_divisor(estar);
freqs.old = clock_tick / old_divisor;
freqs.new = new_freq;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (old_divisor != divisor)
us2e_transition(estar, new_bits, clock_tick * 1000,
old_divisor, divisor);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
set_cpus_allowed_ptr(current, &cpus_allowed);
}
static int us2e_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int new_index = 0;
if (cpufreq_frequency_table_target(policy,
&us2e_freq_table[policy->cpu].table[0],
target_freq, relation, &new_index))
return -EINVAL;
us2e_set_cpu_divider_index(policy, new_index);
return 0;
}
static int us2e_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&us2e_freq_table[policy->cpu].table[0]);
}
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
struct cpufreq_frequency_table *table =
&us2e_freq_table[cpu].table[0];
table[0].index = 0;
table[0].frequency = clock_tick / 1;
table[1].index = 1;
table[1].frequency = clock_tick / 2;
table[2].index = 2;
table[2].frequency = clock_tick / 4;
table[3].index = 3;
table[3].frequency = clock_tick / 6;
table[4].index = 4;
table[4].frequency = clock_tick / 8;
table[5].index = 5;
table[5].frequency = CPUFREQ_TABLE_END;
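/*
* Editor's example (illustrative): for an assumed 500 MHz part
* (clock_tick == 500000 kHz) the table reads 500000, 250000, 125000,
* 83333 and 62500 kHz, followed by the terminator.
*/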
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
return cpufreq_frequency_table_cpuinfo(policy, table);
}
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
if (cpufreq_us2e_driver)
us2e_set_cpu_divider_index(policy, 0);
return 0;
}
static int __init us2e_freq_init(void)
{
unsigned long manuf, impl, ver;
int ret;
if (tlb_type != spitfire)
return -ENODEV;
__asm__("rdpr %%ver, %0" : "=r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
struct cpufreq_driver *driver;
ret = -ENOMEM;
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
if (!driver)
goto err_out;
us2e_freq_table = kzalloc(
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
GFP_KERNEL);
if (!us2e_freq_table)
goto err_out;
driver->init = us2e_freq_cpu_init;
driver->verify = us2e_freq_verify;
driver->target = us2e_freq_target;
driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
driver->owner = THIS_MODULE;
strcpy(driver->name, "UltraSPARC-IIe");
cpufreq_us2e_driver = driver;
ret = cpufreq_register_driver(driver);
if (ret)
goto err_out;
return 0;
err_out:
if (driver) {
kfree(driver);
cpufreq_us2e_driver = NULL;
}
kfree(us2e_freq_table);
us2e_freq_table = NULL;
return ret;
}
return -ENODEV;
}
static void __exit us2e_freq_exit(void)
{
if (cpufreq_us2e_driver) {
cpufreq_unregister_driver(cpufreq_us2e_driver);
kfree(cpufreq_us2e_driver);
cpufreq_us2e_driver = NULL;
kfree(us2e_freq_table);
us2e_freq_table = NULL;
}
}
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
MODULE_LICENSE("GPL");
module_init(us2e_freq_init);
module_exit(us2e_freq_exit);
| gpl-2.0 |
lostemp/port_linux-2.6.30.4 | drivers/net/wireless/b43legacy/xmit.c | 169 | 18549 | /*
Broadcom B43legacy wireless driver
Transmission (TX/RX) related functions.
Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
Copyright (C) 2005 Stefano Brivio <stefano.brivio@polimi.it>
Copyright (C) 2005, 2006 Michael Buesch <mb@bu3sch.de>
Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (C) 2007 Larry Finger <Larry.Finger@lwfinger.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include <net/dst.h>
#include "xmit.h"
#include "phy.h"
#include "dma.h"
#include "pio.h"
/* Extract the bitrate out of a CCK PLCP header. */
static u8 b43legacy_plcp_get_bitrate_idx_cck(struct b43legacy_plcp_hdr6 *plcp)
{
switch (plcp->raw[0]) {
case 0x0A:
return 0;
case 0x14:
return 1;
case 0x37:
return 2;
case 0x6E:
return 3;
}
B43legacy_BUG_ON(1);
return -1;
}
/* Extract the bitrate out of an OFDM PLCP header. */
static u8 b43legacy_plcp_get_bitrate_idx_ofdm(struct b43legacy_plcp_hdr6 *plcp,
bool aphy)
{
int base = aphy ? 0 : 4;
switch (plcp->raw[0] & 0xF) {
case 0xB:
return base + 0;
case 0xF:
return base + 1;
case 0xA:
return base + 2;
case 0xE:
return base + 3;
case 0x9:
return base + 4;
case 0xD:
return base + 5;
case 0x8:
return base + 6;
case 0xC:
return base + 7;
}
B43legacy_BUG_ON(1);
return -1;
}
u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate)
{
switch (bitrate) {
case B43legacy_CCK_RATE_1MB:
return 0x0A;
case B43legacy_CCK_RATE_2MB:
return 0x14;
case B43legacy_CCK_RATE_5MB:
return 0x37;
case B43legacy_CCK_RATE_11MB:
return 0x6E;
}
B43legacy_BUG_ON(1);
return 0;
}
u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate)
{
switch (bitrate) {
case B43legacy_OFDM_RATE_6MB:
return 0xB;
case B43legacy_OFDM_RATE_9MB:
return 0xF;
case B43legacy_OFDM_RATE_12MB:
return 0xA;
case B43legacy_OFDM_RATE_18MB:
return 0xE;
case B43legacy_OFDM_RATE_24MB:
return 0x9;
case B43legacy_OFDM_RATE_36MB:
return 0xD;
case B43legacy_OFDM_RATE_48MB:
return 0x8;
case B43legacy_OFDM_RATE_54MB:
return 0xC;
}
B43legacy_BUG_ON(1);
return 0;
}
void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp,
const u16 octets, const u8 bitrate)
{
__le32 *data = &(plcp->data);
__u8 *raw = plcp->raw;
if (b43legacy_is_ofdm_rate(bitrate)) {
u16 d;
d = b43legacy_plcp_get_ratecode_ofdm(bitrate);
B43legacy_WARN_ON(octets & 0xF000);
d |= (octets << 5);
*data = cpu_to_le32(d);
} else {
u32 plen;
plen = octets * 16 / bitrate;
if ((octets * 16 % bitrate) > 0) {
plen++;
if ((bitrate == B43legacy_CCK_RATE_11MB)
&& ((octets * 8 % 11) < 4))
raw[1] = 0x84;
else
raw[1] = 0x04;
} else
raw[1] = 0x04;
*data |= cpu_to_le32(plen << 16);
raw[0] = b43legacy_plcp_get_ratecode_cck(bitrate);
}
}
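/*
* Editor's example (illustrative): for a 1500 octet frame at 11 Mb/s
* (bitrate == 22 in 500 kb/s units) the CCK branch above computes
* plen = 1500 * 16 / 22 = 1090 with a nonzero remainder, so plen becomes
* 1091; (1500 * 8) % 11 == 10, which is not < 4, so the service byte
* stays 0x04 and the length-extension bit (0x84) is not set.
*/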
static u8 b43legacy_calc_fallback_rate(u8 bitrate)
{
switch (bitrate) {
case B43legacy_CCK_RATE_1MB:
return B43legacy_CCK_RATE_1MB;
case B43legacy_CCK_RATE_2MB:
return B43legacy_CCK_RATE_1MB;
case B43legacy_CCK_RATE_5MB:
return B43legacy_CCK_RATE_2MB;
case B43legacy_CCK_RATE_11MB:
return B43legacy_CCK_RATE_5MB;
case B43legacy_OFDM_RATE_6MB:
return B43legacy_CCK_RATE_5MB;
case B43legacy_OFDM_RATE_9MB:
return B43legacy_OFDM_RATE_6MB;
case B43legacy_OFDM_RATE_12MB:
return B43legacy_OFDM_RATE_9MB;
case B43legacy_OFDM_RATE_18MB:
return B43legacy_OFDM_RATE_12MB;
case B43legacy_OFDM_RATE_24MB:
return B43legacy_OFDM_RATE_18MB;
case B43legacy_OFDM_RATE_36MB:
return B43legacy_OFDM_RATE_24MB;
case B43legacy_OFDM_RATE_48MB:
return B43legacy_OFDM_RATE_36MB;
case B43legacy_OFDM_RATE_54MB:
return B43legacy_OFDM_RATE_48MB;
}
B43legacy_BUG_ON(1);
return 0;
}
static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
struct b43legacy_txhdr_fw3 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
struct ieee80211_tx_info *info,
u16 cookie)
{
const struct ieee80211_hdr *wlhdr;
int use_encryption = !!info->control.hw_key;
u8 rate;
struct ieee80211_rate *rate_fb;
int rate_ofdm;
int rate_fb_ofdm;
unsigned int plcp_fragment_len;
u32 mac_ctl = 0;
u16 phy_ctl = 0;
struct ieee80211_rate *tx_rate;
struct ieee80211_tx_rate *rates;
wlhdr = (const struct ieee80211_hdr *)fragment_data;
memset(txhdr, 0, sizeof(*txhdr));
tx_rate = ieee80211_get_tx_rate(dev->wl->hw, info);
rate = tx_rate->hw_value;
rate_ofdm = b43legacy_is_ofdm_rate(rate);
rate_fb = ieee80211_get_alt_retry_rate(dev->wl->hw, info, 0) ? : tx_rate;
rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
txhdr->mac_frame_ctl = wlhdr->frame_control;
memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
/* Calculate duration for fallback rate */
if ((rate_fb->hw_value == rate) ||
(wlhdr->duration_id & cpu_to_le16(0x8000)) ||
(wlhdr->duration_id == cpu_to_le16(0))) {
/* If the fallback rate equals the normal rate or the
* dur_id field contains an AID, CFP magic or 0,
* use the original dur_id field. */
txhdr->dur_fb = wlhdr->duration_id;
} else {
txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
info->control.vif,
fragment_len,
rate_fb);
}
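/*
* Editor's note (illustrative): a PS-Poll, for example, carries an AID
* in duration_id with the top bit set, so the first branch above keeps
* the field verbatim instead of computing a fallback-rate duration.
*/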
plcp_fragment_len = fragment_len + FCS_LEN;
if (use_encryption) {
u8 key_idx = info->control.hw_key->hw_key_idx;
struct b43legacy_key *key;
int wlhdr_len;
size_t iv_len;
B43legacy_WARN_ON(key_idx >= dev->max_nr_keys);
key = &(dev->key[key_idx]);
if (key->enabled) {
/* Hardware appends ICV. */
plcp_fragment_len += info->control.hw_key->icv_len;
key_idx = b43legacy_kidx_to_fw(dev, key_idx);
mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) &
B43legacy_TX4_MAC_KEYIDX;
mac_ctl |= (key->algorithm <<
B43legacy_TX4_MAC_KEYALG_SHIFT) &
B43legacy_TX4_MAC_KEYALG;
wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
iv_len = min((size_t)info->control.hw_key->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
} else {
/* This key is invalid. This might only happen
* in a short timeframe after machine resume before
* we were able to reconfigure keys.
* Drop this packet completely. Do not transmit it
* unencrypted to avoid leaking information. */
return -ENOKEY;
}
}
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp), plcp_fragment_len,
rate);
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp_fb), plcp_fragment_len,
rate_fb->hw_value);
/* PHY TX Control word */
if (rate_ofdm)
phy_ctl |= B43legacy_TX4_PHY_OFDM;
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
switch (info->antenna_sel_tx) {
case 0:
phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
break;
case 1:
phy_ctl |= B43legacy_TX4_PHY_ANT0;
break;
case 2:
phy_ctl |= B43legacy_TX4_PHY_ANT1;
break;
default:
B43legacy_BUG_ON(1);
}
/* MAC control */
rates = info->control.rates;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
mac_ctl |= B43legacy_TX4_MAC_ACK;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
mac_ctl |= B43legacy_TX4_MAC_HWSEQ;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
mac_ctl |= B43legacy_TX4_MAC_STMSDU;
if (rate_fb_ofdm)
mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM;
/* Overwrite rates[0].count to make the retry calculation
* in the tx status easier. need the actual retry limit to
* detect whether the fallback rate was used.
*/
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].count <= dev->wl->hw->conf.long_frame_max_tx_count)) {
rates[0].count = dev->wl->hw->conf.long_frame_max_tx_count;
mac_ctl |= B43legacy_TX4_MAC_LONGFRAME;
} else {
rates[0].count = dev->wl->hw->conf.short_frame_max_tx_count;
}
/* Generate the RTS or CTS-to-self frame */
if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
unsigned int len;
struct ieee80211_hdr *hdr;
int rts_rate;
int rts_rate_fb;
int rts_rate_ofdm;
int rts_rate_fb_ofdm;
rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value;
rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
if (rts_rate_fb_ofdm)
mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM;
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
ieee80211_ctstoself_get(dev->wl->hw,
info->control.vif,
fragment_data,
fragment_len, info,
(struct ieee80211_cts *)
(txhdr->rts_frame));
mac_ctl |= B43legacy_TX4_MAC_SENDCTS;
len = sizeof(struct ieee80211_cts);
} else {
ieee80211_rts_get(dev->wl->hw,
info->control.vif,
fragment_data, fragment_len, info,
(struct ieee80211_rts *)
(txhdr->rts_frame));
mac_ctl |= B43legacy_TX4_MAC_SENDRTS;
len = sizeof(struct ieee80211_rts);
}
len += FCS_LEN;
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp),
len, rts_rate);
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp_fb),
len, rts_rate_fb);
hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
txhdr->rts_dur_fb = hdr->duration_id;
}
/* Magic cookie */
txhdr->cookie = cpu_to_le16(cookie);
/* Apply the bitfields */
txhdr->mac_ctl = cpu_to_le32(mac_ctl);
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
return 0;
}
int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
u8 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
struct ieee80211_tx_info *info,
u16 cookie)
{
return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
fragment_data, fragment_len,
info, cookie);
}
static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev,
u8 in_rssi, int ofdm,
int adjust_2053, int adjust_2050)
{
struct b43legacy_phy *phy = &dev->phy;
s32 tmp;
switch (phy->radio_ver) {
case 0x2050:
if (ofdm) {
tmp = in_rssi;
if (tmp > 127)
tmp -= 256;
tmp *= 73;
tmp /= 64;
if (adjust_2050)
tmp += 25;
else
tmp -= 3;
} else {
if (dev->dev->bus->sprom.boardflags_lo
& B43legacy_BFL_RSSI) {
if (in_rssi > 63)
in_rssi = 63;
tmp = phy->nrssi_lt[in_rssi];
tmp = 31 - tmp;
tmp *= -131;
tmp /= 128;
tmp -= 57;
} else {
tmp = in_rssi;
tmp = 31 - tmp;
tmp *= -149;
tmp /= 128;
tmp -= 68;
}
if (phy->type == B43legacy_PHYTYPE_G &&
adjust_2050)
tmp += 25;
}
break;
case 0x2060:
if (in_rssi > 127)
tmp = in_rssi - 256;
else
tmp = in_rssi;
break;
default:
tmp = in_rssi;
tmp -= 11;
tmp *= 103;
tmp /= 64;
if (adjust_2053)
tmp -= 109;
else
tmp -= 83;
}
return (s8)tmp;
}
void b43legacy_rx(struct b43legacy_wldev *dev,
struct sk_buff *skb,
const void *_rxhdr)
{
struct ieee80211_rx_status status;
struct b43legacy_plcp_hdr6 *plcp;
struct ieee80211_hdr *wlhdr;
const struct b43legacy_rxhdr_fw3 *rxhdr = _rxhdr;
__le16 fctl;
u16 phystat0;
u16 phystat3;
u16 chanstat;
u16 mactime;
u32 macstat;
u16 chanid;
u8 jssi;
int padding;
memset(&status, 0, sizeof(status));
/* Get metadata about the frame from the header. */
phystat0 = le16_to_cpu(rxhdr->phy_status0);
phystat3 = le16_to_cpu(rxhdr->phy_status3);
jssi = rxhdr->jssi;
macstat = le32_to_cpu(rxhdr->mac_status);
mactime = le16_to_cpu(rxhdr->mac_time);
chanstat = le16_to_cpu(rxhdr->channel);
if (macstat & B43legacy_RX_MAC_FCSERR)
dev->wl->ieee_stats.dot11FCSErrorCount++;
/* Skip PLCP and padding */
padding = (macstat & B43legacy_RX_MAC_PADDING) ? 2 : 0;
if (unlikely(skb->len < (sizeof(struct b43legacy_plcp_hdr6) +
padding))) {
b43legacydbg(dev->wl, "RX: Packet size underrun (1)\n");
goto drop;
}
plcp = (struct b43legacy_plcp_hdr6 *)(skb->data + padding);
skb_pull(skb, sizeof(struct b43legacy_plcp_hdr6) + padding);
/* The skb contains the Wireless Header + payload data now */
if (unlikely(skb->len < (2 + 2 + 6 /* minimum hdr */ + FCS_LEN))) {
b43legacydbg(dev->wl, "RX: Packet size underrun (2)\n");
goto drop;
}
wlhdr = (struct ieee80211_hdr *)(skb->data);
fctl = wlhdr->frame_control;
if ((macstat & B43legacy_RX_MAC_DEC) &&
!(macstat & B43legacy_RX_MAC_DECERR)) {
unsigned int keyidx;
int wlhdr_len;
int iv_len;
int icv_len;
keyidx = ((macstat & B43legacy_RX_MAC_KEYIDX)
>> B43legacy_RX_MAC_KEYIDX_SHIFT);
/* We must adjust the key index here. We want the "physical"
* key index, but the ucode passed it slightly different.
*/
keyidx = b43legacy_kidx_to_raw(dev, keyidx);
B43legacy_WARN_ON(keyidx >= dev->max_nr_keys);
if (dev->key[keyidx].algorithm != B43legacy_SEC_ALGO_NONE) {
/* Remove PROTECTED flag to mark it as decrypted. */
B43legacy_WARN_ON(!ieee80211_has_protected(fctl));
fctl &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED);
wlhdr->frame_control = fctl;
wlhdr_len = ieee80211_hdrlen(fctl);
if (unlikely(skb->len < (wlhdr_len + 3))) {
b43legacydbg(dev->wl, "RX: Packet size"
" underrun3\n");
goto drop;
}
if (skb->data[wlhdr_len + 3] & (1 << 5)) {
/* The Ext-IV Bit is set in the "KeyID"
* octet of the IV.
*/
iv_len = 8;
icv_len = 8;
} else {
iv_len = 4;
icv_len = 4;
}
if (unlikely(skb->len < (wlhdr_len + iv_len +
icv_len))) {
b43legacydbg(dev->wl, "RX: Packet size"
" underrun4\n");
goto drop;
}
/* Remove the IV */
memmove(skb->data + iv_len, skb->data, wlhdr_len);
skb_pull(skb, iv_len);
/* Remove the ICV */
skb_trim(skb, skb->len - icv_len);
status.flag |= RX_FLAG_DECRYPTED;
}
}
status.signal = b43legacy_rssi_postprocess(dev, jssi,
(phystat0 & B43legacy_RX_PHYST0_OFDM),
(phystat0 & B43legacy_RX_PHYST0_GAINCTL),
(phystat3 & B43legacy_RX_PHYST3_TRSTATE));
status.noise = dev->stats.link_noise;
status.qual = (jssi * 100) / B43legacy_RX_MAX_SSI;
/* change to support A PHY */
if (phystat0 & B43legacy_RX_PHYST0_OFDM)
status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
else
status.rate_idx = b43legacy_plcp_get_bitrate_idx_cck(plcp);
status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT);
/*
* All frames on monitor interfaces and beacons always need a full
* 64-bit timestamp. Monitor interfaces need it for diagnostic
* purposes and beacons for IBSS merging.
* This code assumes we get to process the packet within 16 bits
* of timestamp, i.e. about 65 milliseconds after the PHY received
* the first symbol.
*/
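/*
* Editor's example (illustrative): if the full TSF reads 0x12345678 and
* the header's 16-bit mactime is 0xF000, then low_mactime_now (0x5678)
* <= 0xF000 means the low word already wrapped, so the result is
* (0x12340000 + 0xF000) - 0x10000 == 0x1233F000.
*/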
if (ieee80211_is_beacon(fctl) || dev->wl->radiotap_enabled) {
u16 low_mactime_now;
b43legacy_tsf_read(dev, &status.mactime);
low_mactime_now = status.mactime;
status.mactime = status.mactime & ~0xFFFFULL;
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
status.flag |= RX_FLAG_TSFT;
}
chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
B43legacy_RX_CHAN_ID_SHIFT;
switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
status.freq = chanid + 2400;
break;
default:
b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
chanstat);
}
dev->stats.last_rx = jiffies;
ieee80211_rx_irqsafe(dev->wl->hw, skb, &status);
return;
drop:
b43legacydbg(dev->wl, "RX: Packet dropped\n");
dev_kfree_skb_any(skb);
}
void b43legacy_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status)
{
b43legacy_debugfs_log_txstat(dev, status);
if (status->intermediate)
return;
if (status->for_ampdu)
return;
if (!status->acked)
dev->wl->ieee_stats.dot11ACKFailureCount++;
if (status->rts_count) {
if (status->rts_count == 0xF) /* FIXME */
dev->wl->ieee_stats.dot11RTSFailureCount++;
else
dev->wl->ieee_stats.dot11RTSSuccessCount++;
}
if (b43legacy_using_pio(dev))
b43legacy_pio_handle_txstatus(dev, status);
else
b43legacy_dma_handle_txstatus(dev, status);
}
/* Handle TX status report as received through DMA/PIO queues */
void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev,
const struct b43legacy_hwtxstatus *hw)
{
struct b43legacy_txstatus status;
u8 tmp;
status.cookie = le16_to_cpu(hw->cookie);
status.seq = le16_to_cpu(hw->seq);
status.phy_stat = hw->phy_stat;
tmp = hw->count;
status.frame_count = (tmp >> 4);
status.rts_count = (tmp & 0x0F);
tmp = hw->flags << 1;
status.supp_reason = ((tmp & 0x1C) >> 2);
status.pm_indicated = !!(tmp & 0x80);
status.intermediate = !!(tmp & 0x40);
status.for_ampdu = !!(tmp & 0x20);
status.acked = !!(tmp & 0x02);
b43legacy_handle_txstatus(dev, &status);
}
/* Stop any TX operation on the device (suspend the hardware queues) */
void b43legacy_tx_suspend(struct b43legacy_wldev *dev)
{
if (b43legacy_using_pio(dev))
b43legacy_pio_freeze_txqueues(dev);
else
b43legacy_dma_tx_suspend(dev);
}
/* Resume any TX operation on the device (resume the hardware queues) */
void b43legacy_tx_resume(struct b43legacy_wldev *dev)
{
if (b43legacy_using_pio(dev))
b43legacy_pio_thaw_txqueues(dev);
else
b43legacy_dma_tx_resume(dev);
}
/* Initialize the QoS parameters */
void b43legacy_qos_init(struct b43legacy_wldev *dev)
{
/* FIXME: This function must probably be called from the mac80211
* config callback. */
return;
b43legacy_hf_write(dev, b43legacy_hf_read(dev) | B43legacy_HF_EDCF);
/* FIXME kill magic */
b43legacy_write16(dev, 0x688,
b43legacy_read16(dev, 0x688) | 0x4);
/*TODO: We might need some stack support here to get the values. */
}
| gpl-2.0 |
zoyi/openwrt-ar71xx | target/linux/ar71xx/files/arch/mips/ath79/mach-mzk-w300nh.c | 681 | 3165 | /*
* Planex MZK-W300NH board support
*
* Copyright (C) 2008-2012 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <asm/mach-ath79/ath79.h>
#include "dev-eth.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-m25p80.h"
#include "dev-wmac.h"
#include "machtypes.h"
#define MZK_W300NH_GPIO_LED_STATUS 1
#define MZK_W300NH_GPIO_LED_WPS 3
#define MZK_W300NH_GPIO_LED_WLAN 6
#define MZK_W300NH_GPIO_LED_AP_GREEN 15
#define MZK_W300NH_GPIO_LED_AP_AMBER 16
#define MZK_W300NH_GPIO_BTN_APROUTER 5
#define MZK_W300NH_GPIO_BTN_WPS 12
#define MZK_W300NH_GPIO_BTN_RESET 21
#define MZK_W300NH_KEYS_POLL_INTERVAL 20 /* msecs */
#define MZK_W300NH_KEYS_DEBOUNCE_INTERVAL (3 * MZK_W300NH_KEYS_POLL_INTERVAL)
static struct gpio_led mzk_w300nh_leds_gpio[] __initdata = {
{
.name = "planex:green:status",
.gpio = MZK_W300NH_GPIO_LED_STATUS,
.active_low = 1,
}, {
.name = "planex:blue:wps",
.gpio = MZK_W300NH_GPIO_LED_WPS,
.active_low = 1,
}, {
.name = "planex:green:wlan",
.gpio = MZK_W300NH_GPIO_LED_WLAN,
.active_low = 1,
}, {
.name = "planex:green:aprouter",
.gpio = MZK_W300NH_GPIO_LED_AP_GREEN,
}, {
.name = "planex:amber:aprouter",
.gpio = MZK_W300NH_GPIO_LED_AP_AMBER,
}
};
static struct gpio_keys_button mzk_w300nh_gpio_keys[] __initdata = {
{
.desc = "reset",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = MZK_W300NH_KEYS_DEBOUNCE_INTERVAL,
.gpio = MZK_W300NH_GPIO_BTN_RESET,
.active_low = 1,
}, {
.desc = "wps",
.type = EV_KEY,
.code = KEY_WPS_BUTTON,
.debounce_interval = MZK_W300NH_KEYS_DEBOUNCE_INTERVAL,
.gpio = MZK_W300NH_GPIO_BTN_WPS,
.active_low = 1,
}, {
.desc = "aprouter",
.type = EV_KEY,
.code = BTN_2,
.debounce_interval = MZK_W300NH_KEYS_DEBOUNCE_INTERVAL,
.gpio = MZK_W300NH_GPIO_BTN_APROUTER,
.active_low = 0,
}
};
#define MZK_W300NH_WAN_PHYMASK BIT(4)
#define MZK_W300NH_MDIO_MASK (~MZK_W300NH_WAN_PHYMASK)
static void __init mzk_w300nh_setup(void)
{
u8 *eeprom = (u8 *) KSEG1ADDR(0x1fff1000);
ath79_register_mdio(0, MZK_W300NH_MDIO_MASK);
ath79_init_mac(ath79_eth0_data.mac_addr, eeprom, 0);
ath79_eth0_data.phy_if_mode = PHY_INTERFACE_MODE_RMII;
ath79_eth0_data.speed = SPEED_100;
ath79_eth0_data.duplex = DUPLEX_FULL;
ath79_eth0_data.has_ar8216 = 1;
ath79_init_mac(ath79_eth1_data.mac_addr, eeprom, 1);
ath79_eth1_data.phy_if_mode = PHY_INTERFACE_MODE_RMII;
ath79_eth1_data.phy_mask = MZK_W300NH_WAN_PHYMASK;
ath79_register_eth(0);
ath79_register_eth(1);
ath79_register_m25p80(NULL);
ath79_register_leds_gpio(-1, ARRAY_SIZE(mzk_w300nh_leds_gpio),
mzk_w300nh_leds_gpio);
ath79_register_gpio_keys_polled(-1, MZK_W300NH_KEYS_POLL_INTERVAL,
ARRAY_SIZE(mzk_w300nh_gpio_keys),
mzk_w300nh_gpio_keys);
ath79_register_wmac(eeprom, NULL);
}
MIPS_MACHINE(ATH79_MACH_MZK_W300NH, "MZK-W300NH", "Planex MZK-W300NH",
mzk_w300nh_setup);
| gpl-2.0 |
tinixx/linux | drivers/usb/serial/kobil_sct.c | 1193 | 16150 | /*
* KOBIL USB Smart Card Terminal Driver
*
* Copyright (C) 2002 KOBIL Systems GmbH
* Author: Thomas Wahrenbruch
*
* Contact: linuxusb@kobil.de
*
* This program is largely derived from work by the linux-usb group
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Thanks to Greg Kroah-Hartman (greg@kroah.com) for his help and
* patience.
*
* Supported readers: USB TWIN, KAAN Standard Plus and SecOVID Reader Plus
* (Adapter K), B1 Professional and KAAN Professional (Adapter B)
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/ioctl.h>
#include "kobil_sct.h"
#define DRIVER_AUTHOR "KOBIL Systems GmbH - http://www.kobil.com"
#define DRIVER_DESC "KOBIL USB Smart Card Terminal Driver (experimental)"
#define KOBIL_VENDOR_ID 0x0D46
#define KOBIL_ADAPTER_B_PRODUCT_ID 0x2011
#define KOBIL_ADAPTER_K_PRODUCT_ID 0x2012
#define KOBIL_USBTWIN_PRODUCT_ID 0x0078
#define KOBIL_KAAN_SIM_PRODUCT_ID 0x0081
#define KOBIL_TIMEOUT 500
#define KOBIL_BUF_LENGTH 300
/* Function prototypes */
static int kobil_port_probe(struct usb_serial_port *port);
static int kobil_port_remove(struct usb_serial_port *port);
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
static void kobil_close(struct usb_serial_port *port);
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static int kobil_write_room(struct tty_struct *tty);
static int kobil_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
static int kobil_tiocmget(struct tty_struct *tty);
static int kobil_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void kobil_read_int_callback(struct urb *urb);
static void kobil_write_int_callback(struct urb *urb);
static void kobil_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_KAAN_SIM_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_serial_driver kobil_device = {
.driver = {
.owner = THIS_MODULE,
.name = "kobil",
},
.description = "KOBIL USB smart card terminal",
.id_table = id_table,
.num_ports = 1,
.port_probe = kobil_port_probe,
.port_remove = kobil_port_remove,
.ioctl = kobil_ioctl,
.set_termios = kobil_set_termios,
.init_termios = kobil_init_termios,
.tiocmget = kobil_tiocmget,
.tiocmset = kobil_tiocmset,
.open = kobil_open,
.close = kobil_close,
.write = kobil_write,
.write_room = kobil_write_room,
.read_int_callback = kobil_read_int_callback,
.write_int_callback = kobil_write_int_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&kobil_device, NULL
};
struct kobil_private {
unsigned char buf[KOBIL_BUF_LENGTH]; /* buffer for the APDU to send */
int filled; /* index of the last char in buf */
int cur_pos; /* index of the next char to send in buf */
__u16 device_type;
};
static int kobil_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct kobil_private *priv;
priv = kmalloc(sizeof(struct kobil_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->filled = 0;
priv->cur_pos = 0;
priv->device_type = le16_to_cpu(serial->dev->descriptor.idProduct);
switch (priv->device_type) {
case KOBIL_ADAPTER_B_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL B1 PRO / KAAN PRO detected\n");
break;
case KOBIL_ADAPTER_K_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL KAAN Standard Plus / SecOVID Reader Plus detected\n");
break;
case KOBIL_USBTWIN_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL USBTWIN detected\n");
break;
case KOBIL_KAAN_SIM_PRODUCT_ID:
dev_dbg(&serial->dev->dev, "KOBIL KAAN SIM detected\n");
break;
}
usb_set_serial_port_data(port, priv);
return 0;
}
static int kobil_port_remove(struct usb_serial_port *port)
{
struct kobil_private *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
return 0;
}
static void kobil_init_termios(struct tty_struct *tty)
{
/* Default to echo off and other sane device settings */
tty->termios.c_lflag = 0;
tty->termios.c_iflag &= ~(ISIG | ICANON | ECHO | IEXTEN | XCASE);
tty->termios.c_iflag |= IGNBRK | IGNPAR | IXOFF;
/* do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) */
tty->termios.c_oflag &= ~ONLCR;
}
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct device *dev = &port->dev;
int result = 0;
struct kobil_private *priv;
unsigned char *transfer_buffer;
int transfer_buffer_length = 8;
priv = usb_get_serial_port_data(port);
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
/* get hardware version */
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
SUSBCRequest_GetMisc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_IN,
SUSBCR_MSC_GetHWVersion,
0,
transfer_buffer,
transfer_buffer_length,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_HW_version URB returns: %i\n", __func__, result);
dev_dbg(dev, "Hardware version: %i.%i.%i\n", transfer_buffer[0],
transfer_buffer[1], transfer_buffer[2]);
/* get firmware version */
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
SUSBCRequest_GetMisc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_IN,
SUSBCR_MSC_GetFWVersion,
0,
transfer_buffer,
transfer_buffer_length,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send get_FW_version URB returns: %i\n", __func__, result);
dev_dbg(dev, "Firmware version: %i.%i.%i\n", transfer_buffer[0],
transfer_buffer[1], transfer_buffer[2]);
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
/* Setting Baudrate, Parity and Stopbits */
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetBaudRateParityAndStopBits,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
SUSBCR_SBR_9600 | SUSBCR_SPASB_EvenParity |
SUSBCR_SPASB_1StopBit,
0,
NULL,
0,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send set_baudrate URB returns: %i\n", __func__, result);
/* reset all queues */
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_Misc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
SUSBCR_MSC_ResetAllQueues,
0,
NULL,
0,
KOBIL_TIMEOUT
);
dev_dbg(dev, "%s - Send reset_all_queues URB returns: %i\n", __func__, result);
}
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* start reading (Adapter B 'cause PNP string) */
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
dev_dbg(dev, "%s - Send read URB returns: %i\n", __func__, result);
}
kfree(transfer_buffer);
return 0;
}
static void kobil_close(struct usb_serial_port *port)
{
/* FIXME: Add rts/dtr methods */
usb_kill_urb(port->interrupt_out_urb);
usb_kill_urb(port->interrupt_in_urb);
}
static void kobil_read_int_callback(struct urb *urb)
{
int result;
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
if (status) {
dev_dbg(&port->dev, "%s - Read int status not zero: %d\n", __func__, status);
return;
}
if (urb->actual_length) {
usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
data);
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
}
static void kobil_write_int_callback(struct urb *urb)
{
}
static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int length = 0;
int result = 0;
int todo = 0;
struct kobil_private *priv;
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
priv = usb_get_serial_port_data(port);
if (count > (KOBIL_BUF_LENGTH - priv->filled)) {
dev_dbg(&port->dev, "%s - Error: write request bigger than buffer size\n", __func__);
return -ENOMEM;
}
/* Copy data to buffer */
memcpy(priv->buf + priv->filled, buf, count);
usb_serial_debug_data(&port->dev, __func__, count, priv->buf + priv->filled);
priv->filled = priv->filled + count;
/* only send complete block. TWIN, KAAN SIM and adapter K
use the same protocol. */
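/*
* Editor's example (illustrative): for the TWIN/KAAN SIM/adapter K
* framing the length octet is buf[1], so a command 00 05 d0 d1 d2 d3 d4
* is complete once priv->filled >= 5 + 3 == 8; adapter B carries the
* length in buf[2] and needs buf[2] + 4 bytes.
*/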
if (((priv->device_type != KOBIL_ADAPTER_B_PRODUCT_ID) && (priv->filled > 2) && (priv->filled >= (priv->buf[1] + 3))) ||
((priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) && (priv->filled > 3) && (priv->filled >= (priv->buf[2] + 4)))) {
/* stop reading (except TWIN and KAAN SIM) */
if ((priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID)
|| (priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID))
usb_kill_urb(port->interrupt_in_urb);
todo = priv->filled - priv->cur_pos;
while (todo > 0) {
/* max 8 byte in one urb (endpoint size) */
length = min(todo, port->interrupt_out_size);
/* copy data to transfer buffer */
memcpy(port->interrupt_out_buffer,
priv->buf + priv->cur_pos, length);
port->interrupt_out_urb->transfer_buffer_length = length;
priv->cur_pos = priv->cur_pos + length;
result = usb_submit_urb(port->interrupt_out_urb,
GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
todo = priv->filled - priv->cur_pos;
if (todo > 0)
msleep(24);
}
priv->filled = 0;
priv->cur_pos = 0;
/* start reading (except TWIN and KAAN SIM) */
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
result = usb_submit_urb(port->interrupt_in_urb,
GFP_ATOMIC);
dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
}
}
return count;
}
static int kobil_write_room(struct tty_struct *tty)
{
/* FIXME */
return 8;
}
static int kobil_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct kobil_private *priv;
int result;
unsigned char *transfer_buffer;
int transfer_buffer_length = 8;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID
|| priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* This device doesn't support ioctl calls */
return -EINVAL;
}
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
SUSBCRequest_GetStatusLineState,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_IN,
0,
0,
transfer_buffer,
transfer_buffer_length,
KOBIL_TIMEOUT);
dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
__func__, result, transfer_buffer[0]);
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
kfree(transfer_buffer);
return result;
}
static int kobil_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct device *dev = &port->dev;
struct kobil_private *priv;
int result;
int dtr = 0;
int rts = 0;
/* FIXME: locking ? */
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID
|| priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* This device doesn't support ioctl calls */
return -EINVAL;
}
if (set & TIOCM_RTS)
rts = 1;
if (set & TIOCM_DTR)
dtr = 1;
if (clear & TIOCM_RTS)
rts = 0;
if (clear & TIOCM_DTR)
dtr = 0;
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID) {
if (dtr != 0)
dev_dbg(dev, "%s - Setting DTR\n", __func__);
else
dev_dbg(dev, "%s - Clearing DTR\n", __func__);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetStatusLinesOrQueues,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
((dtr != 0) ? SUSBCR_SSL_SETDTR : SUSBCR_SSL_CLRDTR),
0,
NULL,
0,
KOBIL_TIMEOUT);
} else {
if (rts != 0)
dev_dbg(dev, "%s - Setting RTS\n", __func__);
else
dev_dbg(dev, "%s - Clearing RTS\n", __func__);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetStatusLinesOrQueues,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
((rts != 0) ? SUSBCR_SSL_SETRTS : SUSBCR_SSL_CLRRTS),
0,
NULL,
0,
KOBIL_TIMEOUT);
}
dev_dbg(dev, "%s - Send set_status_line URB returns: %i\n", __func__, result);
return (result < 0) ? result : 0;
}
static void kobil_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old)
{
struct kobil_private *priv;
int result;
unsigned short urb_val = 0;
int c_cflag = tty->termios.c_cflag;
speed_t speed;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) {
/* This device doesn't support ioctl calls */
tty_termios_copy_hw(&tty->termios, old);
return;
}
speed = tty_get_baud_rate(tty);
switch (speed) {
case 1200:
urb_val = SUSBCR_SBR_1200;
break;
default:
speed = 9600;
/* fall through */
case 9600:
urb_val = SUSBCR_SBR_9600;
break;
}
urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
SUSBCR_SPASB_1StopBit;
if (c_cflag & PARENB) {
if (c_cflag & PARODD)
urb_val |= SUSBCR_SPASB_OddParity;
else
urb_val |= SUSBCR_SPASB_EvenParity;
} else
urb_val |= SUSBCR_SPASB_NoParity;
tty->termios.c_cflag &= ~CMSPAR;
tty_encode_baud_rate(tty, speed, speed);
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_SetBaudRateParityAndStopBits,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
urb_val,
0,
NULL,
0,
KOBIL_TIMEOUT
);
}
static int kobil_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct kobil_private *priv = usb_get_serial_port_data(port);
int result;
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
/* This device doesn't support ioctl calls */
return -ENOIOCTLCMD;
switch (cmd) {
case TCFLSH:
result = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
SUSBCRequest_Misc,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
SUSBCR_MSC_ResetAllQueues,
0,
NULL,
0,
KOBIL_TIMEOUT
);
dev_dbg(&port->dev,
"%s - Send reset_all_queues (FLUSH) URB returns: %i\n",
__func__, result);
return (result < 0) ? -EIO : 0;
default:
return -ENOIOCTLCMD;
}
}
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
AndroidDeveloperAlliance/ZenKernel_Crespo | arch/arm/mach-sa1100/assabet.c | 2473 | 11208 | /*
* linux/arch/arm/mach-sa1100/assabet.c
*
* Author: Nicolas Pitre
*
* This file contains all Assabet-specific tweaks.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/serial_core.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/irda.h>
#include <asm/mach/map.h>
#include <asm/mach/serial_sa1100.h>
#include <mach/assabet.h>
#include <mach/mcp.h>
#include "generic.h"
#define ASSABET_BCR_DB1110 \
(ASSABET_BCR_SPK_OFF | ASSABET_BCR_QMUTE | \
ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \
ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \
ASSABET_BCR_IRDA_MD0)
#define ASSABET_BCR_DB1111 \
(ASSABET_BCR_SPK_OFF | ASSABET_BCR_QMUTE | \
ASSABET_BCR_LED_GREEN | ASSABET_BCR_LED_RED | \
ASSABET_BCR_RS232EN | ASSABET_BCR_LCD_12RGB | \
ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_STEREO_LB | \
ASSABET_BCR_IRDA_MD0 | ASSABET_BCR_CF_RST)
unsigned long SCR_value = ASSABET_SCR_INIT;
EXPORT_SYMBOL(SCR_value);
static unsigned long BCR_value = ASSABET_BCR_DB1110;
void ASSABET_BCR_frob(unsigned int mask, unsigned int val)
{
unsigned long flags;
local_irq_save(flags);
BCR_value = (BCR_value & ~mask) | val;
ASSABET_BCR = BCR_value;
local_irq_restore(flags);
}
EXPORT_SYMBOL(ASSABET_BCR_frob);
static void assabet_backlight_power(int on)
{
#ifndef ASSABET_PAL_VIDEO
if (on)
ASSABET_BCR_set(ASSABET_BCR_LIGHT_ON);
else
#endif
ASSABET_BCR_clear(ASSABET_BCR_LIGHT_ON);
}
/*
* Turn on/off the backlight. When turning the backlight on,
* we wait 500us after turning it on so we don't cause the
* supplies to droop when we enable the LCD controller (and
* cause a hard reset.)
*/
static void assabet_lcd_power(int on)
{
#ifndef ASSABET_PAL_VIDEO
if (on) {
ASSABET_BCR_set(ASSABET_BCR_LCD_ON);
udelay(500);
} else
#endif
ASSABET_BCR_clear(ASSABET_BCR_LCD_ON);
}
/*
* Assabet flash support code.
*/
#ifdef ASSABET_REV_4
/*
* Phase 4 Assabet has two 28F160B3 flash parts in bank 0:
*/
static struct mtd_partition assabet_partitions[] = {
{
.name = "bootloader",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "bootloader params",
.size = 0x00020000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "jffs",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
#else
/*
* Phase 5 Assabet has two 28F128J3A flash parts in bank 0:
*/
static struct mtd_partition assabet_partitions[] = {
{
.name = "bootloader",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "bootloader params",
.size = 0x00040000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "jffs",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
#endif
static struct flash_platform_data assabet_flash_data = {
.map_name = "cfi_probe",
.parts = assabet_partitions,
.nr_parts = ARRAY_SIZE(assabet_partitions),
};
static struct resource assabet_flash_resources[] = {
{
.start = SA1100_CS0_PHYS,
.end = SA1100_CS0_PHYS + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
.start = SA1100_CS1_PHYS,
.end = SA1100_CS1_PHYS + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}
};
/*
* Assabet IrDA support code.
*/
static int assabet_irda_set_power(struct device *dev, unsigned int state)
{
static unsigned int bcr_state[4] = {
ASSABET_BCR_IRDA_MD0,
ASSABET_BCR_IRDA_MD1|ASSABET_BCR_IRDA_MD0,
ASSABET_BCR_IRDA_MD1,
0
};
if (state < 4) {
state = bcr_state[state];
ASSABET_BCR_clear(state ^ (ASSABET_BCR_IRDA_MD1|
ASSABET_BCR_IRDA_MD0));
ASSABET_BCR_set(state);
}
return 0;
}
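/*
* Editor's note (illustrative): bcr_state[] maps the four IrDA power
* levels onto the MD1/MD0 transceiver pins; the clear-then-set pair
* above first clears whichever of the two bits the new state does not
* use, then sets the ones it does.
*/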
static void assabet_irda_set_speed(struct device *dev, unsigned int speed)
{
if (speed < 4000000)
ASSABET_BCR_clear(ASSABET_BCR_IRDA_FSEL);
else
ASSABET_BCR_set(ASSABET_BCR_IRDA_FSEL);
}
static struct irda_platform_data assabet_irda_data = {
.set_power = assabet_irda_set_power,
.set_speed = assabet_irda_set_speed,
};
static struct mcp_plat_data assabet_mcp_data = {
.mccr0 = MCCR0_ADM,
.sclk_rate = 11981000,
};
static void __init assabet_init(void)
{
/*
* Ensure that the power supply is in "high power" mode.
*/
GPDR |= GPIO_GPIO16;
GPSR = GPIO_GPIO16;
/*
* Ensure that these pins are set as outputs and are driving
* logic 0. This ensures that we won't inadvertently toggle
* the WS latch in the CPLD, and we don't float causing
* excessive power drain. --rmk
*/
GPDR |= GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM;
GPCR = GPIO_SSP_TXD | GPIO_SSP_SCLK | GPIO_SSP_SFRM;
/*
* Set up registers for sleep mode.
*/
PWER = PWER_GPIO0;
PGSR = 0;
PCFR = 0;
PSDR = 0;
PPDR |= PPC_TXD3 | PPC_TXD1;
PPSR |= PPC_TXD3 | PPC_TXD1;
sa1100fb_lcd_power = assabet_lcd_power;
sa1100fb_backlight_power = assabet_backlight_power;
if (machine_has_neponset()) {
/*
* Angel sets this, but other bootloaders may not.
*
* This must precede any driver calls to BCR_set()
* or BCR_clear().
*/
ASSABET_BCR = BCR_value = ASSABET_BCR_DB1111;
#ifndef CONFIG_ASSABET_NEPONSET
printk( "Warning: Neponset detected but full support "
"hasn't been configured in the kernel\n" );
#endif
}
sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources,
ARRAY_SIZE(assabet_flash_resources));
sa11x0_register_irda(&assabet_irda_data);
sa11x0_register_mcp(&assabet_mcp_data);
}
/*
* On Assabet, we must probe for the Neponset board _before_
* paging_init() has occurred to actually determine the amount
* of RAM available. To do so, we map the appropriate IO section
* in the page table here in order to access GPIO registers.
*/
static void __init map_sa1100_gpio_regs( void )
{
unsigned long phys = __PREG(GPLR) & PMD_MASK;
unsigned long virt = io_p2v(phys);
int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
pmd_t *pmd;
pmd = pmd_offset(pgd_offset_k(virt), virt);
*pmd = __pmd(phys | prot);
flush_pmd_entry(pmd);
}
/*
* Read System Configuration "Register"
* (taken from "Intel StrongARM SA-1110 Microprocessor Development Board
* User's Guide", section 4.4.1)
*
* This same scan is performed in arch/arm/boot/compressed/head-sa1100.S
* to set up the serial port for decompression status messages. We
* repeat it here because the kernel may not be loaded as a zImage, and
* also because it's a hassle to communicate the SCR value to the kernel
* from the decompressor.
*
* Note that IRQs are guaranteed to be disabled.
*/
static void __init get_assabet_scr(void)
{
unsigned long scr, i;
GPDR |= 0x3fc; /* Configure GPIO 9:2 as outputs */
GPSR = 0x3fc; /* Write 0xFF to GPIO 9:2 */
GPDR &= ~(0x3fc); /* Configure GPIO 9:2 as inputs */
for (i = 100; i--; ) /* Read GPIO 9:2 */
scr = GPLR;
GPDR |= 0x3fc; /* restore correct pin direction */
scr &= 0x3fc; /* save as system configuration byte. */
SCR_value = scr;
}
static void __init
fixup_assabet(struct machine_desc *desc, struct tag *tags,
char **cmdline, struct meminfo *mi)
{
/* This must be done before any call to machine_has_neponset() */
map_sa1100_gpio_regs();
get_assabet_scr();
if (machine_has_neponset())
printk("Neponset expansion board detected\n");
}
static void assabet_uart_pm(struct uart_port *port, u_int state, u_int oldstate)
{
if (port->mapbase == _Ser1UTCR0) {
if (state)
ASSABET_BCR_clear(ASSABET_BCR_RS232EN |
ASSABET_BCR_COM_RTS |
ASSABET_BCR_COM_DTR);
else
ASSABET_BCR_set(ASSABET_BCR_RS232EN |
ASSABET_BCR_COM_RTS |
ASSABET_BCR_COM_DTR);
}
}
/*
* Assabet uses COM_RTS and COM_DTR for both UART1 (com port)
* and UART3 (radio module). We only handle them for UART1 here.
*/
static void assabet_set_mctrl(struct uart_port *port, u_int mctrl)
{
if (port->mapbase == _Ser1UTCR0) {
u_int set = 0, clear = 0;
if (mctrl & TIOCM_RTS)
clear |= ASSABET_BCR_COM_RTS;
else
set |= ASSABET_BCR_COM_RTS;
if (mctrl & TIOCM_DTR)
clear |= ASSABET_BCR_COM_DTR;
else
set |= ASSABET_BCR_COM_DTR;
ASSABET_BCR_clear(clear);
ASSABET_BCR_set(set);
}
}
static u_int assabet_get_mctrl(struct uart_port *port)
{
u_int ret = 0;
u_int bsr = ASSABET_BSR;
/* need 2 reads to read current value */
bsr = ASSABET_BSR;
if (port->mapbase == _Ser1UTCR0) {
if (bsr & ASSABET_BSR_COM_DCD)
ret |= TIOCM_CD;
if (bsr & ASSABET_BSR_COM_CTS)
ret |= TIOCM_CTS;
if (bsr & ASSABET_BSR_COM_DSR)
ret |= TIOCM_DSR;
} else if (port->mapbase == _Ser3UTCR0) {
if (bsr & ASSABET_BSR_RAD_DCD)
ret |= TIOCM_CD;
if (bsr & ASSABET_BSR_RAD_CTS)
ret |= TIOCM_CTS;
if (bsr & ASSABET_BSR_RAD_DSR)
ret |= TIOCM_DSR;
if (bsr & ASSABET_BSR_RAD_RI)
ret |= TIOCM_RI;
} else {
ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR;
}
return ret;
}
static struct sa1100_port_fns assabet_port_fns __initdata = {
.set_mctrl = assabet_set_mctrl,
.get_mctrl = assabet_get_mctrl,
.pm = assabet_uart_pm,
};
static struct map_desc assabet_io_desc[] __initdata = {
{ /* Board Control Register */
.virtual = 0xf1000000,
.pfn = __phys_to_pfn(0x12000000),
.length = 0x00100000,
.type = MT_DEVICE
}, { /* MQ200 */
.virtual = 0xf2800000,
.pfn = __phys_to_pfn(0x4b800000),
.length = 0x00800000,
.type = MT_DEVICE
}
};
static void __init assabet_map_io(void)
{
sa1100_map_io();
iotable_init(assabet_io_desc, ARRAY_SIZE(assabet_io_desc));
/*
* Set SUS bit in SDCR0 so serial port 1 functions.
* Its called GPCLKR0 in my SA1110 manual.
*/
Ser1SDCR0 |= SDCR0_SUS;
if (machine_has_neponset()) {
#ifdef CONFIG_ASSABET_NEPONSET
extern void neponset_map_io(void);
/*
* We map Neponset registers even if it isn't present since
* many drivers will try to probe their stuff (and fail).
* This is still more friendly than a kernel paging request
* crash.
*/
neponset_map_io();
#endif
} else {
sa1100_register_uart_fns(&assabet_port_fns);
}
/*
* When Neponset is attached, the first UART should be
* UART3. That's what Angel is doing and many documents
* are stating this.
*
* We do the Neponset mapping even if Neponset support
* isn't compiled in so the user will still get something on
* the expected physical serial port.
*
* We no longer do this; not all boot loaders support it,
* and UART3 appears to be somewhat unreliable with blob.
*/
sa1100_register_uart(0, 1);
sa1100_register_uart(2, 3);
}
MACHINE_START(ASSABET, "Intel-Assabet")
.boot_params = 0xc0000100,
.fixup = fixup_assabet,
.map_io = assabet_map_io,
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = assabet_init,
MACHINE_END
| gpl-2.0 |
zanezam/boeffla-kernel-samsung-s3 | arch/arm/mach-ixp4xx/gtwx5715-setup.c | 2473 | 4619 | /*
* arch/arm/mach-ixp4xx/gtwx5715-setup.c
*
* Gemtek GTWX5715 (Linksys WRV54G) board setup
*
* Copyright (C) 2004 George T. Joseph
* Derived from Coyote
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
/* GPIO 5,6,7 and 12 are hard wired to the Kendin KS8995M Switch
and operate as an SPI type interface. The details of the interface
are available on Kendin/Micrel's web site. */
#define GTWX5715_KSSPI_SELECT 5
#define GTWX5715_KSSPI_TXD 6
#define GTWX5715_KSSPI_CLOCK 7
#define GTWX5715_KSSPI_RXD 12
/* The "reset" button is wired to GPIO 3.
The GPIO is brought "low" when the button is pushed. */
#define GTWX5715_BUTTON_GPIO 3
/* Board Label    Front Label
LED1           Power
LED2           Wireless-G
LED3           not populated but could be
LED4           Internet
LED5 - LED8    Controlled by KS8995M Switch
LED9           DMZ */
#define GTWX5715_LED1_GPIO 2
#define GTWX5715_LED2_GPIO 9
#define GTWX5715_LED3_GPIO 8
#define GTWX5715_LED4_GPIO 1
#define GTWX5715_LED9_GPIO 4
/*
* Xscale UART registers are 32 bits wide with only the least
* significant 8 bits having any meaning. From a configuration
* perspective, this means 2 things...
*
* Setting .regshift = 2 so that the standard 16550 registers
* line up on every 4th byte.
*
* Shifting the register start virtual address +3 bytes when
* compiled big-endian. Since register writes are done on a
* single byte basis, if the shift isn't done the driver will
* write the value into the most significant byte of the register,
* which is ignored, instead of the least significant.
*/
#ifdef __ARMEB__
#define REG_OFFSET 3
#else
#define REG_OFFSET 0
#endif
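/*
 * Editorial sketch, not part of the original board code: with
 * .regshift = 2 the serial8250 core addresses each 16550 register at
 * membase + (reg << regshift), and REG_OFFSET biases membase so that
 * single-byte accesses hit the significant byte on big-endian builds.
 * The helper below is hypothetical and only illustrates the arithmetic.
 */
static inline unsigned long gtwx5715_uart_reg(unsigned long membase,
unsigned int reg)
{
/* e.g. IER (reg 1) resolves to membase + 4; membase itself already
 * includes REG_OFFSET (+3 when compiled big-endian) */
return membase + (reg << 2);
}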
/*
* Only the second or "console" uart is connected on the gtwx5715.
*/
static struct resource gtwx5715_uart_resources[] = {
{
.start = IXP4XX_UART2_BASE_PHYS,
.end = IXP4XX_UART2_BASE_PHYS + 0x0fff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_IXP4XX_UART2,
.end = IRQ_IXP4XX_UART2,
.flags = IORESOURCE_IRQ,
},
{ },
};
static struct plat_serial8250_port gtwx5715_uart_platform_data[] = {
{
.mapbase = IXP4XX_UART2_BASE_PHYS,
.membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
.irq = IRQ_IXP4XX_UART2,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = IXP4XX_UART_XTAL,
},
{ },
};
static struct platform_device gtwx5715_uart_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = gtwx5715_uart_platform_data,
},
.num_resources = 2,
.resource = gtwx5715_uart_resources,
};
static struct flash_platform_data gtwx5715_flash_data = {
.map_name = "cfi_probe",
.width = 2,
};
static struct resource gtwx5715_flash_resource = {
.flags = IORESOURCE_MEM,
};
static struct platform_device gtwx5715_flash = {
.name = "IXP4XX-Flash",
.id = 0,
.dev = {
.platform_data = &gtwx5715_flash_data,
},
.num_resources = 1,
.resource = &gtwx5715_flash_resource,
};
static struct platform_device *gtwx5715_devices[] __initdata = {
&gtwx5715_uart_device,
&gtwx5715_flash,
};
static void __init gtwx5715_init(void)
{
ixp4xx_sys_init();
gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1;
platform_add_devices(gtwx5715_devices, ARRAY_SIZE(gtwx5715_devices));
}
MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
/* Maintainer: George Joseph */
.map_io = ixp4xx_map_io,
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = gtwx5715_init,
MACHINE_END
| gpl-2.0 |
raden/blue-kelisa-kernel | drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 2729 | 101226 | /******************************************************************************
*
* Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-agn.h"
#include "iwl-op-mode.h"
#define RS_NAME "iwl-agn-rs"
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_NUMBER_TRY 1
#define IWL_HT_NUMBER_TRY 3
#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX 15
/* max time to accumulate history: 3 seconds (3*HZ) */
#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
static u8 rs_ht_to_legacy[] = {
IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
IWL_RATE_6M_INDEX,
IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
static const u8 ant_toggle_lookup[] = {
/*ANT_NONE -> */ ANT_NONE,
/*ANT_A -> */ ANT_B,
/*ANT_B -> */ ANT_C,
/*ANT_AB -> */ ANT_BC,
/*ANT_C -> */ ANT_A,
/*ANT_AC -> */ ANT_AB,
/*ANT_BC -> */ ANT_AC,
/*ANT_ABC -> */ ANT_ABC,
};
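/*
 * Example (derived from the table above): repeatedly applying the
 * lookup walks single antennas in the cycle A -> B -> C -> A, and
 * antenna pairs in the cycle AB -> BC -> AC -> AB; ANT_NONE and
 * ANT_ABC map to themselves. rs_toggle_antenna() below follows this
 * chain until it finds a configuration allowed by valid_ant.
 */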
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
IWL_RATE_SISO_##s##M_PLCP, \
IWL_RATE_MIMO2_##s##M_PLCP,\
IWL_RATE_MIMO3_##s##M_PLCP,\
IWL_RATE_##r##M_IEEE, \
IWL_RATE_##ip##M_INDEX, \
IWL_RATE_##in##M_INDEX, \
IWL_RATE_##rp##M_INDEX, \
IWL_RATE_##rn##M_INDEX, \
IWL_RATE_##pp##M_INDEX, \
IWL_RATE_##np##M_INDEX }
/*
* Parameter order:
* rate, ht rate, prev/next ieee rate, prev/next rate-scale rate,
* prev/next antenna-toggle rate
*
* If there isn't a valid next or previous rate then INV is used which
* maps to IWL_RATE_INVALID
*
*/
const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
/* FIXME:RS: ^^ should be INV (legacy) */
};
static inline u8 rs_extract_rate(u32 rate_n_flags)
{
return (u8)(rate_n_flags & RATE_MCS_RATE_MSK);
}
static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
/* HT rate format */
if (rate_n_flags & RATE_MCS_HT_MSK) {
idx = rs_extract_rate(rate_n_flags);
if (idx >= IWL_RATE_MIMO3_6M_PLCP)
idx = idx - IWL_RATE_MIMO3_6M_PLCP;
else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
idx = idx - IWL_RATE_MIMO2_6M_PLCP;
idx += IWL_FIRST_OFDM_RATE;
/* skip 9M, which is not supported in HT */
if (idx >= IWL_RATE_9M_INDEX)
idx += 1;
if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
return idx;
/* legacy rate format, search for match in table */
} else {
for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
if (iwl_rates[idx].plcp ==
rs_extract_rate(rate_n_flags))
return idx;
}
return -1;
}
static void rs_rate_scale_perform(struct iwl_priv *priv,
struct sk_buff *skb,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta);
static void rs_fill_link_cmd(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index);
#else
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index)
{}
#endif
/**
* The following tables contain the expected throughput metrics for all rates
*
* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
*
* where invalid entries are zeros.
*
* CCK rates are only valid in legacy table and will only be used in G
* (2.4 GHz) band.
*/
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
{0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
};
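/*
 * Editorial sketch, an assumption mirroring rs_set_expected_tpt_table()
 * further below: the first dimension of each HT table above selects the
 * row for the current guard-interval/aggregation state, and the second
 * is the rate index. A hypothetical helper computing the row:
 */
static inline int rs_example_tpt_row(int is_sgi, int is_agg)
{
/* 0 = Norm, 1 = SGI, 2 = AGG, 3 = AGG+SGI */
return (is_agg ? 2 : 0) + (is_sgi ? 1 : 0);
}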
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
{ "2", "QPSK DSSS"},
{"5.5", "BPSK CCK"},
{ "11", "QPSK CCK"},
{ "6", "BPSK 1/2"},
{ "9", "BPSK 1/2"},
{ "12", "QPSK 1/2"},
{ "18", "QPSK 3/4"},
{ "24", "16QAM 1/2"},
{ "36", "16QAM 3/4"},
{ "48", "64QAM 2/3"},
{ "54", "64QAM 3/4"},
{ "60", "64QAM 5/6"},
};
#define MCS_INDEX_PER_STREAM (8)
static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
{
window->data = 0;
window->success_counter = 0;
window->success_ratio = IWL_INVALID_VALUE;
window->counter = 0;
window->average_tpt = IWL_INVALID_VALUE;
window->stamp = 0;
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
{
return (ant_type & valid_antenna) == ant_type;
}
/*
* Removes old data from the statistics: all data older than
* TID_MAX_TIME_DIFF is deleted.
*/
static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
/* The oldest age we want to keep */
u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
while (tl->queue_count &&
(tl->time_stamp < oldest_time)) {
tl->total -= tl->packet_count[tl->head];
tl->packet_count[tl->head] = 0;
tl->time_stamp += TID_QUEUE_CELL_SPACING;
tl->queue_count--;
tl->head++;
if (tl->head >= TID_QUEUE_MAX_SIZE)
tl->head = 0;
}
}
/*
* Increment the traffic load value for this tid, and remove
* any old values if a certain time period has passed.
*/
static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
struct ieee80211_hdr *hdr)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
s32 index;
struct iwl_traffic_load *tl = NULL;
u8 tid;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
} else
return IWL_MAX_TID_COUNT;
if (unlikely(tid >= IWL_MAX_TID_COUNT))
return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
curr_time -= curr_time % TID_ROUND_VALUE;
/* Happens only for the first packet. Initialize the data */
if (!(tl->queue_count)) {
tl->total = 1;
tl->time_stamp = curr_time;
tl->queue_count = 1;
tl->head = 0;
tl->packet_count[0] = 1;
return IWL_MAX_TID_COUNT;
}
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
index = time_diff / TID_QUEUE_CELL_SPACING;
/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
if (index >= TID_QUEUE_MAX_SIZE)
rs_tl_rm_old_stats(tl, curr_time);
index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
tl->packet_count[index] = tl->packet_count[index] + 1;
tl->total = tl->total + 1;
if ((index + 1) > tl->queue_count)
tl->queue_count = index + 1;
return tid;
}
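/*
 * Worked example (constants are assumptions for illustration; the real
 * values live in the rate-scale header): if TID_QUEUE_CELL_SPACING were
 * 50 ms, a packet arriving 160 ms after tl->time_stamp would fall into
 * cell index 160 / 50 = 3, placed at (tl->head + 3) % TID_QUEUE_MAX_SIZE
 * in the circular packet_count[] buffer; with tl->head == 0,
 * queue_count would grow to 4.
 */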
#ifdef CONFIG_MAC80211_DEBUGFS
/**
* Program the device to use a fixed rate for frame transmission.
* This is for debugging/testing only.
* Once the device starts using a fixed rate, the module must be
* reloaded to restore normal operation.
*/
static void rs_program_fix_rate(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta)
{
struct iwl_station_priv *sta_priv =
container_of(lq_sta, struct iwl_station_priv, lq_sta);
struct iwl_rxon_context *ctx = sta_priv->ctx;
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
/* testmode has higher priority and overwrites the fixed rate */
if (priv->tm_fixed_rate)
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
#endif
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
if (lq_sta->dbg_fixed_rate) {
rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
false);
}
}
#endif
/* Get the traffic load value for tid */
static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
u32 curr_time = jiffies_to_msecs(jiffies);
u32 time_diff;
s32 index;
struct iwl_traffic_load *tl = NULL;
if (tid >= IWL_MAX_TID_COUNT)
return 0;
tl = &(lq_data->load[tid]);
curr_time -= curr_time % TID_ROUND_VALUE;
if (!(tl->queue_count))
return 0;
time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
index = time_diff / TID_QUEUE_CELL_SPACING;
/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
if (index >= TID_QUEUE_MAX_SIZE)
rs_tl_rm_old_stats(tl, curr_time);
return tl->total;
}
static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
int ret = -EAGAIN;
u32 load;
/*
* Don't create TX aggregation sessions when in high
* BT traffic, as they would just be disrupted by BT.
*/
if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
priv->bt_traffic_load);
return ret;
}
load = rs_tl_get_load(lq_data, tid);
if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
if (ret == -EAGAIN) {
/*
* driver and mac80211 are out of sync;
* this might be caused by reloading the firmware.
* Stop the Tx BA session here.
*/
IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
tid);
ieee80211_stop_tx_ba_session(sta, tid);
}
} else {
IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
"because load = %u\n", tid, load);
}
return ret;
}
static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
if (tid < IWL_MAX_TID_COUNT)
rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
else
IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
tid, IWL_MAX_TID_COUNT);
}
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
{
return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
!!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
!!(rate_n_flags & RATE_MCS_ANT_C_MSK);
}
/*
* Static function to get the expected throughput from an
* iwl_scale_tbl_info, wrapping a NULL pointer check.
*/
static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
{
if (tbl->expected_tpt)
return tbl->expected_tpt[rs_index];
return 0;
}
/**
* rs_collect_tx_data - Update the success/failure sliding window
*
* We keep a sliding window of the last 62 packets transmitted
* at this rate. window->data contains the bitmask of successful
* packets.
*/
static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes)
{
struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
s32 fail_count, tpt;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
/* Get expected throughput */
tpt = get_expected_tpt(tbl, scale_index);
/*
* Keep track of only the latest 62 tx frame attempts in this rate's
* history window; anything older isn't really relevant any more.
* If we have filled up the sliding window, drop the oldest attempt;
* if the oldest attempt (highest bit in bitmap) shows "success",
* subtract "1" from the success counter (this is the main reason
* we keep these bitmaps!).
*/
while (attempts > 0) {
if (window->counter >= IWL_RATE_MAX_WINDOW) {
/* remove earliest */
window->counter = IWL_RATE_MAX_WINDOW - 1;
if (window->data & mask) {
window->data &= ~mask;
window->success_counter--;
}
}
/* Increment frames-attempted counter */
window->counter++;
/* Shift bitmap by one frame to throw away oldest history */
window->data <<= 1;
/* Mark the most recent #successes attempts as successful */
if (successes > 0) {
window->success_counter++;
window->data |= 0x1;
successes--;
}
attempts--;
}
/* Calculate current success ratio, avoid divide-by-0! */
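/* Note: success_ratio is a percentage in fixed point scaled by 128,
 * so 12800 represents 100%; the /128 in the average_tpt computation
 * below removes the scaling again. */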
if (window->counter > 0)
window->success_ratio = 128 * (100 * window->success_counter)
/ window->counter;
else
window->success_ratio = IWL_INVALID_VALUE;
fail_count = window->counter - window->success_counter;
/* Calculate average throughput, if we have enough history. */
if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
(window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
window->average_tpt = (window->success_ratio * tpt + 64) / 128;
else
window->average_tpt = IWL_INVALID_VALUE;
/* Tag this window as having been updated */
window->stamp = jiffies;
return 0;
}
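/*
 * Worked example (editorial, not driver code): starting from a freshly
 * cleared window, rs_collect_tx_data(tbl, IWL_RATE_6M_INDEX, 3, 2)
 * records three attempts, two of them successful: counter == 3,
 * success_counter == 2, the success bits occupy the older positions of
 * the bitmap (data == 0x6, binary 110), and
 * success_ratio == 128 * (100 * 2) / 3 == 8533.
 */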
/*
* Fill uCode API rate_n_flags field, based on "search" or "active" table.
*/
/* FIXME:RS:remove this function and put the flags statically in the table */
static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
struct iwl_scale_tbl_info *tbl,
int index, u8 use_green)
{
u32 rate_n_flags = 0;
if (is_legacy(tbl->lq_type)) {
rate_n_flags = iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
rate_n_flags |= RATE_MCS_CCK_MSK;
} else if (is_Ht(tbl->lq_type)) {
if (index > IWL_LAST_OFDM_RATE) {
IWL_ERR(priv, "Invalid HT rate index %d\n", index);
index = IWL_LAST_OFDM_RATE;
}
rate_n_flags = RATE_MCS_HT_MSK;
if (is_siso(tbl->lq_type))
rate_n_flags |= iwl_rates[index].plcp_siso;
else if (is_mimo2(tbl->lq_type))
rate_n_flags |= iwl_rates[index].plcp_mimo2;
else
rate_n_flags |= iwl_rates[index].plcp_mimo3;
} else {
IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
RATE_MCS_ANT_ABC_MSK);
if (is_Ht(tbl->lq_type)) {
if (tbl->is_ht40) {
if (tbl->is_dup)
rate_n_flags |= RATE_MCS_DUP_MSK;
else
rate_n_flags |= RATE_MCS_HT40_MSK;
}
if (tbl->is_SGI)
rate_n_flags |= RATE_MCS_SGI_MSK;
if (use_green) {
rate_n_flags |= RATE_MCS_GF_MSK;
if (is_siso(tbl->lq_type) && tbl->is_SGI) {
rate_n_flags &= ~RATE_MCS_SGI_MSK;
IWL_ERR(priv, "GF was set with SGI:SISO\n");
}
}
}
return rate_n_flags;
}
/*
* Interpret uCode API's rate_n_flags format,
* fill "search" or "active" tx mode table.
*/
static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
enum ieee80211_band band,
struct iwl_scale_tbl_info *tbl,
int *rate_idx)
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
u8 mcs;
memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
if (*rate_idx == IWL_RATE_INVALID) {
*rate_idx = -1;
return -EINVAL;
}
tbl->is_SGI = 0; /* default legacy setup */
tbl->is_ht40 = 0;
tbl->is_dup = 0;
tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
tbl->lq_type = LQ_NONE;
tbl->max_search = IWL_MAX_SEARCH;
/* legacy rate format */
if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
if (num_of_ant == 1) {
if (band == IEEE80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
}
/* HT rate format */
} else {
if (rate_n_flags & RATE_MCS_SGI_MSK)
tbl->is_SGI = 1;
if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
(rate_n_flags & RATE_MCS_DUP_MSK))
tbl->is_ht40 = 1;
if (rate_n_flags & RATE_MCS_DUP_MSK)
tbl->is_dup = 1;
mcs = rs_extract_rate(rate_n_flags);
/* SISO */
if (mcs <= IWL_RATE_SISO_60M_PLCP) {
if (num_of_ant == 1)
tbl->lq_type = LQ_SISO; /*else NONE*/
/* MIMO2 */
} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
if (num_of_ant == 2)
tbl->lq_type = LQ_MIMO2;
/* MIMO3 */
} else {
if (num_of_ant == 3) {
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
tbl->lq_type = LQ_MIMO3;
}
}
}
return 0;
}
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
struct iwl_scale_tbl_info *tbl)
{
u8 new_ant_type;
if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
return 0;
if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
return 0;
new_ant_type = ant_toggle_lookup[tbl->ant_type];
while ((new_ant_type != tbl->ant_type) &&
!rs_is_valid_ant(valid_ant, new_ant_type))
new_ant_type = ant_toggle_lookup[new_ant_type];
if (new_ant_type == tbl->ant_type)
return 0;
tbl->ant_type = new_ant_type;
*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
return 1;
}
/**
* Green-field mode is valid if the station supports it and
* there are no non-GF stations present in the BSS.
*/
static bool rs_use_green(struct ieee80211_sta *sta)
{
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!(ctx->ht.non_gf_sta_present);
}
/**
* rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
* basic available rates.
*
*/
static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
struct ieee80211_hdr *hdr,
enum iwl_table_type rate_type)
{
if (is_legacy(rate_type)) {
return lq_sta->active_legacy_rate;
} else {
if (is_siso(rate_type))
return lq_sta->active_siso_rate;
else if (is_mimo2(rate_type))
return lq_sta->active_mimo2_rate;
else
return lq_sta->active_mimo3_rate;
}
}
static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
int rate_type)
{
u8 high = IWL_RATE_INVALID;
u8 low = IWL_RATE_INVALID;
/* 802.11A or ht walks to the next literal adjacent rate in
* the rate table */
if (is_a_band(rate_type) || !is_legacy(rate_type)) {
int i;
u32 mask;
/* Find the previous rate that is in the rate mask */
i = index - 1;
for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
if (rate_mask & mask) {
low = i;
break;
}
}
/* Find the next rate that is in the rate mask */
i = index + 1;
for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
if (rate_mask & mask) {
high = i;
break;
}
}
return (high << 8) | low;
}
low = index;
while (low != IWL_RATE_INVALID) {
low = iwl_rates[low].prev_rs;
if (low == IWL_RATE_INVALID)
break;
if (rate_mask & (1 << low))
break;
IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
}
high = index;
while (high != IWL_RATE_INVALID) {
high = iwl_rates[high].next_rs;
if (high == IWL_RATE_INVALID)
break;
if (rate_mask & (1 << high))
break;
IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
}
return (high << 8) | low;
}
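/*
 * Usage note: the return value packs both neighbours into one u16;
 * callers such as rs_get_lower_rate() and rs_get_best_rate() unpack it
 * with:
 *
 *	low  = high_low & 0xff;
 *	high = (high_low >> 8) & 0xff;
 */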
static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
u8 scale_index, u8 ht_possible)
{
s32 low;
u16 rate_mask;
u16 high_low;
u8 switch_to_legacy = 0;
u8 is_green = lq_sta->is_green;
struct iwl_priv *priv = lq_sta->drv;
/* check if we need to switch from HT to legacy rates.
* assumption is that mandatory rates (1Mbps or 6Mbps)
* are always supported (spec demand) */
if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
switch_to_legacy = 1;
scale_index = rs_ht_to_legacy[scale_index];
if (lq_sta->band == IEEE80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
first_antenna(hw_params(priv).valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
tbl->max_search = IWL_MAX_SEARCH;
}
rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
/* Mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
/* supp_rates has no CCK bits in A mode */
if (lq_sta->band == IEEE80211_BAND_5GHZ)
rate_mask = (u16)(rate_mask &
(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
else
rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
}
/* If we switched from HT to legacy, check current rate */
if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
low = scale_index;
goto out;
}
high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
tbl->lq_type);
low = high_low & 0xff;
if (low == IWL_RATE_INVALID)
low = scale_index;
out:
return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
/*
* Simple function to compare two rate scale table types
*/
static bool table_type_matches(struct iwl_scale_tbl_info *a,
struct iwl_scale_tbl_info *b)
{
return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
(a->is_SGI == b->is_SGI);
}
static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_lq_sta *lq_sta)
{
struct iwl_scale_tbl_info *tbl;
bool full_concurrent = priv->bt_full_concurrent;
if (priv->bt_ant_couple_ok) {
/*
* Is there a need to switch between
* full concurrency and 3-wire?
*/
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
}
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
/* Update uCode's rate table. */
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
queue_work(priv->workqueue, &priv->bt_full_concurrency);
}
}
/*
* mac80211 sends us Tx status
*/
static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
int legacy_success;
int retries;
int rs_index, mac_index, i;
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_link_quality_cmd *table;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r;
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
enum mac80211_rate_control_flags mac_flags;
u32 tx_rate;
struct iwl_scale_tbl_info tbl_type;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
return;
} else if (!lq_sta->drv) {
IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
return;
}
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
/*
* Ignore this Tx frame response if its initial rate doesn't match
* that of latest Link Quality command. There may be stragglers
* from a previous Link Quality command, but we're no longer interested
* in those; they're either from the "active" mode while we're trying
* to check "search" mode, or a prior "search" mode after we've moved
* to a new "search" mode (which might become the new "active" mode).
*/
table = &lq_sta->lq;
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
if (priv->band == IEEE80211_BAND_5GHZ)
rs_index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
/* For HT packets, map MCS to PLCP */
if (mac_flags & IEEE80211_TX_RC_MCS) {
mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
mac_index++;
/*
* mac80211 HT index is always zero-indexed; we need to move
* HT OFDM rates after CCK rates in 2.4 GHz band
*/
if (priv->band == IEEE80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
}
/* Here we actually compare this rate to the latest LQ command */
if ((mac_index < 0) ||
(tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
(tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
(tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
(tbl_type.ant_type != info->antenna_sel_tx) ||
(!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
(!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
(rs_index != mac_index)) {
IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
/*
* Since rates mis-match, the last LQ command may have failed.
* After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
* the driver.
*/
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
} else
/* Rate did match, so reset the missed_rate_counter */
lq_sta->missed_rate_counter = 0;
/* Figure out if rate scale algorithm is in active or search table */
if (table_type_matches(&tbl_type,
&(lq_sta->lq_info[lq_sta->active_tbl]))) {
curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
} else if (table_type_matches(&tbl_type,
&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
} else {
IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
/*
* no matching table found: bypass the data collection
* and continue the rate-scale search for a usable rate table
*/
rs_stay_in_table(lq_sta, true);
goto done;
}
/*
* Updating the frame history depends on whether packets were
* aggregated.
*
* For aggregation, all packets were transmitted at the same rate, the
* first index into rate scale table.
*/
if (info->flags & IEEE80211_TX_STAT_AMPDU) {
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
&rs_index);
rs_collect_tx_data(curr_tbl, rs_index,
info->status.ampdu_len,
info->status.ampdu_ack_len);
/* Update success/fail counts if not searching for new mode */
if (lq_sta->stay_in_tbl) {
lq_sta->total_success += info->status.ampdu_ack_len;
lq_sta->total_failed += (info->status.ampdu_len -
info->status.ampdu_ack_len);
}
} else {
/*
* For legacy, update the frame history for each Tx retry.
*/
retries = info->status.rates[0].count - 1;
/* HW doesn't send more than 15 retries */
retries = min(retries, 15);
/* The last transmission may have been successful */
legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band,
&tbl_type, &rs_index);
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
*/
if (table_type_matches(&tbl_type, curr_tbl))
tmp_tbl = curr_tbl;
else if (table_type_matches(&tbl_type, other_tbl))
tmp_tbl = other_tbl;
else
continue;
rs_collect_tx_data(tmp_tbl, rs_index, 1,
i < retries ? 0 : legacy_success);
}
/* Update success/fail counts if not searching for new mode */
if (lq_sta->stay_in_tbl) {
lq_sta->total_success += legacy_success;
lq_sta->total_failed += retries + (1 - legacy_success);
}
}
/* The last TX rate is cached in lq_sta; it's set in if/else above */
lq_sta->last_rate_n_flags = tx_rate;
done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
if ((priv->tm_fixed_rate) &&
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
#endif
if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
/*
* Begin a period of staying with a selected modulation mode.
* Set "stay_in_tbl" flag to prevent any mode switches.
* Set frame tx success limits according to legacy vs. high-throughput,
* and reset overall (spanning all rates) tx success history statistics.
* These control how long we stay using same modulation mode before
* searching for a new mode.
*/
static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
struct iwl_lq_sta *lq_sta)
{
IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
lq_sta->stay_in_tbl = 1; /* only place this gets set */
if (is_legacy) {
lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
} else {
lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
}
lq_sta->table_count = 0;
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = jiffies;
lq_sta->action_counter = 0;
}
/*
* Find correct throughput table for given mode of modulation
*/
static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
/* Used to choose among HT tables */
s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
tbl->expected_tpt = expected_tpt_legacy;
return;
}
/* Legacy rates have only one table */
if (is_legacy(tbl->lq_type)) {
tbl->expected_tpt = expected_tpt_legacy;
return;
}
/* Choose among many HT tables depending on number of streams
* (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
* status */
if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
ht_tbl_pointer = expected_tpt_siso20MHz;
else if (is_siso(tbl->lq_type))
ht_tbl_pointer = expected_tpt_siso40MHz;
else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
else if (is_mimo2(tbl->lq_type))
ht_tbl_pointer = expected_tpt_mimo2_40MHz;
else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
ht_tbl_pointer = expected_tpt_mimo3_20MHz;
else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
ht_tbl_pointer = expected_tpt_mimo3_40MHz;
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
tbl->expected_tpt = ht_tbl_pointer[0];
else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
tbl->expected_tpt = ht_tbl_pointer[1];
else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
tbl->expected_tpt = ht_tbl_pointer[2];
else /* AGG+SGI */
tbl->expected_tpt = ht_tbl_pointer[3];
}
/*
* Find starting rate for new "search" high-throughput mode of modulation.
* Goal is to find lowest expected rate (under perfect conditions) that is
* above the current measured throughput of "active" mode, to give new mode
* a fair chance to prove itself without too many challenges.
*
* This gets called when transitioning to more aggressive modulation
* (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
* (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
* to decrease to match "active" throughput. When moving from MIMO to SISO,
* bit rate will typically need to increase, but not if performance was bad.
*/
static s32 rs_get_best_rate(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl, /* "search" */
u16 rate_mask, s8 index)
{
/* "active" values */
struct iwl_scale_tbl_info *active_tbl =
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
/* expected "search" throughput */
s32 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
s8 rate = index;
new_rate = high = low = start_hi = IWL_RATE_INVALID;
for (;;) {
high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
tbl->lq_type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
/*
* Lower the "search" bit rate, to give new "search" mode
* approximately the same throughput as "active" if:
*
* 1) "Active" mode has been working modestly well (but not
* great), and expected "search" throughput (under perfect
* conditions) at candidate rate is above the actual
* measured "active" throughput (but less than expected
* "active" throughput under perfect conditions).
* OR
* 2) "Active" mode has been working perfectly or very well
* and expected "search" throughput (under perfect
* conditions) at candidate rate is above expected
* "active" throughput (under perfect conditions).
*/
if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
((active_sr > IWL_RATE_DECREASE_TH) &&
(active_sr <= IWL_RATE_HIGH_TH) &&
(tpt_tbl[rate] <= active_tpt))) ||
((active_sr >= IWL_RATE_SCALE_SWITCH) &&
(tpt_tbl[rate] > active_tpt))) {
/* (2nd or later pass)
* If we've already tried to raise the rate, and are
* now trying to lower it, use the higher rate. */
if (start_hi != IWL_RATE_INVALID) {
new_rate = start_hi;
break;
}
new_rate = rate;
/* Loop again with lower rate */
if (low != IWL_RATE_INVALID)
rate = low;
/* Lower rate not available, use the original */
else
break;
/* Else try to raise the "search" rate to match "active" */
} else {
/* (2nd or later pass)
* If we've already tried to lower the rate, and are
* now trying to raise it, use the lower rate. */
if (new_rate != IWL_RATE_INVALID)
break;
/* Loop again with higher rate */
else if (high != IWL_RATE_INVALID) {
start_hi = high;
rate = high;
/* Higher rate not available, use the original */
} else {
new_rate = rate;
break;
}
}
}
return new_rate;
}
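/*
 * Usage note: each of the rs_switch_to_*() helpers below feeds its
 * candidate table through rs_get_best_rate() and then validates the
 * result against the mode's rate_mask before committing, e.g.:
 *
 *	rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
 *	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask))
 *		return -1;
 */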
/*
* Set up search table for MIMO2
*/
static int rs_switch_to_mimo2(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
== WLAN_HT_CAP_SM_PS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
if (hw_params(priv).tx_chains_num < 2)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
tbl->lq_type = LQ_MIMO2;
tbl->is_dup = lq_sta->is_dup;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
rate, rate_mask);
return -1;
}
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
tbl->current_rate, is_green);
return 0;
}
/*
* Set up search table for MIMO3
*/
static int rs_switch_to_mimo3(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
== WLAN_HT_CAP_SM_PS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
if (hw_params(priv).tx_chains_num < 3)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
tbl->lq_type = LQ_MIMO3;
tbl->is_dup = lq_sta->is_dup;
tbl->action = 0;
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
rate_mask = lq_sta->active_mimo3_rate;
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n",
rate, rate_mask);
if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
rate, rate_mask);
return -1;
}
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
tbl->current_rate, is_green);
return 0;
}
/*
* Set up search table for SISO
*/
static int rs_switch_to_siso(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
u8 is_green = lq_sta->is_green;
s32 rate;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
tbl->is_dup = lq_sta->is_dup;
tbl->lq_type = LQ_SISO;
tbl->action = 0;
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
if (is_green)
tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n",
rate, rate_mask);
return -1;
}
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
tbl->current_rate, is_green);
return 0;
}
/*
* Try to switch to new modulation mode from legacy
*/
static int rs_move_legacy_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta,
int index)
{
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if (!iwl_ht_enabled(priv))
/* stay in Legacy */
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
tbl->action > IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
if (!iwl_ht_enabled(priv))
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
valid_tx_ant =
first_antenna(hw_params(priv).valid_tx_ant);
}
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
switch (tbl->action) {
case IWL_LEGACY_SWITCH_ANTENNA1:
case IWL_LEGACY_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
tx_chains_num <= 1) ||
(tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
tx_chains_num <= 2))
break;
/* Don't change antenna if success has been great */
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
!priv->bt_full_concurrent &&
priv->bt_traffic_load ==
IWL_BT_COEX_TRAFFIC_LOAD_NONE)
break;
/* Set up search table to try other antenna */
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate, search_tbl)) {
update_search_tbl_counter = 1;
rs_set_expected_tpt_table(lq_sta, search_tbl);
goto out;
}
break;
case IWL_LEGACY_SWITCH_SISO:
IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
/* Set up search table to try SISO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret) {
lq_sta->action_counter = 0;
goto out;
}
break;
case IWL_LEGACY_SWITCH_MIMO2_AB:
case IWL_LEGACY_SWITCH_MIMO2_AC:
case IWL_LEGACY_SWITCH_MIMO2_BC:
IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
/* Set up search table to try MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret) {
lq_sta->action_counter = 0;
goto out;
}
break;
case IWL_LEGACY_SWITCH_MIMO3_ABC:
IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n");
/* Set up search table to try MIMO3 */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret) {
lq_sta->action_counter = 0;
goto out;
}
break;
}
tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (tbl->action == start_action)
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Try to switch to new modulation mode from SISO
*/
static int rs_move_siso_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
{
u8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
/* stay in SISO */
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
valid_tx_ant =
first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
switch (tbl->action) {
case IWL_SISO_SWITCH_ANTENNA1:
case IWL_SISO_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
tx_chains_num <= 1) ||
(tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
tx_chains_num <= 2))
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
!priv->bt_full_concurrent &&
priv->bt_traffic_load ==
IWL_BT_COEX_TRAFFIC_LOAD_NONE)
break;
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate, search_tbl)) {
update_search_tbl_counter = 1;
goto out;
}
break;
case IWL_SISO_SWITCH_MIMO2_AB:
case IWL_SISO_SWITCH_MIMO2_AC:
case IWL_SISO_SWITCH_MIMO2_BC:
IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_SISO_SWITCH_GI:
if (!tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
break;
if (tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_40))
break;
IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
memcpy(search_tbl, tbl, sz);
if (is_green) {
if (!tbl->is_SGI)
break;
else
IWL_ERR(priv,
"SGI was set in GF+SISO\n");
}
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
if (tbl->is_SGI) {
s32 tpt = lq_sta->last_tpt / 100;
if (tpt >= search_tbl->expected_tpt[index])
break;
}
search_tbl->current_rate =
rate_n_flags_from_tbl(priv, search_tbl,
index, is_green);
update_search_tbl_counter = 1;
goto out;
case IWL_SISO_SWITCH_MIMO3_ABC:
IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret)
goto out;
break;
}
tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (tbl->action == start_action)
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Try to switch to new modulation mode from MIMO2
*/
static int rs_move_mimo2_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
u8 tx_chains_num = hw_params(priv).tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
tbl->action == IWL_MIMO2_SWITCH_SISO_C)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
(tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
/* switch in SISO */
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
}
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent &&
(tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
tbl->action > IWL_MIMO2_SWITCH_SISO_C))
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
switch (tbl->action) {
case IWL_MIMO2_SWITCH_ANTENNA1:
case IWL_MIMO2_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
if (tx_chains_num <= 2)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO)
break;
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate, search_tbl)) {
update_search_tbl_counter = 1;
goto out;
}
break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
case IWL_MIMO2_SWITCH_SISO_C:
IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
/* Set up new search table for SISO */
memcpy(search_tbl, tbl, sz);
if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
search_tbl->ant_type = ANT_B;
else
search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO2_SWITCH_GI:
if (!tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
break;
if (tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_40))
break;
IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
/* Set up new search table for MIMO2 */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
/*
* If active table already uses the fastest possible
* modulation (dual stream with short guard interval),
* and it's working well, there's no need to look
* for a better type of modulation!
*/
if (tbl->is_SGI) {
s32 tpt = lq_sta->last_tpt / 100;
if (tpt >= search_tbl->expected_tpt[index])
break;
}
search_tbl->current_rate =
rate_n_flags_from_tbl(priv, search_tbl,
index, is_green);
update_search_tbl_counter = 1;
goto out;
case IWL_MIMO2_SWITCH_MIMO3_ABC:
IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret)
goto out;
break;
}
tbl->action++;
if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (tbl->action == start_action)
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Try to switch to new modulation mode from MIMO3
*/
static int rs_move_mimo3_to_other(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta,
struct ieee80211_conf *conf,
struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
u8 tx_chains_num = hw_params(priv).tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
tbl->action == IWL_MIMO3_SWITCH_SISO_C)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
default:
IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
break;
}
if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
(tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
/* switch in SISO */
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
}
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent &&
(tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
tbl->action > IWL_MIMO3_SWITCH_SISO_C))
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
switch (tbl->action) {
case IWL_MIMO3_SWITCH_ANTENNA1:
case IWL_MIMO3_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n");
if (tx_chains_num <= 3)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO)
break;
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate, search_tbl))
goto out;
break;
case IWL_MIMO3_SWITCH_SISO_A:
case IWL_MIMO3_SWITCH_SISO_B:
case IWL_MIMO3_SWITCH_SISO_C:
IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n");
/* Set up new search table for SISO */
memcpy(search_tbl, tbl, sz);
if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
search_tbl->ant_type = ANT_B;
else
search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO3_SWITCH_MIMO2_AB:
case IWL_MIMO3_SWITCH_MIMO2_AC:
case IWL_MIMO3_SWITCH_MIMO2_BC:
IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
break;
ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO3_SWITCH_GI:
if (!tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
break;
if (tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_40))
break;
IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n");
/* Set up new search table for MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
/*
* If active table already uses the fastest possible
* modulation (dual stream with short guard interval),
* and it's working well, there's no need to look
* for a better type of modulation!
*/
if (tbl->is_SGI) {
s32 tpt = lq_sta->last_tpt / 100;
if (tpt >= search_tbl->expected_tpt[index])
break;
}
search_tbl->current_rate =
rate_n_flags_from_tbl(priv, search_tbl,
index, is_green);
update_search_tbl_counter = 1;
goto out;
}
tbl->action++;
if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (tbl->action == start_action)
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Check whether we should continue using same modulation mode, or
* begin search for a new mode, based on:
* 1) # tx successes or failures while using this mode
* 2) # times calling this function
* 3) elapsed time in this mode (not used, for now)
*/
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
struct iwl_scale_tbl_info *tbl;
int i;
int active_tbl;
int flush_interval_passed = 0;
struct iwl_priv *priv;
priv = lq_sta->drv;
active_tbl = lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
/* If we've been disallowing search, see if we should now allow it */
if (lq_sta->stay_in_tbl) {
/* Elapsed time using current modulation mode */
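/* time_after() handles jiffies wraparound safely */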
if (lq_sta->flush_timer)
flush_interval_passed =
time_after(jiffies,
(unsigned long)(lq_sta->flush_timer +
IWL_RATE_SCALE_FLUSH_INTVL));
/*
* Check if we should allow search for new modulation mode.
* If many frames have failed or succeeded, or we've used
* this same modulation for a long time, allow search, and
* reset history stats that keep track of whether we should
* allow a new search. Also (below) reset all bitmaps and
* stats in active history.
*/
if (force_search ||
(lq_sta->total_failed > lq_sta->max_failure_limit) ||
(lq_sta->total_success > lq_sta->max_success_limit) ||
((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
&& (flush_interval_passed))) {
IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
lq_sta->total_failed,
lq_sta->total_success,
flush_interval_passed);
/* Allow search for new mode */
lq_sta->stay_in_tbl = 0; /* only place reset */
lq_sta->total_failed = 0;
lq_sta->total_success = 0;
lq_sta->flush_timer = 0;
/*
* Else if we've used this modulation mode enough repetitions
* (regardless of elapsed time or success/failure), reset
* history bitmaps and rate-specific stats for all rates in
* active table.
*/
} else {
lq_sta->table_count++;
if (lq_sta->table_count >=
lq_sta->table_count_limit) {
lq_sta->table_count = 0;
IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(
&(tbl->win[i]));
}
}
/* If transitioning to allow "search", reset all history
* bitmaps and stats in active table (this will become the new
* "search" table). */
if (!lq_sta->stay_in_tbl) {
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&(tbl->win[i]));
}
}
}
/*
* setup rate table in uCode
*/
static void rs_update_rate_tbl(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
int index, u8 is_green)
{
u32 rate;
/* Update uCode's rate table. */
rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
rs_fill_link_cmd(priv, lq_sta, rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
}
/*
* Do rate scaling and search for new modulation mode.
*/
static void rs_rate_scale_perform(struct iwl_priv *priv,
struct sk_buff *skb,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
int index;
int i;
struct iwl_rate_scale_data *window = NULL;
int current_tpt = IWL_INVALID_VALUE;
int low_tpt = IWL_INVALID_VALUE;
int high_tpt = IWL_INVALID_VALUE;
u32 fail_count;
s8 scale_action = 0;
u16 rate_mask;
u8 update_lq = 0;
struct iwl_scale_tbl_info *tbl, *tbl1;
u16 rate_scale_index_msk = 0;
u8 is_green = 0;
u8 active_tbl = 0;
u8 done_search = 0;
u16 high_low;
s32 sr;
u8 tid = IWL_MAX_TID_COUNT;
struct iwl_tid_data *tid_data;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->ctx;
IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
/* Send management frames and NO_ACK data using lowest rate. */
/* TODO: this could probably be improved.. */
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
tid = rs_tl_add_packet(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
lq_sta->is_agg = 1;
} else
lq_sta->is_agg = 0;
/*
* Select rate-scale / modulation-mode table to work with in
* the rest of this function: "search" if searching for better
* modulation mode, or "active" if doing rate scaling within a mode.
*/
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
else
active_tbl = 1 - lq_sta->active_tbl;
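/* lq_info[] holds two tables; the entry not currently active
 * serves as the "search" table */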
tbl = &(lq_sta->lq_info[active_tbl]);
if (is_legacy(tbl->lq_type))
lq_sta->is_green = 0;
else
lq_sta->is_green = rs_use_green(sta);
is_green = lq_sta->is_green;
/* current tx rate */
index = lq_sta->last_txrate_idx;
IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
tbl->lq_type);
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
if (lq_sta->band == IEEE80211_BAND_5GHZ)
/* supp_rates has no CCK bits in A mode */
rate_scale_index_msk = (u16) (rate_mask &
(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
else
rate_scale_index_msk = (u16) (rate_mask &
lq_sta->supp_rates);
} else
rate_scale_index_msk = rate_mask;
if (!rate_scale_index_msk)
rate_scale_index_msk = rate_mask;
if (!((1 << index) & rate_scale_index_msk)) {
IWL_ERR(priv, "Current Rate is not valid\n");
if (lq_sta->search_better_tbl) {
/* revert to active table if search table is not valid*/
tbl->lq_type = LQ_NONE;
lq_sta->search_better_tbl = 0;
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
/* get "active" rate info */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
index, is_green);
}
return;
}
/* Get expected throughput table and history window for current rate */
if (!tbl->expected_tpt) {
IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
return;
}
/* force user max rate if set by user */
if ((lq_sta->max_rate_idx != -1) &&
(lq_sta->max_rate_idx < index)) {
index = lq_sta->max_rate_idx;
update_lq = 1;
window = &(tbl->win[index]);
goto lq_update;
}
window = &(tbl->win[index]);
/*
* If there is not enough history to calculate actual average
* throughput, keep analyzing results of more tx frames, without
* changing rate or mode (bypass most of the rest of this function).
* Set up new rate table in uCode only if old rate is not supported
* in current association (use new rate found above).
*/
fail_count = window->counter - window->success_counter;
if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
"for index %d\n",
window->success_counter, window->counter, index);
/* Can't calculate this yet; not enough history */
window->average_tpt = IWL_INVALID_VALUE;
/* Should we stay with this modulation mode,
* or search for a new one? */
rs_stay_in_table(lq_sta, false);
goto out;
}
/* Else we have enough samples; calculate estimate of
* actual average throughput */
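/*
 * success_ratio is a percentage scaled by 128 (12800 == 100%), so
 * the expression below weights expected_tpt[] by the measured
 * success rate; the +64 term rounds to nearest.
 */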
if (window->average_tpt != ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128)) {
IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
window->average_tpt = ((window->success_ratio *
tbl->expected_tpt[index] + 64) / 128);
}
/* If we are searching for better modulation mode, check success. */
if (lq_sta->search_better_tbl &&
(iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) {
/* If good success, continue using the "search" mode;
* no need to send new link quality command, since we're
* continuing to use the setup that we've been trying. */
if (window->average_tpt > lq_sta->last_tpt) {
IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
"suc=%d cur-tpt=%d old-tpt=%d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
if (!is_legacy(tbl->lq_type))
lq_sta->enable_counter = 1;
/* Swap tables; "search" becomes "active" */
lq_sta->active_tbl = active_tbl;
current_tpt = window->average_tpt;
/* Else poor success; go back to mode in "active" table */
} else {
IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
"suc=%d cur-tpt=%d old-tpt=%d\n",
window->success_ratio,
window->average_tpt,
lq_sta->last_tpt);
/* Nullify "search" table */
tbl->lq_type = LQ_NONE;
/* Revert to "active" table */
active_tbl = lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
/* Revert to "active" rate and throughput info */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
current_tpt = lq_sta->last_tpt;
/* Need to set up a new rate table in uCode */
update_lq = 1;
}
/* Either way, we've made a decision; modulation mode
* search is done, allow rate adjustment next time. */
lq_sta->search_better_tbl = 0;
done_search = 1; /* Don't switch modes below! */
goto lq_update;
}
/* (Else) not in search of better modulation mode, try for better
* starting rate, while staying in this mode. */
high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
tbl->lq_type);
low = high_low & 0xff;
high = (high_low >> 8) & 0xff;
/* If user set max rate, don't allow rate higher than user constraint */
if ((lq_sta->max_rate_idx != -1) &&
(lq_sta->max_rate_idx < high))
high = IWL_RATE_INVALID;
sr = window->success_ratio;
/* Collect measured throughputs for current and adjacent rates */
current_tpt = window->average_tpt;
if (low != IWL_RATE_INVALID)
low_tpt = tbl->win[low].average_tpt;
if (high != IWL_RATE_INVALID)
high_tpt = tbl->win[high].average_tpt;
scale_action = 0;
/* Too many failures, decrease rate */
if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
scale_action = -1;
/* No throughput measured yet for adjacent rates; try increase. */
} else if ((low_tpt == IWL_INVALID_VALUE) &&
(high_tpt == IWL_INVALID_VALUE)) {
if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
scale_action = 1;
else if (low != IWL_RATE_INVALID)
scale_action = 0;
}
/* Both adjacent throughputs are measured, but neither one has better
* throughput; we're using the best rate, don't change it! */
else if ((low_tpt != IWL_INVALID_VALUE) &&
(high_tpt != IWL_INVALID_VALUE) &&
(low_tpt < current_tpt) &&
(high_tpt < current_tpt))
scale_action = 0;
/* At least one adjacent rate's throughput is measured,
* and may have better performance. */
else {
/* Higher adjacent rate's throughput is measured */
if (high_tpt != IWL_INVALID_VALUE) {
/* Higher rate has better throughput */
if (high_tpt > current_tpt &&
sr >= IWL_RATE_INCREASE_TH) {
scale_action = 1;
} else {
scale_action = 0;
}
/* Lower adjacent rate's throughput is measured */
} else if (low_tpt != IWL_INVALID_VALUE) {
/* Lower rate has better throughput */
if (low_tpt > current_tpt) {
IWL_DEBUG_RATE(priv,
"decrease rate because of low tpt\n");
scale_action = -1;
} else if (sr >= IWL_RATE_INCREASE_TH) {
scale_action = 1;
}
}
}
/* Sanity check; asked for decrease, but success rate or throughput
* has been good at old rate. Don't change it. */
if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
((sr > IWL_RATE_HIGH_TH) ||
(current_tpt > (100 * tbl->expected_tpt[low]))))
scale_action = 0;
if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type))
scale_action = -1;
if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
scale_action = -1;
if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
/*
* don't set scale_action, don't want to scale up if
* the rate scale doesn't otherwise think that is a
* good idea.
*/
} else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
scale_action = -1;
}
}
lq_sta->last_bt_traffic = priv->bt_traffic_load;
if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
goto lq_update;
}
switch (scale_action) {
case -1:
/* Decrease starting rate, update uCode's rate table */
if (low != IWL_RATE_INVALID) {
update_lq = 1;
index = low;
}
break;
case 1:
/* Increase starting rate, update uCode's rate table */
if (high != IWL_RATE_INVALID) {
update_lq = 1;
index = high;
}
break;
case 0:
/* No change */
default:
break;
}
IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
"high %d type %d\n",
index, scale_action, low, high, tbl->lq_type);
lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq)
rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
/* Should we stay with this modulation mode,
* or search for a new one? */
rs_stay_in_table(lq_sta, false);
}
/*
* Search for new modulation mode if we're:
* 1) Not changing rates right now
* 2) Not just finishing up a search
* 3) Allowing a new search
*/
if (!update_lq && !done_search && !lq_sta->stay_in_tbl && window->counter) {
/* Save current throughput to compare with "search" throughput*/
lq_sta->last_tpt = current_tpt;
/* Select a new "search" modulation mode to try.
* If one is found, set up the new "search" table. */
if (is_legacy(tbl->lq_type))
rs_move_legacy_other(priv, lq_sta, conf, sta, index);
else if (is_siso(tbl->lq_type))
rs_move_siso_to_other(priv, lq_sta, conf, sta, index);
else if (is_mimo2(tbl->lq_type))
rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index);
else
rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index);
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&(tbl->win[i]));
/* Use new "search" start rate */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
} else
done_search = 1;
}
if (done_search && !lq_sta->stay_in_tbl) {
/* If the "active" (non-search) mode was legacy,
* and we've tried switching antennas,
* but we haven't been able to try HT modes (not available),
* stay with best antenna legacy modulation for a while
* before next round of mode comparisons. */
tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
lq_sta->action_counter > tbl1->max_search) {
IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
rs_set_stay_in_table(priv, 1, lq_sta);
}
/* If we're in an HT mode, and all 3 mode switch actions
* have been tried and compared, stay in this best modulation
* mode for a while before next round of mode comparisons. */
if (lq_sta->enable_counter &&
(lq_sta->action_counter >= tbl1->max_search) &&
iwl_ht_enabled(priv)) {
if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
u8 sta_id = lq_sta->lq.sta_id;
tid_data = &priv->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
tid);
rs_tl_turn_on_agg(priv, tid,
lq_sta, sta);
}
}
rs_set_stay_in_table(priv, 0, lq_sta);
}
}
out:
tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
lq_sta->last_txrate_idx = index;
}
/**
* rs_initialize_lq - Initialize a station's hardware rate table
*
* The uCode's station table contains a table of fallback rates
* for automatic fallback during transmission.
*
* NOTE: This sets up a default set of values. These will be replaced later
* if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
* rc80211_simple.
*
* NOTE: Run REPLY_ADD_STA command to set up station table entry, before
* calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
* which requires station table entry to exist).
*/
static void rs_initialize_lq(struct iwl_priv *priv,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta)
{
struct iwl_scale_tbl_info *tbl;
int rate_idx;
int i;
u32 rate;
u8 use_green = rs_use_green(sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
struct iwl_station_priv *sta_priv;
struct iwl_rxon_context *ctx;
if (!sta || !lq_sta)
return;
sta_priv = (void *)sta->drv_priv;
ctx = sta_priv->ctx;
i = lq_sta->last_txrate_idx;
valid_tx_ant = hw_params(priv).valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
else
active_tbl = 1 - lq_sta->active_tbl;
tbl = &(lq_sta->lq_info[active_tbl]);
if ((i < 0) || (i >= IWL_RATE_COUNT))
i = 0;
rate = iwl_rates[i].plcp;
tbl->ant_type = first_antenna(valid_tx_ant);
rate |= tbl->ant_type << RATE_MCS_ANT_POS;
if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
rate |= RATE_MCS_CCK_MSK;
rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
rs_toggle_antenna(valid_tx_ant, &rate, tbl);
rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
tbl->current_rate = rate;
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
}
static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
struct iwl_op_mode *op_mode __maybe_unused =
(struct iwl_op_mode *)priv_r;
struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = priv_sta;
int rate_idx;
IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
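/* the 5 GHz band has no CCK rates, so mac80211's index is
 * relative to the first OFDM rate */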
if ((sband->band == IEEE80211_BAND_5GHZ) &&
(lq_sta->max_rate_idx != -1))
lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
if ((lq_sta->max_rate_idx < 0) ||
(lq_sta->max_rate_idx >= IWL_RATE_COUNT))
lq_sta->max_rate_idx = -1;
}
/* Treat uninitialized rate scaling data same as non-existing. */
if (lq_sta && !lq_sta->drv) {
IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
priv_sta = NULL;
}
/* Send management frames and NO_ACK data using lowest rate. */
if (rate_control_send_low(sta, priv_sta, txrc))
return;
rate_idx = lq_sta->last_txrate_idx;
if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M share the same MCS index */
rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
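/* each additional spatial stream shifts the MCS index up by
 * MCS_INDEX_PER_STREAM (8) */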
if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO3_6M_PLCP)
rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO2_6M_PLCP)
rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA;
if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
} else {
/* Check for invalid rates */
if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
((sband->band == IEEE80211_BAND_5GHZ) &&
(rate_idx < IWL_FIRST_OFDM_RATE)))
rate_idx = rate_lowest_index(sband, sta);
/* On valid 5 GHz rate, adjust index */
else if (sband->band == IEEE80211_BAND_5GHZ)
rate_idx -= IWL_FIRST_OFDM_RATE;
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
}
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
gfp_t gfp)
{
struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
struct iwl_op_mode *op_mode __maybe_unused =
(struct iwl_op_mode *)priv_rate;
struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
IWL_DEBUG_RATE(priv, "create station rate scale window\n");
return &sta_priv->lq_sta;
}
/*
* Called after adding a new station to initialize rate scaling
*/
void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
int i, j;
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &priv->hw->conf;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct iwl_station_priv *sta_priv;
struct iwl_lq_sta *lq_sta;
struct ieee80211_supported_band *sband;
sta_priv = (struct iwl_station_priv *) sta->drv_priv;
lq_sta = &sta_priv->lq_sta;
sband = hw->wiphy->bands[conf->channel->band];
lq_sta->lq.sta_id = sta_id;
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
lq_sta->flush_timer = 0;
lq_sta->supp_rates = sta->supp_rates[sband->band];
IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
sta_id);
/* TODO: what is a good starting rate for STA? About middle? Maybe not
* the lowest or the highest rate.. Could consider using RSSI from
* previous packets? Need to have IEEE 802.1X auth succeed immediately
* after assoc.. */
lq_sta->is_dup = 0;
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
lq_sta->is_green = rs_use_green(sta);
lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
lq_sta->band = priv->band;
/*
* active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
* supp_rates[] does not; shift to convert format, force 9 MBits off.
*/
lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
lq_sta->active_siso_rate &= ~((u16)0x2);
lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
/* Same here */
lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
lq_sta->active_mimo3_rate &= ~((u16)0x2);
lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate,
lq_sta->active_mimo3_rate);
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
first_antenna(hw_params(priv).valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
hw_params(priv).valid_tx_ant &
~first_antenna(hw_params(priv).valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
} else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
hw_params(priv).valid_tx_ant;
}
/* as default allow aggregation for all tids */
lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
lq_sta->drv = priv;
/* Set last_txrate_idx to lowest rate */
lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
priv->tm_fixed_rate = 0;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
#endif
rs_initialize_lq(priv, sta, lq_sta);
}
static void rs_fill_link_cmd(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta, u32 new_rate)
{
struct iwl_scale_tbl_info tbl_type;
int index = 0;
int rate_idx;
int repeat_rate = 0;
u8 ant_toggle_cnt = 0;
u8 use_ht_possible = 1;
u8 valid_tx_ant = 0;
struct iwl_station_priv *sta_priv =
container_of(lq_sta, struct iwl_station_priv, lq_sta);
struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
/* Interpret new_rate (rate_n_flags) */
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
&tbl_type, &rate_idx);
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
first_antenna(hw_params(priv).valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
if (is_legacy(tbl_type.lq_type)) {
ant_toggle_cnt = 1;
repeat_rate = IWL_NUMBER_TRY;
} else {
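/* for HT, cap the initial-rate repeats so the retry sequence
 * presumably stays below the aggregation-disable start threshold */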
repeat_rate = min(IWL_HT_NUMBER_TRY,
LINK_QUAL_AGG_DISABLE_START_DEF - 1);
}
lq_cmd->general_params.mimo_delimiter =
is_mimo(tbl_type.lq_type) ? 1 : 0;
/* Fill 1st table entry (index 0) */
lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
if (num_of_ant(tbl_type.ant_type) == 1) {
lq_cmd->general_params.single_stream_ant_msk =
tbl_type.ant_type;
} else if (num_of_ant(tbl_type.ant_type) == 2) {
lq_cmd->general_params.dual_stream_ant_msk =
tbl_type.ant_type;
} /* otherwise we don't modify the existing value */
index++;
repeat_rate--;
if (priv) {
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
valid_tx_ant = hw_params(priv).valid_tx_ant;
}
/* Fill rest of rate table */
while (index < LINK_QUAL_MAX_RETRY_NUM) {
/* Repeat initial/next rate.
* For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
* For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
else if (priv &&
rs_toggle_antenna(valid_tx_ant,
&new_rate, &tbl_type))
ant_toggle_cnt = 1;
}
/* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
/* Fill next table entry */
lq_cmd->rs_table[index].rate_n_flags =
cpu_to_le32(new_rate);
repeat_rate--;
index++;
}
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
&rate_idx);
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
first_antenna(hw_params(priv).valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
* If initial rate was MIMO, this will finally end up
* as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
if (is_mimo(tbl_type.lq_type))
lq_cmd->general_params.mimo_delimiter = index;
/* Get next rate */
new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
use_ht_possible);
/* How many times should we repeat the next rate? */
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
else if (priv &&
rs_toggle_antenna(valid_tx_ant,
&new_rate, &tbl_type))
ant_toggle_cnt = 1;
repeat_rate = IWL_NUMBER_TRY;
} else {
repeat_rate = IWL_HT_NUMBER_TRY;
}
/* Don't allow HT rates after next pass.
* rs_get_lower_rate() will change type to LQ_A or LQ_G. */
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
/* Fill next table entry */
lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
index++;
repeat_rate--;
}
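/* "a ?: b" is a GNU extension: use the negotiated aggregation
 * buffer size when non-zero, otherwise the default frame limit */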
lq_cmd->agg_params.agg_frame_cnt_limit =
sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
lq_cmd->agg_params.agg_time_limit =
cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
/*
* overwrite if needed, pass aggregation time limit
* to uCode in uSec
*/
if (priv && cfg(priv)->bt_params &&
cfg(priv)->bt_params->agg_time_limit &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
lq_cmd->agg_params.agg_time_limit =
cpu_to_le16(cfg(priv)->bt_params->agg_time_limit);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
/* rate scale requires free function to be implemented */
static void rs_free(void *priv_rate)
{
return;
}
static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
void *priv_sta)
{
struct iwl_op_mode *op_mode __maybe_unused = priv_r;
struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
IWL_DEBUG_RATE(priv, "enter\n");
IWL_DEBUG_RATE(priv, "leave\n");
}
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index)
{
struct iwl_priv *priv;
u8 valid_tx_ant;
u8 ant_sel_tx;
priv = lq_sta->drv;
valid_tx_ant = hw_params(priv).valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
>> RATE_MCS_ANT_POS);
if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
*rate_n_flags = lq_sta->dbg_fixed_rate;
IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
} else {
lq_sta->dbg_fixed_rate = 0;
IWL_ERR(priv,
"Invalid antenna selection 0x%X, Valid is 0x%X\n",
ant_sel_tx, valid_tx_ant);
IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
}
} else {
IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
}
}
static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_priv *priv;
char buf[64];
size_t buf_size;
u32 parsed_rate;
priv = lq_sta->drv;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
if (sscanf(buf, "%x", &parsed_rate) == 1)
lq_sta->dbg_fixed_rate = parsed_rate;
else
lq_sta->dbg_fixed_rate = 0;
rs_program_fix_rate(priv, lq_sta);
return count;
}
static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
char *buff;
int desc = 0;
int i = 0;
int index = 0;
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_priv *priv;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
priv = lq_sta->drv;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
lq_sta->total_failed, lq_sta->total_success,
lq_sta->active_legacy_rate);
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
(hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
(hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
(hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" :
((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
desc += sprintf(buff+desc, " %s",
(tbl->is_ht40) ? "40MHz" : "20MHz");
desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
(lq_sta->is_green) ? "GF enabled" : "",
(lq_sta->is_agg) ? "AGG on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
desc += sprintf(buff+desc, "general:"
"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
lq_sta->lq.general_params.flags,
lq_sta->lq.general_params.mimo_delimiter,
lq_sta->lq.general_params.single_stream_ant_msk,
lq_sta->lq.general_params.dual_stream_ant_msk);
desc += sprintf(buff+desc, "agg:"
"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
lq_sta->lq.agg_params.agg_dis_start_th,
lq_sta->lq.agg_params.agg_frame_cnt_limit);
desc += sprintf(buff+desc,
"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
lq_sta->lq.general_params.start_rate_index[0],
lq_sta->lq.general_params.start_rate_index[1],
lq_sta->lq.general_params.start_rate_index[2],
lq_sta->lq.general_params.start_rate_index[3]);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
index = iwl_hwrate_to_plcp_idx(
le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
if (is_legacy(tbl->lq_type)) {
desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
iwl_rate_mcs[index].mbps);
} else {
desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n",
i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
}
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
.write = rs_sta_dbgfs_scale_table_write,
.read = rs_sta_dbgfs_scale_table_read,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
char *buff;
int desc = 0;
int i, j;
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
buff = kmalloc(1024, GFP_KERNEL);
if (!buff)
return -ENOMEM;
for (i = 0; i < LQ_SIZE; i++) {
desc += sprintf(buff+desc,
"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
"rate=0x%X\n",
lq_sta->active_tbl == i ? "*" : "x",
lq_sta->lq_info[i].lq_type,
lq_sta->lq_info[i].is_SGI,
lq_sta->lq_info[i].is_ht40,
lq_sta->lq_info[i].is_dup,
lq_sta->is_green,
lq_sta->lq_info[i].current_rate);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
"counter=%d success=%d %%=%d\n",
lq_sta->lq_info[i].win[j].counter,
lq_sta->lq_info[i].win[j].success_counter,
lq_sta->lq_info[i].win[j].success_ratio);
}
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
kfree(buff);
return ret;
}
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.read = rs_sta_dbgfs_stats_table_read,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
char buff[120];
int desc = 0;
if (is_Ht(tbl->lq_type))
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
tbl->expected_tpt[lq_sta->last_txrate_idx]);
else
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
}
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
.read = rs_sta_dbgfs_rate_scale_data_read,
.open = simple_open,
.llseek = default_llseek,
};
static void rs_add_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = priv_sta;
lq_sta->rs_sta_dbgfs_scale_table_file =
debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
lq_sta, &rs_sta_dbgfs_scale_table_ops);
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
lq_sta->rs_sta_dbgfs_rate_scale_data_file =
debugfs_create_file("rate_scale_data", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
}
static void rs_remove_debugfs(void *priv, void *priv_sta)
{
struct iwl_lq_sta *lq_sta = priv_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif
/*
* Initialization of rate scaling information is done by driver after
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
}
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
.rate_init = rs_rate_init_stub,
.alloc = rs_alloc,
.free = rs_free,
.alloc_sta = rs_alloc_sta,
.free_sta = rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_add_debugfs,
.remove_sta_debugfs = rs_remove_debugfs,
#endif
};
int iwlagn_rate_control_register(void)
{
return ieee80211_rate_control_register(&rs_ops);
}
void iwlagn_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_ops);
}
| gpl-2.0 |
Backspace-Dev/htx21 | drivers/s390/scsi/zfcp_qdio.c | 3497 | 13894 | /*
* zfcp device driver
*
* Setup and helper functions to access QDIO.
*
* Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
static bool enable_multibuffer;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
int pos;
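/*
 * Allocate one zeroed page per QBUFF_PER_PAGE buffers; the second
 * loop then points the intermediate slots at consecutive
 * qdio_buffers within the same page.
 */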
for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
if (!sbal[pos])
return -ENOMEM;
}
for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
if (pos % QBUFF_PER_PAGE)
sbal[pos] = sbal[pos - 1] + 1;
return 0;
}
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;
dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
if (qdio_err & QDIO_ERROR_SLSB_STATE) {
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_shutdown(adapter, 0, id);
return;
}
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED, id);
}
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
int i, sbal_idx;
for (i = first; i < first + cnt; i++) {
sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
}
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
unsigned long long now, span;
int used;
now = get_clock_monotonic();
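/* s390 TOD-clock bit 51 represents one microsecond, so shifting
 * the delta right by 12 converts it to microseconds */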
span = (now - qdio->req_q_time) >> 12;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
if (unlikely(qdio_err)) {
zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
return;
}
/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
spin_lock_irq(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock_irq(&qdio->stat_lock);
atomic_add(count, &qdio->req_q_free);
wake_up(&qdio->req_q_wq);
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_adapter *adapter = qdio->adapter;
struct qdio_buffer_element *sbale;
int sbal_no, sbal_idx;
void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
u64 req_id;
u8 scount;
if (unlikely(qdio_err)) {
memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
if (zfcp_adapter_multi_buffer_active(adapter)) {
sbale = qdio->res_q[idx]->element;
req_id = (u64) sbale->addr;
scount = sbale->scount + 1; /* incl. signaling SBAL */
for (sbal_no = 0; sbal_no < scount; sbal_no++) {
sbal_idx = (idx + sbal_no) %
QDIO_MAX_BUFFERS_PER_Q;
pl[sbal_no] = qdio->res_q[sbal_idx];
}
zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
}
zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}
/*
* go through all SBALs from input queue currently
* returned by QDIO layer
*/
for (sbal_no = 0; sbal_no < count; sbal_no++) {
sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
zfcp_fsf_reqid_check(qdio, sbal_idx);
}
/*
* put SBALs back to response queue
*/
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
/* don't exceed last allowed SBAL */
if (q_req->sbal_last == q_req->sbal_limit)
return NULL;
/* set chaining flag in first SBALE of current SBAL */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
/* calculate index of next SBAL */
q_req->sbal_last++;
q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
/* keep this request's number of SBALs up-to-date */
q_req->sbal_number++;
BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);
/* start at first SBALE of new SBAL */
q_req->sbale_curr = 0;
/* set storage-block type for new SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->sflags |= q_req->sbtype;
return sbale;
}
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
return zfcp_qdio_sbal_chain(qdio, q_req);
q_req->sbale_curr++;
return zfcp_qdio_sbale_curr(qdio, q_req);
}
/**
* zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* @sg: scatter-gather list
* Returns: zero or -EINVAL on error
*/
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->sflags |= q_req->sbtype;
for (; sg; sg = sg_next(sg)) {
sbale = zfcp_qdio_sbale_next(qdio, q_req);
if (!sbale) {
atomic_inc(&qdio->req_q_full);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
q_req->sbal_number);
return -EINVAL;
}
sbale->addr = sg_virt(sg);
sbale->length = sg->length;
}
return 0;
}
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
spin_unlock_irq(&qdio->req_q_lock);
return 0;
}
/**
* zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
* @qdio: pointer to struct zfcp_qdio
*
* The req_q_lock must be held by the caller of this function, and
* this function may only be called from process context; it will
* sleep when waiting for a free sbal.
*
* Returns: 0 on success, -EIO if there is no free sbal after waiting.
*/
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
spin_unlock_irq(&qdio->req_q_lock);
ret = wait_event_interruptible_timeout(qdio->req_q_wq,
zfcp_qdio_sbal_check(qdio), 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
if (ret > 0)
return 0;
if (!ret) {
atomic_inc(&qdio->req_q_full);
/* assume hanging outbound queue, try queue recovery */
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
* @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
int retval;
u8 sbal_number = q_req->sbal_number;
spin_lock(&qdio->stat_lock);
zfcp_qdio_account(qdio);
spin_unlock(&qdio->stat_lock);
retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
q_req->sbal_first, sbal_number);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
sbal_number);
return retval;
}
/* account for transferred buffers */
atomic_sub(sbal_number, &qdio->req_q_free);
qdio->req_q_idx += sbal_number;
qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
return 0;
}
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
struct zfcp_qdio *qdio)
{
memset(id, 0, sizeof(*id));
id->cdev = qdio->adapter->ccw_device;
id->q_format = QDIO_ZFCP_QFMT;
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
if (enable_multibuffer)
id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
id->no_input_qs = 1;
id->no_output_qs = 1;
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
id->input_sbal_addr_array = (void **) (qdio->res_q);
id->output_sbal_addr_array = (void **) (qdio->req_q);
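/* presumably leaves headroom for two maximum-size requests before
 * QDIO rescans the output queue */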
id->scan_threshold =
QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @qdio: pointer to struct zfcp_qdio
* Returns: -ENOMEM on memory allocation error or return value from
* qdio_allocate
*/
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
struct qdio_initialize init_data;
if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
zfcp_qdio_buffers_enqueue(qdio->res_q))
return -ENOMEM;
zfcp_qdio_setup_init_data(&init_data, qdio);
init_waitqueue_head(&qdio->req_q_wq);
return qdio_allocate(&init_data);
}
/**
* zfcp_qdio_close - close qdio queues for an adapter
* @qdio: pointer to structure zfcp_qdio
*/
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
struct zfcp_adapter *adapter = qdio->adapter;
int idx, count;
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
spin_lock_irq(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&qdio->req_q_free);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
}
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, 0);
}
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
* Returns: 0 on success, otherwise -EIO
*/
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer_element *sbale;
struct qdio_initialize init_data;
struct zfcp_adapter *adapter = qdio->adapter;
struct ccw_device *cdev = adapter->ccw_device;
struct qdio_ssqd_desc ssqd;
int cc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);
zfcp_qdio_setup_init_data(&init_data, qdio);
if (qdio_establish(&init_data))
goto failed_establish;
if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status);
if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
} else {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
}
qdio->max_sbale_per_req =
ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
- 2;
if (qdio_activate(cdev))
goto failed_qdio;
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
sbale->sflags = 0;
sbale->addr = NULL;
}
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
/* set index of first available SBAL / number of available SBALs */
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
if (adapter->scsi_host) {
adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
}
return 0;
failed_qdio:
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
dev_err(&cdev->dev,
"Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
int p;
if (!qdio)
return;
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
free_page((unsigned long) qdio->req_q[p]);
free_page((unsigned long) qdio->res_q[p]);
}
kfree(qdio);
}
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
struct zfcp_qdio *qdio;
qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
if (!qdio)
return -ENOMEM;
qdio->adapter = adapter;
if (zfcp_qdio_allocate(qdio)) {
zfcp_qdio_destroy(qdio);
return -ENOMEM;
}
spin_lock_init(&qdio->req_q_lock);
spin_lock_init(&qdio->stat_lock);
adapter->qdio = qdio;
return 0;
}
/**
* zfcp_qdio_siosl - Trigger logging in FCP channel
* @adapter: The zfcp_adapter where to trigger logging
*
* Call the cio siosl function to trigger hardware logging. This
* wrapper function sets a flag to ensure hardware logging is only
* triggered once before going through qdio shutdown.
*
* The triggers are always run from qdio tasklet context, so no
* additional synchronization is necessary.
*/
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
int rc;
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
return;
rc = ccw_device_siosl(adapter->ccw_device);
if (!rc)
atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&adapter->status);
}
| gpl-2.0 |
arnavgosain/moto_msm8226 | net/nfc/netlink.c | 4777 | 15204 | /*
* Copyright (C) 2011 Instituto Nokia de Tecnologia
*
* Authors:
* Lauro Ramos Venancio <lauro.venancio@openbossa.org>
* Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
#include <net/genetlink.h>
#include <linux/nfc.h>
#include <linux/slab.h>
#include "nfc.h"
static struct genl_multicast_group nfc_genl_event_mcgrp = {
.name = NFC_GENL_MCAST_EVENT_NAME,
};
struct genl_family nfc_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = NFC_GENL_NAME,
.version = NFC_GENL_VERSION,
.maxattr = NFC_ATTR_MAX,
};
static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_DEVICE_INDEX] = { .type = NLA_U32 },
[NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
.len = NFC_DEVICE_NAME_MAXSIZE },
[NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
[NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
[NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
[NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
};
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
struct netlink_callback *cb, int flags)
{
void *hdr;
hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
&nfc_genl_family, flags, NFC_CMD_GET_TARGET);
if (!hdr)
return -EMSGSIZE;
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
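/* the NLA_PUT* macros jump to the nla_put_failure label below on error */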
NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
if (target->nfcid1_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
target->nfcid1);
if (target->sensb_res_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
target->sensb_res);
if (target->sensf_res_len > 0)
NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
target->sensf_res);
return genlmsg_end(msg, hdr);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
{
struct nfc_dev *dev;
int rc;
u32 idx;
rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize,
nfc_genl_family.attrbuf,
nfc_genl_family.maxattr,
nfc_genl_policy);
if (rc < 0)
return ERR_PTR(rc);
if (!nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX])
return ERR_PTR(-EINVAL);
idx = nla_get_u32(nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
return ERR_PTR(-ENODEV);
return dev;
}
static int nfc_genl_dump_targets(struct sk_buff *skb,
struct netlink_callback *cb)
{
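/* cb->args[] persists across dump callbacks: args[0] holds the next target index, args[1] the nfc_dev reference */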
int i = cb->args[0];
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
int rc;
if (!dev) {
dev = __get_device_from_cb(cb);
if (IS_ERR(dev))
return PTR_ERR(dev);
cb->args[1] = (long) dev;
}
spin_lock_bh(&dev->targets_lock);
cb->seq = dev->targets_generation;
while (i < dev->n_targets) {
rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
NLM_F_MULTI);
if (rc < 0)
break;
i++;
}
spin_unlock_bh(&dev->targets_lock);
cb->args[0] = i;
return skb->len;
}
static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
{
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
if (dev)
nfc_put_device(dev);
return 0;
}
int nfc_genl_targets_found(struct nfc_dev *dev)
{
struct sk_buff *msg;
void *hdr;
dev->genl_data.poll_req_pid = 0;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_EVENT_TARGETS_FOUND);
if (!hdr)
goto free_msg;
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
genlmsg_end(msg, hdr);
return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
nla_put_failure:
genlmsg_cancel(msg, hdr);
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
int nfc_genl_device_added(struct nfc_dev *dev)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_EVENT_DEVICE_ADDED);
if (!hdr)
goto free_msg;
NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
genlmsg_end(msg, hdr);
genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
int nfc_genl_device_removed(struct nfc_dev *dev)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_EVENT_DEVICE_REMOVED);
if (!hdr)
goto free_msg;
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
genlmsg_end(msg, hdr);
genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
u32 pid, u32 seq,
struct netlink_callback *cb,
int flags)
{
void *hdr;
hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
NFC_CMD_GET_DEVICE);
if (!hdr)
return -EMSGSIZE;
if (cb)
genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
return genlmsg_end(msg, hdr);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int nfc_genl_dump_devices(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
bool first_call = false;
if (!iter) {
first_call = true;
iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
cb->args[0] = (long) iter;
}
mutex_lock(&nfc_devlist_mutex);
cb->seq = nfc_devlist_generation;
if (first_call) {
nfc_device_iter_init(iter);
dev = nfc_device_iter_next(iter);
}
while (dev) {
int rc;
rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
if (rc < 0)
break;
dev = nfc_device_iter_next(iter);
}
mutex_unlock(&nfc_devlist_mutex);
cb->args[1] = (long) dev;
return skb->len;
}
static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
nfc_device_iter_exit(iter);
kfree(iter);
return 0;
}
int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode)
{
struct sk_buff *msg;
void *hdr;
pr_debug("DEP link is up\n");
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP);
if (!hdr)
goto free_msg;
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
if (rf_mode == NFC_RF_INITIATOR)
NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
genlmsg_end(msg, hdr);
dev->dep_link_up = true;
genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
{
struct sk_buff *msg;
void *hdr;
pr_debug("DEP link is down\n");
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_CMD_DEP_LINK_DOWN);
if (!hdr)
goto free_msg;
NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
genlmsg_end(msg, hdr);
genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct nfc_dev *dev;
u32 idx;
int rc = -ENOBUFS;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg) {
rc = -ENOMEM;
goto out_putdev;
}
rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
NULL, 0);
if (rc < 0)
goto out_free;
nfc_put_device(dev);
return genlmsg_reply(msg, info);
out_free:
nlmsg_free(msg);
out_putdev:
nfc_put_device(dev);
return rc;
}
static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
u32 idx;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
rc = nfc_dev_up(dev);
nfc_put_device(dev);
return rc;
}
static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
u32 idx;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
rc = nfc_dev_down(dev);
nfc_put_device(dev);
return rc;
}
static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
u32 idx;
u32 protocols;
pr_debug("Poll start\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
!info->attrs[NFC_ATTR_PROTOCOLS])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
mutex_lock(&dev->genl_data.genl_data_mutex);
rc = nfc_start_poll(dev, protocols);
if (!rc)
dev->genl_data.poll_req_pid = info->snd_pid;
mutex_unlock(&dev->genl_data.genl_data_mutex);
nfc_put_device(dev);
return rc;
}
static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
u32 idx;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
mutex_lock(&dev->genl_data.genl_data_mutex);
if (dev->genl_data.poll_req_pid != info->snd_pid) {
rc = -EBUSY;
goto out;
}
rc = nfc_stop_poll(dev);
dev->genl_data.poll_req_pid = 0;
out:
mutex_unlock(&dev->genl_data.genl_data_mutex);
nfc_put_device(dev);
return rc;
}
static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc, tgt_idx;
u32 idx;
u8 comm;
pr_debug("DEP link up\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
!info->attrs[NFC_ATTR_COMM_MODE])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
if (!info->attrs[NFC_ATTR_TARGET_INDEX])
tgt_idx = NFC_TARGET_IDX_ANY;
else
tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]);
if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE)
return -EINVAL;
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
rc = nfc_dep_link_up(dev, tgt_idx, comm);
nfc_put_device(dev);
return rc;
}
static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
u32 idx;
if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
return -ENODEV;
rc = nfc_dep_link_down(dev);
nfc_put_device(dev);
return rc;
}
static struct genl_ops nfc_genl_ops[] = {
{
.cmd = NFC_CMD_GET_DEVICE,
.doit = nfc_genl_get_device,
.dumpit = nfc_genl_dump_devices,
.done = nfc_genl_dump_devices_done,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_DEV_UP,
.doit = nfc_genl_dev_up,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_DEV_DOWN,
.doit = nfc_genl_dev_down,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_START_POLL,
.doit = nfc_genl_start_poll,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_STOP_POLL,
.doit = nfc_genl_stop_poll,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_DEP_LINK_UP,
.doit = nfc_genl_dep_link_up,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_DEP_LINK_DOWN,
.doit = nfc_genl_dep_link_down,
.policy = nfc_genl_policy,
},
{
.cmd = NFC_CMD_GET_TARGET,
.dumpit = nfc_genl_dump_targets,
.done = nfc_genl_dump_targets_done,
.policy = nfc_genl_policy,
},
};
static int nfc_genl_rcv_nl_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
struct class_dev_iter iter;
struct nfc_dev *dev;
if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
goto out;
pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
nfc_device_iter_init(&iter);
dev = nfc_device_iter_next(&iter);
while (dev) {
if (dev->genl_data.poll_req_pid == n->pid) {
nfc_stop_poll(dev);
dev->genl_data.poll_req_pid = 0;
}
dev = nfc_device_iter_next(&iter);
}
nfc_device_iter_exit(&iter);
out:
return NOTIFY_DONE;
}
void nfc_genl_data_init(struct nfc_genl_data *genl_data)
{
genl_data->poll_req_pid = 0;
mutex_init(&genl_data->genl_data_mutex);
}
void nfc_genl_data_exit(struct nfc_genl_data *genl_data)
{
mutex_destroy(&genl_data->genl_data_mutex);
}
static struct notifier_block nl_notifier = {
.notifier_call = nfc_genl_rcv_nl_event,
};
/**
* nfc_genl_init() - Initialize netlink interface
*
* This initialization function registers the nfc netlink family.
*/
int __init nfc_genl_init(void)
{
int rc;
rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops,
ARRAY_SIZE(nfc_genl_ops));
if (rc)
return rc;
rc = genl_register_mc_group(&nfc_genl_family, &nfc_genl_event_mcgrp);
netlink_register_notifier(&nl_notifier);
return rc;
}
/**
* nfc_genl_exit() - Deinitialize netlink interface
*
* This exit function unregisters the nfc netlink family.
*/
void nfc_genl_exit(void)
{
netlink_unregister_notifier(&nl_notifier);
genl_unregister_family(&nfc_genl_family);
}
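/*
 * Example (not part of this file): a minimal userspace sketch that
 * queries device 0 through this family using libnl-3. The libnl call
 * names are assumptions about that library's API; error handling is
 * omitted.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>
int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;
	genl_connect(sk);
	/* resolve the numeric family id registered by nfc_genl_init() */
	family = genl_ctrl_resolve(sk, NFC_GENL_NAME);
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NFC_CMD_GET_DEVICE, NFC_GENL_VERSION);
	nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, 0);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
#endif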
| gpl-2.0 |
savoca/kernel-msm | arch/arm/mach-s3c24xx/mach-at2440evb.c | 4777 | 5288 | /* linux/arch/arm/mach-s3c2440/mach-at2440evb.c
*
* Copyright (c) 2008 Ramax Lo <ramaxlo@gmail.com>
* Based on mach-anubis.c by Ben Dooks <ben@simtec.co.uk>
* and modifications by SBZ <sbz@spgui.org> and
* Weibing <http://weibing.blogbus.com>
*
* For product information, visit http://www.arm.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/dm9000.h>
#include <linux/platform_device.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/fb.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
#include <plat/nand.h>
#include <plat/iic.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/mci.h>
#include "common.h"
static struct map_desc at2440evb_iodesc[] __initdata = {
/* Nothing here */
};
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
static struct s3c2410_uartcfg at2440evb_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
.clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = UCON,
.ulcon = ULCON,
.ufcon = UFCON,
.clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2,
},
};
/* NAND Flash on AT2440EVB board */
static struct mtd_partition __initdata at2440evb_default_nand_part[] = {
[0] = {
.name = "Boot Agent",
.size = SZ_256K,
.offset = 0,
},
[1] = {
.name = "Kernel",
.size = SZ_2M,
.offset = SZ_256K,
},
[2] = {
.name = "Root",
.offset = SZ_256K + SZ_2M,
.size = MTDPART_SIZ_FULL,
},
};
static struct s3c2410_nand_set __initdata at2440evb_nand_sets[] = {
[0] = {
.name = "nand",
.nr_chips = 1,
.nr_partitions = ARRAY_SIZE(at2440evb_default_nand_part),
.partitions = at2440evb_default_nand_part,
},
};
static struct s3c2410_platform_nand __initdata at2440evb_nand_info = {
.tacls = 25,
.twrph0 = 55,
.twrph1 = 40,
.nr_sets = ARRAY_SIZE(at2440evb_nand_sets),
.sets = at2440evb_nand_sets,
};
/* DM9000AEP 10/100 ethernet controller */
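/* the dm9000 driver expects two MEM resources: the first maps the address port, the second (offset by 4 here) the data port */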
static struct resource at2440evb_dm9k_resource[] = {
[0] = {
.start = S3C2410_CS3,
.end = S3C2410_CS3 + 3,
.flags = IORESOURCE_MEM
},
[1] = {
.start = S3C2410_CS3 + 4,
.end = S3C2410_CS3 + 7,
.flags = IORESOURCE_MEM
},
[2] = {
.start = IRQ_EINT7,
.end = IRQ_EINT7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
}
};
static struct dm9000_plat_data at2440evb_dm9k_pdata = {
.flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
};
static struct platform_device at2440evb_device_eth = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(at2440evb_dm9k_resource),
.resource = at2440evb_dm9k_resource,
.dev = {
.platform_data = &at2440evb_dm9k_pdata,
},
};
static struct s3c24xx_mci_pdata at2440evb_mci_pdata __initdata = {
.gpio_detect = S3C2410_GPG(10),
};
/* 7" LCD panel */
static struct s3c2410fb_display at2440evb_lcd_cfg __initdata = {
.lcdcon5 = S3C2410_LCDCON5_FRM565 |
S3C2410_LCDCON5_INVVLINE |
S3C2410_LCDCON5_INVVFRAME |
S3C2410_LCDCON5_PWREN |
S3C2410_LCDCON5_HWSWP,
.type = S3C2410_LCDCON1_TFT,
.width = 800,
.height = 480,
.pixclock = 33333, /* HCLK 60 MHz, divisor 2 */
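/* ~30 MHz dot clock with the 1056x525 total timing below gives roughly a 54 Hz refresh (illustrative arithmetic) */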
.xres = 800,
.yres = 480,
.bpp = 16,
.left_margin = 88,
.right_margin = 40,
.hsync_len = 128,
.upper_margin = 32,
.lower_margin = 11,
.vsync_len = 2,
};
static struct s3c2410fb_mach_info at2440evb_fb_info __initdata = {
.displays = &at2440evb_lcd_cfg,
.num_displays = 1,
.default_display = 0,
};
static struct platform_device *at2440evb_devices[] __initdata = {
&s3c_device_ohci,
&s3c_device_wdt,
&s3c_device_adc,
&s3c_device_i2c0,
&s3c_device_rtc,
&s3c_device_nand,
&s3c_device_sdi,
&s3c_device_lcd,
&at2440evb_device_eth,
};
static void __init at2440evb_map_io(void)
{
s3c24xx_init_io(at2440evb_iodesc, ARRAY_SIZE(at2440evb_iodesc));
s3c24xx_init_clocks(16934400);
s3c24xx_init_uarts(at2440evb_uartcfgs, ARRAY_SIZE(at2440evb_uartcfgs));
}
static void __init at2440evb_init(void)
{
s3c24xx_fb_set_platdata(&at2440evb_fb_info);
s3c24xx_mci_set_platdata(&at2440evb_mci_pdata);
s3c_nand_set_platdata(&at2440evb_nand_info);
s3c_i2c0_set_platdata(NULL);
platform_add_devices(at2440evb_devices, ARRAY_SIZE(at2440evb_devices));
}
MACHINE_START(AT2440EVB, "AT2440EVB")
.atag_offset = 0x100,
.map_io = at2440evb_map_io,
.init_machine = at2440evb_init,
.init_irq = s3c24xx_init_irq,
.timer = &s3c24xx_timer,
.restart = s3c244x_restart,
MACHINE_END
| gpl-2.0 |
davidmueller13/valexKernel-lt03wifi | drivers/media/dvb/frontends/hd29l2.c | 5033 | 17943 | /*
* HDIC HD29L2 DMB-TH demodulator driver
*
* Copyright (C) 2011 Metropolia University of Applied Sciences, Electria R&D
*
* Author: Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "hd29l2_priv.h"
int hd29l2_debug;
module_param_named(debug, hd29l2_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
/* write multiple registers */
static int hd29l2_wr_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
u8 buf[2 + len];
struct i2c_msg msg[1] = {
{
.addr = priv->cfg.i2c_addr,
.flags = 0,
.len = sizeof(buf),
.buf = buf,
}
};
buf[0] = 0x00;
buf[1] = reg;
memcpy(&buf[2], val, len);
ret = i2c_transfer(priv->i2c, msg, 1);
if (ret == 1) {
ret = 0;
} else {
warn("i2c wr failed=%d reg=%02x len=%d", ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
}
/* read multiple registers */
static int hd29l2_rd_regs(struct hd29l2_priv *priv, u8 reg, u8 *val, int len)
{
int ret;
u8 buf[2] = { 0x00, reg };
struct i2c_msg msg[2] = {
{
.addr = priv->cfg.i2c_addr,
.flags = 0,
.len = 2,
.buf = buf,
}, {
.addr = priv->cfg.i2c_addr,
.flags = I2C_M_RD,
.len = len,
.buf = val,
}
};
ret = i2c_transfer(priv->i2c, msg, 2);
if (ret == 2) {
ret = 0;
} else {
warn("i2c rd failed=%d reg=%02x len=%d", ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
}
/* write single register */
static int hd29l2_wr_reg(struct hd29l2_priv *priv, u8 reg, u8 val)
{
return hd29l2_wr_regs(priv, reg, &val, 1);
}
/* read single register */
static int hd29l2_rd_reg(struct hd29l2_priv *priv, u8 reg, u8 *val)
{
return hd29l2_rd_regs(priv, reg, val, 1);
}
/* write single register with mask */
static int hd29l2_wr_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 val, u8 mask)
{
int ret;
u8 tmp;
/* no need for read if whole reg is written */
if (mask != 0xff) {
ret = hd29l2_rd_regs(priv, reg, &tmp, 1);
if (ret)
return ret;
val &= mask;
tmp &= ~mask;
val |= tmp;
}
return hd29l2_wr_regs(priv, reg, &val, 1);
}
/* read single register with mask */
int hd29l2_rd_reg_mask(struct hd29l2_priv *priv, u8 reg, u8 *val, u8 mask)
{
int ret, i;
u8 tmp;
ret = hd29l2_rd_regs(priv, reg, &tmp, 1);
if (ret)
return ret;
tmp &= mask;
/* find position of the first bit */
for (i = 0; i < 8; i++) {
if ((mask >> i) & 0x01)
break;
}
*val = tmp >> i;
return 0;
}
static int hd29l2_soft_reset(struct hd29l2_priv *priv)
{
int ret;
u8 tmp;
ret = hd29l2_rd_reg(priv, 0x26, &tmp);
if (ret)
goto err;
ret = hd29l2_wr_reg(priv, 0x26, 0x0d);
if (ret)
goto err;
usleep_range(10000, 20000);
ret = hd29l2_wr_reg(priv, 0x26, tmp);
if (ret)
goto err;
return 0;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
int ret, i;
struct hd29l2_priv *priv = fe->demodulator_priv;
u8 tmp;
dbg("%s: enable=%d", __func__, enable);
/* set tuner address for demod */
if (!priv->tuner_i2c_addr_programmed && enable) {
/* no need to set tuner address every time, once is enough */
ret = hd29l2_wr_reg(priv, 0x9d, priv->cfg.tuner_i2c_addr << 1);
if (ret)
goto err;
priv->tuner_i2c_addr_programmed = true;
}
/* open / close gate */
ret = hd29l2_wr_reg(priv, 0x9f, enable);
if (ret)
goto err;
/* wait demod ready */
for (i = 10; i; i--) {
ret = hd29l2_rd_reg(priv, 0x9e, &tmp);
if (ret)
goto err;
if (tmp == enable)
break;
usleep_range(5000, 10000);
}
dbg("%s: loop=%d", __func__, i);
return ret;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
int ret;
struct hd29l2_priv *priv = fe->demodulator_priv;
u8 buf[2];
*status = 0;
ret = hd29l2_rd_reg(priv, 0x05, &buf[0]);
if (ret)
goto err;
if (buf[0] & 0x01) {
/* full lock */
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
FE_HAS_SYNC | FE_HAS_LOCK;
} else {
ret = hd29l2_rd_reg(priv, 0x0d, &buf[1]);
if (ret)
goto err;
if ((buf[1] & 0xfe) == 0x78)
/* partial lock */
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC;
}
priv->fe_status = *status;
return 0;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_read_snr(struct dvb_frontend *fe, u16 *snr)
{
int ret;
struct hd29l2_priv *priv = fe->demodulator_priv;
u8 buf[2];
u16 tmp;
if (!(priv->fe_status & FE_HAS_LOCK)) {
*snr = 0;
ret = 0;
goto err;
}
ret = hd29l2_rd_regs(priv, 0x0b, buf, 2);
if (ret)
goto err;
tmp = (buf[0] << 8) | buf[1];
/* report SNR in dB * 10 */
#define LOG10_20736_24 72422627 /* log10(20736) << 24 */
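/*
 * intlog10() returns log10() in <<24 fixed point, so the subtraction
 * below is (log10(20736) - log10(tmp)) << 24, and dividing by
 * (1 << 24) / 100 yields 100 * log10(20736 / tmp), i.e.
 * 10 * log10(ratio) expressed in units of 0.1 dB.
 */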
if (tmp)
*snr = (LOG10_20736_24 - intlog10(tmp)) / ((1 << 24) / 100);
else
*snr = 0;
return 0;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
int ret;
struct hd29l2_priv *priv = fe->demodulator_priv;
u8 buf[2];
u16 tmp;
*strength = 0;
ret = hd29l2_rd_regs(priv, 0xd5, buf, 2);
if (ret)
goto err;
tmp = buf[0] << 8 | buf[1];
tmp = ~tmp & 0x0fff;
/* scale value to 0x0000-0xffff from 0x0000-0x0fff */
*strength = tmp * 0xffff / 0x0fff;
return 0;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_read_ber(struct dvb_frontend *fe, u32 *ber)
{
int ret;
struct hd29l2_priv *priv = fe->demodulator_priv;
u8 buf[2];
if (!(priv->fe_status & FE_HAS_SYNC)) {
*ber = 0;
ret = 0;
goto err;
}
ret = hd29l2_rd_regs(priv, 0xd9, buf, 2);
if (ret) {
*ber = 0;
goto err;
}
/* LDPC BER */
*ber = ((buf[0] & 0x0f) << 8) | buf[1];
return 0;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
/* no way to read? */
*ucblocks = 0;
return 0;
}
static enum dvbfe_search hd29l2_search(struct dvb_frontend *fe)
{
int ret, i;
struct hd29l2_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 tmp, buf[3];
u8 modulation, carrier, guard_interval, interleave, code_rate;
u64 num64;
u32 if_freq, if_ctl;
bool auto_mode;
dbg("%s: delivery_system=%d frequency=%d bandwidth_hz=%d " \
"modulation=%d inversion=%d fec_inner=%d guard_interval=%d",
__func__,
c->delivery_system, c->frequency, c->bandwidth_hz,
c->modulation, c->inversion, c->fec_inner, c->guard_interval);
/* as for now we detect always params automatically */
auto_mode = true;
/* program tuner */
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
/* get and program IF */
if (fe->ops.tuner_ops.get_if_frequency)
fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
else
if_freq = 0;
if (if_freq) {
/* normal IF */
/* calc IF control value */
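/* NCO word: if_ctl = if_freq * 2^23 / HD29L2_XTAL - 2^23, i.e. the IF expressed as a signed fraction of the crystal clock (reading of the code below) */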
num64 = if_freq;
num64 *= 0x800000;
num64 = div_u64(num64, HD29L2_XTAL);
num64 -= 0x800000;
if_ctl = num64;
tmp = 0xfc; /* tuner type normal */
} else {
/* zero IF */
if_ctl = 0;
tmp = 0xfe; /* tuner type Zero-IF */
}
buf[0] = ((if_ctl >> 0) & 0xff);
buf[1] = ((if_ctl >> 8) & 0xff);
buf[2] = ((if_ctl >> 16) & 0xff);
/* program IF control */
ret = hd29l2_wr_regs(priv, 0x14, buf, 3);
if (ret)
goto err;
/* program tuner type */
ret = hd29l2_wr_reg(priv, 0xab, tmp);
if (ret)
goto err;
dbg("%s: if_freq=%d if_ctl=%x", __func__, if_freq, if_ctl);
if (auto_mode) {
/*
* use auto mode
*/
/* disable quick mode */
ret = hd29l2_wr_reg_mask(priv, 0xac, 0 << 7, 0x80);
if (ret)
goto err;
ret = hd29l2_wr_reg_mask(priv, 0x82, 1 << 1, 0x02);
if (ret)
goto err;
/* enable auto mode */
ret = hd29l2_wr_reg_mask(priv, 0x7d, 1 << 6, 0x40);
if (ret)
goto err;
ret = hd29l2_wr_reg_mask(priv, 0x81, 1 << 3, 0x08);
if (ret)
goto err;
/* soft reset */
ret = hd29l2_soft_reset(priv);
if (ret)
goto err;
/* detect modulation */
for (i = 30; i; i--) {
msleep(100);
ret = hd29l2_rd_reg(priv, 0x0d, &tmp);
if (ret)
goto err;
if ((((tmp & 0xf0) >= 0x10) &&
((tmp & 0x0f) == 0x08)) || (tmp >= 0x2c))
break;
}
dbg("%s: loop=%d", __func__, i);
if (i == 0)
/* detection failed */
return DVBFE_ALGO_SEARCH_FAILED;
/* read modulation */
ret = hd29l2_rd_reg_mask(priv, 0x7d, &modulation, 0x07);
if (ret)
goto err;
} else {
/*
* use manual mode
*/
modulation = HD29L2_QAM64;
carrier = HD29L2_CARRIER_MULTI;
guard_interval = HD29L2_PN945;
interleave = HD29L2_INTERLEAVER_420;
code_rate = HD29L2_CODE_RATE_08;
tmp = (code_rate << 3) | modulation;
ret = hd29l2_wr_reg_mask(priv, 0x7d, tmp, 0x5f);
if (ret)
goto err;
tmp = (carrier << 2) | guard_interval;
ret = hd29l2_wr_reg_mask(priv, 0x81, tmp, 0x0f);
if (ret)
goto err;
tmp = interleave;
ret = hd29l2_wr_reg_mask(priv, 0x82, tmp, 0x03);
if (ret)
goto err;
}
/* ensure modulation validity */
/* 0=QAM4_NR, 1=QAM4, 2=QAM16, 3=QAM32, 4=QAM64 */
if (modulation > (ARRAY_SIZE(reg_mod_vals_tab[0].val) - 1)) {
dbg("%s: modulation=%d not valid", __func__, modulation);
goto err;
}
/* program registers according to modulation */
for (i = 0; i < ARRAY_SIZE(reg_mod_vals_tab); i++) {
ret = hd29l2_wr_reg(priv, reg_mod_vals_tab[i].reg,
reg_mod_vals_tab[i].val[modulation]);
if (ret)
goto err;
}
/* read guard interval */
ret = hd29l2_rd_reg_mask(priv, 0x81, &guard_interval, 0x03);
if (ret)
goto err;
/* read carrier mode */
ret = hd29l2_rd_reg_mask(priv, 0x81, &carrier, 0x04);
if (ret)
goto err;
dbg("%s: modulation=%d guard_interval=%d carrier=%d",
__func__, modulation, guard_interval, carrier);
if ((carrier == HD29L2_CARRIER_MULTI) && (modulation == HD29L2_QAM64) &&
(guard_interval == HD29L2_PN945)) {
dbg("%s: C=3780 && QAM64 && PN945", __func__);
ret = hd29l2_wr_reg(priv, 0x42, 0x33);
if (ret)
goto err;
ret = hd29l2_wr_reg(priv, 0xdd, 0x01);
if (ret)
goto err;
}
usleep_range(10000, 20000);
/* soft reset */
ret = hd29l2_soft_reset(priv);
if (ret)
goto err;
/* wait demod lock */
for (i = 30; i; i--) {
msleep(100);
/* read lock bit */
ret = hd29l2_rd_reg_mask(priv, 0x05, &tmp, 0x01);
if (ret)
goto err;
if (tmp)
break;
}
dbg("%s: loop=%d", __func__, i);
if (i == 0)
return DVBFE_ALGO_SEARCH_AGAIN;
return DVBFE_ALGO_SEARCH_SUCCESS;
err:
dbg("%s: failed=%d", __func__, ret);
return DVBFE_ALGO_SEARCH_ERROR;
}
static int hd29l2_get_frontend_algo(struct dvb_frontend *fe)
{
return DVBFE_ALGO_CUSTOM;
}
static int hd29l2_get_frontend(struct dvb_frontend *fe)
{
int ret;
struct hd29l2_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 buf[3];
u32 if_ctl;
char *str_constellation, *str_code_rate, *str_constellation_code_rate,
*str_guard_interval, *str_carrier, *str_guard_interval_carrier,
*str_interleave, *str_interleave_;
ret = hd29l2_rd_reg(priv, 0x7d, &buf[0]);
if (ret)
goto err;
ret = hd29l2_rd_regs(priv, 0x81, &buf[1], 2);
if (ret)
goto err;
/* constellation, 0x7d[2:0] */
switch ((buf[0] >> 0) & 0x07) {
case 0: /* QAM4NR */
str_constellation = "QAM4NR";
c->modulation = QAM_AUTO; /* FIXME */
break;
case 1: /* QAM4 */
str_constellation = "QAM4";
c->modulation = QPSK; /* FIXME */
break;
case 2:
str_constellation = "QAM16";
c->modulation = QAM_16;
break;
case 3:
str_constellation = "QAM32";
c->modulation = QAM_32;
break;
case 4:
str_constellation = "QAM64";
c->modulation = QAM_64;
break;
default:
str_constellation = "?";
}
/* LDPC code rate, 0x7d[4:3] */
switch ((buf[0] >> 3) & 0x03) {
case 0: /* 0.4 */
str_code_rate = "0.4";
c->fec_inner = FEC_AUTO; /* FIXME */
break;
case 1: /* 0.6 */
str_code_rate = "0.6";
c->fec_inner = FEC_3_5;
break;
case 2: /* 0.8 */
str_code_rate = "0.8";
c->fec_inner = FEC_4_5;
break;
default:
str_code_rate = "?";
}
/* constellation & code rate set, 0x7d[6] */
switch ((buf[0] >> 6) & 0x01) {
case 0:
str_constellation_code_rate = "manual";
break;
case 1:
str_constellation_code_rate = "auto";
break;
default:
str_constellation_code_rate = "?";
}
/* frame header, 0x81[1:0] */
switch ((buf[1] >> 0) & 0x03) {
case 0: /* PN945 */
str_guard_interval = "PN945";
c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
break;
case 1: /* PN595 */
str_guard_interval = "PN595";
c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
break;
case 2: /* PN420 */
str_guard_interval = "PN420";
c->guard_interval = GUARD_INTERVAL_AUTO; /* FIXME */
break;
default:
str_guard_interval = "?";
}
/* carrier, 0x81[2] */
switch ((buf[1] >> 2) & 0x01) {
case 0:
str_carrier = "C=1";
break;
case 1:
str_carrier = "C=3780";
break;
default:
str_carrier = "?";
}
/* frame header & carrier set, 0x81[3] */
switch ((buf[1] >> 3) & 0x01) {
case 0:
str_guard_interval_carrier = "manual";
break;
case 1:
str_guard_interval_carrier = "auto";
break;
default:
str_guard_interval_carrier = "?";
}
/* interleave, 0x82[0] */
switch ((buf[2] >> 0) & 0x01) {
case 0:
str_interleave = "M=720";
break;
case 1:
str_interleave = "M=240";
break;
default:
str_interleave = "?";
}
/* interleave set, 0x82[1] */
switch ((buf[2] >> 1) & 0x01) {
case 0:
str_interleave_ = "manual";
break;
case 1:
str_interleave_ = "auto";
break;
default:
str_interleave_ = "?";
}
/*
* We can read out the currently detected NCO and use that value next
* time instead of calculating a new value from the target IF.
* This should not affect receiver sensitivity, but it could make
* gaining lock after a tune easier...
*/
ret = hd29l2_rd_regs(priv, 0xb1, &buf[0], 3);
if (ret)
goto err;
if_ctl = (buf[0] << 16) | ((buf[1] - 7) << 8) | buf[2];
dbg("%s: %s %s %s | %s %s %s | %s %s | NCO=%06x", __func__,
str_constellation, str_code_rate, str_constellation_code_rate,
str_guard_interval, str_carrier, str_guard_interval_carrier,
str_interleave, str_interleave_, if_ctl);
return 0;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static int hd29l2_init(struct dvb_frontend *fe)
{
int ret, i;
struct hd29l2_priv *priv = fe->demodulator_priv;
u8 tmp;
static const struct reg_val tab[] = {
{ 0x3a, 0x06 },
{ 0x3b, 0x03 },
{ 0x3c, 0x04 },
{ 0xaf, 0x06 },
{ 0xb0, 0x1b },
{ 0x80, 0x64 },
{ 0x10, 0x38 },
};
dbg("%s:", __func__);
/* reset demod */
/* it is recommended to HW reset chip using RST_N pin */
if (fe->callback) {
ret = fe->callback(fe, DVB_FRONTEND_COMPONENT_DEMOD, 0, 0);
if (ret)
goto err;
/* reprogramming needed because HW reset clears registers */
priv->tuner_i2c_addr_programmed = false;
}
/* init */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
ret = hd29l2_wr_reg(priv, tab[i].reg, tab[i].val);
if (ret)
goto err;
}
/* TS params */
ret = hd29l2_rd_reg(priv, 0x36, &tmp);
if (ret)
goto err;
tmp &= 0x1b;
tmp |= priv->cfg.ts_mode;
ret = hd29l2_wr_reg(priv, 0x36, tmp);
if (ret)
goto err;
ret = hd29l2_rd_reg(priv, 0x31, &tmp);
if (ret)
goto err;
tmp &= 0xef;
if (!(priv->cfg.ts_mode >> 7))
/* set b4 for serial TS */
tmp |= 0x10;
ret = hd29l2_wr_reg(priv, 0x31, tmp);
if (ret)
goto err;
return ret;
err:
dbg("%s: failed=%d", __func__, ret);
return ret;
}
static void hd29l2_release(struct dvb_frontend *fe)
{
struct hd29l2_priv *priv = fe->demodulator_priv;
kfree(priv);
}
static struct dvb_frontend_ops hd29l2_ops;
struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
struct i2c_adapter *i2c)
{
int ret;
struct hd29l2_priv *priv = NULL;
u8 tmp;
/* allocate memory for the internal state */
priv = kzalloc(sizeof(struct hd29l2_priv), GFP_KERNEL);
if (priv == NULL)
goto err;
/* setup the state */
priv->i2c = i2c;
memcpy(&priv->cfg, config, sizeof(struct hd29l2_config));
/* check if the demod is there */
ret = hd29l2_rd_reg(priv, 0x00, &tmp);
if (ret)
goto err;
/* create dvb_frontend */
memcpy(&priv->fe.ops, &hd29l2_ops, sizeof(struct dvb_frontend_ops));
priv->fe.demodulator_priv = priv;
return &priv->fe;
err:
kfree(priv);
return NULL;
}
EXPORT_SYMBOL(hd29l2_attach);
static struct dvb_frontend_ops hd29l2_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "HDIC HD29L2 DMB-TH",
.frequency_min = 474000000,
.frequency_max = 858000000,
.frequency_stepsize = 10000,
.caps = FE_CAN_FEC_AUTO |
FE_CAN_QPSK |
FE_CAN_QAM_16 |
FE_CAN_QAM_32 |
FE_CAN_QAM_64 |
FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_BANDWIDTH_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
FE_CAN_RECOVER
},
.release = hd29l2_release,
.init = hd29l2_init,
.get_frontend_algo = hd29l2_get_frontend_algo,
.search = hd29l2_search,
.get_frontend = hd29l2_get_frontend,
.read_status = hd29l2_read_status,
.read_snr = hd29l2_read_snr,
.read_signal_strength = hd29l2_read_signal_strength,
.read_ber = hd29l2_read_ber,
.read_ucblocks = hd29l2_read_ucblocks,
.i2c_gate_ctrl = hd29l2_i2c_gate_ctrl,
};
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("HDIC HD29L2 DMB-TH demodulator driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MinimalOS-AOSP/kernel_huawei_angler | arch/h8300/kernel/irq.c | 6825 | 3637 | /*
* linux/arch/h8300/kernel/irq.c
*
* Copyright 2007 Yoshinori Sato <ysato@users.sourceforge.jp>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/errno.h>
/*#define DEBUG*/
extern unsigned long *interrupt_redirect_table;
extern const int h8300_saved_vectors[];
extern const h8300_vector h8300_trap_table[];
int h8300_enable_irq_pin(unsigned int irq);
void h8300_disable_irq_pin(unsigned int irq);
#define CPU_VECTOR ((unsigned long *)0x000000)
#define ADDR_MASK (0xffffff)
static inline int is_ext_irq(unsigned int irq)
{
return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
}
static void h8300_enable_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
IER_REGS |= 1 << (data->irq - EXT_IRQ0);
}
static void h8300_disable_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
}
static unsigned int h8300_startup_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
return h8300_enable_irq_pin(data->irq);
else
return 0;
}
static void h8300_shutdown_irq(struct irq_data *data)
{
if (is_ext_irq(data->irq))
h8300_disable_irq_pin(data->irq);
}
/*
* h8300 interrupt controller implementation
*/
struct irq_chip h8300irq_chip = {
.name = "H8300-INTC",
.irq_startup = h8300_startup_irq,
.irq_shutdown = h8300_shutdown_irq,
.irq_enable = h8300_enable_irq,
.irq_disable = h8300_disable_irq,
};
#if defined(CONFIG_RAMKERNEL)
static unsigned long __init *get_vector_address(void)
{
unsigned long *rom_vector = CPU_VECTOR;
unsigned long base,tmp;
int vec_no;
base = rom_vector[EXT_IRQ0] & ADDR_MASK;
/* check romvector format */
for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
return NULL;
}
/* ramvector base address */
base -= EXT_IRQ0*4;
/* writable check: invert the word, write it back and verify (RAM, not ROM) */
tmp = ~(*(volatile unsigned long *)base);
(*(volatile unsigned long *)base) = tmp;
if ((*(volatile unsigned long *)base) != tmp)
return NULL;
return (unsigned long *)base;
}
static void __init setup_vector(void)
{
int i;
unsigned long *ramvec,*ramvec_p;
const h8300_vector *trap_entry;
const int *saved_vector;
ramvec = get_vector_address();
if (ramvec == NULL)
panic("interrupt vector setup failed.");
else
printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec);
/* create redirect table */
ramvec_p = ramvec;
trap_entry = h8300_trap_table;
saved_vector = h8300_saved_vectors;
for ( i = 0; i < NR_IRQS; i++) {
if (i == *saved_vector) {
ramvec_p++;
saved_vector++;
} else {
if ( i < NR_TRAPS ) {
if (*trap_entry)
*ramvec_p = VECTOR(*trap_entry);
ramvec_p++;
trap_entry++;
} else
*ramvec_p++ = REDIRECT(interrupt_entry);
}
}
interrupt_redirect_table = ramvec;
#ifdef DEBUG
ramvec_p = ramvec;
for (i = 0; i < NR_IRQS; i++) {
if ((i % 8) == 0)
printk(KERN_DEBUG "\n%p: ",ramvec_p);
printk(KERN_DEBUG "%p ", (void *)*ramvec_p);
ramvec_p++;
}
printk(KERN_DEBUG "\n");
#endif
}
#else
#define setup_vector() do { } while(0)
#endif
void __init init_IRQ(void)
{
int c;
setup_vector();
for (c = 0; c < NR_IRQS; c++)
irq_set_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
}
asmlinkage void do_IRQ(int irq)
{
irq_enter();
generic_handle_irq(irq);
irq_exit();
}
| gpl-2.0 |
faux123/HTC_Amaze_Kernel | net/netfilter/nf_tproxy_core.c | 10409 | 1415 | /*
* Transparent proxy support for Linux/iptables
*
* Copyright (c) 2006-2007 BalaBit IT Ltd.
* Author: Balazs Scheidler, Krisztian Kovacs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <net/udp.h>
#include <net/netfilter/nf_tproxy_core.h>
static void
nf_tproxy_destructor(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
skb->sk = NULL;
skb->destructor = NULL;
if (sk)
sock_put(sk);
}
/* consumes sk */
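/* the caller's socket reference is taken over: it is released via the skb destructor, or dropped immediately for TIME_WAIT sockets */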
void
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
/* assigning tw sockets complicates things; most
* skb->sk->X checks would have to test sk->sk_state first */
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
skb_orphan(skb);
skb->sk = sk;
skb->destructor = nf_tproxy_destructor;
}
EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
static int __init nf_tproxy_init(void)
{
pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
return 0;
}
module_init(nf_tproxy_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Krisztian Kovacs");
MODULE_DESCRIPTION("Transparent proxy support core routines");
| gpl-2.0 |
Desterly/android_kernel_motorola_msm8994 | arch/sparc/prom/memory.c | 12201 | 1937 | /* memory.c: Prom routine for acquiring various bits of information
* about RAM on the machine, both virtual and physical.
*
* Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Michael A. Griffith (grif@acm.org)
*/
#include <linux/kernel.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/page.h>
static int __init prom_meminit_v0(void)
{
struct linux_mlist_v0 *p;
int index;
index = 0;
for (p = *(romvec->pv_v0mem.v0_available); p; p = p->theres_more) {
sp_banks[index].base_addr = (unsigned long) p->start_adr;
sp_banks[index].num_bytes = p->num_bytes;
index++;
}
return index;
}
static int __init prom_meminit_v2(void)
{
struct linux_prom_registers reg[64];
phandle node;
int size, num_ents, i;
node = prom_searchsiblings(prom_getchild(prom_root_node), "memory");
size = prom_getproperty(node, "available", (char *) reg, sizeof(reg));
num_ents = size / sizeof(struct linux_prom_registers);
for (i = 0; i < num_ents; i++) {
sp_banks[i].base_addr = reg[i].phys_addr;
sp_banks[i].num_bytes = reg[i].reg_size;
}
return num_ents;
}
static int sp_banks_cmp(const void *a, const void *b)
{
const struct sparc_phys_banks *x = a, *y = b;
if (x->base_addr > y->base_addr)
return 1;
if (x->base_addr < y->base_addr)
return -1;
return 0;
}
/* Initialize the memory lists based upon the prom version. */
void __init prom_meminit(void)
{
int i, num_ents = 0;
switch (prom_vers) {
case PROM_V0:
num_ents = prom_meminit_v0();
break;
case PROM_V2:
case PROM_V3:
num_ents = prom_meminit_v2();
break;
default:
break;
}
sort(sp_banks, num_ents, sizeof(struct sparc_phys_banks),
sp_banks_cmp, NULL);
/* Sentinel. */
sp_banks[num_ents].base_addr = 0xdeadbeef;
sp_banks[num_ents].num_bytes = 0;
for (i = 0; i < num_ents; i++)
sp_banks[i].num_bytes &= PAGE_MASK;
}
| gpl-2.0 |
netico-solutions/linux-am335x | fs/adfs/map.c | 12969 | 7075 | /*
* linux/fs/adfs/map.c
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/buffer_head.h>
#include <asm/unaligned.h>
#include "adfs.h"
/*
* The ADFS map is basically a set of sectors. Each sector is called a
* zone which contains a bitstream made up of variable sized fragments.
* Each bit refers to a set of bytes in the filesystem, defined by
* log2bpmb. This may be larger or smaller than the sector size, but
* the overall size it describes will always be a round number of
* sectors. A fragment id is always idlen bits long.
*
* < idlen > < n > <1>
* +---------+-------//---------+---+
* | frag id | 0000....000000 | 1 |
* +---------+-------//---------+---+
*
* The physical disk space used by a fragment is taken from the start of
* the fragment id up to and including the '1' bit - ie, idlen + n + 1
* bits.
*
* A fragment id can be repeated multiple times in the whole map for
* large or fragmented files. The first map zone a fragment starts in
* is given by fragment id / ids_per_zone - this allows objects to start
* from any zone on the disk.
*
* Free space is described by a linked list of fragments. Each free
* fragment describes free space in the same way as the other fragments,
* however, the frag id specifies an offset (in map bits) from the end
* of this fragment to the start of the next free fragment.
*
* Objects stored on the disk are allocated object ids (we use these as
* our inode numbers.) Object ids contain a fragment id and an optional
* offset. This allows a directory fragment to contain small files
* associated with that directory.
*/
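/*
 * Illustrative sketch (not part of the driver): reading one fragment
 * from a map bitstream as described above. It mirrors GET_FRAG_ID()
 * and the end-of-fragment scan in lookup_zone() below, without their
 * word-at-a-time optimisation.
 */
#if 0
static u32 example_read_fragment(const u8 *map, unsigned int *start,
unsigned int idlen)
{
	u32 frag = get_unaligned_le32(map + (*start >> 3)) >> (*start & 7);
	unsigned int bit = *start + idlen;
	frag &= (1 << idlen) - 1;
	/* skip the run of '0' bits; the '1' bit ends the fragment */
	while (!(map[bit >> 3] & (1 << (bit & 7))))
		bit++;
	*start = bit + 1;
	return frag;
}
#endif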
/*
* For the future...
*/
static DEFINE_RWLOCK(adfs_map_lock);
/*
* This is fun. We need to load up to 19 bits from the map at an
* arbitrary bit alignment. (We're limited to 19 bits by F+ version 2).
*/
#define GET_FRAG_ID(_map,_start,_idmask) \
({ \
unsigned char *_m = _map + (_start >> 3); \
u32 _frag = get_unaligned_le32(_m); \
_frag >>= (_start & 7); \
_frag & _idmask; \
})
/*
* return the map bit offset of the fragment frag_id in the zone dm.
* Note that the loop is optimised for best asm code - look at the
* output of:
* gcc -D__KERNEL__ -O2 -I../../include -o - -S map.c
*/
static int
lookup_zone(const struct adfs_discmap *dm, const unsigned int idlen,
const unsigned int frag_id, unsigned int *offset)
{
const unsigned int mapsize = dm->dm_endbit;
const u32 idmask = (1 << idlen) - 1;
unsigned char *map = dm->dm_bh->b_data + 4;
unsigned int start = dm->dm_startbit;
unsigned int mapptr;
u32 frag;
do {
frag = GET_FRAG_ID(map, start, idmask);
mapptr = start + idlen;
/*
* find end of fragment
*/
{
__le32 *_map = (__le32 *)map;
u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
while (v == 0) {
mapptr = (mapptr & ~31) + 32;
if (mapptr >= mapsize)
goto error;
v = le32_to_cpu(_map[mapptr >> 5]);
}
mapptr += 1 + ffz(~v);
}
if (frag == frag_id)
goto found;
again:
start = mapptr;
} while (mapptr < mapsize);
return -1;
error:
printk(KERN_ERR "adfs: oversized fragment 0x%x at 0x%x-0x%x\n",
frag, start, mapptr);
return -1;
found:
{
int length = mapptr - start;
if (*offset >= length) {
*offset -= length;
goto again;
}
}
return start + *offset;
}
/*
* Scan the free space map, for this zone, calculating the total
* number of map bits in each free space fragment.
*
* Note: idmask is limited to 15 bits [3.2]
*/
static unsigned int
scan_free_map(struct adfs_sb_info *asb, struct adfs_discmap *dm)
{
const unsigned int mapsize = dm->dm_endbit + 32;
const unsigned int idlen = asb->s_idlen;
const unsigned int frag_idlen = idlen <= 15 ? idlen : 15;
const u32 idmask = (1 << frag_idlen) - 1;
unsigned char *map = dm->dm_bh->b_data;
unsigned int start = 8, mapptr;
u32 frag;
unsigned long total = 0;
/*
* get fragment id
*/
frag = GET_FRAG_ID(map, start, idmask);
/*
* If the freelink is null, then no free fragments
* exist in this zone.
*/
if (frag == 0)
return 0;
do {
start += frag;
/*
* get fragment id
*/
frag = GET_FRAG_ID(map, start, idmask);
mapptr = start + idlen;
/*
* find end of fragment
*/
{
__le32 *_map = (__le32 *)map;
u32 v = le32_to_cpu(_map[mapptr >> 5]) >> (mapptr & 31);
while (v == 0) {
mapptr = (mapptr & ~31) + 32;
if (mapptr >= mapsize)
goto error;
v = le32_to_cpu(_map[mapptr >> 5]);
}
mapptr += 1 + ffz(~v);
}
total += mapptr - start;
} while (frag >= idlen + 1);
if (frag != 0)
printk(KERN_ERR "adfs: undersized free fragment\n");
return total;
error:
printk(KERN_ERR "adfs: oversized free fragment\n");
return 0;
}
static int
scan_map(struct adfs_sb_info *asb, unsigned int zone,
const unsigned int frag_id, unsigned int mapoff)
{
const unsigned int idlen = asb->s_idlen;
struct adfs_discmap *dm, *dm_end;
int result;
dm = asb->s_map + zone;
zone = asb->s_map_size;
dm_end = asb->s_map + zone;
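/* search the object's home zone first, then wrap around through the remaining zones */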
do {
result = lookup_zone(dm, idlen, frag_id, &mapoff);
if (result != -1)
goto found;
dm ++;
if (dm == dm_end)
dm = asb->s_map;
} while (--zone > 0);
return -1;
found:
result -= dm->dm_startbit;
result += dm->dm_startblk;
return result;
}
/*
 * Calculate the amount of free blocks in the map:
 *
 *   total_free = sum over n = 1 .. nzones of free_in_zone_n
 */
unsigned int
adfs_map_free(struct super_block *sb)
{
struct adfs_sb_info *asb = ADFS_SB(sb);
struct adfs_discmap *dm;
unsigned int total = 0;
unsigned int zone;
dm = asb->s_map;
zone = asb->s_map_size;
do {
total += scan_free_map(asb, dm++);
} while (--zone > 0);
return signed_asl(total, asb->s_map2blk);
}
int
adfs_map_lookup(struct super_block *sb, unsigned int frag_id,
unsigned int offset)
{
struct adfs_sb_info *asb = ADFS_SB(sb);
unsigned int zone, mapoff;
int result;
/*
* map & root fragment is special - it starts in the center of the
* disk. The other fragments start at zone (frag / ids_per_zone)
*/
if (frag_id == ADFS_ROOT_FRAG)
zone = asb->s_map_size >> 1;
else
zone = frag_id / asb->s_ids_per_zone;
if (zone >= asb->s_map_size)
goto bad_fragment;
/* Convert sector offset to map offset */
mapoff = signed_asl(offset, -asb->s_map2blk);
read_lock(&adfs_map_lock);
result = scan_map(asb, zone, frag_id, mapoff);
read_unlock(&adfs_map_lock);
if (result > 0) {
unsigned int secoff;
/* Calculate sector offset into map block */
secoff = offset - signed_asl(mapoff, asb->s_map2blk);
return secoff + signed_asl(result, asb->s_map2blk);
}
adfs_error(sb, "fragment 0x%04x at offset %d not found in map",
frag_id, offset);
return 0;
bad_fragment:
adfs_error(sb, "invalid fragment 0x%04x (zone = %d, max = %d)",
frag_id, zone, asb->s_map_size);
return 0;
}
| gpl-2.0 |
monishk10/moshi_cancro | drivers/video/via/via_utility.c | 12969 | 6019 | /*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/via-core.h>
#include "global.h"
void viafb_get_device_support_state(u32 *support_state)
{
*support_state = CRT_Device;
if (viaparinfo->chip_info->tmds_chip_info.tmds_chip_name == VT1632_TMDS)
*support_state |= DVI_Device;
if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name == VT1631_LVDS)
*support_state |= LCD_Device;
}
void viafb_get_device_connect_state(u32 *connect_state)
{
bool mobile = false;
*connect_state = CRT_Device;
if (viafb_dvi_sense())
*connect_state |= DVI_Device;
viafb_lcd_get_mobile_state(&mobile);
if (mobile)
*connect_state |= LCD_Device;
}
bool viafb_lcd_get_support_expand_state(u32 xres, u32 yres)
{
bool support_state = false;
switch (viafb_lcd_panel_id) {
case LCD_PANEL_ID0_640X480:
if ((xres < 640) && (yres < 480))
support_state = true;
break;
case LCD_PANEL_ID1_800X600:
if ((xres < 800) && (yres < 600))
support_state = true;
break;
case LCD_PANEL_ID2_1024X768:
if ((xres < 1024) && (yres < 768))
support_state = true;
break;
case LCD_PANEL_ID3_1280X768:
if ((xres < 1280) && (yres < 768))
support_state = true;
break;
case LCD_PANEL_ID4_1280X1024:
if ((xres < 1280) && (yres < 1024))
support_state = true;
break;
case LCD_PANEL_ID5_1400X1050:
if ((xres < 1400) && (yres < 1050))
support_state = true;
break;
case LCD_PANEL_ID6_1600X1200:
if ((xres < 1600) && (yres < 1200))
support_state = true;
break;
case LCD_PANEL_ID7_1366X768:
if ((xres < 1366) && (yres < 768))
support_state = true;
break;
case LCD_PANEL_ID8_1024X600:
if ((xres < 1024) && (yres < 600))
support_state = true;
break;
case LCD_PANEL_ID9_1280X800:
if ((xres < 1280) && (yres < 800))
support_state = true;
break;
case LCD_PANEL_IDA_800X480:
if ((xres < 800) && (yres < 480))
support_state = true;
break;
case LCD_PANEL_IDB_1360X768:
if ((xres < 1360) && (yres < 768))
support_state = true;
break;
case LCD_PANEL_IDC_480X640:
if ((xres < 480) && (yres < 640))
support_state = true;
break;
default:
support_state = false;
break;
}
return support_state;
}
/*====================================================================*/
/* Gamma Function Implementation*/
/*====================================================================*/
void viafb_set_gamma_table(int bpp, unsigned int *gamma_table)
{
int i, sr1a;
int active_device_amount = 0;
int device_status = viafb_DeviceStatus;
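/* Count the active display devices: a population count over the
   status bits. */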
for (i = 0; i < sizeof(viafb_DeviceStatus) * 8; i++) {
if (device_status & 1)
active_device_amount++;
device_status >>= 1;
}
/* 8 bpp mode can't adjust gamma */
if (bpp == 8)
return;
/* Enable Gamma */
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
viafb_write_reg_mask(SR16, VIASR, 0x80, BIT7);
break;
case UNICHROME_K800:
case UNICHROME_PM800:
case UNICHROME_CN700:
case UNICHROME_CX700:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
case UNICHROME_P4M900:
viafb_write_reg_mask(CR33, VIACR, 0x80, BIT7);
break;
}
sr1a = (unsigned int)viafb_read_reg(VIASR, SR1A);
viafb_write_reg_mask(SR1A, VIASR, 0x0, BIT0);
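/* Clearing SR1A bit 0 appears to select the IGA1 LUT for the
   accesses below; it is set to 1 further down to switch to IGA2. */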
/* Fill IGA1 Gamma Table */
outb(0, LUT_INDEX_WRITE);
for (i = 0; i < 256; i++) {
outb(gamma_table[i] >> 16, LUT_DATA);
outb(gamma_table[i] >> 8 & 0xFF, LUT_DATA);
outb(gamma_table[i] & 0xFF, LUT_DATA);
}
/* When adjusting the gamma value in SAMM mode, fill the IGA1 and
IGA2 gamma tables simultaneously. */
/* Switch to IGA2 Gamma Table */
if ((active_device_amount > 1) &&
!((viaparinfo->chip_info->gfx_chip_name ==
UNICHROME_CLE266) &&
(viaparinfo->chip_info->gfx_chip_revision < 15))) {
viafb_write_reg_mask(SR1A, VIASR, 0x01, BIT0);
viafb_write_reg_mask(CR6A, VIACR, 0x02, BIT1);
/* Fill IGA2 Gamma Table */
outb(0, LUT_INDEX_WRITE);
for (i = 0; i < 256; i++) {
outb(gamma_table[i] >> 16, LUT_DATA);
outb(gamma_table[i] >> 8 & 0xFF, LUT_DATA);
outb(gamma_table[i] & 0xFF, LUT_DATA);
}
}
viafb_write_reg(SR1A, VIASR, sr1a);
}
void viafb_get_gamma_table(unsigned int *gamma_table)
{
unsigned char color_r, color_g, color_b;
unsigned char sr1a = 0;
int i;
/* Enable Gamma */
switch (viaparinfo->chip_info->gfx_chip_name) {
case UNICHROME_CLE266:
case UNICHROME_K400:
viafb_write_reg_mask(SR16, VIASR, 0x80, BIT7);
break;
case UNICHROME_K800:
case UNICHROME_PM800:
case UNICHROME_CN700:
case UNICHROME_CX700:
case UNICHROME_K8M890:
case UNICHROME_P4M890:
case UNICHROME_P4M900:
viafb_write_reg_mask(CR33, VIACR, 0x80, BIT7);
break;
}
sr1a = viafb_read_reg(VIASR, SR1A);
viafb_write_reg_mask(SR1A, VIASR, 0x0, BIT0);
/* Read back the gamma table to get the color values */
outb(0, LUT_INDEX_READ);
for (i = 0; i < 256; i++) {
color_r = inb(LUT_DATA);
color_g = inb(LUT_DATA);
color_b = inb(LUT_DATA);
gamma_table[i] =
((((u32) color_r) << 16) |
(((u16) color_g) << 8)) | color_b;
}
viafb_write_reg(SR1A, VIASR, sr1a);
}
void viafb_get_gamma_support_state(int bpp, unsigned int *support_state)
{
if (bpp == 8)
*support_state = None_Device;
else
*support_state = CRT_Device | DVI_Device | LCD_Device;
}
| gpl-2.0 |
boyan3010/ShooterU_Kernel_3.2.X | fs/gfs2/dir.c | 170 | 46895 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
/*
* Implements Extendible Hashing as described in:
* "Extendible Hashing" by Fagin, et al in
* __ACM Trans. on Database Systems__, Sept 1979.
*
*
* Here's the layout of dirents which is essentially the same as that of ext2
* within a single block. The field de_name_len is the number of bytes
* actually required for the name (no null terminator). The field de_rec_len
* is the number of bytes allocated to the dirent. The offset of the next
* dirent in the block is (dirent + dirent->de_rec_len). When a dirent is
* deleted, the preceding dirent inherits its allocated space, ie
* prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained
* by adding de_rec_len to the current dirent, this essentially causes the
* deleted dirent to get jumped over when iterating through all the dirents.
*
* When deleting the first dirent in a block, there is no previous dirent so
* the field de_ino is set to zero to designate it as deleted. When allocating
* a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the
* first dirent has (de_ino == 0) and de_rec_len is large enough, this first
* dirent is allocated. Otherwise it must go through all the 'used' dirents
* searching for one in which the amount of total space minus the amount of
* used space will provide enough space for the new dirent.
*
* There are two types of blocks in which dirents reside. In a stuffed dinode,
* the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning of
* the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
* beginning of the leaf block. The dirents reside in leaves when
*
* dip->i_diskflags & GFS2_DIF_EXHASH is true
*
* Otherwise, the dirents are "linear", within a single stuffed dinode block.
*
* When the dirents are in leaves, the actual contents of the directory file are
* used as an array of 64-bit block pointers pointing to the leaf blocks. The
* dirents are NOT in the directory file itself. There can be more than one
* block pointer in the array that points to the same leaf. In fact, when a
* directory is first converted from linear to exhash, all of the pointers
* point to the same leaf.
*
* When a leaf is completely full, the size of the hash table can be
* doubled unless it is already at the maximum size which is hard coded into
* GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked list,
* but never before the maximum hash table size has been reached.
*/
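/*
 * Illustrative sketch (not part of the original file; the function name
 * is hypothetical): iterating the dirents of one block purely via
 * de_rec_len, as described above. A real walker must validate each
 * record first -- see gfs2_check_dirent() and gfs2_dirent_scan() below.
 */
#if 0 /* example only */
static void example_walk_dirents(void *buf, unsigned int len,
				 unsigned int first_offset)
{
	unsigned int offset = first_offset;

	while (offset < len) {
		struct gfs2_dirent *dent = buf + offset;

		/* dent + 1 points at the name; a deleted first entry has
		   de_inum.no_addr == 0 but still owns its record space. */
		offset += be16_to_cpu(dent->de_rec_len);
	}
}
#endif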
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include "gfs2.h"
#include "incore.h"
#include "dir.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "bmap.h"
#include "util.h"
#define IS_LEAF 1 /* Hashed (leaf) directory */
#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
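/*
 * Illustrative note (not in the original file): the two macros above are
 * near-inverses -- the readdir offset is the 32-bit dirent hash shifted
 * right once, so the round trip necessarily drops the hash's low bit:
 *
 *	gfs2_dir_offset2hash(gfs2_disk_hash2offset(h)) == (h & ~1U)
 */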
struct qstr gfs2_qdot __read_mostly;
struct qstr gfs2_qdotdot __read_mostly;
typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
const struct qstr *name, void *opaque);
int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
struct buffer_head **bhp)
{
struct buffer_head *bh;
bh = gfs2_meta_new(ip->i_gl, block);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
*bhp = bh;
return 0;
}
static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
struct buffer_head **bhp)
{
struct buffer_head *bh;
int error;
error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, &bh);
if (error)
return error;
if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
brelse(bh);
return -EIO;
}
*bhp = bh;
return 0;
}
static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
unsigned int offset, unsigned int size)
{
struct buffer_head *dibh;
int error;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
return error;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
if (ip->i_inode.i_size < offset + size)
i_size_write(&ip->i_inode, offset + size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
return size;
}
/**
* gfs2_dir_write_data - Write directory information to the inode
* @ip: The GFS2 inode
* @buf: The buffer containing information to be written
* @offset: The file offset to start writing at
* @size: The amount of data to write
*
* Returns: The number of bytes correctly written or error code
*/
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
u64 offset, unsigned int size)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
u64 lblock, dblock;
u32 extlen = 0;
unsigned int o;
int copied = 0;
int error = 0;
int new = 0;
if (!size)
return 0;
if (gfs2_is_stuffed(ip) &&
offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
size);
if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
return -EINVAL;
if (gfs2_is_stuffed(ip)) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
return error;
}
lblock = offset;
o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
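/* Note: do_div() divides lblock in place (leaving the logical block
   index) and returns the remainder, i.e. the byte offset within the
   journaled-data block. */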
while (copied < size) {
unsigned int amount;
struct buffer_head *bh;
amount = size - copied;
if (amount > sdp->sd_sb.sb_bsize - o)
amount = sdp->sd_sb.sb_bsize - o;
if (!extlen) {
new = 1;
error = gfs2_extent_map(&ip->i_inode, lblock, &new,
&dblock, &extlen);
if (error)
goto fail;
error = -EIO;
if (gfs2_assert_withdraw(sdp, dblock))
goto fail;
}
if (amount == sdp->sd_jbsize || new)
error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
else
error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);
if (error)
goto fail;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
memcpy(bh->b_data + o, buf, amount);
brelse(bh);
buf += amount;
copied += amount;
lblock++;
dblock++;
extlen--;
o = sizeof(struct gfs2_meta_header);
}
out:
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
return error;
if (ip->i_inode.i_size < offset + copied)
i_size_write(&ip->i_inode, offset + copied);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
return copied;
fail:
if (copied)
goto out;
return error;
}
static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf,
unsigned int size)
{
struct buffer_head *dibh;
int error;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);
brelse(dibh);
}
return (error) ? error : size;
}
/**
* gfs2_dir_read_data - Read data from a directory inode
* @ip: The GFS2 Inode
* @buf: The buffer to place result into
* @size: Amount of data to transfer
*
* Returns: The amount of data actually copied or the error
*/
static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf,
unsigned int size)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
u64 lblock, dblock;
u32 extlen = 0;
unsigned int o;
int copied = 0;
int error = 0;
if (gfs2_is_stuffed(ip))
return gfs2_dir_read_stuffed(ip, buf, size);
if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
return -EINVAL;
lblock = 0;
o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
while (copied < size) {
unsigned int amount;
struct buffer_head *bh;
int new;
amount = size - copied;
if (amount > sdp->sd_sb.sb_bsize - o)
amount = sdp->sd_sb.sb_bsize - o;
if (!extlen) {
new = 0;
error = gfs2_extent_map(&ip->i_inode, lblock, &new,
&dblock, &extlen);
if (error || !dblock)
goto fail;
BUG_ON(extlen < 1);
bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
} else {
error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh);
if (error)
goto fail;
}
error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
if (error) {
brelse(bh);
goto fail;
}
dblock++;
extlen--;
memcpy(buf, bh->b_data + o, amount);
brelse(bh);
buf += (amount/sizeof(__be64));
copied += amount;
lblock++;
o = sizeof(struct gfs2_meta_header);
}
return copied;
fail:
return (copied) ? copied : error;
}
/**
* gfs2_dir_get_hash_table - Get pointer to the dir hash table
* @ip: The inode in question
*
* Returns: The hash table or an error
*/
static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
{
struct inode *inode = &ip->i_inode;
int ret;
u32 hsize;
__be64 *hc;
BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH));
hc = ip->i_hash_cache;
if (hc)
return hc;
hsize = 1 << ip->i_depth;
hsize *= sizeof(__be64);
if (hsize != i_size_read(&ip->i_inode)) {
gfs2_consist_inode(ip);
return ERR_PTR(-EIO);
}
hc = kmalloc(hsize, GFP_NOFS);
if (hc == NULL)
return ERR_PTR(-ENOMEM);
ret = gfs2_dir_read_data(ip, hc, hsize);
if (ret < 0) {
kfree(hc);
return ERR_PTR(ret);
}
spin_lock(&inode->i_lock);
if (ip->i_hash_cache)
kfree(hc);
else
ip->i_hash_cache = hc;
spin_unlock(&inode->i_lock);
return ip->i_hash_cache;
}
/**
* gfs2_dir_hash_inval - Invalidate dir hash
* @ip: The directory inode
*
* Must be called with an exclusive glock, or during glock invalidation.
*/
void gfs2_dir_hash_inval(struct gfs2_inode *ip)
{
__be64 *hc = ip->i_hash_cache;
ip->i_hash_cache = NULL;
kfree(hc);
}
static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
{
return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
}
static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
const struct qstr *name, int ret)
{
if (!gfs2_dirent_sentinel(dent) &&
be32_to_cpu(dent->de_hash) == name->hash &&
be16_to_cpu(dent->de_name_len) == name->len &&
memcmp(dent+1, name->name, name->len) == 0)
return ret;
return 0;
}
static int gfs2_dirent_find(const struct gfs2_dirent *dent,
const struct qstr *name,
void *opaque)
{
return __gfs2_dirent_find(dent, name, 1);
}
static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
const struct qstr *name,
void *opaque)
{
return __gfs2_dirent_find(dent, name, 2);
}
/*
* name->name holds ptr to start of block.
* name->len holds size of block.
*/
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
const struct qstr *name,
void *opaque)
{
const char *start = name->name;
const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len);
if (name->len == (end - start))
return 1;
return 0;
}
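/*
 * Illustrative note (not in the original file): see dir_make_exhash()
 * below for a caller that packs the block bounds into the qstr this way
 * (args.name = start of block, args.len = usable block length).
 */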
static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
const struct qstr *name,
void *opaque)
{
unsigned required = GFS2_DIRENT_SIZE(name->len);
unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
unsigned totlen = be16_to_cpu(dent->de_rec_len);
if (gfs2_dirent_sentinel(dent))
actual = 0;
if (totlen - actual >= required)
return 1;
return 0;
}
struct dirent_gather {
const struct gfs2_dirent **pdent;
unsigned offset;
};
static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
const struct qstr *name,
void *opaque)
{
struct dirent_gather *g = opaque;
if (!gfs2_dirent_sentinel(dent)) {
g->pdent[g->offset++] = dent;
}
return 0;
}
/*
* Other possible things to check:
* - Inode located within filesystem size (and on valid block)
* - Valid directory entry type
* Not sure how heavy-weight we want to make this... could also check
* hash is correct for example, but that would take a lot of extra time.
* For now the most important thing is to check that the various sizes
* are correct.
*/
static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
unsigned int size, unsigned int len, int first)
{
const char *msg = "gfs2_dirent too small";
if (unlikely(size < sizeof(struct gfs2_dirent)))
goto error;
msg = "gfs2_dirent misaligned";
if (unlikely(offset & 0x7))
goto error;
msg = "gfs2_dirent points beyond end of block";
if (unlikely(offset + size > len))
goto error;
msg = "zero inode number";
if (unlikely(!first && gfs2_dirent_sentinel(dent)))
goto error;
msg = "name length is greater than space in dirent";
if (!gfs2_dirent_sentinel(dent) &&
unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
size))
goto error;
return 0;
error:
printk(KERN_WARNING "gfs2_check_dirent: %s (%s)\n", msg,
first ? "first in block" : "not first in block");
return -EIO;
}
static int gfs2_dirent_offset(const void *buf)
{
const struct gfs2_meta_header *h = buf;
int offset;
BUG_ON(buf == NULL);
switch(be32_to_cpu(h->mh_type)) {
case GFS2_METATYPE_LF:
offset = sizeof(struct gfs2_leaf);
break;
case GFS2_METATYPE_DI:
offset = sizeof(struct gfs2_dinode);
break;
default:
goto wrong_type;
}
return offset;
wrong_type:
printk(KERN_WARNING "gfs2_scan_dirent: wrong block type %u\n",
be32_to_cpu(h->mh_type));
return -1;
}
static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
unsigned int len, gfs2_dscan_t scan,
const struct qstr *name,
void *opaque)
{
struct gfs2_dirent *dent, *prev;
unsigned offset;
unsigned size;
int ret = 0;
ret = gfs2_dirent_offset(buf);
if (ret < 0)
goto consist_inode;
offset = ret;
prev = NULL;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
if (gfs2_check_dirent(dent, offset, size, len, 1))
goto consist_inode;
do {
ret = scan(dent, name, opaque);
if (ret)
break;
offset += size;
if (offset == len)
break;
prev = dent;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
if (gfs2_check_dirent(dent, offset, size, len, 0))
goto consist_inode;
} while(1);
switch(ret) {
case 0:
return NULL;
case 1:
return dent;
case 2:
return prev ? prev : dent;
default:
BUG_ON(ret > 0);
return ERR_PTR(ret);
}
consist_inode:
gfs2_consist_inode(GFS2_I(inode));
return ERR_PTR(-EIO);
}
static int dirent_check_reclen(struct gfs2_inode *dip,
const struct gfs2_dirent *d, const void *end_p)
{
const void *ptr = d;
u16 rec_len = be16_to_cpu(d->de_rec_len);
if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
goto broken;
ptr += rec_len;
if (ptr < end_p)
return rec_len;
if (ptr == end_p)
return -ENOENT;
broken:
gfs2_consist_inode(dip);
return -EIO;
}
/**
* dirent_next - Next dirent
* @dip: the directory
* @bh: The buffer
* @dent: Pointer to list of dirents
*
* Returns: 0 on success, error code otherwise
*/
static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
struct gfs2_dirent **dent)
{
struct gfs2_dirent *cur = *dent, *tmp;
char *bh_end = bh->b_data + bh->b_size;
int ret;
ret = dirent_check_reclen(dip, cur, bh_end);
if (ret < 0)
return ret;
tmp = (void *)cur + ret;
ret = dirent_check_reclen(dip, tmp, bh_end);
if (ret == -EIO)
return ret;
/* Only the first dent could ever have de_inum.no_addr == 0 */
if (gfs2_dirent_sentinel(tmp)) {
gfs2_consist_inode(dip);
return -EIO;
}
*dent = tmp;
return 0;
}
/**
* dirent_del - Delete a dirent
* @dip: The GFS2 inode
* @bh: The buffer
* @prev: The previous dirent
* @cur: The current dirent
*
*/
static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
struct gfs2_dirent *prev, struct gfs2_dirent *cur)
{
u16 cur_rec_len, prev_rec_len;
if (gfs2_dirent_sentinel(cur)) {
gfs2_consist_inode(dip);
return;
}
gfs2_trans_add_bh(dip->i_gl, bh, 1);
/* If there is no prev entry, this is the first entry in the block.
The de_rec_len is already as big as it needs to be. Just zero
out the inode number and return. */
if (!prev) {
cur->de_inum.no_addr = 0;
cur->de_inum.no_formal_ino = 0;
return;
}
/* Combine this dentry with the previous one. */
prev_rec_len = be16_to_cpu(prev->de_rec_len);
cur_rec_len = be16_to_cpu(cur->de_rec_len);
if ((char *)prev + prev_rec_len != (char *)cur)
gfs2_consist_inode(dip);
if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
gfs2_consist_inode(dip);
prev_rec_len += cur_rec_len;
prev->de_rec_len = cpu_to_be16(prev_rec_len);
}
/*
* Takes a dent from which to grab space as an argument. Returns the
* newly created dent.
*/
static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
struct gfs2_dirent *dent,
const struct qstr *name,
struct buffer_head *bh)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_dirent *ndent;
unsigned offset = 0, totlen;
if (!gfs2_dirent_sentinel(dent))
offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
totlen = be16_to_cpu(dent->de_rec_len);
BUG_ON(offset + name->len > totlen);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
ndent = (struct gfs2_dirent *)((char *)dent + offset);
dent->de_rec_len = cpu_to_be16(offset);
gfs2_qstr2dirent(name, totlen - offset, ndent);
return ndent;
}
static struct gfs2_dirent *gfs2_dirent_alloc(struct inode *inode,
struct buffer_head *bh,
const struct qstr *name)
{
struct gfs2_dirent *dent;
dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
gfs2_dirent_find_space, name, NULL);
if (!dent || IS_ERR(dent))
return dent;
return gfs2_init_dirent(inode, dent, name, bh);
}
static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
struct buffer_head **bhp)
{
int error;
error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, bhp);
if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) {
/* printk(KERN_INFO "block num=%llu\n", leaf_no); */
error = -EIO;
}
return error;
}
/**
* get_leaf_nr - Get a leaf number associated with the index
* @dip: The GFS2 inode
* @index: The hash table index
* @leaf_out: Returns the leaf block number
*
* Returns: 0 on success, error code otherwise
*/
static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
u64 *leaf_out)
{
__be64 *hash;
hash = gfs2_dir_get_hash_table(dip);
if (IS_ERR(hash))
return PTR_ERR(hash);
*leaf_out = be64_to_cpu(*(hash + index));
return 0;
}
static int get_first_leaf(struct gfs2_inode *dip, u32 index,
struct buffer_head **bh_out)
{
u64 leaf_no;
int error;
error = get_leaf_nr(dip, index, &leaf_no);
if (!error)
error = get_leaf(dip, leaf_no, bh_out);
return error;
}
static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
const struct qstr *name,
gfs2_dscan_t scan,
struct buffer_head **pbh)
{
struct buffer_head *bh;
struct gfs2_dirent *dent;
struct gfs2_inode *ip = GFS2_I(inode);
int error;
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf;
unsigned hsize = 1 << ip->i_depth;
unsigned index;
u64 ln;
if (hsize * sizeof(u64) != i_size_read(inode)) {
gfs2_consist_inode(ip);
return ERR_PTR(-EIO);
}
index = name->hash >> (32 - ip->i_depth);
error = get_first_leaf(ip, index, &bh);
if (error)
return ERR_PTR(error);
do {
dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
scan, name, NULL);
if (dent)
goto got_dent;
leaf = (struct gfs2_leaf *)bh->b_data;
ln = be64_to_cpu(leaf->lf_next);
brelse(bh);
if (!ln)
break;
error = get_leaf(ip, ln, &bh);
} while(!error);
return error ? ERR_PTR(error) : NULL;
}
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
return ERR_PTR(error);
dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);
got_dent:
if (unlikely(dent == NULL || IS_ERR(dent))) {
brelse(bh);
bh = NULL;
}
*pbh = bh;
return dent;
}
static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
{
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int n = 1;
u64 bn;
int error;
struct buffer_head *bh;
struct gfs2_leaf *leaf;
struct gfs2_dirent *dent;
struct qstr name = { .name = "", .len = 0, .hash = 0 };
error = gfs2_alloc_block(ip, &bn, &n);
if (error)
return NULL;
bh = gfs2_meta_new(ip->i_gl, bn);
if (!bh)
return NULL;
gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
leaf = (struct gfs2_leaf *)bh->b_data;
leaf->lf_depth = cpu_to_be16(depth);
leaf->lf_entries = 0;
leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);
leaf->lf_next = 0;
memset(leaf->lf_reserved, 0, sizeof(leaf->lf_reserved));
dent = (struct gfs2_dirent *)(leaf+1);
gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
*pbh = bh;
return leaf;
}
/**
* dir_make_exhash - Convert a stuffed directory into an ExHash directory
* @inode: The directory inode to convert
*
* Returns: 0 on success, error code otherwise
*/
static int dir_make_exhash(struct inode *inode)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_dirent *dent;
struct qstr args;
struct buffer_head *bh, *dibh;
struct gfs2_leaf *leaf;
int y;
u32 x;
__be64 *lp;
u64 bn;
int error;
error = gfs2_meta_inode_buffer(dip, &dibh);
if (error)
return error;
/* Turn over a new leaf */
leaf = new_leaf(inode, &bh, 0);
if (!leaf)
return -ENOSPC;
bn = bh->b_blocknr;
gfs2_assert(sdp, dip->i_entries < (1 << 16));
leaf->lf_entries = cpu_to_be16(dip->i_entries);
/* Copy dirents */
gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
sizeof(struct gfs2_dinode));
/* Find last entry */
x = 0;
args.len = bh->b_size - sizeof(struct gfs2_dinode) +
sizeof(struct gfs2_leaf);
args.name = bh->b_data;
dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
gfs2_dirent_last, &args, NULL);
if (!dent) {
brelse(bh);
brelse(dibh);
return -EIO;
}
if (IS_ERR(dent)) {
brelse(bh);
brelse(dibh);
return PTR_ERR(dent);
}
/* Adjust the last dirent's record length
(Remember that dent still points to the last entry.) */
dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
sizeof(struct gfs2_dinode) -
sizeof(struct gfs2_leaf));
brelse(bh);
/* We're done with the new leaf block, now setup the new
hash table. */
gfs2_trans_add_bh(dip->i_gl, dibh, 1);
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
for (x = sdp->sd_hash_ptrs; x--; lp++)
*lp = cpu_to_be64(bn);
i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
gfs2_add_inode_blocks(&dip->i_inode, 1);
dip->i_diskflags |= GFS2_DIF_EXHASH;
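/* The loop below computes the directory depth: y ends up as
   ilog2(sd_hash_ptrs). */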
for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
dip->i_depth = y;
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
return 0;
}
/**
* dir_split_leaf - Split a leaf block into two
* @inode: The directory inode
* @name: The name whose hash selects the leaf to split
*
* Returns: 0 on success, error code on failure
*/
static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct buffer_head *nbh, *obh, *dibh;
struct gfs2_leaf *nleaf, *oleaf;
struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
u32 start, len, half_len, divider;
u64 bn, leaf_no;
__be64 *lp;
u32 index;
int x, moved = 0;
int error;
index = name->hash >> (32 - dip->i_depth);
error = get_leaf_nr(dip, index, &leaf_no);
if (error)
return error;
/* Get the old leaf block */
error = get_leaf(dip, leaf_no, &obh);
if (error)
return error;
oleaf = (struct gfs2_leaf *)obh->b_data;
if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
brelse(obh);
return 1; /* can't split */
}
gfs2_trans_add_bh(dip->i_gl, obh, 1);
nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
if (!nleaf) {
brelse(obh);
return -ENOSPC;
}
bn = nbh->b_blocknr;
/* Compute the start and len of leaf pointers in the hash table. */
len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
half_len = len >> 1;
if (!half_len) {
printk(KERN_WARNING "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
gfs2_consist_inode(dip);
error = -EIO;
goto fail_brelse;
}
start = (index & ~(len - 1));
/* Change the pointers.
Don't bother distinguishing stuffed from non-stuffed.
This code is complicated enough already. */
lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
if (!lp) {
error = -ENOMEM;
goto fail_brelse;
}
/* Change the pointers */
for (x = 0; x < half_len; x++)
lp[x] = cpu_to_be64(bn);
gfs2_dir_hash_inval(dip);
error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
half_len * sizeof(u64));
if (error != half_len * sizeof(u64)) {
if (error >= 0)
error = -EIO;
goto fail_lpfree;
}
kfree(lp);
/* Compute the divider */
divider = (start + half_len) << (32 - dip->i_depth);
/* Copy the entries */
dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
do {
next = dent;
if (dirent_next(dip, obh, &next))
next = NULL;
if (!gfs2_dirent_sentinel(dent) &&
be32_to_cpu(dent->de_hash) < divider) {
struct qstr str;
str.name = (char*)(dent+1);
str.len = be16_to_cpu(dent->de_name_len);
str.hash = be32_to_cpu(dent->de_hash);
new = gfs2_dirent_alloc(inode, nbh, &str);
if (IS_ERR(new)) {
error = PTR_ERR(new);
break;
}
new->de_inum = dent->de_inum; /* No endian worries */
new->de_type = dent->de_type; /* No endian worries */
be16_add_cpu(&nleaf->lf_entries, 1);
dirent_del(dip, obh, prev, dent);
if (!oleaf->lf_entries)
gfs2_consist_inode(dip);
be16_add_cpu(&oleaf->lf_entries, -1);
if (!prev)
prev = dent;
moved = 1;
} else {
prev = dent;
}
dent = next;
} while (dent);
oleaf->lf_depth = nleaf->lf_depth;
error = gfs2_meta_inode_buffer(dip, &dibh);
if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
gfs2_trans_add_bh(dip->i_gl, dibh, 1);
gfs2_add_inode_blocks(&dip->i_inode, 1);
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
}
brelse(obh);
brelse(nbh);
return error;
fail_lpfree:
kfree(lp);
fail_brelse:
brelse(obh);
brelse(nbh);
return error;
}
/**
* dir_double_exhash - Double size of ExHash table
* @dip: The GFS2 dinode
*
* Returns: 0 on success, error code on failure
*/
static int dir_double_exhash(struct gfs2_inode *dip)
{
struct buffer_head *dibh;
u32 hsize;
u32 hsize_bytes;
__be64 *hc;
__be64 *hc2, *h;
int x;
int error = 0;
hsize = 1 << dip->i_depth;
hsize_bytes = hsize * sizeof(__be64);
hc = gfs2_dir_get_hash_table(dip);
if (IS_ERR(hc))
return PTR_ERR(hc);
h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS);
if (!hc2)
return -ENOMEM;
error = gfs2_meta_inode_buffer(dip, &dibh);
if (error)
goto out_kfree;
for (x = 0; x < hsize; x++) {
*h++ = *hc;
*h++ = *hc;
hc++;
}
error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
if (error != (hsize_bytes * 2))
goto fail;
gfs2_dir_hash_inval(dip);
dip->i_hash_cache = hc2;
dip->i_depth++;
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
return 0;
fail:
/* Replace original hash table & size */
gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
i_size_write(&dip->i_inode, hsize_bytes);
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
out_kfree:
kfree(hc2);
return error;
}
/**
* compare_dents - compare directory entries by hash value
* @a: first dent
* @b: second dent
*
* When comparing the hash entries of @a to @b:
* gt: returns 1
* lt: returns -1
* eq: returns 0
*/
static int compare_dents(const void *a, const void *b)
{
const struct gfs2_dirent *dent_a, *dent_b;
u32 hash_a, hash_b;
int ret = 0;
dent_a = *(const struct gfs2_dirent **)a;
hash_a = be32_to_cpu(dent_a->de_hash);
dent_b = *(const struct gfs2_dirent **)b;
hash_b = be32_to_cpu(dent_b->de_hash);
if (hash_a > hash_b)
ret = 1;
else if (hash_a < hash_b)
ret = -1;
else {
unsigned int len_a = be16_to_cpu(dent_a->de_name_len);
unsigned int len_b = be16_to_cpu(dent_b->de_name_len);
if (len_a > len_b)
ret = 1;
else if (len_a < len_b)
ret = -1;
else
ret = memcmp(dent_a + 1, dent_b + 1, len_a);
}
return ret;
}
/**
* do_filldir_main - read out directory entries
* @dip: The GFS2 inode
* @offset: The offset in the file to read from
* @opaque: opaque data to pass to filldir
* @filldir: The function to pass entries to
* @darr: an array of struct gfs2_dirent pointers to read
* @entries: the number of entries in darr
* @copied: pointer to int that's non-zero if an entry has been copied out
*
* Jump through some hoops to make sure that if there are hash collisions,
* they are read out at the beginning of a buffer. We want to minimize
* the possibility that they will fall into different readdir buffers or
* that someone will want to seek to that location.
*
* Returns: errno, >0 on exception from filldir
*/
static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
void *opaque, filldir_t filldir,
const struct gfs2_dirent **darr, u32 entries,
int *copied)
{
const struct gfs2_dirent *dent, *dent_next;
u64 off, off_next;
unsigned int x, y;
int run = 0;
int error = 0;
sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL);
dent_next = darr[0];
off_next = be32_to_cpu(dent_next->de_hash);
off_next = gfs2_disk_hash2offset(off_next);
for (x = 0, y = 1; x < entries; x++, y++) {
dent = dent_next;
off = off_next;
if (y < entries) {
dent_next = darr[y];
off_next = be32_to_cpu(dent_next->de_hash);
off_next = gfs2_disk_hash2offset(off_next);
if (off < *offset)
continue;
*offset = off;
if (off_next == off) {
if (*copied && !run)
return 1;
run = 1;
} else
run = 0;
} else {
if (off < *offset)
continue;
*offset = off;
}
error = filldir(opaque, (const char *)(dent + 1),
be16_to_cpu(dent->de_name_len),
off, be64_to_cpu(dent->de_inum.no_addr),
be16_to_cpu(dent->de_type));
if (error)
return 1;
*copied = 1;
}
/* Increment the *offset by one, so the next time we come into the
do_filldir fxn, we get the next entry instead of the last one in the
current leaf */
(*offset)++;
return 0;
}
static void *gfs2_alloc_sort_buffer(unsigned size)
{
void *ptr = NULL;
if (size < KMALLOC_MAX_SIZE)
ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
if (!ptr)
ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
return ptr;
}
static void gfs2_free_sort_buffer(void *ptr)
{
if (is_vmalloc_addr(ptr))
vfree(ptr);
else
kfree(ptr);
}
static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir, int *copied, unsigned *depth,
u64 leaf_no)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *bh;
struct gfs2_leaf *lf;
unsigned entries = 0, entries2 = 0;
unsigned leaves = 0;
const struct gfs2_dirent **darr, *dent;
struct dirent_gather g;
struct buffer_head **larr;
int leaf = 0;
int error, i;
u64 lfn = leaf_no;
do {
error = get_leaf(ip, lfn, &bh);
if (error)
goto out;
lf = (struct gfs2_leaf *)bh->b_data;
if (leaves == 0)
*depth = be16_to_cpu(lf->lf_depth);
entries += be16_to_cpu(lf->lf_entries);
leaves++;
lfn = be64_to_cpu(lf->lf_next);
brelse(bh);
} while(lfn);
if (!entries)
return 0;
error = -ENOMEM;
/*
* The extra 99 entries are not normally used, but are a buffer
* zone in case the number of entries in the leaf is corrupt.
* 99 is the maximum number of entries that can fit in a single
* leaf block.
*/
larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
if (!larr)
goto out;
darr = (const struct gfs2_dirent **)(larr + leaves);
g.pdent = darr;
g.offset = 0;
lfn = leaf_no;
do {
error = get_leaf(ip, lfn, &bh);
if (error)
goto out_free;
lf = (struct gfs2_leaf *)bh->b_data;
lfn = be64_to_cpu(lf->lf_next);
if (lf->lf_entries) {
entries2 += be16_to_cpu(lf->lf_entries);
dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
gfs2_dirent_gather, NULL, &g);
error = PTR_ERR(dent);
if (IS_ERR(dent))
goto out_free;
if (entries2 != g.offset) {
fs_warn(sdp, "Number of entries corrupt in dir "
"leaf %llu, entries2 (%u) != "
"g.offset (%u)\n",
(unsigned long long)bh->b_blocknr,
entries2, g.offset);
error = -EIO;
goto out_free;
}
error = 0;
larr[leaf++] = bh;
} else {
brelse(bh);
}
} while(lfn);
BUG_ON(entries2 != entries);
error = do_filldir_main(ip, offset, opaque, filldir, darr,
entries, copied);
out_free:
for(i = 0; i < leaf; i++)
brelse(larr[i]);
gfs2_free_sort_buffer(larr);
out:
return error;
}
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
* @dip: dinode pointer
* @offset: the hash of the last entry read shifted to the right once
* @opaque: buffer for the filldir function to fill
* @filldir: points to the filldir function to use
*
* Returns: errno
*/
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
{
struct gfs2_inode *dip = GFS2_I(inode);
u32 hsize, len = 0;
u32 hash, index;
__be64 *lp;
int copied = 0;
int error = 0;
unsigned depth = 0;
hsize = 1 << dip->i_depth;
hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_depth);
lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp))
return PTR_ERR(lp);
while (index < hsize) {
error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
&copied, &depth,
be64_to_cpu(lp[index]));
if (error)
break;
len = 1 << (dip->i_depth - depth);
index = (index & ~(len - 1)) + len;
}
if (error > 0)
error = 0;
return error;
}
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
{
struct gfs2_inode *dip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct dirent_gather g;
const struct gfs2_dirent **darr, *dent;
struct buffer_head *dibh;
int copied = 0;
int error;
if (!dip->i_entries)
return 0;
if (dip->i_diskflags & GFS2_DIF_EXHASH)
return dir_e_read(inode, offset, opaque, filldir);
if (!gfs2_is_stuffed(dip)) {
gfs2_consist_inode(dip);
return -EIO;
}
error = gfs2_meta_inode_buffer(dip, &dibh);
if (error)
return error;
error = -ENOMEM;
/* 96 is max number of dirents which can be stuffed into an inode */
darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
if (darr) {
g.pdent = darr;
g.offset = 0;
dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
gfs2_dirent_gather, NULL, &g);
if (IS_ERR(dent)) {
error = PTR_ERR(dent);
goto out;
}
if (dip->i_entries != g.offset) {
fs_warn(sdp, "Number of entries corrupt in dir %llu, "
"ip->i_entries (%u) != g.offset (%u)\n",
(unsigned long long)dip->i_no_addr,
dip->i_entries,
g.offset);
error = -EIO;
goto out;
}
error = do_filldir_main(dip, offset, opaque, filldir, darr,
dip->i_entries, &copied);
out:
kfree(darr);
}
if (error > 0)
error = 0;
brelse(dibh);
return error;
}
/**
* gfs2_dir_search - Search a directory
* @dir: The directory inode to search
* @name: The name to look up
*
* This routine searches a directory for a file or another directory.
* Assumes a glock is held on dip.
*
* Returns: errno
*/
struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
{
struct buffer_head *bh;
struct gfs2_dirent *dent;
struct inode *inode;
dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
if (dent) {
if (IS_ERR(dent))
return ERR_CAST(dent);
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
return ERR_PTR(-ENOENT);
}
int gfs2_dir_check(struct inode *dir, const struct qstr *name,
const struct gfs2_inode *ip)
{
struct buffer_head *bh;
struct gfs2_dirent *dent;
int ret = -ENOENT;
dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
if (dent) {
if (IS_ERR(dent))
return PTR_ERR(dent);
if (ip) {
if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
goto out;
if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
ip->i_no_formal_ino)
goto out;
if (unlikely(IF2DT(ip->i_inode.i_mode) !=
be16_to_cpu(dent->de_type))) {
gfs2_consist_inode(GFS2_I(dir));
ret = -EIO;
goto out;
}
}
ret = 0;
out:
brelse(bh);
}
return ret;
}
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
struct buffer_head *bh, *obh;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_leaf *leaf, *oleaf;
int error;
u32 index;
u64 bn;
index = name->hash >> (32 - ip->i_depth);
error = get_first_leaf(ip, index, &obh);
if (error)
return error;
do {
oleaf = (struct gfs2_leaf *)obh->b_data;
bn = be64_to_cpu(oleaf->lf_next);
if (!bn)
break;
brelse(obh);
error = get_leaf(ip, bn, &obh);
if (error)
return error;
} while(1);
gfs2_trans_add_bh(ip->i_gl, obh, 1);
leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
if (!leaf) {
brelse(obh);
return -ENOSPC;
}
oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
brelse(bh);
brelse(obh);
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
return error;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_add_inode_blocks(&ip->i_inode, 1);
gfs2_dinode_out(ip, bh->b_data);
brelse(bh);
return 0;
}
/**
* gfs2_dir_add - Add new filename into directory
* @inode: The directory inode
* @name: The new name
* @nip: The GFS2 inode of the new entry
*
* Returns: 0 on success, error code on failure
*/
int gfs2_dir_add(struct inode *inode, const struct qstr *name,
const struct gfs2_inode *nip)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct buffer_head *bh;
struct gfs2_dirent *dent;
struct gfs2_leaf *leaf;
int error;
while(1) {
dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space,
&bh);
if (dent) {
if (IS_ERR(dent))
return PTR_ERR(dent);
dent = gfs2_init_dirent(inode, dent, name, bh);
gfs2_inum_out(nip, dent);
dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
leaf = (struct gfs2_leaf *)bh->b_data;
be16_add_cpu(&leaf->lf_entries, 1);
}
brelse(bh);
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
break;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
ip->i_entries++;
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
gfs2_dinode_out(ip, bh->b_data);
brelse(bh);
error = 0;
break;
}
if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
error = dir_make_exhash(inode);
if (error)
break;
continue;
}
error = dir_split_leaf(inode, name);
if (error == 0)
continue;
if (error < 0)
break;
if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
error = dir_double_exhash(ip);
if (error)
break;
error = dir_split_leaf(inode, name);
if (error < 0)
break;
if (error == 0)
continue;
}
error = dir_new_leaf(inode, name);
if (!error)
continue;
error = -ENOSPC;
break;
}
return error;
}
/**
* gfs2_dir_del - Delete a directory entry
* @dip: The GFS2 inode
* @dentry: The directory entry to delete
*
* Returns: 0 on success, error code on failure
*/
int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
{
const struct qstr *name = &dentry->d_name;
struct gfs2_dirent *dent, *prev = NULL;
struct buffer_head *bh;
/* Returns _either_ the entry (if it's first in the block) or the
previous entry otherwise */
dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
if (!dent) {
gfs2_consist_inode(dip);
return -EIO;
}
if (IS_ERR(dent)) {
gfs2_consist_inode(dip);
return PTR_ERR(dent);
}
/* If not first in block, adjust pointers accordingly */
if (gfs2_dirent_find(dent, name, NULL) == 0) {
prev = dent;
dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len));
}
dirent_del(dip, bh, prev, dent);
if (dip->i_diskflags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
u16 entries = be16_to_cpu(leaf->lf_entries);
if (!entries)
gfs2_consist_inode(dip);
leaf->lf_entries = cpu_to_be16(--entries);
}
brelse(bh);
if (!dip->i_entries)
gfs2_consist_inode(dip);
dip->i_entries--;
dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
if (S_ISDIR(dentry->d_inode->i_mode))
drop_nlink(&dip->i_inode);
mark_inode_dirty(&dip->i_inode);
return 0;
}
/**
* gfs2_dir_mvino - Change inode number of directory entry
* @dip: The GFS2 inode
* @filename: The name of the entry to change
* @nip: The GFS2 inode to point the entry at
* @new_type: The new directory entry type
*
* This routine changes the inode number of a directory entry. It's used
* by rename to change ".." when a directory is moved.
* Assumes a glock is held on dip.
*
* Returns: errno
*/
int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
const struct gfs2_inode *nip, unsigned int new_type)
{
struct buffer_head *bh;
struct gfs2_dirent *dent;
int error;
dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
if (!dent) {
gfs2_consist_inode(dip);
return -EIO;
}
if (IS_ERR(dent))
return PTR_ERR(dent);
gfs2_trans_add_bh(dip->i_gl, bh, 1);
gfs2_inum_out(nip, dent);
dent->de_type = cpu_to_be16(new_type);
if (dip->i_diskflags & GFS2_DIF_EXHASH) {
brelse(bh);
error = gfs2_meta_inode_buffer(dip, &bh);
if (error)
return error;
gfs2_trans_add_bh(dip->i_gl, bh, 1);
}
dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(dip, bh->b_data);
brelse(bh);
return 0;
}
/**
* leaf_dealloc - Deallocate a directory leaf
* @dip: the directory
* @index: the hash table offset in the directory
* @len: the number of pointers to this leaf
* @leaf_no: the leaf number
* @leaf_bh: buffer_head for the starting leaf
* @last_dealloc: 1 if this is the final dealloc for the leaf, else 0
*
* Returns: errno
*/
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
u64 leaf_no, struct buffer_head *leaf_bh,
int last_dealloc)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_leaf *tmp_leaf;
struct gfs2_rgrp_list rlist;
struct buffer_head *bh, *dibh;
u64 blk, nblk;
unsigned int rg_blocks = 0, l_blocks = 0;
char *ht;
unsigned int x, size = len * sizeof(u64);
int error;
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
ht = kzalloc(size, GFP_NOFS);
if (!ht)
return -ENOMEM;
if (!gfs2_alloc_get(dip)) {
error = -ENOMEM;
goto out;
}
error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
goto out_put;
/* Count the number of leaves */
bh = leaf_bh;
for (blk = leaf_no; blk; blk = nblk) {
if (blk != leaf_no) {
error = get_leaf(dip, blk, &bh);
if (error)
goto out_rlist;
}
tmp_leaf = (struct gfs2_leaf *)bh->b_data;
nblk = be64_to_cpu(tmp_leaf->lf_next);
if (blk != leaf_no)
brelse(bh);
gfs2_rlist_add(dip, &rlist, blk);
l_blocks++;
}
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd;
rgd = rlist.rl_ghs[x].gh_gl->gl_object;
rg_blocks += rgd->rd_length;
}
error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
if (error)
goto out_rlist;
error = gfs2_trans_begin(sdp,
rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
if (error)
goto out_rg_gunlock;
bh = leaf_bh;
for (blk = leaf_no; blk; blk = nblk) {
if (blk != leaf_no) {
error = get_leaf(dip, blk, &bh);
if (error)
goto out_end_trans;
}
tmp_leaf = (struct gfs2_leaf *)bh->b_data;
nblk = be64_to_cpu(tmp_leaf->lf_next);
if (blk != leaf_no)
brelse(bh);
gfs2_free_meta(dip, blk, 1);
gfs2_add_inode_blocks(&dip->i_inode, -1);
}
error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
if (error != size) {
if (error >= 0)
error = -EIO;
goto out_end_trans;
}
error = gfs2_meta_inode_buffer(dip, &dibh);
if (error)
goto out_end_trans;
gfs2_trans_add_bh(dip->i_gl, dibh, 1);
/* On the last dealloc, make this a regular file in case we crash.
(We don't want to free these blocks a second time.) */
if (last_dealloc)
dip->i_inode.i_mode = S_IFREG;
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
out_end_trans:
gfs2_trans_end(sdp);
out_rg_gunlock:
gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
gfs2_rlist_free(&rlist);
gfs2_quota_unhold(dip);
out_put:
gfs2_alloc_put(dip);
out:
kfree(ht);
return error;
}
/**
* gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
* @dip: the directory
*
* Dealloc all on-disk directory leaves to FREEMETA state
* Change on-disk inode type to "regular file"
*
* Returns: errno
*/
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
struct buffer_head *bh;
struct gfs2_leaf *leaf;
u32 hsize, len;
u32 index = 0, next_index;
__be64 *lp;
u64 leaf_no;
int error = 0, last;
hsize = 1 << dip->i_depth;
lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp))
return PTR_ERR(lp);
while (index < hsize) {
leaf_no = be64_to_cpu(lp[index]);
if (leaf_no) {
error = get_leaf(dip, leaf_no, &bh);
if (error)
goto out;
leaf = (struct gfs2_leaf *)bh->b_data;
len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
next_index = (index & ~(len - 1)) + len;
last = ((next_index >= hsize) ? 1 : 0);
error = leaf_dealloc(dip, index, len, leaf_no, bh,
last);
brelse(bh);
if (error)
goto out;
index = next_index;
} else
index++;
}
if (index != hsize) {
gfs2_consist_inode(dip);
error = -EIO;
}
out:
return error;
}
/**
* gfs2_diradd_alloc_required - find if adding entry will require an allocation
* @inode: the directory being written to
* @name: the filename that's going to be added
*
* Returns: 1 if alloc required, 0 if not, -ve on error
*/
int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name)
{
struct gfs2_dirent *dent;
struct buffer_head *bh;
dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
if (!dent)
return 1;
if (IS_ERR(dent))
return PTR_ERR(dent);
brelse(bh);
return 0;
}
| gpl-2.0 |
Potin/linux-am33x-04.06.00.07 | kernel/rcutorture.c | 170 | 47300 | /*
* Read-Copy Update module-based torture test facility
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2005, 2006
*
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
* Josh Triplett <josh@freedesktop.org>
*
* See also: Documentation/RCU/torture.txt
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
"Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4; /* # fake writer threads */
static int stat_interval; /* Interval between stats, in seconds. */
/* Defaults to "only at end of test". */
static int verbose; /* Print more debug info. */
static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5; /* Start/stop testing interval (in sec) */
static int irqreader = 1; /* RCU readers from irq (timers). */
static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0; /* Hold time within burst (us). */
static int fqs_stutter = 3; /* Wait time between bursts (s). */
static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
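/*
 * Illustrative usage (not in the original file): the kthreads in this
 * file typically announce lifecycle events with these macros, e.g.
 *
 *	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
 */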
static char printk_buf[4096];
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
#define RCU_TORTURE_PIPE_LEN 10
struct rcu_torture {
struct rcu_head rtort_rcu;
int rtort_pipe_count;
struct list_head rtort_free;
int rtort_mbtest;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test;
#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
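/*
 * rcu_can_boost() is 1 only when RCU priority boosting is configured and
 * CPU hotplug is not; with hotplug enabled, test_boost=1 ("maybe")
 * declines to boost and boosting must be forced with test_boost=2 (see
 * the "(test_boost == 1 && cur_ops->can_boost) || test_boost == 2" tests
 * below).  The hotplug exclusion itself is not explained in this file.
 */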
static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
#define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
/*
* Protect fullstop transitions and spawning of kthreads.
*/
static DEFINE_MUTEX(fullstop_mutex);
/*
* Detect and respond to a system shutdown.
*/
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
mutex_lock(&fullstop_mutex);
if (fullstop == FULLSTOP_DONTSTOP)
fullstop = FULLSTOP_SHUTDOWN;
else
printk(KERN_WARNING /* but going down anyway, so... */
"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
return NOTIFY_DONE;
}
/*
* Absorb kthreads into a kernel function that won't return, so that
* they won't ever access module text or data again.
*/
static void rcutorture_shutdown_absorb(char *title)
{
if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
printk(KERN_NOTICE
"rcutorture thread %s parking due to system shutdown\n",
title);
schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
}
}
/*
* Allocate an element from the rcu_tortures pool.
*/
static struct rcu_torture *
rcu_torture_alloc(void)
{
struct list_head *p;
spin_lock_bh(&rcu_torture_lock);
if (list_empty(&rcu_torture_freelist)) {
atomic_inc(&n_rcu_torture_alloc_fail);
spin_unlock_bh(&rcu_torture_lock);
return NULL;
}
atomic_inc(&n_rcu_torture_alloc);
p = rcu_torture_freelist.next;
list_del_init(p);
spin_unlock_bh(&rcu_torture_lock);
return container_of(p, struct rcu_torture, rtort_free);
}
/*
* Free an element to the rcu_tortures pool.
*/
static void
rcu_torture_free(struct rcu_torture *p)
{
atomic_inc(&n_rcu_torture_free);
spin_lock_bh(&rcu_torture_lock);
list_add_tail(&p->rtort_free, &rcu_torture_freelist);
spin_unlock_bh(&rcu_torture_lock);
}
struct rcu_random_state {
unsigned long rrs_state;
long rrs_count;
};
#define RCU_RANDOM_MULT 39916801 /* prime */
#define RCU_RANDOM_ADD 479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000
#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
/*
* Crude but fast random-number generator. Uses a linear congruential
* generator, with occasional help from cpu_clock().
*/
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
if (--rrsp->rrs_count < 0) {
rrsp->rrs_state += (unsigned long)local_clock();
rrsp->rrs_count = RCU_RANDOM_REFRESH;
}
rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
return swahw32(rrsp->rrs_state);
}
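/*
 * Illustrative use of the generator above (a sketch, not code from the
 * original file): callers keep per-thread state and reduce the result
 * modulo the desired range, e.g. to take a branch roughly once per ten
 * calls:
 *
 *	DEFINE_RCU_RANDOM(rand);
 *	...
 *	if (!(rcu_random(&rand) % 10))
 *		udelay(10);
 */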
static void
rcu_stutter_wait(char *title)
{
while (stutter_pause_test || !rcutorture_runnable) {
if (rcutorture_runnable)
schedule_timeout_interruptible(1);
else
schedule_timeout_interruptible(round_jiffies_relative(HZ));
rcutorture_shutdown_absorb(title);
}
}
/*
* Operations vector for selecting different types of tests.
*/
struct rcu_torture_ops {
void (*init)(void);
void (*cleanup)(void);
int (*readlock)(void);
void (*read_delay)(struct rcu_random_state *rrsp);
void (*readunlock)(int idx);
int (*completed)(void);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
void (*cb_barrier)(void);
void (*fqs)(void);
int (*stats)(char *page);
int irq_capable;
int can_boost;
char *name;
};
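/*
 * A NULL member means "not applicable for this flavor": the framework
 * checks ->init, ->cleanup, ->cb_barrier, ->fqs, and ->stats against
 * NULL before invoking them, so synchronous-only flavors can simply
 * leave ->cb_barrier unset (see rcu_sync_ops below).
 */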
static struct rcu_torture_ops *cur_ops;
/*
* Definitions for rcu torture testing.
*/
static int rcu_torture_read_lock(void) __acquires(RCU)
{
rcu_read_lock();
return 0;
}
static void rcu_read_delay(struct rcu_random_state *rrsp)
{
const unsigned long shortdelay_us = 200;
const unsigned long longdelay_ms = 50;
/* We want a short delay sometimes to make a reader delay the grace
* period, and we want a long delay occasionally to trigger
* force_quiescent_state. */
if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
preempt_schedule(); /* No QS if preempt_disable() in effect */
#endif
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
rcu_read_unlock();
}
static int rcu_torture_completed(void)
{
return rcu_batches_completed();
}
static void
rcu_torture_cb(struct rcu_head *p)
{
int i;
struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
if (fullstop != FULLSTOP_DONTSTOP) {
/* Test is ending, just drop callbacks on the floor. */
/* The next initialization will pick up the pieces. */
return;
}
i = rp->rtort_pipe_count;
if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
rp->rtort_mbtest = 0;
rcu_torture_free(rp);
} else
cur_ops->deferred_free(rp);
}
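/*
 * Pipeline accounting, as implemented above: an element's
 * rtort_pipe_count is incremented once per grace period that it
 * survives after being replaced as rcu_torture_current, and
 * rcu_torture_wcount[] histograms those counts.  Once an element has
 * aged through RCU_TORTURE_PIPE_LEN grace periods it goes back on the
 * freelist; until then it is re-queued via ->deferred_free.
 */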
static int rcu_no_completed(void)
{
return 0;
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_ops = {
.init = NULL,
.cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
.completed = rcu_torture_completed,
.deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
.cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
.name = "rcu"
};
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
int i;
struct rcu_torture *rp;
struct rcu_torture *rp1;
cur_ops->sync();
list_add(&p->rtort_free, &rcu_torture_removed);
list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
i = rp->rtort_pipe_count;
if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
rp->rtort_mbtest = 0;
list_del(&rp->rtort_free);
rcu_torture_free(rp);
}
}
}
static void rcu_sync_torture_init(void)
{
INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_sync_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
.completed = rcu_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_rcu,
.cb_barrier = NULL,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
.name = "rcu_sync"
};
static struct rcu_torture_ops rcu_expedited_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_rcu_expedited,
.cb_barrier = NULL,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
.name = "rcu_expedited"
};
/*
* Definitions for rcu_bh torture testing.
*/
static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
rcu_read_lock_bh();
return 0;
}
static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
rcu_read_unlock_bh();
}
static int rcu_bh_torture_completed(void)
{
return rcu_batches_completed_bh();
}
static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops rcu_bh_ops = {
.init = NULL,
.cleanup = NULL,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
.cb_barrier = rcu_barrier_bh,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.name = "rcu_bh"
};
static struct rcu_torture_ops rcu_bh_sync_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_rcu_bh,
.cb_barrier = NULL,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.name = "rcu_bh_sync"
};
static struct rcu_torture_ops rcu_bh_expedited_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_rcu_bh_expedited,
.cb_barrier = NULL,
.fqs = rcu_bh_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.name = "rcu_bh_expedited"
};
/*
* Definitions for srcu torture testing.
*/
static struct srcu_struct srcu_ctl;
static void srcu_torture_init(void)
{
init_srcu_struct(&srcu_ctl);
rcu_sync_torture_init();
}
static void srcu_torture_cleanup(void)
{
synchronize_srcu(&srcu_ctl);
cleanup_srcu_struct(&srcu_ctl);
}
static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
return srcu_read_lock(&srcu_ctl);
}
static void srcu_read_delay(struct rcu_random_state *rrsp)
{
long delay;
const long uspertick = 1000000 / HZ;
const long longdelay = 10;
/* We want there to be long-running readers, but not all the time. */
delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
if (!delay)
schedule_timeout_interruptible(longdelay);
else
rcu_read_delay(rrsp);
}
static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
srcu_read_unlock(&srcu_ctl, idx);
}
static int srcu_torture_completed(void)
{
return srcu_batches_completed(&srcu_ctl);
}
static void srcu_torture_synchronize(void)
{
synchronize_srcu(&srcu_ctl);
}
static int srcu_torture_stats(char *page)
{
int cnt = 0;
int cpu;
int idx = srcu_ctl.completed & 0x1;
cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
torture_type, TORTURE_FLAG, idx);
for_each_possible_cpu(cpu) {
cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
}
cnt += sprintf(&page[cnt], "\n");
return cnt;
}
static struct rcu_torture_ops srcu_ops = {
.init = srcu_torture_init,
.cleanup = srcu_torture_cleanup,
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
.completed = srcu_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = srcu_torture_synchronize,
.cb_barrier = NULL,
.stats = srcu_torture_stats,
.name = "srcu"
};
static void srcu_torture_synchronize_expedited(void)
{
synchronize_srcu_expedited(&srcu_ctl);
}
static struct rcu_torture_ops srcu_expedited_ops = {
.init = srcu_torture_init,
.cleanup = srcu_torture_cleanup,
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
.completed = srcu_torture_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = srcu_torture_synchronize_expedited,
.cb_barrier = NULL,
.stats = srcu_torture_stats,
.name = "srcu_expedited"
};
/*
* Definitions for sched torture testing.
*/
static int sched_torture_read_lock(void)
{
preempt_disable();
return 0;
}
static void sched_torture_read_unlock(int idx)
{
preempt_enable();
}
static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}
static struct rcu_torture_ops sched_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
.cb_barrier = rcu_barrier_sched,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.name = "sched"
};
static struct rcu_torture_ops sched_sync_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_sched,
.cb_barrier = NULL,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
.name = "sched_sync"
};
static struct rcu_torture_ops sched_expedited_ops = {
.init = rcu_sync_torture_init,
.cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
.completed = rcu_no_completed,
.deferred_free = rcu_sync_torture_deferred_free,
.sync = synchronize_sched_expedited,
.cb_barrier = NULL,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.name = "sched_expedited"
};
/*
* RCU torture priority-boost testing. Runs one real-time thread per
* CPU for moderate bursts, repeatedly registering RCU callbacks and
* spinning waiting for them to be invoked. If a given callback takes
* too long to be invoked, we assume that priority inversion has occurred.
*/
struct rcu_boost_inflight {
struct rcu_head rcu;
int inflight;
};
static void rcu_torture_boost_cb(struct rcu_head *head)
{
struct rcu_boost_inflight *rbip =
container_of(head, struct rcu_boost_inflight, rcu);
smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
rbip->inflight = 0;
}
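/*
 * The smp_mb() above is intended to pair with the smp_mb() in
 * rcu_torture_boost() that precedes the ->inflight = 1 store, keeping
 * the RCU-core accesses on each side ordered with respect to the
 * ->inflight handoff that rcu_torture_boost() polls.  (Pairing inferred
 * from the two barrier comments; the original file does not state it
 * explicitly.)
 */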
static int rcu_torture_boost(void *arg)
{
unsigned long call_rcu_time;
unsigned long endtime;
unsigned long oldstarttime;
struct rcu_boost_inflight rbi = { .inflight = 0 };
struct sched_param sp;
VERBOSE_PRINTK_STRING("rcu_torture_boost started");
/* Set real-time priority. */
sp.sched_priority = 1;
if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
n_rcu_torture_boost_rterror++;
}
init_rcu_head_on_stack(&rbi.rcu);
/* Each pass through the following loop does one boost-test cycle. */
do {
/* Wait for the next test interval. */
oldstarttime = boost_starttime;
while (ULONG_CMP_LT(jiffies, oldstarttime)) {
schedule_timeout_uninterruptible(1);
rcu_stutter_wait("rcu_torture_boost");
if (kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP)
goto checkwait;
}
/* Do one boost-test interval. */
endtime = oldstarttime + test_boost_duration * HZ;
call_rcu_time = jiffies;
while (ULONG_CMP_LT(jiffies, endtime)) {
/* If we don't have a callback in flight, post one. */
if (!rbi.inflight) {
smp_mb(); /* RCU core before ->inflight = 1. */
rbi.inflight = 1;
call_rcu(&rbi.rcu, rcu_torture_boost_cb);
if (jiffies - call_rcu_time >
test_boost_duration * HZ - HZ / 2) {
VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
n_rcu_torture_boost_failure++;
}
call_rcu_time = jiffies;
}
cond_resched();
rcu_stutter_wait("rcu_torture_boost");
if (kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP)
goto checkwait;
}
/*
* Set the start time of the next test interval.
* Yes, this is vulnerable to long delays, but such
* delays simply cause a false negative for the next
* interval. Besides, we are running at RT priority,
* so delays should be relatively rare.
*/
while (oldstarttime == boost_starttime &&
!kthread_should_stop()) {
if (mutex_trylock(&boost_mutex)) {
boost_starttime = jiffies +
test_boost_interval * HZ;
n_rcu_torture_boosts++;
mutex_unlock(&boost_mutex);
break;
}
schedule_timeout_uninterruptible(1);
}
/* Go do the stutter. */
checkwait: rcu_stutter_wait("rcu_torture_boost");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
/* Clean up and exit. */
VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
rcutorture_shutdown_absorb("rcu_torture_boost");
while (!kthread_should_stop() || rbi.inflight)
schedule_timeout_uninterruptible(1);
smp_mb(); /* order accesses to ->inflight before stack-frame death. */
destroy_rcu_head_on_stack(&rbi.rcu);
return 0;
}
/*
* RCU torture force-quiescent-state kthread. Repeatedly induces
* bursts of calls to force_quiescent_state(), increasing the probability
* of occurrence of some important types of race conditions.
*/
static int
rcu_torture_fqs(void *arg)
{
unsigned long fqs_resume_time;
int fqs_burst_remaining;
VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
do {
fqs_resume_time = jiffies + fqs_stutter * HZ;
while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
!kthread_should_stop()) {
schedule_timeout_interruptible(1);
}
fqs_burst_remaining = fqs_duration;
while (fqs_burst_remaining > 0 &&
!kthread_should_stop()) {
cur_ops->fqs();
udelay(fqs_holdoff);
fqs_burst_remaining -= fqs_holdoff;
}
rcu_stutter_wait("rcu_torture_fqs");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
rcutorture_shutdown_absorb("rcu_torture_fqs");
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
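/*
 * Burst arithmetic in the loop above: each pass invokes ->fqs(), spins
 * for fqs_holdoff microseconds, and subtracts fqs_holdoff from the
 * remaining budget, so one burst issues roughly
 * fqs_duration / fqs_holdoff calls over fqs_duration microseconds; the
 * task then sleeps for fqs_stutter seconds before the next burst.
 */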
/*
* RCU torture writer kthread. Repeatedly substitutes a new structure
* for that pointed to by rcu_torture_current, freeing the old structure
* after a series of grace periods (the "pipeline").
*/
static int
rcu_torture_writer(void *arg)
{
int i;
long oldbatch = rcu_batches_completed();
struct rcu_torture *rp;
struct rcu_torture *old_rp;
static DEFINE_RCU_RANDOM(rand);
VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
set_user_nice(current, 19);
do {
schedule_timeout_uninterruptible(1);
rp = rcu_torture_alloc();
if (rp == NULL)
continue;
rp->rtort_pipe_count = 0;
udelay(rcu_random(&rand) & 0x3ff);
old_rp = rcu_dereference_check(rcu_torture_current,
current == writer_task);
rp->rtort_mbtest = 1;
rcu_assign_pointer(rcu_torture_current, rp);
smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
if (old_rp) {
i = old_rp->rtort_pipe_count;
if (i > RCU_TORTURE_PIPE_LEN)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
cur_ops->deferred_free(old_rp);
}
rcutorture_record_progress(++rcu_torture_current_version);
oldbatch = cur_ops->completed();
rcu_stutter_wait("rcu_torture_writer");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
rcutorture_shutdown_absorb("rcu_torture_writer");
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
/*
* RCU torture fake writer kthread. Repeatedly calls sync, with a random
* delay between calls.
*/
static int
rcu_torture_fakewriter(void *arg)
{
DEFINE_RCU_RANDOM(rand);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
set_user_nice(current, 19);
do {
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
cur_ops->sync();
rcu_stutter_wait("rcu_torture_fakewriter");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
rcutorture_shutdown_absorb("rcu_torture_fakewriter");
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
/*
* RCU torture reader from timer handler. Dereferences rcu_torture_current,
* incrementing the corresponding element of the pipeline array. The
* counter in the element should never be greater than 1, otherwise, the
* RCU implementation is broken.
*/
static void rcu_torture_timer(unsigned long unused)
{
int idx;
int completed;
static DEFINE_RCU_RANDOM(rand);
static DEFINE_SPINLOCK(rand_lock);
struct rcu_torture *p;
int pipe_count;
idx = cur_ops->readlock();
completed = cur_ops->completed();
p = rcu_dereference_check(rcu_torture_current,
rcu_read_lock_bh_held() ||
rcu_read_lock_sched_held() ||
srcu_read_lock_held(&srcu_ctl));
if (p == NULL) {
/* Leave because rcu_torture_writer is not yet underway */
cur_ops->readunlock(idx);
return;
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
spin_lock(&rand_lock);
cur_ops->read_delay(&rand);
n_rcu_torture_timers++;
spin_unlock(&rand_lock);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
/*
* RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
* incrementing the corresponding element of the pipeline array. The
* counter in the element should never be greater than 1, otherwise, the
* RCU implementation is broken.
*/
static int
rcu_torture_reader(void *arg)
{
int completed;
int idx;
DEFINE_RCU_RANDOM(rand);
struct rcu_torture *p;
int pipe_count;
struct timer_list t;
VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
set_user_nice(current, 19);
if (irqreader && cur_ops->irq_capable)
setup_timer_on_stack(&t, rcu_torture_timer, 0);
do {
if (irqreader && cur_ops->irq_capable) {
if (!timer_pending(&t))
mod_timer(&t, jiffies + 1);
}
idx = cur_ops->readlock();
completed = cur_ops->completed();
p = rcu_dereference_check(rcu_torture_current,
rcu_read_lock_bh_held() ||
rcu_read_lock_sched_held() ||
srcu_read_lock_held(&srcu_ctl));
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
cur_ops->readunlock(idx);
schedule_timeout_interruptible(HZ);
continue;
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
cur_ops->read_delay(&rand);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
__this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
rcu_stutter_wait("rcu_torture_reader");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
rcutorture_shutdown_absorb("rcu_torture_reader");
if (irqreader && cur_ops->irq_capable)
del_timer_sync(&t);
while (!kthread_should_stop())
schedule_timeout_uninterruptible(1);
return 0;
}
/*
* Create an RCU-torture statistics message in the specified buffer.
*/
static int
rcu_torture_printk(char *page)
{
int cnt = 0;
int cpu;
int i;
long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
for_each_possible_cpu(cpu) {
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
}
}
for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
if (pipesummary[i] != 0)
break;
}
cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d rtbke: %ld rtbre: %ld "
"rtbf: %ld rtb: %ld nt: %ld",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
atomic_read(&n_rcu_torture_alloc),
atomic_read(&n_rcu_torture_alloc_fail),
atomic_read(&n_rcu_torture_free),
atomic_read(&n_rcu_torture_mberror),
n_rcu_torture_boost_ktrerror,
n_rcu_torture_boost_rterror,
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
n_rcu_torture_timers);
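	/*
	 * Key to the abbreviations above, taken from the arguments:
	 *	rtc:   current rcu_torture pointer
	 *	ver:   writer version number
	 *	tfle:  nonzero if the torture freelist is empty
	 *	rta:   allocations       rtaf:  allocation failures
	 *	rtf:   frees             rtmbe: memory-barrier errors
	 *	rtbke: boost kthread-create errors
	 *	rtbre: boost RT-priority errors
	 *	rtbf:  boost failures    rtb:   boost tests
	 *	nt:    timer-handler reader invocations
	 */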
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
n_rcu_torture_boost_ktrerror != 0 ||
n_rcu_torture_boost_rterror != 0 ||
n_rcu_torture_boost_failure != 0)
cnt += sprintf(&page[cnt], " !!!");
cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
if (i > 1) {
cnt += sprintf(&page[cnt], "!!! ");
atomic_inc(&n_rcu_torture_error);
WARN_ON_ONCE(1);
}
cnt += sprintf(&page[cnt], "Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt], "Reader Batch: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
cnt += sprintf(&page[cnt], " %d",
atomic_read(&rcu_torture_wcount[i]));
}
cnt += sprintf(&page[cnt], "\n");
if (cur_ops->stats)
cnt += cur_ops->stats(&page[cnt]);
return cnt;
}
/*
* Print torture statistics. Caller must ensure that there is only
* one call to this function at a given time!!! This is normally
* accomplished by relying on the module system to only have one copy
* of the module loaded, and then by giving the rcu_torture_stats
* kthread full control (or the init/cleanup functions when rcu_torture_stats
* thread is not running).
*/
static void
rcu_torture_stats_print(void)
{
int cnt;
cnt = rcu_torture_printk(printk_buf);
printk(KERN_ALERT "%s", printk_buf);
}
/*
* Periodically prints torture statistics, if periodic statistics printing
* was specified via the stat_interval module parameter.
*
* No need to worry about fullstop here, since this one doesn't reference
* volatile state or register callbacks.
*/
static int
rcu_torture_stats(void *arg)
{
VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
do {
schedule_timeout_interruptible(stat_interval * HZ);
rcu_torture_stats_print();
rcutorture_shutdown_absorb("rcu_torture_stats");
} while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
return 0;
}
static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special
 * case is @rcu_idle_cpu = -1, in which case the tasks are allowed to run
 * on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
int i;
cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
if (num_online_cpus() == 1) {
put_online_cpus();
return;
}
if (rcu_idle_cpu != -1)
cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
shuffle_tmp_mask);
}
if (writer_task)
set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
else
rcu_idle_cpu--;
put_online_cpus();
}
/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test RCU's support for tickless idle CPUs.
 */
static int
rcu_torture_shuffle(void *arg)
{
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
do {
schedule_timeout_interruptible(shuffle_interval * HZ);
rcu_torture_shuffle_tasks();
rcutorture_shutdown_absorb("rcu_torture_shuffle");
} while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
return 0;
}
/* Cause the rcutorture test to "stutter", starting and stopping all
* threads periodically.
*/
static int
rcu_torture_stutter(void *arg)
{
VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
do {
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 1;
if (!kthread_should_stop())
schedule_timeout_interruptible(stutter * HZ);
stutter_pause_test = 0;
rcutorture_shutdown_absorb("rcu_torture_stutter");
} while (!kthread_should_stop());
VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
return 0;
}
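/*
 * Timing of the loop above: the test runs for stutter seconds, then all
 * torture kthreads park in rcu_stutter_wait() for roughly another
 * stutter seconds while stutter_pause_test is set, and the cycle
 * repeats until module unload.
 */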
static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
printk(KERN_ALERT "%s" TORTURE_FLAG
"--- %s: nreaders=%d nfakewriters=%d "
"stat_interval=%d verbose=%d test_no_idle_hz=%d "
"shuffle_interval=%d stutter=%d irqreader=%d "
"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
"test_boost=%d/%d test_boost_interval=%d "
"test_boost_duration=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
test_boost, cur_ops->can_boost,
test_boost_interval, test_boost_duration);
}
static struct notifier_block rcutorture_shutdown_nb = {
.notifier_call = rcutorture_shutdown_notify,
};
static void rcutorture_booster_cleanup(int cpu)
{
struct task_struct *t;
if (boost_tasks[cpu] == NULL)
return;
mutex_lock(&boost_mutex);
VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
t = boost_tasks[cpu];
boost_tasks[cpu] = NULL;
mutex_unlock(&boost_mutex);
/* This must be outside of the mutex, otherwise deadlock! */
kthread_stop(t);
}
static int rcutorture_booster_init(int cpu)
{
int retval;
if (boost_tasks[cpu] != NULL)
return 0; /* Already created, nothing more to do. */
/* Don't allow time recalculation while creating a new task. */
mutex_lock(&boost_mutex);
VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
cpu_to_node(cpu),
"rcu_torture_boost");
if (IS_ERR(boost_tasks[cpu])) {
retval = PTR_ERR(boost_tasks[cpu]);
VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
n_rcu_torture_boost_ktrerror++;
boost_tasks[cpu] = NULL;
mutex_unlock(&boost_mutex);
return retval;
}
kthread_bind(boost_tasks[cpu], cpu);
wake_up_process(boost_tasks[cpu]);
mutex_unlock(&boost_mutex);
return 0;
}
static int rcutorture_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
(void)rcutorture_booster_init(cpu);
break;
case CPU_DOWN_PREPARE:
rcutorture_booster_cleanup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block rcutorture_cpu_nb = {
.notifier_call = rcutorture_cpu_notify,
};
static void
rcu_torture_cleanup(void)
{
int i;
mutex_lock(&fullstop_mutex);
rcutorture_record_test_transition();
if (fullstop == FULLSTOP_SHUTDOWN) {
printk(KERN_WARNING /* but going down anyway, so... */
"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
mutex_unlock(&fullstop_mutex);
schedule_timeout_uninterruptible(10);
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
return;
}
fullstop = FULLSTOP_RMMOD;
mutex_unlock(&fullstop_mutex);
unregister_reboot_notifier(&rcutorture_shutdown_nb);
if (stutter_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
kthread_stop(stutter_task);
}
stutter_task = NULL;
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
if (writer_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
kthread_stop(writer_task);
}
writer_task = NULL;
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++) {
if (reader_tasks[i]) {
VERBOSE_PRINTK_STRING(
"Stopping rcu_torture_reader task");
kthread_stop(reader_tasks[i]);
}
reader_tasks[i] = NULL;
}
kfree(reader_tasks);
reader_tasks = NULL;
}
rcu_torture_current = NULL;
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++) {
if (fakewriter_tasks[i]) {
VERBOSE_PRINTK_STRING(
"Stopping rcu_torture_fakewriter task");
kthread_stop(fakewriter_tasks[i]);
}
fakewriter_tasks[i] = NULL;
}
kfree(fakewriter_tasks);
fakewriter_tasks = NULL;
}
if (stats_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
kthread_stop(stats_task);
}
stats_task = NULL;
if (fqs_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
kthread_stop(fqs_task);
}
fqs_task = NULL;
if ((test_boost == 1 && cur_ops->can_boost) ||
test_boost == 2) {
unregister_cpu_notifier(&rcutorture_cpu_nb);
for_each_possible_cpu(i)
rcutorture_booster_cleanup(i);
}
/* Wait for all RCU callbacks to fire. */
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
if (cur_ops->cleanup)
cur_ops->cleanup();
if (atomic_read(&n_rcu_torture_error))
rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
else
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
static int __init
rcu_torture_init(void)
{
int i;
int cpu;
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
&srcu_ops, &srcu_expedited_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
cur_ops = torture_ops[i];
if (strcmp(torture_type, cur_ops->name) == 0)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
torture_type);
printk(KERN_ALERT "rcu-torture types:");
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
printk(KERN_ALERT " %s", torture_ops[i]->name);
printk(KERN_ALERT "\n");
mutex_unlock(&fullstop_mutex);
return -EINVAL;
}
if (cur_ops->fqs == NULL && fqs_duration != 0) {
printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
"fqs_duration, fqs disabled.\n");
fqs_duration = 0;
}
if (cur_ops->init)
cur_ops->init(); /* no "goto unwind" prior to this point!!! */
if (nreaders >= 0)
nrealreaders = nreaders;
else
nrealreaders = 2 * num_online_cpus();
rcu_torture_print_module_parms(cur_ops, "Start of test");
fullstop = FULLSTOP_DONTSTOP;
/* Set up the freelist. */
INIT_LIST_HEAD(&rcu_torture_freelist);
for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
rcu_tortures[i].rtort_mbtest = 0;
list_add_tail(&rcu_tortures[i].rtort_free,
&rcu_torture_freelist);
}
/* Initialize the statistics so that each run gets its own numbers. */
rcu_torture_current = NULL;
rcu_torture_current_version = 0;
atomic_set(&n_rcu_torture_alloc, 0);
atomic_set(&n_rcu_torture_alloc_fail, 0);
atomic_set(&n_rcu_torture_free, 0);
atomic_set(&n_rcu_torture_mberror, 0);
atomic_set(&n_rcu_torture_error, 0);
n_rcu_torture_boost_ktrerror = 0;
n_rcu_torture_boost_rterror = 0;
n_rcu_torture_boost_failure = 0;
n_rcu_torture_boosts = 0;
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
atomic_set(&rcu_torture_wcount[i], 0);
for_each_possible_cpu(cpu) {
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
per_cpu(rcu_torture_count, cpu)[i] = 0;
per_cpu(rcu_torture_batch, cpu)[i] = 0;
}
}
/* Start up the kthreads. */
VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
writer_task = kthread_run(rcu_torture_writer, NULL,
"rcu_torture_writer");
if (IS_ERR(writer_task)) {
firsterr = PTR_ERR(writer_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
writer_task = NULL;
goto unwind;
}
fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
GFP_KERNEL);
if (fakewriter_tasks == NULL) {
VERBOSE_PRINTK_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nfakewriters; i++) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
"rcu_torture_fakewriter");
if (IS_ERR(fakewriter_tasks[i])) {
firsterr = PTR_ERR(fakewriter_tasks[i]);
VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
fakewriter_tasks[i] = NULL;
goto unwind;
}
}
reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_PRINTK_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < nrealreaders; i++) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
"rcu_torture_reader");
if (IS_ERR(reader_tasks[i])) {
firsterr = PTR_ERR(reader_tasks[i]);
VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
reader_tasks[i] = NULL;
goto unwind;
}
}
if (stat_interval > 0) {
VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
stats_task = kthread_run(rcu_torture_stats, NULL,
"rcu_torture_stats");
if (IS_ERR(stats_task)) {
firsterr = PTR_ERR(stats_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
stats_task = NULL;
goto unwind;
}
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
firsterr = -ENOMEM;
VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
goto unwind;
}
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;
goto unwind;
}
}
if (stutter < 0)
stutter = 0;
if (stutter) {
/* Create the stutter thread */
stutter_task = kthread_run(rcu_torture_stutter, NULL,
"rcu_torture_stutter");
if (IS_ERR(stutter_task)) {
firsterr = PTR_ERR(stutter_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
stutter_task = NULL;
goto unwind;
}
}
if (fqs_duration < 0)
fqs_duration = 0;
if (fqs_duration) {
/* Create the fqs thread */
fqs_task = kthread_run(rcu_torture_fqs, NULL,
"rcu_torture_fqs");
if (IS_ERR(fqs_task)) {
firsterr = PTR_ERR(fqs_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
fqs_task = NULL;
goto unwind;
}
}
if (test_boost_interval < 1)
test_boost_interval = 1;
if (test_boost_duration < 2)
test_boost_duration = 2;
if ((test_boost == 1 && cur_ops->can_boost) ||
test_boost == 2) {
int retval;
boost_starttime = jiffies + test_boost_interval * HZ;
register_cpu_notifier(&rcutorture_cpu_nb);
for_each_possible_cpu(i) {
if (cpu_is_offline(i))
continue; /* Heuristic: CPU can go offline. */
retval = rcutorture_booster_init(i);
if (retval < 0) {
firsterr = retval;
goto unwind;
}
}
}
register_reboot_notifier(&rcutorture_shutdown_nb);
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
return 0;
unwind:
mutex_unlock(&fullstop_mutex);
rcu_torture_cleanup();
return firsterr;
}
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
| gpl-2.0 |
TenchiMasaki/android_kernel_asus_moorefield | drivers/external_drivers/camera/drivers/media/pci/atomisp2/css2401a0_legacy_v21/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c | 170 | 1752 | /*
* Support for Intel Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 - 2014 Intel Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*/
#include "ia_css_types.h"
#include "sh_css_defs.h"
#include "ia_css_debug.h"
#include "sh_css_frac.h"
#include "ia_css_xnr.host.h"
const struct ia_css_xnr_config default_xnr_config = {
/** default threshold 6400 translates to 25 on ISP. */
6400
};
void
ia_css_xnr_table_vamem_encode(
struct sh_css_isp_xnr_vamem_params *to,
const struct ia_css_xnr_table *from,
unsigned size)
{
(void)size;
memcpy (&to->xnr, &from->data, sizeof(to->xnr));
}
void
ia_css_xnr_encode(
struct sh_css_isp_xnr_params *to,
const struct ia_css_xnr_config *from,
unsigned size)
{
(void)size;
to->threshold =
uDIGIT_FITTING(from->threshold, 16, SH_CSS_ISP_YUV_BITS);
}
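/*
 * Worked example for the encoding above (a sketch; it assumes that
 * uDIGIT_FITTING(v, 16, n) right-shifts v by 16 - n bits and that
 * SH_CSS_ISP_YUV_BITS == 8): the default threshold of 6400 becomes
 * 6400 >> 8 == 25, matching the comment on default_xnr_config.
 */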
void
ia_css_xnr_table_debug_dtrace(
const struct ia_css_xnr_table *config,
unsigned level)
{
(void)config;
(void)level;
}
void
ia_css_xnr_debug_dtrace(
const struct ia_css_xnr_config *config,
unsigned level)
{
ia_css_debug_dtrace(level,
"config.threshold=%d\n", config->threshold);
}
| gpl-2.0 |
wanahmadzainie/linux-mainline | drivers/scsi/bfa/bfad_attr.c | 426 | 29968 | /*
* Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
* Copyright (c) 2014- QLogic Corporation.
* All rights reserved
* www.qlogic.com
*
* Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* bfa_attr.c Linux driver configuration interface module.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
/*
* FC transport template entry, get SCSI target port ID.
*/
static void
bfad_im_get_starget_port_id(struct scsi_target *starget)
{
struct Scsi_Host *shost;
struct bfad_im_port_s *im_port;
struct bfad_s *bfad;
struct bfad_itnim_s *itnim = NULL;
u32 fc_id = -1;
unsigned long flags;
shost = dev_to_shost(starget->dev.parent);
im_port = (struct bfad_im_port_s *) shost->hostdata[0];
bfad = im_port->bfad;
spin_lock_irqsave(&bfad->bfad_lock, flags);
itnim = bfad_get_itnim(im_port, starget->id);
if (itnim)
fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
fc_starget_port_id(starget) = fc_id;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/*
* FC transport template entry, get SCSI target nwwn.
*/
static void
bfad_im_get_starget_node_name(struct scsi_target *starget)
{
struct Scsi_Host *shost;
struct bfad_im_port_s *im_port;
struct bfad_s *bfad;
struct bfad_itnim_s *itnim = NULL;
u64 node_name = 0;
unsigned long flags;
shost = dev_to_shost(starget->dev.parent);
im_port = (struct bfad_im_port_s *) shost->hostdata[0];
bfad = im_port->bfad;
spin_lock_irqsave(&bfad->bfad_lock, flags);
itnim = bfad_get_itnim(im_port, starget->id);
if (itnim)
node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
fc_starget_node_name(starget) = cpu_to_be64(node_name);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/*
* FC transport template entry, get SCSI target pwwn.
*/
static void
bfad_im_get_starget_port_name(struct scsi_target *starget)
{
struct Scsi_Host *shost;
struct bfad_im_port_s *im_port;
struct bfad_s *bfad;
struct bfad_itnim_s *itnim = NULL;
u64 port_name = 0;
unsigned long flags;
shost = dev_to_shost(starget->dev.parent);
im_port = (struct bfad_im_port_s *) shost->hostdata[0];
bfad = im_port->bfad;
spin_lock_irqsave(&bfad->bfad_lock, flags);
itnim = bfad_get_itnim(im_port, starget->id);
if (itnim)
port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
fc_starget_port_name(starget) = cpu_to_be64(port_name);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/*
* FC transport template entry, get SCSI host port ID.
*/
static void
bfad_im_get_host_port_id(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_port_s *port = im_port->port;
fc_host_port_id(shost) =
bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
}
/*
* FC transport template entry, get SCSI host port type.
*/
static void
bfad_im_get_host_port_type(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_lport_attr_s port_attr;
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
switch (port_attr.port_type) {
case BFA_PORT_TYPE_NPORT:
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case BFA_PORT_TYPE_NLPORT:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
break;
case BFA_PORT_TYPE_P2P:
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case BFA_PORT_TYPE_LPORT:
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
break;
default:
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
break;
}
}
/*
* FC transport template entry, get SCSI host port state.
*/
static void
bfad_im_get_host_port_state(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_port_attr_s attr;
bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.port_state) {
case BFA_PORT_ST_LINKDOWN:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case BFA_PORT_ST_LINKUP:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
break;
case BFA_PORT_ST_DISABLED:
case BFA_PORT_ST_STOPPED:
case BFA_PORT_ST_IOCDOWN:
case BFA_PORT_ST_IOCDIS:
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
break;
case BFA_PORT_ST_UNINIT:
case BFA_PORT_ST_ENABLING_QWAIT:
case BFA_PORT_ST_ENABLING:
case BFA_PORT_ST_DISABLING_QWAIT:
case BFA_PORT_ST_DISABLING:
default:
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
}
}
/*
* FC transport template entry, get SCSI host active fc4s.
*/
static void
bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_port_s *port = im_port->port;
memset(fc_host_active_fc4s(shost), 0,
sizeof(fc_host_active_fc4s(shost)));
if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
fc_host_active_fc4s(shost)[2] = 1;
fc_host_active_fc4s(shost)[7] = 1;
}
/*
* FC transport template entry, get SCSI host link speed.
*/
static void
bfad_im_get_host_speed(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_port_attr_s attr;
bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.speed) {
case BFA_PORT_SPEED_10GBPS:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case BFA_PORT_SPEED_16GBPS:
fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
break;
case BFA_PORT_SPEED_8GBPS:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
case BFA_PORT_SPEED_4GBPS:
fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
break;
case BFA_PORT_SPEED_2GBPS:
fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
break;
case BFA_PORT_SPEED_1GBPS:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
/*
* FC transport template entry, get SCSI host port type.
*/
static void
bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_port_s *port = im_port->port;
wwn_t fabric_nwwn = 0;
fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
}
/*
* FC transport template entry, get BFAD statistics.
*/
static struct fc_host_statistics *
bfad_im_get_stats(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_hal_comp fcomp;
union bfa_port_stats_u *fcstats;
struct fc_host_statistics *hstats;
bfa_status_t rc;
unsigned long flags;
fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
if (fcstats == NULL)
return NULL;
hstats = &bfad->link_stats;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
memset(hstats, 0, sizeof(struct fc_host_statistics));
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK)
return NULL;
wait_for_completion(&fcomp.comp);
/* Fill the fc_host_statistics structure */
hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
hstats->tx_frames = fcstats->fc.tx_frames;
hstats->tx_words = fcstats->fc.tx_words;
hstats->rx_frames = fcstats->fc.rx_frames;
hstats->rx_words = fcstats->fc.rx_words;
hstats->lip_count = fcstats->fc.lip_count;
hstats->nos_count = fcstats->fc.nos_count;
hstats->error_frames = fcstats->fc.error_frames;
hstats->dumped_frames = fcstats->fc.dropped_frames;
hstats->link_failure_count = fcstats->fc.link_failures;
hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
kfree(fcstats);
return hstats;
}
/*
* FC transport template entry, reset BFAD statistics.
*/
static void
bfad_im_reset_stats(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_hal_comp fcomp;
unsigned long flags;
bfa_status_t rc;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
&fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK)
return;
wait_for_completion(&fcomp.comp);
return;
}
/*
* FC transport template entry, set rport loss timeout.
 * Update dev_loss_tmo based on the value pushed down by the stack.
 * If it is less than the driver's path_tov, set it to path_tov + 1
 * to ensure that the driver times out before the application.
*/
static void
bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
struct bfad_itnim_data_s *itnim_data = rport->dd_data;
struct bfad_itnim_s *itnim = itnim_data->itnim;
struct bfad_s *bfad = itnim->im->bfad;
uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
rport->dev_loss_tmo = timeout;
if (timeout < path_tov)
rport->dev_loss_tmo = path_tov + 1;
}
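/*
 * Worked example (assuming a path_tov of 30 seconds): a stack-requested
 * timeout of 10 seconds would be raised to 31 seconds, so the driver's
 * own path timeout always fires before the transport's dev_loss timer.
 */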
static int
bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
{
char *vname = fc_vport->symbolic_name;
struct Scsi_Host *shost = fc_vport->shost;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_lport_cfg_s port_cfg;
struct bfad_vport_s *vp;
int status = 0, rc;
unsigned long flags;
memset(&port_cfg, 0, sizeof(port_cfg));
u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn);
u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
if (strlen(vname) > 0)
strcpy((char *)&port_cfg.sym_name, vname);
port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
spin_lock_irqsave(&bfad->bfad_lock, flags);
list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
if (port_cfg.pwwn ==
vp->fcs_vport.lport.port_cfg.pwwn) {
port_cfg.preboot_vp =
vp->fcs_vport.lport.port_cfg.preboot_vp;
break;
}
}
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
if (rc == BFA_STATUS_OK) {
struct bfad_vport_s *vport;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
spin_lock_irqsave(&bfad->bfad_lock, flags);
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
port_cfg.pwwn);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (fcs_vport == NULL)
return VPCERR_BAD_WWN;
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
if (disable) {
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_vport_stop(fcs_vport);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
}
vport = fcs_vport->vport_drv;
vshost = vport->drv_port.im_port->shost;
fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
fc_host_supported_classes(vshost) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(vshost), 0,
sizeof(fc_host_supported_fc4s(vshost)));
/* For FCP type 0x08 */
if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
fc_host_supported_fc4s(vshost)[2] = 1;
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(vshost)[7] = 1;
fc_host_supported_speeds(vshost) =
bfad_im_supported_speeds(&bfad->bfa);
fc_host_maxframe_size(vshost) =
bfa_fcport_get_maxfrsize(&bfad->bfa);
fc_vport->dd_data = vport;
vport->drv_port.im_port->fc_vport = fc_vport;
} else if (rc == BFA_STATUS_INVALID_WWN)
return VPCERR_BAD_WWN;
else if (rc == BFA_STATUS_VPORT_EXISTS)
return VPCERR_BAD_WWN;
else if (rc == BFA_STATUS_VPORT_MAX)
return VPCERR_NO_FABRIC_SUPP;
else if (rc == BFA_STATUS_VPORT_WWN_BP)
return VPCERR_BAD_WWN;
else
return FC_VPORT_FAILED;
return status;
}
int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfad_hal_comp fcomp;
unsigned long flags;
uint32_t status;
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
status = bfa_port_disable(&bfad->bfa.modules.port,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (status != BFA_STATUS_OK)
return -EIO;
wait_for_completion(&fcomp.comp);
if (fcomp.status != BFA_STATUS_OK)
return -EIO;
spin_lock_irqsave(&bfad->bfad_lock, flags);
status = bfa_port_enable(&bfad->bfa.modules.port,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (status != BFA_STATUS_OK)
return -EIO;
wait_for_completion(&fcomp.comp);
if (fcomp.status != BFA_STATUS_OK)
return -EIO;
return 0;
}
static int
bfad_im_vport_delete(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
int rc;
unsigned long flags;
struct completion fcomp;
if (im_port->flags & BFAD_PORT_DELETE) {
bfad_scsi_host_free(bfad, im_port);
list_del(&vport->list_entry);
kfree(vport);
return 0;
}
port = im_port->port;
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
spin_lock_irqsave(&bfad->bfad_lock, flags);
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (fcs_vport == NULL)
return VPCERR_BAD_WWN;
vport->drv_port.flags |= BFAD_PORT_DELETE;
vport->comp_del = &fcomp;
init_completion(vport->comp_del);
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_fcs_vport_delete(&vport->fcs_vport);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc == BFA_STATUS_PBC) {
vport->drv_port.flags &= ~BFAD_PORT_DELETE;
vport->comp_del = NULL;
return -1;
}
wait_for_completion(vport->comp_del);
bfad_scsi_host_free(bfad, im_port);
list_del(&vport->list_entry);
kfree(vport);
return 0;
}
static int
bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
{
struct bfad_vport_s *vport;
struct bfad_s *bfad;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
unsigned long flags;
vport = (struct bfad_vport_s *)fc_vport->dd_data;
bfad = vport->drv_port.bfad;
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
spin_lock_irqsave(&bfad->bfad_lock, flags);
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (fcs_vport == NULL)
return VPCERR_BAD_WWN;
if (disable) {
bfa_fcs_vport_stop(fcs_vport);
fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
} else {
bfa_fcs_vport_start(fcs_vport);
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
return 0;
}
void
bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *)vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
char *sym_name = fc_vport->symbolic_name;
struct bfa_fcs_vport_s *fcs_vport;
wwn_t pwwn;
unsigned long flags;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
spin_lock_irqsave(&bfad->bfad_lock, flags);
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (fcs_vport == NULL)
return;
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (strlen(sym_name) > 0)
bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
struct fc_function_template bfad_im_fc_function_template = {
/* Target dynamic attributes */
.get_starget_port_id = bfad_im_get_starget_port_id,
.show_starget_port_id = 1,
.get_starget_node_name = bfad_im_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = bfad_im_get_starget_port_name,
.show_starget_port_name = 1,
/* Host dynamic attribute */
.get_host_port_id = bfad_im_get_host_port_id,
.show_host_port_id = 1,
/* Host fixed attributes */
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
/* More host dynamic attributes */
.show_host_port_type = 1,
.get_host_port_type = bfad_im_get_host_port_type,
.show_host_port_state = 1,
.get_host_port_state = bfad_im_get_host_port_state,
.show_host_active_fc4s = 1,
.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
.show_host_speed = 1,
.get_host_speed = bfad_im_get_host_speed,
.show_host_fabric_name = 1,
.get_host_fabric_name = bfad_im_get_host_fabric_name,
.show_host_symbolic_name = 1,
/* Statistics */
.get_fc_host_stats = bfad_im_get_stats,
.reset_fc_host_stats = bfad_im_reset_stats,
/* Allocation length for host specific data */
.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
/* Remote port fixed attributes */
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.show_rport_dev_loss_tmo = 1,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
.issue_fc_host_lip = bfad_im_issue_fc_host_lip,
.vport_create = bfad_im_vport_create,
.vport_delete = bfad_im_vport_delete,
.vport_disable = bfad_im_vport_disable,
.set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
.bsg_request = bfad_im_bsg_request,
.bsg_timeout = bfad_im_bsg_timeout,
};
struct fc_function_template bfad_im_vport_fc_function_template = {
/* Target dynamic attributes */
.get_starget_port_id = bfad_im_get_starget_port_id,
.show_starget_port_id = 1,
.get_starget_node_name = bfad_im_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = bfad_im_get_starget_port_name,
.show_starget_port_name = 1,
/* Host dynamic attribute */
.get_host_port_id = bfad_im_get_host_port_id,
.show_host_port_id = 1,
/* Host fixed attributes */
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
/* More host dynamic attributes */
.show_host_port_type = 1,
.get_host_port_type = bfad_im_get_host_port_type,
.show_host_port_state = 1,
.get_host_port_state = bfad_im_get_host_port_state,
.show_host_active_fc4s = 1,
.get_host_active_fc4s = bfad_im_get_host_active_fc4s,
.show_host_speed = 1,
.get_host_speed = bfad_im_get_host_speed,
.show_host_fabric_name = 1,
.get_host_fabric_name = bfad_im_get_host_fabric_name,
.show_host_symbolic_name = 1,
/* Statistics */
.get_fc_host_stats = bfad_im_get_stats,
.reset_fc_host_stats = bfad_im_reset_stats,
/* Allocation length for host specific data */
.dd_fcrport_size = sizeof(struct bfad_itnim_data_s *),
/* Remote port fixed attributes */
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.show_rport_dev_loss_tmo = 1,
.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
};
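/*
 * Note: the vport template above carries the same attribute hooks as the
 * physical-port template, but omits the vport management entries
 * (vport_create/vport_delete/vport_disable/set_vport_symbolic_name) and
 * the LIP and BSG hooks that only make sense on the physical port.
 */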
/*
* Scsi_Host_attrs SCSI host attributes
*/
static ssize_t
bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
}
static ssize_t
bfad_im_model_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
return snprintf(buf, PAGE_SIZE, "%s\n", model);
}
static ssize_t
bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
int nports = 0;
bfa_get_adapter_model(&bfad->bfa, model);
nports = bfa_get_nports(&bfad->bfa);
if (!strcmp(model, "QLogic-425"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 4Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "QLogic-825"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 8Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "QLogic-42B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 4Gbps PCIe dual port FC HBA for HP");
else if (!strcmp(model, "QLogic-82B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 8Gbps PCIe dual port FC HBA for HP");
else if (!strcmp(model, "QLogic-1010"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 10Gbps single port CNA");
else if (!strcmp(model, "QLogic-1020"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 10Gbps dual port CNA");
else if (!strcmp(model, "QLogic-1007"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 10Gbps CNA for IBM Blade Center");
else if (!strcmp(model, "QLogic-415"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 4Gbps PCIe single port FC HBA");
else if (!strcmp(model, "QLogic-815"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 8Gbps PCIe single port FC HBA");
else if (!strcmp(model, "QLogic-41B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 4Gbps PCIe single port FC HBA for HP");
else if (!strcmp(model, "QLogic-81B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 8Gbps PCIe single port FC HBA for HP");
else if (!strcmp(model, "QLogic-804"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 8Gbps FC HBA for HP Bladesystem C-class");
else if (!strcmp(model, "QLogic-1741"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 10Gbps CNA for Dell M-Series Blade Servers");
else if (strstr(model, "QLogic-1860")) {
if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 10Gbps single port CNA");
else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 16Gbps PCIe single port FC HBA");
else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 10Gbps dual port CNA");
else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 16Gbps PCIe dual port FC HBA");
} else if (!strcmp(model, "QLogic-1867")) {
if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 16Gbps PCIe single port FC HBA for IBM");
else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"QLogic BR-series 16Gbps PCIe dual port FC HBA for IBM");
} else
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
}
static ssize_t
bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_port_s *port = im_port->port;
u64 nwwn;
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
}
static ssize_t
bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_lport_attr_s port_attr;
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strncpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
static ssize_t
bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char hw_ver[BFA_VERSION_LEN];
bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
}
static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
}
static ssize_t
bfad_im_optionrom_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
}
static ssize_t
bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
char fw_ver[BFA_VERSION_LEN];
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
}
static ssize_t
bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
return snprintf(buf, PAGE_SIZE, "%d\n",
bfa_get_nports(&bfad->bfa));
}
static ssize_t
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
}
static ssize_t
bfad_im_num_of_discovered_ports_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_port_s *port = im_port->port;
struct bfad_s *bfad = im_port->bfad;
int nrports = 2048;
struct bfa_rport_qualifier_s *rports = NULL;
unsigned long flags;
rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
GFP_ATOMIC);
if (rports == NULL)
return snprintf(buf, PAGE_SIZE, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
bfad_im_serial_num_show, NULL);
static DEVICE_ATTR(model, S_IRUGO, bfad_im_model_show, NULL);
static DEVICE_ATTR(model_description, S_IRUGO,
bfad_im_model_desc_show, NULL);
static DEVICE_ATTR(node_name, S_IRUGO, bfad_im_node_name_show, NULL);
static DEVICE_ATTR(symbolic_name, S_IRUGO,
bfad_im_symbolic_name_show, NULL);
static DEVICE_ATTR(hardware_version, S_IRUGO,
bfad_im_hw_version_show, NULL);
static DEVICE_ATTR(driver_version, S_IRUGO,
bfad_im_drv_version_show, NULL);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
bfad_im_optionrom_version_show, NULL);
static DEVICE_ATTR(firmware_version, S_IRUGO,
bfad_im_fw_version_show, NULL);
static DEVICE_ATTR(number_of_ports, S_IRUGO,
bfad_im_num_of_ports_show, NULL);
static DEVICE_ATTR(driver_name, S_IRUGO, bfad_im_drv_name_show, NULL);
static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO,
bfad_im_num_of_discovered_ports_show, NULL);
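/*
 * Each DEVICE_ATTR above becomes a read-only sysfs file (typically under
 * /sys/class/scsi_host/hostN/) once the attribute arrays below are
 * referenced from the driver's scsi_host_template.
 */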
struct device_attribute *bfad_im_host_attrs[] = {
&dev_attr_serial_number,
&dev_attr_model,
&dev_attr_model_description,
&dev_attr_node_name,
&dev_attr_symbolic_name,
&dev_attr_hardware_version,
&dev_attr_driver_version,
&dev_attr_option_rom_version,
&dev_attr_firmware_version,
&dev_attr_number_of_ports,
&dev_attr_driver_name,
&dev_attr_number_of_discovered_ports,
NULL,
};
struct device_attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number,
&dev_attr_model,
&dev_attr_model_description,
&dev_attr_node_name,
&dev_attr_symbolic_name,
&dev_attr_hardware_version,
&dev_attr_driver_version,
&dev_attr_option_rom_version,
&dev_attr_firmware_version,
&dev_attr_number_of_ports,
&dev_attr_driver_name,
&dev_attr_number_of_discovered_ports,
NULL,
};
| gpl-2.0 |
OMFGB/htc-kernel-msm7x30_omfgb | drivers/staging/dream/qdsp5/audio_aac.c | 938 | 27336 | /* arch/arm/mach-msm/qdsp5/audio_aac.c
*
* aac audio decoder device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/ioctls.h>
#include "audmgr.h"
#include <mach/msm_adsp.h>
#include <mach/msm_audio_aac.h>
#include <mach/qdsp5/qdsp5audppcmdi.h>
#include <mach/qdsp5/qdsp5audppmsg.h>
#include <mach/qdsp5/qdsp5audplaycmdi.h>
#include <mach/qdsp5/qdsp5audplaymsg.h>
/* for queue ids - should be relative to module number*/
#include "adsp.h"
#ifdef DEBUG
#define dprintk(format, arg...) \
printk(KERN_DEBUG format, ## arg)
#else
#define dprintk(format, arg...) do {} while (0)
#endif
#define BUFSZ 32768
#define DMASZ (BUFSZ * 2)
#define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF
#define AUDDEC_DEC_AAC 5
#define PCM_BUFSZ_MIN 9600 /* Hold one stereo AAC frame */
#define PCM_BUF_MAX_COUNT 5 /* DSP accepts 5 buffers at most,
but only 2 buffers are supported currently */
#define ROUTING_MODE_FTRT 1
#define ROUTING_MODE_RT 2
/* Decoder status received from AUDPPTASK */
#define AUDPP_DEC_STATUS_SLEEP 0
#define AUDPP_DEC_STATUS_INIT 1
#define AUDPP_DEC_STATUS_CFG 2
#define AUDPP_DEC_STATUS_PLAY 3
struct buffer {
void *data;
unsigned size;
unsigned used; /* input usage: actual DSP-produced PCM size */
unsigned addr;
};
struct audio {
struct buffer out[2];
spinlock_t dsp_lock;
uint8_t out_head;
uint8_t out_tail;
uint8_t out_needed; /* number of buffers the dsp is waiting for */
atomic_t out_bytes;
struct mutex lock;
struct mutex write_lock;
wait_queue_head_t write_wait;
/* Host PCM section */
struct buffer in[PCM_BUF_MAX_COUNT];
struct mutex read_lock;
wait_queue_head_t read_wait; /* Wait queue for read */
char *read_data; /* pointer to reader buffer */
dma_addr_t read_phys; /* physical address of reader buffer */
uint8_t read_next; /* index to input buffers to be read next */
uint8_t fill_next; /* index to buffer that DSP should be filling */
uint8_t pcm_buf_count; /* number of pcm buffer allocated */
/* ---- End of Host PCM section */
struct msm_adsp_module *audplay;
/* configuration to use on next enable */
uint32_t out_sample_rate;
uint32_t out_channel_mode;
struct msm_audio_aac_config aac_config;
struct audmgr audmgr;
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
int rflush; /* Read flush */
int wflush; /* Write flush */
int opened;
int enabled;
int running;
int stopped; /* set when stopped, cleared on flush */
int pcm_feedback;
int buf_refresh;
int reserved; /* A byte is being reserved */
char rsv_byte; /* Handle odd length user data */
unsigned volume;
uint16_t dec_id;
uint32_t read_ptr_offset;
};
static int auddec_dsp_config(struct audio *audio, int enable);
static void audpp_cmd_cfg_adec_params(struct audio *audio);
static void audpp_cmd_cfg_routing_mode(struct audio *audio);
static void audplay_send_data(struct audio *audio, unsigned needed);
static void audplay_config_hostpcm(struct audio *audio);
static void audplay_buffer_refresh(struct audio *audio);
static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
/* must be called with audio->lock held */
static int audio_enable(struct audio *audio)
{
struct audmgr_config cfg;
int rc;
dprintk("audio_enable()\n");
if (audio->enabled)
return 0;
audio->out_tail = 0;
audio->out_needed = 0;
cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000;
cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK;
cfg.codec = RPC_AUD_DEF_CODEC_AAC;
cfg.snd_method = RPC_SND_METHOD_MIDI;
rc = audmgr_enable(&audio->audmgr, &cfg);
if (rc < 0)
return rc;
if (msm_adsp_enable(audio->audplay)) {
pr_err("audio: msm_adsp_enable(audplay) failed\n");
audmgr_disable(&audio->audmgr);
return -ENODEV;
}
if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) {
pr_err("audio: audpp_enable() failed\n");
msm_adsp_disable(audio->audplay);
audmgr_disable(&audio->audmgr);
return -ENODEV;
}
audio->enabled = 1;
return 0;
}
/* must be called with audio->lock held */
static int audio_disable(struct audio *audio)
{
dprintk("audio_disable()\n");
if (audio->enabled) {
audio->enabled = 0;
auddec_dsp_config(audio, 0);
wake_up(&audio->write_wait);
wake_up(&audio->read_wait);
msm_adsp_disable(audio->audplay);
audpp_disable(audio->dec_id, audio);
audmgr_disable(&audio->audmgr);
audio->out_needed = 0;
}
return 0;
}
/* ------------------- dsp --------------------- */
static void audio_update_pcm_buf_entry(struct audio *audio, uint32_t *payload)
{
uint8_t index;
unsigned long flags;
if (audio->rflush)
return;
spin_lock_irqsave(&audio->dsp_lock, flags);
for (index = 0; index < payload[1]; index++) {
if (audio->in[audio->fill_next].addr ==
payload[2 + index * 2]) {
dprintk("audio_update_pcm_buf_entry: in[%d] ready\n",
audio->fill_next);
audio->in[audio->fill_next].used =
payload[3 + index * 2];
if ((++audio->fill_next) == audio->pcm_buf_count)
audio->fill_next = 0;
} else {
pr_err
("audio_update_pcm_buf_entry: expected=%x ret=%x\n"
, audio->in[audio->fill_next].addr,
payload[2 + index * 2]);
break;
}
}
if (audio->in[audio->fill_next].used == 0) {
audplay_buffer_refresh(audio);
} else {
dprintk("audio_update_pcm_buf_entry: read cannot keep up\n");
audio->buf_refresh = 1;
}
wake_up(&audio->read_wait);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
static void audplay_dsp_event(void *data, unsigned id, size_t len,
void (*getevent) (void *ptr, size_t len))
{
struct audio *audio = data;
uint32_t msg[28];
getevent(msg, sizeof(msg));
dprintk("audplay_dsp_event: msg_id=%x\n", id);
switch (id) {
case AUDPLAY_MSG_DEC_NEEDS_DATA:
audplay_send_data(audio, 1);
break;
case AUDPLAY_MSG_BUFFER_UPDATE:
audio_update_pcm_buf_entry(audio, msg);
break;
default:
pr_err("unexpected message from decoder \n");
}
}
static void audio_dsp_event(void *private, unsigned id, uint16_t *msg)
{
struct audio *audio = private;
switch (id) {
case AUDPP_MSG_STATUS_MSG:{
unsigned status = msg[1];
switch (status) {
case AUDPP_DEC_STATUS_SLEEP:
dprintk("decoder status: sleep\n");
break;
case AUDPP_DEC_STATUS_INIT:
dprintk("decoder status: init\n");
audpp_cmd_cfg_routing_mode(audio);
break;
case AUDPP_DEC_STATUS_CFG:
dprintk("decoder status: cfg\n");
break;
case AUDPP_DEC_STATUS_PLAY:
dprintk("decoder status: play\n");
if (audio->pcm_feedback) {
audplay_config_hostpcm(audio);
audplay_buffer_refresh(audio);
}
break;
default:
pr_err("unknown decoder status\n");
}
break;
}
case AUDPP_MSG_CFG_MSG:
if (msg[0] == AUDPP_MSG_ENA_ENA) {
dprintk("audio_dsp_event: CFG_MSG ENABLE\n");
auddec_dsp_config(audio, 1);
audio->out_needed = 0;
audio->running = 1;
audpp_set_volume_and_pan(audio->dec_id, audio->volume,
0);
audpp_avsync(audio->dec_id, 22050);
} else if (msg[0] == AUDPP_MSG_ENA_DIS) {
dprintk("audio_dsp_event: CFG_MSG DISABLE\n");
audpp_avsync(audio->dec_id, 0);
audio->running = 0;
} else {
pr_err("audio_dsp_event: CFG_MSG %d?\n", msg[0]);
}
break;
case AUDPP_MSG_ROUTING_ACK:
dprintk("audio_dsp_event: ROUTING_ACK mode=%d\n", msg[1]);
audpp_cmd_cfg_adec_params(audio);
break;
case AUDPP_MSG_FLUSH_ACK:
dprintk("%s: FLUSH_ACK\n", __func__);
audio->wflush = 0;
audio->rflush = 0;
if (audio->pcm_feedback)
audplay_buffer_refresh(audio);
break;
default:
pr_err("audio_dsp_event: UNKNOWN (%d)\n", id);
}
}
struct msm_adsp_ops audplay_adsp_ops_aac = {
.event = audplay_dsp_event,
};
#define audplay_send_queue0(audio, cmd, len) \
msm_adsp_write(audio->audplay, QDSP_uPAudPlay0BitStreamCtrlQueue, \
cmd, len)
static int auddec_dsp_config(struct audio *audio, int enable)
{
audpp_cmd_cfg_dec_type cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE;
if (enable)
cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC |
AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_AAC;
else
cmd.dec0_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V;
return audpp_send_queue1(&cmd, sizeof(cmd));
}
static void audpp_cmd_cfg_adec_params(struct audio *audio)
{
audpp_cmd_cfg_adec_params_aac cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_AAC_LEN;
cmd.common.dec_id = audio->dec_id;
cmd.common.input_sampling_frequency = audio->out_sample_rate;
cmd.format = audio->aac_config.format;
cmd.audio_object = audio->aac_config.audio_object;
cmd.ep_config = audio->aac_config.ep_config;
cmd.aac_section_data_resilience_flag =
audio->aac_config.aac_section_data_resilience_flag;
cmd.aac_scalefactor_data_resilience_flag =
audio->aac_config.aac_scalefactor_data_resilience_flag;
cmd.aac_spectral_data_resilience_flag =
audio->aac_config.aac_spectral_data_resilience_flag;
cmd.sbr_on_flag = audio->aac_config.sbr_on_flag;
cmd.sbr_ps_on_flag = audio->aac_config.sbr_ps_on_flag;
cmd.channel_configuration = audio->aac_config.channel_configuration;
audpp_send_queue2(&cmd, sizeof(cmd));
}
static void audpp_cmd_cfg_routing_mode(struct audio *audio)
{
struct audpp_cmd_routing_mode cmd;
dprintk("audpp_cmd_cfg_routing_mode()\n");
memset(&cmd, 0, sizeof(cmd));
cmd.cmd_id = AUDPP_CMD_ROUTING_MODE;
cmd.object_number = audio->dec_id;
if (audio->pcm_feedback)
cmd.routing_mode = ROUTING_MODE_FTRT;
else
cmd.routing_mode = ROUTING_MODE_RT;
audpp_send_queue1(&cmd, sizeof(cmd));
}
static int audplay_dsp_send_data_avail(struct audio *audio,
unsigned idx, unsigned len)
{
audplay_cmd_bitstream_data_avail cmd;
cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL;
cmd.decoder_id = audio->dec_id;
cmd.buf_ptr = audio->out[idx].addr;
cmd.buf_size = len / 2;
cmd.partition_number = 0;
return audplay_send_queue0(audio, &cmd, sizeof(cmd));
}
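/*
 * Hand the DSP one PCM buffer at a time. The usable length is rounded
 * down to a multiple of 1024 bytes -- treated here as the AAC frame
 * size -- presumably so the DSP never has to split a decoded frame
 * across buffer boundaries.
 */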
static void audplay_buffer_refresh(struct audio *audio)
{
struct audplay_cmd_buffer_refresh refresh_cmd;
refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH;
refresh_cmd.num_buffers = 1;
refresh_cmd.buf0_address = audio->in[audio->fill_next].addr;
refresh_cmd.buf0_length = audio->in[audio->fill_next].size -
(audio->in[audio->fill_next].size % 1024); /* AAC frame size */
refresh_cmd.buf_read_count = 0;
dprintk("audplay_buffer_fresh: buf0_addr=%x buf0_len=%d\n",
refresh_cmd.buf0_address, refresh_cmd.buf0_length);
(void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd));
}
static void audplay_config_hostpcm(struct audio *audio)
{
struct audplay_cmd_hpcm_buf_cfg cfg_cmd;
dprintk("audplay_config_hostpcm()\n");
cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG;
cfg_cmd.max_buffers = audio->pcm_buf_count;
cfg_cmd.byte_swap = 0;
cfg_cmd.hostpcm_config = (0x8000) | (0x4000);
cfg_cmd.feedback_frequency = 1;
cfg_cmd.partition_number = 0;
(void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd));
}
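/*
 * Output-side double buffering: userspace fills out[out_head] while
 * out[out_tail] is handed to the DSP; a frame whose 'used' field is
 * 0xffffffff is in flight, and the AUDPLAY_MSG_DEC_NEEDS_DATA callback
 * recycles it once the DSP asks for more data.
 */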
static void audplay_send_data(struct audio *audio, unsigned needed)
{
struct buffer *frame;
unsigned long flags;
spin_lock_irqsave(&audio->dsp_lock, flags);
if (!audio->running)
goto done;
if (needed && !audio->wflush) {
/* We were called from the callback because the DSP
* requested more data. Note that the DSP does want
* more data, and if a buffer was in-flight, mark it
* as available (since the DSP must now be done with
* it).
*/
audio->out_needed = 1;
frame = audio->out + audio->out_tail;
if (frame->used == 0xffffffff) {
dprintk("frame %d free\n", audio->out_tail);
frame->used = 0;
audio->out_tail ^= 1;
wake_up(&audio->write_wait);
}
}
if (audio->out_needed) {
/* If the DSP currently wants data and we have a
* buffer available, we will send it and reset
* the needed flag. We'll mark the buffer as in-flight
* so that it won't be recycled until the next buffer
* is requested
*/
frame = audio->out + audio->out_tail;
if (frame->used) {
BUG_ON(frame->used == 0xffffffff);
/* printk("frame %d busy\n", audio->out_tail); */
audplay_dsp_send_data_avail(audio, audio->out_tail,
frame->used);
frame->used = 0xffffffff;
audio->out_needed = 0;
}
}
done:
spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/* ------------------- device --------------------- */
static void audio_flush(struct audio *audio)
{
audio->out[0].used = 0;
audio->out[1].used = 0;
audio->out_head = 0;
audio->out_tail = 0;
audio->reserved = 0;
audio->out_needed = 0;
atomic_set(&audio->out_bytes, 0);
}
static void audio_flush_pcm_buf(struct audio *audio)
{
uint8_t index;
for (index = 0; index < PCM_BUF_MAX_COUNT; index++)
audio->in[index].used = 0;
audio->buf_refresh = 0;
audio->read_next = 0;
audio->fill_next = 0;
}
static int audaac_validate_usr_config(struct msm_audio_aac_config *config)
{
int ret_val = -1;
if (config->format != AUDIO_AAC_FORMAT_ADTS &&
config->format != AUDIO_AAC_FORMAT_RAW &&
config->format != AUDIO_AAC_FORMAT_PSUEDO_RAW &&
config->format != AUDIO_AAC_FORMAT_LOAS)
goto done;
if (config->audio_object != AUDIO_AAC_OBJECT_LC &&
config->audio_object != AUDIO_AAC_OBJECT_LTP &&
config->audio_object != AUDIO_AAC_OBJECT_ERLC)
goto done;
if (config->audio_object == AUDIO_AAC_OBJECT_ERLC) {
if (config->ep_config > 3)
goto done;
if (config->aac_scalefactor_data_resilience_flag !=
AUDIO_AAC_SCA_DATA_RES_OFF &&
config->aac_scalefactor_data_resilience_flag !=
AUDIO_AAC_SCA_DATA_RES_ON)
goto done;
if (config->aac_section_data_resilience_flag !=
AUDIO_AAC_SEC_DATA_RES_OFF &&
config->aac_section_data_resilience_flag !=
AUDIO_AAC_SEC_DATA_RES_ON)
goto done;
if (config->aac_spectral_data_resilience_flag !=
AUDIO_AAC_SPEC_DATA_RES_OFF &&
config->aac_spectral_data_resilience_flag !=
AUDIO_AAC_SPEC_DATA_RES_ON)
goto done;
} else {
config->aac_section_data_resilience_flag =
AUDIO_AAC_SEC_DATA_RES_OFF;
config->aac_scalefactor_data_resilience_flag =
AUDIO_AAC_SCA_DATA_RES_OFF;
config->aac_spectral_data_resilience_flag =
AUDIO_AAC_SPEC_DATA_RES_OFF;
}
if (config->sbr_on_flag != AUDIO_AAC_SBR_ON_FLAG_OFF &&
config->sbr_on_flag != AUDIO_AAC_SBR_ON_FLAG_ON)
goto done;
if (config->sbr_ps_on_flag != AUDIO_AAC_SBR_PS_ON_FLAG_OFF &&
config->sbr_ps_on_flag != AUDIO_AAC_SBR_PS_ON_FLAG_ON)
goto done;
if (config->dual_mono_mode > AUDIO_AAC_DUAL_MONO_PL_SR)
goto done;
if (config->channel_configuration > 2)
goto done;
ret_val = 0;
done:
return ret_val;
}
static void audio_ioport_reset(struct audio *audio)
{
/* Make sure the read/write threads are woken from
 * sleep and know that the system is not able
 * to process I/O requests at the moment.
 */
wake_up(&audio->write_wait);
mutex_lock(&audio->write_lock);
audio_flush(audio);
mutex_unlock(&audio->write_lock);
wake_up(&audio->read_wait);
mutex_lock(&audio->read_lock);
audio_flush_pcm_buf(audio);
mutex_unlock(&audio->read_lock);
}
static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct audio *audio = file->private_data;
int rc = 0;
dprintk("audio_ioctl() cmd = %d\n", cmd);
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
if (copy_to_user((void *)arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
if (cmd == AUDIO_SET_VOLUME) {
unsigned long flags;
spin_lock_irqsave(&audio->dsp_lock, flags);
audio->volume = arg;
if (audio->running)
audpp_set_volume_and_pan(audio->dec_id, arg, 0);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
return 0;
}
mutex_lock(&audio->lock);
switch (cmd) {
case AUDIO_START:
rc = audio_enable(audio);
break;
case AUDIO_STOP:
rc = audio_disable(audio);
audio->stopped = 1;
audio_ioport_reset(audio);
audio->stopped = 0;
break;
case AUDIO_FLUSH:
dprintk("%s: AUDIO_FLUSH\n", __func__);
audio->rflush = 1;
audio->wflush = 1;
audio_ioport_reset(audio);
if (audio->running)
audpp_flush(audio->dec_id);
else {
audio->rflush = 0;
audio->wflush = 0;
}
break;
case AUDIO_SET_CONFIG:{
struct msm_audio_config config;
if (copy_from_user
(&config, (void *)arg, sizeof(config))) {
rc = -EFAULT;
break;
}
if (config.channel_count == 1) {
config.channel_count =
AUDPP_CMD_PCM_INTF_MONO_V;
} else if (config.channel_count == 2) {
config.channel_count =
AUDPP_CMD_PCM_INTF_STEREO_V;
} else {
rc = -EINVAL;
break;
}
audio->out_sample_rate = config.sample_rate;
audio->out_channel_mode = config.channel_count;
rc = 0;
break;
}
case AUDIO_GET_CONFIG:{
struct msm_audio_config config;
config.buffer_size = BUFSZ;
config.buffer_count = 2;
config.sample_rate = audio->out_sample_rate;
if (audio->out_channel_mode ==
AUDPP_CMD_PCM_INTF_MONO_V) {
config.channel_count = 1;
} else {
config.channel_count = 2;
}
config.unused[0] = 0;
config.unused[1] = 0;
config.unused[2] = 0;
config.unused[3] = 0;
if (copy_to_user((void *)arg, &config,
sizeof(config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_GET_AAC_CONFIG:{
if (copy_to_user((void *)arg, &audio->aac_config,
sizeof(audio->aac_config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_SET_AAC_CONFIG:{
struct msm_audio_aac_config usr_config;
if (copy_from_user
(&usr_config, (void *)arg,
sizeof(usr_config))) {
rc = -EFAULT;
break;
}
if (audaac_validate_usr_config(&usr_config) == 0) {
audio->aac_config = usr_config;
rc = 0;
} else
rc = -EINVAL;
break;
}
case AUDIO_GET_PCM_CONFIG:{
struct msm_audio_pcm_config config;
config.pcm_feedback = 0;
config.buffer_count = PCM_BUF_MAX_COUNT;
config.buffer_size = PCM_BUFSZ_MIN;
if (copy_to_user((void *)arg, &config,
sizeof(config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_SET_PCM_CONFIG:{
struct msm_audio_pcm_config config;
if (copy_from_user
(&config, (void *)arg, sizeof(config))) {
rc = -EFAULT;
break;
}
if ((config.buffer_count > PCM_BUF_MAX_COUNT) ||
(config.buffer_count == 1))
config.buffer_count = PCM_BUF_MAX_COUNT;
if (config.buffer_size < PCM_BUFSZ_MIN)
config.buffer_size = PCM_BUFSZ_MIN;
/* Check if pcm feedback is required */
if ((config.pcm_feedback) && (!audio->read_data)) {
dprintk("ioctl: allocate PCM buffer %d\n",
config.buffer_count *
config.buffer_size);
audio->read_data =
dma_alloc_coherent(NULL,
config.buffer_size *
config.buffer_count,
&audio->read_phys,
GFP_KERNEL);
if (!audio->read_data) {
pr_err("audio_aac: buf alloc fail\n");
rc = -ENOMEM;
} else {
uint8_t index;
uint32_t offset = 0;
audio->pcm_feedback = 1;
audio->buf_refresh = 0;
audio->pcm_buf_count =
config.buffer_count;
audio->read_next = 0;
audio->fill_next = 0;
for (index = 0;
index < config.buffer_count;
index++) {
audio->in[index].data =
audio->read_data + offset;
audio->in[index].addr =
audio->read_phys + offset;
audio->in[index].size =
config.buffer_size;
audio->in[index].used = 0;
offset += config.buffer_size;
}
rc = 0;
}
} else {
rc = 0;
}
break;
}
case AUDIO_PAUSE:
dprintk("%s: AUDIO_PAUSE %ld\n", __func__, arg);
rc = audpp_pause(audio->dec_id, (int) arg);
break;
default:
rc = -EINVAL;
}
mutex_unlock(&audio->lock);
return rc;
}
static ssize_t audio_read(struct file *file, char __user *buf, size_t count,
loff_t *pos)
{
struct audio *audio = file->private_data;
const char __user *start = buf;
int rc = 0;
if (!audio->pcm_feedback)
return 0; /* PCM feedback is not enabled. Nothing to read */
mutex_lock(&audio->read_lock);
dprintk("audio_read() %d \n", count);
while (count > 0) {
rc = wait_event_interruptible(audio->read_wait,
(audio->in[audio->read_next].
used > 0) || (audio->stopped)
|| (audio->rflush));
if (rc < 0)
break;
if (audio->stopped || audio->rflush) {
rc = -EBUSY;
break;
}
if (count < audio->in[audio->read_next].used) {
/* Reads must happen on frame boundaries. Since the driver
does not know the frame size, the read count must be greater
than or equal to the size of the PCM samples */
dprintk("audio_read: no partial frame done reading\n");
break;
} else {
dprintk("audio_read: read from in[%d]\n",
audio->read_next);
if (copy_to_user
(buf, audio->in[audio->read_next].data,
audio->in[audio->read_next].used)) {
pr_err("audio_read: invalid addr %x \n",
(unsigned int)buf);
rc = -EFAULT;
break;
}
count -= audio->in[audio->read_next].used;
buf += audio->in[audio->read_next].used;
audio->in[audio->read_next].used = 0;
if ((++audio->read_next) == audio->pcm_buf_count)
audio->read_next = 0;
if (audio->in[audio->read_next].used == 0)
break; /* No data ready at this moment.
 * Exit the while loop to prevent the
 * output thread from sleeping too long.
 */
}
}
/* don't feed output buffer to HW decoder during flushing
* buffer refresh command will be sent once flush completes
* send buf refresh command here can confuse HW decoder
*/
if (audio->buf_refresh && !audio->rflush) {
audio->buf_refresh = 0;
dprintk("audio_read: kick start pcm feedback again\n");
audplay_buffer_refresh(audio);
}
mutex_unlock(&audio->read_lock);
if (buf > start)
rc = buf - start;
dprintk("audio_read: read %d bytes\n", rc);
return rc;
}
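/*
 * The DSP apparently consumes data in 16-bit words (note that
 * audplay_dsp_send_data_avail() passes buf_size = len / 2), so an
 * odd-length write leaves one byte over: it is saved in rsv_byte and
 * prepended to the next write via the 'reserved' flag handled below.
 */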
static ssize_t audio_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct audio *audio = file->private_data;
const char __user *start = buf;
struct buffer *frame;
size_t xfer;
char *cpy_ptr;
int rc = 0;
unsigned dsize;
mutex_lock(&audio->write_lock);
while (count > 0) {
frame = audio->out + audio->out_head;
cpy_ptr = frame->data;
dsize = 0;
rc = wait_event_interruptible(audio->write_wait,
(frame->used == 0)
|| (audio->stopped)
|| (audio->wflush));
if (rc < 0)
break;
if (audio->stopped || audio->wflush) {
rc = -EBUSY;
break;
}
if (audio->reserved) {
dprintk("%s: append reserved byte %x\n",
__func__, audio->rsv_byte);
*cpy_ptr = audio->rsv_byte;
xfer = (count > (frame->size - 1)) ?
frame->size - 1 : count;
cpy_ptr++;
dsize = 1;
audio->reserved = 0;
} else
xfer = (count > frame->size) ? frame->size : count;
if (copy_from_user(cpy_ptr, buf, xfer)) {
rc = -EFAULT;
break;
}
dsize += xfer;
if (dsize & 1) {
audio->rsv_byte = ((char *) frame->data)[dsize - 1];
dprintk("%s: odd length buf reserve last byte %x\n",
__func__, audio->rsv_byte);
audio->reserved = 1;
dsize--;
}
count -= xfer;
buf += xfer;
if (dsize > 0) {
audio->out_head ^= 1;
frame->used = dsize;
audplay_send_data(audio, 0);
}
}
mutex_unlock(&audio->write_lock);
if (buf > start)
return buf - start;
return rc;
}
static int audio_release(struct inode *inode, struct file *file)
{
struct audio *audio = file->private_data;
dprintk("audio_release()\n");
mutex_lock(&audio->lock);
audio_disable(audio);
audio_flush(audio);
audio_flush_pcm_buf(audio);
msm_adsp_put(audio->audplay);
audio->audplay = NULL;
audio->opened = 0;
audio->reserved = 0;
dma_free_coherent(NULL, DMASZ, audio->data, audio->phys);
audio->data = NULL;
if (audio->read_data != NULL) {
dma_free_coherent(NULL,
audio->in[0].size * audio->pcm_buf_count,
audio->read_data, audio->read_phys);
audio->read_data = NULL;
}
audio->pcm_feedback = 0;
mutex_unlock(&audio->lock);
return 0;
}
static struct audio the_aac_audio;
static int audio_open(struct inode *inode, struct file *file)
{
struct audio *audio = &the_aac_audio;
int rc;
mutex_lock(&audio->lock);
if (audio->opened) {
pr_err("audio: busy\n");
rc = -EBUSY;
goto done;
}
if (!audio->data) {
audio->data = dma_alloc_coherent(NULL, DMASZ,
&audio->phys, GFP_KERNEL);
if (!audio->data) {
pr_err("audio: could not allocate DMA buffers\n");
rc = -ENOMEM;
goto done;
}
}
rc = audmgr_open(&audio->audmgr);
if (rc)
goto done;
rc = msm_adsp_get("AUDPLAY0TASK", &audio->audplay,
&audplay_adsp_ops_aac, audio);
if (rc) {
pr_err("audio: failed to get audplay0 dsp module\n");
goto done;
}
audio->out_sample_rate = 44100;
audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
audio->aac_config.format = AUDIO_AAC_FORMAT_ADTS;
audio->aac_config.audio_object = AUDIO_AAC_OBJECT_LC;
audio->aac_config.ep_config = 0;
audio->aac_config.aac_section_data_resilience_flag =
AUDIO_AAC_SEC_DATA_RES_OFF;
audio->aac_config.aac_scalefactor_data_resilience_flag =
AUDIO_AAC_SCA_DATA_RES_OFF;
audio->aac_config.aac_spectral_data_resilience_flag =
AUDIO_AAC_SPEC_DATA_RES_OFF;
audio->aac_config.sbr_on_flag = AUDIO_AAC_SBR_ON_FLAG_ON;
audio->aac_config.sbr_ps_on_flag = AUDIO_AAC_SBR_PS_ON_FLAG_ON;
audio->aac_config.dual_mono_mode = AUDIO_AAC_DUAL_MONO_PL_SR;
audio->aac_config.channel_configuration = 2;
audio->dec_id = 0;
audio->out[0].data = audio->data + 0;
audio->out[0].addr = audio->phys + 0;
audio->out[0].size = BUFSZ;
audio->out[1].data = audio->data + BUFSZ;
audio->out[1].addr = audio->phys + BUFSZ;
audio->out[1].size = BUFSZ;
audio->volume = 0x2000; /* Q13 1.0 */
audio_flush(audio);
file->private_data = audio;
audio->opened = 1;
rc = 0;
done:
mutex_unlock(&audio->lock);
return rc;
}
static struct file_operations audio_aac_fops = {
.owner = THIS_MODULE,
.open = audio_open,
.release = audio_release,
.read = audio_read,
.write = audio_write,
.unlocked_ioctl = audio_ioctl,
};
struct miscdevice audio_aac_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_aac",
.fops = &audio_aac_fops,
};
static int __init audio_init(void)
{
mutex_init(&the_aac_audio.lock);
mutex_init(&the_aac_audio.write_lock);
mutex_init(&the_aac_audio.read_lock);
spin_lock_init(&the_aac_audio.dsp_lock);
init_waitqueue_head(&the_aac_audio.write_wait);
init_waitqueue_head(&the_aac_audio.read_wait);
the_aac_audio.read_data = NULL;
return misc_register(&audio_aac_misc);
}
device_initcall(audio_init);
| gpl-2.0 |
bju2000/cm_kernel_samsung_slte | net/core/net-procfs.c | 1962 | 9566 | #include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
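/*
 * A single loff_t seq position encodes both a name-hash bucket and an
 * offset within that bucket: the high bits select one of the
 * NETDEV_HASHENTRIES buckets and the low BUCKET_SPACE bits count
 * entries inside it, so iteration can resume mid-bucket across
 * seq_file reads.
 */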
extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
struct net_device *dev;
struct hlist_head *h;
unsigned int count = 0, offset = get_offset(*pos);
h = &net->dev_name_head[get_bucket(*pos)];
hlist_for_each_entry_rcu(dev, h, name_hlist) {
if (++count == offset)
return dev;
}
return NULL;
}
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
struct net_device *dev;
unsigned int bucket;
do {
dev = dev_from_same_bucket(seq, pos);
if (dev)
return dev;
bucket = get_bucket(*pos) + 1;
*pos = set_bucket_offset(bucket, 1);
} while (bucket < NETDEV_HASHENTRIES);
return NULL;
}
/*
* This is invoked by the /proc filesystem handler to display a device
* in detail.
*/
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
return NULL;
return dev_from_bucket(seq, pos);
}
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return dev_from_bucket(seq, pos);
}
static void dev_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
"%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors,
stats->rx_fifo_errors,
stats->rx_length_errors + stats->rx_over_errors +
stats->rx_crc_errors + stats->rx_frame_errors,
stats->rx_compressed, stats->multicast,
stats->tx_bytes, stats->tx_packets,
stats->tx_errors, stats->tx_dropped,
stats->tx_fifo_errors, stats->collisions,
stats->tx_carrier_errors +
stats->tx_aborted_errors +
stats->tx_window_errors +
stats->tx_heartbeat_errors,
stats->tx_compressed);
}
/*
* Called from the PROCfs module. This now uses the new arbitrary sized
* /proc/net interface to create /proc/net/dev
*/
static int dev_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Inter-| Receive "
" | Transmit\n"
" face |bytes packets errs drop fifo frame "
"compressed multicast|bytes packets errs "
"drop fifo colls carrier compressed\n");
else
dev_seq_printf_stats(seq, v);
return 0;
}
static struct softnet_data *softnet_get_online(loff_t *pos)
{
struct softnet_data *sd = NULL;
while (*pos < nr_cpu_ids)
if (cpu_online(*pos)) {
sd = &per_cpu(softnet_data, *pos);
break;
} else
++*pos;
return sd;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
return softnet_get_online(pos);
}
static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return softnet_get_online(pos);
}
static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}
static int softnet_seq_show(struct seq_file *seq, void *v)
{
struct softnet_data *sd = v;
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
sd->cpu_collision, sd->received_rps);
return 0;
}
static const struct seq_operations dev_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = dev_seq_show,
};
static int dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &dev_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static const struct seq_operations softnet_seq_ops = {
.start = softnet_seq_start,
.next = softnet_seq_next,
.stop = softnet_seq_stop,
.show = softnet_seq_show,
};
static int softnet_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &softnet_seq_ops);
}
static const struct file_operations softnet_seq_fops = {
.owner = THIS_MODULE,
.open = softnet_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *ptype_get_idx(loff_t pos)
{
struct packet_type *pt = NULL;
loff_t i = 0;
int t;
list_for_each_entry_rcu(pt, &ptype_all, list) {
if (i == pos)
return pt;
++i;
}
for (t = 0; t < PTYPE_HASH_SIZE; t++) {
list_for_each_entry_rcu(pt, &ptype_base[t], list) {
if (i == pos)
return pt;
++i;
}
}
return NULL;
}
static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
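/*
 * Walk order: the ptype_all list (ETH_P_ALL taps) first, then each
 * ptype_base hash bucket in turn; the while loop below skips over
 * empty buckets.
 */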
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct packet_type *pt;
struct list_head *nxt;
int hash;
++*pos;
if (v == SEQ_START_TOKEN)
return ptype_get_idx(0);
pt = v;
nxt = pt->list.next;
if (pt->type == htons(ETH_P_ALL)) {
if (nxt != &ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
} else
hash = ntohs(pt->type) & PTYPE_HASH_MASK;
while (nxt == &ptype_base[hash]) {
if (++hash >= PTYPE_HASH_SIZE)
return NULL;
nxt = ptype_base[hash].next;
}
found:
return list_entry(nxt, struct packet_type, list);
}
static void ptype_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
static int ptype_seq_show(struct seq_file *seq, void *v)
{
struct packet_type *pt = v;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Type Device Function\n");
else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else
seq_printf(seq, "%04x", ntohs(pt->type));
seq_printf(seq, " %-8s %pf\n",
pt->dev ? pt->dev->name : "", pt->func);
}
return 0;
}
static const struct seq_operations ptype_seq_ops = {
.start = ptype_seq_start,
.next = ptype_seq_next,
.stop = ptype_seq_stop,
.show = ptype_seq_show,
};
static int ptype_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ptype_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations ptype_seq_fops = {
.owner = THIS_MODULE,
.open = ptype_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init dev_proc_net_init(struct net *net)
{
int rc = -ENOMEM;
if (!proc_create("dev", S_IRUGO, net->proc_net, &dev_seq_fops))
goto out;
if (!proc_create("softnet_stat", S_IRUGO, net->proc_net,
&softnet_seq_fops))
goto out_dev;
if (!proc_create("ptype", S_IRUGO, net->proc_net, &ptype_seq_fops))
goto out_softnet;
if (wext_proc_init(net))
goto out_ptype;
rc = 0;
out:
return rc;
out_ptype:
remove_proc_entry("ptype", net->proc_net);
out_softnet:
remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
remove_proc_entry("dev", net->proc_net);
goto out;
}
static void __net_exit dev_proc_net_exit(struct net *net)
{
wext_proc_exit(net);
remove_proc_entry("ptype", net->proc_net);
remove_proc_entry("softnet_stat", net->proc_net);
remove_proc_entry("dev", net->proc_net);
}
static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init,
.exit = dev_proc_net_exit,
};
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
struct netdev_hw_addr *ha;
struct net_device *dev = v;
if (v == SEQ_START_TOKEN)
return 0;
netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev) {
int i;
seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
dev->name, ha->refcount, ha->global_use);
for (i = 0; i < dev->addr_len; i++)
seq_printf(seq, "%02x", ha->addr[i]);
seq_putc(seq, '\n');
}
netif_addr_unlock_bh(dev);
return 0;
}
static const struct seq_operations dev_mc_seq_ops = {
.start = dev_seq_start,
.next = dev_seq_next,
.stop = dev_seq_stop,
.show = dev_mc_seq_show,
};
static int dev_mc_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &dev_mc_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations dev_mc_seq_fops = {
.owner = THIS_MODULE,
.open = dev_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init dev_mc_net_init(struct net *net)
{
if (!proc_create("dev_mcast", 0, net->proc_net, &dev_mc_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit dev_mc_net_exit(struct net *net)
{
remove_proc_entry("dev_mcast", net->proc_net);
}
static struct pernet_operations __net_initdata dev_mc_net_ops = {
.init = dev_mc_net_init,
.exit = dev_mc_net_exit,
};
int __init dev_proc_init(void)
{
int ret = register_pernet_subsys(&dev_proc_ops);
if (!ret)
return register_pernet_subsys(&dev_mc_net_ops);
return ret;
}
| gpl-2.0 |
CODEG3EK/Odin | drivers/gpu/drm/i810/i810_dma.c | 2218 | 33619 | /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Keith Whitwell <keith@tungstengraphics.com>
*
*/
#include <drm/drmP.h>
#include <drm/i810_drm.h>
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
#define I810_BUF_HARDWARE 0
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
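/*
 * Buffer ownership is tracked in a status word shared with the
 * hardware, so cmpxchg() is used to claim a FREE buffer for the
 * client atomically without holding a lock around the scan.
 */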
static struct drm_buf *i810_freelist_get(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
int i;
int used;
/* Linear search might not be the best solution */
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
I810_BUF_CLIENT);
if (used == I810_BUF_FREE)
return buf;
}
return NULL;
}
/* This should only be called if the buffer is not sent to the hardware
 * yet; the hardware updates in_use for us once it's on the ring buffer.
 */
static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
if (used != I810_BUF_CLIENT) {
DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
return -EINVAL;
}
return 0;
}
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev;
drm_i810_private_t *dev_priv;
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
vma->vm_flags |= (VM_IO | VM_DONTCOPY);
buf_priv->currently_mapped = I810_BUF_MAPPED;
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
int retcode = 0;
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
return -EINVAL;
/* This is all entirely broken */
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
file_priv->filp->f_op = old_fops;
if (IS_ERR(buf_priv->virtual)) {
/* Real error */
DRM_ERROR("mmap error\n");
retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
return retcode;
}
static int i810_unmap_buffer(struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
if (buf_priv->currently_mapped != I810_BUF_MAPPED)
return -EINVAL;
retcode = vm_munmap((unsigned long)buf_priv->virtual,
(size_t) buf->total);
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
buf_priv->virtual = NULL;
return retcode;
}
static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
int retcode = 0;
buf = i810_freelist_get(dev);
if (!buf) {
retcode = -ENOMEM;
DRM_DEBUG("retcode=%d\n", retcode);
return retcode;
}
retcode = i810_map_buffer(buf, file_priv);
if (retcode) {
i810_freelist_put(dev, buf);
DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
return retcode;
}
buf->file_priv = file_priv;
buf_priv = buf->dev_private;
d->granted = 1;
d->request_idx = buf->idx;
d->request_size = buf->total;
d->virtual = buf_priv->virtual;
return retcode;
}
static int i810_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev->dev_private) {
int i;
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
drm_core_ioremapfree(&buf_priv->map, dev);
}
}
return 0;
}
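/*
 * Free ring space is head - (tail + 8), wrapped modulo ring->Size.
 * The three-second timeout restarts whenever the hardware head makes
 * progress, so only a genuinely stalled ring is reported as a lockup.
 */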
static int i810_wait_ring(struct drm_device *dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
int iters = 0;
unsigned long end;
unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
end = jiffies + (HZ * 3);
while (ring->space < n) {
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->head != last_head) {
end = jiffies + (HZ * 3);
last_head = ring->head;
}
iters++;
if (time_before(end, jiffies)) {
DRM_ERROR("space: %d wanted %d\n", ring->space, n);
DRM_ERROR("lockup\n");
goto out_wait_ring;
}
udelay(1);
}
out_wait_ring:
return iters;
}
static void i810_kernel_lost_context(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->tail = I810_READ(LP_RING + RING_TAIL);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
}
static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
int i;
if (dma->buf_count > 1019) {
/* Not enough space in the status page for the freelist */
return -EINVAL;
}
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->in_use = hw_status++;
buf_priv->my_use_idx = my_idx;
my_idx += 4;
*buf_priv->in_use = I810_BUF_FREE;
buf_priv->map.offset = buf->bus_address;
buf_priv->map.size = buf->total;
buf_priv->map.type = _DRM_AGP;
buf_priv->map.flags = 0;
buf_priv->map.mtrr = 0;
drm_core_ioremap(&buf_priv->map, dev);
buf_priv->kernel_virtual = buf_priv->map.handle;
}
return 0;
}
static int i810_dma_initialize(struct drm_device *dev,
drm_i810_private_t *dev_priv,
drm_i810_init_t *init)
{
struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {
dev_priv->sarea_map = r_list->map;
break;
}
}
if (!dev_priv->sarea_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find sarea!\n");
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find dma buffer map!\n");
return -EINVAL;
}
dev_priv->sarea_priv = (drm_i810_sarea_t *)
((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = _DRM_AGP;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->w = init->w;
dev_priv->h = init->h;
dev_priv->pitch = init->pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->depth_offset = init->depth_offset;
dev_priv->front_offset = init->front_offset;
dev_priv->overlay_offset = init->overlay_offset;
dev_priv->overlay_physical = init->overlay_physical;
dev_priv->front_di1 = init->front_offset | init->pitch_bits;
dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits;
/* Program Hardware Status Page */
dev_priv->hw_status_page =
pci_alloc_consistent(dev->pdev, PAGE_SIZE,
&dev_priv->dma_status_page);
if (!dev_priv->hw_status_page) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I810_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
/* Now we need to init our freelist */
if (i810_freelist_init(dev, dev_priv) != 0) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Not enough space in the status page for"
" the freelist\n");
return -ENOMEM;
}
dev->dev_private = (void *)dev_priv;
return 0;
}
static int i810_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv;
drm_i810_init_t *init = data;
int retcode = 0;
switch (init->func) {
case I810_INIT_DMA_1_4:
DRM_INFO("Using v1.4 init.\n");
dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
retcode = i810_dma_initialize(dev, dev_priv, init);
break;
case I810_CLEANUP_DMA:
DRM_INFO("DMA Cleanup\n");
retcode = i810_dma_cleanup(dev);
break;
default:
return -EINVAL;
}
return retcode;
}
/* Most efficient way to verify state for the i810 is as it is
* emitted. Non-conformant state is silently dropped.
*
* Use 'volatile' & local var tmp to force the emitted values to be
* identical to the verified ones.
*/
static void i810EmitContextVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
unsigned int tmp;
RING_LOCALS;
BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
OUT_RING(GFX_OP_COLOR_FACTOR);
OUT_RING(code[I810_CTXREG_CF1]);
OUT_RING(GFX_OP_STIPPLE);
OUT_RING(code[I810_CTXREG_ST1]);
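/* Only pass through dwords whose top three bits select the 3D client
 * (0x3) and whose 5-bit opcode is below 0x1d; anything else is
 * dropped instead of being emitted to the ring.  The same filter is
 * applied to texture state below. */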
for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
tmp = code[i];
if ((tmp & (7 << 29)) == (3 << 29) &&
(tmp & (0x1f << 24)) < (0x1d << 24)) {
OUT_RING(tmp);
j++;
} else
printk("constext state dropped!!!\n");
}
if (j & 1)
OUT_RING(0);
ADVANCE_LP_RING();
}
static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
unsigned int tmp;
RING_LOCALS;
BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
OUT_RING(GFX_OP_MAP_INFO);
OUT_RING(code[I810_TEXREG_MI1]);
OUT_RING(code[I810_TEXREG_MI2]);
OUT_RING(code[I810_TEXREG_MI3]);
for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
tmp = code[i];
if ((tmp & (7 << 29)) == (3 << 29) &&
(tmp & (0x1f << 24)) < (0x1d << 24)) {
OUT_RING(tmp);
j++;
} else
printk("texture state dropped!!!\n");
}
if (j & 1)
OUT_RING(0);
ADVANCE_LP_RING();
}
/* Need to do some additional checking when setting the dest buffer.
*/
static void i810EmitDestVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
unsigned int tmp;
RING_LOCALS;
BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
tmp = code[I810_DESTREG_DI1];
if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
OUT_RING(CMD_OP_DESTBUFFER_INFO);
OUT_RING(tmp);
} else
DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
tmp, dev_priv->front_di1, dev_priv->back_di1);
/* invariant:
*/
OUT_RING(CMD_OP_Z_BUFFER_INFO);
OUT_RING(dev_priv->zi1);
OUT_RING(GFX_OP_DESTBUFFER_VARS);
OUT_RING(code[I810_DESTREG_DV1]);
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(code[I810_DESTREG_DR1]);
OUT_RING(code[I810_DESTREG_DR2]);
OUT_RING(code[I810_DESTREG_DR3]);
OUT_RING(code[I810_DESTREG_DR4]);
OUT_RING(0);
ADVANCE_LP_RING();
}
static void i810EmitState(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
}
if (dirty & I810_UPLOAD_CTX) {
i810EmitContextVerified(dev, sarea_priv->ContextState);
sarea_priv->dirty &= ~I810_UPLOAD_CTX;
}
if (dirty & I810_UPLOAD_TEX0) {
i810EmitTexVerified(dev, sarea_priv->TexState[0]);
sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
}
if (dirty & I810_UPLOAD_TEX1) {
i810EmitTexVerified(dev, sarea_priv->TexState[1]);
sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
}
}
/* need to verify
*/
static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
RING_LOCALS;
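/* When the pages are flipped, swap the logical front/back flags so
 * each blit lands in the buffer that currently plays that role. */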
if (dev_priv->current_page == 1) {
unsigned int tmp = flags;
flags &= ~(I810_FRONT | I810_BACK);
if (tmp & I810_FRONT)
flags |= I810_BACK;
if (tmp & I810_BACK)
flags |= I810_FRONT;
}
i810_kernel_lost_context(dev);
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
for (i = 0; i < nbox; i++, pbox++) {
unsigned int x = pbox->x1;
unsigned int y = pbox->y1;
unsigned int width = (pbox->x2 - x) * cpp;
unsigned int height = pbox->y2 - y;
unsigned int start = y * pitch + x * cpp;
if (pbox->x1 > pbox->x2 ||
pbox->y1 > pbox->y2 ||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
continue;
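/* The solid color blits below use ROP 0xF0 (pattern copy) to fill
 * each cliprect with the clear value. */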
if (flags & I810_FRONT) {
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
OUT_RING((height << 16) | width);
OUT_RING(start);
OUT_RING(clear_color);
OUT_RING(0);
ADVANCE_LP_RING();
}
if (flags & I810_BACK) {
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
OUT_RING((height << 16) | width);
OUT_RING(dev_priv->back_offset + start);
OUT_RING(clear_color);
OUT_RING(0);
ADVANCE_LP_RING();
}
if (flags & I810_DEPTH) {
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
OUT_RING((height << 16) | width);
OUT_RING(dev_priv->depth_offset + start);
OUT_RING(clear_zval);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
}
static void i810_dma_dispatch_swap(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
RING_LOCALS;
DRM_DEBUG("swapbuffers\n");
i810_kernel_lost_context(dev);
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
for (i = 0; i < nbox; i++, pbox++) {
unsigned int w = pbox->x2 - pbox->x1;
unsigned int h = pbox->y2 - pbox->y1;
unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
unsigned int start = dst;
if (pbox->x1 > pbox->x2 ||
pbox->y1 > pbox->y2 ||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
continue;
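/* Source-copy blit (ROP 0xCC) from the just-rendered buffer into the
 * displayed one, honouring the current page-flip state. */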
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
OUT_RING(pitch | (0xCC << 16));
OUT_RING((h << 16) | (w * cpp));
if (dev_priv->current_page == 0)
OUT_RING(dev_priv->front_offset + start);
else
OUT_RING(dev_priv->back_offset + start);
OUT_RING(pitch);
if (dev_priv->current_page == 0)
OUT_RING(dev_priv->back_offset + start);
else
OUT_RING(dev_priv->front_offset + start);
ADVANCE_LP_RING();
}
}
static void i810_dma_dispatch_vertex(struct drm_device *dev,
struct drm_buf *buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
struct drm_clip_rect *box = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
int i = 0;
RING_LOCALS;
i810_kernel_lost_context(dev);
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
if (used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
i810EmitState(dev);
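/* Patch the primitive command into the first dword of the user's
 * buffer (length is counted in dwords minus two), and pad with a
 * zero dword so the batch ends qword aligned. */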
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
*(u32 *) buf_priv->kernel_virtual =
((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
if (used & 4) {
*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
used += 4;
}
i810_unmap_buffer(buf);
}
if (used) {
do {
if (i < nbox) {
BEGIN_LP_RING(4);
OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
SC_ENABLE);
OUT_RING(GFX_OP_SCISSOR_INFO);
OUT_RING(box[i].x1 | (box[i].y1 << 16));
OUT_RING((box[i].x2 -
1) | ((box[i].y2 - 1) << 16));
ADVANCE_LP_RING();
}
BEGIN_LP_RING(4);
OUT_RING(CMD_OP_BATCH_BUFFER);
OUT_RING(start | BB1_PROTECTED);
OUT_RING(start + used - 4);
OUT_RING(0);
ADVANCE_LP_RING();
} while (++i < nbox);
}
if (discard) {
dev_priv->counter++;
(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_HARDWARE);
BEGIN_LP_RING(8);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(dev_priv->counter);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(buf_priv->my_use_idx);
OUT_RING(I810_BUF_FREE);
OUT_RING(CMD_REPORT_HEAD);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
static void i810_dma_dispatch_flip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
RING_LOCALS;
DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
OUT_RING(0);
ADVANCE_LP_RING();
BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
/* On i815 at least ASYNC is buggy */
/* pitch<<5 is from 11.2.8 p158,
its the pitch / 8 then left shifted 8,
so (pitch >> 3) << 8 */
OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
if (dev_priv->current_page == 0) {
OUT_RING(dev_priv->back_offset);
dev_priv->current_page = 1;
} else {
OUT_RING(dev_priv->front_offset);
dev_priv->current_page = 0;
}
OUT_RING(0);
ADVANCE_LP_RING();
BEGIN_LP_RING(2);
OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
ADVANCE_LP_RING();
/* Increment the frame counter. The client-side 3D driver must
* throttle the framerate by waiting for this value before
* performing the swapbuffer ioctl.
*/
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
static void i810_dma_quiescent(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
OUT_RING(CMD_REPORT_HEAD);
OUT_RING(0);
OUT_RING(0);
ADVANCE_LP_RING();
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
static int i810_flush_queue(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
int i, ret = 0;
RING_LOCALS;
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
OUT_RING(CMD_REPORT_HEAD);
OUT_RING(0);
ADVANCE_LP_RING();
i810_wait_ring(dev, dev_priv->ring.Size - 8);
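/* With the ring drained, flip every buffer the hardware has finished
 * with back to FREE; the cmpxchg leaves client-owned buffers alone. */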
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
I810_BUF_FREE);
if (used == I810_BUF_HARDWARE)
DRM_DEBUG("reclaimed from HARDWARE\n");
if (used == I810_BUF_CLIENT)
DRM_DEBUG("still on client\n");
}
return ret;
}
/* Must be called with the lock held */
void i810_driver_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
if (!dev->dev_private)
return;
if (!dma->buflist)
return;
i810_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf->file_priv == file_priv && buf_priv) {
int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_FREE);
if (used == I810_BUF_CLIENT)
DRM_DEBUG("reclaimed from client\n");
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
}
}
}
static int i810_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
LOCK_TEST_WITH_RETURN(dev, file_priv);
i810_flush_queue(dev);
return 0;
}
static int i810_dma_vertex(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
drm_i810_vertex_t *vertex = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
return -EINVAL;
i810_dma_dispatch_vertex(dev,
dma->buflist[vertex->idx],
vertex->discard, vertex->used);
atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
return 0;
}
static int i810_clear_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_clear_t *clear = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
if (!dev->dev_private)
return -EINVAL;
i810_dma_dispatch_clear(dev, clear->flags,
clear->clear_color, clear->clear_depth);
return 0;
}
static int i810_swap_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
i810_dma_dispatch_swap(dev);
return 0;
}
static int i810_getage(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
sarea_priv->last_dispatch = (int)hw_status[5];
return 0;
}
static int i810_getbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int retcode = 0;
drm_i810_dma_t *d = data;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
LOCK_TEST_WITH_RETURN(dev, file_priv);
d->granted = 0;
retcode = i810_dma_get_buffer(dev, d, file_priv);
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
task_pid_nr(current), retcode, d->granted);
sarea_priv->last_dispatch = (int)hw_status[5];
return retcode;
}
static int i810_copybuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
}
static int i810_docopy(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
}
static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
int u;
RING_LOCALS;
i810_kernel_lost_context(dev);
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
if (used > 4 * 1024)
used = 0;
sarea_priv->dirty = 0x7f;
DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
if (used & 4) {
*(u32 *) ((char *) buf_priv->virtual + used) = 0;
used += 4;
}
i810_unmap_buffer(buf);
}
BEGIN_LP_RING(4);
OUT_RING(CMD_OP_BATCH_BUFFER);
OUT_RING(start | BB1_PROTECTED);
OUT_RING(start + used - 4);
OUT_RING(0);
ADVANCE_LP_RING();
BEGIN_LP_RING(8);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(buf_priv->my_use_idx);
OUT_RING(I810_BUF_FREE);
OUT_RING(0);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(16);
OUT_RING(last_render);
OUT_RING(0);
ADVANCE_LP_RING();
}
static int i810_dma_mc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
drm_i810_mc_t *mc = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (mc->idx >= dma->buf_count || mc->idx < 0)
return -EINVAL;
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
mc->last_render);
atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
return 0;
}
static int i810_rstatus(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
}
static int i810_ov0_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
drm_i810_overlay_t *ov = data;
ov->offset = dev_priv->overlay_offset;
ov->physical = dev_priv->overlay_physical;
return 0;
}
static int i810_fstatus(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN(dev, file_priv);
return I810_READ(0x30008);
}
static int i810_ov0_flip(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Tell the overlay to update */
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
return 0;
}
/* Not sure why this isn't set all the time:
*/
static void i810_do_init_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
static int i810_do_cleanup_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
i810_dma_dispatch_flip(dev);
dev_priv->page_flipping = 0;
return 0;
}
static int i810_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv->page_flipping)
i810_do_init_pageflip(dev);
i810_dma_dispatch_flip(dev);
return 0;
}
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
/* i810 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
pci_set_master(dev->pdev);
return 0;
}
void i810_driver_lastclose(struct drm_device *dev)
{
i810_dma_cleanup(dev);
}
void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
}
if (file_priv->master && file_priv->master->lock.hw_lock) {
drm_idlelock_take(&file_priv->master->lock);
i810_driver_reclaim_buffers(dev, file_priv);
drm_idlelock_release(&file_priv->master->lock);
} else {
/* master disappeared, clean up stuff anyway and hope nothing
* goes wrong */
i810_driver_reclaim_buffers(dev, file_priv);
}
}
int i810_driver_dma_quiescent(struct drm_device *dev)
{
i810_dma_quiescent(dev);
return 0;
}
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
* A value of 1 is always returned to indicate every i810 is AGP.
*/
int i810_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
| gpl-2.0 |
Stane1983/amlogic-m6_m8 | fs/notify/vfsmount_mark.c | 2730 | 5447 | /*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include "../mount.h"
void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
struct fsnotify_mark *mark, *lmark;
struct hlist_node *n;
struct mount *m = real_mount(mnt);
LIST_HEAD(free_list);
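/* Collect the marks under the spinlock first; destroying a mark can
 * sleep (it takes the group's mutex), so that must happen after the
 * d_lock is dropped. */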
spin_lock(&mnt->mnt_root->d_lock);
hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {
list_add(&mark->m.free_m_list, &free_list);
hlist_del_init_rcu(&mark->m.m_list);
fsnotify_get_mark(mark);
}
spin_unlock(&mnt->mnt_root->d_lock);
list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
struct fsnotify_group *group;
spin_lock(&mark->lock);
fsnotify_get_group(mark->group);
group = mark->group;
spin_unlock(&mark->lock);
fsnotify_destroy_mark(mark, group);
fsnotify_put_mark(mark);
fsnotify_put_group(group);
}
}
void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
}
/*
 * Recalculate the mask of events relevant to a given vfsmount; the
 * caller must hold mnt->mnt_root->d_lock.
*/
static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
{
struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
__u32 new_mask = 0;
assert_spin_locked(&mnt->mnt_root->d_lock);
hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)
new_mask |= mark->mask;
m->mnt_fsnotify_mask = new_mask;
}
/*
* Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
* any notifier is interested in hearing for this mount point
*/
void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
{
spin_lock(&mnt->mnt_root->d_lock);
fsnotify_recalc_vfsmount_mask_locked(mnt);
spin_unlock(&mnt->mnt_root->d_lock);
}
void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
struct vfsmount *mnt = mark->m.mnt;
BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
assert_spin_locked(&mark->lock);
spin_lock(&mnt->mnt_root->d_lock);
hlist_del_init_rcu(&mark->m.m_list);
mark->m.mnt = NULL;
fsnotify_recalc_vfsmount_mask_locked(mnt);
spin_unlock(&mnt->mnt_root->d_lock);
}
static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
struct vfsmount *mnt)
{
struct mount *m = real_mount(mnt);
struct fsnotify_mark *mark;
assert_spin_locked(&mnt->mnt_root->d_lock);
hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {
if (mark->group == group) {
fsnotify_get_mark(mark);
return mark;
}
}
return NULL;
}
/*
* given a group and vfsmount, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
struct vfsmount *mnt)
{
struct fsnotify_mark *mark;
spin_lock(&mnt->mnt_root->d_lock);
mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
spin_unlock(&mnt->mnt_root->d_lock);
return mark;
}
/*
* Attach an initialized mark to a given group and vfsmount.
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which groups.
*/
int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group, struct vfsmount *mnt,
int allow_dups)
{
struct mount *m = real_mount(mnt);
struct fsnotify_mark *lmark, *last = NULL;
int ret = 0;
mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
BUG_ON(!mutex_is_locked(&group->mark_mutex));
assert_spin_locked(&mark->lock);
spin_lock(&mnt->mnt_root->d_lock);
mark->m.mnt = mnt;
/* is mark the first mark? */
if (hlist_empty(&m->mnt_fsnotify_marks)) {
hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);
goto out;
}
/* should mark be in the middle of the current list? */
hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) {
last = lmark;
if ((lmark->group == group) && !allow_dups) {
ret = -EEXIST;
goto out;
}
if (mark->group->priority < lmark->group->priority)
continue;
if ((mark->group->priority == lmark->group->priority) &&
(mark->group < lmark->group))
continue;
hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
goto out;
}
BUG_ON(last == NULL);
/* mark should be the last entry. last is the current last entry */
hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);
out:
fsnotify_recalc_vfsmount_mask_locked(mnt);
spin_unlock(&mnt->mnt_root->d_lock);
return ret;
}
| gpl-2.0 |
garwynn/D710VMUB_FL26_Kernel | arch/m32r/platforms/mappi3/setup.c | 2986 | 5461 | /*
* linux/arch/m32r/platforms/mappi3/setup.c
*
* Setup routines for Renesas MAPPI-III(M3A-2170) Board
*
* Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
* Hitoshi Yamamoto, Mamoru Sakugawa
*/
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/system.h>
#include <asm/m32r.h>
#include <asm/io.h>
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
icu_data_t icu_data[NR_IRQS];
static void disable_mappi3_irq(unsigned int irq)
{
unsigned long port, data;
if ((irq == 0) || (irq >= NR_IRQS)) {
printk("bad irq 0x%08x\n", irq);
return;
}
port = irq2port(irq);
data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
outl(data, port);
}
static void enable_mappi3_irq(unsigned int irq)
{
unsigned long port, data;
if ((irq == 0) || (irq >= NR_IRQS)) {
printk("bad irq 0x%08x\n", irq);
return;
}
port = irq2port(irq);
data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
outl(data, port);
}
static void mask_mappi3(struct irq_data *data)
{
disable_mappi3_irq(data->irq);
}
static void unmask_mappi3(struct irq_data *data)
{
enable_mappi3_irq(data->irq);
}
static void shutdown_mappi3(struct irq_data *data)
{
unsigned long port;
port = irq2port(data->irq);
outl(M32R_ICUCR_ILEVEL7, port);
}
static struct irq_chip mappi3_irq_type = {
.name = "MAPPI3-IRQ",
.irq_shutdown = shutdown_mappi3,
.irq_mask = mask_mappi3,
.irq_unmask = unmask_mappi3,
};
void __init init_IRQ(void)
{
#if defined(CONFIG_SMC91X)
/* INT0 : LAN controller (SMC91111) */
irq_set_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_mappi3_irq(M32R_IRQ_INT0);
#endif /* CONFIG_SMC91X */
/* MFT2 : system timer */
irq_set_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
disable_mappi3_irq(M32R_IRQ_MFT2);
#ifdef CONFIG_SERIAL_M32R_SIO
/* SIO0_R : uart receive data */
irq_set_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO0_R].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO0_R);
/* SIO0_S : uart send data */
irq_set_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO0_S].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO0_S);
/* SIO1_R : uart receive data */
irq_set_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO1_R].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO1_R);
/* SIO1_S : uart send data */
irq_set_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_SIO1_S].icucr = 0;
disable_mappi3_irq(M32R_IRQ_SIO1_S);
#endif /* CONFIG_SERIAL_M32R_SIO */
#if defined(CONFIG_USB)
/* INT1 : USB Host controller interrupt */
irq_set_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type,
handle_level_irq);
icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
disable_mappi3_irq(M32R_IRQ_INT1);
#endif /* CONFIG_USB */
/* CFC IREQ */
irq_set_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type,
handle_level_irq);
icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
disable_mappi3_irq(PLD_IRQ_CFIREQ);
#if defined(CONFIG_M32R_CFC)
/* ICUCR41: CFC Insert & eject */
irq_set_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type,
handle_level_irq);
icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
#endif /* CONFIG_M32R_CFC */
/* IDE IREQ */
irq_set_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type,
handle_level_irq);
icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
disable_mappi3_irq(PLD_IRQ_IDEIREQ);
}
#if defined(CONFIG_SMC91X)
#define LAN_IOSTART 0x300
#define LAN_IOEND 0x320
static struct resource smc91x_resources[] = {
[0] = {
.start = (LAN_IOSTART),
.end = (LAN_IOEND),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = M32R_IRQ_INT0,
.end = M32R_IRQ_INT0,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
#endif
#if defined(CONFIG_FB_S1D13XXX)
#include <video/s1d13xxxfb.h>
#include <asm/s1d13806.h>
static struct s1d13xxxfb_pdata s1d13xxxfb_data = {
.initregs = s1d13xxxfb_initregs,
.initregssize = ARRAY_SIZE(s1d13xxxfb_initregs),
.platform_init_video = NULL,
#ifdef CONFIG_PM
.platform_suspend_video = NULL,
.platform_resume_video = NULL,
#endif
};
static struct resource s1d13xxxfb_resources[] = {
[0] = {
.start = 0x1d600000UL,
.end = 0x1d73FFFFUL,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 0x1d400000UL,
.end = 0x1d4001FFUL,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device s1d13xxxfb_device = {
.name = S1D_DEVICENAME,
.id = 0,
.dev = {
.platform_data = &s1d13xxxfb_data,
},
.num_resources = ARRAY_SIZE(s1d13xxxfb_resources),
.resource = s1d13xxxfb_resources,
};
#endif
static int __init platform_init(void)
{
#if defined(CONFIG_SMC91X)
platform_device_register(&smc91x_device);
#endif
#if defined(CONFIG_FB_S1D13XXX)
platform_device_register(&s1d13xxxfb_device);
#endif
return 0;
}
arch_initcall(platform_init);
| gpl-2.0 |
manveru0/FeaCore_Phoenix_S3 | arch/arm/plat-s3c24xx/cpu-freq.c | 2986 | 17684 | /* linux/arch/arm/plat-s3c24xx/cpu-freq.c
*
* Copyright (c) 2006-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX CPU Frequency scaling
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/cpu-freq-core.h>
#include <mach/regs-clock.h>
/* note, cpufreq support deals in kHz, no Hz */
static struct cpufreq_driver s3c24xx_driver;
static struct s3c_cpufreq_config cpu_cur;
static struct s3c_iotimings s3c24xx_iotiming;
static struct cpufreq_frequency_table *pll_reg;
static unsigned int last_target = ~0;
static unsigned int ftab_size;
static struct cpufreq_frequency_table *ftab;
static struct clk *_clk_mpll;
static struct clk *_clk_xtal;
static struct clk *clk_fclk;
static struct clk *clk_hclk;
static struct clk *clk_pclk;
static struct clk *clk_arm;
#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
{
return &cpu_cur;
}
struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
{
return &s3c24xx_iotiming;
}
#endif /* CONFIG_CPU_FREQ_S3C24XX_DEBUGFS */
static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
{
unsigned long fclk, pclk, hclk, armclk;
cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
cfg->pll.index = __raw_readl(S3C2410_MPLLCON);
cfg->pll.frequency = fclk;
cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
cfg->divs.h_divisor = fclk / hclk;
cfg->divs.p_divisor = fclk / pclk;
}
static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
{
unsigned long pll = cfg->pll.frequency;
cfg->freq.fclk = pll;
cfg->freq.hclk = pll / cfg->divs.h_divisor;
cfg->freq.pclk = pll / cfg->divs.p_divisor;
/* convert hclk into 10ths of nanoseconds for io calcs */
cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
}
static inline int closer(unsigned int target, unsigned int n, unsigned int c)
{
int diff_cur = abs(target - c);
int diff_new = abs(target - n);
return (diff_new < diff_cur);
}
static void s3c_cpufreq_show(const char *pfx,
struct s3c_cpufreq_config *cfg)
{
s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
cfg->freq.hclk, cfg->divs.h_divisor,
cfg->freq.pclk, cfg->divs.p_divisor);
}
/* functions to wrapper the driver info calls to do the cpu specific work */
static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
{
if (cfg->info->set_iotiming)
(cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
}
static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
{
if (cfg->info->calc_iotiming)
return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
return 0;
}
static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
{
(cfg->info->set_refresh)(cfg);
}
static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
{
(cfg->info->set_divs)(cfg);
}
static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
{
return (cfg->info->calc_divs)(cfg);
}
static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
{
(cfg->info->set_fvco)(cfg);
}
static inline void s3c_cpufreq_resume_clocks(void)
{
cpu_cur.info->resume_clocks();
}
static inline void s3c_cpufreq_updateclk(struct clk *clk,
unsigned int freq)
{
clk_set_rate(clk, freq);
}
static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
unsigned int target_freq,
struct cpufreq_frequency_table *pll)
{
struct s3c_cpufreq_freqs freqs;
struct s3c_cpufreq_config cpu_new;
unsigned long flags;
cpu_new = cpu_cur; /* copy new from current */
s3c_cpufreq_show("cur", &cpu_cur);
/* TODO - check for DMA currently outstanding */
cpu_new.pll = pll ? *pll : cpu_cur.pll;
if (pll)
freqs.pll_changing = 1;
/* update our frequencies */
cpu_new.freq.armclk = target_freq;
cpu_new.freq.fclk = cpu_new.pll.frequency;
if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
printk(KERN_ERR "no divisors for %d\n", target_freq);
goto err_notpossible;
}
s3c_freq_dbg("%s: got divs\n", __func__);
s3c_cpufreq_calc(&cpu_new);
s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
if (s3c_cpufreq_calcio(&cpu_new) < 0) {
printk(KERN_ERR "%s: no IO timings\n", __func__);
goto err_notpossible;
}
}
s3c_cpufreq_show("new", &cpu_new);
/* setup our cpufreq parameters */
freqs.old = cpu_cur.freq;
freqs.new = cpu_new.freq;
freqs.freqs.cpu = 0;
freqs.freqs.old = cpu_cur.freq.armclk / 1000;
freqs.freqs.new = cpu_new.freq.armclk / 1000;
/* update f/h/p clock settings before we issue the change
* notification, so that drivers do not need to do anything
* special if they want to recalculate on CPUFREQ_PRECHANGE. */
s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
/* start the frequency change */
if (policy)
cpufreq_notify_transition(&freqs.freqs, CPUFREQ_PRECHANGE);
/* If hclk is staying the same, then we do not need to
* re-write the IO or the refresh timings whilst we are changing
* speed. */
local_irq_save(flags);
/* is our memory clock slowing down? */
if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
s3c_cpufreq_setrefresh(&cpu_new);
s3c_cpufreq_setio(&cpu_new);
}
if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
/* not changing PLL, just set the divisors */
s3c_cpufreq_setdivs(&cpu_new);
} else {
if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
/* slow the cpu down, then set divisors */
s3c_cpufreq_setfvco(&cpu_new);
s3c_cpufreq_setdivs(&cpu_new);
} else {
/* set the divisors, then speed up */
s3c_cpufreq_setdivs(&cpu_new);
s3c_cpufreq_setfvco(&cpu_new);
}
}
/* did our memory clock speed up */
if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
s3c_cpufreq_setrefresh(&cpu_new);
s3c_cpufreq_setio(&cpu_new);
}
/* update our current settings */
cpu_cur = cpu_new;
local_irq_restore(flags);
/* notify everyone we've done this */
if (policy)
cpufreq_notify_transition(&freqs.freqs, CPUFREQ_POSTCHANGE);
s3c_freq_dbg("%s: finished\n", __func__);
return 0;
err_notpossible:
printk(KERN_ERR "no compatible settings for %d\n", target_freq);
return -EINVAL;
}
/* s3c_cpufreq_target
*
* called by the cpufreq core to adjust the frequency that the CPU
* is currently running at.
*/
static int s3c_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_frequency_table *pll;
unsigned int index;
/* avoid repeated calls which cause a needless amount of duplicated
* logging output (and CPU time, as the calculation process is
* redone) */
if (target_freq == last_target)
return 0;
last_target = target_freq;
s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
__func__, policy, target_freq, relation);
if (ftab) {
if (cpufreq_frequency_table_target(policy, ftab,
target_freq, relation,
&index)) {
s3c_freq_dbg("%s: table failed\n", __func__);
return -EINVAL;
}
s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
target_freq, index, ftab[index].frequency);
target_freq = ftab[index].frequency;
}
target_freq *= 1000; /* convert target to Hz */
/* find the settings for our new frequency */
if (!pll_reg || cpu_cur.lock_pll) {
/* either we've not got any PLL values, or we've locked
* to the current one. */
pll = NULL;
} else {
struct cpufreq_policy tmp_policy;
int ret;
/* we keep the cpu pll table in Hz, to ensure we get an
* accurate value for the PLL output. */
tmp_policy.min = policy->min * 1000;
tmp_policy.max = policy->max * 1000;
tmp_policy.cpu = policy->cpu;
/* cpufreq_frequency_table_target uses a pointer to 'index'
* which is the number of the table entry, not the value of
* the table entry's index field. */
ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
target_freq, relation,
&index);
if (ret < 0) {
printk(KERN_ERR "%s: no PLL available\n", __func__);
goto err_notpossible;
}
pll = pll_reg + index;
s3c_freq_dbg("%s: target %u => %u\n",
__func__, target_freq, pll->frequency);
target_freq = pll->frequency;
}
return s3c_cpufreq_settarget(policy, target_freq, pll);
err_notpossible:
printk(KERN_ERR "no compatible settings for %d\n", target_freq);
return -EINVAL;
}
static unsigned int s3c_cpufreq_get(unsigned int cpu)
{
return clk_get_rate(clk_arm) / 1000;
}
struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
{
struct clk *clk;
clk = clk_get(dev, name);
if (IS_ERR(clk))
printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
return clk;
}
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
if (policy->cpu != 0)
return -EINVAL;
policy->cur = s3c_cpufreq_get(0);
policy->min = policy->cpuinfo.min_freq = 0;
policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* feed the latency information from the cpu driver */
policy->cpuinfo.transition_latency = cpu_cur.info->latency;
if (ftab)
cpufreq_frequency_table_cpuinfo(policy, ftab);
return 0;
}
static __init int s3c_cpufreq_initclks(void)
{
_clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
_clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
return -ENOENT;
}
printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
clk_get_rate(clk_fclk) / 1000,
clk_get_rate(clk_hclk) / 1000,
clk_get_rate(clk_pclk) / 1000,
clk_get_rate(clk_arm) / 1000);
return 0;
}
static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
return 0;
}
#ifdef CONFIG_PM
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
{
suspend_pll.frequency = clk_get_rate(_clk_mpll);
suspend_pll.index = __raw_readl(S3C2410_MPLLCON);
suspend_freq = s3c_cpufreq_get(0) * 1000;
return 0;
}
static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
{
int ret;
s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
last_target = ~0; /* invalidate last_target setting */
/* first, find out what speed we resumed at. */
s3c_cpufreq_resume_clocks();
/* whilst we will be called later on, we try and re-set the
* cpu frequencies as soon as possible so that we do not end
* up resuming devices and then immediately having to re-set
* a number of settings once these devices have restarted.
*
* as a note, it is expected devices are not used until they
* have been un-suspended and at that time they should have
* used the updated clock settings.
*/
ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
if (ret) {
printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
return ret;
}
return 0;
}
#else
#define s3c_cpufreq_resume NULL
#define s3c_cpufreq_suspend NULL
#endif
static struct cpufreq_driver s3c24xx_driver = {
.flags = CPUFREQ_STICKY,
.verify = s3c_cpufreq_verify,
.target = s3c_cpufreq_target,
.get = s3c_cpufreq_get,
.init = s3c_cpufreq_init,
.suspend = s3c_cpufreq_suspend,
.resume = s3c_cpufreq_resume,
.name = "s3c24xx",
};
int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
{
if (!info || !info->name) {
printk(KERN_ERR "%s: failed to pass valid information\n",
__func__);
return -EINVAL;
}
printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
info->name);
/* check our driver info has valid data */
BUG_ON(info->set_refresh == NULL);
BUG_ON(info->set_divs == NULL);
BUG_ON(info->calc_divs == NULL);
/* info->set_fvco is optional, depending on whether there
* is a need to set the clock code. */
cpu_cur.info = info;
/* Note, driver registering should probably update locktime */
return 0;
}
int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
{
struct s3c_cpufreq_board *ours;
if (!board) {
printk(KERN_INFO "%s: no board data\n", __func__);
return -EINVAL;
}
/* Copy the board information so that each board can declare its
* own copy as __initdata. */
ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL);
if (ours == NULL) {
printk(KERN_ERR "%s: no memory\n", __func__);
return -ENOMEM;
}
*ours = *board;
cpu_cur.board = ours;
return 0;
}
int __init s3c_cpufreq_auto_io(void)
{
int ret;
if (!cpu_cur.info->get_iotiming) {
printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
return -ENOENT;
}
printk(KERN_INFO "%s: working out IO settings\n", __func__);
ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
if (ret)
printk(KERN_ERR "%s: failed to get timings\n", __func__);
return ret;
}
/* if one or the other is zero, return the non-zero one; otherwise return the min */
#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
/**
* s3c_cpufreq_freq_min - find the minimum settings for the given freq.
* @dst: The destination structure
* @a: One argument.
* @b: The other argument.
*
* Create a minimum of each frequency entry in the 'struct s3c_freq',
* unless the entry is zero when it is ignored and the non-zero argument
* used.
*/
static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
struct s3c_freq *a, struct s3c_freq *b)
{
dst->fclk = do_min(a->fclk, b->fclk);
dst->hclk = do_min(a->hclk, b->hclk);
dst->pclk = do_min(a->pclk, b->pclk);
dst->armclk = do_min(a->armclk, b->armclk);
}
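/* Convert a PLL lock time given in microseconds into clock cycles at
 * the given rate, rounding up so the lock window is never cut short. */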
static inline u32 calc_locktime(u32 freq, u32 time_us)
{
u32 result;
result = freq * time_us;
result = DIV_ROUND_UP(result, 1000 * 1000);
return result;
}
static void s3c_cpufreq_update_locktime(void)
{
unsigned int bits = cpu_cur.info->locktime_bits;
u32 rate = (u32)clk_get_rate(_clk_xtal);
u32 val;
if (bits == 0) {
WARN_ON(1);
return;
}
val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
val |= calc_locktime(rate, cpu_cur.info->locktime_m);
printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
__raw_writel(val, S3C2410_LOCKTIME);
}
static int s3c_cpufreq_build_freq(void)
{
int size, ret;
if (!cpu_cur.info->calc_freqtable)
return -EINVAL;
kfree(ftab);
ftab = NULL;
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++;
ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL);
if (!ftab) {
printk(KERN_ERR "%s: no memory for tables\n", __func__);
return -ENOMEM;
}
ftab_size = size;
ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
return 0;
}
static int __init s3c_cpufreq_initcall(void)
{
int ret = 0;
if (cpu_cur.info && cpu_cur.board) {
ret = s3c_cpufreq_initclks();
if (ret)
goto out;
/* get current settings */
s3c_cpufreq_getcur(&cpu_cur);
s3c_cpufreq_show("cur", &cpu_cur);
if (cpu_cur.board->auto_io) {
ret = s3c_cpufreq_auto_io();
if (ret) {
printk(KERN_ERR "%s: failed to get io timing\n",
__func__);
goto out;
}
}
if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
printk(KERN_ERR "%s: no IO support registered\n",
__func__);
ret = -EINVAL;
goto out;
}
if (!cpu_cur.info->need_pll)
cpu_cur.lock_pll = 1;
s3c_cpufreq_update_locktime();
s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
&cpu_cur.info->max);
if (cpu_cur.info->calc_freqtable)
s3c_cpufreq_build_freq();
ret = cpufreq_register_driver(&s3c24xx_driver);
}
out:
return ret;
}
late_initcall(s3c_cpufreq_initcall);
/**
* s3c_plltab_register - register CPU PLL table.
* @plls: The list of PLL entries.
* @plls_no: The size of the PLL entries @plls.
*
* Register the given set of PLLs with the system.
*/
int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
unsigned int plls_no)
{
struct cpufreq_frequency_table *vals;
unsigned int size;
size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1);
vals = kmalloc(size, GFP_KERNEL);
if (vals) {
memcpy(vals, plls, sizeof(struct cpufreq_frequency_table) * plls_no);
pll_reg = vals;
/* append a terminating entry; it exists only in the kernel's
* copy, not in the table passed in by the caller */
vals += plls_no;
vals->frequency = CPUFREQ_TABLE_END;
printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
} else
printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
return vals ? 0 : -ENOMEM;
}
| gpl-2.0 |
ZeroInfinityXDA/OQC-m9 | arch/mips/ath79/dev-usb.c | 4522 | 6162 | /*
* Atheros AR7XXX/AR9XXX USB Host Controller device
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* Parts of this file are based on Atheros' 2.6.15 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
#include "dev-usb.h"
static u64 ath79_usb_dmamask = DMA_BIT_MASK(32);
static struct usb_ohci_pdata ath79_ohci_pdata = {
};
static struct usb_ehci_pdata ath79_ehci_pdata_v1 = {
.has_synopsys_hc_bug = 1,
};
static struct usb_ehci_pdata ath79_ehci_pdata_v2 = {
.caps_offset = 0x100,
.has_tt = 1,
};
static void __init ath79_usb_register(const char *name, int id,
unsigned long base, unsigned long size,
int irq, const void *data,
size_t data_size)
{
struct resource res[2];
struct platform_device *pdev;
memset(res, 0, sizeof(res));
res[0].flags = IORESOURCE_MEM;
res[0].start = base;
res[0].end = base + size - 1;
res[1].flags = IORESOURCE_IRQ;
res[1].start = irq;
res[1].end = irq;
pdev = platform_device_register_resndata(NULL, name, id,
res, ARRAY_SIZE(res),
data, data_size);
if (IS_ERR(pdev)) {
pr_err("ath79: unable to register USB at %08lx, err=%d\n",
base, (int) PTR_ERR(pdev));
return;
}
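/* The resndata helper has no way to pass a DMA mask, so it is
 * patched into the device after registration. */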
pdev->dev.dma_mask = &ath79_usb_dmamask;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \
AR71XX_RESET_USB_PHY | \
AR71XX_RESET_USB_OHCI_DLL)
static void __init ath79_usb_setup(void)
{
void __iomem *usb_ctrl_base;
ath79_device_reset_set(AR71XX_USB_RESET_MASK);
mdelay(1000);
ath79_device_reset_clear(AR71XX_USB_RESET_MASK);
usb_ctrl_base = ioremap(AR71XX_USB_CTRL_BASE, AR71XX_USB_CTRL_SIZE);
/* Turning on the Buff and Desc swap bits */
__raw_writel(0xf0000, usb_ctrl_base + AR71XX_USB_CTRL_REG_CONFIG);
/* WAR for HW bug. Here it adjusts the duration between two SOFS */
__raw_writel(0x20c00, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ);
iounmap(usb_ctrl_base);
mdelay(900);
ath79_usb_register("ohci-platform", -1,
AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE,
ATH79_MISC_IRQ(6),
&ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
ath79_usb_register("ehci-platform", -1,
AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1));
}
static void __init ar7240_usb_setup(void)
{
void __iomem *usb_ctrl_base;
ath79_device_reset_clear(AR7240_RESET_OHCI_DLL);
ath79_device_reset_set(AR7240_RESET_USB_HOST);
mdelay(1000);
ath79_device_reset_set(AR7240_RESET_OHCI_DLL);
ath79_device_reset_clear(AR7240_RESET_USB_HOST);
usb_ctrl_base = ioremap(AR7240_USB_CTRL_BASE, AR7240_USB_CTRL_SIZE);
/* WAR for HW bug. Here it adjusts the duration between two SOFS */
__raw_writel(0x3, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ);
iounmap(usb_ctrl_base);
ath79_usb_register("ohci-platform", -1,
AR7240_OHCI_BASE, AR7240_OHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
}
static void __init ar724x_usb_setup(void)
{
ath79_device_reset_set(AR724X_RESET_USBSUS_OVERRIDE);
mdelay(10);
ath79_device_reset_clear(AR724X_RESET_USB_HOST);
mdelay(10);
ath79_device_reset_clear(AR724X_RESET_USB_PHY);
mdelay(10);
ath79_usb_register("ehci-platform", -1,
AR724X_EHCI_BASE, AR724X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar913x_usb_setup(void)
{
ath79_device_reset_set(AR913X_RESET_USBSUS_OVERRIDE);
mdelay(10);
ath79_device_reset_clear(AR913X_RESET_USB_HOST);
mdelay(10);
ath79_device_reset_clear(AR913X_RESET_USB_PHY);
mdelay(10);
ath79_usb_register("ehci-platform", -1,
AR913X_EHCI_BASE, AR913X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar933x_usb_setup(void)
{
ath79_device_reset_set(AR933X_RESET_USBSUS_OVERRIDE);
mdelay(10);
ath79_device_reset_clear(AR933X_RESET_USB_HOST);
mdelay(10);
ath79_device_reset_clear(AR933X_RESET_USB_PHY);
mdelay(10);
ath79_usb_register("ehci-platform", -1,
AR933X_EHCI_BASE, AR933X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar934x_usb_setup(void)
{
u32 bootstrap;
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
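/* If the bootstrap pins strap the USB block into device mode there is
 * no host controller to bring up, so do nothing. */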
if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE)
return;
ath79_device_reset_set(AR934X_RESET_USBSUS_OVERRIDE);
udelay(1000);
ath79_device_reset_clear(AR934X_RESET_USB_PHY);
udelay(1000);
ath79_device_reset_clear(AR934X_RESET_USB_PHY_ANALOG);
udelay(1000);
ath79_device_reset_clear(AR934X_RESET_USB_HOST);
udelay(1000);
ath79_usb_register("ehci-platform", -1,
AR934X_EHCI_BASE, AR934X_EHCI_SIZE,
ATH79_CPU_IRQ(3),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init qca955x_usb_setup(void)
{
ath79_usb_register("ehci-platform", 0,
QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE,
ATH79_IP3_IRQ(0),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
ath79_usb_register("ehci-platform", 1,
QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE,
ATH79_IP3_IRQ(1),
&ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
void __init ath79_register_usb(void)
{
if (soc_is_ar71xx())
ath79_usb_setup();
else if (soc_is_ar7240())
ar7240_usb_setup();
else if (soc_is_ar7241() || soc_is_ar7242())
ar724x_usb_setup();
else if (soc_is_ar913x())
ar913x_usb_setup();
else if (soc_is_ar933x())
ar933x_usb_setup();
else if (soc_is_ar934x())
ar934x_usb_setup();
else if (soc_is_qca955x())
qca955x_usb_setup();
else
BUG();
}
| gpl-2.0 |
NamanArora/flamingo_kernel | drivers/mfd/twl4030-power.c | 5034 | 14670 | /*
* linux/drivers/i2c/chips/twl4030-power.c
*
* Handle TWL4030 Power initialization
*
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2006 Texas Instruments, Inc
*
* Written by Kalle Jokiniemi
* Peter De Schrijver <peter.de-schrijver@nokia.com>
* Several fixes by Amit Kucheria <amit.kucheria@verdurent.com>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/i2c/twl.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
static u8 twl4030_start_script_address = 0x2b;
#define PWR_P1_SW_EVENTS 0x10
#define PWR_DEVOFF (1 << 0)
#define SEQ_OFFSYNC (1 << 0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
#define PHY_TO_OFF_PM_RECEIVER(p) (p - 0x5b)
/* resource - hfclk */
#define R_HFCLKOUT_DEV_GRP PHY_TO_OFF_PM_RECEIVER(0xe6)
/* PM events */
#define R_P1_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x46)
#define R_P2_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x47)
#define R_P3_SW_EVENTS PHY_TO_OFF_PM_MASTER(0x48)
#define R_CFG_P1_TRANSITION PHY_TO_OFF_PM_MASTER(0x36)
#define R_CFG_P2_TRANSITION PHY_TO_OFF_PM_MASTER(0x37)
#define R_CFG_P3_TRANSITION PHY_TO_OFF_PM_MASTER(0x38)
#define LVL_WAKEUP 0x08
#define ENABLE_WARMRESET (1<<4)
#define END_OF_SCRIPT 0x3f
#define R_SEQ_ADD_A2S PHY_TO_OFF_PM_MASTER(0x55)
#define R_SEQ_ADD_S2A12 PHY_TO_OFF_PM_MASTER(0x56)
#define R_SEQ_ADD_S2A3 PHY_TO_OFF_PM_MASTER(0x57)
#define R_SEQ_ADD_WARM PHY_TO_OFF_PM_MASTER(0x58)
#define R_MEMORY_ADDRESS PHY_TO_OFF_PM_MASTER(0x59)
#define R_MEMORY_DATA PHY_TO_OFF_PM_MASTER(0x5a)
/* resource configuration registers
<RESOURCE>_DEV_GRP at address 'n+0'
<RESOURCE>_TYPE at address 'n+1'
<RESOURCE>_REMAP at address 'n+2'
<RESOURCE>_DEDICATED at address 'n+3'
*/
#define DEV_GRP_OFFSET 0
#define TYPE_OFFSET 1
#define REMAP_OFFSET 2
#define DEDICATED_OFFSET 3
/* Bit positions in the registers */
/* <RESOURCE>_DEV_GRP */
#define DEV_GRP_SHIFT 5
#define DEV_GRP_MASK (7 << DEV_GRP_SHIFT)
/* <RESOURCE>_TYPE */
#define TYPE_SHIFT 0
#define TYPE_MASK (7 << TYPE_SHIFT)
#define TYPE2_SHIFT 3
#define TYPE2_MASK (3 << TYPE2_SHIFT)
/* <RESOURCE>_REMAP */
#define SLEEP_STATE_SHIFT 0
#define SLEEP_STATE_MASK (0xf << SLEEP_STATE_SHIFT)
#define OFF_STATE_SHIFT 4
#define OFF_STATE_MASK (0xf << OFF_STATE_SHIFT)
static u8 res_config_addrs[] = {
[RES_VAUX1] = 0x17,
[RES_VAUX2] = 0x1b,
[RES_VAUX3] = 0x1f,
[RES_VAUX4] = 0x23,
[RES_VMMC1] = 0x27,
[RES_VMMC2] = 0x2b,
[RES_VPLL1] = 0x2f,
[RES_VPLL2] = 0x33,
[RES_VSIM] = 0x37,
[RES_VDAC] = 0x3b,
[RES_VINTANA1] = 0x3f,
[RES_VINTANA2] = 0x43,
[RES_VINTDIG] = 0x47,
[RES_VIO] = 0x4b,
[RES_VDD1] = 0x55,
[RES_VDD2] = 0x63,
[RES_VUSB_1V5] = 0x71,
[RES_VUSB_1V8] = 0x74,
[RES_VUSB_3V1] = 0x77,
[RES_VUSBCP] = 0x7a,
[RES_REGEN] = 0x7f,
[RES_NRES_PWRON] = 0x82,
[RES_CLKEN] = 0x85,
[RES_SYSEN] = 0x88,
[RES_HFCLKOUT] = 0x8b,
[RES_32KCLKOUT] = 0x8e,
[RES_RESET] = 0x91,
[RES_MAIN_REF] = 0x94,
};
static int __devinit twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_MEMORY_ADDRESS);
if (err)
goto out;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
R_MEMORY_DATA);
out:
return err;
}
static int __devinit twl4030_write_script_ins(u8 address, u16 pmb_message,
u8 delay, u8 next)
{
int err;
address *= 4;
err = twl4030_write_script_byte(address++, pmb_message >> 8);
if (err)
goto out;
err = twl4030_write_script_byte(address++, pmb_message & 0xff);
if (err)
goto out;
err = twl4030_write_script_byte(address++, delay);
if (err)
goto out;
err = twl4030_write_script_byte(address++, next);
out:
return err;
}
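/* Illustrative note: the script "address" above is an instruction index;
address *= 4 converts it to a byte offset, so instruction 0x2b starts at
byte 0xac and stores, in order, the pmb_message MSB, the pmb_message LSB,
the delay, and the index of the next instruction (END_OF_SCRIPT, 0x3f,
terminates the chain). */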
static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
int err;
for (; len; len--, address++, script++) {
if (len == 1) {
err = twl4030_write_script_ins(address,
script->pmb_message,
script->delay,
END_OF_SCRIPT);
if (err)
break;
} else {
err = twl4030_write_script_ins(address,
script->pmb_message,
script->delay,
address + 1);
if (err)
break;
}
}
return err;
}
static int __devinit twl4030_config_wakeup3_sequence(u8 address)
{
int err;
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P3 */
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A3);
if (err)
goto out;
/* P3 LVL_WAKEUP should be on LEVEL */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P3_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 wakeup sequence for P3 config error\n");
return err;
}
static int __devinit twl4030_config_wakeup12_sequence(u8 address)
{
int err = 0;
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A12);
if (err)
goto out;
/* P1/P2 LVL_WAKEUP should be on LEVEL */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P1_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P1_SW_EVENTS);
if (err)
goto out;
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P2_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P2_SW_EVENTS);
if (err)
goto out;
if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
/* Disabling AC charger effect on sleep-active transitions */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_CFG_P1_TRANSITION);
if (err)
goto out;
data &= ~(1<<1);
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
R_CFG_P1_TRANSITION);
if (err)
goto out;
}
out:
if (err)
pr_err("TWL4030 wakeup sequence for P1 and P2" \
"config error\n");
return err;
}
static int __devinit twl4030_config_sleep_sequence(u8 address)
{
int err;
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_A2S);
if (err)
pr_err("TWL4030 sleep sequence config error\n");
return err;
}
static int __devinit twl4030_config_warmreset_sequence(u8 address)
{
int err;
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_WARM);
if (err)
goto out;
/* P1/P2/P3 enable WARMRESET */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P1_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P1_SW_EVENTS);
if (err)
goto out;
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P2_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P2_SW_EVENTS);
if (err)
goto out;
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P3_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P3_SW_EVENTS);
out:
if (err)
pr_err("TWL4030 warmreset seq config error\n");
return err;
}
static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfig)
{
int rconfig_addr;
int err;
u8 type;
u8 grp;
u8 remap;
if (rconfig->resource > TOTAL_RESOURCES) {
pr_err("TWL4030 Resource %d does not exist\n",
rconfig->resource);
return -EINVAL;
}
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
rconfig_addr + DEV_GRP_OFFSET);
if (err) {
pr_err("TWL4030 Resource %d group could not be read\n",
rconfig->resource);
return err;
}
if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
grp &= ~DEV_GRP_MASK;
grp |= rconfig->devgroup << DEV_GRP_SHIFT;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
grp, rconfig_addr + DEV_GRP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program devgroup\n");
return err;
}
}
/* Set resource types */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d type could not be read\n",
rconfig->resource);
return err;
}
if (rconfig->type != TWL4030_RESCONFIG_UNDEF) {
type &= ~TYPE_MASK;
type |= rconfig->type << TYPE_SHIFT;
}
if (rconfig->type2 != TWL4030_RESCONFIG_UNDEF) {
type &= ~TYPE2_MASK;
type |= rconfig->type2 << TYPE2_SHIFT;
}
err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program resource type\n");
return err;
}
/* Set remap states */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d remap could not be read\n",
rconfig->resource);
return err;
}
if (rconfig->remap_off != TWL4030_RESCONFIG_UNDEF) {
remap &= ~OFF_STATE_MASK;
remap |= rconfig->remap_off << OFF_STATE_SHIFT;
}
if (rconfig->remap_sleep != TWL4030_RESCONFIG_UNDEF) {
remap &= ~SLEEP_STATE_MASK;
remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
}
err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
remap,
rconfig_addr + REMAP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program remap\n");
return err;
}
return 0;
}
static int __devinit load_twl4030_script(struct twl4030_script *tscript,
u8 address)
{
int err;
static int order;
/* Make sure the script isn't going beyond last valid address (0x3f) */
if ((address + tscript->size) > END_OF_SCRIPT) {
pr_err("TWL4030 scripts too big error\n");
return -EINVAL;
}
err = twl4030_write_script(address, tscript->script, tscript->size);
if (err)
goto out;
if (tscript->flags & TWL4030_WRST_SCRIPT) {
err = twl4030_config_warmreset_sequence(address);
if (err)
goto out;
}
if (tscript->flags & TWL4030_WAKEUP12_SCRIPT) {
err = twl4030_config_wakeup12_sequence(address);
if (err)
goto out;
order = 1;
}
if (tscript->flags & TWL4030_WAKEUP3_SCRIPT) {
err = twl4030_config_wakeup3_sequence(address);
if (err)
goto out;
}
if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
if (!order)
pr_warning("TWL4030: Bad order of scripts (sleep "\
"script before wakeup) Leads to boot"\
"failure on some boards\n");
err = twl4030_config_sleep_sequence(address);
}
out:
return err;
}
int twl4030_remove_script(u8 flags)
{
int err = 0;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
TWL4030_PM_MASTER_KEY_CFG1,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
TWL4030_PM_MASTER_KEY_CFG2,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err) {
pr_err("twl4030: unable to unlock PROTECT_KEY\n");
return err;
}
if (flags & TWL4030_WRST_SCRIPT) {
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
R_SEQ_ADD_WARM);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP12_SCRIPT) {
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
R_SEQ_ADD_S2A12);
if (err)
return err;
}
if (flags & TWL4030_WAKEUP3_SCRIPT) {
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
R_SEQ_ADD_S2A3);
if (err)
return err;
}
if (flags & TWL4030_SLEEP_SCRIPT) {
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
R_SEQ_ADD_A2S);
if (err)
return err;
}
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return err;
}
/*
* In master mode, start the power off sequence.
* After a successful execution, TWL shuts down the power to the SoC
* and all peripherals connected to it.
*/
void twl4030_power_off(void)
{
int err;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
TWL4030_PM_MASTER_P1_SW_EVENTS);
if (err)
pr_err("TWL4030 Unable to power off\n");
}
void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
{
int err = 0;
int i;
struct twl4030_resconfig *resconfig;
u8 val, address = twl4030_start_script_address;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
TWL4030_PM_MASTER_KEY_CFG1,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
TWL4030_PM_MASTER_KEY_CFG2,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
goto unlock;
for (i = 0; i < twl4030_scripts->num; i++) {
err = load_twl4030_script(twl4030_scripts->scripts[i], address);
if (err)
goto load;
address += twl4030_scripts->scripts[i]->size;
}
resconfig = twl4030_scripts->resource_config;
if (resconfig) {
while (resconfig->resource) {
err = twl4030_configure_resource(resconfig);
if (err)
goto resource;
resconfig++;
}
}
/* Board has to be wired properly to use this feature */
if (twl4030_scripts->use_poweroff && !pm_power_off) {
/* Default for SEQ_OFFSYNC is set, let's ensure this */
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_warning("TWL4030 Unable to read registers\n");
} else if (!(val & SEQ_OFFSYNC)) {
val |= SEQ_OFFSYNC;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
TWL4030_PM_MASTER_CFG_P123_TRANSITION);
if (err) {
pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
goto relock;
}
}
pm_power_off = twl4030_power_off;
}
relock:
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
unlock:
if (err)
pr_err("TWL4030 Unable to unlock registers\n");
return;
load:
if (err)
pr_err("TWL4030 failed to load scripts\n");
return;
resource:
if (err)
pr_err("TWL4030 failed to configure resource\n");
return;
}
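/*
* Minimal usage sketch (illustrative, not part of this file): board code
* fills a struct twl4030_power_data and hands it to twl4030_power_init(),
* usually via twl4030_platform_data. The MSG_BROADCAST macro and RES_*
* constants are assumed from <linux/i2c/twl.h>.
*/
#if 0
static struct twl4030_ins sleep_on_seq[] __initdata = {
/* broadcast: put all resources of all device groups to sleep */
{MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_ALL,
RES_TYPE2_R0, RES_STATE_SLEEP), 2},
};
static struct twl4030_script sleep_on_script __initdata = {
.script = sleep_on_seq,
.size = ARRAY_SIZE(sleep_on_seq),
.flags = TWL4030_SLEEP_SCRIPT,
};
static struct twl4030_script *board_scripts[] __initdata = {
&sleep_on_script,
};
static struct twl4030_power_data board_power __initdata = {
.scripts = board_scripts,
.num = ARRAY_SIZE(board_scripts),
};
/* board init would then call: twl4030_power_init(&board_power); */
#endif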
| gpl-2.0 |
Haxynox/kernel_samsung_n7100-old | drivers/media/common/tuners/tda8290.c | 8106 | 24382 | /*
i2c tv tuner chip device driver
controls the philips tda8290+75 tuner chip combo.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
This "tda8290" module was split apart from the original "tuner" module.
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
#include "tuner-i2c.h"
#include "tda8290.h"
#include "tda827x.h"
#include "tda18271.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable verbose debug messages");
static int deemphasis_50;
module_param(deemphasis_50, int, 0644);
MODULE_PARM_DESC(deemphasis_50, "0 - 75us deemphasis; 1 - 50us deemphasis");
/* ---------------------------------------------------------------------- */
struct tda8290_priv {
struct tuner_i2c_props i2c_props;
unsigned char tda8290_easy_mode;
unsigned char tda827x_addr;
unsigned char ver;
#define TDA8290 1
#define TDA8295 2
#define TDA8275 4
#define TDA8275A 8
#define TDA18271 16
struct tda827x_config cfg;
};
/*---------------------------------------------------------------------*/
static int tda8290_i2c_bridge(struct dvb_frontend *fe, int close)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char enable[2] = { 0x21, 0xC0 };
unsigned char disable[2] = { 0x21, 0x00 };
unsigned char *msg;
if (close) {
msg = enable;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
/* let the bridge stabilize */
msleep(20);
} else {
msg = disable;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
}
return 0;
}
static int tda8295_i2c_bridge(struct dvb_frontend *fe, int close)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char enable[2] = { 0x45, 0xc1 };
unsigned char disable[2] = { 0x46, 0x00 };
unsigned char buf[3] = { 0x45, 0x01, 0x00 };
unsigned char *msg;
if (close) {
msg = enable;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
/* let the bridge stabilize */
msleep(20);
} else {
msg = disable;
tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
buf[2] = msg[1];
buf[2] &= ~0x04;
tuner_i2c_xfer_send(&priv->i2c_props, buf, 3);
msleep(5);
msg[1] |= 0x04;
tuner_i2c_xfer_send(&priv->i2c_props, msg, 2);
}
return 0;
}
/*---------------------------------------------------------------------*/
static void set_audio(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
char* mode;
if (params->std & V4L2_STD_MN) {
priv->tda8290_easy_mode = 0x01;
mode = "MN";
} else if (params->std & V4L2_STD_B) {
priv->tda8290_easy_mode = 0x02;
mode = "B";
} else if (params->std & V4L2_STD_GH) {
priv->tda8290_easy_mode = 0x04;
mode = "GH";
} else if (params->std & V4L2_STD_PAL_I) {
priv->tda8290_easy_mode = 0x08;
mode = "I";
} else if (params->std & V4L2_STD_DK) {
priv->tda8290_easy_mode = 0x10;
mode = "DK";
} else if (params->std & V4L2_STD_SECAM_L) {
priv->tda8290_easy_mode = 0x20;
mode = "L";
} else if (params->std & V4L2_STD_SECAM_LC) {
priv->tda8290_easy_mode = 0x40;
mode = "LC";
} else {
priv->tda8290_easy_mode = 0x10;
mode = "xx";
}
if (params->mode == V4L2_TUNER_RADIO) {
/* Set TDA8295 to FM radio; Start TDA8290 with MN values */
priv->tda8290_easy_mode = (priv->ver & TDA8295) ? 0x80 : 0x01;
tuner_dbg("setting to radio FM\n");
} else {
tuner_dbg("setting tda829x to system %s\n", mode);
}
}
static struct {
unsigned char seq[2];
} fm_mode[] = {
{ { 0x01, 0x81} }, /* Put device into expert mode */
{ { 0x03, 0x48} }, /* Disable NOTCH and VIDEO filters */
{ { 0x04, 0x04} }, /* Disable color carrier filter (SSIF) */
{ { 0x05, 0x04} }, /* ADC headroom */
{ { 0x06, 0x10} }, /* group delay flat */
{ { 0x07, 0x00} }, /* use the same radio DTO values as a tda8295 */
{ { 0x08, 0x00} },
{ { 0x09, 0x80} },
{ { 0x0a, 0xda} },
{ { 0x0b, 0x4b} },
{ { 0x0c, 0x68} },
{ { 0x0d, 0x00} }, /* PLL off, no video carrier detect */
{ { 0x14, 0x00} }, /* disable auto mute if no video */
};
static void tda8290_set_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char soft_reset[] = { 0x00, 0x00 };
unsigned char easy_mode[] = { 0x01, priv->tda8290_easy_mode };
unsigned char expert_mode[] = { 0x01, 0x80 };
unsigned char agc_out_on[] = { 0x02, 0x00 };
unsigned char gainset_off[] = { 0x28, 0x14 };
unsigned char if_agc_spd[] = { 0x0f, 0x88 };
unsigned char adc_head_6[] = { 0x05, 0x04 };
unsigned char adc_head_9[] = { 0x05, 0x02 };
unsigned char adc_head_12[] = { 0x05, 0x01 };
unsigned char pll_bw_nom[] = { 0x0d, 0x47 };
unsigned char pll_bw_low[] = { 0x0d, 0x27 };
unsigned char gainset_2[] = { 0x28, 0x64 };
unsigned char agc_rst_on[] = { 0x0e, 0x0b };
unsigned char agc_rst_off[] = { 0x0e, 0x09 };
unsigned char if_agc_set[] = { 0x0f, 0x81 };
unsigned char addr_adc_sat = 0x1a;
unsigned char addr_agc_stat = 0x1d;
unsigned char addr_pll_stat = 0x1b;
unsigned char adc_sat, agc_stat,
pll_stat;
int i;
set_audio(fe, params);
if (priv->cfg.config)
tuner_dbg("tda827xa config is 0x%02x\n", priv->cfg.config);
tuner_i2c_xfer_send(&priv->i2c_props, easy_mode, 2);
tuner_i2c_xfer_send(&priv->i2c_props, agc_out_on, 2);
tuner_i2c_xfer_send(&priv->i2c_props, soft_reset, 2);
msleep(1);
if (params->mode == V4L2_TUNER_RADIO) {
unsigned char deemphasis[] = { 0x13, 1 };
/* FIXME: allow using a different deemphasis */
if (deemphasis_50)
deemphasis[1] = 2;
for (i = 0; i < ARRAY_SIZE(fm_mode); i++)
tuner_i2c_xfer_send(&priv->i2c_props, fm_mode[i].seq, 2);
tuner_i2c_xfer_send(&priv->i2c_props, deemphasis, 2);
} else {
expert_mode[1] = priv->tda8290_easy_mode + 0x80;
tuner_i2c_xfer_send(&priv->i2c_props, expert_mode, 2);
tuner_i2c_xfer_send(&priv->i2c_props, gainset_off, 2);
tuner_i2c_xfer_send(&priv->i2c_props, if_agc_spd, 2);
if (priv->tda8290_easy_mode & 0x60)
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_9, 2);
else
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_6, 2);
tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
}
tda8290_i2c_bridge(fe, 1);
if (fe->ops.tuner_ops.set_analog_params)
fe->ops.tuner_ops.set_analog_params(fe, params);
for (i = 0; i < 3; i++) {
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1, &pll_stat, 1);
if (pll_stat & 0x80) {
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_adc_sat, 1,
&adc_sat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_agc_stat, 1,
&agc_stat, 1);
tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
break;
} else {
tuner_dbg("tda8290 not locked, no signal?\n");
msleep(100);
}
}
/* adjust headroom and/or gain */
if ((agc_stat > 115) || (!(pll_stat & 0x80) && (adc_sat < 20))) {
tuner_dbg("adjust gain, step 1. Agc: %d, ADC stat: %d, lock: %d\n",
agc_stat, adc_sat, pll_stat & 0x80);
tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
msleep(100);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_agc_stat, 1, &agc_stat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1, &pll_stat, 1);
if ((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
agc_stat, pll_stat & 0x80);
if (priv->cfg.agcf)
priv->cfg.agcf(fe);
msleep(100);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_agc_stat, 1,
&agc_stat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1,
&pll_stat, 1);
if((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_low, 2);
msleep(100);
}
}
}
/* SECAM L/L' deadlock? */
if(priv->tda8290_easy_mode & 0x60) {
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_adc_sat, 1,
&adc_sat, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&addr_pll_stat, 1,
&pll_stat, 1);
if ((adc_sat > 20) || !(pll_stat & 0x80)) {
tuner_dbg("trying to resolve SECAM L deadlock\n");
tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
msleep(40);
tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_off, 2);
}
}
tda8290_i2c_bridge(fe, 0);
tuner_i2c_xfer_send(&priv->i2c_props, if_agc_set, 2);
}
/*---------------------------------------------------------------------*/
static void tda8295_power(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01;
else
buf[1] = 0x03;
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
}
static void tda8295_set_easy_mode(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x01, 0x00 };
tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
else
buf[1] = 0x00; /* reset active bit */
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
}
static void tda8295_set_video_std(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x00, priv->tda8290_easy_mode };
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
tda8295_set_easy_mode(fe, 1);
msleep(20);
tda8295_set_easy_mode(fe, 0);
}
/*---------------------------------------------------------------------*/
static void tda8295_agc1_out(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
if (enable)
buf[1] &= ~0x40;
else
buf[1] |= 0x40;
tuner_i2c_xfer_send(&priv->i2c_props, buf, 2);
}
static void tda8295_agc2_out(struct dvb_frontend *fe, int enable)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char set_gpio_cf[] = { 0x44, 0x00 };
unsigned char set_gpio_val[] = { 0x46, 0x00 };
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&set_gpio_val[0], 1, &set_gpio_val[1], 1);
set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
if (enable) {
set_gpio_cf[1] |= 0x01; /* config GPIO_0 as Open Drain Out */
set_gpio_val[1] &= 0xfe; /* set GPIO_0 pin low */
}
tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_cf, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_val, 2);
}
static int tda8295_has_signal(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char hvpll_stat = 0x26;
unsigned char ret;
tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
return (ret & 0x01) ? 65535 : 0;
}
/*---------------------------------------------------------------------*/
static void tda8295_set_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char blanking_mode[] = { 0x1d, 0x00 };
set_audio(fe, params);
tuner_dbg("%s: freq = %d\n", __func__, params->frequency);
tda8295_power(fe, 1);
tda8295_agc1_out(fe, 1);
tuner_i2c_xfer_send_recv(&priv->i2c_props,
&blanking_mode[0], 1, &blanking_mode[1], 1);
tda8295_set_video_std(fe);
blanking_mode[1] = 0x03;
tuner_i2c_xfer_send(&priv->i2c_props, blanking_mode, 2);
msleep(20);
tda8295_i2c_bridge(fe, 1);
if (fe->ops.tuner_ops.set_analog_params)
fe->ops.tuner_ops.set_analog_params(fe, params);
if (priv->cfg.agcf)
priv->cfg.agcf(fe);
if (tda8295_has_signal(fe))
tuner_dbg("tda8295 is locked\n");
else
tuner_dbg("tda8295 not locked, no signal?\n");
tda8295_i2c_bridge(fe, 0);
}
/*---------------------------------------------------------------------*/
static int tda8290_has_signal(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char i2c_get_afc[1] = { 0x1B };
unsigned char afc = 0;
tuner_i2c_xfer_send_recv(&priv->i2c_props,
i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
return (afc & 0x80)? 65535:0;
}
/*---------------------------------------------------------------------*/
static void tda8290_standby(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char cb1[] = { 0x30, 0xD0 };
unsigned char tda8290_standby[] = { 0x00, 0x02 };
unsigned char tda8290_agc_tri[] = { 0x02, 0x20 };
struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0, .buf=cb1, .len = 2};
tda8290_i2c_bridge(fe, 1);
if (priv->ver & TDA8275A)
cb1[1] = 0x90;
i2c_transfer(priv->i2c_props.adap, &msg, 1);
tda8290_i2c_bridge(fe, 0);
tuner_i2c_xfer_send(&priv->i2c_props, tda8290_agc_tri, 2);
tuner_i2c_xfer_send(&priv->i2c_props, tda8290_standby, 2);
}
static void tda8295_standby(struct dvb_frontend *fe)
{
tda8295_agc1_out(fe, 0); /* Put AGC in tri-state */
tda8295_power(fe, 0);
}
static void tda8290_init_if(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char set_VS[] = { 0x30, 0x6F };
unsigned char set_GP00_CF[] = { 0x20, 0x01 };
unsigned char set_GP01_CF[] = { 0x20, 0x0B };
if ((priv->cfg.config == 1) || (priv->cfg.config == 2))
tuner_i2c_xfer_send(&priv->i2c_props, set_GP00_CF, 2);
else
tuner_i2c_xfer_send(&priv->i2c_props, set_GP01_CF, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_VS, 2);
}
static void tda8295_init_if(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
static unsigned char set_adc_ctl[] = { 0x33, 0x14 };
static unsigned char set_adc_ctl2[] = { 0x34, 0x00 };
static unsigned char set_pll_reg6[] = { 0x3e, 0x63 };
static unsigned char set_pll_reg0[] = { 0x38, 0x23 };
static unsigned char set_pll_reg7[] = { 0x3f, 0x01 };
static unsigned char set_pll_reg10[] = { 0x42, 0x61 };
static unsigned char set_gpio_reg0[] = { 0x44, 0x0b };
tda8295_power(fe, 1);
tda8295_set_easy_mode(fe, 0);
tda8295_set_video_std(fe);
tuner_i2c_xfer_send(&priv->i2c_props, set_adc_ctl, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_adc_ctl2, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg6, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg0, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg7, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg10, 2);
tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_reg0, 2);
tda8295_agc1_out(fe, 0);
tda8295_agc2_out(fe, 0);
}
static void tda8290_init_tuner(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
unsigned char tda8275_init[] = { 0x00, 0x00, 0x00, 0x40, 0xdC, 0x04, 0xAf,
0x3F, 0x2A, 0x04, 0xFF, 0x00, 0x00, 0x40 };
unsigned char tda8275a_init[] = { 0x00, 0x00, 0x00, 0x00, 0xdC, 0x05, 0x8b,
0x0c, 0x04, 0x20, 0xFF, 0x00, 0x00, 0x4b };
struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0,
.buf=tda8275_init, .len = 14};
if (priv->ver & TDA8275A)
msg.buf = tda8275a_init;
tda8290_i2c_bridge(fe, 1);
i2c_transfer(priv->i2c_props.adap, &msg, 1);
tda8290_i2c_bridge(fe, 0);
}
/*---------------------------------------------------------------------*/
static void tda829x_release(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
/* only try to release the tuner if we've
* attached it from within this module */
if (priv->ver & (TDA18271 | TDA8275 | TDA8275A))
if (fe->ops.tuner_ops.release)
fe->ops.tuner_ops.release(fe);
kfree(fe->analog_demod_priv);
fe->analog_demod_priv = NULL;
}
static struct tda18271_config tda829x_tda18271_config = {
.gate = TDA18271_GATE_ANALOG,
};
static int tda829x_find_tuner(struct dvb_frontend *fe)
{
struct tda8290_priv *priv = fe->analog_demod_priv;
struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
int i, ret, tuners_found;
u32 tuner_addrs;
u8 data;
struct i2c_msg msg = { .flags = I2C_M_RD, .buf = &data, .len = 1 };
if (!analog_ops->i2c_gate_ctrl) {
printk(KERN_ERR "tda8290: no gate control were provided!\n");
return -EINVAL;
}
analog_ops->i2c_gate_ctrl(fe, 1);
/* probe for tuner chip */
tuners_found = 0;
tuner_addrs = 0;
for (i = 0x60; i <= 0x63; i++) {
msg.addr = i;
ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
if (ret == 1) {
tuners_found++;
tuner_addrs = (tuner_addrs << 8) + i;
}
}
/* if there is more than one tuner, we expect the right one to be
behind the bridge, and we choose the highest address that doesn't
give a response now
*/
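/* Worked example (illustrative): if tuners answered at 0x60 and 0x61,
tuner_addrs is now 0x6061; with the gate closed, the loop below strips
the low byte for every address that still responds, leaving the one
hidden behind the bridge. */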
analog_ops->i2c_gate_ctrl(fe, 0);
if (tuners_found > 1)
for (i = 0; i < tuners_found; i++) {
msg.addr = tuner_addrs & 0xff;
ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
if (ret == 1)
tuner_addrs = tuner_addrs >> 8;
else
break;
}
if (tuner_addrs == 0) {
tuner_addrs = 0x60;
tuner_info("could not clearly identify tuner address, "
"defaulting to %x\n", tuner_addrs);
} else {
tuner_addrs = tuner_addrs & 0xff;
tuner_info("setting tuner address to %x\n", tuner_addrs);
}
priv->tda827x_addr = tuner_addrs;
msg.addr = tuner_addrs;
analog_ops->i2c_gate_ctrl(fe, 1);
ret = i2c_transfer(priv->i2c_props.adap, &msg, 1);
if (ret != 1) {
tuner_warn("tuner access failed!\n");
analog_ops->i2c_gate_ctrl(fe, 0);
return -EREMOTEIO;
}
if ((data == 0x83) || (data == 0x84)) {
priv->ver |= TDA18271;
tda829x_tda18271_config.config = priv->cfg.config;
dvb_attach(tda18271_attach, fe, priv->tda827x_addr,
priv->i2c_props.adap, &tda829x_tda18271_config);
} else {
if ((data & 0x3c) == 0)
priv->ver |= TDA8275;
else
priv->ver |= TDA8275A;
dvb_attach(tda827x_attach, fe, priv->tda827x_addr,
priv->i2c_props.adap, &priv->cfg);
priv->cfg.switch_addr = priv->i2c_props.addr;
}
if (fe->ops.tuner_ops.init)
fe->ops.tuner_ops.init(fe);
if (fe->ops.tuner_ops.sleep)
fe->ops.tuner_ops.sleep(fe);
analog_ops->i2c_gate_ctrl(fe, 0);
return 0;
}
static int tda8290_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8290_ID 0x89
u8 reg = 0x1f, id;
struct i2c_msg msg_read[] = {
{ .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
{ .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
/* detect tda8290 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
if (id == TDA8290_ID) {
if (debug)
printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
__func__, i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
return -ENODEV;
}
static int tda8295_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8295_ID 0x8a
#define TDA8295C2_ID 0x8b
u8 reg = 0x2f, id;
struct i2c_msg msg_read[] = {
{ .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
{ .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
/* detect tda8295 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
if ((id & 0xfe) == TDA8295_ID) {
if (debug)
printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
__func__, (id == TDA8295_ID) ?
"tda8295c1" : "tda8295c2",
i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
return -ENODEV;
}
static struct analog_demod_ops tda8290_ops = {
.set_params = tda8290_set_params,
.has_signal = tda8290_has_signal,
.standby = tda8290_standby,
.release = tda829x_release,
.i2c_gate_ctrl = tda8290_i2c_bridge,
};
static struct analog_demod_ops tda8295_ops = {
.set_params = tda8295_set_params,
.has_signal = tda8295_has_signal,
.standby = tda8295_standby,
.release = tda829x_release,
.i2c_gate_ctrl = tda8295_i2c_bridge,
};
struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c_adap, u8 i2c_addr,
struct tda829x_config *cfg)
{
struct tda8290_priv *priv = NULL;
char *name;
priv = kzalloc(sizeof(struct tda8290_priv), GFP_KERNEL);
if (priv == NULL)
return NULL;
fe->analog_demod_priv = priv;
priv->i2c_props.addr = i2c_addr;
priv->i2c_props.adap = i2c_adap;
priv->i2c_props.name = "tda829x";
if (cfg)
priv->cfg.config = cfg->lna_cfg;
if (tda8290_probe(&priv->i2c_props) == 0) {
priv->ver = TDA8290;
memcpy(&fe->ops.analog_ops, &tda8290_ops,
sizeof(struct analog_demod_ops));
}
if (tda8295_probe(&priv->i2c_props) == 0) {
priv->ver = TDA8295;
memcpy(&fe->ops.analog_ops, &tda8295_ops,
sizeof(struct analog_demod_ops));
}
if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
tda8295_power(fe, 1);
if (tda829x_find_tuner(fe) < 0)
goto fail;
}
switch (priv->ver) {
case TDA8290:
name = "tda8290";
break;
case TDA8295:
name = "tda8295";
break;
case TDA8290 | TDA8275:
name = "tda8290+75";
break;
case TDA8295 | TDA8275:
name = "tda8295+75";
break;
case TDA8290 | TDA8275A:
name = "tda8290+75a";
break;
case TDA8295 | TDA8275A:
name = "tda8295+75a";
break;
case TDA8290 | TDA18271:
name = "tda8290+18271";
break;
case TDA8295 | TDA18271:
name = "tda8295+18271";
break;
default:
goto fail;
}
tuner_info("type set to %s\n", name);
fe->ops.analog_ops.info.name = name;
if (priv->ver & TDA8290) {
if (priv->ver & (TDA8275 | TDA8275A))
tda8290_init_tuner(fe);
tda8290_init_if(fe);
} else if (priv->ver & TDA8295)
tda8295_init_if(fe);
return fe;
fail:
memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
tda829x_release(fe);
return NULL;
}
EXPORT_SYMBOL_GPL(tda829x_attach);
int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
struct tuner_i2c_props i2c_props = {
.adap = i2c_adap,
.addr = i2c_addr,
};
unsigned char soft_reset[] = { 0x00, 0x00 };
unsigned char easy_mode_b[] = { 0x01, 0x02 };
unsigned char easy_mode_g[] = { 0x01, 0x04 };
unsigned char restore_9886[] = { 0x00, 0xd6, 0x30 };
unsigned char addr_dto_lsb = 0x07;
unsigned char data;
#define PROBE_BUFFER_SIZE 8
unsigned char buf[PROBE_BUFFER_SIZE];
int i;
/* rule out tda9887, which would return the same byte repeatedly */
tuner_i2c_xfer_send_recv(&i2c_props,
soft_reset, 1, buf, PROBE_BUFFER_SIZE);
for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
if (buf[i] != buf[0])
break;
}
/* all bytes are equal, not a tda829x - probably a tda9887 */
if (i == PROBE_BUFFER_SIZE)
return -ENODEV;
if ((tda8290_probe(&i2c_props) == 0) ||
(tda8295_probe(&i2c_props) == 0))
return 0;
/* fall back to old probing method */
tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
if (data == 0) {
tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
tuner_i2c_xfer_send_recv(&i2c_props,
&addr_dto_lsb, 1, &data, 1);
if (data == 0x7b) {
return 0;
}
}
tuner_i2c_xfer_send(&i2c_props, restore_9886, 3);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(tda829x_probe);
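/*
* Usage sketch (illustrative): a bridge driver would typically probe the
* address first and then attach, e.g.
*
* struct tda829x_config cfg = { .probe_tuner = TDA829X_PROBE_TUNER };
*
* if (tda829x_probe(adapter, 0x4b) == 0)
* dvb_attach(tda829x_attach, fe, adapter, 0x4b, &cfg);
*
* 0x4b is a commonly used address for this demod; treat it as an
* assumption here.
*/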
MODULE_DESCRIPTION("Philips/NXP TDA8290/TDA8295 analog IF demodulator driver");
MODULE_AUTHOR("Gerd Knorr, Hartmut Hackmann, Michael Krufky");
MODULE_LICENSE("GPL");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
Elektropippo/kernel_852i | arch/parisc/math-emu/sfadd.c | 12202 | 14617 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfadd.c $Revision: 1.1 $
*
* Purpose:
* Single_add: add two single precision values.
*
* External Interfaces:
* sgl_fadd(leftptr, rightptr, dstptr, status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with a overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single_add: add two single precision values.
*/
int
sgl_fadd(
sgl_floating_point *leftptr,
sgl_floating_point *rightptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int left, right, result, extent;
register unsigned int signless_upper_left, signless_upper_right, save;
register int result_exponent, right_exponent, diff_exponent;
register int sign_save, jumpsize;
register boolean inexact = FALSE;
register boolean underflowtrap;
/* Create local copies of the numbers */
left = *leftptr;
right = *rightptr;
/* A zero "save" helps discover equal operands (for later), *
* and is used in swapping operands (if needed). */
Sgl_xortointp1(left,right,/*to*/save);
/*
* check first operand for NaN's or infinity
*/
if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
{
if (Sgl_iszero_mantissa(left))
{
if (Sgl_isnotnan(right))
{
if (Sgl_isinfinity(right) && save!=0)
{
/*
* invalid since operands are opposite signed infinity's
*/
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* return infinity
*/
*dstptr = left;
return(NOEXCEPTION);
}
}
else
{
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(left))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(left);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(right))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(right);
*dstptr = right;
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
*dstptr = left;
return(NOEXCEPTION);
}
} /* End left NaN or Infinity processing */
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(right))
{
if (Sgl_iszero_mantissa(right))
{
/* return infinity */
*dstptr = right;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(right))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(right);
}
/*
* return quiet NaN
*/
*dstptr = right;
return(NOEXCEPTION);
} /* End right NaN or Infinity processing */
/* Invariant: Must be dealing with finite numbers */
/* Compare operands by removing the sign */
Sgl_copytoint_exponentmantissa(left,signless_upper_left);
Sgl_copytoint_exponentmantissa(right,signless_upper_right);
/* sign difference selects add or sub operation. */
if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
{
/* Set the left operand to the larger one by XOR swap *
* First finish the first word using "save" */
Sgl_xorfromintp1(save,right,/*to*/right);
Sgl_xorfromintp1(save,left,/*to*/left);
result_exponent = Sgl_exponent(left);
}
/* Invariant: left is not smaller than right. */
if((right_exponent = Sgl_exponent(right)) == 0)
{
/* Denormalized operands. First look for zeroes */
if(Sgl_iszero_mantissa(right))
{
/* right is zero */
if(Sgl_iszero_exponentmantissa(left))
{
/* Both operands are zeros */
if(Is_rounding_mode(ROUNDMINUS))
{
Sgl_or_signs(left,/*with*/right);
}
else
{
Sgl_and_signs(left,/*with*/right);
}
}
else
{
/* Left is not a zero and must be the result. Trapped
* underflows are signaled if left is denormalized. Result
* is always exact. */
if( (result_exponent == 0) && Is_underflowtrap_enabled() )
{
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(left);
Sgl_leftshiftby1(left);
Sgl_normalize(left,result_exponent);
Sgl_set_sign(left,/*using*/sign_save);
Sgl_setwrapped_exponent(left,result_exponent,unfl);
*dstptr = left;
return(UNDERFLOWEXCEPTION);
}
}
*dstptr = left;
return(NOEXCEPTION);
}
/* Neither are zeroes */
Sgl_clear_sign(right); /* Exponent is already cleared */
if(result_exponent == 0 )
{
/* Both operands are denormalized. The result must be exact
* and is simply calculated. A sum could become normalized and a
* difference could cancel to a true zero. */
if( (/*signed*/int) save < 0 )
{
Sgl_subtract(left,/*minus*/right,/*into*/result);
if(Sgl_iszero_mantissa(result))
{
if(Is_rounding_mode(ROUNDMINUS))
{
Sgl_setone_sign(result);
}
else
{
Sgl_setzero_sign(result);
}
*dstptr = result;
return(NOEXCEPTION);
}
}
else
{
Sgl_addition(left,right,/*into*/result);
if(Sgl_isone_hidden(result))
{
*dstptr = result;
return(NOEXCEPTION);
}
}
if(Is_underflowtrap_enabled())
{
/* need to normalize result */
sign_save = Sgl_signextendedsign(result);
Sgl_leftshiftby1(result);
Sgl_normalize(result,result_exponent);
Sgl_set_sign(result,/*using*/sign_save);
Sgl_setwrapped_exponent(result,result_exponent,unfl);
*dstptr = result;
return(UNDERFLOWEXCEPTION);
}
*dstptr = result;
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denormalized numbers. */
}
else
{
Sgl_clear_signexponent_set_hidden(right);
}
Sgl_clear_exponent_set_hidden(left);
diff_exponent = result_exponent - right_exponent;
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for this
* infrequent case.
*/
if(diff_exponent > SGL_THRESHOLD)
{
diff_exponent = SGL_THRESHOLD;
}
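/* Illustrative note: once the exponent gap exceeds SGL_THRESHOLD, every
additionally shifted-out bit would only feed the sticky logic, so
clamping the shift count cannot change the rounded result. */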
/* Align right operand by shifting to right */
Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
/*and lower to*/extent);
/* Treat sum and difference of the operands separately. */
if( (/*signed*/int) save < 0 )
{
/*
* Difference of the two operands. There can be no overflow. A
* borrow can occur out of the hidden bit and force a post
* normalization phase.
*/
Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
if(Sgl_iszero_hidden(result))
{
/* Handle normalization */
/* A straightforward algorithm would now shift the result
* and extension left until the hidden bit becomes one. Not
* all of the extension bits need to participate in the shift.
* Only the two most significant bits (round and guard) are
* needed. If only a single shift is needed then the guard
* bit becomes a significant low order bit and the extension
* must participate in the rounding. If more than a single
* shift is needed, then all bits to the right of the guard
* bit are zeros, and the guard bit may or may not be zero. */
sign_save = Sgl_signextendedsign(result);
Sgl_leftshiftby1_withextent(result,extent,result);
/* Need to check for a zero result. The sign and exponent
* fields have already been zeroed. The more efficient test
* of the full object can be used.
*/
if(Sgl_iszero(result))
/* Must have been "x-x" or "x+(-x)". */
{
if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
*dstptr = result;
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if(Sgl_isone_hidden(result))
{
if(result_exponent==0)
{
/* Denormalized, exponent should be zero. Left operand *
* was normalized, so extent (guard, round) was zero */
goto underflow;
}
else
{
/* No further normalization is needed. */
Sgl_set_sign(result,/*using*/sign_save);
Ext_leftshiftby1(extent);
goto round;
}
}
/* Check for denormalized, exponent should be zero. Left *
* operand was normalized, so extent (guard, round) was zero */
if(!(underflowtrap = Is_underflowtrap_enabled()) &&
result_exponent==0) goto underflow;
/* Shift extension to complete one bit of normalization and
* update exponent. */
Ext_leftshiftby1(extent);
/* Discover first one bit to determine shift amount. Use a
* modified binary search. We have already shifted the result
* one position right and still not found a one so the remainder
* of the extension must be zero and simplifies rounding. */
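/* Worked example (illustrative, assuming the Sgl_* helpers test the
hidden bit plus the named number of high mantissa bits): with the
leading one 5 places below the hidden bit, the byte scan finds a
non-zero byte and does nothing, the nibble test shifts by 4, and
jumpsize then selects the final shift by 1. */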
/* Scan bytes */
while(Sgl_iszero_hiddenhigh7mantissa(result))
{
Sgl_leftshiftby8(result);
if((result_exponent -= 8) <= 0 && !underflowtrap)
goto underflow;
}
/* Now narrow it down to the nibble */
if(Sgl_iszero_hiddenhigh3mantissa(result))
{
/* The lower nibble contains the normalizing one */
Sgl_leftshiftby4(result);
if((result_exponent -= 4) <= 0 && !underflowtrap)
goto underflow;
}
/* Select case were first bit is set (already normalized)
* otherwise select the proper shift. */
if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
{
/* Already normalized */
if(result_exponent <= 0) goto underflow;
Sgl_set_sign(result,/*using*/sign_save);
Sgl_set_exponent(result,/*using*/result_exponent);
*dstptr = result;
return(NOEXCEPTION);
}
Sgl_sethigh4bits(result,/*using*/sign_save);
switch(jumpsize)
{
case 1:
{
Sgl_leftshiftby3(result);
result_exponent -= 3;
break;
}
case 2:
case 3:
{
Sgl_leftshiftby2(result);
result_exponent -= 2;
break;
}
case 4:
case 5:
case 6:
case 7:
{
Sgl_leftshiftby1(result);
result_exponent -= 1;
break;
}
}
if(result_exponent > 0)
{
Sgl_set_exponent(result,/*using*/result_exponent);
*dstptr = result;
return(NOEXCEPTION); /* Sign bit is already set */
}
/* Fixup potential underflows */
underflow:
if(Is_underflowtrap_enabled())
{
Sgl_set_sign(result,sign_save);
Sgl_setwrapped_exponent(result,result_exponent,unfl);
*dstptr = result;
/* inexact = FALSE; */
return(UNDERFLOWEXCEPTION);
}
/*
* Since we cannot get an inexact denormalized result,
* we can now return.
*/
Sgl_right_align(result,/*by*/(1-result_exponent),extent);
Sgl_clear_signexponent(result);
Sgl_set_sign(result,sign_save);
*dstptr = result;
return(NOEXCEPTION);
} /* end if(hidden...)... */
/* Fall through and round */
} /* end if(save < 0)... */
else
{
/* Add magnitudes */
Sgl_addition(left,right,/*to*/result);
if(Sgl_isone_hiddenoverflow(result))
{
/* Prenormalization required. */
Sgl_rightshiftby1_withextent(result,extent,extent);
Sgl_arithrightshiftby1(result);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
/* Round the result. If the extension is all zeros, then the result is
* exact. Otherwise round in the correct direction. No underflow is
* possible. If a postnormalization is necessary, then the mantissa is
* all zeros so no shift is needed. */
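/* Illustrative note: in ROUNDNEAREST below, a set extension sign bit
means the discarded part is at least 1/2 ulp; incrementing only when
the lower extension bits are non-zero or the low mantissa bit is one
implements round-half-to-even. */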
round:
if(Ext_isnotzero(extent))
{
inexact = TRUE;
switch(Rounding_mode())
{
case ROUNDNEAREST: /* The default. */
if(Ext_isone_sign(extent))
{
/* at least 1/2 ulp */
if(Ext_isnotzero_lower(extent) ||
Sgl_isone_lowmantissa(result))
{
/* either exactly half way and odd or more than 1/2ulp */
Sgl_increment(result);
}
}
break;
case ROUNDPLUS:
if(Sgl_iszero_sign(result))
{
/* Round up positive results */
Sgl_increment(result);
}
break;
case ROUNDMINUS:
if(Sgl_isone_sign(result))
{
/* Round down negative results */
Sgl_increment(result);
}
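/* fall through: ROUNDZERO truncates, nothing more to do */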
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
}
if(result_exponent == SGL_INFINITY_EXPONENT)
{
/* Overflow */
if(Is_overflowtrap_enabled())
{
Sgl_setwrapped_exponent(result,result_exponent,ovfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
else
{
Set_overflowflag();
inexact = TRUE;
Sgl_setoverflow(result);
}
}
else Sgl_set_exponent(result,result_exponent);
*dstptr = result;
if(inexact)
if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
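/*
* Usage sketch (illustrative): the caller owns the operands and the
* status word, e.g.
*
* sgl_floating_point a, b, r;
* unsigned int status = 0;
* int trap = sgl_fadd(&a, &b, &r, &status);
*
* A non-zero return names the trap to raise (e.g. OVERFLOWEXCEPTION);
* NOEXCEPTION means r holds the correctly rounded sum.
*/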
| gpl-2.0 |
chijure/android_kernel_lge_vee1 | arch/arm/mach-msm/sdio_smem.c | 427 | 3951 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <mach/sdio_al.h>
#include <mach/sdio_smem.h>
static void sdio_smem_read(struct work_struct *work);
static struct sdio_channel *channel;
static struct workqueue_struct *workq;
static DECLARE_WORK(work_read, sdio_smem_read);
static DECLARE_WAIT_QUEUE_HEAD(waitq);
static int bytes_avail;
static int sdio_ch_opened;
static void sdio_smem_release(struct device *dev)
{
pr_debug("sdio smem released\n");
}
static struct sdio_smem_client client;
static void sdio_smem_read(struct work_struct *work)
{
int err;
int read_avail;
char *data = client.buf;
if (!sdio_ch_opened)
return;
read_avail = sdio_read_avail(channel);
if (read_avail > bytes_avail ||
read_avail < 0) {
pr_err("Error: read_avail=%d bytes_avail=%d\n",
read_avail, bytes_avail);
goto read_err;
}
if (read_avail == 0)
return;
err = sdio_read(channel,
&data[client.size - bytes_avail],
read_avail);
if (err) {
pr_err("sdio_read error (%d)", err);
goto read_err;
}
bytes_avail -= read_avail;
pr_debug("read %d bytes (bytes_avail = %d)\n",
read_avail, bytes_avail);
if (!bytes_avail) {
bytes_avail = client.size;
err = client.cb_func(SDIO_SMEM_EVENT_READ_DONE);
}
if (err)
pr_err("error (%d) on callback\n", err);
return;
read_err:
if (sdio_ch_opened)
client.cb_func(SDIO_SMEM_EVENT_READ_ERR);
return;
}
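/* Illustrative note: bytes_avail counts down from client.size as the
buffer fills, so &data[client.size - bytes_avail] above is always the
first unwritten byte; when it reaches zero the READ_DONE callback fires
and the counter is rearmed for the next buffer. */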
static void sdio_smem_notify(void *priv, unsigned event)
{
pr_debug("%d event received\n", event);
if (event == SDIO_EVENT_DATA_READ_AVAIL ||
event == SDIO_EVENT_DATA_WRITE_AVAIL)
queue_work(workq, &work_read);
}
int sdio_smem_register_client(void)
{
int err = 0;
if (!client.buf || !client.size || !client.cb_func)
return -EINVAL;
pr_debug("buf = %p\n", client.buf);
pr_debug("size = 0x%x\n", client.size);
bytes_avail = client.size;
workq = create_singlethread_workqueue("sdio_smem");
if (!workq)
return -ENOMEM;
sdio_ch_opened = 1;
err = sdio_open("SDIO_SMEM", &channel, NULL, sdio_smem_notify);
if (err) {
sdio_ch_opened = 0;
pr_err("sdio_open error (%d)\n", err);
destroy_workqueue(workq);
return err;
}
pr_debug("SDIO SMEM channel opened\n");
return err;
}
int sdio_smem_unregister_client(void)
{
int err = 0;
sdio_ch_opened = 0;
err = sdio_close(channel);
if (err) {
pr_err("sdio_close error (%d)\n", err);
return err;
}
pr_debug("SDIO SMEM channel closed\n");
flush_workqueue(workq);
destroy_workqueue(workq);
bytes_avail = 0;
client.buf = NULL;
client.cb_func = NULL;
client.size = 0;
return 0;
}
static int sdio_smem_probe(struct platform_device *pdev)
{
client.plat_dev.name = "SDIO_SMEM_CLIENT";
client.plat_dev.id = -1;
client.plat_dev.dev.release = sdio_smem_release;
return platform_device_register(&client.plat_dev);
}
static int sdio_smem_remove(struct platform_device *pdev)
{
platform_device_unregister(&client.plat_dev);
memset(&client, 0, sizeof(client));
sdio_ch_opened = 0;
return 0;
}
static struct platform_driver sdio_smem_drv = {
.probe = sdio_smem_probe,
.remove = sdio_smem_remove,
.driver = {
.name = "SDIO_SMEM",
.owner = THIS_MODULE,
},
};
static int __init sdio_smem_init(void)
{
return platform_driver_register(&sdio_smem_drv);
};
module_init(sdio_smem_init);
MODULE_DESCRIPTION("SDIO SMEM");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
iamroot12D/linux | drivers/irqchip/irq-keystone.c | 427 | 6004 | /*
* Texas Instruments Keystone IRQ controller IP driver
*
* Copyright (C) 2014 Texas Instruments, Inc.
* Author: Sajesh Kumar Saran <sajesh@ti.com>
* Grygorii Strashko <grygorii.strashko@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include "irqchip.h"
/* The source ID bits occupy bits 4 to 31 (28 bits total) */
#define BIT_OFS 4
#define KEYSTONE_N_IRQ (32 - BIT_OFS)
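/* Illustrative note: a raw status word of 0x30 (bits 4 and 5 set)
becomes pending = 0x3 after the >> BIT_OFS shift in the handler, i.e.
hwirqs 0 and 1 (before the mask filter is applied). */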
struct keystone_irq_device {
struct device *dev;
struct irq_chip chip;
u32 mask;
int irq;
struct irq_domain *irqd;
struct regmap *devctrl_regs;
u32 devctrl_offset;
};
static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
{
int ret;
u32 val = 0;
ret = regmap_read(kirq->devctrl_regs, kirq->devctrl_offset, &val);
if (ret < 0)
dev_dbg(kirq->dev, "irq read failed ret(%d)\n", ret);
return val;
}
static inline void
keystone_irq_writel(struct keystone_irq_device *kirq, u32 value)
{
int ret;
ret = regmap_write(kirq->devctrl_regs, kirq->devctrl_offset, value);
if (ret < 0)
dev_dbg(kirq->dev, "irq write failed ret(%d)\n", ret);
}
static void keystone_irq_setmask(struct irq_data *d)
{
struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
kirq->mask |= BIT(d->hwirq);
dev_dbg(kirq->dev, "mask %lu [%x]\n", d->hwirq, kirq->mask);
}
static void keystone_irq_unmask(struct irq_data *d)
{
struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
kirq->mask &= ~BIT(d->hwirq);
dev_dbg(kirq->dev, "unmask %lu [%x]\n", d->hwirq, kirq->mask);
}
static void keystone_irq_ack(struct irq_data *d)
{
/* nothing to do here */
}
static void keystone_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
unsigned long pending;
int src, virq;
dev_dbg(kirq->dev, "start irq %d\n", irq);
chained_irq_enter(irq_desc_get_chip(desc), desc);
pending = keystone_irq_readl(kirq);
keystone_irq_writel(kirq, pending);
dev_dbg(kirq->dev, "pending 0x%lx, mask 0x%x\n", pending, kirq->mask);
pending = (pending >> BIT_OFS) & ~kirq->mask;
dev_dbg(kirq->dev, "pending after mask 0x%lx\n", pending);
for (src = 0; src < KEYSTONE_N_IRQ; src++) {
if (BIT(src) & pending) {
virq = irq_find_mapping(kirq->irqd, src);
dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n",
src, virq);
if (!virq)
dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n",
src, virq);
generic_handle_irq(virq);
}
}
chained_irq_exit(irq_desc_get_chip(desc), desc);
dev_dbg(kirq->dev, "end irq %d\n", irq);
}
static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct keystone_irq_device *kirq = h->host_data;
irq_set_chip_data(virq, kirq);
irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
return 0;
}
static struct irq_domain_ops keystone_irq_ops = {
.map = keystone_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static int keystone_irq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct keystone_irq_device *kirq;
int ret;
if (np == NULL)
return -EINVAL;
kirq = devm_kzalloc(dev, sizeof(*kirq), GFP_KERNEL);
if (!kirq)
return -ENOMEM;
kirq->devctrl_regs =
syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
if (IS_ERR(kirq->devctrl_regs))
return PTR_ERR(kirq->devctrl_regs);
ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
&kirq->devctrl_offset);
if (ret) {
dev_err(dev, "couldn't read the devctrl_offset offset!\n");
return ret;
}
kirq->irq = platform_get_irq(pdev, 0);
if (kirq->irq < 0) {
dev_err(dev, "no irq resource %d\n", kirq->irq);
return kirq->irq;
}
kirq->dev = dev;
kirq->mask = ~0x0;
kirq->chip.name = "keystone-irq";
kirq->chip.irq_ack = keystone_irq_ack;
kirq->chip.irq_mask = keystone_irq_setmask;
kirq->chip.irq_unmask = keystone_irq_unmask;
kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ,
&keystone_irq_ops, kirq);
if (!kirq->irqd) {
dev_err(dev, "IRQ domain registration failed\n");
return -ENODEV;
}
platform_set_drvdata(pdev, kirq);
irq_set_chained_handler(kirq->irq, keystone_irq_handler);
irq_set_handler_data(kirq->irq, kirq);
/* clear all source bits */
keystone_irq_writel(kirq, ~0x0);
dev_info(dev, "irqchip registered, nr_irqs %u\n", KEYSTONE_N_IRQ);
return 0;
}
static int keystone_irq_remove(struct platform_device *pdev)
{
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
irq_domain_remove(kirq->irqd);
return 0;
}
static const struct of_device_id keystone_irq_dt_ids[] = {
{ .compatible = "ti,keystone-irq", },
{},
};
MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids);
static struct platform_driver keystone_irq_device_driver = {
.probe = keystone_irq_probe,
.remove = keystone_irq_remove,
.driver = {
.name = "keystone_irq",
.of_match_table = of_match_ptr(keystone_irq_dt_ids),
}
};
module_platform_driver(keystone_irq_device_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_AUTHOR("Sajesh Kumar Saran");
MODULE_AUTHOR("Grygorii Strashko");
MODULE_DESCRIPTION("Keystone IRQ chip");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
omor1/linux-430 | drivers/pci/hotplug/sgi_hotplug.c | 427 | 18383 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2006 Silicon Graphics, Inc. All rights reserved.
*
* This work was based on the 2.4/2.6 kernel development by Dick Reigner.
* Work to add BIOS PROM support was completed by Mike Habeck.
*/
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/types.h>
#include <asm/sn/acpi.h>
#include "../pci.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
#define PCI_L1_ERR 7 /* L1 console command error */
#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
#define PCIIO_ASIC_TYPE_TIOCA 4
#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
#define SN_SLOT_NAME_SIZE 33 /* size of name string */
/* internal list head */
static struct list_head sn_hp_list;
/* hotplug_slot struct's private pointer */
struct slot {
int device_num;
struct pci_bus *pci_bus;
/* this struct is for hotplug glue internal use only */
struct hotplug_slot *hotplug_slot;
struct list_head hp_list;
char physical_path[SN_SLOT_NAME_SIZE];
};
struct pcibr_slot_enable_resp {
int resp_sub_errno;
char resp_l1_msg[PCI_L1_QSIZE + 1];
};
struct pcibr_slot_disable_resp {
int resp_sub_errno;
char resp_l1_msg[PCI_L1_QSIZE + 1];
};
enum sn_pci_req_e {
PCI_REQ_SLOT_ELIGIBLE,
PCI_REQ_SLOT_DISABLE
};
static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
};
static DEFINE_MUTEX(sn_hotplug_mutex);
static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
{
int retval = -ENOENT;
struct slot *slot = pci_slot->hotplug->private;
if (!slot)
return retval;
retval = sprintf(buf, "%s\n", slot->physical_path);
return retval;
}
static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
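/*
 * For reference, __ATTR_RO(path) ties the sysfs file name "path" to the
 * path_show() method above; it expands to roughly this explicit
 * initializer (sketch; the exact macro lives in <linux/sysfs.h>):
 *
 *	static struct pci_slot_attribute sn_slot_path_attr = {
 *		.attr = { .name = "path", .mode = 0444 },
 *		.show = path_show,
 *	};
 */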
static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device)
{
struct pcibus_info *pcibus_info;
u16 busnum, segment, ioboard_type;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
/* Check to see if this is a valid slot on 'pci_bus' */
if (!(pcibus_info->pbi_valid_devices & (1 << device)))
return -EPERM;
ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
busnum = pcibus_info->pbi_buscommon.bs_persist_busnum;
segment = pci_domain_nr(pci_bus) & 0xf;
/* Do not allow hotplug operations on base I/O cards */
if ((ioboard_type == L1_BRICKTYPE_IX ||
ioboard_type == L1_BRICKTYPE_IA) &&
(segment == 1 && busnum == 0 && device != 1))
return -EPERM;
return 1;
}
static int sn_pci_bus_valid(struct pci_bus *pci_bus)
{
struct pcibus_info *pcibus_info;
u32 asic_type;
u16 ioboard_type;
/* Don't register slots hanging off the TIOCA bus */
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
asic_type = pcibus_info->pbi_buscommon.bs_asic_type;
if (asic_type == PCIIO_ASIC_TYPE_TIOCA)
return -EPERM;
/* Only register slots in I/O Bricks that support hotplug */
ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
switch (ioboard_type) {
case L1_BRICKTYPE_IX:
case L1_BRICKTYPE_PX:
case L1_BRICKTYPE_IA:
case L1_BRICKTYPE_PA:
case L1_BOARDTYPE_PCIX3SLOT:
return 1;
default:
return -EPERM;
}
return -EIO;
}
static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
struct pci_bus *pci_bus, int device,
char *name)
{
struct pcibus_info *pcibus_info;
struct slot *slot;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
bss_hotplug_slot->private = slot;
slot->device_num = device;
slot->pci_bus = pci_bus;
sprintf(name, "%04x:%02x:%02x",
pci_domain_nr(pci_bus),
((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
device + 1);
sn_generate_path(pci_bus, slot->physical_path);
slot->hotplug_slot = bss_hotplug_slot;
list_add(&slot->hp_list, &sn_hp_list);
return 0;
}
static struct hotplug_slot *sn_hp_destroy(void)
{
struct slot *slot;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot = NULL;
list_for_each_entry(slot, &sn_hp_list, hp_list) {
bss_hotplug_slot = slot->hotplug_slot;
pci_slot = bss_hotplug_slot->pci_slot;
list_del(&((struct slot *)bss_hotplug_slot->private)->
hp_list);
sysfs_remove_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
break;
}
return bss_hotplug_slot;
}
static void sn_bus_free_data(struct pci_dev *dev)
{
struct pci_bus *subordinate_bus;
struct pci_dev *child;
/* Recursively clean up sn_irq_info structs */
if (dev->subordinate) {
subordinate_bus = dev->subordinate;
list_for_each_entry(child, &subordinate_bus->devices, bus_list)
sn_bus_free_data(child);
}
/*
* Some drivers may use dma accesses during the
* driver remove function. We release the sysdata
* areas after the driver remove functions have
* been called.
*/
sn_bus_store_sysdata(dev);
sn_pci_unfixup_slot(dev);
}
static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
int device_num, char **ssdt)
{
struct slot *slot = bss_hotplug_slot->private;
struct pcibus_info *pcibus_info;
struct pcibr_slot_enable_resp resp;
int rc;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
/*
* Power-on and initialize the slot in the SN
* PCI infrastructure.
*/
rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
if (rc == PCI_SLOT_ALREADY_UP) {
dev_dbg(&slot->pci_bus->self->dev, "is already active\n");
return 1; /* return 1 to user */
}
if (rc == PCI_L1_ERR) {
dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message: %s",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if (rc) {
dev_dbg(&slot->pci_bus->self->dev, "insert failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
pcibus_info->pbi_enabled_devices |= (1 << device_num);
return 0;
}
static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
int device_num, int action)
{
struct slot *slot = bss_hotplug_slot->private;
struct pcibus_info *pcibus_info;
struct pcibr_slot_disable_resp resp;
int rc;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
rc = sal_pcibr_slot_disable(pcibus_info, device_num, action, &resp);
if ((action == PCI_REQ_SLOT_ELIGIBLE) &&
(rc == PCI_SLOT_ALREADY_DOWN)) {
dev_dbg(&slot->pci_bus->self->dev, "Slot %s already inactive\n", slot->physical_path);
return 1; /* return 1 to user */
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
dev_dbg(&slot->pci_bus->self->dev, "Cannot remove last 33MHz card\n");
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message \n%s\n",
resp.resp_sub_errno, resp.resp_l1_msg);
return -EPERM;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
dev_dbg(&slot->pci_bus->self->dev, "remove failed with error %d sub-error %d\n",
rc, resp.resp_sub_errno);
return -EIO;
}
if ((action == PCI_REQ_SLOT_ELIGIBLE) && !rc)
return 0;
if ((action == PCI_REQ_SLOT_DISABLE) && !rc) {
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
pcibus_info->pbi_enabled_devices &= ~(1 << device_num);
dev_dbg(&slot->pci_bus->self->dev, "remove successful\n");
return 0;
}
if ((action == PCI_REQ_SLOT_DISABLE) && rc) {
dev_dbg(&slot->pci_bus->self->dev, "remove failed rc = %d\n", rc);
}
return rc;
}
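/*
 * The SAL disable protocol above is two-phase, as disable_slot() below
 * shows: first ask the PROM whether the slot may go down
 * (PCI_REQ_SLOT_ELIGIBLE), tear down the Linux-side state, and only then
 * power it off (PCI_REQ_SLOT_DISABLE). Condensed sketch of that order:
 */
static int example_two_phase_disable(struct hotplug_slot *slot, int devnum)
{
	int rc;

	rc = sn_slot_disable(slot, devnum, PCI_REQ_SLOT_ELIGIBLE);
	if (rc)
		return rc;	/* not eligible: nothing has been torn down */

	/* ... remove PCI devices and free SN/ACPI resources here ... */

	return sn_slot_disable(slot, devnum, PCI_REQ_SLOT_DISABLE);
}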
/*
* Power up and configure the slot via a SAL call to PROM.
* Scan slot (and any children), do any platform specific fixup,
* and find device driver.
*/
static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
int num_funcs;
int new_ppb = 0;
int rc;
char *ssdt = NULL;
void pcibios_fixup_device_resources(struct pci_dev *);
/* Serialize the Linux PCI infrastructure */
mutex_lock(&sn_hotplug_mutex);
/*
* Power-on and initialize the slot in the SN
* PCI infrastructure. Also, retrieve the ACPI SSDT
* table for the slot (if ACPI capable PROM).
*/
rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
if (rc) {
mutex_unlock(&sn_hotplug_mutex);
return rc;
}
if (ssdt)
ssdt = __va(ssdt);
/* Add the new SSDT for the slot to the ACPI namespace */
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
acpi_status ret;
ret = acpi_load_table((struct acpi_table_header *)ssdt);
if (ACPI_FAILURE(ret)) {
printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
__func__, ret);
/* try to continue on */
}
}
num_funcs = pci_scan_slot(slot->pci_bus,
PCI_DEVFN(slot->device_num + 1, 0));
if (!num_funcs) {
dev_dbg(&slot->pci_bus->self->dev, "no device in slot\n");
mutex_unlock(&sn_hotplug_mutex);
return -ENODEV;
}
/*
* Map SN resources for all functions on the card
* to the Linux PCI interface and tell the drivers
* about them.
*/
list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
continue;
/* Need to do slot fixup on PPB before fixup of children
* (PPB's pcidev_info needs to be in pcidev_info list
* before child's SN_PCIDEV_INFO() call to setup
* pdi_host_pcidev_info).
*/
pcibios_fixup_device_resources(dev);
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_slot_fixup(dev);
else
sn_io_slot_fixup(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
pci_hp_add_bridge(dev);
if (dev->subordinate) {
new_bus = dev->subordinate;
new_ppb = 1;
}
}
}
/* Add the slot's devices to the ACPI infrastructure */
if (SN_ACPI_BASE_SUPPORT() && ssdt) {
unsigned long long adr;
struct acpi_device *pdevice;
acpi_handle phandle;
acpi_handle chandle = NULL;
acpi_handle rethandle;
acpi_status ret;
phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
if (acpi_bus_get_device(phandle, &pdevice)) {
dev_dbg(&slot->pci_bus->self->dev, "no parent device, assuming NULL\n");
pdevice = NULL;
}
acpi_scan_lock_acquire();
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
* one for multifunction devices.
*/
for (;;) {
rethandle = NULL;
ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
phandle, chandle,
&rethandle);
if (ret == AE_NOT_FOUND || rethandle == NULL)
break;
chandle = rethandle;
ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
NULL, &adr);
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
ret = acpi_bus_scan(chandle);
if (ACPI_FAILURE(ret)) {
printk(KERN_ERR "%s: acpi_bus_scan failed (0x%x) for slot %d func %d\n",
__func__, ret, (int)(adr>>16),
(int)(adr&0xffff));
/* try to continue on */
}
}
}
acpi_scan_lock_release();
}
pci_lock_rescan_remove();
/* Call the driver for the new device */
pci_bus_add_devices(slot->pci_bus);
/* Call the drivers for the new devices subordinate to PPB */
if (new_ppb)
pci_bus_add_devices(new_bus);
pci_unlock_rescan_remove();
mutex_unlock(&sn_hotplug_mutex);
if (rc == 0)
dev_dbg(&slot->pci_bus->self->dev, "insert operation successful\n");
else
dev_dbg(&slot->pci_bus->self->dev, "insert operation failed rc = %d\n", rc);
return rc;
}
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
struct pci_dev *dev, *temp;
int rc;
acpi_handle ssdt_hdl = NULL;
/* Acquire update access to the bus */
mutex_lock(&sn_hotplug_mutex);
/* is it okay to bring this slot down? */
rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
PCI_REQ_SLOT_ELIGIBLE);
if (rc)
goto leaving;
/* free the ACPI resources for the slot */
if (SN_ACPI_BASE_SUPPORT() &&
PCI_CONTROLLER(slot->pci_bus)->companion) {
unsigned long long adr;
struct acpi_device *device;
acpi_handle phandle;
acpi_handle chandle = NULL;
acpi_handle rethandle;
acpi_status ret;
/* Get the rootbus node pointer */
phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
acpi_scan_lock_acquire();
/*
* Walk the rootbus node's immediate children looking for
* the slot's device node(s). There can be more than
* one for multifunction devices.
*/
for (;;) {
rethandle = NULL;
ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
phandle, chandle,
&rethandle);
if (ret == AE_NOT_FOUND || rethandle == NULL)
break;
chandle = rethandle;
ret = acpi_evaluate_integer(chandle,
METHOD_NAME__ADR,
NULL, &adr);
if (ACPI_SUCCESS(ret) &&
(adr>>16) == (slot->device_num + 1)) {
/* retain the owner id */
ssdt_hdl = chandle;
ret = acpi_bus_get_device(chandle,
&device);
if (ACPI_SUCCESS(ret))
acpi_bus_trim(device);
}
}
acpi_scan_lock_release();
}
pci_lock_rescan_remove();
/* Free the SN resources assigned to the Linux device.*/
list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
continue;
pci_dev_get(dev);
sn_bus_free_data(dev);
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
pci_unlock_rescan_remove();
/* Remove the SSDT for the slot from the ACPI namespace */
if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
acpi_status ret;
ret = acpi_unload_parent_table(ssdt_hdl);
if (ACPI_FAILURE(ret)) {
acpi_handle_err(ssdt_hdl,
"%s: acpi_unload_parent_table failed (0x%x)\n",
__func__, ret);
/* try to continue on */
}
}
/* free the collected sysdata pointers */
sn_bus_free_sysdata();
/* Deactivate slot */
rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
PCI_REQ_SLOT_DISABLE);
leaving:
/* Release the bus lock */
mutex_unlock(&sn_hotplug_mutex);
return rc;
}
static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
u8 *value)
{
struct slot *slot = bss_hotplug_slot->private;
struct pcibus_info *pcibus_info;
u32 power;
pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
mutex_lock(&sn_hotplug_mutex);
power = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
*value = power ? 1 : 0;
mutex_unlock(&sn_hotplug_mutex);
return 0;
}
static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
{
kfree(bss_hotplug_slot->info);
kfree(bss_hotplug_slot->private);
kfree(bss_hotplug_slot);
}
static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
{
int device;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot;
char name[SN_SLOT_NAME_SIZE];
int rc = 0;
/*
* Currently only four devices are supported,
* in the future there may be more -- up to 32.
*/
for (device = 0; device < SN_MAX_HP_SLOTS ; device++) {
if (sn_pci_slot_valid(pci_bus, device) != 1)
continue;
bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
GFP_KERNEL);
if (!bss_hotplug_slot) {
rc = -ENOMEM;
goto alloc_err;
}
bss_hotplug_slot->info =
kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
if (!bss_hotplug_slot->info) {
rc = -ENOMEM;
goto alloc_err;
}
if (sn_hp_slot_private_alloc(bss_hotplug_slot,
pci_bus, device, name)) {
rc = -ENOMEM;
goto alloc_err;
}
bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
bss_hotplug_slot->release = &sn_release_slot;
rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
if (rc)
goto register_err;
pci_slot = bss_hotplug_slot->pci_slot;
rc = sysfs_create_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
if (rc)
goto register_err;
}
dev_dbg(&pci_bus->self->dev, "Registered bus with hotplug\n");
return rc;
register_err:
dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
rc);
alloc_err:
if (rc == -ENOMEM)
dev_dbg(&pci_bus->self->dev, "Memory allocation error\n");
/* destroy THIS element */
if (bss_hotplug_slot)
sn_release_slot(bss_hotplug_slot);
/* destroy anything else on the list */
while ((bss_hotplug_slot = sn_hp_destroy()))
pci_hp_deregister(bss_hotplug_slot);
return rc;
}
static int __init sn_pci_hotplug_init(void)
{
struct pci_bus *pci_bus = NULL;
int rc;
int registered = 0;
if (!sn_prom_feature_available(PRF_HOTPLUG_SUPPORT)) {
printk(KERN_ERR "%s: PROM version does not support hotplug.\n",
__func__);
return -EPERM;
}
INIT_LIST_HEAD(&sn_hp_list);
while ((pci_bus = pci_find_next_bus(pci_bus))) {
if (!pci_bus->sysdata)
continue;
rc = sn_pci_bus_valid(pci_bus);
if (rc != 1) {
dev_dbg(&pci_bus->self->dev, "not a valid hotplug bus\n");
continue;
}
dev_dbg(&pci_bus->self->dev, "valid hotplug bus\n");
rc = sn_hotplug_slot_register(pci_bus);
if (!rc) {
registered = 1;
} else {
registered = 0;
break;
}
}
return registered == 1 ? 0 : -ENODEV;
}
static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
while ((bss_hotplug_slot = sn_hp_destroy()))
pci_hp_deregister(bss_hotplug_slot);
if (!list_empty(&sn_hp_list))
printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);
}
module_init(sn_pci_hotplug_init);
module_exit(sn_pci_hotplug_exit);
| gpl-2.0 |
Abhishek-karmakar/caf-kernel-msm8916 | drivers/input/misc/dm355evm_keys.c | 2475 | 8109 | /*
* dm355evm_keys.c - support buttons and IR remote on DM355 EVM board
*
* Copyright (c) 2008 by David Brownell
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c/dm355evm_msp.h>
#include <linux/module.h>
/*
* The MSP430 firmware on the DM355 EVM monitors on-board pushbuttons
* and an IR receiver used for the remote control. When any key is
* pressed, or its autorepeat kicks in, an event is sent. This driver
* reads those events from the small (32-event) queue and reports them.
*
* Note that physically there can only be one of these devices.
*
* This driver was tested with firmware revision A4.
*/
struct dm355evm_keys {
struct input_dev *input;
struct device *dev;
int irq;
};
/* These initial keycodes can be remapped */
static const struct key_entry dm355evm_keys[] = {
/*
* Pushbuttons on the EVM board ... note that the labels for these
* are SW10/SW11/etc on the PC board. The left/right orientation
* comes only from the firmware's documentation, and presumes the
* power connector is immediately in front of you and the IR sensor
* is to the right. (That is, rotate the board counter-clockwise
* by 90 degrees from the SW10/etc and "DM355 EVM" labels.)
*/
{ KE_KEY, 0x00d8, { KEY_OK } }, /* SW12 */
{ KE_KEY, 0x00b8, { KEY_UP } }, /* SW13 */
{ KE_KEY, 0x00e8, { KEY_DOWN } }, /* SW11 */
{ KE_KEY, 0x0078, { KEY_LEFT } }, /* SW14 */
{ KE_KEY, 0x00f0, { KEY_RIGHT } }, /* SW10 */
/*
* IR buttons ... codes assigned to match the universal remote
* provided with the EVM (Philips PM4S) using DVD code 0020.
*
* These event codes match firmware documentation, but other
* remote controls could easily send more RC5-encoded events.
* The PM4S manual was used in several cases to help select
* a keycode reflecting the intended usage.
*
* RC5 codes are 14 bits, with two start bits (0x3 prefix)
* and a toggle bit (masked out below).
*/
{ KE_KEY, 0x300c, { KEY_POWER } }, /* NOTE: docs omit this */
{ KE_KEY, 0x3000, { KEY_NUMERIC_0 } },
{ KE_KEY, 0x3001, { KEY_NUMERIC_1 } },
{ KE_KEY, 0x3002, { KEY_NUMERIC_2 } },
{ KE_KEY, 0x3003, { KEY_NUMERIC_3 } },
{ KE_KEY, 0x3004, { KEY_NUMERIC_4 } },
{ KE_KEY, 0x3005, { KEY_NUMERIC_5 } },
{ KE_KEY, 0x3006, { KEY_NUMERIC_6 } },
{ KE_KEY, 0x3007, { KEY_NUMERIC_7 } },
{ KE_KEY, 0x3008, { KEY_NUMERIC_8 } },
{ KE_KEY, 0x3009, { KEY_NUMERIC_9 } },
{ KE_KEY, 0x3022, { KEY_ENTER } },
{ KE_KEY, 0x30ec, { KEY_MODE } }, /* "tv/vcr/..." */
{ KE_KEY, 0x300f, { KEY_SELECT } }, /* "info" */
{ KE_KEY, 0x3020, { KEY_CHANNELUP } }, /* "up" */
{ KE_KEY, 0x302e, { KEY_MENU } }, /* "in/out" */
{ KE_KEY, 0x3011, { KEY_VOLUMEDOWN } }, /* "left" */
{ KE_KEY, 0x300d, { KEY_MUTE } }, /* "ok" */
{ KE_KEY, 0x3010, { KEY_VOLUMEUP } }, /* "right" */
{ KE_KEY, 0x301e, { KEY_SUBTITLE } }, /* "cc" */
{ KE_KEY, 0x3021, { KEY_CHANNELDOWN } },/* "down" */
{ KE_KEY, 0x3022, { KEY_PREVIOUS } },
{ KE_KEY, 0x3026, { KEY_SLEEP } },
{ KE_KEY, 0x3172, { KEY_REWIND } }, /* NOTE: docs wrongly say 0x30ca */
{ KE_KEY, 0x3175, { KEY_PLAY } },
{ KE_KEY, 0x3174, { KEY_FASTFORWARD } },
{ KE_KEY, 0x3177, { KEY_RECORD } },
{ KE_KEY, 0x3176, { KEY_STOP } },
{ KE_KEY, 0x3169, { KEY_PAUSE } },
};
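/*
 * Field layout of the 14-bit RC5 scancodes above, for reference: two
 * start bits (the 0x3 prefix), one toggle bit (0x0800, stripped in the
 * IRQ handler below), five address bits and six command bits.
 * Illustrative decode helper (not used by the driver):
 */
static inline void example_rc5_decode(u16 code, u8 *toggle, u8 *addr, u8 *cmd)
{
	*toggle = (code >> 11) & 0x1;	/* flips on each distinct key press */
	*addr = (code >> 6) & 0x1f;	/* device address */
	*cmd = code & 0x3f;		/* command number */
}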
/*
* Because we communicate with the MSP430 using I2C, and all I2C calls
* in Linux sleep, we use a threaded IRQ handler. The IRQ itself is
* active low, but we go through the GPIO controller so we can trigger
* on falling edges and not worry about enabling/disabling the IRQ in
* the keypress handling path.
*/
static irqreturn_t dm355evm_keys_irq(int irq, void *_keys)
{
static u16 last_event;
struct dm355evm_keys *keys = _keys;
const struct key_entry *ke;
unsigned int keycode;
int status;
u16 event;
/* For simplicity we ignore INPUT_COUNT and just read
* events until we get the "queue empty" indicator.
* Reading INPUT_LOW decrements the count.
*/
for (;;) {
status = dm355evm_msp_read(DM355EVM_MSP_INPUT_HIGH);
if (status < 0) {
dev_dbg(keys->dev, "input high err %d\n",
status);
break;
}
event = status << 8;
status = dm355evm_msp_read(DM355EVM_MSP_INPUT_LOW);
if (status < 0) {
dev_dbg(keys->dev, "input low err %d\n",
status);
break;
}
event |= status;
if (event == 0xdead)
break;
/* Press and release a button: two events, same code.
* Press and hold (autorepeat), then release: N events
* (N > 2), same code. For RC5 buttons the toggle bits
* distinguish (for example) "1-autorepeat" from "1 1";
* but PCB buttons don't support that bit.
*
* So we must synthesize release events. We do that by
* mapping events to a press/release event pair; then
* to avoid adding extra events, skip the second event
* of each pair.
*/
if (event == last_event) {
last_event = 0;
continue;
}
last_event = event;
/* ignore the RC5 toggle bit */
event &= ~0x0800;
/* find the key, or report it as unknown */
ke = sparse_keymap_entry_from_scancode(keys->input, event);
keycode = ke ? ke->keycode : KEY_UNKNOWN;
dev_dbg(keys->dev,
"input event 0x%04x--> keycode %d\n",
event, keycode);
/* report press + release */
input_report_key(keys->input, keycode, 1);
input_sync(keys->input);
input_report_key(keys->input, keycode, 0);
input_sync(keys->input);
}
return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/
static int dm355evm_keys_probe(struct platform_device *pdev)
{
struct dm355evm_keys *keys;
struct input_dev *input;
int status;
/* allocate instance struct and input dev */
keys = kzalloc(sizeof *keys, GFP_KERNEL);
input = input_allocate_device();
if (!keys || !input) {
status = -ENOMEM;
goto fail1;
}
keys->dev = &pdev->dev;
keys->input = input;
/* set up "threaded IRQ handler" */
status = platform_get_irq(pdev, 0);
if (status < 0)
goto fail1;
keys->irq = status;
input_set_drvdata(input, keys);
input->name = "DM355 EVM Controls";
input->phys = "dm355evm/input0";
input->dev.parent = &pdev->dev;
input->id.bustype = BUS_I2C;
input->id.product = 0x0355;
input->id.version = dm355evm_msp_read(DM355EVM_MSP_FIRMREV);
status = sparse_keymap_setup(input, dm355evm_keys, NULL);
if (status)
goto fail1;
/* REVISIT: flush the event queue? */
status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(&pdev->dev), keys);
if (status < 0)
goto fail2;
/* register */
status = input_register_device(input);
if (status < 0)
goto fail3;
platform_set_drvdata(pdev, keys);
return 0;
fail3:
free_irq(keys->irq, keys);
fail2:
sparse_keymap_free(input);
fail1:
input_free_device(input);
kfree(keys);
dev_err(&pdev->dev, "can't register, err %d\n", status);
return status;
}
static int dm355evm_keys_remove(struct platform_device *pdev)
{
struct dm355evm_keys *keys = platform_get_drvdata(pdev);
free_irq(keys->irq, keys);
sparse_keymap_free(keys->input);
input_unregister_device(keys->input);
kfree(keys);
return 0;
}
/* REVISIT: add suspend/resume when DaVinci supports it. The IRQ should
* be able to wake up the system. When device_may_wakeup(&pdev->dev), call
* enable_irq_wake() on suspend, and disable_irq_wake() on resume.
*/
/*
* I2C is used to talk to the MSP430, but this platform device is
* exposed by an MFD driver that manages I2C communications.
*/
static struct platform_driver dm355evm_keys_driver = {
.probe = dm355evm_keys_probe,
.remove = dm355evm_keys_remove,
.driver = {
.owner = THIS_MODULE,
.name = "dm355evm_keys",
},
};
module_platform_driver(dm355evm_keys_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
sandymanu/sandy_lettuce_8916 | drivers/block/ps3disk.c | 2475 | 14279 | /*
* PS3 Disk Storage Driver
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published
* by the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
#include <asm/firmware.h>
#define DEVICE_NAME "ps3disk"
#define BOUNCE_SIZE (64*1024)
#define PS3DISK_MAX_DISKS 16
#define PS3DISK_MINORS 16
#define PS3DISK_NAME "ps3d%c"
struct ps3disk_private {
spinlock_t lock; /* Request queue spinlock */
struct request_queue *queue;
struct gendisk *gendisk;
unsigned int blocking_factor;
struct request *req;
u64 raw_capacity;
unsigned char model[ATA_ID_PROD_LEN+1];
};
#define LV1_STORAGE_SEND_ATA_COMMAND (2)
#define LV1_STORAGE_ATA_HDDOUT (0x23)
struct lv1_ata_cmnd_block {
u16 features;
u16 sector_count;
u16 LBA_low;
u16 LBA_mid;
u16 LBA_high;
u8 device;
u8 command;
u32 is_ext;
u32 proto;
u32 in_out;
u32 size;
u64 buffer;
u32 arglen;
};
enum lv1_ata_proto {
NON_DATA_PROTO = 0,
PIO_DATA_IN_PROTO = 1,
PIO_DATA_OUT_PROTO = 2,
DMA_PROTO = 3
};
enum lv1_ata_in_out {
DIR_WRITE = 0, /* memory -> device */
DIR_READ = 1 /* device -> memory */
};
static int ps3disk_major;
static const struct block_device_operations ps3disk_fops = {
.owner = THIS_MODULE,
};
static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct request *req, int gather)
{
unsigned int offset = 0;
struct req_iterator iter;
struct bio_vec *bvec;
unsigned int i = 0;
size_t size;
void *buf;
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
__func__, __LINE__, i, bio_segments(iter.bio),
bio_sectors(iter.bio), iter.bio->bi_sector);
size = bvec->bv_len;
buf = bvec_kmap_irq(bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
flush_kernel_dcache_page(bvec->bv_page);
bvec_kunmap_irq(buf, &flags);
i++;
}
}
static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
int write = rq_data_dir(req), res;
const char *op = write ? "write" : "read";
u64 start_sector, sectors;
unsigned int region_id = dev->regions[dev->region_idx].id;
#ifdef DEBUG
unsigned int n = 0;
struct bio_vec *bv;
struct req_iterator iter;
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bvecs for %u sectors\n",
__func__, __LINE__, op, n, blk_rq_sectors(req));
#endif
start_sector = blk_rq_pos(req) * priv->blocking_factor;
sectors = blk_rq_sectors(req) * priv->blocking_factor;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
if (write) {
ps3disk_scatter_gather(dev, req, 1);
res = lv1_storage_write(dev->sbd.dev_id, region_id,
start_sector, sectors, 0,
dev->bounce_lpar, &dev->tag);
} else {
res = lv1_storage_read(dev->sbd.dev_id, region_id,
start_sector, sectors, 0,
dev->bounce_lpar, &dev->tag);
}
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
__blk_end_request_all(req, -EIO);
return 0;
}
priv->req = req;
return 1;
}
static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
res = lv1_storage_send_device_command(dev->sbd.dev_id,
LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
0, &dev->tag);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
__blk_end_request_all(req, -EIO);
return 0;
}
priv->req = req;
return 1;
}
static void ps3disk_do_request(struct ps3_storage_device *dev,
struct request_queue *q)
{
struct request *req;
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
if (req->cmd_flags & REQ_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
} else if (req->cmd_type == REQ_TYPE_FS) {
if (ps3disk_submit_request_sg(dev, req))
break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO);
continue;
}
}
}
static void ps3disk_request(struct request_queue *q)
{
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
if (priv->req) {
dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
return;
}
ps3disk_do_request(dev, q);
}
static irqreturn_t ps3disk_interrupt(int irq, void *data)
{
struct ps3_storage_device *dev = data;
struct ps3disk_private *priv;
struct request *req;
int res, read, error;
u64 tag, status;
const char *op;
res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
if (tag != dev->tag)
dev_err(&dev->sbd.core,
"%s:%u: tag mismatch, got %llx, expected %llx\n",
__func__, __LINE__, tag, dev->tag);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
__func__, __LINE__, res, status);
return IRQ_HANDLED;
}
priv = ps3_system_bus_get_drvdata(&dev->sbd);
req = priv->req;
if (!req) {
dev_dbg(&dev->sbd.core,
"%s:%u non-block layer request completed\n", __func__,
__LINE__);
dev->lv1_status = status;
complete(&dev->done);
return IRQ_HANDLED;
}
if (req->cmd_flags & REQ_FLUSH) {
read = 0;
op = "flush";
} else {
read = !rq_data_dir(req);
op = read ? "read" : "write";
}
if (status) {
dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
__LINE__, op, status);
error = -EIO;
} else {
dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
__LINE__, op);
error = 0;
if (read)
ps3disk_scatter_gather(dev, req, 0);
}
spin_lock(&priv->lock);
__blk_end_request_all(req, error);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
static int ps3disk_sync_cache(struct ps3_storage_device *dev)
{
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: sync cache\n", __func__, __LINE__);
res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
return -EIO;
}
return 0;
}
/* ATA helpers copied from drivers/ata/libata-core.c */
static void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
unsigned int i;
for (i = 0; i < buf_words; i++)
buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
static u64 ata_id_n_sectors(const u16 *id)
{
if (ata_id_has_lba(id)) {
if (ata_id_has_lba48(id))
return ata_id_u64(id, 100);
else
return ata_id_u32(id, 60);
} else {
if (ata_id_current_chs_valid(id))
return ata_id_u32(id, 57);
else
return id[1] * id[3] * id[6];
}
}
static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs,
unsigned int len)
{
unsigned int c;
while (len > 0) {
c = id[ofs] >> 8;
*s = c;
s++;
c = id[ofs] & 0xff;
*s = c;
s++;
ofs++;
len -= 2;
}
}
static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
unsigned int len)
{
unsigned char *p;
WARN_ON(!(len & 1));
ata_id_string(id, s, ofs, len - 1);
p = s + strnlen(s, len - 1);
while (p > s && p[-1] == ' ')
p--;
*p = '\0';
}
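/*
 * Usage sketch: ATA IDENTIFY strings are packed two characters per 16-bit
 * word, high byte first, space-padded. ata_id_string() unpacks them and
 * ata_id_c_string() additionally trims the padding and NUL-terminates.
 * ps3disk_identify() below uses it for the model name:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
 */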
static int ps3disk_identify(struct ps3_storage_device *dev)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
struct lv1_ata_cmnd_block ata_cmnd;
u16 *id = dev->bounce_buf;
u64 res;
dev_dbg(&dev->sbd.core, "%s:%u: identify disk\n", __func__, __LINE__);
memset(&ata_cmnd, 0, sizeof(struct lv1_ata_cmnd_block));
ata_cmnd.command = ATA_CMD_ID_ATA;
ata_cmnd.sector_count = 1;
ata_cmnd.size = ata_cmnd.arglen = ATA_ID_WORDS * 2;
ata_cmnd.buffer = dev->bounce_lpar;
ata_cmnd.proto = PIO_DATA_IN_PROTO;
ata_cmnd.in_out = DIR_READ;
res = ps3stor_send_command(dev, LV1_STORAGE_SEND_ATA_COMMAND,
ps3_mm_phys_to_lpar(__pa(&ata_cmnd)),
sizeof(ata_cmnd), ata_cmnd.buffer,
ata_cmnd.arglen);
if (res) {
dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%llx\n",
__func__, __LINE__, res);
return -EIO;
}
swap_buf_le16(id, ATA_ID_WORDS);
/* All we're interested in are raw capacity and model name */
priv->raw_capacity = ata_id_n_sectors(id);
ata_id_c_string(id, priv->model, ATA_ID_PROD, sizeof(priv->model));
return 0;
}
static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv;
int error;
unsigned int devidx;
struct request_queue *queue;
struct gendisk *gendisk;
if (dev->blk_size < 512) {
dev_err(&dev->sbd.core,
"%s:%u: cannot handle block size %llu\n", __func__,
__LINE__, dev->blk_size);
return -EINVAL;
}
BUILD_BUG_ON(PS3DISK_MAX_DISKS > BITS_PER_LONG);
mutex_lock(&ps3disk_mask_mutex);
devidx = find_first_zero_bit(&ps3disk_mask, PS3DISK_MAX_DISKS);
if (devidx >= PS3DISK_MAX_DISKS) {
dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__,
__LINE__);
mutex_unlock(&ps3disk_mask_mutex);
return -ENOSPC;
}
__set_bit(devidx, &ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
error = -ENOMEM;
goto fail;
}
ps3_system_bus_set_drvdata(_dev, priv);
spin_lock_init(&priv->lock);
dev->bounce_size = BOUNCE_SIZE;
dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
if (!dev->bounce_buf) {
error = -ENOMEM;
goto fail_free_priv;
}
error = ps3stor_setup(dev, ps3disk_interrupt);
if (error)
goto fail_free_bounce;
ps3disk_identify(dev);
queue = blk_init_queue(ps3disk_request, &priv->lock);
if (!queue) {
dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
__func__, __LINE__);
error = -ENOMEM;
goto fail_teardown;
}
priv->queue = queue;
queue->queuedata = dev;
blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
blk_queue_flush(queue, REQ_FLUSH);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
gendisk = alloc_disk(PS3DISK_MINORS);
if (!gendisk) {
dev_err(&dev->sbd.core, "%s:%u: alloc_disk failed\n", __func__,
__LINE__);
error = -ENOMEM;
goto fail_cleanup_queue;
}
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
gendisk->fops = &ps3disk_fops;
gendisk->queue = queue;
gendisk->private_data = dev;
gendisk->driverfs_dev = &dev->sbd.core;
snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
devidx+'a');
priv->blocking_factor = dev->blk_size >> 9;
set_capacity(gendisk,
dev->regions[dev->region_idx].size*priv->blocking_factor);
dev_info(&dev->sbd.core,
"%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
add_disk(gendisk);
return 0;
fail_cleanup_queue:
blk_cleanup_queue(queue);
fail_teardown:
ps3stor_teardown(dev);
fail_free_bounce:
kfree(dev->bounce_buf);
fail_free_priv:
kfree(priv);
ps3_system_bus_set_drvdata(_dev, NULL);
fail:
mutex_lock(&ps3disk_mask_mutex);
__clear_bit(devidx, &ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
return error;
}
static int ps3disk_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
mutex_lock(&ps3disk_mask_mutex);
__clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
blk_cleanup_queue(priv->queue);
put_disk(priv->gendisk);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
ps3stor_teardown(dev);
kfree(dev->bounce_buf);
kfree(priv);
ps3_system_bus_set_drvdata(_dev, NULL);
return 0;
}
static struct ps3_system_bus_driver ps3disk = {
.match_id = PS3_MATCH_ID_STOR_DISK,
.core.name = DEVICE_NAME,
.core.owner = THIS_MODULE,
.probe = ps3disk_probe,
.remove = ps3disk_remove,
.shutdown = ps3disk_remove,
};
static int __init ps3disk_init(void)
{
int error;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
error = register_blkdev(0, DEVICE_NAME);
if (error <= 0) {
printk(KERN_ERR "%s:%u: register_blkdev failed %d\n", __func__,
__LINE__, error);
return error;
}
ps3disk_major = error;
pr_info("%s:%u: registered block device major %d\n", __func__,
__LINE__, ps3disk_major);
error = ps3_system_bus_driver_register(&ps3disk);
if (error)
unregister_blkdev(ps3disk_major, DEVICE_NAME);
return error;
}
static void __exit ps3disk_exit(void)
{
ps3_system_bus_driver_unregister(&ps3disk);
unregister_blkdev(ps3disk_major, DEVICE_NAME);
}
module_init(ps3disk_init);
module_exit(ps3disk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 Disk Storage Driver");
MODULE_AUTHOR("Sony Corporation");
MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
| gpl-2.0 |
hfutxqd/android_kernel_zte_s291_msm8974 | arch/cris/arch-v10/kernel/process.c | 4523 | 7643 | /*
* linux/arch/cris/kernel/process.c
*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2000-2002 Axis Communications AB
*
* Authors: Bjorn Wesen (bjornw@axis.com)
* Mikael Starvik (starvik@axis.com)
*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <arch/svinto.h>
#include <linux/init.h>
#include <arch/system.h>
#ifdef CONFIG_ETRAX_GPIO
void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
#endif
/*
* We use this if we don't have any better
* idle routine..
*/
void default_idle(void)
{
#ifdef CONFIG_ETRAX_GPIO
etrax_gpio_wake_up_check();
#endif
}
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
/* Nothing needs to be done. */
}
/* if the watchdog is enabled, we can simply disable interrupts and go
* into an eternal loop, and the watchdog will reset the CPU after 0.1s
* if on the other hand the watchdog wasn't enabled, we just enable it and wait
*/
void hard_reset_now (void)
{
/*
* Don't declare this variable elsewhere. We don't want any other
* code to know about it than the watchdog handler in entry.S and
* this code, implementing hard reset through the watchdog.
*/
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
extern int cause_of_death;
#endif
printk("*** HARD RESET ***\n");
local_irq_disable();
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
cause_of_death = 0xbedead;
#else
/* Since we don't plan to keep on resetting the watchdog,
the key can be arbitrary, hence three */
*R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) |
IO_STATE(R_WATCHDOG, enable, start);
#endif
while(1) /* waiting for RETRIBUTION! */ ;
}
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *t)
{
return task_pt_regs(t)->irp;
}
static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
{
fn(arg);
do_exit(-1); /* Should never be called, return bad exit value */
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
/* Don't use r10 since that is set to 0 in copy_thread */
regs.r11 = (unsigned long)fn;
regs.r12 = (unsigned long)arg;
regs.irp = (unsigned long)kernel_thread_helper;
regs.dccr = 1 << I_DCCR_BITNR;
/* Ok, create the new process.. */
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
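/*
 * Flow sketch for the above: do_fork() copies this pt_regs via
 * copy_thread(); when the child is first scheduled it "returns" through
 * regs.irp into kernel_thread_helper(), which calls fn(arg) out of
 * r11/r12. r10 is avoided because copy_thread() zeroes it as the child's
 * fork return value.
 */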
/* setup the child's kernel stack with a pt_regs and switch_stack on it.
* it will be un-nested during _resume and _ret_from_sys_call when the
* new thread is scheduled.
*
* also setup the thread switching structure which is used to keep
* thread-specific data during _resumes.
*
*/
asmlinkage void ret_from_fork(void);
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs * childregs;
struct switch_stack *swstack;
/* put the pt_regs structure at the end of the new kernel stack page and fix it up
* remember that the task_struct doubles as the kernel stack for the task
*/
childregs = task_pt_regs(p);
*childregs = *regs; /* struct copy of pt_regs */
p->set_child_tid = p->clear_child_tid = NULL;
childregs->r10 = 0; /* child returns 0 after a fork/clone */
/* put the switch stack right below the pt_regs */
swstack = ((struct switch_stack *)childregs) - 1;
swstack->r9 = 0; /* parameter to ret_from_sys_call, 0 == dont restart the syscall */
/* we want to return into ret_from_sys_call after the _resume */
swstack->return_ip = (unsigned long) ret_from_fork; /* Will call ret_from_sys_call */
/* fix the user-mode stackpointer */
p->thread.usp = usp;
/* and the kernel-mode one */
p->thread.ksp = (unsigned long) swstack;
#ifdef DEBUG
printk("copy_thread: new regs at 0x%p, as shown below:\n", childregs);
show_registers(childregs);
#endif
return 0;
}
/*
* Be aware of the "magic" 7th argument in the four system-calls below.
* They need the latest stackframe, which is put as the 7th argument by
* entry.S. The previous arguments are dummies or actually used, but need
* to be defined to reach the 7th argument.
*
* N.B.: Another method to get the stackframe is to use current_regs(). But
* it returns the latest stack-frame stacked when going from _user mode_ and
* some of these (at least sys_clone) are called from kernel-mode sometimes
* (for example during kernel_thread, above) and thus cannot use it. Thus,
* to be sure not to get any surprises, we use the method for the other calls
* as well.
*/
asmlinkage int sys_fork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
/* if newusp is 0, we just grab the old usp */
/* FIXME: Is parent_tid/child_tid really third/fourth argument? Update lib? */
asmlinkage int sys_clone(unsigned long newusp, unsigned long flags,
int* parent_tid, int* child_tid, long mof, long srp,
struct pt_regs *regs)
{
if (!newusp)
newusp = rdusp();
return do_fork(flags, newusp, regs, 0, parent_tid, child_tid);
}
/* vfork is a system call in i386 because of register-pressure - maybe
* we can remove it and handle it in libc but we put it here until then.
*/
asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp,
struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char *fname,
const char *const *argv,
const char *const *envp,
long r13, long mof, long srp,
struct pt_regs *regs)
{
int error;
char *filename;
filename = getname(fname);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, argv, envp, regs);
putname(filename);
out:
return error;
}
unsigned long get_wchan(struct task_struct *p)
{
#if 0
/* YURGH. TODO. */
unsigned long ebp, esp, eip;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)p;
esp = p->thread.esp;
if (!stack_page || esp < stack_page || esp > 8188+stack_page)
return 0;
/* include/asm-i386/system.h:switch_to() pushes ebp last. */
ebp = *(unsigned long *) esp;
do {
if (ebp < stack_page || ebp > 8184+stack_page)
return 0;
eip = *(unsigned long *) (ebp+4);
if (!in_sched_functions(eip))
return eip;
ebp = *(unsigned long *) ebp;
} while (count++ < 16);
#endif
return 0;
}
#undef last_sched
#undef first_sched
void show_regs(struct pt_regs * regs)
{
unsigned long usp = rdusp();
printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n",
regs->irp, regs->srp, regs->dccr, usp, regs->mof );
printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n",
regs->r0, regs->r1, regs->r2, regs->r3);
printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
regs->r4, regs->r5, regs->r6, regs->r7);
printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
regs->r8, regs->r9, regs->r10, regs->r11);
printk("r12: %08lx r13: %08lx oR10: %08lx\n",
regs->r12, regs->r13, regs->orig_r10);
}
| gpl-2.0 |
Jarbu12/Mini-NovaXM4.3 | drivers/staging/media/lirc/lirc_igorplugusb.c | 5035 | 13574 | /*
* lirc_igorplugusb - USB remote support for LIRC
*
* Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware.
* See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm
*
* The device can only record bursts of up to 36 pulses/spaces.
* Works fine with RC5. Longer commands lead to device buffer overrun.
* (Maybe a better firmware or a microcontroller with more ram can help?)
*
* Version 0.1 [beta status]
*
* Copyright (C) 2004 Jan M. Hochstein
* <hochstein@algo.informatik.tu-darmstadt.de>
*
* This driver was derived from:
* Paul Miller <pmiller9@users.sourceforge.net>
* "lirc_atiusb" module
* Vladimir Dergachev <volodya@minspring.com>'s 2002
* "USB ATI Remote support" (input device)
* Adrian Dewhurst <sailor-lk@sailorfrag.net>'s 2002
* "USB StreamZap remote driver" (LIRC)
* Artur Lipowski <alipowski@kki.net.pl>'s 2002
* "lirc_dev" and "lirc_gpio" LIRC modules
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/usb.h>
#include <linux/time.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
/* module identification */
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR \
"Jan M. Hochstein <hochstein@algo.informatik.tu-darmstadt.de>"
#define DRIVER_DESC "Igorplug USB remote driver for LIRC"
#define DRIVER_NAME "lirc_igorplugusb"
/* debugging support */
#ifdef CONFIG_USB_DEBUG
static bool debug = 1;
#else
static bool debug;
#endif
#define dprintk(fmt, args...) \
do { \
if (debug) \
printk(KERN_DEBUG fmt, ## args); \
} while (0)
/* One mode2 pulse/space has 4 bytes. */
#define CODE_LENGTH sizeof(int)
/* Igor's firmware cannot record bursts longer than 36. */
#define DEVICE_BUFLEN 36
/*
* Header at the beginning of the device's buffer:
* unsigned char data_length
* unsigned char data_start (!=0 means ring-buffer overrun)
* unsigned char counter (incremented by each burst)
*/
#define DEVICE_HEADERLEN 3
/* This is for the gap */
#define ADDITIONAL_LIRC_BYTES 2
/* times to poll per second */
#define SAMPLE_RATE 100
static int sample_rate = SAMPLE_RATE;
/**** Igor's USB Request Codes */
#define SET_INFRABUFFER_EMPTY 1
/**
* Params: none
* Answer: empty
*/
#define GET_INFRACODE 2
/**
* Params:
* wValue: offset to begin reading infra buffer
*
* Answer: infra data
*/
#define SET_DATAPORT_DIRECTION 3
/**
* Params:
* wValue: (byte) 1 bit for each data port pin (0=in, 1=out)
*
* Answer: empty
*/
#define GET_DATAPORT_DIRECTION 4
/**
* Params: none
*
* Answer: (byte) 1 bit for each data port pin (0=in, 1=out)
*/
#define SET_OUT_DATAPORT 5
/**
* Params:
* wValue: byte to write to output data port
*
* Answer: empty
*/
#define GET_OUT_DATAPORT 6
/**
* Params: none
*
* Answer: least significant 3 bits read from output data port
*/
#define GET_IN_DATAPORT 7
/**
* Params: none
*
* Answer: least significant 3 bits read from input data port
*/
#define READ_EEPROM 8
/**
* Params:
* wValue: offset to begin reading EEPROM
*
* Answer: EEPROM bytes
*/
#define WRITE_EEPROM 9
/**
* Params:
* wValue: offset to EEPROM byte
* wIndex: byte to write
*
* Answer: empty
*/
#define SEND_RS232 10
/**
* Params:
* wValue: byte to send
*
* Answer: empty
*/
#define RECV_RS232 11
/**
* Params: none
*
* Answer: byte received
*/
#define SET_RS232_BAUD 12
/**
* Params:
* wValue: byte to write to UART bit rate register (UBRR)
*
* Answer: empty
*/
#define GET_RS232_BAUD 13
/**
* Params: none
*
* Answer: byte read from UART bit rate register (UBRR)
*/
/* data structure for each usb remote */
struct igorplug {
/* usb */
struct usb_device *usbdev;
int devnum;
unsigned char *buf_in;
unsigned int len_in;
int in_space;
struct timeval last_time;
dma_addr_t dma_in;
/* lirc */
struct lirc_driver *d;
/* handle sending (init strings) */
int send_flags;
};
static int unregister_from_lirc(struct igorplug *ir)
{
struct lirc_driver *d;
int devnum;
if (!ir) {
printk(KERN_ERR "%s: called with NULL device struct!\n",
__func__);
return -EINVAL;
}
devnum = ir->devnum;
d = ir->d;
if (!d) {
printk(KERN_ERR "%s: called with NULL lirc driver struct!\n",
__func__);
return -EINVAL;
}
dprintk(DRIVER_NAME "[%d]: calling lirc_unregister_driver\n", devnum);
lirc_unregister_driver(d->minor);
kfree(d);
ir->d = NULL;
kfree(ir);
return devnum;
}
static int set_use_inc(void *data)
{
struct igorplug *ir = data;
if (!ir) {
printk(DRIVER_NAME "[?]: set_use_inc called with no context\n");
return -EIO;
}
dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum);
if (!ir->usbdev)
return -ENODEV;
return 0;
}
static void set_use_dec(void *data)
{
struct igorplug *ir = data;
if (!ir) {
printk(DRIVER_NAME "[?]: set_use_dec called with no context\n");
return;
}
dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum);
}
static void send_fragment(struct igorplug *ir, struct lirc_buffer *buf,
int i, int max)
{
int code;
/* MODE2: pulse/space (PULSE_BIT) in 1us units */
while (i < max) {
/* 1 Igor-tick = 85.333333 us */
code = (unsigned int)ir->buf_in[i] * 85 +
(unsigned int)ir->buf_in[i] / 3;
ir->last_time.tv_usec += code;
if (ir->in_space)
code |= PULSE_BIT;
lirc_buffer_write(buf, (unsigned char *)&code);
/* 1 chunk = CODE_LENGTH bytes */
ir->in_space ^= 1;
++i;
}
}
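/*
 * On the tick conversion above: one Igor tick is 256/3 us, i.e.
 * 85.333... us. To avoid floating point, n ticks are converted as
 * n * 85 + n / 3, which approximates n * 256 / 3 in integer arithmetic:
 */
static inline unsigned int example_igor_ticks_to_us(unsigned int ticks)
{
	/* same expression send_fragment() uses per buffered byte */
	return ticks * 85 + ticks / 3;
}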
/**
* Called in user context.
* return 0 if data was added to the buffer and
* -ENODATA if none was available. This should add some number of bits
* evenly divisible by code_length to the buffer
*/
static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
{
int ret;
struct igorplug *ir = (struct igorplug *)data;
if (!ir || !ir->usbdev) /* Has the device been removed? */
return -ENODEV;
memset(ir->buf_in, 0, ir->len_in);
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
GET_INFRACODE, USB_TYPE_VENDOR | USB_DIR_IN,
0/* offset */, /*unused*/0,
ir->buf_in, ir->len_in,
/*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
if (ret > 0) {
int code, timediff;
struct timeval now;
/* ACK packet has 1 byte --> ignore */
if (ret < DEVICE_HEADERLEN)
return -ENODATA;
dprintk(DRIVER_NAME ": Got %d bytes. Header: %02x %02x %02x\n",
ret, ir->buf_in[0], ir->buf_in[1], ir->buf_in[2]);
do_gettimeofday(&now);
timediff = now.tv_sec - ir->last_time.tv_sec;
if (timediff + 1 > PULSE_MASK / 1000000)
timediff = PULSE_MASK;
else {
timediff *= 1000000;
timediff += now.tv_usec - ir->last_time.tv_usec;
}
ir->last_time.tv_sec = now.tv_sec;
ir->last_time.tv_usec = now.tv_usec;
/* create leading gap */
code = timediff;
lirc_buffer_write(buf, (unsigned char *)&code);
ir->in_space = 1; /* next comes a pulse */
if (ir->buf_in[2] == 0)
send_fragment(ir, buf, DEVICE_HEADERLEN, ret);
else {
printk(KERN_WARNING DRIVER_NAME
"[%d]: Device buffer overrun.\n", ir->devnum);
/* HHHNNNNNNNNNNNOOOOOOOO H = header
<---[2]---> N = newer
<---------ret--------> O = older */
ir->buf_in[2] %= ret - DEVICE_HEADERLEN; /* sanitize */
/* keep even-ness to not desync pulse/pause */
send_fragment(ir, buf, DEVICE_HEADERLEN +
ir->buf_in[2] - (ir->buf_in[2] & 1), ret);
send_fragment(ir, buf, DEVICE_HEADERLEN,
DEVICE_HEADERLEN + ir->buf_in[2]);
}
ret = usb_control_msg(
ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN,
/*unused*/0, /*unused*/0,
/*dummy*/ir->buf_in, /*dummy*/ir->len_in,
/*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
if (ret < 0)
printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: "
"error %d\n", ir->devnum, ret);
return 0;
} else if (ret < 0)
printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n",
ir->devnum, ret);
return -ENODATA;
}
static int igorplugusb_remote_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *dev = NULL;
struct usb_host_interface *idesc = NULL;
struct usb_endpoint_descriptor *ep;
struct igorplug *ir = NULL;
struct lirc_driver *driver = NULL;
int devnum, pipe, maxp;
int minor = 0;
char buf[63], name[128] = "";
int mem_failure = 0;
int ret;
dprintk(DRIVER_NAME ": usb probe called.\n");
dev = interface_to_usbdev(intf);
idesc = intf->cur_altsetting;
if (idesc->desc.bNumEndpoints != 1)
return -ENODEV;
ep = &idesc->endpoint->desc;
if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
!= USB_DIR_IN)
|| (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
!= USB_ENDPOINT_XFER_CONTROL)
return -ENODEV;
pipe = usb_rcvctrlpipe(dev, ep->bEndpointAddress);
devnum = dev->devnum;
maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
dprintk(DRIVER_NAME "[%d]: bytes_in_key=%zu maxp=%d\n",
devnum, CODE_LENGTH, maxp);
mem_failure = 0;
ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL);
if (!ir) {
mem_failure = 1;
goto mem_failure_switch;
}
driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
if (!driver) {
mem_failure = 2;
goto mem_failure_switch;
}
ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
GFP_ATOMIC, &ir->dma_in);
if (!ir->buf_in) {
mem_failure = 3;
goto mem_failure_switch;
}
strcpy(driver->name, DRIVER_NAME " ");
driver->minor = -1;
driver->code_length = CODE_LENGTH * 8; /* in bits */
driver->features = LIRC_CAN_REC_MODE2;
driver->data = ir;
driver->chunk_size = CODE_LENGTH;
driver->buffer_size = DEVICE_BUFLEN + ADDITIONAL_LIRC_BYTES;
driver->set_use_inc = &set_use_inc;
driver->set_use_dec = &set_use_dec;
driver->sample_rate = sample_rate; /* per second */
driver->add_to_buf = &igorplugusb_remote_poll;
driver->dev = &intf->dev;
driver->owner = THIS_MODULE;
minor = lirc_register_driver(driver);
if (minor < 0)
mem_failure = 9;
mem_failure_switch:
switch (mem_failure) {
case 9:
usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
ir->buf_in, ir->dma_in);
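/* fall through */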
case 3:
kfree(driver);
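/* fall through */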
case 2:
kfree(ir);
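/* fall through */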
case 1:
printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n",
devnum, mem_failure);
return -ENOMEM;
}
driver->minor = minor;
ir->d = driver;
ir->devnum = devnum;
ir->usbdev = dev;
ir->len_in = DEVICE_BUFLEN + DEVICE_HEADERLEN;
ir->in_space = 1; /* First mode2 event is a space. */
do_gettimeofday(&ir->last_time);
if (dev->descriptor.iManufacturer
&& usb_string(dev, dev->descriptor.iManufacturer,
buf, sizeof(buf)) > 0)
strlcpy(name, buf, sizeof(name));
if (dev->descriptor.iProduct
&& usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0)
snprintf(name + strlen(name), sizeof(name) - strlen(name),
" %s", buf);
printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name,
dev->bus->busnum, devnum);
/* clear device buffer */
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN,
/*unused*/0, /*unused*/0,
/*dummy*/ir->buf_in, /*dummy*/ir->len_in,
/*timeout*/HZ * USB_CTRL_GET_TIMEOUT);
if (ret < 0)
printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n",
devnum, ret);
usb_set_intfdata(intf, ir);
return 0;
}
static void igorplugusb_remote_disconnect(struct usb_interface *intf)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
struct igorplug *ir = usb_get_intfdata(intf);
struct device *dev = &intf->dev;
int devnum;
usb_set_intfdata(intf, NULL);
if (!ir || !ir->d)
return;
ir->usbdev = NULL;
usb_free_coherent(usbdev, ir->len_in, ir->buf_in, ir->dma_in);
devnum = unregister_from_lirc(ir);
dev_info(dev, DRIVER_NAME "[%d]: %s done\n", devnum, __func__);
}
static struct usb_device_id igorplugusb_remote_id_table[] = {
/* Igor Plug USB (Atmel's Manufact. ID) */
{ USB_DEVICE(0x03eb, 0x0002) },
/* Fit PC2 Infrared Adapter */
{ USB_DEVICE(0x03eb, 0x21fe) },
/* Terminating entry */
{ }
};
static struct usb_driver igorplugusb_remote_driver = {
.name = DRIVER_NAME,
.probe = igorplugusb_remote_probe,
.disconnect = igorplugusb_remote_disconnect,
.id_table = igorplugusb_remote_id_table
};
module_usb_driver(igorplugusb_remote_driver);
#include <linux/vermagic.h>
MODULE_INFO(vermagic, VERMAGIC_STRING);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, igorplugusb_remote_id_table);
module_param(sample_rate, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
CyanideL/android_kernel_samsung_klte | drivers/net/ethernet/broadcom/bcm63xx_enet.c | 5035 | 49123 | /*
* Driver for BCM963xx builtin Ethernet mac
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"
static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;
/*
* io helpers to access mac registers
*/
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
return bcm_readl(priv->base + off);
}
static inline void enet_writel(struct bcm_enet_priv *priv,
u32 val, u32 off)
{
bcm_writel(val, priv->base + off);
}
/*
* io helpers to access shared registers
*/
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
return bcm_readl(bcm_enet_shared_base + off);
}
static inline void enet_dma_writel(struct bcm_enet_priv *priv,
u32 val, u32 off)
{
bcm_writel(val, bcm_enet_shared_base + off);
}
/*
* write given data into mii register and wait for transfer to end
* with timeout (average measured transfer time is 25us)
*/
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
int limit;
/* make sure mii interrupt status is cleared */
enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
enet_writel(priv, data, ENET_MIIDATA_REG);
wmb();
/* busy wait on mii interrupt bit, with timeout */
limit = 1000;
do {
if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
break;
udelay(1);
} while (limit-- > 0);
return (limit < 0) ? 1 : 0;
}
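/*
 * Editor's sketch (hypothetical helper, not used by this driver):
 * do_mdio_op() above is the classic "kick the hardware, then busy-wait
 * on a status bit with a bounded spin" pattern.  Note the off-by-one
 * subtlety there: with a post-decrement in the loop condition, "limit"
 * ends at -1 on timeout, which is why it tests (limit < 0).  The same
 * idea in generic form, returning a proper error code:
 */
static inline int example_poll_status_bit(void __iomem *reg, u32 mask)
{
	int limit = 1000;		/* ~1ms worst case at udelay(1) */

	do {
		if (readl(reg) & mask)
			return 0;	/* status bit set, done */
		udelay(1);
	} while (limit-- > 0);
	return -ETIMEDOUT;		/* hardware never responded */
}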
/*
* MII internal read callback
*/
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
int regnum)
{
u32 tmp, val;
tmp = regnum << ENET_MIIDATA_REG_SHIFT;
tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
tmp |= ENET_MIIDATA_OP_READ_MASK;
if (do_mdio_op(priv, tmp))
return -1;
val = enet_readl(priv, ENET_MIIDATA_REG);
val &= 0xffff;
return val;
}
/*
* MII internal write callback
*/
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
int regnum, u16 value)
{
u32 tmp;
tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
tmp |= ENET_MIIDATA_OP_WRITE_MASK;
(void)do_mdio_op(priv, tmp);
return 0;
}
/*
* MII read callback from phylib
*/
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
int regnum)
{
return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}
/*
* MII write callback from phylib
*/
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
int regnum, u16 value)
{
return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}
/*
* MII read callback from mii core
*/
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
int regnum)
{
return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}
/*
* MII write callback from mii core
*/
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
int regnum, int value)
{
bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}
/*
* refill rx queue
*/
static int bcm_enet_refill_rx(struct net_device *dev)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
struct sk_buff *skb;
dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
if (!priv->rx_skb[desc_idx]) {
skb = netdev_alloc_skb(dev, priv->rx_skb_size);
if (!skb)
break;
priv->rx_skb[desc_idx] = skb;
p = dma_map_single(&priv->pdev->dev, skb->data,
priv->rx_skb_size,
DMA_FROM_DEVICE);
desc->address = p;
}
len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= DMADESC_WRAP_MASK;
priv->rx_dirty_desc = 0;
} else {
priv->rx_dirty_desc++;
}
wmb();
desc->len_stat = len_stat;
priv->rx_desc_count++;
/* tell dma engine we allocated one buffer */
enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
}
/* If rx ring is still empty, set a timer to try allocating
* again at a later time. */
if (priv->rx_desc_count == 0 && netif_running(dev)) {
dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
priv->rx_timeout.expires = jiffies + HZ;
add_timer(&priv->rx_timeout);
}
return 0;
}
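/*
 * Editor's sketch (condensed from the refill loop above): a descriptor
 * is handed to the DMA engine by composing a len_stat word carrying the
 * buffer length, the OWNER bit and - on the last ring slot - the WRAP
 * bit so the engine cycles back to descriptor 0:
 */
static inline u32 example_rx_len_stat(u32 buf_len, bool last_in_ring)
{
	u32 len_stat = buf_len << DMADESC_LENGTH_SHIFT;

	len_stat |= DMADESC_OWNER_MASK;		/* hardware owns it now */
	if (last_in_ring)
		len_stat |= DMADESC_WRAP_MASK;	/* ring wraps after this */
	return len_stat;
}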
/*
* timer callback to defer refill rx queue in case we're OOM
*/
static void bcm_enet_refill_rx_timer(unsigned long data)
{
struct net_device *dev;
struct bcm_enet_priv *priv;
dev = (struct net_device *)data;
priv = netdev_priv(dev);
spin_lock(&priv->rx_lock);
bcm_enet_refill_rx((struct net_device *)data);
spin_unlock(&priv->rx_lock);
}
/*
* extract packet from rx queue
*/
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
struct bcm_enet_priv *priv;
struct device *kdev;
int processed;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
processed = 0;
/* don't scan the ring further than the number of refilled
* descriptors */
if (budget > priv->rx_desc_count)
budget = priv->rx_desc_count;
do {
struct bcm_enet_desc *desc;
struct sk_buff *skb;
int desc_idx;
u32 len_stat;
unsigned int len;
desc_idx = priv->rx_curr_desc;
desc = &priv->rx_desc_cpu[desc_idx];
/* make sure we actually read the descriptor status on
* each loop iteration */
rmb();
len_stat = desc->len_stat;
/* break if dma ownership belongs to hw */
if (len_stat & DMADESC_OWNER_MASK)
break;
processed++;
priv->rx_curr_desc++;
if (priv->rx_curr_desc == priv->rx_ring_size)
priv->rx_curr_desc = 0;
priv->rx_desc_count--;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
dev->stats.rx_dropped++;
continue;
}
/* recycle packet if it's marked as bad */
if (unlikely(len_stat & DMADESC_ERR_MASK)) {
dev->stats.rx_errors++;
if (len_stat & DMADESC_OVSIZE_MASK)
dev->stats.rx_length_errors++;
if (len_stat & DMADESC_CRC_MASK)
dev->stats.rx_crc_errors++;
if (len_stat & DMADESC_UNDER_MASK)
dev->stats.rx_frame_errors++;
if (len_stat & DMADESC_OV_MASK)
dev->stats.rx_fifo_errors++;
continue;
}
/* valid packet */
skb = priv->rx_skb[desc_idx];
len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
/* don't include FCS */
len -= 4;
if (len < copybreak) {
struct sk_buff *nskb;
nskb = netdev_alloc_skb_ip_align(dev, len);
if (!nskb) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
continue;
}
dma_sync_single_for_cpu(kdev, desc->address,
len, DMA_FROM_DEVICE);
memcpy(nskb->data, skb->data, len);
dma_sync_single_for_device(kdev, desc->address,
len, DMA_FROM_DEVICE);
skb = nskb;
} else {
dma_unmap_single(&priv->pdev->dev, desc->address,
priv->rx_skb_size, DMA_FROM_DEVICE);
priv->rx_skb[desc_idx] = NULL;
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
netif_receive_skb(skb);
} while (--budget > 0);
if (processed || !priv->rx_desc_count) {
bcm_enet_refill_rx(dev);
/* kick rx dma */
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->rx_chan));
}
return processed;
}
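/*
 * Editor's sketch (hypothetical, condensed from the rx loop above): the
 * "copybreak" test is a common rx optimisation - frames shorter than
 * the threshold are copied into a small fresh skb so the already
 * DMA-mapped ring buffer can be reused in place, while larger frames
 * are unmapped and handed up zero-copy:
 */
static struct sk_buff *example_rx_copybreak(struct net_device *dev,
					    struct device *kdev,
					    struct sk_buff *ring_skb,
					    dma_addr_t addr,
					    unsigned int len,
					    unsigned int buf_size)
{
	if (len < copybreak) {
		struct sk_buff *nskb = netdev_alloc_skb_ip_align(dev, len);

		if (!nskb)
			return NULL;	/* caller drops the frame */
		/* bounce the data through the CPU; ring buffer stays mapped */
		dma_sync_single_for_cpu(kdev, addr, len, DMA_FROM_DEVICE);
		memcpy(nskb->data, ring_skb->data, len);
		dma_sync_single_for_device(kdev, addr, len, DMA_FROM_DEVICE);
		return nskb;
	}
	/* large frame: unmap and pass the ring buffer itself up the stack */
	dma_unmap_single(kdev, addr, buf_size, DMA_FROM_DEVICE);
	return ring_skb;
}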
/*
* try to or force reclaim of transmitted buffers
*/
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
struct bcm_enet_priv *priv;
int released;
priv = netdev_priv(dev);
released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
struct bcm_enet_desc *desc;
struct sk_buff *skb;
/* We run in a bh and fight against start_xmit, which
* is called with bh disabled */
spin_lock(&priv->tx_lock);
desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
spin_unlock(&priv->tx_lock);
break;
}
/* ensure the other fields of the descriptor were not
* read before we checked ownership */
rmb();
skb = priv->tx_skb[priv->tx_dirty_desc];
priv->tx_skb[priv->tx_dirty_desc] = NULL;
dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
DMA_TO_DEVICE);
priv->tx_dirty_desc++;
if (priv->tx_dirty_desc == priv->tx_ring_size)
priv->tx_dirty_desc = 0;
priv->tx_desc_count++;
spin_unlock(&priv->tx_lock);
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
dev_kfree_skb(skb);
released++;
}
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
return released;
}
/*
* poll func, called by network core
*/
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
int tx_work_done, rx_work_done;
priv = container_of(napi, struct bcm_enet_priv, napi);
dev = priv->net_dev;
/* ack interrupts */
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IR_REG(priv->rx_chan));
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IR_REG(priv->tx_chan));
/* reclaim sent skb */
tx_work_done = bcm_enet_tx_reclaim(dev, 0);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
spin_unlock(&priv->rx_lock);
if (rx_work_done >= budget || tx_work_done > 0) {
/* rx/tx queue is not yet empty/clean */
return rx_work_done;
}
/* no more packets in the rx/tx queues; remove the device
* from the poll list */
napi_complete(napi);
/* restore rx/tx interrupt */
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IRMASK_REG(priv->rx_chan));
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IRMASK_REG(priv->tx_chan));
return rx_work_done;
}
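/*
 * Editor's note (illustrative skeleton, not driver code): bcm_enet_poll()
 * follows the standard NAPI contract - the ISR masks device interrupts
 * and schedules the poller, the work happens in softirq context, and
 * interrupts are re-enabled only once a poll round completes under
 * budget:
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = 0;	/* count of rx frames processed, up to budget */

	/* ...drain the rx ring here, incrementing done... */
	if (done < budget) {
		/* queue drained: leave polling mode, unmask device IRQs */
		napi_complete(napi);
		/* ...write the interrupt-enable registers here... */
	}
	/* returning == budget keeps the device on the NAPI poll list */
	return done;
}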
/*
* mac interrupt handler
*/
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
struct net_device *dev;
struct bcm_enet_priv *priv;
u32 stat;
dev = dev_id;
priv = netdev_priv(dev);
stat = enet_readl(priv, ENET_IR_REG);
if (!(stat & ENET_IR_MIB))
return IRQ_NONE;
/* clear & mask interrupt */
enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
enet_writel(priv, 0, ENET_IRMASK_REG);
/* read mib registers in workqueue */
schedule_work(&priv->mib_update_task);
return IRQ_HANDLED;
}
/*
* rx/tx dma interrupt handler
*/
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
struct net_device *dev;
struct bcm_enet_priv *priv;
dev = dev_id;
priv = netdev_priv(dev);
/* mask rx/tx interrupts */
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
napi_schedule(&priv->napi);
return IRQ_HANDLED;
}
/*
* tx request callback
*/
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct bcm_enet_desc *desc;
u32 len_stat;
int ret;
priv = netdev_priv(dev);
/* lock against tx reclaim */
spin_lock(&priv->tx_lock);
/* make sure the tx hw queue is not full; this should never
* happen since we stop the queue before it fills up */
if (unlikely(!priv->tx_desc_count)) {
netif_stop_queue(dev);
dev_err(&priv->pdev->dev, "xmit called with no tx desc "
"available?\n");
ret = NETDEV_TX_BUSY;
goto out_unlock;
}
/* point to the next available desc */
desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
priv->tx_skb[priv->tx_curr_desc] = skb;
/* fill descriptor */
desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
len_stat |= DMADESC_ESOP_MASK |
DMADESC_APPEND_CRC |
DMADESC_OWNER_MASK;
priv->tx_curr_desc++;
if (priv->tx_curr_desc == priv->tx_ring_size) {
priv->tx_curr_desc = 0;
len_stat |= DMADESC_WRAP_MASK;
}
priv->tx_desc_count--;
/* the dma engine might already be polling; make sure we
* update the descriptor fields in the correct order */
wmb();
desc->len_stat = len_stat;
wmb();
/* kick tx dma */
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->tx_chan));
/* stop queue if no more desc available */
if (!priv->tx_desc_count)
netif_stop_queue(dev);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
ret = NETDEV_TX_OK;
out_unlock:
spin_unlock(&priv->tx_lock);
return ret;
}
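/*
 * Editor's sketch (assuming struct bcm_enet_desc from bcm63xx_enet.h):
 * the two wmb() calls above implement the usual descriptor "publish"
 * pattern - the payload fields must be globally visible before the
 * OWNER bit flips, and the OWNER write must be visible before the
 * doorbell kick:
 */
static inline void example_publish_desc(struct bcm_enet_desc *desc,
					dma_addr_t mapping, u32 len_stat)
{
	desc->address = mapping;	/* payload fields first */
	wmb();				/* #1: fields before OWNER flips */
	desc->len_stat = len_stat;	/* OWNER bit hands desc to hw */
	wmb();				/* #2: OWNER before doorbell write */
}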
/*
* Change the interface's mac address.
*/
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
struct bcm_enet_priv *priv;
struct sockaddr *addr = p;
u32 val;
priv = netdev_priv(dev);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
/* use perfect match register 0 to store my mac address */
val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
(dev->dev_addr[4] << 8) | dev->dev_addr[5];
enet_writel(priv, val, ENET_PML_REG(0));
val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
val |= ENET_PMH_DATAVALID_MASK;
enet_writel(priv, val, ENET_PMH_REG(0));
return 0;
}
/*
* Change rx mode (promiscuous/allmulti) and update multicast list
*/
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct netdev_hw_addr *ha;
u32 val;
int i;
priv = netdev_priv(dev);
val = enet_readl(priv, ENET_RXCFG_REG);
if (dev->flags & IFF_PROMISC)
val |= ENET_RXCFG_PROMISC_MASK;
else
val &= ~ENET_RXCFG_PROMISC_MASK;
/* only 3 perfect match registers left, first one is used for
* own mac address */
if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
val |= ENET_RXCFG_ALLMCAST_MASK;
else
val &= ~ENET_RXCFG_ALLMCAST_MASK;
/* no need to set perfect match registers if we catch all
* multicast */
if (val & ENET_RXCFG_ALLMCAST_MASK) {
enet_writel(priv, val, ENET_RXCFG_REG);
return;
}
i = 0;
netdev_for_each_mc_addr(ha, dev) {
u8 *dmi_addr;
u32 tmp;
if (i == 3)
break;
/* update perfect match registers */
dmi_addr = ha->addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
(dmi_addr[4] << 8) | dmi_addr[5];
enet_writel(priv, tmp, ENET_PML_REG(i + 1));
tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
tmp |= ENET_PMH_DATAVALID_MASK;
enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
}
for (; i < 3; i++) {
enet_writel(priv, 0, ENET_PML_REG(i + 1));
enet_writel(priv, 0, ENET_PMH_REG(i + 1));
}
enet_writel(priv, val, ENET_RXCFG_REG);
}
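/*
 * Editor's sketch (mirrors the packing used above): both the station
 * address and the multicast filter use "perfect match" register pairs,
 * with the low four MAC bytes in PML and the top two bytes plus a
 * valid bit in PMH:
 */
static inline void example_write_perfect_match(struct bcm_enet_priv *priv,
					       const u8 *mac, int slot)
{
	u32 lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
	u32 hi = (mac[0] << 8) | mac[1];

	enet_writel(priv, lo, ENET_PML_REG(slot));
	enet_writel(priv, hi | ENET_PMH_DATAVALID_MASK, ENET_PMH_REG(slot));
}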
/*
* set mac duplex parameters
*/
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
u32 val;
val = enet_readl(priv, ENET_TXCTL_REG);
if (fullduplex)
val |= ENET_TXCTL_FD_MASK;
else
val &= ~ENET_TXCTL_FD_MASK;
enet_writel(priv, val, ENET_TXCTL_REG);
}
/*
* set mac flow control parameters
*/
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
u32 val;
/* rx flow control (pause frame handling) */
val = enet_readl(priv, ENET_RXCFG_REG);
if (rx_en)
val |= ENET_RXCFG_ENFLOW_MASK;
else
val &= ~ENET_RXCFG_ENFLOW_MASK;
enet_writel(priv, val, ENET_RXCFG_REG);
/* tx flow control (pause frame generation) */
val = enet_dma_readl(priv, ENETDMA_CFG_REG);
if (tx_en)
val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
else
val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
/*
* link changed callback (from phylib)
*/
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct phy_device *phydev;
int status_changed;
priv = netdev_priv(dev);
phydev = priv->phydev;
status_changed = 0;
if (priv->old_link != phydev->link) {
status_changed = 1;
priv->old_link = phydev->link;
}
/* reflect duplex change in mac configuration */
if (phydev->link && phydev->duplex != priv->old_duplex) {
bcm_enet_set_duplex(priv,
(phydev->duplex == DUPLEX_FULL) ? 1 : 0);
status_changed = 1;
priv->old_duplex = phydev->duplex;
}
/* enable flow control if the remote advertises it (trust phylib
* to check that duplex is full) */
if (phydev->link && phydev->pause != priv->old_pause) {
int rx_pause_en, tx_pause_en;
if (phydev->pause) {
/* pause was advertised by lpa and us */
rx_pause_en = 1;
tx_pause_en = 1;
} else if (!priv->pause_auto) {
/* pause setting overridden by the user */
rx_pause_en = priv->pause_rx;
tx_pause_en = priv->pause_tx;
} else {
rx_pause_en = 0;
tx_pause_en = 0;
}
bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
status_changed = 1;
priv->old_pause = phydev->pause;
}
if (status_changed) {
pr_info("%s: link %s", dev->name, phydev->link ?
"UP" : "DOWN");
if (phydev->link)
pr_cont(" - %d/%s - flow control %s", phydev->speed,
DUPLEX_FULL == phydev->duplex ? "full" : "half",
phydev->pause == 1 ? "rx&tx" : "off");
pr_cont("\n");
}
}
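/*
 * Editor's note (illustrative): the pause handling above resolves flow
 * control from three inputs - the link partner's advertisement
 * (phydev->pause), whether autonegotiated pause was requested
 * (pause_auto), and the user's manual rx/tx settings:
 *
 *	phydev->pause	pause_auto	resulting rx_en/tx_en
 *	1		any		1/1 (both ends agreed)
 *	0		0		user's pause_rx/pause_tx
 *	0		1		0/0 (negotiation failed)
 */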
/*
* link changed callback (if phylib is not used)
*/
static void bcm_enet_adjust_link(struct net_device *dev)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
bcm_enet_set_duplex(priv, priv->force_duplex_full);
bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
netif_carrier_on(dev);
pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
dev->name,
priv->force_speed_100 ? 100 : 10,
priv->force_duplex_full ? "full" : "half",
priv->pause_rx ? "rx" : "off",
priv->pause_tx ? "tx" : "off");
}
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
static int bcm_enet_open(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct sockaddr addr;
struct device *kdev;
struct phy_device *phydev;
int i, ret;
unsigned int size;
char phy_id[MII_BUS_ID_SIZE + 3];
void *p;
u32 val;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
if (priv->has_phy) {
/* connect to PHY */
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
priv->mii_bus->id, priv->phy_id);
phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(kdev, "could not attach to PHY\n");
return PTR_ERR(phydev);
}
/* mask with MAC supported features */
phydev->supported &= (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_Pause |
SUPPORTED_MII);
phydev->advertising = phydev->supported;
if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
phydev->advertising |= SUPPORTED_Pause;
else
phydev->advertising &= ~SUPPORTED_Pause;
dev_info(kdev, "attached PHY at address %d [%s]\n",
phydev->addr, phydev->drv->name);
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
priv->phydev = phydev;
}
/* mask all interrupts and request them */
enet_writel(priv, 0, ENET_IRMASK_REG);
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
if (ret)
goto out_phy_disconnect;
ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
dev->name, dev);
if (ret)
goto out_freeirq;
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
IRQF_DISABLED, dev->name, dev);
if (ret)
goto out_freeirq_rx;
/* initialize perfect match registers */
for (i = 0; i < 4; i++) {
enet_writel(priv, 0, ENET_PML_REG(i));
enet_writel(priv, 0, ENET_PMH_REG(i));
}
/* write device mac address */
memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
bcm_enet_set_mac_address(dev, &addr);
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
GFP_KERNEL);
if (!priv->tx_skb) {
dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
priv->tx_desc_count = priv->tx_ring_size;
priv->tx_dirty_desc = 0;
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
/* init & fill rx ring with skbs */
priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
GFP_KERNEL);
if (!priv->rx_skb) {
dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
priv->rx_desc_count = 0;
priv->rx_dirty_desc = 0;
priv->rx_curr_desc = 0;
/* initialize flow control buffer allocation */
enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMA_BUFALLOC_REG(priv->rx_chan));
if (bcm_enet_refill_rx(dev)) {
dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out;
}
/* write rx & tx ring addresses */
enet_dma_writel(priv, priv->rx_desc_dma,
ENETDMA_RSTART_REG(priv->rx_chan));
enet_dma_writel(priv, priv->tx_desc_dma,
ENETDMA_RSTART_REG(priv->tx_chan));
/* clear remaining state ram for rx & tx channel */
enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
/* set max rx/tx length */
enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
/* set dma maximum burst len */
enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
ENETDMA_MAXBURST_REG(priv->rx_chan));
enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
ENETDMA_MAXBURST_REG(priv->tx_chan));
/* set correct transmit fifo watermark */
enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
/* set flow control low/high threshold to 1/3 / 2/3 */
val = priv->rx_ring_size / 3;
enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
val = (priv->rx_ring_size * 2) / 3;
enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
/* all set, enable mac and interrupts, start dma engine and
* kick rx dma channel */
wmb();
val = enet_readl(priv, ENET_CTL_REG);
val |= ENET_CTL_ENABLE_MASK;
enet_writel(priv, val, ENET_CTL_REG);
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
ENETDMA_CHANCFG_REG(priv->rx_chan));
/* watch "mib counters about to overflow" interrupt */
enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
/* watch "packet transferred" interrupt in rx and tx */
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IR_REG(priv->rx_chan));
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IR_REG(priv->tx_chan));
/* make sure we enable napi before rx interrupt */
napi_enable(&priv->napi);
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IRMASK_REG(priv->rx_chan));
enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
ENETDMA_IRMASK_REG(priv->tx_chan));
if (priv->has_phy)
phy_start(priv->phydev);
else
bcm_enet_adjust_link(dev);
netif_start_queue(dev);
return 0;
out:
for (i = 0; i < priv->rx_ring_size; i++) {
struct bcm_enet_desc *desc;
if (!priv->rx_skb[i])
continue;
desc = &priv->rx_desc_cpu[i];
dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
DMA_FROM_DEVICE);
kfree_skb(priv->rx_skb[i]);
}
kfree(priv->rx_skb);
out_free_tx_skb:
kfree(priv->tx_skb);
out_free_tx_ring:
dma_free_coherent(kdev, priv->tx_desc_alloc_size,
priv->tx_desc_cpu, priv->tx_desc_dma);
out_free_rx_ring:
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
out_freeirq_tx:
free_irq(priv->irq_tx, dev);
out_freeirq_rx:
free_irq(priv->irq_rx, dev);
out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
phy_disconnect(priv->phydev);
return ret;
}
/*
* disable mac
*/
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
int limit;
u32 val;
val = enet_readl(priv, ENET_CTL_REG);
val |= ENET_CTL_DISABLE_MASK;
enet_writel(priv, val, ENET_CTL_REG);
limit = 1000;
do {
u32 val;
val = enet_readl(priv, ENET_CTL_REG);
if (!(val & ENET_CTL_DISABLE_MASK))
break;
udelay(1);
} while (limit--);
}
/*
* disable dma in given channel
*/
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
int limit;
enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
limit = 1000;
do {
u32 val;
val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
if (!(val & ENETDMA_CHANCFG_EN_MASK))
break;
udelay(1);
} while (limit--);
}
/*
* stop callback
*/
static int bcm_enet_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
netif_stop_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(priv->phydev);
del_timer_sync(&priv->rx_timeout);
/* mask all interrupts */
enet_writel(priv, 0, ENET_IRMASK_REG);
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
/* make sure no mib update is scheduled */
cancel_work_sync(&priv->mib_update_task);
/* disable dma & mac */
bcm_enet_disable_dma(priv, priv->tx_chan);
bcm_enet_disable_dma(priv, priv->rx_chan);
bcm_enet_disable_mac(priv);
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
/* free the rx skb ring */
for (i = 0; i < priv->rx_ring_size; i++) {
struct bcm_enet_desc *desc;
if (!priv->rx_skb[i])
continue;
desc = &priv->rx_desc_cpu[i];
dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
DMA_FROM_DEVICE);
kfree_skb(priv->rx_skb[i]);
}
/* free remaining allocated memory */
kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
dma_free_coherent(kdev, priv->tx_desc_alloc_size,
priv->tx_desc_cpu, priv->tx_desc_dma);
free_irq(priv->irq_tx, dev);
free_irq(priv->irq_rx, dev);
free_irq(dev->irq, dev);
/* release phy */
if (priv->has_phy) {
phy_disconnect(priv->phydev);
priv->phydev = NULL;
}
return 0;
}
/*
* ethtool callbacks
*/
struct bcm_enet_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
int mib_reg;
};
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
offsetof(struct net_device_stats, m)
static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
{ "rx_packets", DEV_STAT(rx_packets), -1 },
{ "tx_packets", DEV_STAT(tx_packets), -1 },
{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
{ "rx_errors", DEV_STAT(rx_errors), -1 },
{ "tx_errors", DEV_STAT(tx_errors), -1 },
{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
{ "tx_dropped", DEV_STAT(tx_dropped), -1 },
{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};
#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)
static const u32 unused_mib_regs[] = {
ETH_MIB_TX_ALL_OCTETS,
ETH_MIB_TX_ALL_PKTS,
ETH_MIB_RX_ALL_OCTETS,
ETH_MIB_RX_ALL_PKTS,
};
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
strncpy(drvinfo->version, bcm_enet_driver_version, 32);
strncpy(drvinfo->fw_version, "N/A", 32);
strncpy(drvinfo->bus_info, "bcm63xx", 32);
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return BCM_ENET_STATS_LEN;
default:
return -EINVAL;
}
}
static void bcm_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
bcm_enet_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
}
break;
}
}
static void update_mib_counters(struct bcm_enet_priv *priv)
{
int i;
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
const struct bcm_enet_stats *s;
u32 val;
char *p;
s = &bcm_enet_gstrings_stats[i];
if (s->mib_reg == -1)
continue;
val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
p = (char *)priv + s->stat_offset;
if (s->sizeof_stat == sizeof(u64))
*(u64 *)p += val;
else
*(u32 *)p += val;
}
/* also empty unused mib counters to make sure mib counter
* overflow interrupt is cleared */
for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
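/*
 * Editor's sketch (hypothetical): the GEN_STAT/DEV_STAT macros above
 * let one generic loop serve every statistic by recording each
 * counter's size and byte offset within its owning structure; reading
 * a counter back is then pure pointer arithmetic:
 */
static u64 example_read_stat(const void *base, const struct bcm_enet_stats *s)
{
	const char *p = (const char *)base + s->stat_offset;

	/* MIB counters are u64; netdev counters may be narrower */
	return (s->sizeof_stat == sizeof(u64)) ? *(const u64 *)p
					       : *(const u32 *)p;
}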
static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
struct bcm_enet_priv *priv;
priv = container_of(t, struct bcm_enet_priv, mib_update_task);
mutex_lock(&priv->mib_update_lock);
update_mib_counters(priv);
mutex_unlock(&priv->mib_update_lock);
/* reenable mib interrupt */
if (netif_running(priv->net_dev))
enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}
static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats,
u64 *data)
{
struct bcm_enet_priv *priv;
int i;
priv = netdev_priv(netdev);
mutex_lock(&priv->mib_update_lock);
update_mib_counters(priv);
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
const struct bcm_enet_stats *s;
char *p;
s = &bcm_enet_gstrings_stats[i];
if (s->mib_reg == -1)
p = (char *)&netdev->stats;
else
p = (char *)priv;
p += s->stat_offset;
data[i] = (s->sizeof_stat == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
mutex_unlock(&priv->mib_update_lock);
}
static int bcm_enet_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
cmd->maxrxpkt = 0;
cmd->maxtxpkt = 0;
if (priv->has_phy) {
if (!priv->phydev)
return -ENODEV;
return phy_ethtool_gset(priv->phydev, cmd);
} else {
cmd->autoneg = 0;
ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
? SPEED_100 : SPEED_10));
cmd->duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
cmd->supported = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
cmd->advertising = 0;
cmd->port = PORT_MII;
cmd->transceiver = XCVR_EXTERNAL;
}
return 0;
}
static int bcm_enet_set_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
if (!priv->phydev)
return -ENODEV;
return phy_ethtool_sset(priv->phydev, cmd);
} else {
if (cmd->autoneg ||
(cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
cmd->port != PORT_MII)
return -EINVAL;
priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
if (netif_running(dev))
bcm_enet_adjust_link(dev);
return 0;
}
}
static void bcm_enet_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
/* rx/tx ring is actually only limited by memory */
ering->rx_max_pending = 8192;
ering->tx_max_pending = 8192;
ering->rx_pending = priv->rx_ring_size;
ering->tx_pending = priv->tx_ring_size;
}
static int bcm_enet_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct bcm_enet_priv *priv;
int was_running;
priv = netdev_priv(dev);
was_running = 0;
if (netif_running(dev)) {
bcm_enet_stop(dev);
was_running = 1;
}
priv->rx_ring_size = ering->rx_pending;
priv->tx_ring_size = ering->tx_pending;
if (was_running) {
int err;
err = bcm_enet_open(dev);
if (err)
dev_close(dev);
else
bcm_enet_set_multicast_list(dev);
}
return 0;
}
static void bcm_enet_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *ecmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
ecmd->autoneg = priv->pause_auto;
ecmd->rx_pause = priv->pause_rx;
ecmd->tx_pause = priv->pause_tx;
}
static int bcm_enet_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *ecmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
/* asymmetric pause mode is not supported; it would
* actually be possible, but the integrated PHY has a
* read-only asym_pause bit */
return -EINVAL;
}
} else {
/* no pause autoneg on direct mii connection */
if (ecmd->autoneg)
return -EINVAL;
}
priv->pause_auto = ecmd->autoneg;
priv->pause_rx = ecmd->rx_pause;
priv->pause_tx = ecmd->tx_pause;
return 0;
}
static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.get_settings = bcm_enet_get_settings,
.set_settings = bcm_enet_set_settings,
.get_drvinfo = bcm_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = bcm_enet_get_ringparam,
.set_ringparam = bcm_enet_set_ringparam,
.get_pauseparam = bcm_enet_get_pauseparam,
.set_pauseparam = bcm_enet_set_pauseparam,
};
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
if (!priv->phydev)
return -ENODEV;
return phy_mii_ioctl(priv->phydev, rq, cmd);
} else {
struct mii_if_info mii;
mii.dev = dev;
mii.mdio_read = bcm_enet_mdio_read_mii;
mii.mdio_write = bcm_enet_mdio_write_mii;
mii.phy_id = 0;
mii.phy_id_mask = 0x3f;
mii.reg_num_mask = 0x1f;
return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}
}
/*
* calculate actual hardware mtu
*/
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
int actual_mtu;
actual_mtu = mtu;
/* add ethernet header + vlan tag size */
actual_mtu += VLAN_ETH_HLEN;
if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
return -EINVAL;
/*
 * set up the maximum size before we get an overflow mark in
 * the descriptor; note that this will not prevent reception
 * of big frames, as they will be split into multiple buffers
 * anyway
 */
priv->hw_mtu = actual_mtu;
/*
 * align the rx buffer size to the dma burst length, and account
 * for the FCS since it is appended
 */
priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
BCMENET_DMA_MAXBURST * 4);
return 0;
}
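/*
 * Editor's worked example (assuming VLAN_ETH_HLEN == 18, ETH_FCS_LEN
 * == 4 and BCMENET_DMA_MAXBURST == 16 words, as defined elsewhere in
 * this driver): for the default MTU of 1500, actual_mtu becomes
 * 1500 + 18 = 1518, and rx_skb_size = ALIGN(1518 + 4, 16 * 4) =
 * ALIGN(1522, 64) = 1536, so every rx buffer spans a whole number of
 * maximum-size DMA bursts.
 */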
/*
* adjust mtu, can't be called while device is running
*/
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
int ret;
if (netif_running(dev))
return -EBUSY;
ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
if (ret)
return ret;
dev->mtu = new_mtu;
return 0;
}
/*
* preinit hardware to allow mii operation while device is down
*/
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
u32 val;
int limit;
/* make sure mac is disabled */
bcm_enet_disable_mac(priv);
/* soft reset mac */
val = ENET_CTL_SRESET_MASK;
enet_writel(priv, val, ENET_CTL_REG);
wmb();
limit = 1000;
do {
val = enet_readl(priv, ENET_CTL_REG);
if (!(val & ENET_CTL_SRESET_MASK))
break;
udelay(1);
} while (limit--);
/* select correct mii interface */
val = enet_readl(priv, ENET_CTL_REG);
if (priv->use_external_mii)
val |= ENET_CTL_EPHYSEL_MASK;
else
val &= ~ENET_CTL_EPHYSEL_MASK;
enet_writel(priv, val, ENET_CTL_REG);
/* turn on mdc clock */
enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
/* set mib counters to self-clear when read */
val = enet_readl(priv, ENET_MIBCTL_REG);
val |= ENET_MIBCTL_RDCLEAR_MASK;
enet_writel(priv, val, ENET_MIBCTL_REG);
}
static const struct net_device_ops bcm_enet_ops = {
.ndo_open = bcm_enet_open,
.ndo_stop = bcm_enet_stop,
.ndo_start_xmit = bcm_enet_start_xmit,
.ndo_set_mac_address = bcm_enet_set_mac_address,
.ndo_set_rx_mode = bcm_enet_set_multicast_list,
.ndo_do_ioctl = bcm_enet_ioctl,
.ndo_change_mtu = bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcm_enet_netpoll,
#endif
};
/*
* allocate netdevice, request register memory and register device.
*/
static int __devinit bcm_enet_probe(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
const char *clk_name;
unsigned int iomem_size;
int i, ret;
/* stop if shared driver failed, assume driver->probe will be
* called in the same order we register devices (correct ?) */
if (!bcm_enet_shared_base)
return -ENODEV;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
ret = 0;
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
ret = compute_hw_mtu(priv, dev->mtu);
if (ret)
goto out;
iomem_size = resource_size(res_mem);
if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
ret = -EBUSY;
goto out;
}
priv->base = ioremap(res_mem->start, iomem_size);
if (priv->base == NULL) {
ret = -ENOMEM;
goto out_release_mem;
}
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
priv->mac_id = pdev->id;
/* get rx & tx dma channel id for this mac */
if (priv->mac_id == 0) {
priv->rx_chan = 0;
priv->tx_chan = 1;
clk_name = "enet0";
} else {
priv->rx_chan = 2;
priv->tx_chan = 3;
clk_name = "enet1";
}
priv->mac_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
goto out_unmap;
}
clk_enable(priv->mac_clk);
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
pd = pdev->dev.platform_data;
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
priv->has_phy = pd->has_phy;
priv->phy_id = pd->phy_id;
priv->has_phy_interrupt = pd->has_phy_interrupt;
priv->phy_interrupt = pd->phy_interrupt;
priv->use_external_mii = !pd->use_internal_phy;
priv->pause_auto = pd->pause_auto;
priv->pause_rx = pd->pause_rx;
priv->pause_tx = pd->pause_tx;
priv->force_duplex_full = pd->force_duplex_full;
priv->force_speed_100 = pd->force_speed_100;
}
if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
/* using internal PHY, enable clock */
priv->phy_clk = clk_get(&pdev->dev, "ephy");
if (IS_ERR(priv->phy_clk)) {
ret = PTR_ERR(priv->phy_clk);
priv->phy_clk = NULL;
goto out_put_clk_mac;
}
clk_enable(priv->phy_clk);
}
/* do minimal hardware init to be able to probe mii bus */
bcm_enet_hw_preinit(priv);
/* MII bus registration */
if (priv->has_phy) {
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus) {
ret = -ENOMEM;
goto out_uninit_hw;
}
bus = priv->mii_bus;
bus->name = "bcm63xx_enet MII bus";
bus->parent = &pdev->dev;
bus->priv = priv;
bus->read = bcm_enet_mdio_read_phylib;
bus->write = bcm_enet_mdio_write_phylib;
sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
/* only probe the bus where we think the PHY is, because
* the mdio read operation returns 0 instead of 0xffff
* if a slave is not present on the hw */
bus->phy_mask = ~(1 << priv->phy_id);
bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto out_free_mdio;
}
if (priv->has_phy_interrupt)
bus->irq[priv->phy_id] = priv->phy_interrupt;
else
bus->irq[priv->phy_id] = PHY_POLL;
ret = mdiobus_register(bus);
if (ret) {
dev_err(&pdev->dev, "unable to register mdio bus\n");
goto out_free_mdio;
}
	} else {
		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}
spin_lock_init(&priv->rx_lock);
/* init rx timeout (used for oom) */
init_timer(&priv->rx_timeout);
priv->rx_timeout.function = bcm_enet_refill_rx_timer;
priv->rx_timeout.data = (unsigned long)dev;
/* init the mib update lock&work */
mutex_init(&priv->mib_update_lock);
INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
/* zero mib counters */
for (i = 0; i < ENET_MIB_REG_COUNT; i++)
enet_writel(priv, 0, ENET_MIB_REG(i));
/* register netdevice */
dev->netdev_ops = &bcm_enet_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev);
if (ret)
goto out_unregister_mdio;
netif_carrier_off(dev);
platform_set_drvdata(pdev, dev);
priv->pdev = pdev;
priv->net_dev = dev;
return 0;
out_unregister_mdio:
if (priv->mii_bus) {
mdiobus_unregister(priv->mii_bus);
kfree(priv->mii_bus->irq);
}
out_free_mdio:
if (priv->mii_bus)
mdiobus_free(priv->mii_bus);
out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
clk_disable(priv->phy_clk);
clk_put(priv->phy_clk);
}
out_put_clk_mac:
clk_disable(priv->mac_clk);
clk_put(priv->mac_clk);
out_unmap:
iounmap(priv->base);
out_release_mem:
release_mem_region(res_mem->start, iomem_size);
out:
free_netdev(dev);
return ret;
}
/*
* exit func, stops hardware and unregisters netdevice
*/
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
priv = netdev_priv(dev);
unregister_netdev(dev);
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->has_phy) {
mdiobus_unregister(priv->mii_bus);
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
} else {
struct bcm63xx_enet_platform_data *pd;
pd = pdev->dev.platform_data;
if (pd && pd->mii_config)
pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii);
}
/* release device resources */
iounmap(priv->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
/* disable hw block clocks */
if (priv->phy_clk) {
clk_disable(priv->phy_clk);
clk_put(priv->phy_clk);
}
clk_disable(priv->mac_clk);
clk_put(priv->mac_clk);
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
.remove = __devexit_p(bcm_enet_remove),
.driver = {
.name = "bcm63xx_enet",
.owner = THIS_MODULE,
},
};
/*
* reserve & remap memory space shared between all macs
*/
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int iomem_size;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
iomem_size = resource_size(res);
if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
return -EBUSY;
bcm_enet_shared_base = ioremap(res->start, iomem_size);
if (!bcm_enet_shared_base) {
release_mem_region(res->start, iomem_size);
return -ENOMEM;
}
return 0;
}
static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
struct resource *res;
iounmap(bcm_enet_shared_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
return 0;
}
/*
* this "shared" driver is needed because both macs share a single
* address space
*/
struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
.remove = __devexit_p(bcm_enet_shared_remove),
.driver = {
.name = "bcm63xx_enet_shared",
.owner = THIS_MODULE,
},
};
/*
* entry point
*/
static int __init bcm_enet_init(void)
{
int ret;
ret = platform_driver_register(&bcm63xx_enet_shared_driver);
if (ret)
return ret;
ret = platform_driver_register(&bcm63xx_enet_driver);
if (ret)
platform_driver_unregister(&bcm63xx_enet_shared_driver);
return ret;
}
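/*
 * Editor's note (illustrative): the registration order above matters.
 * bcm_enet_probe() returns -ENODEV unless bcm_enet_shared_base was set
 * up by the shared driver's probe, so the shared driver must register
 * first; and if the second registration fails, the first is rolled
 * back, making the module load all-or-nothing.
 */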
static void __exit bcm_enet_exit(void)
{
platform_driver_unregister(&bcm63xx_enet_driver);
platform_driver_unregister(&bcm63xx_enet_shared_driver);
}
module_init(bcm_enet_init);
module_exit(bcm_enet_exit);
MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
0mark/linux-sunxi | drivers/net/wireless/libertas_tf/main.c | 5035 | 19558 | /*
* Copyright (C) 2008, cozybit Inc.
* Copyright (C) 2003-2006, Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include "libertas_tf.h"
#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX */
#define LBTF_FW_VER_MIN 0x05840300
#define LBTF_FW_VER_MAX 0x0584ffff
#define QOS_CONTROL_LEN 2
/* Module parameters */
unsigned int lbtf_debug;
EXPORT_SYMBOL_GPL(lbtf_debug);
module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
#ifdef DEBUG
"-dbg"
#endif
"";
struct workqueue_struct *lbtf_wq;
static const struct ieee80211_channel lbtf_channels[] = {
{ .center_freq = 2412, .hw_value = 1 },
{ .center_freq = 2417, .hw_value = 2 },
{ .center_freq = 2422, .hw_value = 3 },
{ .center_freq = 2427, .hw_value = 4 },
{ .center_freq = 2432, .hw_value = 5 },
{ .center_freq = 2437, .hw_value = 6 },
{ .center_freq = 2442, .hw_value = 7 },
{ .center_freq = 2447, .hw_value = 8 },
{ .center_freq = 2452, .hw_value = 9 },
{ .center_freq = 2457, .hw_value = 10 },
{ .center_freq = 2462, .hw_value = 11 },
{ .center_freq = 2467, .hw_value = 12 },
{ .center_freq = 2472, .hw_value = 13 },
{ .center_freq = 2484, .hw_value = 14 },
};
/* This table contains the hardware specific values for the modulation rates. */
static const struct ieee80211_rate lbtf_rates[] = {
{ .bitrate = 10,
.hw_value = 0, },
{ .bitrate = 20,
.hw_value = 1,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = 2,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = 3,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = 5,
.flags = 0 },
{ .bitrate = 90,
.hw_value = 6,
.flags = 0 },
{ .bitrate = 120,
.hw_value = 7,
.flags = 0 },
{ .bitrate = 180,
.hw_value = 8,
.flags = 0 },
{ .bitrate = 240,
.hw_value = 9,
.flags = 0 },
{ .bitrate = 360,
.hw_value = 10,
.flags = 0 },
{ .bitrate = 480,
.hw_value = 11,
.flags = 0 },
{ .bitrate = 540,
.hw_value = 12,
.flags = 0 },
};
static void lbtf_cmd_work(struct work_struct *work)
{
struct lbtf_private *priv = container_of(work, struct lbtf_private,
cmd_work);
lbtf_deb_enter(LBTF_DEB_CMD);
spin_lock_irq(&priv->driver_lock);
/* command response? */
if (priv->cmd_response_rxed) {
priv->cmd_response_rxed = 0;
spin_unlock_irq(&priv->driver_lock);
lbtf_process_rx_command(priv);
spin_lock_irq(&priv->driver_lock);
}
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
if (++priv->nr_retries > 10) {
lbtf_complete_command(priv, cmdnode,
-ETIMEDOUT);
priv->nr_retries = 0;
} else {
priv->cur_cmd = NULL;
/* Stick it back at the _top_ of the pending
* queue for immediate resubmission */
list_add(&cmdnode->list, &priv->cmdpendingq);
}
}
priv->cmd_timed_out = 0;
spin_unlock_irq(&priv->driver_lock);
if (!priv->fw_ready) {
lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
return;
}
/* Execute the next command */
if (!priv->cur_cmd)
lbtf_execute_next_command(priv);
lbtf_deb_leave(LBTF_DEB_CMD);
}
/**
* lbtf_setup_firmware: initialize firmware.
*
* @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success.
*/
static int lbtf_setup_firmware(struct lbtf_private *priv)
{
int ret = -1;
lbtf_deb_enter(LBTF_DEB_FW);
/*
* Read priv address from HW
*/
memset(priv->current_addr, 0xff, ETH_ALEN);
ret = lbtf_update_hw_spec(priv);
if (ret) {
ret = -1;
goto done;
}
lbtf_set_mac_control(priv);
lbtf_set_radio_control(priv);
ret = 0;
done:
lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
return ret;
}
/**
* This function handles the timeout of command sending:
* it flags the timeout and queues the command work, which
* re-submits the same command.
*/
static void command_timer_fn(unsigned long data)
{
struct lbtf_private *priv = (struct lbtf_private *)data;
unsigned long flags;
lbtf_deb_enter(LBTF_DEB_CMD);
spin_lock_irqsave(&priv->driver_lock, flags);
if (!priv->cur_cmd) {
printk(KERN_DEBUG "libertastf: command timer expired; "
"no pending command\n");
goto out;
}
printk(KERN_DEBUG "libertas: command %x timed out\n",
le16_to_cpu(priv->cur_cmd->cmdbuf->command));
priv->cmd_timed_out = 1;
queue_work(lbtf_wq, &priv->cmd_work);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbtf_deb_leave(LBTF_DEB_CMD);
}
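/*
 * Editor's sketch (condensed from above, locking omitted for brevity):
 * the timer callback runs in softirq context, so it only records the
 * timeout and defers the real recovery (resubmission or failure) to
 * process context via the workqueue - the usual split when recovery
 * may need to sleep:
 */
static void example_timeout_handler(unsigned long data)
{
	struct lbtf_private *priv = (struct lbtf_private *)data;

	priv->cmd_timed_out = 1;		/* record only; no recovery here */
	queue_work(lbtf_wq, &priv->cmd_work);	/* recover in lbtf_cmd_work() */
}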
static int lbtf_init_adapter(struct lbtf_private *priv)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
memset(priv->current_addr, 0xff, ETH_ALEN);
mutex_init(&priv->lock);
priv->vif = NULL;
setup_timer(&priv->command_timer, command_timer_fn,
(unsigned long)priv);
INIT_LIST_HEAD(&priv->cmdfreeq);
INIT_LIST_HEAD(&priv->cmdpendingq);
spin_lock_init(&priv->driver_lock);
/* Allocate the command buffers */
if (lbtf_allocate_cmd_buffer(priv))
return -1;
lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void lbtf_free_adapter(struct lbtf_private *priv)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_free_cmd_buffer(priv);
del_timer(&priv->command_timer);
lbtf_deb_leave(LBTF_DEB_MAIN);
}
static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct lbtf_private *priv = hw->priv;
priv->skb_to_tx = skb;
queue_work(lbtf_wq, &priv->tx_work);
/*
* queue will be restarted when we receive transmission feedback if
* there are no buffered multicast frames to send
*/
ieee80211_stop_queues(priv->hw);
}
static void lbtf_tx_work(struct work_struct *work)
{
struct lbtf_private *priv = container_of(work, struct lbtf_private,
tx_work);
unsigned int len;
struct ieee80211_tx_info *info;
struct txpd *txpd;
struct sk_buff *skb = NULL;
int err;
lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);
if ((priv->vif->type == NL80211_IFTYPE_AP) &&
(!skb_queue_empty(&priv->bc_ps_buf)))
skb = skb_dequeue(&priv->bc_ps_buf);
else if (priv->skb_to_tx) {
skb = priv->skb_to_tx;
priv->skb_to_tx = NULL;
} else {
lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
}
len = skb->len;
info = IEEE80211_SKB_CB(skb);
txpd = (struct txpd *) skb_push(skb, sizeof(struct txpd));
if (priv->surpriseremoved) {
dev_kfree_skb_any(skb);
lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
return;
}
memset(txpd, 0, sizeof(struct txpd));
/* Activate per-packet rate selection */
txpd->tx_control |= cpu_to_le32(MRVL_PER_PACKET_RATE |
ieee80211_get_tx_rate(priv->hw, info)->hw_value);
/* copy destination address from 802.11 header */
memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
ETH_ALEN);
txpd->tx_packet_length = cpu_to_le16(len);
txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
BUG_ON(priv->tx_skb);
spin_lock_irq(&priv->driver_lock);
priv->tx_skb = skb;
err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
spin_unlock_irq(&priv->driver_lock);
if (err) {
dev_kfree_skb_any(skb);
priv->tx_skb = NULL;
pr_err("TX error: %d", err);
}
lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
}
static int lbtf_op_start(struct ieee80211_hw *hw)
{
struct lbtf_private *priv = hw->priv;
void *card = priv->card;
int ret = -1;
lbtf_deb_enter(LBTF_DEB_MACOPS);
if (!priv->fw_ready)
/* Upload firmware */
if (priv->hw_prog_firmware(card))
goto err_prog_firmware;
/* poke the firmware */
priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
priv->radioon = RADIO_ON;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
ret = lbtf_setup_firmware(priv);
if (ret)
goto err_prog_firmware;
if ((priv->fwrelease < LBTF_FW_VER_MIN) ||
(priv->fwrelease > LBTF_FW_VER_MAX)) {
ret = -1;
goto err_prog_firmware;
}
printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
err_prog_firmware:
priv->hw_reset_device(card);
lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programming fw; ret=%d", ret);
return ret;
}
static void lbtf_op_stop(struct ieee80211_hw *hw)
{
struct lbtf_private *priv = hw->priv;
unsigned long flags;
struct sk_buff *skb;
struct cmd_ctrl_node *cmdnode;
lbtf_deb_enter(LBTF_DEB_MACOPS);
/* Flush pending command nodes */
spin_lock_irqsave(&priv->driver_lock, flags);
list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
cmdnode->result = -ENOENT;
cmdnode->cmdwaitqwoken = 1;
wake_up_interruptible(&cmdnode->cmdwait_q);
}
spin_unlock_irqrestore(&priv->driver_lock, flags);
cancel_work_sync(&priv->cmd_work);
cancel_work_sync(&priv->tx_work);
while ((skb = skb_dequeue(&priv->bc_ps_buf)))
dev_kfree_skb_any(skb);
priv->radioon = RADIO_OFF;
lbtf_set_radio_control(priv);
lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif != NULL)
return -EOPNOTSUPP;
priv->vif = vif;
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
lbtf_set_mode(priv, LBTF_AP_MODE);
break;
case NL80211_IFTYPE_STATION:
lbtf_set_mode(priv, LBTF_STA_MODE);
break;
default:
priv->vif = NULL;
return -EOPNOTSUPP;
}
lbtf_set_mac_address(priv, (u8 *) vif->addr);
lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
lbtf_deb_enter(LBTF_DEB_MACOPS);
if (priv->vif->type == NL80211_IFTYPE_AP ||
priv->vif->type == NL80211_IFTYPE_MESH_POINT)
lbtf_beacon_ctrl(priv, 0, 0);
lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
lbtf_set_bssid(priv, 0, NULL);
priv->vif = NULL;
lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct lbtf_private *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
lbtf_deb_enter(LBTF_DEB_MACOPS);
if (conf->channel->center_freq != priv->cur_freq) {
priv->cur_freq = conf->channel->center_freq;
lbtf_set_channel(priv, conf->channel->hw_value);
}
lbtf_deb_leave(LBTF_DEB_MACOPS);
return 0;
}
static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
struct lbtf_private *priv = hw->priv;
int i;
struct netdev_hw_addr *ha;
int mc_count = netdev_hw_addr_list_count(mc_list);
if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
return mc_count;
priv->nr_of_multicastmacaddr = mc_count;
i = 0;
netdev_hw_addr_list_for_each(ha, mc_list)
memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);
return mc_count;
}
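/*
 * The count returned above comes back from mac80211 as the
 * "multicast" argument of lbtf_op_configure_filter() below: zero
 * disables multicast reception, an in-range count selects the
 * explicit address list copied here, and an over-range count falls
 * back to all-multicast mode.
 */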
#define SUPPORTED_FIF_FLAGS (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
u64 multicast)
{
struct lbtf_private *priv = hw->priv;
int old_mac_control = priv->mac_control;
lbtf_deb_enter(LBTF_DEB_MACOPS);
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
if (!changed_flags) {
lbtf_deb_leave(LBTF_DEB_MACOPS);
return;
}
if (*new_flags & (FIF_PROMISC_IN_BSS))
priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
else
priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
if (*new_flags & (FIF_ALLMULTI) ||
multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
} else if (multicast) {
priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
priv->mac_control &= ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
lbtf_cmd_set_mac_multicast_addr(priv);
} else {
priv->mac_control &= ~(CMD_ACT_MAC_MULTICAST_ENABLE |
CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
if (priv->nr_of_multicastmacaddr) {
priv->nr_of_multicastmacaddr = 0;
lbtf_cmd_set_mac_multicast_addr(priv);
}
}
if (priv->mac_control != old_mac_control)
lbtf_set_mac_control(priv);
lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes)
{
struct lbtf_private *priv = hw->priv;
struct sk_buff *beacon;
lbtf_deb_enter(LBTF_DEB_MACOPS);
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
switch (priv->vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
beacon = ieee80211_beacon_get(hw, vif);
if (beacon) {
lbtf_beacon_set(priv, beacon);
kfree_skb(beacon);
lbtf_beacon_ctrl(priv, 1,
bss_conf->beacon_int);
}
break;
default:
break;
}
}
if (changes & BSS_CHANGED_BSSID) {
bool activate = !is_zero_ether_addr(bss_conf->bssid);
lbtf_set_bssid(priv, activate, bss_conf->bssid);
}
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
else
priv->preamble = CMD_TYPE_LONG_PREAMBLE;
lbtf_set_radio_control(priv);
}
lbtf_deb_leave(LBTF_DEB_MACOPS);
}
static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct lbtf_private *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->channel;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = priv->noise;
return 0;
}
static const struct ieee80211_ops lbtf_ops = {
.tx = lbtf_op_tx,
.start = lbtf_op_start,
.stop = lbtf_op_stop,
.add_interface = lbtf_op_add_interface,
.remove_interface = lbtf_op_remove_interface,
.config = lbtf_op_config,
.prepare_multicast = lbtf_op_prepare_multicast,
.configure_filter = lbtf_op_configure_filter,
.bss_info_changed = lbtf_op_bss_info_changed,
.get_survey = lbtf_op_get_survey,
};
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
{
struct ieee80211_rx_status stats;
struct rxpd *prxpd;
int need_padding;
unsigned int flags;
struct ieee80211_hdr *hdr;
lbtf_deb_enter(LBTF_DEB_RX);
prxpd = (struct rxpd *) skb->data;
memset(&stats, 0, sizeof(stats));
if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
stats.flag |= RX_FLAG_FAILED_FCS_CRC;
stats.freq = priv->cur_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
priv->noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
stats.rate_idx = prxpd->rx_rate;
skb_pull(skb, sizeof(struct rxpd));
hdr = (struct ieee80211_hdr *)skb->data;
flags = le32_to_cpu(*(__le32 *)(skb->data + 4));
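/*
 * Payload alignment heuristic (an inference, not spelled out in the
 * driver): a plain data header is a multiple of 4 bytes, but QoS
 * control adds 2 and a fourth address adds 6, so exactly one of them
 * leaves the payload misaligned; the A-MSDU case flips the parity
 * once more. When the XOR chain below comes out true, the frame is
 * shifted by 2 bytes to realign the payload.
 */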
need_padding = ieee80211_is_data_qos(hdr->frame_control);
need_padding ^= ieee80211_has_a4(hdr->frame_control);
need_padding ^= ieee80211_is_data_qos(hdr->frame_control) &&
(*ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_A_MSDU_PRESENT);
if (need_padding) {
memmove(skb->data + 2, skb->data, skb->len);
skb_reserve(skb, 2);
}
memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
min_t(unsigned int, skb->len, 100));
ieee80211_rx_irqsafe(priv->hw, skb);
lbtf_deb_leave(LBTF_DEB_RX);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_rx);
/**
* lbtf_add_card: Add and initialize the card; no firmware upload yet.
*
* @card: A pointer to the card
*
* Returns: pointer to struct lbtf_private.
*/
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
{
struct ieee80211_hw *hw;
struct lbtf_private *priv = NULL;
lbtf_deb_enter(LBTF_DEB_MAIN);
hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
if (!hw)
goto done;
priv = hw->priv;
if (lbtf_init_adapter(priv))
goto err_init_adapter;
priv->hw = hw;
priv->card = card;
priv->tx_skb = NULL;
hw->queues = 1;
hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
hw->extra_tx_headroom = sizeof(struct txpd);
memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
priv->band.n_bitrates = ARRAY_SIZE(lbtf_rates);
priv->band.bitrates = priv->rates;
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
SET_IEEE80211_DEV(hw, dmdev);
INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
INIT_WORK(&priv->tx_work, lbtf_tx_work);
if (ieee80211_register_hw(hw))
goto err_init_adapter;
goto done;
err_init_adapter:
lbtf_free_adapter(priv);
ieee80211_free_hw(hw);
priv = NULL;
done:
lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
return priv;
}
EXPORT_SYMBOL_GPL(lbtf_add_card);
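/*
 * Bus-glue sketch (illustrative only; the real USB glue lives in
 * if_usb.c). A transport driver registers through lbtf_add_card()
 * and then fills in the hw_* hooks used above. Everything prefixed
 * "my_" is an assumption, not a symbol from this file.
 */
#if 0 /* sketch, not compiled */
static int my_probe(struct my_bus_dev *busdev)
{
struct lbtf_private *priv = lbtf_add_card(busdev, &busdev->dev);
if (!priv)
return -ENOMEM;
priv->hw_host_to_card = my_host_to_card;
priv->hw_prog_firmware = my_prog_firmware;
priv->hw_reset_device = my_reset_device;
return 0;
}
#endif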
int lbtf_remove_card(struct lbtf_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
lbtf_deb_enter(LBTF_DEB_MAIN);
priv->surpriseremoved = 1;
del_timer(&priv->command_timer);
lbtf_free_adapter(priv);
priv->hw = NULL;
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
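/*
 * TX completion path: the driver keeps at most one frame in flight
 * (priv->tx_skb). Once the firmware reports the outcome we strip the
 * txpd header again, report the status to mac80211, and either reopen
 * the queues or requeue tx_work if more frames are waiting.
 */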
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
ieee80211_tx_info_clear_status(info);
/*
* Commented out, otherwise we never go beyond 1Mbit/s using mac80211
* default pid rc algorithm.
*
* info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt;
*/
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail)
info->flags |= IEEE80211_TX_STAT_ACK;
skb_pull(priv->tx_skb, sizeof(struct txpd));
ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
priv->tx_skb = NULL;
if (!priv->skb_to_tx && skb_queue_empty(&priv->bc_ps_buf))
ieee80211_wake_queues(priv->hw);
else
queue_work(lbtf_wq, &priv->tx_work);
}
EXPORT_SYMBOL_GPL(lbtf_send_tx_feedback);
void lbtf_bcn_sent(struct lbtf_private *priv)
{
struct sk_buff *skb = NULL;
if (priv->vif->type != NL80211_IFTYPE_AP)
return;
if (skb_queue_empty(&priv->bc_ps_buf)) {
bool tx_buff_bc = false;
while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
skb_queue_tail(&priv->bc_ps_buf, skb);
tx_buff_bc = true;
}
if (tx_buff_bc) {
ieee80211_stop_queues(priv->hw);
queue_work(lbtf_wq, &priv->tx_work);
}
}
skb = ieee80211_beacon_get(priv->hw, priv->vif);
if (skb) {
lbtf_beacon_set(priv, skb);
kfree_skb(skb);
}
}
EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
lbtf_wq = create_workqueue("libertastf");
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
}
lbtf_deb_leave(LBTF_DEB_MAIN);
return 0;
}
static void __exit lbtf_exit_module(void)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
destroy_workqueue(lbtf_wq);
lbtf_deb_leave(LBTF_DEB_MAIN);
}
module_init(lbtf_init_module);
module_exit(lbtf_exit_module);
MODULE_DESCRIPTION("Libertas WLAN Thinfirm Driver Library");
MODULE_AUTHOR("Cozybit Inc.");
MODULE_LICENSE("GPL");
| gpl-2.0 |
willowgarage/pr2-kernel | arch/arm/plat-pxa/dma.c | 7339 | 10008 | /*
* linux/arch/arm/plat-pxa/dma.c
*
* PXA DMA registration and IRQ dispatching
*
* Author: Nicolas Pitre
* Created: Nov 15, 2001
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#define DMA_DEBUG_NAME "pxa_dma"
#define DMA_MAX_REQUESTERS 64
struct dma_channel {
char *name;
pxa_dma_prio prio;
void (*irq_handler)(int, void *);
void *data;
spinlock_t lock;
};
static struct dma_channel *dma_channels;
static int num_dma_channels;
/*
* Debug fs
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
int i;
u32 drcmr;
pos += seq_printf(s, "DMA channel %d requesters list :\n", chan);
for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
drcmr = DRCMR(i);
if ((drcmr & DRCMR_CHLNUM) == chan)
pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
!!(drcmr & DRCMR_MAPVLD));
}
return pos;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
{
int burst = (dcmd >> 16) & 0x3;
return burst ? 4 << burst : 0;
}
static int is_phys_valid(unsigned long addr)
{
return pfn_valid(__phys_to_pfn(addr));
}
#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
static int dbg_show_descriptors(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
int i, max_show = 20, burst, width;
u32 dcmd;
unsigned long phys_desc;
struct pxa_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&dma_channels[chan].lock, flags);
phys_desc = DDADR(chan);
pos += seq_printf(s, "DMA channel %d descriptors :\n", chan);
pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0);
for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
desc = phys_to_virt(phys_desc);
dcmd = desc->dcmd;
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
i, phys_desc, desc);
pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d"
" width=%d len=%d)\n",
dcmd,
DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
DCMD_STR(ENDIAN), burst, width,
dcmd & DCMD_LENGTH);
phys_desc = desc->ddadr;
}
if (i == max_show)
pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
i, phys_desc);
else
pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n",
i, phys_desc, phys_desc == DDADR_STOP ?
"DDADR_STOP" : "invalid");
spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
return pos;
}
static int dbg_show_chan_state(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
u32 dcsr, dcmd;
int burst, width;
static char *str_prio[] = { "high", "normal", "low" };
dcsr = DCSR(chan);
dcmd = DCMD(chan);
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
pos += seq_printf(s, "DMA channel %d\n", chan);
pos += seq_printf(s, "\tPriority : %s\n",
str_prio[dma_channels[chan].prio]);
pos += seq_printf(s, "\tUnaligned transfer bit: %s\n",
DALGN & (1 << chan) ? "yes" : "no");
pos += seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d"
" len=%d)\n",
dcmd,
DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
return pos;
}
static int dbg_show_state(struct seq_file *s, void *p)
{
int pos = 0;
/* basic device status */
pos += seq_printf(s, "DMA engine status\n");
pos += seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
return pos;
}
#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
.owner = THIS_MODULE, \
.open = dbg_open_##name, \
.llseek = seq_lseek, \
.read = seq_read, \
.release = single_release, \
}
DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);
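/*
 * For reference, DBGFS_FUNC_DECL(state) above expands to roughly:
 *
 * static int dbg_open_state(struct inode *inode, struct file *file)
 * {
 * return single_open(file, dbg_show_state, inode->i_private);
 * }
 * static const struct file_operations dbg_fops_state = { ... };
 *
 * i.e. each debugfs file gets a seq_file single_open() wrapper around
 * the matching dbg_show_*() routine.
 */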
static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
{
char chan_name[11];
struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
struct dentry *chan_reqs = NULL;
void *dt;
scnprintf(chan_name, sizeof(chan_name), "%d", ch);
chan = debugfs_create_dir(chan_name, chandir);
dt = (void *)ch;
if (chan)
chan_state = debugfs_create_file("state", 0400, chan, dt,
&dbg_fops_chan_state);
if (chan_state)
chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
&dbg_fops_descriptors);
if (chan_descr)
chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
&dbg_fops_requester_chan);
if (!chan_reqs)
goto err_state;
return chan;
err_state:
debugfs_remove_recursive(chan);
return NULL;
}
static void pxa_dma_init_debugfs(void)
{
int i;
struct dentry *chandir;
dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
if (IS_ERR(dbgfs_root) || !dbgfs_root)
goto err_root;
dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
&dbg_fops_state);
if (!dbgfs_state)
goto err_state;
dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels,
GFP_KERNEL);
if (!dbgfs_chan)
goto err_alloc;
chandir = debugfs_create_dir("channels", dbgfs_root);
if (!chandir)
goto err_chandir;
for (i = 0; i < num_dma_channels; i++) {
dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
if (!dbgfs_chan[i])
goto err_chans;
}
return;
err_chans:
err_chandir:
kfree(dbgfs_chan);
err_alloc:
err_state:
debugfs_remove_recursive(dbgfs_root);
err_root:
pr_err("pxa_dma: debugfs is not available\n");
}
static void __exit pxa_dma_cleanup_debugfs(void)
{
debugfs_remove_recursive(dbgfs_root);
}
#else
static inline void pxa_dma_init_debugfs(void) {}
static inline void pxa_dma_cleanup_debugfs(void) {}
#endif
int pxa_request_dma (char *name, pxa_dma_prio prio,
void (*irq_handler)(int, void *),
void *data)
{
unsigned long flags;
int i, found = 0;
/* basic sanity checks */
if (!name || !irq_handler)
return -EINVAL;
local_irq_save(flags);
do {
/* try grabbing a DMA channel with the requested priority */
for (i = 0; i < num_dma_channels; i++) {
if ((dma_channels[i].prio == prio) &&
!dma_channels[i].name) {
found = 1;
break;
}
}
/* if the requested prio group is full, try a higher priority */
} while (!found && prio--);
if (found) {
DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
dma_channels[i].name = name;
dma_channels[i].irq_handler = irq_handler;
dma_channels[i].data = data;
} else {
printk (KERN_WARNING "No more available DMA channels for %s\n", name);
i = -ENODEV;
}
local_irq_restore(flags);
return i;
}
EXPORT_SYMBOL(pxa_request_dma);
void pxa_free_dma (int dma_ch)
{
unsigned long flags;
if (!dma_channels[dma_ch].name) {
printk (KERN_CRIT
"%s: trying to free channel %d which is already freed\n",
__func__, dma_ch);
return;
}
local_irq_save(flags);
DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
dma_channels[dma_ch].name = NULL;
local_irq_restore(flags);
}
EXPORT_SYMBOL(pxa_free_dma);
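/*
 * Usage sketch (an assumed caller, not part of this file): claim a
 * channel, letting the "prio--" loop above fall back to a
 * higher-priority group when the requested one is full, then release
 * it again. The "my_" names are illustrative.
 */
static void my_dma_irq(int channel, void *data)
{
u32 dcsr = DCSR(channel);
DCSR(channel) = dcsr; /* ack whatever this channel raised */
}
static int __maybe_unused my_claim_channel(void)
{
int ch = pxa_request_dma("my-dev", DMA_PRIO_LOW, my_dma_irq, NULL);
if (ch < 0)
return ch; /* -ENODEV when every group is exhausted */
/* ... program DSADR/DTADR/DCMD here, then set DCSR_RUN ... */
pxa_free_dma(ch);
return 0;
}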
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
int i, dint = DINT;
struct dma_channel *channel;
while (dint) {
i = __ffs(dint);
dint &= (dint - 1);
channel = &dma_channels[i];
if (channel->name && channel->irq_handler) {
channel->irq_handler(i, channel->data);
} else {
/*
* IRQ for an unregistered DMA channel:
* let's clear the interrupts and disable it.
*/
printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
}
return IRQ_HANDLED;
}
int __init pxa_init_dma(int irq, int num_ch)
{
int i, ret;
dma_channels = kzalloc(sizeof(struct dma_channel) * num_ch, GFP_KERNEL);
if (dma_channels == NULL)
return -ENOMEM;
/* dma channel priorities on pxa2xx processors:
* ch 0 - 3, 16 - 19 <--> (0) DMA_PRIO_HIGH
* ch 4 - 7, 20 - 23 <--> (1) DMA_PRIO_MEDIUM
* ch 8 - 15, 24 - 31 <--> (2) DMA_PRIO_LOW
*/
for (i = 0; i < num_ch; i++) {
DCSR(i) = 0;
dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
spin_lock_init(&dma_channels[i].lock);
}
ret = request_irq(irq, dma_irq_handler, IRQF_DISABLED, "DMA", NULL);
if (ret) {
printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
kfree(dma_channels);
return ret;
}
num_dma_channels = num_ch;
pxa_dma_init_debugfs();
return 0;
}
| gpl-2.0 |
zhaochengw/android_kernel_nx403 | lib/sha1.c | 8107 | 6197 | /*
* SHA1 routine optimized to do word accesses rather than byte accesses,
* and to avoid unnecessary copies into the context array.
*
* This was based on the git SHA1 implementation.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/cryptohash.h>
#include <asm/unaligned.h>
/*
* If you have 32 registers or more, the compiler can (and should)
* try to change the array[] accesses into registers. However, on
* machines with less than ~25 registers, that won't really work,
* and at least gcc will make an unholy mess of it.
*
* So to avoid that mess which just slows things down, we force
* the stores to memory to actually happen (we might be better off
* with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
* suggested by Artur Skawina - that will also make gcc unable to
* try to do the silly "optimize away loads" part because it won't
* see what the value will be).
*
* Ben Herrenschmidt reports that on PPC, the C version comes close
* to the optimized asm with this (ie on PPC you don't want that
* 'volatile', since there are lots of registers).
*
* On ARM we get the best code generation by forcing a full memory barrier
* between each SHA_ROUND, otherwise gcc happily get wild with spilling and
* the stack frame size simply explode and performance goes down the drain.
*/
#ifdef CONFIG_X86
#define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
#elif defined(CONFIG_ARM)
#define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
#else
#define setW(x, val) (W(x) = (val))
#endif
/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
/*
* Where do we get the source from? The first 16 iterations get it from
* the input data, the next mix it from the 512-bit array.
*/
#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \
B = ror32(B, 2); } while (0)
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
* sha_transform - single block SHA1 transform
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
* @array: 16 words of workspace (see note)
*
* This function generates a SHA1 digest for a single 512-bit block.
* Be warned, it does not handle padding and message digest, do not
* confuse it with the full FIPS 180-1 digest algorithm for variable
* length messages.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
void sha_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
A = digest[0];
B = digest[1];
C = digest[2];
D = digest[3];
E = digest[4];
/* Round 1 - iterations 0-15 take their input from 'data' */
T_0_15( 0, A, B, C, D, E);
T_0_15( 1, E, A, B, C, D);
T_0_15( 2, D, E, A, B, C);
T_0_15( 3, C, D, E, A, B);
T_0_15( 4, B, C, D, E, A);
T_0_15( 5, A, B, C, D, E);
T_0_15( 6, E, A, B, C, D);
T_0_15( 7, D, E, A, B, C);
T_0_15( 8, C, D, E, A, B);
T_0_15( 9, B, C, D, E, A);
T_0_15(10, A, B, C, D, E);
T_0_15(11, E, A, B, C, D);
T_0_15(12, D, E, A, B, C);
T_0_15(13, C, D, E, A, B);
T_0_15(14, B, C, D, E, A);
T_0_15(15, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */
T_16_19(16, E, A, B, C, D);
T_16_19(17, D, E, A, B, C);
T_16_19(18, C, D, E, A, B);
T_16_19(19, B, C, D, E, A);
/* Round 2 */
T_20_39(20, A, B, C, D, E);
T_20_39(21, E, A, B, C, D);
T_20_39(22, D, E, A, B, C);
T_20_39(23, C, D, E, A, B);
T_20_39(24, B, C, D, E, A);
T_20_39(25, A, B, C, D, E);
T_20_39(26, E, A, B, C, D);
T_20_39(27, D, E, A, B, C);
T_20_39(28, C, D, E, A, B);
T_20_39(29, B, C, D, E, A);
T_20_39(30, A, B, C, D, E);
T_20_39(31, E, A, B, C, D);
T_20_39(32, D, E, A, B, C);
T_20_39(33, C, D, E, A, B);
T_20_39(34, B, C, D, E, A);
T_20_39(35, A, B, C, D, E);
T_20_39(36, E, A, B, C, D);
T_20_39(37, D, E, A, B, C);
T_20_39(38, C, D, E, A, B);
T_20_39(39, B, C, D, E, A);
/* Round 3 */
T_40_59(40, A, B, C, D, E);
T_40_59(41, E, A, B, C, D);
T_40_59(42, D, E, A, B, C);
T_40_59(43, C, D, E, A, B);
T_40_59(44, B, C, D, E, A);
T_40_59(45, A, B, C, D, E);
T_40_59(46, E, A, B, C, D);
T_40_59(47, D, E, A, B, C);
T_40_59(48, C, D, E, A, B);
T_40_59(49, B, C, D, E, A);
T_40_59(50, A, B, C, D, E);
T_40_59(51, E, A, B, C, D);
T_40_59(52, D, E, A, B, C);
T_40_59(53, C, D, E, A, B);
T_40_59(54, B, C, D, E, A);
T_40_59(55, A, B, C, D, E);
T_40_59(56, E, A, B, C, D);
T_40_59(57, D, E, A, B, C);
T_40_59(58, C, D, E, A, B);
T_40_59(59, B, C, D, E, A);
/* Round 4 */
T_60_79(60, A, B, C, D, E);
T_60_79(61, E, A, B, C, D);
T_60_79(62, D, E, A, B, C);
T_60_79(63, C, D, E, A, B);
T_60_79(64, B, C, D, E, A);
T_60_79(65, A, B, C, D, E);
T_60_79(66, E, A, B, C, D);
T_60_79(67, D, E, A, B, C);
T_60_79(68, C, D, E, A, B);
T_60_79(69, B, C, D, E, A);
T_60_79(70, A, B, C, D, E);
T_60_79(71, E, A, B, C, D);
T_60_79(72, D, E, A, B, C);
T_60_79(73, C, D, E, A, B);
T_60_79(74, B, C, D, E, A);
T_60_79(75, A, B, C, D, E);
T_60_79(76, E, A, B, C, D);
T_60_79(77, D, E, A, B, C);
T_60_79(78, C, D, E, A, B);
T_60_79(79, B, C, D, E, A);
digest[0] += A;
digest[1] += B;
digest[2] += C;
digest[3] += D;
digest[4] += E;
}
EXPORT_SYMBOL(sha_transform);
/**
* sha_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
void sha_init(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
| gpl-2.0 |
Arc-Team/android_kernel_htc_msm8660 | sound/oss/sequencer.c | 8363 | 34554 | /*
* sound/oss/sequencer.c
*
* The sequencer personality manager.
*/
/*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*/
/*
* Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
* Alan Cox : reformatted and fixed a pair of null pointer bugs
*/
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "midi_ctrl.h"
static int sequencer_ok;
static struct sound_timer_operations *tmr;
static int tmr_no = -1; /* Currently selected timer */
static int pending_timer = -1; /* For timer change operation */
extern unsigned long seq_time;
static int obsolete_api_used;
static DEFINE_SPINLOCK(lock);
/*
* Local counts for number of synth and MIDI devices. These are initialized
* by the sequencer_open.
*/
static int max_mididev;
static int max_synthdev;
/*
* The seq_mode gives the operating mode of the sequencer:
* 1 = level1 (the default)
* 2 = level2 (extended capabilities)
*/
#define SEQ_1 1
#define SEQ_2 2
static int seq_mode = SEQ_1;
static DECLARE_WAIT_QUEUE_HEAD(seq_sleeper);
static DECLARE_WAIT_QUEUE_HEAD(midi_sleeper);
static int midi_opened[MAX_MIDI_DEV];
static int midi_written[MAX_MIDI_DEV];
static unsigned long prev_input_time;
static int prev_event_time;
#include "tuning.h"
#define EV_SZ 8
#define IEV_SZ 8
static unsigned char *queue;
static unsigned char *iqueue;
static volatile int qhead, qtail, qlen;
static volatile int iqhead, iqtail, iqlen;
static volatile int seq_playing;
static volatile int sequencer_busy;
static int output_threshold;
static long pre_event_timeout;
static unsigned synth_open_mask;
static int seq_queue(unsigned char *note, char nonblock);
static void seq_startplay(void);
static int seq_sync(void);
static void seq_reset(void);
#if MAX_SYNTH_DEV > 15
#error Too many synthesizer devices enabled.
#endif
int sequencer_read(int dev, struct file *file, char __user *buf, int count)
{
int c = count, p = 0;
int ev_len;
unsigned long flags;
dev = dev >> 4;
ev_len = seq_mode == SEQ_1 ? 4 : 8;
spin_lock_irqsave(&lock,flags);
if (!iqlen)
{
spin_unlock_irqrestore(&lock,flags);
if (file->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
interruptible_sleep_on_timeout(&midi_sleeper,
pre_event_timeout);
spin_lock_irqsave(&lock,flags);
if (!iqlen)
{
spin_unlock_irqrestore(&lock,flags);
return 0;
}
}
while (iqlen && c >= ev_len)
{
char *fixit = (char *) &iqueue[iqhead * IEV_SZ];
spin_unlock_irqrestore(&lock,flags);
if (copy_to_user(&(buf)[p], fixit, ev_len))
return count - c;
p += ev_len;
c -= ev_len;
spin_lock_irqsave(&lock,flags);
iqhead = (iqhead + 1) % SEQ_MAX_QUEUE;
iqlen--;
}
spin_unlock_irqrestore(&lock,flags);
return count - c;
}
static void sequencer_midi_output(int dev)
{
/*
* Currently NOP
*/
}
void seq_copy_to_input(unsigned char *event_rec, int len)
{
unsigned long flags;
/*
* Verify that the len is valid for the current mode.
*/
if (len != 4 && len != 8)
return;
if ((seq_mode == SEQ_1) != (len == 4))
return;
if (iqlen >= (SEQ_MAX_QUEUE - 1))
return; /* Overflow */
spin_lock_irqsave(&lock,flags);
memcpy(&iqueue[iqtail * IEV_SZ], event_rec, len);
iqlen++;
iqtail = (iqtail + 1) % SEQ_MAX_QUEUE;
wake_up(&midi_sleeper);
spin_unlock_irqrestore(&lock,flags);
}
EXPORT_SYMBOL(seq_copy_to_input);
static void sequencer_midi_input(int dev, unsigned char data)
{
unsigned int tstamp;
unsigned char event_rec[4];
if (data == 0xfe) /* Ignore active sensing */
return;
tstamp = jiffies - seq_time;
if (tstamp != prev_input_time)
{
tstamp = (tstamp << 8) | SEQ_WAIT;
seq_copy_to_input((unsigned char *) &tstamp, 4);
prev_input_time = tstamp;
}
event_rec[0] = SEQ_MIDIPUTC;
event_rec[1] = data;
event_rec[2] = dev;
event_rec[3] = 0;
seq_copy_to_input(event_rec, 4);
}
void seq_input_event(unsigned char *event_rec, int len)
{
unsigned long this_time;
if (seq_mode == SEQ_2)
this_time = tmr->get_time(tmr_no);
else
this_time = jiffies - seq_time;
if (this_time != prev_input_time)
{
unsigned char tmp_event[8];
tmp_event[0] = EV_TIMING;
tmp_event[1] = TMR_WAIT_ABS;
tmp_event[2] = 0;
tmp_event[3] = 0;
*(unsigned int *) &tmp_event[4] = this_time;
seq_copy_to_input(tmp_event, 8);
prev_input_time = this_time;
}
seq_copy_to_input(event_rec, len);
}
EXPORT_SYMBOL(seq_input_event);
int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
{
unsigned char event_rec[EV_SZ], ev_code;
int p = 0, c, ev_size;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
if (mode == OPEN_READ)
return -EIO;
c = count;
while (c >= 4)
{
if (copy_from_user((char *) event_rec, &(buf)[p], 4))
goto out;
ev_code = event_rec[0];
if (ev_code == SEQ_FULLSIZE)
{
int err, fmt;
dev = *(unsigned short *) &event_rec[2];
if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0);
if (err < 0)
return err;
return err;
}
if (ev_code >= 128)
{
if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
{
printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
return -EINVAL;
}
ev_size = 8;
if (c < ev_size)
{
if (!seq_playing)
seq_startplay();
return count - c;
}
if (copy_from_user((char *)&event_rec[4],
&(buf)[p + 4], 4))
goto out;
}
else
{
if (seq_mode == SEQ_2)
{
printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
return -EINVAL;
}
ev_size = 4;
if (event_rec[0] != SEQ_MIDIPUTC)
obsolete_api_used = 1;
}
if (event_rec[0] == SEQ_MIDIPUTC)
{
if (!midi_opened[event_rec[2]])
{
int err, mode;
int dev = event_rec[2];
if (dev >= max_mididev || midi_devs[dev]==NULL)
{
/*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
return -ENXIO;
}
mode = translate_mode(file);
if ((err = midi_devs[dev]->open(dev, mode,
sequencer_midi_input, sequencer_midi_output)) < 0)
{
seq_reset();
printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
return err;
}
midi_opened[dev] = 1;
}
}
if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
{
int processed = count - c;
if (!seq_playing)
seq_startplay();
if (!processed && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
else
return processed;
}
p += ev_size;
c -= ev_size;
}
if (!seq_playing)
seq_startplay();
out:
return count;
}
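/*
 * Event format sketch (illustrative, from the old OSS userspace API):
 * level 1 events are 4 bytes with the opcode in byte 0, matching the
 * layout parsed above. A userspace note-on sent through raw MIDI
 * device 0 would look like this ("seqfd" is an assumed open
 * /dev/sequencer descriptor):
 */
#if 0 /* userspace-style example, not compiled here */
unsigned char ev[12] = {
SEQ_MIDIPUTC, 0x90, 0, 0, /* status byte: note on, channel 0 */
SEQ_MIDIPUTC, 60, 0, 0, /* middle C */
SEQ_MIDIPUTC, 127, 0, 0, /* full velocity */
};
write(seqfd, ev, sizeof(ev));
#endif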
static int seq_queue(unsigned char *note, char nonblock)
{
/*
* Test if there is space in the queue
*/
if (qlen >= SEQ_MAX_QUEUE)
if (!seq_playing)
seq_startplay(); /*
* Give the queue a chance to drain
*/
if (!nonblock && qlen >= SEQ_MAX_QUEUE && !waitqueue_active(&seq_sleeper)) {
/*
* Sleep until there is enough space on the queue
*/
interruptible_sleep_on(&seq_sleeper);
}
if (qlen >= SEQ_MAX_QUEUE)
{
return 0; /*
* To be sure
*/
}
memcpy(&queue[qtail * EV_SZ], note, EV_SZ);
qtail = (qtail + 1) % SEQ_MAX_QUEUE;
qlen++;
return 1;
}
static int extended_event(unsigned char *q)
{
int dev = q[2];
if (dev < 0 || dev >= max_synthdev)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
switch (q[1])
{
case SEQ_NOTEOFF:
synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]);
break;
case SEQ_NOTEON:
if (q[4] > 127 && q[4] != 255)
return 0;
if (q[5] == 0)
{
synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]);
break;
}
synth_devs[dev]->start_note(dev, q[3], q[4], q[5]);
break;
case SEQ_PGMCHANGE:
synth_devs[dev]->set_instr(dev, q[3], q[4]);
break;
case SEQ_AFTERTOUCH:
synth_devs[dev]->aftertouch(dev, q[3], q[4]);
break;
case SEQ_BALANCE:
synth_devs[dev]->panning(dev, q[3], (char) q[4]);
break;
case SEQ_CONTROLLER:
synth_devs[dev]->controller(dev, q[3], q[4], (short) (q[5] | (q[6] << 8)));
break;
case SEQ_VOLMODE:
if (synth_devs[dev]->volume_method != NULL)
synth_devs[dev]->volume_method(dev, q[3]);
break;
default:
return -EINVAL;
}
return 0;
}
static int find_voice(int dev, int chn, int note)
{
unsigned short key;
int i;
key = (chn << 8) | (note + 1);
for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
if (synth_devs[dev]->alloc.map[i] == key)
return i;
return -1;
}
static int alloc_voice(int dev, int chn, int note)
{
unsigned short key;
int voice;
key = (chn << 8) | (note + 1);
voice = synth_devs[dev]->alloc_voice(dev, chn, note,
&synth_devs[dev]->alloc);
synth_devs[dev]->alloc.map[voice] = key;
synth_devs[dev]->alloc.alloc_times[voice] =
synth_devs[dev]->alloc.timestamp++;
return voice;
}
static void seq_chn_voice_event(unsigned char *event_rec)
{
#define dev event_rec[1]
#define cmd event_rec[2]
#define chn event_rec[3]
#define note event_rec[4]
#define parm event_rec[5]
int voice = -1;
if ((int) dev > max_synthdev || synth_devs[dev] == NULL)
return;
if (!(synth_open_mask & (1 << dev)))
return;
if (!synth_devs[dev])
return;
if (seq_mode == SEQ_2)
{
if (synth_devs[dev]->alloc_voice)
voice = find_voice(dev, chn, note);
if (cmd == MIDI_NOTEON && parm == 0)
{
cmd = MIDI_NOTEOFF;
parm = 64;
}
}
switch (cmd)
{
case MIDI_NOTEON:
if (note > 127 && note != 255) /* Not a seq2 feature */
return;
if (voice == -1 && seq_mode == SEQ_2 && synth_devs[dev]->alloc_voice)
{
/* Internal synthesizer (FM, GUS, etc) */
voice = alloc_voice(dev, chn, note);
}
if (voice == -1)
voice = chn;
if (seq_mode == SEQ_2 && (int) dev < num_synths)
{
/*
* The MIDI channel 10 is a percussive channel. Use the note
* number to select the proper patch (128 to 255) to play.
*/
if (chn == 9)
{
synth_devs[dev]->set_instr(dev, voice, 128 + note);
synth_devs[dev]->chn_info[chn].pgm_num = 128 + note;
}
synth_devs[dev]->setup_voice(dev, voice, chn);
}
synth_devs[dev]->start_note(dev, voice, note, parm);
break;
case MIDI_NOTEOFF:
if (voice == -1)
voice = chn;
synth_devs[dev]->kill_note(dev, voice, note, parm);
break;
case MIDI_KEY_PRESSURE:
if (voice == -1)
voice = chn;
synth_devs[dev]->aftertouch(dev, voice, parm);
break;
default:;
}
#undef dev
#undef cmd
#undef chn
#undef note
#undef parm
}
static void seq_chn_common_event(unsigned char *event_rec)
{
unsigned char dev = event_rec[1];
unsigned char cmd = event_rec[2];
unsigned char chn = event_rec[3];
unsigned char p1 = event_rec[4];
/* unsigned char p2 = event_rec[5]; */
unsigned short w14 = *(short *) &event_rec[6];
if ((int) dev > max_synthdev || synth_devs[dev] == NULL)
return;
if (!(synth_open_mask & (1 << dev)))
return;
if (!synth_devs[dev])
return;
switch (cmd)
{
case MIDI_PGM_CHANGE:
if (seq_mode == SEQ_2)
{
synth_devs[dev]->chn_info[chn].pgm_num = p1;
if ((int) dev >= num_synths)
synth_devs[dev]->set_instr(dev, chn, p1);
}
else
synth_devs[dev]->set_instr(dev, chn, p1);
break;
case MIDI_CTL_CHANGE:
if (seq_mode == SEQ_2)
{
if (chn > 15 || p1 > 127)
break;
synth_devs[dev]->chn_info[chn].controllers[p1] = w14 & 0x7f;
if (p1 < 32) /* Setting MSB should clear LSB to 0 */
synth_devs[dev]->chn_info[chn].controllers[p1 + 32] = 0;
if ((int) dev < num_synths)
{
int val = w14 & 0x7f;
int i, key;
if (p1 < 64) /* Combine MSB and LSB */
{
val = ((synth_devs[dev]->
chn_info[chn].controllers[p1 & ~32] & 0x7f) << 7)
| (synth_devs[dev]->
chn_info[chn].controllers[p1 | 32] & 0x7f);
p1 &= ~32;
}
/* Handle all playing notes on this channel */
key = ((int) chn << 8);
for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key)
synth_devs[dev]->controller(dev, i, p1, val);
}
else
synth_devs[dev]->controller(dev, chn, p1, w14);
}
else /* Mode 1 */
synth_devs[dev]->controller(dev, chn, p1, w14);
break;
case MIDI_PITCH_BEND:
if (seq_mode == SEQ_2)
{
synth_devs[dev]->chn_info[chn].bender_value = w14;
if ((int) dev < num_synths)
{
/* Handle all playing notes on this channel */
int i, key;
key = (chn << 8);
for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key)
synth_devs[dev]->bender(dev, i, w14);
}
else
synth_devs[dev]->bender(dev, chn, w14);
}
else /* MODE 1 */
synth_devs[dev]->bender(dev, chn, w14);
break;
default:;
}
}
static int seq_timing_event(unsigned char *event_rec)
{
unsigned char cmd = event_rec[1];
unsigned int parm = *(int *) &event_rec[4];
if (seq_mode == SEQ_2)
{
int ret;
if ((ret = tmr->event(tmr_no, event_rec)) == TIMER_ARMED)
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
return ret;
}
switch (cmd)
{
case TMR_WAIT_REL:
parm += prev_event_time;
/*
* NOTE! No break here. Execution of TMR_WAIT_REL continues in the
* next case (TMR_WAIT_ABS)
*/
case TMR_WAIT_ABS:
if (parm > 0)
{
long time;
time = parm;
prev_event_time = time;
seq_playing = 1;
request_sound_timer(time);
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
return TIMER_ARMED;
}
break;
case TMR_START:
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
break;
case TMR_STOP:
break;
case TMR_CONTINUE:
break;
case TMR_TEMPO:
break;
case TMR_ECHO:
if (seq_mode == SEQ_2)
seq_copy_to_input(event_rec, 8);
else
{
parm = (parm << 8 | SEQ_ECHO);
seq_copy_to_input((unsigned char *) &parm, 4);
}
break;
default:;
}
return TIMER_NOT_ARMED;
}
static void seq_local_event(unsigned char *event_rec)
{
unsigned char cmd = event_rec[1];
unsigned int parm = *((unsigned int *) &event_rec[4]);
switch (cmd)
{
case LOCL_STARTAUDIO:
DMAbuf_start_devices(parm);
break;
default:;
}
}
static void seq_sysex_message(unsigned char *event_rec)
{
unsigned int dev = event_rec[1];
int i, l = 0;
unsigned char *buf = &event_rec[2];
if (dev > max_synthdev)
return;
if (!(synth_open_mask & (1 << dev)))
return;
if (!synth_devs[dev])
return;
l = 0;
for (i = 0; i < 6 && buf[i] != 0xff; i++)
l = i + 1;
if (!synth_devs[dev]->send_sysex)
return;
if (l > 0)
synth_devs[dev]->send_sysex(dev, buf, l);
}
static int play_event(unsigned char *q)
{
/*
* NOTE! This routine returns
* 0 = normal event played.
* 1 = Timer armed. Suspend playback until timer callback.
* 2 = MIDI output buffer full. Restore queue and suspend until timer
*/
unsigned int *delay;
switch (q[0])
{
case SEQ_NOTEOFF:
if (synth_open_mask & (1 << 0))
if (synth_devs[0])
synth_devs[0]->kill_note(0, q[1], 255, q[3]);
break;
case SEQ_NOTEON:
if (q[4] < 128 || q[4] == 255)
if (synth_open_mask & (1 << 0))
if (synth_devs[0])
synth_devs[0]->start_note(0, q[1], q[2], q[3]);
break;
case SEQ_WAIT:
delay = (unsigned int *) q; /*
* Bytes 1 to 3 contain the
* delay in 'ticks'
*/
*delay = (*delay >> 8) & 0xffffff;
if (*delay > 0)
{
long time;
seq_playing = 1;
time = *delay;
prev_event_time = time;
request_sound_timer(time);
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
/*
* The timer is now active and will reinvoke this function
* after the timer expires. Return to the caller now.
*/
return 1;
}
break;
case SEQ_PGMCHANGE:
if (synth_open_mask & (1 << 0))
if (synth_devs[0])
synth_devs[0]->set_instr(0, q[1], q[2]);
break;
case SEQ_SYNCTIMER: /*
* Reset timer
*/
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
break;
case SEQ_MIDIPUTC: /*
* Put a midi character
*/
if (midi_opened[q[2]])
{
int dev;
dev = q[2];
if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
break;
if (!midi_devs[dev]->outputc(dev, q[1]))
{
/*
* Output FIFO is full. Wait one timer cycle and try again.
*/
seq_playing = 1;
request_sound_timer(-1);
return 2;
}
else
midi_written[dev] = 1;
}
break;
case SEQ_ECHO:
seq_copy_to_input(q, 4); /*
* Echo back to the process
*/
break;
case SEQ_PRIVATE:
if ((int) q[1] < max_synthdev)
synth_devs[q[1]]->hw_control(q[1], q);
break;
case SEQ_EXTENDED:
extended_event(q);
break;
case EV_CHN_VOICE:
seq_chn_voice_event(q);
break;
case EV_CHN_COMMON:
seq_chn_common_event(q);
break;
case EV_TIMING:
if (seq_timing_event(q) == TIMER_ARMED)
{
return 1;
}
break;
case EV_SEQ_LOCAL:
seq_local_event(q);
break;
case EV_SYSEX:
seq_sysex_message(q);
break;
default:;
}
return 0;
}
/* called also as timer in irq context */
static void seq_startplay(void)
{
int this_one, action;
unsigned long flags;
while (qlen > 0)
{
spin_lock_irqsave(&lock,flags);
qhead = ((this_one = qhead) + 1) % SEQ_MAX_QUEUE;
qlen--;
spin_unlock_irqrestore(&lock,flags);
seq_playing = 1;
if ((action = play_event(&queue[this_one * EV_SZ])))
{ /* Suspend playback. Next timer routine invokes this routine again */
if (action == 2)
{
qlen++;
qhead = this_one;
}
return;
}
}
seq_playing = 0;
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
wake_up(&seq_sleeper);
}
static void reset_controllers(int dev, unsigned char *controller, int update_dev)
{
int i;
for (i = 0; i < 128; i++)
controller[i] = ctrl_def_values[i];
}
static void setup_mode2(void)
{
int dev;
max_synthdev = num_synths;
for (dev = 0; dev < num_midis; dev++)
{
if (midi_devs[dev] && midi_devs[dev]->converter != NULL)
{
synth_devs[max_synthdev++] = midi_devs[dev]->converter;
}
}
for (dev = 0; dev < max_synthdev; dev++)
{
int chn;
synth_devs[dev]->sysex_ptr = 0;
synth_devs[dev]->emulation = 0;
for (chn = 0; chn < 16; chn++)
{
synth_devs[dev]->chn_info[chn].pgm_num = 0;
reset_controllers(dev,
synth_devs[dev]->chn_info[chn].controllers,0);
synth_devs[dev]->chn_info[chn].bender_value = (1 << 7); /* Neutral */
synth_devs[dev]->chn_info[chn].bender_range = 200;
}
}
max_mididev = 0;
seq_mode = SEQ_2;
}
int sequencer_open(int dev, struct file *file)
{
int retval, mode, i;
int level, tmp;
if (!sequencer_ok)
sequencer_init();
level = ((dev & 0x0f) == SND_DEV_SEQ2) ? 2 : 1;
dev = dev >> 4;
mode = translate_mode(file);
DEB(printk("sequencer_open(dev=%d)\n", dev));
if (!sequencer_ok)
{
/* printk("Sound card: sequencer not initialized\n");*/
return -ENXIO;
}
if (dev) /* Patch manager device (obsolete) */
return -ENXIO;
if(synth_devs[dev] == NULL)
request_module("synth0");
if (mode == OPEN_READ)
{
if (!num_midis)
{
/*printk("Sequencer: No MIDI devices. Input not possible\n");*/
sequencer_busy = 0;
return -ENXIO;
}
}
if (sequencer_busy)
{
return -EBUSY;
}
sequencer_busy = 1;
obsolete_api_used = 0;
max_mididev = num_midis;
max_synthdev = num_synths;
pre_event_timeout = MAX_SCHEDULE_TIMEOUT;
seq_mode = SEQ_1;
if (pending_timer != -1)
{
tmr_no = pending_timer;
pending_timer = -1;
}
if (tmr_no == -1) /* Not selected yet */
{
int i, best;
best = -1;
for (i = 0; i < num_sound_timers; i++)
if (sound_timer_devs[i] && sound_timer_devs[i]->priority > best)
{
tmr_no = i;
best = sound_timer_devs[i]->priority;
}
if (tmr_no == -1) /* Should not be */
tmr_no = 0;
}
tmr = sound_timer_devs[tmr_no];
if (level == 2)
{
if (tmr == NULL)
{
/*printk("sequencer: No timer for level 2\n");*/
sequencer_busy = 0;
return -ENXIO;
}
setup_mode2();
}
if (!max_synthdev && !max_mididev)
{
sequencer_busy=0;
return -ENXIO;
}
synth_open_mask = 0;
for (i = 0; i < max_mididev; i++)
{
midi_opened[i] = 0;
midi_written[i] = 0;
}
for (i = 0; i < max_synthdev; i++)
{
if (synth_devs[i]==NULL)
continue;
if (!try_module_get(synth_devs[i]->owner))
continue;
if ((tmp = synth_devs[i]->open(i, mode)) < 0)
{
printk(KERN_WARNING "Sequencer: Warning! Cannot open synth device #%d (%d)\n", i, tmp);
if (synth_devs[i]->midi_dev)
printk(KERN_WARNING "(Maps to MIDI dev #%d)\n", synth_devs[i]->midi_dev);
}
else
{
synth_open_mask |= (1 << i);
if (synth_devs[i]->midi_dev)
midi_opened[synth_devs[i]->midi_dev] = 1;
}
}
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
if (seq_mode == SEQ_1 && (mode == OPEN_READ || mode == OPEN_READWRITE))
{
/*
* Initialize midi input devices
*/
for (i = 0; i < max_mididev; i++)
if (!midi_opened[i] && midi_devs[i])
{
if (!try_module_get(midi_devs[i]->owner))
continue;
if ((retval = midi_devs[i]->open(i, mode,
sequencer_midi_input, sequencer_midi_output)) >= 0)
{
midi_opened[i] = 1;
}
}
}
if (seq_mode == SEQ_2) {
if (try_module_get(tmr->owner))
tmr->open(tmr_no, seq_mode);
}
init_waitqueue_head(&seq_sleeper);
init_waitqueue_head(&midi_sleeper);
output_threshold = SEQ_MAX_QUEUE / 2;
return 0;
}
static void seq_drain_midi_queues(void)
{
int i, n;
/*
* Give the Midi drivers time to drain their output queues
*/
n = 1;
while (!signal_pending(current) && n)
{
n = 0;
for (i = 0; i < max_mididev; i++)
if (midi_opened[i] && midi_written[i])
if (midi_devs[i]->buffer_status != NULL)
if (midi_devs[i]->buffer_status(i))
n++;
/*
* Let's have a delay
*/
if (n)
interruptible_sleep_on_timeout(&seq_sleeper,
HZ/10);
}
}
void sequencer_release(int dev, struct file *file)
{
int i;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_release(dev=%d)\n", dev));
/*
* Wait until the queue is empty (if we don't have nonblock)
*/
if (mode != OPEN_READ && !(file->f_flags & O_NONBLOCK))
{
while (!signal_pending(current) && qlen > 0)
{
seq_sync();
interruptible_sleep_on_timeout(&seq_sleeper,
3*HZ);
/* Extra delay */
}
}
if (mode != OPEN_READ)
seq_drain_midi_queues(); /*
* Ensure the output queues are empty
*/
seq_reset();
if (mode != OPEN_READ)
seq_drain_midi_queues(); /*
* Flush the all notes off messages
*/
for (i = 0; i < max_synthdev; i++)
{
if (synth_open_mask & (1 << i)) /*
* Actually opened
*/
if (synth_devs[i])
{
synth_devs[i]->close(i);
module_put(synth_devs[i]->owner);
if (synth_devs[i]->midi_dev)
midi_opened[synth_devs[i]->midi_dev] = 0;
}
}
for (i = 0; i < max_mididev; i++)
{
if (midi_opened[i]) {
midi_devs[i]->close(i);
module_put(midi_devs[i]->owner);
}
}
if (seq_mode == SEQ_2) {
tmr->close(tmr_no);
module_put(tmr->owner);
}
if (obsolete_api_used)
printk(KERN_WARNING "/dev/music: Obsolete (4 byte) API was used by %s\n", current->comm);
sequencer_busy = 0;
}
static int seq_sync(void)
{
if (qlen && !seq_playing && !signal_pending(current))
seq_startplay();
if (qlen > 0)
interruptible_sleep_on_timeout(&seq_sleeper, HZ);
return qlen;
}
static void midi_outc(int dev, unsigned char data)
{
/*
* NOTE! Calls sleep(). Don't call this from interrupt.
*/
int n;
unsigned long flags;
/*
* This routine sends one byte to the Midi channel.
* If the output FIFO is full, it waits until there
* is space in the queue
*/
n = 3 * HZ; /* Timeout */
spin_lock_irqsave(&lock,flags);
while (n && !midi_devs[dev]->outputc(dev, data)) {
interruptible_sleep_on_timeout(&seq_sleeper, HZ/25);
n--;
}
spin_unlock_irqrestore(&lock,flags);
}
static void seq_reset(void)
{
/*
* NOTE! Calls sleep(). Don't call this from interrupt.
*/
int i;
int chn;
unsigned long flags;
sound_stop_timer();
seq_time = jiffies;
prev_input_time = 0;
prev_event_time = 0;
qlen = qhead = qtail = 0;
iqlen = iqhead = iqtail = 0;
for (i = 0; i < max_synthdev; i++)
if (synth_open_mask & (1 << i))
if (synth_devs[i])
synth_devs[i]->reset(i);
if (seq_mode == SEQ_2)
{
for (chn = 0; chn < 16; chn++)
for (i = 0; i < max_synthdev; i++)
if (synth_open_mask & (1 << i))
if (synth_devs[i])
{
synth_devs[i]->controller(i, chn, 123, 0); /* All notes off */
synth_devs[i]->controller(i, chn, 121, 0); /* Reset all ctl */
synth_devs[i]->bender(i, chn, 1 << 13); /* Bender off */
}
}
else /* seq_mode == SEQ_1 */
{
for (i = 0; i < max_mididev; i++)
if (midi_written[i]) /*
* Midi used. Some notes may still be playing
*/
{
/*
* Sending just an ACTIVE SENSING message should be enough to stop all
* playing notes, but since some devices don't recognize active
* sensing, we send explicit "all notes off" messages as well.
*/
midi_outc(i, 0xfe);
for (chn = 0; chn < 16; chn++)
{
midi_outc(i, (unsigned char) (0xb0 + (chn & 0x0f))); /* control change */
midi_outc(i, 0x7b); /* All notes off */
midi_outc(i, 0); /* Dummy parameter */
}
midi_devs[i]->close(i);
midi_written[i] = 0;
midi_opened[i] = 0;
}
}
seq_playing = 0;
spin_lock_irqsave(&lock,flags);
if (waitqueue_active(&seq_sleeper)) {
/* printk( "Sequencer Warning: Unexpected sleeping process - Waking up\n"); */
wake_up(&seq_sleeper);
}
spin_unlock_irqrestore(&lock,flags);
}
static void seq_panic(void)
{
/*
* This routine is called by the application in case the user
* wants to reset the system to the default state.
*/
seq_reset();
/*
* Since some of the devices don't recognize the active sensing and
* all notes off messages, we have to shut off all notes manually.
*
* TO BE IMPLEMENTED LATER
*/
/*
* Also return the controllers to their default states
*/
}
int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg)
{
int midi_dev, orig_dev, val, err;
int mode = translate_mode(file);
struct synth_info inf;
struct seq_event_rec event_rec;
unsigned long flags;
int __user *p = arg;
orig_dev = dev = dev >> 4;
switch (cmd)
{
case SNDCTL_TMR_TIMEBASE:
case SNDCTL_TMR_TEMPO:
case SNDCTL_TMR_START:
case SNDCTL_TMR_STOP:
case SNDCTL_TMR_CONTINUE:
case SNDCTL_TMR_METRONOME:
case SNDCTL_TMR_SOURCE:
if (seq_mode != SEQ_2)
return -EINVAL;
return tmr->ioctl(tmr_no, cmd, arg);
case SNDCTL_TMR_SELECT:
if (seq_mode != SEQ_2)
return -EINVAL;
if (get_user(pending_timer, p))
return -EFAULT;
if (pending_timer < 0 || pending_timer >= num_sound_timers || sound_timer_devs[pending_timer] == NULL)
{
pending_timer = -1;
return -EINVAL;
}
val = pending_timer;
break;
case SNDCTL_SEQ_PANIC:
seq_panic();
return -EINVAL;
case SNDCTL_SEQ_SYNC:
if (mode == OPEN_READ)
return 0;
while (qlen > 0 && !signal_pending(current))
seq_sync();
return qlen ? -EINTR : 0;
case SNDCTL_SEQ_RESET:
seq_reset();
return 0;
case SNDCTL_SEQ_TESTMIDI:
if (__get_user(midi_dev, p))
return -EFAULT;
if (midi_dev < 0 || midi_dev >= max_mididev || !midi_devs[midi_dev])
return -ENXIO;
if (!midi_opened[midi_dev] &&
(err = midi_devs[midi_dev]->open(midi_dev, mode, sequencer_midi_input,
sequencer_midi_output)) < 0)
return err;
midi_opened[midi_dev] = 1;
return 0;
case SNDCTL_SEQ_GETINCOUNT:
if (mode == OPEN_WRITE)
return 0;
val = iqlen;
break;
case SNDCTL_SEQ_GETOUTCOUNT:
if (mode == OPEN_READ)
return 0;
val = SEQ_MAX_QUEUE - qlen;
break;
case SNDCTL_SEQ_GETTIME:
if (seq_mode == SEQ_2)
return tmr->ioctl(tmr_no, cmd, arg);
val = jiffies - seq_time;
break;
case SNDCTL_SEQ_CTRLRATE:
/*
* If *arg == 0, just return the current rate
*/
if (seq_mode == SEQ_2)
return tmr->ioctl(tmr_no, cmd, arg);
if (get_user(val, p))
return -EFAULT;
if (val != 0)
return -EINVAL;
val = HZ;
break;
case SNDCTL_SEQ_RESETSAMPLES:
case SNDCTL_SYNTH_REMOVESAMPLE:
case SNDCTL_SYNTH_CONTROL:
if (get_user(dev, p))
return -EFAULT;
if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
return synth_devs[dev]->ioctl(dev, cmd, arg);
case SNDCTL_SEQ_NRSYNTHS:
val = max_synthdev;
break;
case SNDCTL_SEQ_NRMIDIS:
val = max_mididev;
break;
case SNDCTL_SYNTH_MEMAVL:
if (get_user(dev, p))
return -EFAULT;
if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
val = synth_devs[dev]->ioctl(dev, cmd, arg);
break;
case SNDCTL_FM_4OP_ENABLE:
if (get_user(dev, p))
return -EFAULT;
if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
synth_devs[dev]->ioctl(dev, cmd, arg);
return 0;
case SNDCTL_SYNTH_INFO:
if (get_user(dev, &((struct synth_info __user *)arg)->device))
return -EFAULT;
if (dev < 0 || dev >= max_synthdev)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
return synth_devs[dev]->ioctl(dev, cmd, arg);
/* Like SYNTH_INFO but returns ID in the name field */
case SNDCTL_SYNTH_ID:
if (get_user(dev, &((struct synth_info __user *)arg)->device))
return -EFAULT;
if (dev < 0 || dev >= max_synthdev)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)) && !orig_dev)
return -EBUSY;
memcpy(&inf, synth_devs[dev]->info, sizeof(inf));
strlcpy(inf.name, synth_devs[dev]->id, sizeof(inf.name));
inf.device = dev;
return copy_to_user(arg, &inf, sizeof(inf))?-EFAULT:0;
case SNDCTL_SEQ_OUTOFBAND:
if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
return -EFAULT;
spin_lock_irqsave(&lock,flags);
play_event(event_rec.arr);
spin_unlock_irqrestore(&lock,flags);
return 0;
case SNDCTL_MIDI_INFO:
if (get_user(dev, &((struct midi_info __user *)arg)->device))
return -EFAULT;
if (dev < 0 || dev >= max_mididev || !midi_devs[dev])
return -ENXIO;
midi_devs[dev]->info.device = dev;
return copy_to_user(arg, &midi_devs[dev]->info, sizeof(struct midi_info))?-EFAULT:0;
case SNDCTL_SEQ_THRESHOLD:
if (get_user(val, p))
return -EFAULT;
if (val < 1)
val = 1;
if (val >= SEQ_MAX_QUEUE)
val = SEQ_MAX_QUEUE - 1;
output_threshold = val;
return 0;
case SNDCTL_MIDI_PRETIME:
if (get_user(val, p))
return -EFAULT;
if (val < 0)
val = 0;
val = (HZ * val) / 10;
pre_event_timeout = val;
break;
default:
if (mode == OPEN_READ)
return -EIO;
if (!synth_devs[0])
return -ENXIO;
if (!(synth_open_mask & (1 << 0)))
return -ENXIO;
if (!synth_devs[0]->ioctl)
return -EINVAL;
return synth_devs[0]->ioctl(0, cmd, arg);
}
return put_user(val, p);
}
/* No kernel lock - we're using the global irq lock here */
unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait)
{
unsigned long flags;
unsigned int mask = 0;
dev = dev >> 4;
spin_lock_irqsave(&lock,flags);
/* input */
poll_wait(file, &midi_sleeper, wait);
if (iqlen)
mask |= POLLIN | POLLRDNORM;
/* output */
poll_wait(file, &seq_sleeper, wait);
if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
mask |= POLLOUT | POLLWRNORM;
spin_unlock_irqrestore(&lock,flags);
return mask;
}
void sequencer_timer(unsigned long dummy)
{
seq_startplay();
}
EXPORT_SYMBOL(sequencer_timer);
int note_to_freq(int note_num)
{
/*
* This routine converts a midi note to a frequency (multiplied by 1000)
*/
int note, octave, note_freq;
static int notes[] =
{
261632, 277189, 293671, 311132, 329632, 349232,
369998, 391998, 415306, 440000, 466162, 493880
};
#define BASE_OCTAVE 5
octave = note_num / 12;
note = note_num % 12;
note_freq = notes[note];
if (octave < BASE_OCTAVE)
note_freq >>= (BASE_OCTAVE - octave);
else if (octave > BASE_OCTAVE)
note_freq <<= (octave - BASE_OCTAVE);
/*
* note_freq >>= 1;
*/
return note_freq;
}
EXPORT_SYMBOL(note_to_freq);
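/*
 * Worked example: MIDI note 69 (A4) gives octave = 5 and note = 9,
 * so note_freq = notes[9] = 440000 and, the octave matching
 * BASE_OCTAVE, no shift is applied: 440.000 Hz. Note 57 (A3) lands
 * one octave below, so 440000 >> 1 = 220000, i.e. 220 Hz.
 */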
unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
int vibrato_cents)
{
unsigned long amount;
int negative, semitones, cents, multiplier = 1;
if (!bend)
return base_freq;
if (!range)
return base_freq;
if (!base_freq)
return base_freq;
if (range >= 8192)
range = 8192;
bend = bend * range / 8192; /* Convert to cents */
bend += vibrato_cents;
if (!bend)
return base_freq;
negative = bend < 0 ? 1 : 0;
if (bend < 0)
bend *= -1;
if (bend > range)
bend = range;
/*
if (bend > 2399)
bend = 2399;
*/
while (bend > 2399)
{
multiplier *= 4;
bend -= 2400;
}
semitones = bend / 100;
cents = bend % 100;
amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000;
if (negative)
return (base_freq * 10000) / amount; /* Bend down */
else
return (base_freq * amount) / 10000; /* Bend up */
}
EXPORT_SYMBOL(compute_finetune);
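/*
 * Worked example (assuming the tables from "tuning.h" encode
 * 2^(semitones/12) and 2^(cents/1200) scaled by 10000): with
 * base_freq = 440000, bend = 4096 and range = 200, the bend converts
 * to 4096 * 200 / 8192 = 100 cents = 1 semitone, so the result is
 * about 440000 * 10595 / 10000 ~= 466180, i.e. roughly A#4.
 */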
void sequencer_init(void)
{
if (sequencer_ok)
return;
queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ);
if (queue == NULL)
{
printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n");
return;
}
iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ);
if (iqueue == NULL)
{
printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n");
vfree(queue);
return;
}
sequencer_ok = 1;
}
EXPORT_SYMBOL(sequencer_init);
void sequencer_unload(void)
{
vfree(queue);
vfree(iqueue);
queue = iqueue = NULL;
}
| gpl-2.0 |
DerRomtester/android_kernel_oneplus_bacon-3.10 | drivers/uwb/beacon.c | 11691 | 16601 | /*
* Ultra Wide Band
* Beacon management
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include "uwb-internal.h"
/* Start Beaconing command structure */
struct uwb_rc_cmd_start_beacon {
struct uwb_rccb rccb;
__le16 wBPSTOffset;
u8 bChannelNumber;
} __attribute__((packed));
static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
{
int result;
struct uwb_rc_cmd_start_beacon *cmd;
struct uwb_rc_evt_confirm reply;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
cmd->bChannelNumber = channel;
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"START-BEACON: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
}
error_cmd:
kfree(cmd);
return result;
}
static int uwb_rc_stop_beacon(struct uwb_rc *rc)
{
int result;
struct uwb_rccb *cmd;
struct uwb_rc_evt_confirm reply;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->bCommandType = UWB_RC_CET_GENERAL;
cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
reply.rceb.bEventType = UWB_RC_CET_GENERAL;
reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
&reply.rceb, sizeof(reply));
if (result < 0)
goto error_cmd;
if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
dev_err(&rc->uwb_dev.dev,
"STOP-BEACON: command execution failed: %s (%d)\n",
uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
result = -EIO;
}
error_cmd:
kfree(cmd);
return result;
}
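/*
 * Both helpers above follow the same command/confirm pattern: fill in
 * an rccb with the command set (UWB_RC_CET_GENERAL) and command code,
 * tell uwb_rc_cmd() which confirm event (rceb type/code) to expect
 * back, and map any bResultCode other than UWB_RC_RES_SUCCESS to -EIO.
 */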
/*
* Start/stop beacons
*
* @rc: UWB Radio Controller to operate on
* @channel: UWB channel on which to beacon (WUSB[table
* 5-12]). If -1, stop beaconing.
* @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
*
* According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
* of a SET IE command after the device sent the first beacon that includes
* the IEs specified in the SET IE command. So, after we start beaconing we
* check if there is anything in the IE cache and call the SET IE command
* if needed.
*/
int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
{
int result;
struct device *dev = &rc->uwb_dev.dev;
if (channel < 0)
channel = -1;
if (channel == -1)
result = uwb_rc_stop_beacon(rc);
else {
/* channel >= 0...dah */
result = uwb_rc_start_beacon(rc, bpst_offset, channel);
if (result < 0)
return result;
if (le16_to_cpu(rc->ies->wIELength) > 0) {
result = uwb_rc_set_ie(rc, rc->ies);
if (result < 0) {
dev_err(dev, "Cannot set new IE on device: "
"%d\n", result);
result = uwb_rc_stop_beacon(rc);
channel = -1;
bpst_offset = 0;
}
}
}
if (result >= 0)
rc->beaconing = channel;
return result;
}
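/*
 * Illustrative usage (a sketch, not a real caller; error handling
 * elided):
 *
 *	result = uwb_rc_beacon(rc, 9, 0);	(start on channel 9)
 *	...
 *	result = uwb_rc_beacon(rc, -1, 0);	(stop beaconing)
 */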
/*
* Beacon cache
*
* The purpose of this is to speed up the lookup of beacon information
* when a new beacon arrives. The UWB Daemon also uses it to keep tabs
* on which devices are within radio range and which are not. When a
* device's beacon stays present for more than a certain amount of
* time, it is considered a new, usable device. When a beacon ceases
* to be received for a certain amount of time, it is considered that
* the device is gone.
*
* FIXME: use an allocator for the entries
* FIXME: use something faster for search than a list
*/
void uwb_bce_kfree(struct kref *_bce)
{
struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
kfree(bce->be);
kfree(bce);
}
/* Find a beacon by dev addr in the cache */
static
struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc,
const struct uwb_dev_addr *dev_addr)
{
struct uwb_beca_e *bce, *next;
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
goto out;
}
bce = NULL;
out:
return bce;
}
/* Find a beacon by dev addr in the cache */
static
struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc,
const struct uwb_mac_addr *mac_addr)
{
struct uwb_beca_e *bce, *next;
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
if (!memcmp(bce->mac_addr, mac_addr->data,
sizeof(struct uwb_mac_addr)))
goto out;
}
bce = NULL;
out:
return bce;
}
/**
* uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
* @rc: the radio controller that saw the device
* @devaddr: DevAddr of the UWB device to find
*
* There may be more than one matching device (in the case of a
* DevAddr conflict), but only the first one is returned.
*/
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
const struct uwb_dev_addr *devaddr)
{
struct uwb_dev *found = NULL;
struct uwb_beca_e *bce;
mutex_lock(&rc->uwb_beca.mutex);
bce = __uwb_beca_find_bydev(rc, devaddr);
if (bce)
found = uwb_dev_try_get(rc, bce->uwb_dev);
mutex_unlock(&rc->uwb_beca.mutex);
return found;
}
/**
* uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
* @rc: the radio controller that saw the device
* @macaddr: EUI-48 of the UWB device to find
*/
struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
const struct uwb_mac_addr *macaddr)
{
struct uwb_dev *found = NULL;
struct uwb_beca_e *bce;
mutex_lock(&rc->uwb_beca.mutex);
bce = __uwb_beca_find_bymac(rc, macaddr);
if (bce)
found = uwb_dev_try_get(rc, bce->uwb_dev);
mutex_unlock(&rc->uwb_beca.mutex);
return found;
}
/* Initialize a beacon cache entry */
static void uwb_beca_e_init(struct uwb_beca_e *bce)
{
mutex_init(&bce->mutex);
kref_init(&bce->refcnt);
stats_init(&bce->lqe_stats);
stats_init(&bce->rssi_stats);
}
/*
* Add a beacon to the cache
*
* @be: Beacon event information
* @bf: Beacon frame (part of @be, really)
* @ts_jiffies: Timestamp (in jiffies) when the beacon was received
*/
static
struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc,
struct uwb_rc_evt_beacon *be,
struct uwb_beacon_frame *bf,
unsigned long ts_jiffies)
{
struct uwb_beca_e *bce;
bce = kzalloc(sizeof(*bce), GFP_KERNEL);
if (bce == NULL)
return NULL;
uwb_beca_e_init(bce);
bce->ts_jiffies = ts_jiffies;
bce->uwb_dev = NULL;
list_add(&bce->node, &rc->uwb_beca.list);
return bce;
}
/*
* Wipe out beacon entries that became stale
*
* Remove the associated devices too.
*/
void uwb_beca_purge(struct uwb_rc *rc)
{
struct uwb_beca_e *bce, *next;
unsigned long expires;
mutex_lock(&rc->uwb_beca.mutex);
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
if (time_after(jiffies, expires)) {
uwbd_dev_offair(bce);
}
}
mutex_unlock(&rc->uwb_beca.mutex);
}
/* Clean up the whole beacon cache. Called on shutdown */
void uwb_beca_release(struct uwb_rc *rc)
{
struct uwb_beca_e *bce, *next;
mutex_lock(&rc->uwb_beca.mutex);
list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
list_del(&bce->node);
uwb_bce_put(bce);
}
mutex_unlock(&rc->uwb_beca.mutex);
}
static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
struct uwb_beacon_frame *bf)
{
char macbuf[UWB_ADDR_STRSIZE];
char devbuf[UWB_ADDR_STRSIZE];
char dstbuf[UWB_ADDR_STRSIZE];
uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
dev_info(&rc->uwb_dev.dev,
"BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
bf->Beacon_Slot_Number, macbuf);
}
/*
* @bce: beacon cache entry, referenced
*/
ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
char *buf, size_t size)
{
ssize_t result = 0;
struct uwb_rc_evt_beacon *be;
struct uwb_beacon_frame *bf;
int ies_len;
struct uwb_ie_hdr *ies;
mutex_lock(&bce->mutex);
be = bce->be;
if (be) {
bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame);
ies = (struct uwb_ie_hdr *)bf->IEData;
result = uwb_ie_dump_hex(ies, ies_len, buf, size);
}
mutex_unlock(&bce->mutex);
return result;
}
/*
* Verify that the beacon event, frame and IEs are ok
*/
static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
struct uwb_rc_evt_beacon *be)
{
int result = -EINVAL;
struct uwb_beacon_frame *bf;
struct device *dev = &rc->uwb_dev.dev;
/* Is there enough data to decode a beacon frame? */
if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
dev_err(dev, "BEACON event: Not enough data to decode "
"(%zu vs %zu bytes needed)\n", evt->notif.size,
sizeof(*be) + sizeof(*bf));
goto error;
}
/* FIXME: make sure beacon frame IEs are fine and that the whole thing
* is consistent */
result = 0;
error:
return result;
}
/*
* Handle UWB_RC_EVT_BEACON events
*
* We check the beacon cache to see how the received beacon fares. If
* it is already there we refresh the timestamp. If not, we create a
* new entry.
*
* According to the WHCI and WUSB specs, only one beacon frame is
* allowed per notification block, so we don't bother about scanning
* for more.
*/
int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
{
int result = -EINVAL;
struct uwb_rc *rc;
struct uwb_rc_evt_beacon *be;
struct uwb_beacon_frame *bf;
struct uwb_beca_e *bce;
unsigned long last_ts;
rc = evt->rc;
be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
result = uwb_verify_beacon(rc, evt, be);
if (result < 0)
return result;
/* FIXME: handle alien beacons. */
if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
return -ENOSYS;
}
bf = (struct uwb_beacon_frame *) be->BeaconInfo;
/*
* Drop beacons from devices with a NULL EUI-48 -- they cannot
* be uniquely identified.
*
* It's expected that these will all be WUSB devices and they
* have a WUSB specific connection method so ignoring them
* here shouldn't be a problem.
*/
if (uwb_mac_addr_bcast(&bf->Device_Identifier))
return 0;
mutex_lock(&rc->uwb_beca.mutex);
bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier);
if (bce == NULL) {
/* Not in there, a new device is pinging */
uwb_beacon_print(evt->rc, be, bf);
bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies);
if (bce == NULL) {
mutex_unlock(&rc->uwb_beca.mutex);
return -ENOMEM;
}
}
mutex_unlock(&rc->uwb_beca.mutex);
mutex_lock(&bce->mutex);
/* purge old beacon data */
kfree(bce->be);
last_ts = bce->ts_jiffies;
/* Update commonly used fields */
bce->ts_jiffies = evt->ts_jiffies;
bce->be = be;
bce->dev_addr = bf->hdr.SrcAddr;
bce->mac_addr = &bf->Device_Identifier;
be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
/*
* This might be a beacon from a new device.
*/
if (bce->uwb_dev == NULL)
uwbd_dev_onair(evt->rc, bce);
mutex_unlock(&bce->mutex);
return 1; /* we keep the event data */
}
/*
* Handle UWB_RC_EVT_BEACON_SIZE events
*
* XXXXX
*/
int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_beacon_size *bs;
/* Is there enough data to decode the event? */
if (evt->notif.size < sizeof(*bs)) {
dev_err(dev, "BEACON SIZE notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*bs));
goto error;
}
bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
if (0)
dev_info(dev, "Beacon size changed to %u bytes "
"(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
else {
/* temporary hack until we do something with this message... */
static unsigned count;
if (++count % 1000 == 0)
dev_info(dev, "Beacon size changed %u times "
"(FIXME: action?)\n", count);
}
result = 0;
error:
return result;
}
/**
* uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
* @evt: the BP_SLOT_CHANGE notification from the radio controller
*
* If the event indicates that no beacon period slots were available
* then radio controller has transitioned to a non-beaconing state.
* Otherwise, simply save the current beacon slot.
*/
int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
{
struct uwb_rc *rc = evt->rc;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_evt_bp_slot_change *bpsc;
if (evt->notif.size < sizeof(*bpsc)) {
dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
return -EINVAL;
}
bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
mutex_lock(&rc->uwb_dev.mutex);
if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
dev_info(dev, "stopped beaconing: No free slots in BP\n");
rc->beaconing = -1;
} else
rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
mutex_unlock(&rc->uwb_dev.mutex);
return 0;
}
/**
* Handle UWB_RC_EVT_BPOIE_CHANGE events
*
* XXXXX
*/
struct uwb_ie_bpo {
struct uwb_ie_hdr hdr;
u8 bp_length;
u8 data[];
} __attribute__((packed));
int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_bpoie_change *bpoiec;
struct uwb_ie_bpo *bpoie;
static unsigned count; /* FIXME: this is a temp hack */
size_t iesize;
/* Is there enough data to decode it? */
if (evt->notif.size < sizeof(*bpoiec)) {
dev_err(dev, "BPOIEC notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*bpoiec));
goto error;
}
bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
iesize = le16_to_cpu(bpoiec->wBPOIELength);
if (iesize < sizeof(*bpoie)) {
dev_err(dev, "BPOIEC notification: Not enough IE data to "
"decode (%zu vs %zu bytes needed)\n",
iesize, sizeof(*bpoie));
goto error;
}
if (++count % 1000 == 0) /* Lame placeholder */
dev_info(dev, "BPOIE: %u changes received\n", count);
/*
* FIXME: At this point we should go over all the IEs in the
* bpoiec->BPOIE array and act on each.
*/
result = 0;
error:
return result;
}
/*
* Print beaconing state.
*/
static ssize_t uwb_rc_beacon_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
ssize_t result;
mutex_lock(&rc->uwb_dev.mutex);
result = sprintf(buf, "%d\n", rc->beaconing);
mutex_unlock(&rc->uwb_dev.mutex);
return result;
}
/*
* Start beaconing on the specified channel, or stop beaconing.
*/
static ssize_t uwb_rc_beacon_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uwb_dev *uwb_dev = to_uwb_dev(dev);
struct uwb_rc *rc = uwb_dev->rc;
int channel;
ssize_t result = -EINVAL;
result = sscanf(buf, "%d", &channel);
if (result >= 1)
result = uwb_radio_force_channel(rc, channel);
return result < 0 ? result : size;
}
DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
| gpl-2.0 |
Snuzzo/vigor_aosp_kernel | fs/ext4/inode.c | 172 | 177459 | /*
* linux/fs/ext4/inode.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/inode.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Goal-directed block allocation by Stephen Tweedie
* (sct@redhat.com), 1993, 1998
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
* 64-bit file support on 64-bit platforms by Jakub Jelinek
* (jj@sunsite.ms.mff.cuni.cz)
*
* Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
#include <trace/events/ext4.h>
#define MPAGE_DA_EXTENT_TAIL 0x01
static inline int ext4_begin_ordered_truncate(struct inode *inode,
loff_t new_size)
{
trace_ext4_begin_ordered_truncate(inode, new_size);
/*
* If jinode is zero, then we never opened the file for
* writing, so there's no need to call
* jbd2_journal_begin_ordered_truncate() since there's no
* outstanding writes we need to flush.
*/
if (!EXT4_I(inode)->jinode)
return 0;
return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode,
new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
/*
* Test whether an inode is a fast symlink.
*/
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
(inode->i_sb->s_blocksize >> 9) : 0;
return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
* Work out how many blocks we need to proceed with the next chunk of a
* truncate transaction.
*/
static unsigned long blocks_for_truncate(struct inode *inode)
{
ext4_lblk_t needed;
needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
/* Give ourselves just enough room to cope with inodes in which
* i_blocks is corrupt: we've seen disk corruptions in the past
* which resulted in random data in an inode which looked enough
* like a regular file for ext4 to try to delete it. Things
* will go a bit crazy if that happens, but at least we should
* try not to panic the whole kernel. */
if (needed < 2)
needed = 2;
/* But we need to bound the transaction so we don't overflow the
* journal. */
if (needed > EXT4_MAX_TRANS_DATA)
needed = EXT4_MAX_TRANS_DATA;
return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
/*
* Truncate transactions can be complex and absolutely huge. So we need to
* be able to restart the transaction at a convenient checkpoint to make
* sure we don't overflow the journal.
*
* start_transaction gets us a new handle for a truncate transaction,
* and extend_transaction tries to extend the existing one a bit. If
* extend fails, we need to propagate the failure up and restart the
* transaction in the top-level truncate loop. --sct
*/
static handle_t *start_transaction(struct inode *inode)
{
handle_t *result;
result = ext4_journal_start(inode, blocks_for_truncate(inode));
if (!IS_ERR(result))
return result;
ext4_std_error(inode->i_sb, PTR_ERR(result));
return result;
}
/*
* Try to extend this transaction for the purposes of truncation.
*
* Returns 0 if we managed to create more room. If we can't create more
* room, and the transaction must be restarted we return 1.
*/
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
if (!ext4_handle_valid(handle))
return 0;
if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
return 0;
if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
return 0;
return 1;
}
/*
* Restart the transaction associated with *handle. This does a commit,
* so before we call here everything must be consistently dirtied against
* this transaction.
*/
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
int nblocks)
{
int ret;
/*
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
* i_mutex. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
jbd_debug(2, "restarting handle %p\n", handle);
up_write(&EXT4_I(inode)->i_data_sem);
ret = ext4_journal_restart(handle, nblocks);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
return ret;
}
/*
* Called at the last iput() if i_nlink is zero.
*/
void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
trace_ext4_evict_inode(inode);
ext4_ioend_wait(inode);
if (inode->i_nlink) {
truncate_inode_pages(&inode->i_data, 0);
goto no_delete;
}
if (!is_bad_inode(inode))
dquot_initialize(inode);
if (ext4_should_order_data(inode))
ext4_begin_ordered_truncate(inode, 0);
truncate_inode_pages(&inode->i_data, 0);
if (is_bad_inode(inode))
goto no_delete;
handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
* If we're going to skip the normal cleanup, we still need to
* make sure that the in-core orphan linked list is properly
* cleaned up.
*/
ext4_orphan_del(NULL, inode);
goto no_delete;
}
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_size = 0;
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_warning(inode->i_sb,
"couldn't mark inode dirty (err %d)", err);
goto stop_handle;
}
if (inode->i_blocks)
ext4_truncate(inode);
/*
* ext4_ext_truncate() doesn't reserve any slop when it
* restarts journal transactions; therefore there may not be
* enough credits left in the handle to remove the inode from
* the orphan list and set the dtime field.
*/
if (!ext4_handle_has_enough_credits(handle, 3)) {
err = ext4_journal_extend(handle, 3);
if (err > 0)
err = ext4_journal_restart(handle, 3);
if (err != 0) {
ext4_warning(inode->i_sb,
"couldn't extend journal (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
goto no_delete;
}
}
/*
* Kill off the orphan record which ext4_truncate created.
* AKPM: I think this can be inside the above `if'.
* Note that ext4_orphan_del() has to be able to cope with the
* deletion of a non-existent orphan - this is because we don't
* know if ext4_truncate() actually created an orphan record.
* (Well, we could do this if we need to, but heck - it works)
*/
ext4_orphan_del(handle, inode);
EXT4_I(inode)->i_dtime = get_seconds();
/*
* One subtle ordering requirement: if anything has gone wrong
* (transaction abort, IO errors, whatever), then we can still
* do these next steps (the fs will already have been marked as
* having errors), but we can't free the inode if the mark_dirty
* fails.
*/
if (ext4_mark_inode_dirty(handle, inode))
/* If that failed, just do the required in-core inode clear. */
ext4_clear_inode(inode);
else
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
return;
no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
typedef struct {
__le32 *p;
__le32 key;
struct buffer_head *bh;
} Indirect;
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
p->key = *(p->p = v);
p->bh = bh;
}
/**
* ext4_block_to_path - parse the block number into array of offsets
* @inode: inode in question (we are only interested in its superblock)
* @i_block: block number to be parsed
* @offsets: array to store the offsets in
* @boundary: set this non-zero if the referred-to block is likely to be
* followed (on disk) by an indirect block.
*
* To store the locations of a file's data ext4 uses a data structure common
* for UNIX filesystems - tree of pointers anchored in the inode, with
* data blocks at leaves and indirect blocks in intermediate nodes.
* This function translates the block number into path in that tree -
* return value is the path length and @offsets[n] is the offset of
* the pointer to the (n+1)th node in the nth one. If @i_block is out of
* range (negative or too large), a warning is printed and zero is
* returned.
*
* Note: function doesn't find node addresses, so no IO is needed. All
* we need to know is the capacity of indirect blocks (taken from the
* inode->i_sb).
*/
/*
* Portability note: the last comparison (check that we fit into triple
* indirect block) is spelled differently, because otherwise on an
* architecture with 32-bit longs and 8Kb pages we might get into trouble
* if our filesystem had 8Kb blocks. We might use long long, but that would
* kill us on x86. Oh, well, at least the sign propagation does not matter -
* i_block would have to be negative in the very beginning, so we would not
* get there at all.
*/
static int ext4_block_to_path(struct inode *inode,
ext4_lblk_t i_block,
ext4_lblk_t offsets[4], int *boundary)
{
int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
const long direct_blocks = EXT4_NDIR_BLOCKS,
indirect_blocks = ptrs,
double_blocks = (1 << (ptrs_bits * 2));
int n = 0;
int final = 0;
if (i_block < direct_blocks) {
offsets[n++] = i_block;
final = direct_blocks;
} else if ((i_block -= direct_blocks) < indirect_blocks) {
offsets[n++] = EXT4_IND_BLOCK;
offsets[n++] = i_block;
final = ptrs;
} else if ((i_block -= indirect_blocks) < double_blocks) {
offsets[n++] = EXT4_DIND_BLOCK;
offsets[n++] = i_block >> ptrs_bits;
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
offsets[n++] = EXT4_TIND_BLOCK;
offsets[n++] = i_block >> (ptrs_bits * 2);
offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
i_block + direct_blocks +
indirect_blocks + double_blocks, inode->i_ino);
}
if (boundary)
*boundary = final - 1 - (i_block & (ptrs - 1));
return n;
}
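/*
 * Worked example (illustrative, assuming a 4KB block size so that
 * ptrs = 1024 and ptrs_bits = 10):
 *
 *	i_block = 5    -> offsets = { 5 },                    n = 1
 *	i_block = 12   -> offsets = { EXT4_IND_BLOCK, 0 },    n = 2
 *	i_block = 1036 -> offsets = { EXT4_DIND_BLOCK, 0, 0 }, n = 3
 */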
static int __ext4_check_blockref(const char *function, unsigned int line,
struct inode *inode,
__le32 *p, unsigned int max)
{
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
__le32 *bref = p;
unsigned int blk;
while (bref < p+max) {
blk = le32_to_cpu(*bref++);
if (blk &&
unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
blk, 1))) {
es->s_last_error_block = cpu_to_le64(blk);
ext4_error_inode(inode, function, line, blk,
"invalid block");
return -EIO;
}
}
return 0;
}
#define ext4_check_indirect_blockref(inode, bh) \
__ext4_check_blockref(__func__, __LINE__, inode, \
(__le32 *)(bh)->b_data, \
EXT4_ADDR_PER_BLOCK((inode)->i_sb))
#define ext4_check_inode_blockref(inode) \
__ext4_check_blockref(__func__, __LINE__, inode, \
EXT4_I(inode)->i_data, \
EXT4_NDIR_BLOCKS)
/**
* ext4_get_branch - read the chain of indirect blocks leading to data
* @inode: inode in question
* @depth: depth of the chain (1 - direct pointer, etc.)
* @offsets: offsets of pointers in inode/indirect blocks
* @chain: place to store the result
* @err: here we store the error value
*
* Function fills the array of triples <key, p, bh> and returns %NULL
* if everything went OK or the pointer to the last filled triple
* (incomplete one) otherwise. Upon the return chain[i].key contains
* the number of (i+1)-th block in the chain (as it is stored in memory,
* i.e. little-endian 32-bit), chain[i].p contains the address of that
* number (it points into struct inode for i==0 and into the bh->b_data
* for i>0) and chain[i].bh points to the buffer_head of i-th indirect
* block for i>0 and NULL for i==0. In other words, it holds the block
* numbers of the chain, addresses they were taken from (and where we can
* verify that chain did not change) and buffer_heads hosting these
* numbers.
*
* Function stops when it stumbles upon zero pointer (absent block)
* (pointer to last triple returned, *@err == 0)
* or when it gets an IO error reading an indirect block
* (ditto, *@err == -EIO)
* or when it reads all @depth-1 indirect blocks successfully and finds
* the whole chain, all way to the data (returns %NULL, *err == 0).
*
* Need to be called with
* down_read(&EXT4_I(inode)->i_data_sem)
*/
static Indirect *ext4_get_branch(struct inode *inode, int depth,
ext4_lblk_t *offsets,
Indirect chain[4], int *err)
{
struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
*err = 0;
/* i_data is not going away, no lock needed */
add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
if (!p->key)
goto no_block;
while (--depth) {
bh = sb_getblk(sb, le32_to_cpu(p->key));
if (unlikely(!bh))
goto failure;
if (!bh_uptodate_or_lock(bh)) {
if (bh_submit_read(bh) < 0) {
put_bh(bh);
goto failure;
}
/* validate block references */
if (ext4_check_indirect_blockref(inode, bh)) {
put_bh(bh);
goto failure;
}
}
add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
/* Reader: end */
if (!p->key)
goto no_block;
}
return NULL;
failure:
*err = -EIO;
no_block:
return p;
}
/**
* ext4_find_near - find a place for allocation with sufficient locality
* @inode: owner
* @ind: descriptor of indirect block.
*
* This function returns the preferred place for block allocation.
* It is used when heuristic for sequential allocation fails.
* Rules are:
* + if there is a block to the left of our position - allocate near it.
* + if pointer will live in indirect block - allocate near that block.
* + if pointer will live in inode - allocate in the same
* cylinder group.
*
* In the latter case we colour the starting block by the caller's PID to
* prevent it from clashing with concurrent allocations for a different inode
* in the same block group. The PID is used here so that functionally related
* files will be close-by on-disk.
*
* Caller must make sure that @ind is valid and will stay that way.
*/
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
struct ext4_inode_info *ei = EXT4_I(inode);
__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
__le32 *p;
ext4_fsblk_t bg_start;
ext4_fsblk_t last_block;
ext4_grpblk_t colour;
ext4_group_t block_group;
int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
/* Try to find previous block */
for (p = ind->p - 1; p >= start; p--) {
if (*p)
return le32_to_cpu(*p);
}
/* No such thing, so let's try location of indirect block */
if (ind->bh)
return ind->bh->b_blocknr;
/*
* It is going to be referred to from the inode itself? OK, just put it
* into the same cylinder group then.
*/
block_group = ei->i_block_group;
if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
block_group &= ~(flex_size-1);
if (S_ISREG(inode->i_mode))
block_group++;
}
bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
/*
* If we are doing delayed allocation, we don't need to take
* colour into account.
*/
if (test_opt(inode->i_sb, DELALLOC))
return bg_start;
if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
colour = (current->pid % 16) *
(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
else
colour = (current->pid % 16) * ((last_block - bg_start) / 16);
return bg_start + colour;
}
/**
* ext4_find_goal - find a preferred place for allocation.
* @inode: owner
* @block: block we want
* @partial: pointer to the last triple within a chain
*
* Normally this function finds the preferred place for block allocation
* and returns it.
* Because this is only used for non-extent files, we limit the block nr
* to 32 bits.
*/
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
Indirect *partial)
{
ext4_fsblk_t goal;
/*
* XXX need to get goal block from mballoc's data structures
*/
goal = ext4_find_near(inode, partial);
goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
return goal;
}
/**
* ext4_blks_to_allocate - Look up the block map and count the number
* of direct blocks that need to be allocated for the given branch.
*
* @branch: chain of indirect blocks
* @k: number of blocks need for indirect blocks
* @blks: number of data blocks to be mapped.
* @blocks_to_boundary: the offset in the indirect block
*
* return the total number of blocks to be allocated, including the
* direct and indirect blocks.
*/
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
int blocks_to_boundary)
{
unsigned int count = 0;
/*
* Simple case: the [t,d]indirect block(s) have not been allocated yet,
* so it is clear that the blocks on that path have not been allocated
* either.
*/
if (k > 0) {
/* right now we don't handle cross boundary allocation */
if (blks < blocks_to_boundary + 1)
count += blks;
else
count += blocks_to_boundary + 1;
return count;
}
count++;
while (count < blks && count <= blocks_to_boundary &&
le32_to_cpu(*(branch[0].p + count)) == 0) {
count++;
}
return count;
}
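/*
 * Worked example (illustrative): with k > 0 (missing indirect blocks),
 * blks = 8 and blocks_to_boundary = 3, this returns
 * blocks_to_boundary + 1 = 4, since allocation never crosses an
 * indirect-block boundary. With k == 0 it instead counts contiguous
 * zero entries in the map starting at branch[0].p, capped by both
 * blks and the boundary.
 */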
/**
* ext4_alloc_blocks: multiple allocate blocks needed for a branch
* @handle: handle for this transaction
* @inode: inode which needs allocated blocks
* @iblock: the logical block to start allocated at
* @goal: preferred physical block of allocation
* @indirect_blks: the number of blocks we need to allocate for indirect
* blocks
* @blks: number of desired blocks
* @new_blocks: on return it will store the new block numbers for
* the indirect blocks(if needed) and the first direct block,
* @err: on return it will store the error code
*
* This function will return the number of blocks allocated as
* requested by the passed-in parameters.
*/
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, ext4_fsblk_t goal,
int indirect_blks, int blks,
ext4_fsblk_t new_blocks[4], int *err)
{
struct ext4_allocation_request ar;
int target, i;
unsigned long count = 0, blk_allocated = 0;
int index = 0;
ext4_fsblk_t current_block = 0;
int ret = 0;
/*
* Here we try to allocate the requested multiple blocks at once,
* on a best-effort basis.
* To build a branch, we should allocate blocks for
* the indirect blocks (if not allocated yet), and at least
* the first direct block of this branch. That's the
* minimum number of blocks we need to allocate (required).
*/
/* first we try to allocate the indirect blocks */
target = indirect_blks;
while (target > 0) {
count = target;
/* allocating blocks for indirect blocks and direct blocks */
current_block = ext4_new_meta_blocks(handle, inode, goal,
0, &count, err);
if (*err)
goto failed_out;
if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
EXT4_ERROR_INODE(inode,
"current_block %llu + count %lu > %d!",
current_block, count,
EXT4_MAX_BLOCK_FILE_PHYS);
*err = -EIO;
goto failed_out;
}
target -= count;
/* allocate blocks for indirect blocks */
while (index < indirect_blks && count) {
new_blocks[index++] = current_block++;
count--;
}
if (count > 0) {
/*
* save the new block number
* for the first direct block
*/
new_blocks[index] = current_block;
printk(KERN_INFO "%s returned more blocks than "
"requested\n", __func__);
WARN_ON(1);
break;
}
}
target = blks - count ;
blk_allocated = count;
if (!target)
goto allocated;
/* Now allocate data blocks */
memset(&ar, 0, sizeof(ar));
ar.inode = inode;
ar.goal = goal;
ar.len = target;
ar.logical = iblock;
if (S_ISREG(inode->i_mode))
/* enable in-core preallocation only for regular files */
ar.flags = EXT4_MB_HINT_DATA;
current_block = ext4_mb_new_blocks(handle, &ar, err);
if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
EXT4_ERROR_INODE(inode,
"current_block %llu + ar.len %d > %d!",
current_block, ar.len,
EXT4_MAX_BLOCK_FILE_PHYS);
*err = -EIO;
goto failed_out;
}
if (*err && (target == blks)) {
/*
* if the allocation failed and we didn't allocate
* any blocks before
*/
goto failed_out;
}
if (!*err) {
if (target == blks) {
/*
* save the new block number
* for the first direct block
*/
new_blocks[index] = current_block;
}
blk_allocated += ar.len;
}
allocated:
/* total number of blocks allocated for direct blocks */
ret = blk_allocated;
*err = 0;
return ret;
failed_out:
for (i = 0; i < index; i++)
ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
return ret;
}
/**
* ext4_alloc_branch - allocate and set up a chain of blocks.
* @handle: handle for this transaction
* @inode: owner
* @indirect_blks: number of allocated indirect blocks
* @blks: number of allocated direct blocks
* @goal: preferred place for allocation
* @offsets: offsets (in the blocks) to store the pointers to next.
* @branch: place to store the chain in.
*
* This function allocates blocks, zeroes out all but the last one,
* links them into chain and (if we are synchronous) writes them to disk.
* In other words, it prepares a branch that can be spliced onto the
* inode. It stores the information about that chain in the branch[], in
* the same format as ext4_get_branch() would do. We are calling it after
* we had read the existing part of chain and partial points to the last
* triple of that (one with zero ->key). Upon the exit we have the same
* picture as after the successful ext4_get_block(), except that in one
* place chain is disconnected - *branch->p is still zero (we did not
* set the last link), but branch->key contains the number that should
* be placed into *branch->p to fill that gap.
*
* If allocation fails we free all blocks we've allocated (and forget
* their buffer_heads) and return the error value from the failed
* ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
* as described above and return 0.
*/
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, int indirect_blks,
int *blks, ext4_fsblk_t goal,
ext4_lblk_t *offsets, Indirect *branch)
{
int blocksize = inode->i_sb->s_blocksize;
int i, n = 0;
int err = 0;
struct buffer_head *bh;
int num;
ext4_fsblk_t new_blocks[4];
ext4_fsblk_t current_block;
num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
*blks, new_blocks, &err);
if (err)
return err;
branch[0].key = cpu_to_le32(new_blocks[0]);
/*
* metadata blocks and data blocks are allocated.
*/
for (n = 1; n <= indirect_blks; n++) {
/*
* Get buffer_head for parent block, zero it out
* and set the pointer to new one, then send
* parent to disk.
*/
bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
if (unlikely(!bh)) {
err = -EIO;
goto failed;
}
branch[n].bh = bh;
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
err = ext4_journal_get_create_access(handle, bh);
if (err) {
/* Don't brelse(bh) here; it's done in
* ext4_journal_forget() below */
unlock_buffer(bh);
goto failed;
}
memset(bh->b_data, 0, blocksize);
branch[n].p = (__le32 *) bh->b_data + offsets[n];
branch[n].key = cpu_to_le32(new_blocks[n]);
*branch[n].p = branch[n].key;
if (n == indirect_blks) {
current_block = new_blocks[n];
/*
* End of chain, update the last new metablock of
* the chain to point to the new allocated
* data blocks numbers
*/
for (i = 1; i < num; i++)
*(branch[n].p + i) = cpu_to_le32(++current_block);
}
BUFFER_TRACE(bh, "marking uptodate");
set_buffer_uptodate(bh);
unlock_buffer(bh);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto failed;
}
*blks = num;
return err;
failed:
/* Allocation failed, free what we already allocated */
ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
for (i = 1; i <= n ; i++) {
/*
* branch[i].bh is newly allocated, so there is no
* need to revoke the block, which is why we don't
* need to set EXT4_FREE_BLOCKS_METADATA.
*/
ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
EXT4_FREE_BLOCKS_FORGET);
}
for (i = n+1; i < indirect_blks; i++)
ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
return err;
}
/**
* ext4_splice_branch - splice the allocated branch onto inode.
* @handle: handle for this transaction
* @inode: owner
* @block: (logical) number of block we are adding
* @chain: chain of indirect blocks (with a missing link - see
* ext4_alloc_branch)
* @where: location of missing link
* @num: number of indirect blocks we are adding
* @blks: number of direct blocks we are adding
*
* This function fills the missing link and does all housekeeping needed in
* inode (->i_blocks, etc.). In case of success we end up with the full
* chain to new block and return 0.
*/
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
ext4_lblk_t block, Indirect *where, int num,
int blks)
{
int i;
int err = 0;
ext4_fsblk_t current_block;
/*
* If we're splicing into a [td]indirect block (as opposed to the
* inode) then we need to get write access to the [td]indirect block
* before the splice.
*/
if (where->bh) {
BUFFER_TRACE(where->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, where->bh);
if (err)
goto err_out;
}
/* That's it */
*where->p = where->key;
/*
* Update the host buffer_head or inode to point to the remaining
* just-allocated direct blocks
*/
if (num == 0 && blks > 1) {
current_block = le32_to_cpu(where->key) + 1;
for (i = 1; i < blks; i++)
*(where->p + i) = cpu_to_le32(current_block++);
}
/* We are done with atomic stuff, now do the rest of housekeeping */
/* had we spliced it onto indirect block? */
if (where->bh) {
/*
* If we spliced it onto an indirect block, we haven't
* altered the inode. Note however that if it is being spliced
* onto an indirect block at the very end of the file (the
* file is growing) then we *will* alter the inode to reflect
* the new i_size. But that is not done here - it is done in
* generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
*/
jbd_debug(5, "splicing indirect only\n");
BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, where->bh);
if (err)
goto err_out;
} else {
/*
* OK, we spliced it into the inode itself on a direct block.
*/
ext4_mark_inode_dirty(handle, inode);
jbd_debug(5, "splicing direct\n");
}
return err;
err_out:
for (i = 1; i <= num; i++) {
/*
* branch[i].bh is newly allocated, so there is no
* need to revoke the block, which is why we don't
* need to set EXT4_FREE_BLOCKS_METADATA.
*/
ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
EXT4_FREE_BLOCKS_FORGET);
}
ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
blks, 0);
return err;
}
/*
* The ext4_ind_map_blocks() function handles non-extent inodes
* (i.e., using the traditional indirect/double-indirect i_blocks
* scheme) for ext4_map_blocks().
*
* Allocation strategy is simple: if we have to allocate something, we will
* have to go the whole way to leaf. So let's do it before attaching anything
* to tree, set linkage between the newborn blocks, write them if sync is
* required, recheck the path, free and repeat if check fails, otherwise
* set the last missing link (that will protect us from any truncate-generated
* removals - all blocks on the path are immune now) and possibly force the
* write on the parent block.
* That has a nice additional property: no special recovery from the failed
* allocations is needed - we simply release blocks and do not touch anything
* reachable from inode.
*
* `handle' can be NULL if create == 0.
*
* return > 0, # of blocks mapped or allocated.
* return = 0, if plain lookup failed.
* return < 0, error case.
*
* The ext4_ind_map_blocks() function should be called with
* down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
* blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
* down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
* blocks.
*/
static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
int flags)
{
int err = -EIO;
ext4_lblk_t offsets[4];
Indirect chain[4];
Indirect *partial;
ext4_fsblk_t goal;
int indirect_blks;
int blocks_to_boundary = 0;
int depth;
int count = 0;
ext4_fsblk_t first_block = 0;
trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
depth = ext4_block_to_path(inode, map->m_lblk, offsets,
&blocks_to_boundary);
if (depth == 0)
goto out;
partial = ext4_get_branch(inode, depth, offsets, chain, &err);
/* Simplest case - block found, no allocation needed */
if (!partial) {
first_block = le32_to_cpu(chain[depth - 1].key);
count++;
/* map more blocks */
while (count < map->m_len && count <= blocks_to_boundary) {
ext4_fsblk_t blk;
blk = le32_to_cpu(*(chain[depth-1].p + count));
if (blk == first_block + count)
count++;
else
break;
}
goto got_it;
}
/* Next simple case - plain lookup or failed read of indirect block */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
goto cleanup;
/*
* Okay, we need to do block allocation.
*/
goal = ext4_find_goal(inode, map->m_lblk, partial);
/* the number of blocks we need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
/*
* Next look up the indirect map to count the total number of
* direct blocks to allocate for this branch.
*/
count = ext4_blks_to_allocate(partial, indirect_blks,
map->m_len, blocks_to_boundary);
/*
* Block out ext4_truncate while we alter the tree
*/
err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
&count, goal,
offsets + (partial - chain), partial);
/*
* The ext4_splice_branch call will free and forget any buffers
* on the new chain if there is a failure, but that risks using
* up transaction credits, especially for bitmaps where the
* credits cannot be returned. Can we handle this somehow? We
* may need to return -EAGAIN upwards in the worst case. --sct
*/
if (!err)
err = ext4_splice_branch(handle, inode, map->m_lblk,
partial, indirect_blks, count);
if (err)
goto cleanup;
map->m_flags |= EXT4_MAP_NEW;
ext4_update_inode_fsync_trans(handle, inode, 1);
got_it:
map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = le32_to_cpu(chain[depth-1].key);
map->m_len = count;
if (count > blocks_to_boundary)
map->m_flags |= EXT4_MAP_BOUNDARY;
err = count;
/* Clean up and exit */
partial = chain + depth - 1; /* the whole chain */
cleanup:
while (partial > chain) {
BUFFER_TRACE(partial->bh, "call brelse");
brelse(partial->bh);
partial--;
}
out:
trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
map->m_pblk, map->m_len, err);
return err;
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
return &EXT4_I(inode)->i_reserved_quota;
}
#endif
/*
* Calculate the number of metadata blocks we need to reserve
* to allocate a new block at @lblock in a non-extent-based file
*/
static int ext4_indirect_calc_metadata_amount(struct inode *inode,
sector_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
int blk_bits;
if (lblock < EXT4_NDIR_BLOCKS)
return 0;
lblock -= EXT4_NDIR_BLOCKS;
if (ei->i_da_metadata_calc_len &&
(lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
ei->i_da_metadata_calc_len++;
return 0;
}
ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
ei->i_da_metadata_calc_len = 1;
blk_bits = order_base_2(lblock);
return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
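/*
 * Worked example (illustrative, assuming 4KB blocks so that
 * EXT4_ADDR_PER_BLOCK_BITS == 10, and no cached calculation state):
 * lblock = 5 is a direct block and needs 0 metadata blocks;
 * lblock = 13 maps through one indirect block, so the function
 * returns 0/10 + 1 = 1; lblock = 1036 needs a double-indirect path,
 * order_base_2(1024) = 10, so it returns 10/10 + 1 = 2.
 */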
/*
* Calculate the number of metadata blocks we need to reserve
* to allocate a block located at @lblock
*/
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return ext4_ext_calc_metadata_amount(inode, lblock);
return ext4_indirect_calc_metadata_amount(inode, lblock);
}
/*
* Called with i_data_sem down, which is important since we can call
* ext4_discard_preallocations() from here.
*/
void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
spin_lock(&ei->i_block_reservation_lock);
trace_ext4_da_update_reserve_space(inode, used);
if (unlikely(used > ei->i_reserved_data_blocks)) {
ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
"with only %d reserved data blocks\n",
__func__, inode->i_ino, used,
ei->i_reserved_data_blocks);
WARN_ON(1);
used = ei->i_reserved_data_blocks;
}
if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
"with only %d reserved metadata blocks\n", __func__,
inode->i_ino, ei->i_allocated_meta_blocks,
ei->i_reserved_meta_blocks);
WARN_ON(1);
ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
}
/* Update per-inode reservations */
ei->i_reserved_data_blocks -= used;
ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
used + ei->i_allocated_meta_blocks);
ei->i_allocated_meta_blocks = 0;
if (ei->i_reserved_data_blocks == 0) {
/*
* We can release all of the reserved metadata blocks
* only when we have written all of the delayed
* allocation blocks.
*/
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
ei->i_reserved_meta_blocks);
ei->i_reserved_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
}
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/* Update quota subsystem for data blocks */
if (quota_claim)
dquot_claim_block(inode, used);
else {
/*
* We did fallocate with an offset that is already delayed
* allocated. So on delayed allocated writeback we should
* not re-claim the quota for fallocated blocks.
*/
dquot_release_reservation_block(inode, used);
}
/*
* If we have done all the pending block allocations and if
* there aren't any writers on the inode, we can discard the
* inode's preallocations.
*/
if ((ei->i_reserved_data_blocks == 0) &&
(atomic_read(&inode->i_writecount) == 0))
ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
unsigned int line,
struct ext4_map_blocks *map)
{
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
"lblock %lu mapped to illegal pblock "
"(length %d)", (unsigned long) map->m_lblk,
map->m_len);
return -EIO;
}
return 0;
}
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
/*
* Return the number of contiguous dirty pages in a given inode
* starting at page frame idx.
*/
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
unsigned int max_pages)
{
struct address_space *mapping = inode->i_mapping;
pgoff_t index;
struct pagevec pvec;
pgoff_t num = 0;
int i, nr_pages, done = 0;
if (max_pages == 0)
return 0;
pagevec_init(&pvec, 0);
while (!done) {
index = idx;
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_DIRTY,
(pgoff_t)PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
struct buffer_head *bh, *head;
lock_page(page);
if (unlikely(page->mapping != mapping) ||
!PageDirty(page) ||
PageWriteback(page) ||
page->index != idx) {
done = 1;
unlock_page(page);
break;
}
if (page_has_buffers(page)) {
bh = head = page_buffers(page);
do {
if (!buffer_delay(bh) &&
!buffer_unwritten(bh))
done = 1;
bh = bh->b_this_page;
} while (!done && (bh != head));
}
unlock_page(page);
if (done)
break;
idx++;
num++;
if (num >= max_pages) {
done = 1;
break;
}
}
pagevec_release(&pvec);
}
return num;
}
/*
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns them if the blocks are already mapped.
*
* Otherwise it takes the write lock of the i_data_sem, allocates blocks,
* stores the allocated blocks in the result buffer head and marks it
* mapped.
*
* If the file is extent-based, it will call ext4_ext_map_blocks();
* otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
* files.
*
* On success, it returns the number of blocks mapped or allocated.
* If create == 0 and the blocks are pre-allocated and uninitialized,
* the result buffer head is unmapped. If create == 1, it will make sure
* the buffer head is mapped.
*
* It returns 0 if a plain lookup failed (the blocks have not been
* allocated); in that case, the buffer head is unmapped.
*
* It returns the error in case of allocation failure.
*/
int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
int retval;
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read((&EXT4_I(inode)->i_data_sem));
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, 0);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, 0);
}
up_read((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
int ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
/* If it is only a block(s) lookup */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
* Return if the blocks have already been allocated.
*
* Note that if blocks have been preallocated,
* ext4_ext_get_block() returns with create = 0
* and the buffer head unmapped.
*/
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
return retval;
/*
* When we call get_blocks without the create flag, the
* BH_Unwritten flag could have gotten set if the blocks
* requested were part of a uninitialized extent. We need to
* clear this flag now that we are committed to convert all or
* part of the uninitialized extent to be an initialized
* extent. This is because we need to avoid the combination
* of BH_Unwritten and BH_Mapped flags being simultaneously
* set on the buffer_head.
*/
map->m_flags &= ~EXT4_MAP_UNWRITTEN;
/*
* New block allocation and/or writing to an uninitialized extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_blocks()
* with create == 1 flag.
*/
down_write((&EXT4_I(inode)->i_data_sem));
/*
* if the caller is from delayed allocation writeout path
* we have already reserved fs blocks for allocation
* let the underlying get_block() function know to
* avoid double accounting
*/
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
/*
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now. We don't
* support fallocate for non extent files. So we can update
* reserve space here.
*/
if ((retval > 0) &&
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
int ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
return retval;
}
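/*
 * Illustrative call pattern (a sketch, not a real caller): look up a
 * single logical block without allocating. `handle' can be NULL here
 * because create == 0.
 *
 *	struct ext4_map_blocks map;
 *	int ret;
 *
 *	map.m_lblk = lblk;
 *	map.m_len = 1;
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0)
 *		use map.m_pblk (EXT4_MAP_MAPPED is set in map.m_flags)
 *	else if (ret == 0)
 *		the block is a hole (nothing allocated)
 *	else
 *		ret is a negative error code
 */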
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
static int _ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int flags)
{
handle_t *handle = ext4_journal_current_handle();
struct ext4_map_blocks map;
int ret = 0, started = 0;
int dio_credits;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
if (flags && !handle) {
/* Direct IO write... */
if (map.m_len > DIO_MAX_BLOCKS)
map.m_len = DIO_MAX_BLOCKS;
dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
handle = ext4_journal_start(inode, dio_credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
return ret;
}
started = 1;
}
ret = ext4_map_blocks(handle, inode, &map, flags);
if (ret > 0) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
ret = 0;
}
if (started)
ext4_journal_stop(handle);
return ret;
}
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
return _ext4_get_block(inode, iblock, bh,
create ? EXT4_GET_BLOCKS_CREATE : 0);
}
/*
* `handle' can be NULL if create is zero
*/
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int create, int *errp)
{
struct ext4_map_blocks map;
struct buffer_head *bh;
int fatal = 0, err;
J_ASSERT(handle != NULL || create == 0);
map.m_lblk = block;
map.m_len = 1;
err = ext4_map_blocks(handle, inode, &map,
create ? EXT4_GET_BLOCKS_CREATE : 0);
if (err < 0)
*errp = err;
if (err <= 0)
return NULL;
*errp = 0;
bh = sb_getblk(inode->i_sb, map.m_pblk);
if (!bh) {
*errp = -EIO;
return NULL;
}
if (map.m_flags & EXT4_MAP_NEW) {
J_ASSERT(create != 0);
J_ASSERT(handle != NULL);
/*
* Now that we do not always journal data, we should
* keep in mind whether this should always journal the
* new buffer as metadata. For now, regular file
* writes use ext4_get_block instead, so it's not a
* problem.
*/
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
fatal = ext4_journal_get_create_access(handle, bh);
if (!fatal && !buffer_uptodate(bh)) {
memset(bh->b_data, 0, inode->i_sb->s_blocksize);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (!fatal)
fatal = err;
} else {
BUFFER_TRACE(bh, "not a new buffer");
}
if (fatal) {
*errp = fatal;
brelse(bh);
bh = NULL;
}
return bh;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
ext4_lblk_t block, int create, int *err)
{
struct buffer_head *bh;
bh = ext4_getblk(handle, inode, block, create, err);
if (!bh)
return bh;
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ_META, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
put_bh(bh);
*err = -EIO;
return NULL;
}
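/*
 * Illustrative sketch (editor's addition): a typical metadata read through
 * ext4_bread(), much like the directory code uses it. "blk" and "err" are
 * hypothetical locals; with create == 0 no journal handle is needed.
 *
 *	struct buffer_head *bh = ext4_bread(NULL, inode, blk, 0, &err);
 *	if (bh) {
 *		// ... use bh->b_data ...
 *		brelse(bh);
 *	}
 */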
static int walk_page_buffers(handle_t *handle,
struct buffer_head *head,
unsigned from,
unsigned to,
int *partial,
int (*fn)(handle_t *handle,
struct buffer_head *bh))
{
struct buffer_head *bh;
unsigned block_start, block_end;
unsigned blocksize = head->b_size;
int err, ret = 0;
struct buffer_head *next;
for (bh = head, block_start = 0;
ret == 0 && (bh != head || !block_start);
block_start = block_end, bh = next) {
next = bh->b_this_page;
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (partial && !buffer_uptodate(bh))
*partial = 1;
continue;
}
err = (*fn)(handle, bh);
if (!ret)
ret = err;
}
return ret;
}
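/*
 * Illustrative sketch (editor's addition): walk_page_buffers() applies a
 * callback to every buffer_head that intersects [from, to) on a page and
 * stops the walk on the first non-zero return. A hypothetical walker that
 * bails out on the first dirty buffer could look like:
 *
 *	static int stop_on_dirty(handle_t *handle, struct buffer_head *bh)
 *	{
 *		return buffer_dirty(bh) ? -EBUSY : 0;
 *	}
 *	...
 *	ret = walk_page_buffers(NULL, page_buffers(page), 0,
 *				PAGE_CACHE_SIZE, NULL, stop_on_dirty);
 */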
/*
* To preserve ordering, it is essential that the hole instantiation and
* the data write be encapsulated in a single transaction. We cannot
* close off a transaction and start a new one between the ext4_get_block()
* and the commit_write(). So doing the jbd2_journal_start at the start of
* prepare_write() is the right thing to do.
*
* Also, this function can nest inside ext4_writepage() ->
* block_write_full_page(). In that case, we *know* that ext4_writepage()
* has generated enough buffer credits to do the whole page. So we won't
* block on the journal in that case, which is good, because the caller may
* be PF_MEMALLOC.
*
* By accident, ext4 can be reentered when a transaction is open via
* quota file writes. If we were to commit the transaction while thus
* reentered, there can be a deadlock - we would be holding a quota
* lock, and the commit would never complete if another thread had a
* transaction open and was blocking on the quota lock - a ranking
* violation.
*
* So what we do is to rely on the fact that jbd2_journal_stop/journal_start
* will _not_ run commit under these circumstances because handle->h_ref
* is elevated. We'll still have enough credits for the tiny quotafile
* write.
*/
static int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
{
int dirty = buffer_dirty(bh);
int ret;
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
/*
* __block_write_begin() could have dirtied some buffers. Clean
* the dirty bit as jbd2_journal_get_write_access() could complain
* otherwise about fs integrity issues. Setting of the dirty bit
* by __block_write_begin() isn't a real problem here as we clear
* the bit before releasing a page lock and thus writeback cannot
* ever write the buffer.
*/
if (dirty)
clear_buffer_dirty(bh);
ret = ext4_journal_get_write_access(handle, bh);
if (!ret && dirty)
ret = ext4_handle_dirty_metadata(handle, NULL, bh);
return ret;
}
/*
* Truncate blocks that were not used by write. We have to truncate the
* pagecache as well so that corresponding buffers get properly unmapped.
*/
static void ext4_truncate_failed_write(struct inode *inode)
{
truncate_inode_pages(inode->i_mapping, inode->i_size);
ext4_truncate(inode);
}
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
int ret, needed_blocks;
handle_t *handle;
int retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
trace_ext4_write_begin(inode, pos, len, flags);
/*
* Reserve one block more for addition to orphan list in case
* we allocate blocks but write fails for some reason
*/
needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
index = pos >> PAGE_CACHE_SHIFT;
from = pos & (PAGE_CACHE_SIZE - 1);
to = from + len;
retry:
handle = ext4_journal_start(inode, needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
/* We cannot recurse into the filesystem as the transaction is already
* started */
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
ext4_journal_stop(handle);
ret = -ENOMEM;
goto out;
}
*pagep = page;
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ext4_get_block_write);
else
ret = __block_write_begin(page, pos, len, ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = walk_page_buffers(handle, page_buffers(page),
from, to, NULL, do_journal_get_write_access);
}
if (ret) {
unlock_page(page);
page_cache_release(page);
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*
* Add inode to orphan list in case we crash before
* truncate finishes
*/
if (pos + len > inode->i_size && ext4_can_truncate(inode))
ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might
* still be on the orphan list; we need to
* make sure the inode is removed from the
* orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
out:
return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
set_buffer_uptodate(bh);
return ext4_handle_dirty_metadata(handle, NULL, bh);
}
static int ext4_generic_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
int i_size_changed = 0;
struct inode *inode = mapping->host;
handle_t *handle = ext4_journal_current_handle();
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
/*
* No need to use i_size_read() here, the i_size
* cannot change under us because we hold i_mutex.
*
* But it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
if (pos + copied > inode->i_size) {
i_size_write(inode, pos + copied);
i_size_changed = 1;
}
if (pos + copied > EXT4_I(inode)->i_disksize) {
/* We need to mark inode dirty even if
* new_i_size is less than inode->i_size
* but greater than i_disksize (hint: delalloc)
*/
ext4_update_i_disksize(inode, (pos + copied));
i_size_changed = 1;
}
unlock_page(page);
page_cache_release(page);
/*
* Don't mark the inode dirty under page lock. First, it unnecessarily
* makes the holding time of page lock longer. Second, it forces lock
* ordering of page lock and transaction start for journaling
* filesystems.
*/
if (i_size_changed)
ext4_mark_inode_dirty(handle, inode);
return copied;
}
/*
* We need to pick up the new inode size which generic_commit_write gave us.
* `file' can be NULL - eg, when called from page_symlink().
*
* ext4 never places buffers on inode->i_mapping->private_list. Metadata
* buffers are managed internally.
*/
static int ext4_ordered_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
int ret = 0, ret2;
trace_ext4_ordered_write_end(inode, pos, len, copied);
ret = ext4_jbd2_file_inode(handle, inode);
if (ret == 0) {
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks and copied
* less, we will have blocks allocated outside
* inode->i_size, so truncate them.
*/
ext4_orphan_add(handle, inode);
if (ret2 < 0)
ret = ret2;
}
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
int ret = 0, ret2;
trace_ext4_writeback_write_end(inode, pos, len, copied);
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks and copied
* less, we will have blocks allocated outside
* inode->i_size, so truncate them.
*/
ext4_orphan_add(handle, inode);
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
handle_t *handle = ext4_journal_current_handle();
struct inode *inode = mapping->host;
int ret = 0, ret2;
int partial = 0;
unsigned from, to;
loff_t new_i_size;
trace_ext4_journalled_write_end(inode, pos, len, copied);
from = pos & (PAGE_CACHE_SIZE - 1);
to = from + len;
BUG_ON(!ext4_handle_valid(handle));
if (copied < len) {
if (!PageUptodate(page))
copied = 0;
page_zero_new_buffers(page, from+copied, to);
}
ret = walk_page_buffers(handle, page_buffers(page), from,
to, &partial, write_end_fn);
if (!partial)
SetPageUptodate(page);
new_i_size = pos + copied;
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
if (new_i_size > EXT4_I(inode)->i_disksize) {
ext4_update_i_disksize(inode, new_i_size);
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
}
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size && ext4_can_truncate(inode))
/* If we have allocated more blocks and copied
* less, we will have blocks allocated outside
* inode->i_size, so truncate them.
*/
ext4_orphan_add(handle, inode);
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
if (pos + len > inode->i_size) {
ext4_truncate_failed_write(inode);
/*
* If truncate failed early the inode might still be
* on the orphan list; we need to make sure the inode
* is removed from the orphan list in that case.
*/
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
}
return ret ? ret : copied;
}
/*
* Reserve a single block located at lblock
*/
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned long md_needed;
int ret;
/*
* Recalculate the amount of metadata blocks to reserve
* in order to allocate nrblocks; the worst case is one
* extent per block.
*/
repeat:
spin_lock(&ei->i_block_reservation_lock);
md_needed = ext4_calc_metadata_amount(inode, lblock);
trace_ext4_da_reserve_space(inode, md_needed);
spin_unlock(&ei->i_block_reservation_lock);
/*
* We will charge metadata quota at writeout time; this saves
* us from metadata over-estimation, though we may go over by
* a small amount in the end. Here we just reserve for data.
*/
ret = dquot_reserve_block(inode, 1);
if (ret)
return ret;
/*
* We do still charge estimated metadata to the sb though;
* we cannot afford to run out of free blocks.
*/
if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
dquot_release_reservation_block(inode, 1);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
return -ENOSPC;
}
spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks++;
ei->i_reserved_meta_blocks += md_needed;
spin_unlock(&ei->i_block_reservation_lock);
return 0; /* success */
}
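/*
 * Worked example (editor's addition): suppose ext4_calc_metadata_amount()
 * returns md_needed == 2 for this lblock. A successful reservation then
 * moves the counters as follows -- note quota is charged for the data
 * block only, while the superblock claim covers data plus metadata:
 *
 *	dquot_reserve_block(inode, 1);		// +1 data block on quota
 *	ext4_claim_free_blocks(sbi, 2 + 1, 0);	// md_needed + 1 on the sb
 *	ei->i_reserved_data_blocks += 1;
 *	ei->i_reserved_meta_blocks += 2;
 */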
static void ext4_da_release_space(struct inode *inode, int to_free)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
if (!to_free)
return; /* Nothing to release, exit */
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
trace_ext4_da_release_space(inode, to_free);
if (unlikely(to_free > ei->i_reserved_data_blocks)) {
/*
* if there aren't enough reserved blocks, then the
* counter is messed up somewhere. Since this
* function is called from invalidatepage, it's
* harmless to return without any action.
*/
ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
"ino %lu, to_free %d with only %d reserved "
"data blocks\n", inode->i_ino, to_free,
ei->i_reserved_data_blocks);
WARN_ON(1);
to_free = ei->i_reserved_data_blocks;
}
ei->i_reserved_data_blocks -= to_free;
if (ei->i_reserved_data_blocks == 0) {
/*
* We can release all of the reserved metadata blocks
* only when we have written all of the delayed
* allocation blocks.
*/
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
ei->i_reserved_meta_blocks);
ei->i_reserved_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
}
/* update fs dirty data blocks counter */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_release_reservation_block(inode, to_free);
}
static void ext4_da_page_release_reservation(struct page *page,
unsigned long offset)
{
int to_release = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
if ((offset <= curr_off) && (buffer_delay(bh))) {
to_release++;
clear_buffer_delay(bh);
}
curr_off = next_off;
} while ((bh = bh->b_this_page) != head);
ext4_da_release_space(page->mapping->host, to_release);
}
/*
* Delayed allocation stuff
*/
/*
* mpage_da_submit_io - walks through extent of pages and try to write
* them with writepage() call back
*
* @mpd->inode: inode
* @mpd->first_page: first page of the extent
* @mpd->next_page: page after the last page of the extent
*
* By the time mpage_da_submit_io() is called we expect all blocks
* to be allocated; this may be wrong if allocation failed.
*
* As pages are already locked by write_cache_pages(), we can't use it
*/
static int mpage_da_submit_io(struct mpage_da_data *mpd,
struct ext4_map_blocks *map)
{
struct pagevec pvec;
unsigned long index, end;
int ret = 0, err, nr_pages, i;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
loff_t size = i_size_read(inode);
unsigned int len, block_start;
struct buffer_head *bh, *page_bufs = NULL;
int journal_data = ext4_should_journal_data(inode);
sector_t pblock = 0, cur_logical = 0;
struct ext4_io_submit io_submit;
BUG_ON(mpd->next_page <= mpd->first_page);
memset(&io_submit, 0, sizeof(io_submit));
/*
* We need to start from the first_page to the next_page - 1
* to make sure we also write the mapped dirty buffer_heads.
* If we look at mpd->b_blocknr we would only be looking
* at the currently mapped buffer_heads.
*/
index = mpd->first_page;
end = mpd->next_page - 1;
pagevec_init(&pvec, 0);
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
int commit_write = 0, skip_page = 0;
struct page *page = pvec.pages[i];
index = page->index;
if (index > end)
break;
if (index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
if (map) {
cur_logical = index << (PAGE_CACHE_SHIFT -
inode->i_blkbits);
pblock = map->m_pblk + (cur_logical -
map->m_lblk);
}
index++;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
/*
* If the page does not have buffers (for
* whatever reason), try to create them using
* __block_write_begin. If this fails,
* skip the page and move on.
*/
if (!page_has_buffers(page)) {
if (__block_write_begin(page, 0, len,
noalloc_get_block_write)) {
skip_page:
unlock_page(page);
continue;
}
commit_write = 1;
}
bh = page_bufs = page_buffers(page);
block_start = 0;
do {
if (!bh)
goto skip_page;
if (map && (cur_logical >= map->m_lblk) &&
(cur_logical <= (map->m_lblk +
(map->m_len - 1)))) {
if (buffer_delay(bh)) {
clear_buffer_delay(bh);
bh->b_blocknr = pblock;
}
if (buffer_unwritten(bh) ||
buffer_mapped(bh))
BUG_ON(bh->b_blocknr != pblock);
if (map->m_flags & EXT4_MAP_UNINIT)
set_buffer_uninit(bh);
clear_buffer_unwritten(bh);
}
/*
* skip the page if block allocation is undone
* and the block is dirty
*/
if (ext4_bh_delay_or_unwritten(NULL, bh))
skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
cur_logical++;
pblock++;
} while (bh != page_bufs);
if (skip_page)
goto skip_page;
if (commit_write)
/* mark the buffer_heads as dirty & uptodate */
block_commit_write(page, 0, len);
clear_page_dirty_for_io(page);
/*
* Delalloc doesn't support data journalling,
* but eventually maybe we'll lift this
* restriction.
*/
if (unlikely(journal_data && PageChecked(page)))
err = __ext4_journalled_writepage(page, len);
else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
err = ext4_bio_write_page(&io_submit, page,
len, mpd->wbc);
else if (buffer_uninit(page_bufs)) {
ext4_set_bh_endio(page_bufs, inode);
err = block_write_full_page_endio(page,
noalloc_get_block_write,
mpd->wbc, ext4_end_io_buffer_write);
} else
err = block_write_full_page(page,
noalloc_get_block_write, mpd->wbc);
if (!err)
mpd->pages_written++;
/*
* In error case, we have to continue because
* remaining pages are still locked
*/
if (ret == 0)
ret = err;
}
pagevec_release(&pvec);
}
ext4_io_submit(&io_submit);
return ret;
}
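/*
 * Worked example for the pblock arithmetic above (editor's addition):
 * with 4K pages and 4K blocks (PAGE_CACHE_SHIFT == i_blkbits), page
 * index 25 gives cur_logical == 25; a map with m_lblk == 20 and
 * m_pblk == 5000 therefore yields pblock == 5000 + (25 - 20) == 5005
 * for that page's first buffer_head.
 */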
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
int nr_pages, i;
pgoff_t index, end;
struct pagevec pvec;
struct inode *inode = mpd->inode;
struct address_space *mapping = inode->i_mapping;
index = mpd->first_page;
end = mpd->next_page - 1;
pagevec_init(&pvec, 0);
while (index <= end) {
nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
if (page->index > end)
break;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
block_invalidatepage(page, 0);
ClearPageUptodate(page);
unlock_page(page);
}
index = pvec.pages[nr_pages - 1]->index + 1;
pagevec_release(&pvec);
}
return;
}
static void ext4_print_free_blocks(struct inode *inode)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
printk(KERN_CRIT "Total free blocks count %lld\n",
ext4_count_free_blocks(inode->i_sb));
printk(KERN_CRIT "Free/Dirty block details\n");
printk(KERN_CRIT "free_blocks=%lld\n",
(long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
printk(KERN_CRIT "dirty_blocks=%lld\n",
(long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
printk(KERN_CRIT "Block reservation details\n");
printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
EXT4_I(inode)->i_reserved_data_blocks);
printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
EXT4_I(inode)->i_reserved_meta_blocks);
return;
}
/*
* mpage_da_map_and_submit - go through given space, map them
* if necessary, and then submit them for I/O
*
* @mpd - bh describing space
*
* The function skips space we know is already mapped to disk blocks.
*
*/
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
int err, blks, get_blocks_flags;
struct ext4_map_blocks map, *mapp = NULL;
sector_t next = mpd->b_blocknr;
unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
handle_t *handle = NULL;
/*
* If the blocks are mapped already, or we couldn't accumulate
* any blocks, then proceed immediately to the submission stage.
*/
if ((mpd->b_size == 0) ||
((mpd->b_state & (1 << BH_Mapped)) &&
!(mpd->b_state & (1 << BH_Delay)) &&
!(mpd->b_state & (1 << BH_Unwritten))))
goto submit_io;
handle = ext4_journal_current_handle();
BUG_ON(!handle);
/*
* Call ext4_map_blocks() to allocate any delayed allocation
* blocks, or to convert an uninitialized extent to be
* initialized (in the case where we have written into
* one or more preallocated blocks).
*
* We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
* indicate that we are on the delayed allocation path. This
* affects functions in many different parts of the allocation
* call path. This flag exists primarily because we don't
* want to change *many* call functions, so ext4_map_blocks()
* will set the EXT4_STATE_DELALLOC_RESERVED flag once the
* inode's allocation semaphore is taken.
*
* If the blocks in question were delalloc blocks, set
* EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
* variables are updated after the blocks have been allocated.
*/
map.m_lblk = next;
map.m_len = max_blocks;
get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
if (ext4_should_dioread_nolock(mpd->inode))
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
if (mpd->b_state & (1 << BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
if (blks < 0) {
struct super_block *sb = mpd->inode->i_sb;
err = blks;
/*
* If get block returns EAGAIN or ENOSPC and there
* appears to be free blocks we will just let
* mpage_da_submit_io() unlock all of the pages.
*/
if (err == -EAGAIN)
goto submit_io;
if (err == -ENOSPC &&
ext4_count_free_blocks(sb)) {
mpd->retval = err;
goto submit_io;
}
/*
* get block failure will cause us to loop in
* writepages, because a_ops->writepage won't be able
* to make progress. The page will be redirtied by
* writepage and writepages will again try to write
* the same.
*/
if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
ext4_msg(sb, KERN_CRIT,
"delayed block allocation failed for inode %lu "
"at logical offset %llu with max blocks %zd "
"with error %d", mpd->inode->i_ino,
(unsigned long long) next,
mpd->b_size >> mpd->inode->i_blkbits, err);
ext4_msg(sb, KERN_CRIT,
"This should not happen!! Data will be lost\n");
if (err == -ENOSPC)
ext4_print_free_blocks(mpd->inode);
}
/* invalidate all the pages */
ext4_da_block_invalidatepages(mpd);
/* Mark this page range as having been completed */
mpd->io_done = 1;
return;
}
BUG_ON(blks == 0);
mapp = ↦
if (map.m_flags & EXT4_MAP_NEW) {
struct block_device *bdev = mpd->inode->i_sb->s_bdev;
int i;
for (i = 0; i < map.m_len; i++)
unmap_underlying_metadata(bdev, map.m_pblk + i);
}
if (ext4_should_order_data(mpd->inode)) {
err = ext4_jbd2_file_inode(handle, mpd->inode);
if (err)
/* This only happens if the journal is aborted */
return;
}
/*
* Update on-disk size along with block allocation.
*/
disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
if (disksize > i_size_read(mpd->inode))
disksize = i_size_read(mpd->inode);
if (disksize > EXT4_I(mpd->inode)->i_disksize) {
ext4_update_i_disksize(mpd->inode, disksize);
err = ext4_mark_inode_dirty(handle, mpd->inode);
if (err)
ext4_error(mpd->inode->i_sb,
"Failed to mark inode %lu dirty",
mpd->inode->i_ino);
}
submit_io:
mpage_da_submit_io(mpd, mapp);
mpd->io_done = 1;
}
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
(1 << BH_Delay) | (1 << BH_Unwritten))
/*
* mpage_add_bh_to_extent - try to add one more block to extent of blocks
*
* @mpd->lbh - extent of blocks
* @logical - logical number of the block in the file
* @bh - bh of the block (used to access block's state)
*
* the function is used to collect contiguous blocks in the same state
*/
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
sector_t logical, size_t b_size,
unsigned long b_state)
{
sector_t next;
int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
/*
* XXX Don't go larger than mballoc is willing to allocate
* This is a stopgap solution. We eventually need to fold
* mpage_da_submit_io() into this function and then call
* ext4_map_blocks() multiple times in a loop
*/
if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
goto flush_it;
/* check if the reserved journal credits might overflow */
if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
if (nrblocks >= EXT4_MAX_TRANS_DATA) {
/*
* With non-extent format we are limited by the journal
* credit available. Total credit needed to insert
* nrblocks contiguous blocks is dependent on the
* nrblocks. So limit nrblocks.
*/
goto flush_it;
} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
EXT4_MAX_TRANS_DATA) {
/*
* Adding the new buffer_head would make it cross the
* allowed limit for which we have journal credit
* reserved. So limit the new bh->b_size
*/
b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
mpd->inode->i_blkbits;
/* we will do mpage_da_submit_io in the next loop */
}
}
/*
* First block in the extent
*/
if (mpd->b_size == 0) {
mpd->b_blocknr = logical;
mpd->b_size = b_size;
mpd->b_state = b_state & BH_FLAGS;
return;
}
next = mpd->b_blocknr + nrblocks;
/*
* Can we merge the block to our big extent?
*/
if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
mpd->b_size += b_size;
return;
}
flush_it:
/*
* We couldn't merge the block to our extent, so we
* need to flush current extent and start new one
*/
mpage_da_map_and_submit(mpd);
return;
}
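/*
 * Worked example (editor's addition): with 4K blocks, an extent that
 * currently covers logical blocks 100..103 has mpd->b_blocknr == 100 and
 * mpd->b_size == 4 * 4096, so nrblocks == 4 and next == 104. A buffer at
 * logical 104 whose (b_state & BH_FLAGS) equals mpd->b_state is merged by
 * growing mpd->b_size; any other combination flushes the current extent
 * via mpage_da_map_and_submit() first.
 */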
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
* This is a special get_blocks_t callback which is used by
* ext4_da_write_begin(). It will either return mapped block or
* reserve space for a single block.
*
* For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
* We also have b_blocknr = -1 and b_bdev initialized properly
*
* For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
* We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
* initialized properly.
*/
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
struct ext4_map_blocks map;
int ret = 0;
sector_t invalid_block = ~((sector_t) 0xffff);
if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
invalid_block = ~0;
BUG_ON(create == 0);
BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
map.m_lblk = iblock;
map.m_len = 1;
/*
* first, we need to know whether the block is allocated already
* preallocated blocks are unmapped but should be treated
* the same as allocated blocks.
*/
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0)
return ret;
if (ret == 0) {
if (buffer_delay(bh))
return 0; /* Not sure this could or should happen */
/*
* XXX: __block_write_begin() unmaps passed block, is it OK?
*/
ret = ext4_da_reserve_space(inode, iblock);
if (ret)
/* not enough space to reserve */
return ret;
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
return 0;
}
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
if (buffer_unwritten(bh)) {
/* A delayed write to unwritten bh should be marked
* new and mapped. Mapped ensures that we don't do
* get_block multiple times when we write to the same
* offset and new ensures that we do proper zero out
* for partial write.
*/
set_buffer_new(bh);
set_buffer_mapped(bh);
}
return 0;
}
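/*
 * Summary of the three outcomes above (editor's addition):
 *
 *	block already mapped	-> bh carries the flags from map.m_flags
 *	unwritten extent	-> BH_Mapped | BH_New | BH_Unwritten
 *	no block yet (delalloc)	-> BH_Mapped | BH_New | BH_Delay, with
 *				   b_blocknr == invalid_block
 */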
/*
* This function is used as a standard get_block_t callback function
* when there is no desire to allocate any blocks. It is used as a
* callback function for block_write_begin() and block_write_full_page().
* These functions should only try to map a single block at a time.
*
* Since this function doesn't do block allocations even if the caller
* requests it by passing in create=1, it is critically important that
* any caller checks to make sure that any buffer heads returned
* by this function are either all already mapped or marked for
* delayed allocation before calling block_write_full_page(). Otherwise,
* b_blocknr could be left uninitialized, and the page write functions will
* be taken by surprise.
*/
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
return _ext4_get_block(inode, iblock, bh_result, 0);
}
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
get_bh(bh);
return 0;
}
static int bput_one(handle_t *handle, struct buffer_head *bh)
{
put_bh(bh);
return 0;
}
static int __ext4_journalled_writepage(struct page *page,
unsigned int len)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct buffer_head *page_bufs;
handle_t *handle = NULL;
int ret = 0;
int err;
ClearPageChecked(page);
page_bufs = page_buffers(page);
BUG_ON(!page_bufs);
walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
/* As soon as we unlock the page, it can go away, but we have
* references to buffers so we are safe */
unlock_page(page);
handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
BUG_ON(!ext4_handle_valid(handle));
ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
do_journal_get_write_access);
err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
write_end_fn);
if (ret == 0)
ret = err;
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
return ret;
}
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
/*
* Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We don't even
* need to file the inode to the transaction's list in ordered mode because if
* we are writing back data added by write(), the inode is already there and if
* we are writing back data modified via mmap(), no one guarantees in which
* transaction the data will hit the disk. In case we are journaling data, we
* cannot start transaction directly because transaction start ranks above page
* lock so we have to do some magic.
*
* This function can get called via...
* - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
* - shrink_page_list via pdflush (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have a page with
* multiple blocks we need to write those buffer_heads that are mapped. This
* is important for mmap-based writes. So if, with a 1K blocksize, we do
* truncate(f, 1024);
* a = mmap(f, 0, 4096);
* a[0] = 'a';
* truncate(f, 4096);
* then in the page the first buffer_head is mapped via the page_mkwrite
* callback, but the other buffer_heads would be unmapped yet dirty (dirtied
* via do_wp_page). So writepage should write the first block. If we modify
* the mmap area beyond 1024 we will again get a page fault and the
* page_mkwrite callback will do the block allocation and mark the
* buffer_heads mapped.
*
* We redirty the page if it has any buffer_heads that are either delayed
* or unwritten.
*
* We can get recursively called as shown below.
*
* ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
* ext4_writepage()
*
* But since we don't do any block allocation we should not deadlock.
* The page also has its dirty flag cleared, so we don't get a recursive
* page_lock.
*/
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
int ret = 0, commit_write = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
trace_ext4_writepage(page);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
/*
* If the page does not have buffers (for whatever reason),
* try to create them using __block_write_begin. If this
* fails, redirty the page and move on.
*/
if (!page_has_buffers(page)) {
if (__block_write_begin(page, 0, len,
noalloc_get_block_write)) {
redirty_page:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
commit_write = 1;
}
page_bufs = page_buffers(page);
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
/*
* We don't want to do block allocation, so redirty
* the page and return. We may reach here when we do
* a journal commit via journal_submit_inode_data_buffers.
* We can also reach here via shrink_page_list
*/
goto redirty_page;
}
if (commit_write)
/* now mark the buffer_heads as dirty and uptodate */
block_commit_write(page, 0, len);
if (PageChecked(page) && ext4_should_journal_data(inode))
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
*/
return __ext4_journalled_writepage(page, len);
if (buffer_uninit(page_bufs)) {
ext4_set_bh_endio(page_bufs, inode);
ret = block_write_full_page_endio(page, noalloc_get_block_write,
wbc, ext4_end_io_buffer_write);
} else
ret = block_write_full_page(page, noalloc_get_block_write,
wbc);
return ret;
}
/*
* This is called via ext4_da_writepages() to
* calculate the total number of credits to reserve to fit
* a single extent allocation into a single transaction;
* ext4_da_writepages() will loop calling this before
* the block allocation.
*/
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
/*
* With non-extent format the journal credit needed to
* insert nrblocks contiguous blocks is dependent on the
* number of contiguous blocks. So we will limit the
* number of contiguous blocks to a sane value.
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
(max_blocks > EXT4_MAX_TRANS_DATA))
max_blocks = EXT4_MAX_TRANS_DATA;
return ext4_chunk_trans_blocks(inode, max_blocks);
}
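/*
 * Worked example (editor's addition): on a non-extent inode with, say,
 * 10000 reserved data blocks, max_blocks is clamped to
 * EXT4_MAX_TRANS_DATA before ext4_chunk_trans_blocks() converts it into
 * a journal credit count. Extent-mapped inodes skip the clamp, since a
 * single extent insertion needs a bounded number of credits regardless
 * of the extent's length.
 */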
/*
* write_cache_pages_da - walk the list of dirty pages of the given
* address space and accumulate pages that need writing, and call
* mpage_da_map_and_submit to map a single contiguous memory region
* and then write them.
*/
static int write_cache_pages_da(struct address_space *mapping,
struct writeback_control *wbc,
struct mpage_da_data *mpd,
pgoff_t *done_index)
{
struct buffer_head *bh, *head;
struct inode *inode = mapping->host;
struct pagevec pvec;
unsigned int nr_pages;
sector_t logical;
pgoff_t index, end;
long nr_to_write = wbc->nr_to_write;
int i, tag, ret = 0;
memset(mpd, 0, sizeof(struct mpage_da_data));
mpd->wbc = wbc;
mpd->inode = inode;
pagevec_init(&pvec, 0);
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
*done_index = index;
while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
return 0;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/*
* At this point, the page may be truncated or
* invalidated (changing page->mapping to NULL), or
* even swizzled back from swapper_space to tmpfs file
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
if (page->index > end)
goto out;
*done_index = page->index + 1;
/*
* If we can't merge this page, and we have
* accumulated a contiguous region, write it
*/
if ((mpd->next_page != page->index) &&
(mpd->next_page != mpd->first_page)) {
mpage_da_map_and_submit(mpd);
goto ret_extent_tail;
}
lock_page(page);
/*
* If the page is no longer dirty, or its
* mapping no longer corresponds to inode we
* are writing (which means it has been
* truncated or invalidated), or the page is
* already under writeback and we are not
* doing a data integrity writeback, skip the page
*/
if (!PageDirty(page) ||
(PageWriteback(page) &&
(wbc->sync_mode == WB_SYNC_NONE)) ||
unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
if (mpd->next_page != page->index)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;
logical = (sector_t) page->index <<
(PAGE_CACHE_SHIFT - inode->i_blkbits);
if (!page_has_buffers(page)) {
mpage_add_bh_to_extent(mpd, logical,
PAGE_CACHE_SIZE,
(1 << BH_Dirty) | (1 << BH_Uptodate));
if (mpd->io_done)
goto ret_extent_tail;
} else {
/*
* Page with regular buffer heads,
* just add all dirty ones
*/
head = page_buffers(page);
bh = head;
do {
BUG_ON(buffer_locked(bh));
/*
* We need to try to allocate
* unmapped blocks in the same page.
* Otherwise we won't make progress
* with the page in ext4_writepage
*/
if (ext4_bh_delay_or_unwritten(NULL, bh)) {
mpage_add_bh_to_extent(mpd, logical,
bh->b_size,
bh->b_state);
if (mpd->io_done)
goto ret_extent_tail;
} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
/*
* mapped dirty buffer. We need
* to update the b_state
* because we look at b_state
* in mpage_da_map_blocks. We
* don't update b_size because
* if we find an unmapped
* buffer_head later we need to
* use the b_state flag of that
* buffer_head.
*/
if (mpd->b_size == 0)
mpd->b_state = bh->b_state & BH_FLAGS;
}
logical++;
} while ((bh = bh->b_this_page) != head);
}
if (nr_to_write > 0) {
nr_to_write--;
if (nr_to_write == 0 &&
wbc->sync_mode == WB_SYNC_NONE)
/*
* We stop writing back only if we are
* not doing integrity sync. In case of
* integrity sync we have to keep going
* because someone may be concurrently
* dirtying pages, and we might have
* synced a lot of newly appeared dirty
* pages, but have not synced all of the
* old dirty pages.
*/
goto out;
}
}
pagevec_release(&pvec);
cond_resched();
}
return 0;
ret_extent_tail:
ret = MPAGE_DA_EXTENT_TAIL;
out:
pagevec_release(&pvec);
cond_resched();
return ret;
}
static int ext4_da_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
pgoff_t index;
int range_whole = 0;
handle_t *handle = NULL;
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
int pages_written = 0;
unsigned int max_pages;
int range_cyclic, cycled = 1, io_done = 0;
int needed_blocks, ret = 0;
long desired_nr_to_write, nr_to_writebump = 0;
loff_t range_start = wbc->range_start;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
pgoff_t done_index = 0;
pgoff_t end;
trace_ext4_da_writepages(inode, wbc);
/*
* No pages to write? This is mainly a kludge to avoid starting
* a transaction for special inodes like the journal inode on last iput()
* because that could violate lock ordering on umount
*/
if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
return 0;
/*
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
* EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
* the latter could be true if the filesystem is mounted
* read-only, and in that case, ext4_da_writepages should
* *never* be called, so if that ever happens, we would want
* the stack trace.
*/
if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
return -EROFS;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
range_cyclic = wbc->range_cyclic;
if (wbc->range_cyclic) {
index = mapping->writeback_index;
if (index)
cycled = 0;
wbc->range_start = index << PAGE_CACHE_SHIFT;
wbc->range_end = LLONG_MAX;
wbc->range_cyclic = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
}
/*
* This works around two forms of stupidity. The first is in
* the writeback code, which caps the maximum number of pages
* written to be 1024 pages. This is wrong on multiple
* levels; different architectures have a different page size,
* which changes the maximum amount of data which gets
* written. Secondly, 4 megabytes is way too small. XFS
* forces this value to be 16 megabytes by multiplying
* nr_to_write parameter by four, and then relies on its
* allocator to allocate larger extents to make them
* contiguous. Unfortunately this brings us to the second
* stupidity, which is that ext4's mballoc code only allocates
* at most 2048 blocks. So we force contiguous writes up to
* the number of dirty blocks in the inode, or
* sbi->s_max_writeback_mb_bump, whichever is smaller.
*/
max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
if (!range_cyclic && range_whole) {
if (wbc->nr_to_write == LONG_MAX)
desired_nr_to_write = wbc->nr_to_write;
else
desired_nr_to_write = wbc->nr_to_write * 8;
} else
desired_nr_to_write = ext4_num_dirty_pages(inode, index,
max_pages);
if (desired_nr_to_write > max_pages)
desired_nr_to_write = max_pages;
if (wbc->nr_to_write < desired_nr_to_write) {
nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
wbc->nr_to_write = desired_nr_to_write;
}
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
while (!ret && wbc->nr_to_write > 0) {
/*
* We insert one extent at a time, so we need the
* credits needed for a single extent allocation.
* Journalled mode is currently not supported
* by delalloc.
*/
BUG_ON(ext4_should_journal_data(inode));
needed_blocks = ext4_da_writepages_trans_blocks(inode);
/* start a new transaction*/
handle = ext4_journal_start(inode, needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
"%ld pages, ino %lu; err %d", __func__,
wbc->nr_to_write, inode->i_ino, ret);
goto out_writepages;
}
/*
* Now call write_cache_pages_da() to find the next
* contiguous region of logical blocks that need
* blocks to be allocated by ext4 and submit them.
*/
ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
/*
* If we have a contiguous extent of pages and we
* haven't done the I/O yet, map the blocks and submit
* them for I/O.
*/
if (!mpd.io_done && mpd.next_page != mpd.first_page) {
mpage_da_map_and_submit(&mpd);
ret = MPAGE_DA_EXTENT_TAIL;
}
trace_ext4_da_write_pages(inode, &mpd);
wbc->nr_to_write -= mpd.pages_written;
ext4_journal_stop(handle);
if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
/* commit the transaction which would
* free blocks released in the transaction
* and try again
*/
jbd2_journal_force_commit_nested(sbi->s_journal);
ret = 0;
} else if (ret == MPAGE_DA_EXTENT_TAIL) {
/*
* got one extent; now try with
* the rest of the pages
*/
pages_written += mpd.pages_written;
ret = 0;
io_done = 1;
} else if (wbc->nr_to_write)
/*
* There is no more writeout needed,
* or we requested a nonblocking writeout
* and found the device congested.
*/
break;
}
if (!io_done && !cycled) {
cycled = 1;
index = 0;
wbc->range_start = index << PAGE_CACHE_SHIFT;
wbc->range_end = mapping->writeback_index - 1;
goto retry;
}
/* Update index */
wbc->range_cyclic = range_cyclic;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
/*
* set the writeback_index so that range_cyclic
* mode will write it back later
*/
mapping->writeback_index = done_index;
out_writepages:
wbc->nr_to_write -= nr_to_writebump;
wbc->range_start = range_start;
trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
return ret;
}
#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
{
s64 free_blocks, dirty_blocks;
struct ext4_sb_info *sbi = EXT4_SB(sb);
/*
* Switch to non-delalloc mode if we are running low
* on free blocks. The free block accounting via percpu
* counters can get slightly wrong with percpu_counter_batch getting
* accumulated on each CPU without updating global counters.
* Delalloc needs accurate free block accounting, so switch
* to non-delalloc when we are near the error range.
*/
free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
if (2 * free_blocks < 3 * dirty_blocks ||
free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
/*
* free block count is less than 150% of dirty blocks
* or free blocks are less than the watermark
*/
return 1;
}
/*
* Even if we don't switch but are nearing capacity,
* start pushing delalloc when 1/2 of free blocks are dirty.
*/
if (free_blocks < 2 * dirty_blocks)
writeback_inodes_sb_if_idle(sb);
return 0;
}
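/*
 * Worked example (editor's addition): with free_blocks == 1200 and
 * dirty_blocks == 1000, the first test fires (2 * 1200 < 3 * 1000, i.e.
 * free is below 150% of dirty) and we fall back to non-delalloc. With
 * free_blocks == 1900, assuming the watermark test does not trigger, we
 * stay in delalloc mode, but since 1900 < 2 * 1000 background writeback
 * is kicked (if the device is idle) to start flushing the delalloc
 * backlog.
 */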
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int ret, retries = 0;
struct page *page;
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
index = pos >> PAGE_CACHE_SHIFT;
if (ext4_nonda_switch(inode->i_sb)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len, flags);
retry:
/*
* With delayed allocation, we don't log the i_disksize update
* if there is delayed block allocation. But we still need
* to journal the i_disksize update for writes to the end
* of the file that hit an already mapped buffer.
*/
handle = ext4_journal_start(inode, 1);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
/* We cannot recurse into the filesystem as the transaction is already
* started */
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
ext4_journal_stop(handle);
ret = -ENOMEM;
goto out;
}
*pagep = page;
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
page_cache_release(page);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
out:
return ret;
}
/*
* Check if we should update i_disksize
* when writing to the end of the file without requiring block allocation
*/
static int ext4_da_should_update_i_disksize(struct page *page,
unsigned long offset)
{
struct buffer_head *bh;
struct inode *inode = page->mapping->host;
unsigned int idx;
int i;
bh = page_buffers(page);
idx = offset >> inode->i_blkbits;
for (i = 0; i < idx; i++)
bh = bh->b_this_page;
if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
return 0;
return 1;
}
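/*
 * Worked example (editor's addition): with a 1K blocksize
 * (i_blkbits == 10), an offset of 3100 within the page gives
 * idx == 3100 >> 10 == 3, so the fourth buffer_head on the page is the
 * one examined; only if it is mapped and neither delayed nor unwritten
 * do we go on to update i_disksize without block allocation.
 */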
static int ext4_da_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
int ret = 0, ret2;
handle_t *handle = ext4_journal_current_handle();
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
if (write_mode == FALL_BACK_TO_NONDELALLOC) {
switch (ext4_inode_journal_mode(inode)) {
case EXT4_INODE_ORDERED_DATA_MODE:
return ext4_ordered_write_end(file, mapping, pos,
len, copied, page, fsdata);
case EXT4_INODE_WRITEBACK_DATA_MODE:
return ext4_writeback_write_end(file, mapping, pos,
len, copied, page, fsdata);
default:
BUG();
}
}
trace_ext4_da_write_end(inode, pos, len, copied);
start = pos & (PAGE_CACHE_SIZE - 1);
end = start + copied - 1;
/*
* generic_write_end() will run mark_inode_dirty() if i_size
* changes. So let's piggyback the i_disksize mark_inode_dirty
* into that.
*/
new_i_size = pos + copied;
if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_da_should_update_i_disksize(page, end)) {
down_write(&EXT4_I(inode)->i_data_sem);
if (new_i_size > EXT4_I(inode)->i_disksize) {
/*
* Updating i_disksize when extending file
* without needing block allocation
*/
if (ext4_should_order_data(inode))
ret = ext4_jbd2_file_inode(handle,
inode);
EXT4_I(inode)->i_disksize = new_i_size;
}
up_write(&EXT4_I(inode)->i_data_sem);
/* We need to mark inode dirty even if
* new_i_size is less than inode->i_size
* but greater than i_disksize (hint: delalloc)
*/
ext4_mark_inode_dirty(handle, inode);
}
}
ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
copied = ret2;
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
return ret ? ret : copied;
}
static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
{
/*
* Drop reserved blocks
*/
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
ext4_da_page_release_reservation(page, offset);
out:
ext4_invalidatepage(page, offset);
return;
}
/*
* Force all delayed allocation blocks to be allocated for a given inode.
*/
int ext4_alloc_da_blocks(struct inode *inode)
{
trace_ext4_alloc_da_blocks(inode);
if (!EXT4_I(inode)->i_reserved_data_blocks &&
!EXT4_I(inode)->i_reserved_meta_blocks)
return 0;
/*
* We do something simple for now. The filemap_flush() will
* also start triggering a write of the data blocks, which is
* not strictly speaking necessary (and for users of
* laptop_mode, not even desirable). However, to do otherwise
* would require replicating code paths in:
*
* ext4_da_writepages() ->
* write_cache_pages() ---> (via passed in callback function)
* __mpage_da_writepage() -->
* mpage_add_bh_to_extent()
* mpage_da_map_blocks()
*
* The problem is that write_cache_pages(), located in
* mm/page-writeback.c, marks pages clean in preparation for
* doing I/O, which is not desirable if we're not planning on
* doing I/O at all.
*
* We could call write_cache_pages(), and then redirty all of
* the pages by calling redirty_page_for_writepage() but that
* would be ugly in the extreme. So instead we would need to
* replicate parts of the code in the above functions,
* simplifying them because we wouldn't actually intend to
* write out the pages, but rather only collect contiguous
* logical block extents, call the multi-block allocator, and
* then update the buffer heads with the block allocations.
*
* For now, though, we'll cheat by calling filemap_flush(),
* which will map the blocks, and start the I/O, but not
* actually wait for the I/O to complete.
*/
return filemap_flush(inode->i_mapping);
}
/*
* bmap() is special. It gets used by applications such as lilo and by
* the swapper to find the on-disk block of a specific piece of data.
*
* Naturally, this is dangerous if the block concerned is still in the
* journal. If somebody makes a swapfile on an ext4 data-journaling
* filesystem and enables swap, then they may get a nasty shock when the
* data getting swapped to that swapfile suddenly gets overwritten by
* the original zeros written out previously to the journal and
* awaiting writeback in the kernel's buffer cache.
*
* So, if we see any bmap calls here on a modified, data-journaled file,
* take extra steps to flush any blocks which might be in the cache.
*/
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
journal_t *journal;
int err;
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
test_opt(inode->i_sb, DELALLOC)) {
/*
* With delalloc we want to sync the file
* so that we can make sure we allocate
* blocks for the file
*/
filemap_write_and_wait(mapping);
}
if (EXT4_JOURNAL(inode) &&
ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
* only if we run lilo or swapon on a freshly made file
* do we expect this to happen.
*
* (bmap requires CAP_SYS_RAWIO so this does not
* represent an unprivileged user DOS attack --- we'd be
* in trouble if mortal users could trigger this path at
* will.)
*
* NB. EXT4_STATE_JDATA is not set on files other than
* regular files. If somebody wants to bmap a directory
* or symlink and gets confused because the buffer
* hasn't yet been flushed to disk, they deserve
* everything they get.
*/
ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
journal = EXT4_JOURNAL(inode);
jbd2_journal_lock_updates(journal);
err = jbd2_journal_flush(journal);
jbd2_journal_unlock_updates(journal);
if (err)
return 0;
}
return generic_block_bmap(mapping, block, ext4_get_block);
}
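/*
 * Illustrative sketch (editor's addition): ext4_bmap() is what ultimately
 * services the FIBMAP ioctl, so the heavyweight journal flush above can
 * be triggered from userspace like this (FIBMAP requires CAP_SYS_RAWIO,
 * as noted in the comment); "fd" is a hypothetical open file descriptor:
 *
 *	int block = 0;	// logical block in, physical block out
 *	if (ioctl(fd, FIBMAP, &block) == 0)
 *		printf("block 0 is at physical block %d\n", block);
 */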
static int ext4_readpage(struct file *file, struct page *page)
{
trace_ext4_readpage(page);
return mpage_readpage(page, ext4_get_block);
}
static int
ext4_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
}
static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
if (!page_has_buffers(page))
return;
head = bh = page_buffers(page);
do {
if (offset <= curr_off && test_clear_buffer_uninit(bh)
&& bh->b_private) {
ext4_free_io_end(bh->b_private);
bh->b_private = NULL;
bh->b_end_io = NULL;
}
curr_off = curr_off + bh->b_size;
bh = bh->b_this_page;
} while (bh != head);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
trace_ext4_invalidatepage(page, offset);
/*
* free any io_end structure allocated for buffers to be discarded
*/
if (ext4_should_dioread_nolock(page->mapping->host))
ext4_invalidatepage_free_endio(page, offset);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0)
ClearPageChecked(page);
if (journal)
jbd2_journal_invalidatepage(journal, page, offset);
else
block_invalidatepage(page, offset);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
trace_ext4_releasepage(page);
WARN_ON(PageChecked(page));
if (!page_has_buffers(page))
return 0;
if (journal)
return jbd2_journal_try_to_free_buffers(journal, page, wait);
else
return try_to_free_buffers(page);
}
/*
* O_DIRECT for ext3 (or indirect map) based files
*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list. So recovery will truncate it back to the original size
* if the machine crashes during the write.
*
* If the O_DIRECT write is instantiating holes inside i_size and the machine
* crashes then stale disk data _may_ be exposed inside the file. But current
* VFS code falls back into buffered path in that case so we are safe.
*/
static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ext4_inode_info *ei = EXT4_I(inode);
handle_t *handle;
ssize_t ret;
int orphan = 0;
size_t count = iov_length(iov, nr_segs);
int retries = 0;
if (rw == WRITE) {
loff_t final_size = offset + count;
if (final_size > inode->i_size) {
/* Credits for sb + inode write */
handle = ext4_journal_start(inode, 2);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
ret = ext4_orphan_add(handle, inode);
if (ret) {
ext4_journal_stop(handle);
goto out;
}
orphan = 1;
ei->i_disksize = inode->i_size;
ext4_journal_stop(handle);
}
}
retry:
if (rw == READ && ext4_should_dioread_nolock(inode)) {
if (unlikely(!list_empty(&ei->i_completed_io_list))) {
mutex_lock(&inode->i_mutex);
ext4_flush_completed_IO(inode);
mutex_unlock(&inode->i_mutex);
}
ret = __blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL, NULL, 0);
} else {
ret = blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL);
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + iov_length(iov, nr_segs);
if (end > isize)
ext4_truncate_failed_write(inode);
}
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
if (orphan) {
int err;
/* Credits for sb + inode write */
handle = ext4_journal_start(inode, 2);
if (IS_ERR(handle)) {
/* This is really bad luck. We've written the data
* but cannot extend i_size. Bail out and pretend
* the write failed... */
ret = PTR_ERR(handle);
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
goto out;
}
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
if (ret > 0) {
loff_t end = offset + ret;
if (end > inode->i_size) {
ei->i_disksize = end;
i_size_write(inode, end);
/*
* We're going to return a positive `ret'
* here due to non-zero-length I/O, so there's
* no way of reporting error returns from
* ext4_mark_inode_dirty() to userspace. So
* ignore it.
*/
ext4_mark_inode_dirty(handle, inode);
}
}
err = ext4_journal_stop(handle);
if (ret == 0)
ret = err;
}
out:
return ret;
}
/*
* ext4_get_block_write() is used when preparing for a DIO write or buffer
* write. We allocate an uninitialized extent if blocks haven't been allocated.
* The extent will be converted to initialized after the IO is complete.
*/
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
inode->i_ino, create);
return _ext4_get_block(inode, iblock, bh_result,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private, int ret,
bool is_async)
{
ext4_io_end_t *io_end = iocb->private;
struct workqueue_struct *wq;
unsigned long flags;
struct ext4_inode_info *ei;
/* if this is not async direct IO or it is a zero-byte DIO write, just return */
if (!io_end || !size)
goto out;
ext_debug("ext4_end_io_dio(): io_end 0x%p"
"for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
iocb->private, io_end->inode->i_ino, iocb, offset,
size);
/* if this is not AIO DIO with unwritten extents, just free the io_end and return */
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
iocb->private = NULL;
out:
if (is_async)
aio_complete(iocb, ret, 0);
return;
}
io_end->offset = offset;
io_end->size = size;
if (is_async) {
io_end->iocb = iocb;
io_end->result = ret;
}
wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
/* Add the io_end to the per-inode completed aio dio list */
ei = EXT4_I(io_end->inode);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &ei->i_completed_io_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
iocb->private = NULL;
}
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
{
ext4_io_end_t *io_end = bh->b_private;
struct workqueue_struct *wq;
struct inode *inode;
unsigned long flags;
if (!test_clear_buffer_uninit(bh) || !io_end)
goto out;
if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
printk("sb umounted, discard end_io request for inode %lu\n",
io_end->inode->i_ino);
ext4_free_io_end(io_end);
goto out;
}
/*
* It may be over-defensive to check EXT4_IO_END_UNWRITTEN here,
* but being careful keeps us safe against future changes.
*/
inode = io_end->inode;
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
io_end->flag |= EXT4_IO_END_UNWRITTEN;
atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
}
/* Add the io_end to the per-inode completed io list */
spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
out:
bh->b_private = NULL;
bh->b_end_io = NULL;
clear_buffer_uninit(bh);
end_buffer_async_write(bh, uptodate);
}
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
{
ext4_io_end_t *io_end;
struct page *page = bh->b_page;
loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
size_t size = bh->b_size;
retry:
io_end = ext4_init_io_end(inode, GFP_ATOMIC);
if (!io_end) {
pr_warn_ratelimited("%s: allocation fail\n", __func__);
schedule();
goto retry;
}
io_end->offset = offset;
io_end->size = size;
/*
* We need to hold a reference to the page to make sure it
* doesn't get evicted before ext4_end_io_work() has a chance
* to convert the extent from unwritten to written.
*/
io_end->page = page;
get_page(io_end->page);
bh->b_private = io_end;
bh->b_end_io = ext4_end_io_buffer_write;
return 0;
}
/*
* For ext4 extent files, ext4 will do direct-io writes to holes,
* preallocated extents, and writes that extend the file, with no need
* to fall back to buffered IO.
*
* For holes, we fallocate those blocks and mark them as uninitialized.
* If those blocks were preallocated, we make sure they are split, but
* still keep the range to write as uninitialized.
*
* The unwritten extents will be converted to written when DIO completes.
* For async direct IO, since the IO may still be pending when we return,
* we set up an end_io callback function, which will do the conversion
* when the async direct IO completes.
*
* If the O_DIRECT write will extend the file then add this inode to the
* orphan list. So recovery will truncate it back to the original size
* if the machine crashes during the write.
*
*/
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
size_t count = iov_length(iov, nr_segs);
loff_t final_size = offset + count;
if (rw == WRITE && final_size <= inode->i_size) {
/*
* We could direct write to holes and fallocate.
*
* Allocated blocks to fill the hole are marked as uninitialized
* to prevent parallel buffered read to expose the stale data
* before DIO complete the data IO.
*
* As to previously fallocated extents, ext4 get_block
* will just simply mark the buffer mapped but still
* keep the extents uninitialized.
*
* for the non-AIO case, we convert those unwritten extents
* to written after returning from blockdev_direct_IO.
*
* for async DIO, the conversion is deferred until
* the IO completes. The ext4 end_io callback function
* is called to take care of the conversion work.
* Here, for the async case, we allocate an io_end structure to
* hook to the iocb.
*/
iocb->private = NULL;
EXT4_I(inode)->cur_aio_dio = NULL;
if (!is_sync_kiocb(iocb)) {
iocb->private = ext4_init_io_end(inode, GFP_NOFS);
if (!iocb->private)
return -ENOMEM;
/*
* we save the io structure for the current async
* direct IO, so that ext4_map_blocks() can later
* flag the io structure if there are unwritten
* extents that need to be converted when the IO
* completes.
*/
EXT4_I(inode)->cur_aio_dio = iocb->private;
}
ret = blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block_write,
ext4_end_io_dio);
if (iocb->private)
EXT4_I(inode)->cur_aio_dio = NULL;
/*
* The io_end structure takes a reference to the inode; that
* structure needs to be destroyed and the reference to the
* inode needs to be dropped when IO completes, even for a
* zero-byte write or a failed one.
*
* In the successful AIO DIO case, the io_end structure will be
* destroyed and the reference to the inode will be dropped
* after the end_io callback function is called.
*
* In the zero-byte write or error case, since VFS direct IO
* won't invoke the end_io callback function, we need to free
* the io_end structure here.
*/
if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
ext4_free_io_end(iocb->private);
iocb->private = NULL;
} else if (ret > 0 && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
* for non AIO case, since the IO is already
* completed, we could do the conversion right here
*/
err = ext4_convert_unwritten_extents(inode,
offset, ret);
if (err < 0)
ret = err;
ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
}
return ret;
}
/* for writes past the end of the file (and for reads), we fall back to the old way */
return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
}
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
else
ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
trace_ext4_direct_IO_exit(inode, offset,
iov_length(iov, nr_segs), rw, ret);
return ret;
}
/*
* Pages can be marked dirty completely asynchronously from ext4's journalling
* activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
* much here because ->set_page_dirty is called under VFS locks. The page is
* not necessarily locked.
*
* We cannot just dirty the page and leave attached buffers clean, because the
* buffers' dirty state is "definitive". We cannot just set the buffers dirty
* or jbddirty because all the journalling code will explode.
*
* So what we do is to mark the page "pending dirty" and next time writepage
* is called, propagate that into the buffers appropriately.
*/
static int ext4_journalled_set_page_dirty(struct page *page)
{
SetPageChecked(page);
return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations ext4_ordered_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.write_begin = ext4_write_begin,
.write_end = ext4_ordered_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_writeback_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.write_begin = ext4_write_begin,
.write_end = ext4_writeback_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_journalled_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations ext4_da_aops = {
.readpage = ext4_readpage,
.readpages = ext4_readpages,
.writepage = ext4_writepage,
.writepages = ext4_da_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.bmap = ext4_bmap,
.invalidatepage = ext4_da_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
void ext4_set_aops(struct inode *inode)
{
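/*
* Note: delalloc only changes the choice for the ordered and
* writeback data modes; data-journalled inodes always get the
* journalled aops, which notably lack ->direct_IO.
*/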
switch (ext4_inode_journal_mode(inode)) {
case EXT4_INODE_ORDERED_DATA_MODE:
if (test_opt(inode->i_sb, DELALLOC))
inode->i_mapping->a_ops = &ext4_da_aops;
else
inode->i_mapping->a_ops = &ext4_ordered_aops;
break;
case EXT4_INODE_WRITEBACK_DATA_MODE:
if (test_opt(inode->i_sb, DELALLOC))
inode->i_mapping->a_ops = &ext4_da_aops;
else
inode->i_mapping->a_ops = &ext4_writeback_aops;
break;
case EXT4_INODE_JOURNAL_DATA_MODE:
inode->i_mapping->a_ops = &ext4_journalled_aops;
break;
default:
BUG();
}
}
/*
* ext4_block_truncate_page() zeroes out a mapping from file offset `from'
* up to the end of the block which corresponds to `from'.
* This is required during truncate. We need to physically zero the tail end
* of that block so it doesn't yield old data if the file is later grown.
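* For example, with a 4096-byte block size, from == 5000 falls at
* offset 904 within its block, so the final 3192 bytes of that
* block are zeroed.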
*/
int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from)
{
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned length;
unsigned blocksize;
struct inode *inode = mapping->host;
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
return ext4_block_zero_page_range(handle, mapping, from, length);
}
/*
* ext4_block_zero_page_range() zeros out a mapping of length 'length'
* starting from file offset 'from'. The range to be zeroed must
* be contained within one block. If the specified range exceeds
* the end of the block, it will be shortened to the end of the block
* that corresponds to 'from'.
*/
int ext4_block_zero_page_range(handle_t *handle,
struct address_space *mapping, loff_t from, loff_t length)
{
ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize, max, pos;
ext4_lblk_t iblock;
struct inode *inode = mapping->host;
struct buffer_head *bh;
struct page *page;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
mapping_gfp_mask(mapping) & ~__GFP_FS);
if (!page)
return -EINVAL;
blocksize = inode->i_sb->s_blocksize;
max = blocksize - (offset & (blocksize - 1));
/*
* correct length if it does not fall between
* 'from' and the end of the block
*/
if (length > max || length < 0)
length = max;
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
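/*
* e.g. with 1024-byte buffers and offset == 2500, the loop below
* advances twice (pos 1024 -> 2048 -> 3072) and stops on the third
* buffer, the one covering bytes 2048..3071 of the page.
*/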
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
iblock++;
pos += blocksize;
}
err = 0;
if (buffer_freed(bh)) {
BUFFER_TRACE(bh, "freed: skip");
goto unlock;
}
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "unmapped");
ext4_get_block(inode, iblock, bh, 0);
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(bh)) {
BUFFER_TRACE(bh, "still unmapped");
goto unlock;
}
}
/* Ok, it's mapped. Make sure it's up-to-date */
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
err = ext4_journal_get_write_access(handle, bh);
if (err)
goto unlock;
}
zero_user(page, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
err = 0;
if (ext4_should_journal_data(inode)) {
err = ext4_handle_dirty_metadata(handle, inode, bh);
} else {
if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
err = ext4_jbd2_file_inode(handle, inode);
mark_buffer_dirty(bh);
}
unlock:
unlock_page(page);
page_cache_release(page);
return err;
}
/*
* Probably it should be a library function... search for first non-zero word
* or memcmp with zero_page, whatever is better for particular architecture.
* Linus?
*/
static inline int all_zeroes(__le32 *p, __le32 *q)
{
while (p < q)
if (*p++)
return 0;
return 1;
}
/**
* ext4_find_shared - find the indirect blocks for partial truncation.
* @inode: inode in question
* @depth: depth of the affected branch
* @offsets: offsets of pointers in that branch (see ext4_block_to_path)
* @chain: place to store the pointers to partial indirect blocks
* @top: place to the (detached) top of branch
*
* This is a helper function used by ext4_truncate().
*
* When we do truncate() we may have to clean the ends of several
* indirect blocks but leave the blocks themselves alive. Block is
* partially truncated if some data below the new i_size is referred
* from it (and it is on the path to the first completely truncated
* data block, indeed). We have to free the top of that path along
* with everything to the right of the path. Since no allocation
* past the truncation point is possible until ext4_truncate()
* finishes, we may safely do the latter, but top of branch may
* require special attention - pageout below the truncation point
* might try to populate it.
*
* We atomically detach the top of branch from the tree, store the
* block number of its root in *@top, pointers to buffer_heads of
* partially truncated blocks - in @chain[].bh and pointers to
* their last elements that should not be removed - in
* @chain[].p. Return value is the pointer to last filled element
* of @chain.
*
* The work left to the caller is to do the actual freeing of subtrees:
* a) free the subtree starting from *@top
* b) free the subtrees whose roots are stored in
* (@chain[i].p+1 .. end of @chain[i].bh->b_data)
* c) free the subtrees growing from the inode past the @chain[0].
* (no partially truncated stuff there). */
static Indirect *ext4_find_shared(struct inode *inode, int depth,
ext4_lblk_t offsets[4], Indirect chain[4],
__le32 *top)
{
Indirect *partial, *p;
int k, err;
*top = 0;
/* Make k index the deepest non-null offset + 1 */
for (k = depth; k > 1 && !offsets[k-1]; k--)
;
partial = ext4_get_branch(inode, k, offsets, chain, &err);
/* Writer: pointers */
if (!partial)
partial = chain + k-1;
/*
* If the branch acquired continuation since we've looked at it -
* fine, it should all survive and (new) top doesn't belong to us.
*/
if (!partial->key && *partial->p)
/* Writer: end */
goto no_top;
for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
;
/*
* OK, we've found the last block that must survive. The rest of our
* branch should be detached before unlocking. However, if that rest
* of branch is all ours and does not grow immediately from the inode
* it's easier to cheat and just decrement partial->p.
*/
if (p == chain + k - 1 && p > chain) {
p->p--;
} else {
*top = *p->p;
/* Nope, don't do this in ext4. Must leave the tree intact */
#if 0
*p->p = 0;
#endif
}
/* Writer: end */
while (partial > p) {
brelse(partial->bh);
partial--;
}
no_top:
return partial;
}
/*
* Zero a number of block pointers in either an inode or an indirect block.
* If we restart the transaction we must again get write access to the
* indirect block for further modification.
*
* We release `count' blocks on disk, but (last - first) may be greater
* than `count' because there can be holes in there.
*
* Return 0 on success, 1 on invalid block range
* and < 0 on fatal error.
*/
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh,
ext4_fsblk_t block_to_free,
unsigned long count, __le32 *first,
__le32 *last)
{
__le32 *p;
int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
int err;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
flags |= EXT4_FREE_BLOCKS_METADATA;
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
count)) {
EXT4_ERROR_INODE(inode, "attempt to clear invalid "
"blocks %llu len %lu",
(unsigned long long) block_to_free, count);
return 1;
}
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (unlikely(err))
goto out_err;
}
err = ext4_mark_inode_dirty(handle, inode);
if (unlikely(err))
goto out_err;
err = ext4_truncate_restart_trans(handle, inode,
blocks_for_truncate(inode));
if (unlikely(err))
goto out_err;
if (bh) {
BUFFER_TRACE(bh, "retaking write access");
err = ext4_journal_get_write_access(handle, bh);
if (unlikely(err))
goto out_err;
}
}
for (p = first; p < last; p++)
*p = 0;
ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
return 0;
out_err:
ext4_std_error(inode->i_sb, err);
return err;
}
/**
* ext4_free_data - free a list of data blocks
* @handle: handle for this transaction
* @inode: inode we are dealing with
* @this_bh: indirect buffer_head which contains *@first and *@last
* @first: array of block numbers
* @last: points immediately past the end of array
*
* We are freeing all blocks referred from that array (numbers are stored as
* little-endian 32-bit) and updating @inode->i_blocks appropriately.
*
* We accumulate contiguous runs of blocks to free. Conveniently, if these
* blocks are contiguous then releasing them at one time will only affect one
* or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
* actually use a lot of journal space.
*
* @this_bh will be %NULL if @first and @last point into the inode's direct
* block pointers.
*/
static void ext4_free_data(handle_t *handle, struct inode *inode,
struct buffer_head *this_bh,
__le32 *first, __le32 *last)
{
ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
unsigned long count = 0; /* Number of blocks in the run */
__le32 *block_to_free_p = NULL; /* Pointer into inode/ind
corresponding to
block_to_free */
ext4_fsblk_t nr; /* Current block # */
__le32 *p; /* Pointer into inode/ind
for current block */
int err = 0;
if (this_bh) { /* For indirect block */
BUFFER_TRACE(this_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, this_bh);
/* Important: if we can't update the indirect pointers
* to the blocks, we can't free them. */
if (err)
return;
}
for (p = first; p < last; p++) {
nr = le32_to_cpu(*p);
if (nr) {
/* accumulate blocks to free if they're contiguous */
if (count == 0) {
block_to_free = nr;
block_to_free_p = p;
count = 1;
} else if (nr == block_to_free + count) {
count++;
} else {
err = ext4_clear_blocks(handle, inode, this_bh,
block_to_free, count,
block_to_free_p, p);
if (err)
break;
block_to_free = nr;
block_to_free_p = p;
count = 1;
}
}
}
if (!err && count > 0)
err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
count, block_to_free_p, p);
if (err < 0)
/* fatal error */
return;
if (this_bh) {
BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
/*
* The buffer head should have an attached journal head at this
* point. However, if the data is corrupted and an indirect
* block pointed to itself, it would have been detached when
* the block was cleared. Check for this instead of OOPSing.
*/
if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
ext4_handle_dirty_metadata(handle, inode, this_bh);
else
EXT4_ERROR_INODE(inode,
"circular indirect block detected at "
"block %llu",
(unsigned long long) this_bh->b_blocknr);
}
}
/**
* ext4_free_branches - free an array of branches
* @handle: JBD handle for this transaction
* @inode: inode we are dealing with
* @parent_bh: the buffer_head which contains *@first and *@last
* @first: array of block numbers
* @last: pointer immediately past the end of array
* @depth: depth of the branches to free
*
* We are freeing all blocks referred from these branches (numbers are
* stored as little-endian 32-bit) and updating @inode->i_blocks
* appropriately.
*/
static void ext4_free_branches(handle_t *handle, struct inode *inode,
struct buffer_head *parent_bh,
__le32 *first, __le32 *last, int depth)
{
ext4_fsblk_t nr;
__le32 *p;
if (ext4_handle_is_aborted(handle))
return;
if (depth--) {
struct buffer_head *bh;
int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
p = last;
while (--p >= first) {
nr = le32_to_cpu(*p);
if (!nr)
continue; /* A hole */
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
nr, 1)) {
EXT4_ERROR_INODE(inode,
"invalid indirect mapped "
"block %lu (level %d)",
(unsigned long) nr, depth);
break;
}
/* Go read the buffer for the next level down */
bh = sb_bread(inode->i_sb, nr);
/*
* A read failure? Report error and clear slot
* (should be rare).
*/
if (!bh) {
EXT4_ERROR_INODE_BLOCK(inode, nr,
"Read failure");
continue;
}
/* This zaps the entire block. Bottom up. */
BUFFER_TRACE(bh, "free child branches");
ext4_free_branches(handle, inode, bh,
(__le32 *) bh->b_data,
(__le32 *) bh->b_data + addr_per_block,
depth);
brelse(bh);
/*
* Everything below this pointer has been
* released. Now let this top-of-subtree go.
*
* We want the freeing of this indirect block to be
* atomic in the journal with the updating of the
* bitmap block which owns it. So make some room in
* the journal.
*
* We zero the parent pointer *after* freeing its
* pointee in the bitmaps, so if extend_transaction()
* for some reason fails to put the bitmap changes and
* the release into the same transaction, recovery
* will merely complain about releasing a free block,
* rather than leaking blocks.
*/
if (ext4_handle_is_aborted(handle))
return;
if (try_to_extend_transaction(handle, inode)) {
ext4_mark_inode_dirty(handle, inode);
ext4_truncate_restart_trans(handle, inode,
blocks_for_truncate(inode));
}
/*
* The forget flag here is critical because if
* we are journaling (and not doing data
* journaling), we have to make sure a revoke
* record is written to prevent the journal
* replay from overwriting the (former)
* indirect block if it gets reallocated as a
* data block. This must happen in the same
* transaction where the data blocks are
* actually freed.
*/
ext4_free_blocks(handle, inode, NULL, nr, 1,
EXT4_FREE_BLOCKS_METADATA|
EXT4_FREE_BLOCKS_FORGET);
if (parent_bh) {
/*
* The block which we have just freed is
* pointed to by an indirect block: journal it
*/
BUFFER_TRACE(parent_bh, "get_write_access");
if (!ext4_journal_get_write_access(handle,
parent_bh)){
*p = 0;
BUFFER_TRACE(parent_bh,
"call ext4_handle_dirty_metadata");
ext4_handle_dirty_metadata(handle,
inode,
parent_bh);
}
}
}
} else {
/* We have reached the bottom of the tree. */
BUFFER_TRACE(parent_bh, "free data blocks");
ext4_free_data(handle, inode, parent_bh, first, last);
}
}
int ext4_can_truncate(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
return 1;
if (S_ISDIR(inode->i_mode))
return 1;
if (S_ISLNK(inode->i_mode))
return !ext4_inode_is_fast_symlink(inode);
return 0;
}
/*
* ext4_punch_hole: punches a hole in a file by releasing the blocks
* associated with the given offset and length
*
* @file: The file in which the hole will be punched
* @offset: The offset where the hole will begin
* @length: The length of the hole
*
* Returns: 0 on success or negative on failure
*/
int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
struct inode *inode = file->f_path.dentry->d_inode;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
/* TODO: Add support for non extent hole punching */
return -EOPNOTSUPP;
}
return ext4_ext_punch_hole(file, offset, length);
}
/*
* ext4_truncate()
*
* We block out ext4_get_block() block instantiations across the entire
* transaction, and VFS/VM ensures that ext4_truncate() cannot run
* simultaneously on behalf of the same inode.
*
* As we work through the truncate and commit bits of it to the journal there
* is one core, guiding principle: the file's tree must always be consistent on
* disk. We must be able to restart the truncate after a crash.
*
* The file's tree may be transiently inconsistent in memory (although it
* probably isn't), but whenever we close off and commit a journal transaction,
* the contents of (the filesystem + the journal) must be consistent and
* restartable. It's pretty simple, really: bottom up, right to left (although
* left-to-right works OK too).
*
* Note that at recovery time, journal replay occurs *before* the restart of
* truncate against the orphan inode list.
*
* The committed inode has the new, desired i_size (which is the same as
* i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
* that this inode's truncate did not complete and it will again call
* ext4_truncate() to have another go. So there will be instantiated blocks
* to the right of the truncation point in a crashed ext4 filesystem. But
* that's fine - as long as they are linked from the inode, the post-crash
* ext4_truncate() run will find them and release them.
*/
void ext4_truncate(struct inode *inode)
{
handle_t *handle;
struct ext4_inode_info *ei = EXT4_I(inode);
__le32 *i_data = ei->i_data;
int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
struct address_space *mapping = inode->i_mapping;
ext4_lblk_t offsets[4];
Indirect chain[4];
Indirect *partial;
__le32 nr = 0;
int n = 0;
ext4_lblk_t last_block, max_block;
unsigned blocksize = inode->i_sb->s_blocksize;
trace_ext4_truncate_enter(inode);
if (!ext4_can_truncate(inode))
return;
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ext4_ext_truncate(inode);
trace_ext4_truncate_exit(inode);
return;
}
handle = start_transaction(inode);
if (IS_ERR(handle))
return; /* AKPM: return what? */
last_block = (inode->i_size + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
if (inode->i_size & (blocksize - 1))
if (ext4_block_truncate_page(handle, mapping, inode->i_size))
goto out_stop;
if (last_block != max_block) {
n = ext4_block_to_path(inode, last_block, offsets, NULL);
if (n == 0)
goto out_stop; /* error */
}
/*
* OK. This truncate is going to happen. We add the inode to the
* orphan list, so that if this truncate spans multiple transactions,
* and we crash, we will resume the truncate when the filesystem
* recovers. It also marks the inode dirty, to catch the new size.
*
* Implication: the file must always be in a sane, consistent
* truncatable state while each transaction commits.
*/
if (ext4_orphan_add(handle, inode))
goto out_stop;
/*
* From here we block out all ext4_get_block() callers who want to
* modify the block allocation tree.
*/
down_write(&ei->i_data_sem);
ext4_discard_preallocations(inode);
/*
* The orphan list entry will now protect us from any crash which
* occurs before the truncate completes, so it is now safe to propagate
* the new, shorter inode size (held for now in i_size) into the
* on-disk inode. We do this via i_disksize, which is the value which
* ext4 *really* writes onto the disk inode.
*/
ei->i_disksize = inode->i_size;
if (last_block == max_block) {
/*
* It is unnecessary to free any data blocks if last_block is
* equal to the indirect block limit.
*/
goto out_unlock;
} else if (n == 1) { /* direct blocks */
ext4_free_data(handle, inode, NULL, i_data+offsets[0],
i_data + EXT4_NDIR_BLOCKS);
goto do_indirects;
}
partial = ext4_find_shared(inode, n, offsets, chain, &nr);
/* Kill the top of shared branch (not detached) */
if (nr) {
if (partial == chain) {
/* Shared branch grows from the inode */
ext4_free_branches(handle, inode, NULL,
&nr, &nr+1, (chain+n-1) - partial);
*partial->p = 0;
/*
* We mark the inode dirty prior to restart,
* and prior to stop. No need for it here.
*/
} else {
/* Shared branch grows from an indirect block */
BUFFER_TRACE(partial->bh, "get_write_access");
ext4_free_branches(handle, inode, partial->bh,
partial->p,
partial->p+1, (chain+n-1) - partial);
}
}
/* Clear the ends of indirect blocks on the shared branch */
while (partial > chain) {
ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
(__le32*)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
BUFFER_TRACE(partial->bh, "call brelse");
brelse(partial->bh);
partial--;
}
do_indirects:
/* Kill the remaining (whole) subtrees */
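/*
* Note that the cases below fall through deliberately: whichever
* level offsets[0] starts at, every deeper indirect level must be
* freed as well.
*/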
switch (offsets[0]) {
default:
nr = i_data[EXT4_IND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
i_data[EXT4_IND_BLOCK] = 0;
}
case EXT4_IND_BLOCK:
nr = i_data[EXT4_DIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
i_data[EXT4_DIND_BLOCK] = 0;
}
case EXT4_DIND_BLOCK:
nr = i_data[EXT4_TIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
i_data[EXT4_TIND_BLOCK] = 0;
}
case EXT4_TIND_BLOCK:
;
}
out_unlock:
up_write(&ei->i_data_sem);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
/*
* In a multi-transaction truncate, we only make the final transaction
* synchronous
*/
if (IS_SYNC(inode))
ext4_handle_sync(handle);
out_stop:
/*
* If this was a simple ftruncate(), and the file will remain alive
* then we need to clear up the orphan record which we created above.
* However, if this was a real unlink then we were called by
* ext4_delete_inode(), and we allow that function to clean up the
* orphan info for us.
*/
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
}
/*
* ext4_get_inode_loc returns with an extra refcount against the inode's
* underlying buffer_head on success. If 'in_mem' is true, we have all
* data in memory that is needed to recreate the on-disk version of this
* inode.
*/
static int __ext4_get_inode_loc(struct inode *inode,
struct ext4_iloc *iloc, int in_mem)
{
struct ext4_group_desc *gdp;
struct buffer_head *bh;
struct super_block *sb = inode->i_sb;
ext4_fsblk_t block;
int inodes_per_block, inode_offset;
iloc->bh = NULL;
if (!ext4_valid_inum(sb, inode->i_ino))
return -EIO;
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
if (!gdp)
return -EIO;
/*
* Figure out the offset within the block group inode table
*/
inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
inode_offset = ((inode->i_ino - 1) %
EXT4_INODES_PER_GROUP(sb));
block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
bh = sb_getblk(sb, block);
if (!bh) {
EXT4_ERROR_INODE_BLOCK(inode, block,
"unable to read itable block");
return -EIO;
}
if (!buffer_uptodate(bh)) {
lock_buffer(bh);
/*
* If the buffer has the write error flag, we have failed
* to write out another inode in the same block. In this
* case, we don't have to read the block because we may
* read the old inode data successfully.
*/
if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
set_buffer_uptodate(bh);
if (buffer_uptodate(bh)) {
/* someone brought it uptodate while we waited */
unlock_buffer(bh);
goto has_buffer;
}
/*
* If we have all information of the inode in memory and this
* is the only valid inode in the block, we need not read the
* block.
*/
if (in_mem) {
struct buffer_head *bitmap_bh;
int i, start;
start = inode_offset & ~(inodes_per_block - 1);
/* Is the inode bitmap in cache? */
bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
if (!bitmap_bh)
goto make_io;
/*
* If the inode bitmap isn't in cache then the
* optimisation may end up performing two reads instead
* of one, so skip it.
*/
if (!buffer_uptodate(bitmap_bh)) {
brelse(bitmap_bh);
goto make_io;
}
for (i = start; i < start + inodes_per_block; i++) {
if (i == inode_offset)
continue;
if (ext4_test_bit(i, bitmap_bh->b_data))
break;
}
brelse(bitmap_bh);
if (i == start + inodes_per_block) {
/* all other inodes are free, so skip I/O */
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
unlock_buffer(bh);
goto has_buffer;
}
}
make_io:
/*
* If we need to do any I/O, try to pre-readahead extra
* blocks from the inode table.
*/
if (EXT4_SB(sb)->s_inode_readahead_blks) {
ext4_fsblk_t b, end, table;
unsigned num;
table = ext4_inode_table(sb, gdp);
/* s_inode_readahead_blks is always a power of 2 */
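/* so this mask rounds 'block' down to the start of its readahead window */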
b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
if (table > b)
b = table;
end = b + EXT4_SB(sb)->s_inode_readahead_blks;
num = EXT4_INODES_PER_GROUP(sb);
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
num -= ext4_itable_unused_count(sb, gdp);
table += num / inodes_per_block;
if (end > table)
end = table;
while (b <= end)
sb_breadahead(sb, b++);
}
/*
* There are other valid inodes in the buffer, this inode
* has in-inode xattrs, or we don't have this inode in memory.
* Read the block from disk.
*/
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ_META, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
"unable to read itable block");
brelse(bh);
return -EIO;
}
}
has_buffer:
iloc->bh = bh;
return 0;
}
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
/* We have all inode data except xattrs in memory here. */
return __ext4_get_inode_loc(inode, iloc,
!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT4_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT4_APPEND_FL)
inode->i_flags |= S_APPEND;
if (flags & EXT4_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
unsigned int vfs_fl;
unsigned long old_fl, new_fl;
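/*
* Snapshot i_flags, compute the new value from the VFS flags,
* then publish it with cmpxchg(); if i_flags changed underneath
* us, loop and retry with a fresh snapshot. This keeps the
* update atomic without taking a lock.
*/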
do {
vfs_fl = ei->vfs_inode.i_flags;
old_fl = ei->i_flags;
new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
EXT4_DIRSYNC_FL);
if (vfs_fl & S_SYNC)
new_fl |= EXT4_SYNC_FL;
if (vfs_fl & S_APPEND)
new_fl |= EXT4_APPEND_FL;
if (vfs_fl & S_IMMUTABLE)
new_fl |= EXT4_IMMUTABLE_FL;
if (vfs_fl & S_NOATIME)
new_fl |= EXT4_NOATIME_FL;
if (vfs_fl & S_DIRSYNC)
new_fl |= EXT4_DIRSYNC_FL;
} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
blkcnt_t i_blocks;
struct inode *inode = &(ei->vfs_inode);
struct super_block *sb = inode->i_sb;
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
/* we are using combined 48 bit field */
i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
le32_to_cpu(raw_inode->i_blocks_lo);
if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
/* i_blocks is stored in units of the file system block size */
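/*
* e.g. with 4KiB blocks (i_blkbits == 12), shifting left by
* i_blkbits - 9 == 3 converts file system blocks back to the
* 512-byte units that i_blocks is normally expressed in.
*/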
return i_blocks << (inode->i_blkbits - 9);
} else {
return i_blocks;
}
} else {
return le32_to_cpu(raw_inode->i_blocks_lo);
}
}
struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
struct ext4_inode_info *ei;
struct inode *inode;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
int block;
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
ei = EXT4_I(inode);
iloc.bh = NULL;
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
goto bad_inode;
raw_inode = ext4_raw_inode(&iloc);
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if (!(test_opt(inode->i_sb, NO_UID32))) {
inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes.
* The test is the same one that e2fsck uses.
* NeilBrown 1999oct15
*/
if (inode->i_nlink == 0) {
if (inode->i_mode == 0 ||
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
/* this inode is deleted */
ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
* valid i_mode and are being read by the orphan
* recovery code: that's fine, we're about to complete
* the process of deleting those. */
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
#endif
inode->i_generation = le32_to_cpu(raw_inode->i_generation);
ei->i_block_group = iloc.block_group;
ei->i_last_alloc_group = ~0;
/*
* NOTE! The in-memory inode i_data array is in little-endian order
* even on big-endian machines: we do NOT byteswap the block numbers!
*/
for (block = 0; block < EXT4_N_BLOCKS; block++)
ei->i_data[block] = raw_inode->i_block[block];
INIT_LIST_HEAD(&ei->i_orphan);
/*
* Set transaction id's of transactions that have to be committed
* to finish f[data]sync. We set them to currently running transaction
* as we cannot be sure that the inode or some of its metadata isn't
* part of the transaction - the inode could have been reclaimed and
* now it is reread from disk.
*/
if (journal) {
transaction_t *transaction;
tid_t tid;
read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
transaction = journal->j_running_transaction;
else
transaction = journal->j_committing_transaction;
if (transaction)
tid = transaction->t_tid;
else
tid = journal->j_commit_sequence;
read_unlock(&journal->j_state_lock);
ei->i_sync_tid = tid;
ei->i_datasync_tid = tid;
}
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb)) {
ret = -EIO;
goto bad_inode;
}
if (ei->i_extra_isize == 0) {
/* The extra space is currently unused. Use it. */
ei->i_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
} else {
__le32 *magic = (void *)raw_inode +
EXT4_GOOD_OLD_INODE_SIZE +
ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
ext4_set_inode_state(inode, EXT4_STATE_XATTR);
}
} else
ei->i_extra_isize = 0;
EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
inode->i_version |=
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
}
ret = 0;
if (ei->i_file_acl &&
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
ei->i_file_acl);
ret = -EIO;
goto bad_inode;
} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode)))
/* Validate extent which is part of inode */
ret = ext4_ext_check_inode(inode);
} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode))) {
/* Validate block references which are part of inode */
ret = ext4_check_inode_blockref(inode);
}
if (ret)
goto bad_inode;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
if (ext4_inode_is_fast_symlink(inode)) {
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
} else {
inode->i_op = &ext4_symlink_inode_operations;
ext4_set_aops(inode);
}
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
inode->i_op = &ext4_special_inode_operations;
if (raw_inode->i_block[0])
init_special_inode(inode, inode->i_mode,
old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
else
init_special_inode(inode, inode->i_mode,
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
} else {
ret = -EIO;
EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
brelse(iloc.bh);
ext4_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
bad_inode:
brelse(iloc.bh);
iget_failed(inode);
return ERR_PTR(ret);
}
static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
struct inode *inode = &(ei->vfs_inode);
u64 i_blocks = inode->i_blocks;
struct super_block *sb = inode->i_sb;
if (i_blocks <= ~0U) {
/*
* i_blocks can be represented in a 32 bit variable
* as a multiple of 512 bytes
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = 0;
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
return 0;
}
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
return -EFBIG;
if (i_blocks <= 0xffffffffffffULL) {
/*
* i_blocks can be represented in a 48 bit variable
* as a multiple of 512 bytes
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
} else {
ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
/* i_block is stored in file system block size */
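/*
* Expressing i_blocks in file system blocks rather than 512-byte
* sectors lets the 48-bit on-disk field cover a file
* 2^(i_blkbits - 9) times larger before overflowing.
*/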
i_blocks = i_blocks >> (inode->i_blkbits - 9);
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
}
return 0;
}
/*
* Post the struct inode info into an on-disk inode location in the
* buffer-cache. This gobbles the caller's reference to the
* buffer_head in the inode location struct.
*
* The caller must have write access to iloc->bh.
*/
static int ext4_do_update_inode(handle_t *handle,
struct inode *inode,
struct ext4_iloc *iloc)
{
struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
int err = 0, rc, block;
int need_datasync = 0;
/* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
if (!(test_opt(inode->i_sb, NO_UID32))) {
raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
/*
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if (!ei->i_dtime) {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(inode->i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(inode->i_gid));
} else {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low =
cpu_to_le16(fs_high2lowuid(inode->i_uid));
raw_inode->i_gid_low =
cpu_to_le16(fs_high2lowgid(inode->i_gid));
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
if (ext4_inode_blocks_set(handle, raw_inode, ei))
goto out_brelse;
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_HURD))
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
if (ei->i_disksize != ext4_isize(raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
if (ei->i_disksize > 0x7fffffffULL) {
struct super_block *sb = inode->i_sb;
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
EXT4_SB(sb)->s_es->s_rev_level ==
cpu_to_le32(EXT4_GOOD_OLD_REV)) {
/* If this is the first large file
* created, add a flag to the superblock.
*/
err = ext4_journal_get_write_access(handle,
EXT4_SB(sb)->s_sbh);
if (err)
goto out_brelse;
ext4_update_dynamic_rev(sb);
EXT4_SET_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
sb->s_dirt = 1;
ext4_handle_sync(handle);
err = ext4_handle_dirty_metadata(handle, NULL,
EXT4_SB(sb)->s_sbh);
}
}
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
raw_inode->i_block[0] =
cpu_to_le32(old_encode_dev(inode->i_rdev));
raw_inode->i_block[1] = 0;
} else {
raw_inode->i_block[0] = 0;
raw_inode->i_block[1] =
cpu_to_le32(new_encode_dev(inode->i_rdev));
raw_inode->i_block[2] = 0;
}
} else
for (block = 0; block < EXT4_N_BLOCKS; block++)
raw_inode->i_block[block] = ei->i_data[block];
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
if (ei->i_extra_isize) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
raw_inode->i_version_hi =
cpu_to_le32(inode->i_version >> 32);
raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
}
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
rc = ext4_handle_dirty_metadata(handle, NULL, bh);
if (!err)
err = rc;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* ext4_write_inode()
*
* We are called from a few places:
*
* - Within generic_file_write() for O_SYNC files.
* Here, there will be no transaction running. We wait for any running
* transaction to commit.
*
* - Within sys_sync(), kupdate and such.
* We wait on commit, if told to.
*
* - Within prune_icache() (PF_MEMALLOC == true)
* Here we simply return. We can't afford to block kswapd on the
* journal commit.
*
* In all cases it is actually safe for us to return without doing anything,
* because the inode has been copied into a raw inode buffer in
* ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
* knfsd.
*
* Note that we are absolutely dependent upon all inode dirtiers doing the
* right thing: they *must* call mark_inode_dirty() after dirtying info in
* which we are interested.
*
* It would be a bug for them to not do this. The code:
*
* mark_inode_dirty(inode)
* stuff();
* inode->i_size = expr;
*
* is in error because a kswapd-driven write_inode() could occur while
* `stuff()' is running, and the new i_size will be lost. Plus the inode
* will no longer be on the superblock's dirty inode list.
*/
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
if (current->flags & PF_MEMALLOC)
return 0;
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
dump_stack();
return -EIO;
}
if (wbc->sync_mode != WB_SYNC_ALL)
return 0;
err = ext4_force_commit(inode->i_sb);
} else {
struct ext4_iloc iloc;
err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
if (wbc->sync_mode == WB_SYNC_ALL)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
"IO error syncing inode");
err = -EIO;
}
brelse(iloc.bh);
}
return err;
}
/*
* ext4_setattr()
*
* Called from notify_change.
*
* We want to trap VFS attempts to truncate the file as soon as
* possible. In particular, we want to make sure that when the VFS
* shrinks i_size, we put the inode on the orphan list and modify
* i_disksize immediately, so that during the subsequent flushing of
* dirty pages and freeing of disk blocks, we can guarantee that any
* commit will leave the blocks being flushed in an unused state on
* disk. (On recovery, the inode will get truncated and the blocks will
* be freed, so we have a strong guarantee that no future commit will
* leave these blocks visible to the user.)
*
* Another thing we have to ensure is that if we are in ordered mode
* and the inode is still attached to the committing transaction, we
* must start writeout of all the dirty pages which are being truncated.
* This way we are sure that all the data written in the previous
* transaction are already on disk (truncate waits for pages under
* writeback).
*
* Called with inode->i_mutex down.
*/
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error, rc = 0;
int orphan = 0;
const unsigned int ia_valid = attr->ia_valid;
error = inode_change_ok(inode, attr);
if (error)
return error;
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
* inode block, ? - but truncate inode update has it) */
handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
}
error = dquot_transfer(inode, attr);
if (error) {
ext4_journal_stop(handle);
return error;
}
/* Update corresponding info in inode so that everything is in
* one transaction */
if (attr->ia_valid & ATTR_UID)
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
}
if (attr->ia_valid & ATTR_SIZE) {
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (attr->ia_size > sbi->s_bitmap_maxbytes)
return -EFBIG;
}
}
if (S_ISREG(inode->i_mode) &&
attr->ia_valid & ATTR_SIZE &&
(attr->ia_size < inode->i_size)) {
handle_t *handle;
handle = ext4_journal_start(inode, 3);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
goto err_out;
}
if (ext4_handle_valid(handle)) {
error = ext4_orphan_add(handle, inode);
orphan = 1;
}
EXT4_I(inode)->i_disksize = attr->ia_size;
rc = ext4_mark_inode_dirty(handle, inode);
if (!error)
error = rc;
ext4_journal_stop(handle);
if (ext4_should_order_data(inode)) {
error = ext4_begin_ordered_truncate(inode,
attr->ia_size);
if (error) {
/* Do as much error cleanup as possible */
handle = ext4_journal_start(inode, 3);
if (IS_ERR(handle)) {
ext4_orphan_del(NULL, inode);
goto err_out;
}
ext4_orphan_del(handle, inode);
orphan = 0;
ext4_journal_stop(handle);
goto err_out;
}
}
}
if (attr->ia_valid & ATTR_SIZE) {
if (attr->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
ext4_truncate(inode);
} else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
ext4_truncate(inode);
}
if (!rc) {
setattr_copy(inode, attr);
mark_inode_dirty(inode);
}
/*
* If the call to ext4_truncate failed to get a transaction handle at
* all, we need to clean up the in-core orphan list manually.
*/
if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
if (!rc && (ia_valid & ATTR_MODE))
rc = ext4_acl_chmod(inode);
err_out:
ext4_std_error(inode->i_sb, error);
if (!error)
error = rc;
return error;
}
int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct inode *inode;
unsigned long delalloc_blocks;
inode = dentry->d_inode;
generic_fillattr(inode, stat);
/*
* We can't update i_blocks if the block allocation is delayed;
* otherwise, in the case of a system crash before the real block
* allocation is done, we would have i_blocks inconsistent with
* the on-disk file blocks.
* We always keep i_blocks updated together with the real
* allocation. But so as not to confuse userspace, stat
* will return blocks that include the delayed allocation
* blocks for this file.
*/
delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
return 0;
}
static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
int chunk)
{
int indirects;
/* if nrblocks are contiguous */
if (chunk) {
/*
* With N contiguous data blocks, we need at most
* N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
* 2 dindirect blocks, and 1 tindirect block
*/
return DIV_ROUND_UP(nrblocks,
EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
/*
* if nrblocks are not contiguous, then in the worst case each block
* touches an indirect block, and each indirect block touches a double
* indirect block, plus a triple indirect block
*/
indirects = nrblocks * 2 + 1;
return indirects;
}
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}
/*
* Account for index blocks, block group bitmaps and block group
* descriptor blocks if we modify data blocks and index blocks.
* In the worst case, the index blocks are spread over different
* block groups.
*
* If data blocks are discontiguous, they may spread over different
* block groups too. Even if they are contiguous, with flexbg they
* could still cross a block group boundary.
*
* Also account for superblock, inode, quota and xattr blocks
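*
* Worked example (hypothetical numbers): with idxblocks == 3,
* chunk == 1, plenty of groups and s_gdb_count == 2, we get
* groups == 4 and gdpblocks == 2, so the result is
* 3 + 4 + 2 + EXT4_META_TRANS_BLOCKS(sb) credits.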
*/
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
int gdpblocks;
int idxblocks;
int ret = 0;
/*
* How many index blocks do we need to touch to modify nrblocks?
* The "chunk" flag indicates whether the nrblocks are
* physically contiguous on disk.
*
* Direct IO and fallocate call get_block to allocate
* one single extent at a time, so they can set the "chunk" flag.
*/
idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
ret = idxblocks;
/*
* Now let's see how many group bitmaps and group descriptors
* need to be accounted for
*/
groups = idxblocks;
if (chunk)
groups += 1;
else
groups += nrblocks;
gdpblocks = groups;
if (groups > ngroups)
groups = ngroups;
if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
/* bitmaps and block group descriptor blocks */
ret += groups + gdpblocks;
/* Blocks for super block, inode, quota and xattr blocks */
ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
return ret;
}
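/*
 * E.g. modifying one discontiguous block (nrblocks == 1, chunk == 0)
 * of an indirect-mapped file gives idxblocks = 1 * 2 + 1 = 3 and
 * groups = gdpblocks = 3 + 1 = 4, so (before the ngroups and
 * s_gdb_count caps above) the estimate is
 * 3 + 4 + 4 + EXT4_META_TRANS_BLOCKS(sb) credits.
 */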
/*
* Calculate the total number of credits to reserve to fit
* the modification of a single page into a single transaction,
* which may include multiple chunks of block allocations.
*
* This could be called via ext4_write_begin().
*
* We need to consider the worst case, when
* one new block is allocated per extent.
*/
int ext4_writepage_trans_blocks(struct inode *inode)
{
int bpp = ext4_journal_blocks_per_page(inode);
int ret;
ret = ext4_meta_trans_blocks(inode, bpp, 0);
/* Account for data blocks for journalled mode */
if (ext4_should_journal_data(inode))
ret += bpp;
return ret;
}
/*
* Calculate the journal credits for a chunk of data modification.
*
* This is called from DIO, fallocate or whoever calls
* ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
*
* Journal buffers for data blocks are not included here, as DIO
* and fallocate do not need to journal data buffers.
*/
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
/*
* The caller must have previously called ext4_reserve_inode_write().
* Given this, we know that the caller already has write access to iloc->bh.
*/
int ext4_mark_iloc_dirty(handle_t *handle,
struct inode *inode, struct ext4_iloc *iloc)
{
int err = 0;
if (test_opt(inode->i_sb, I_VERSION))
inode_inc_iversion(inode);
/* the do_update_inode consumes one bh->b_count */
get_bh(iloc->bh);
/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
err = ext4_do_update_inode(handle, inode, iloc);
put_bh(iloc->bh);
return err;
}
/*
* On success, we end up with an outstanding reference count against
* iloc->bh. This _must_ be cleaned up later.
*/
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc)
{
int err;
err = ext4_get_inode_loc(inode, iloc);
if (!err) {
BUFFER_TRACE(iloc->bh, "get_write_access");
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err) {
brelse(iloc->bh);
iloc->bh = NULL;
}
}
ext4_std_error(inode->i_sb, err);
return err;
}
/*
* Expand an inode by new_extra_isize bytes.
* Returns 0 on success or negative error number on failure.
*/
static int ext4_expand_extra_isize(struct inode *inode,
unsigned int new_extra_isize,
struct ext4_iloc iloc,
handle_t *handle)
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
return 0;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
new_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
/* try to expand with EAs present */
return ext4_expand_extra_isize_ea(inode, new_extra_isize,
raw_inode, handle);
}
/*
* What we do here is to mark the in-core inode as clean with respect to inode
* dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
* without having to perform any I/O. This is a very good thing,
* because *any* task may call prune_icache - even ones which
* have a transaction open against a different journal.
*
* Is this cheating? Not really. Sure, we haven't written the
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
*
* Is this efficient/effective? Well, we're being nice to the system
* by cleaning up our inodes proactively so they can be reaped
* without I/O. But we are potentially leaving up to five seconds'
* worth of inodes floating about which prune_icache wants us to
* write out. One way to fix that would be to get prune_icache()
* to do a write_super() to free up some memory. It has the desired
* effect.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
static unsigned int mnt_count;
int err, ret;
might_sleep();
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (ext4_handle_valid(handle) &&
EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
!ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
/*
* We need extra buffer credits since we may write into EA block
* with this same handle. If journal_extend fails, then it will
* only result in a minor loss of functionality for that inode.
* If this is felt to be critical, then e2fsck should be run to
* force a large enough s_min_extra_isize.
*/
if ((jbd2_journal_extend(handle,
EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
ret = ext4_expand_extra_isize(inode,
sbi->s_want_extra_isize,
iloc, handle);
if (ret) {
ext4_set_inode_state(inode,
EXT4_STATE_NO_EXPAND);
if (mnt_count !=
le16_to_cpu(sbi->s_es->s_mnt_count)) {
ext4_warning(inode->i_sb,
"Unable to expand inode %lu. Delete"
" some EAs or run e2fsck.",
inode->i_ino);
mnt_count =
le16_to_cpu(sbi->s_es->s_mnt_count);
}
}
}
}
if (!err)
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
return err;
}
/*
* ext4_dirty_inode() is called from __mark_inode_dirty()
*
* We're really interested in the case where a file is being extended.
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, dquot_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
* so would cause a commit on atime updates, which we don't bother doing.
* We handle synchronous inodes at the highest possible level.
*/
void ext4_dirty_inode(struct inode *inode, int flags)
{
handle_t *handle;
handle = ext4_journal_start(inode, 2);
if (IS_ERR(handle))
goto out;
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out:
return;
}
#if 0
/*
* Bind an inode's backing buffer_head into this transaction, to prevent
* it from being flushed to disk early. Unlike
* ext4_reserve_inode_write, this leaves behind no bh reference and
* returns no iloc structure, so the caller needs to repeat the iloc
* lookup to mark the inode dirty later.
*/
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
int err = 0;
if (handle) {
err = ext4_get_inode_loc(inode, &iloc);
if (!err) {
BUFFER_TRACE(iloc.bh, "get_write_access");
err = jbd2_journal_get_write_access(handle, iloc.bh);
if (!err)
err = ext4_handle_dirty_metadata(handle,
NULL,
iloc.bh);
brelse(iloc.bh);
}
}
ext4_std_error(inode->i_sb, err);
return err;
}
#endif
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
journal_t *journal;
handle_t *handle;
int err;
/*
* We have to be very careful here: changing a data block's
* journaling status dynamically is dangerous. If we write a
* data block to the journal, change the status and then delete
* that block, we risk forgetting to revoke the old log record
* from the journal and so a subsequent replay can corrupt data.
* So, first we make sure that the journal is empty and that
* nobody is changing anything.
*/
journal = EXT4_JOURNAL(inode);
if (!journal)
return 0;
if (is_journal_aborted(journal))
return -EROFS;
jbd2_journal_lock_updates(journal);
jbd2_journal_flush(journal);
/*
* OK, there are no updates running now, and all cached data is
* synced to disk. We are now in a completely consistent state
* which doesn't have anything in the journal, and we know that
* no filesystem updates are running, so it is safe to modify
* the inode's in-core data-journaling state flag now.
*/
if (val)
ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
else
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
/* Finally we can mark the inode as dirty. */
handle = ext4_journal_start(inode, 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
ext4_std_error(inode->i_sb, err);
return err;
}
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
return !buffer_mapped(bh);
}
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
loff_t size;
unsigned long len;
int ret = -EINVAL;
void *fsdata;
struct file *file = vma->vm_file;
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
/*
* Get i_alloc_sem to stop truncates messing with the inode. We cannot
* get i_mutex because we are already holding mmap_sem.
*/
down_read(&inode->i_alloc_sem);
size = i_size_read(inode);
if (page->mapping != mapping || size <= page_offset(page)
|| !PageUptodate(page)) {
/* page got truncated from under us? */
goto out_unlock;
}
ret = 0;
lock_page(page);
wait_on_page_writeback(page);
if (PageMappedToDisk(page)) {
up_read(&inode->i_alloc_sem);
return VM_FAULT_LOCKED;
}
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
/*
* Return if we have all the buffers mapped. This avoids
* the need to call write_begin/write_end, which does a
* journal_start/journal_stop that can block and take a
* long time.
*/
if (page_has_buffers(page)) {
if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
ext4_bh_unmapped)) {
up_read(&inode->i_alloc_sem);
return VM_FAULT_LOCKED;
}
}
unlock_page(page);
/*
* OK, we need to fill the hole... Do write_begin/write_end
* to do the block allocation/reservation. We are not holding
* inode->i_mutex here, which allows parallel write_begin and
* write_end calls. lock_page prevents this from happening
* on the same page, though.
*/
ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
if (ret < 0)
goto out_unlock;
ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
len, len, page, fsdata);
if (ret < 0)
goto out_unlock;
ret = 0;
/*
* write_begin/end might have created a dirty page and someone
* could wander in and start the IO. Make sure that hasn't
* happened.
*/
lock_page(page);
wait_on_page_writeback(page);
up_read(&inode->i_alloc_sem);
return VM_FAULT_LOCKED;
out_unlock:
if (ret)
ret = VM_FAULT_SIGBUS;
up_read(&inode->i_alloc_sem);
return ret;
}
| gpl-2.0 |
FEDEVEL/openrex-linux-3.14 | drivers/usb/serial/digi_acceleport.c | 428 | 44236 | /*
* Digi AccelePort USB-4 and USB-2 Serial Converters
*
* Copyright 2000 by Digi International
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Shamelessly based on Brian Warner's keyspan_pda.c and Greg Kroah-Hartman's
* usb-serial driver.
*
* Peter Berger (pberger@brimson.com)
* Al Borchers (borchers@steinerpoint.com)
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/serial.h>
/* Defines */
#define DRIVER_AUTHOR "Peter Berger <pberger@brimson.com>, Al Borchers <borchers@steinerpoint.com>"
#define DRIVER_DESC "Digi AccelePort USB-2/USB-4 Serial Converter driver"
/* port output buffer length -- must be <= transfer buffer length - 2 */
/* so we can be sure to send the full buffer in one urb */
#define DIGI_OUT_BUF_SIZE 8
/* port input buffer length -- must be >= transfer buffer length - 3 */
/* so we can be sure to hold at least one full buffer from one urb */
#define DIGI_IN_BUF_SIZE 64
/* retry timeout while sleeping */
#define DIGI_RETRY_TIMEOUT (HZ/10)
/* timeout while waiting for tty output to drain in close */
/* this delay is used twice in close, so the total delay could */
/* be twice this value */
#define DIGI_CLOSE_TIMEOUT (5*HZ)
/* AccelePort USB Defines */
/* ids */
#define DIGI_VENDOR_ID 0x05c5
#define DIGI_2_ID 0x0002 /* USB-2 */
#define DIGI_4_ID 0x0004 /* USB-4 */
/* commands
* "INB": can be used on the in-band endpoint
* "OOB": can be used on the out-of-band endpoint
*/
#define DIGI_CMD_SET_BAUD_RATE 0 /* INB, OOB */
#define DIGI_CMD_SET_WORD_SIZE 1 /* INB, OOB */
#define DIGI_CMD_SET_PARITY 2 /* INB, OOB */
#define DIGI_CMD_SET_STOP_BITS 3 /* INB, OOB */
#define DIGI_CMD_SET_INPUT_FLOW_CONTROL 4 /* INB, OOB */
#define DIGI_CMD_SET_OUTPUT_FLOW_CONTROL 5 /* INB, OOB */
#define DIGI_CMD_SET_DTR_SIGNAL 6 /* INB, OOB */
#define DIGI_CMD_SET_RTS_SIGNAL 7 /* INB, OOB */
#define DIGI_CMD_READ_INPUT_SIGNALS 8 /* OOB */
#define DIGI_CMD_IFLUSH_FIFO 9 /* OOB */
#define DIGI_CMD_RECEIVE_ENABLE 10 /* INB, OOB */
#define DIGI_CMD_BREAK_CONTROL 11 /* INB, OOB */
#define DIGI_CMD_LOCAL_LOOPBACK 12 /* INB, OOB */
#define DIGI_CMD_TRANSMIT_IDLE 13 /* INB, OOB */
#define DIGI_CMD_READ_UART_REGISTER 14 /* OOB */
#define DIGI_CMD_WRITE_UART_REGISTER 15 /* INB, OOB */
#define DIGI_CMD_AND_UART_REGISTER 16 /* INB, OOB */
#define DIGI_CMD_OR_UART_REGISTER 17 /* INB, OOB */
#define DIGI_CMD_SEND_DATA 18 /* INB */
#define DIGI_CMD_RECEIVE_DATA 19 /* INB */
#define DIGI_CMD_RECEIVE_DISABLE 20 /* INB */
#define DIGI_CMD_GET_PORT_TYPE 21 /* OOB */
/* baud rates */
#define DIGI_BAUD_50 0
#define DIGI_BAUD_75 1
#define DIGI_BAUD_110 2
#define DIGI_BAUD_150 3
#define DIGI_BAUD_200 4
#define DIGI_BAUD_300 5
#define DIGI_BAUD_600 6
#define DIGI_BAUD_1200 7
#define DIGI_BAUD_1800 8
#define DIGI_BAUD_2400 9
#define DIGI_BAUD_4800 10
#define DIGI_BAUD_7200 11
#define DIGI_BAUD_9600 12
#define DIGI_BAUD_14400 13
#define DIGI_BAUD_19200 14
#define DIGI_BAUD_28800 15
#define DIGI_BAUD_38400 16
#define DIGI_BAUD_57600 17
#define DIGI_BAUD_76800 18
#define DIGI_BAUD_115200 19
#define DIGI_BAUD_153600 20
#define DIGI_BAUD_230400 21
#define DIGI_BAUD_460800 22
/* arguments */
#define DIGI_WORD_SIZE_5 0
#define DIGI_WORD_SIZE_6 1
#define DIGI_WORD_SIZE_7 2
#define DIGI_WORD_SIZE_8 3
#define DIGI_PARITY_NONE 0
#define DIGI_PARITY_ODD 1
#define DIGI_PARITY_EVEN 2
#define DIGI_PARITY_MARK 3
#define DIGI_PARITY_SPACE 4
#define DIGI_STOP_BITS_1 0
#define DIGI_STOP_BITS_2 1
#define DIGI_INPUT_FLOW_CONTROL_XON_XOFF 1
#define DIGI_INPUT_FLOW_CONTROL_RTS 2
#define DIGI_INPUT_FLOW_CONTROL_DTR 4
#define DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF 1
#define DIGI_OUTPUT_FLOW_CONTROL_CTS 2
#define DIGI_OUTPUT_FLOW_CONTROL_DSR 4
#define DIGI_DTR_INACTIVE 0
#define DIGI_DTR_ACTIVE 1
#define DIGI_DTR_INPUT_FLOW_CONTROL 2
#define DIGI_RTS_INACTIVE 0
#define DIGI_RTS_ACTIVE 1
#define DIGI_RTS_INPUT_FLOW_CONTROL 2
#define DIGI_RTS_TOGGLE 3
#define DIGI_FLUSH_TX 1
#define DIGI_FLUSH_RX 2
#define DIGI_RESUME_TX 4 /* clears xoff condition */
#define DIGI_TRANSMIT_NOT_IDLE 0
#define DIGI_TRANSMIT_IDLE 1
#define DIGI_DISABLE 0
#define DIGI_ENABLE 1
#define DIGI_DEASSERT 0
#define DIGI_ASSERT 1
/* in band status codes */
#define DIGI_OVERRUN_ERROR 4
#define DIGI_PARITY_ERROR 8
#define DIGI_FRAMING_ERROR 16
#define DIGI_BREAK_ERROR 32
/* out of band status */
#define DIGI_NO_ERROR 0
#define DIGI_BAD_FIRST_PARAMETER 1
#define DIGI_BAD_SECOND_PARAMETER 2
#define DIGI_INVALID_LINE 3
#define DIGI_INVALID_OPCODE 4
/* input signals */
#define DIGI_READ_INPUT_SIGNALS_SLOT 1
#define DIGI_READ_INPUT_SIGNALS_ERR 2
#define DIGI_READ_INPUT_SIGNALS_BUSY 4
#define DIGI_READ_INPUT_SIGNALS_PE 8
#define DIGI_READ_INPUT_SIGNALS_CTS 16
#define DIGI_READ_INPUT_SIGNALS_DSR 32
#define DIGI_READ_INPUT_SIGNALS_RI 64
#define DIGI_READ_INPUT_SIGNALS_DCD 128
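/*
 * Commands are sent as 4-byte frames: opcode, line (port number),
 * argument, pad. E.g. asserting DTR on port 1 would be the frame
 * { DIGI_CMD_SET_DTR_SIGNAL, 1, DIGI_DTR_ACTIVE, 0 }
 * (see digi_set_modem_signals() below).
 */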
/* Structures */
struct digi_serial {
spinlock_t ds_serial_lock;
struct usb_serial_port *ds_oob_port; /* out-of-band port */
int ds_oob_port_num; /* index of out-of-band port */
int ds_device_started;
};
struct digi_port {
spinlock_t dp_port_lock;
int dp_port_num;
int dp_out_buf_len;
unsigned char dp_out_buf[DIGI_OUT_BUF_SIZE];
int dp_write_urb_in_use;
unsigned int dp_modem_signals;
int dp_transmit_idle;
wait_queue_head_t dp_transmit_idle_wait;
int dp_throttled;
int dp_throttle_restart;
wait_queue_head_t dp_flush_wait;
wait_queue_head_t dp_close_wait; /* wait queue for close */
struct work_struct dp_wakeup_work;
struct usb_serial_port *dp_port;
};
/* Local Function Declarations */
static void digi_wakeup_write_lock(struct work_struct *work);
static int digi_write_oob_command(struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible);
static int digi_write_inb_command(struct usb_serial_port *port,
unsigned char *buf, int count, unsigned long timeout);
static int digi_set_modem_signals(struct usb_serial_port *port,
unsigned int modem_signals, int interruptible);
static int digi_transmit_idle(struct usb_serial_port *port,
unsigned long timeout);
static void digi_rx_throttle(struct tty_struct *tty);
static void digi_rx_unthrottle(struct tty_struct *tty);
static void digi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
static void digi_break_ctl(struct tty_struct *tty, int break_state);
static int digi_tiocmget(struct tty_struct *tty);
static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear);
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
static void digi_write_bulk_callback(struct urb *urb);
static int digi_write_room(struct tty_struct *tty);
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
static void digi_disconnect(struct usb_serial *serial);
static void digi_release(struct usb_serial *serial);
static int digi_port_probe(struct usb_serial_port *port);
static int digi_port_remove(struct usb_serial_port *port);
static void digi_read_bulk_callback(struct urb *urb);
static int digi_read_inb_callback(struct urb *urb);
static int digi_read_oob_callback(struct urb *urb);
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_2[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ } /* Terminating entry */
};
static const struct usb_device_id id_table_4[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* device info needed for the Digi serial converter */
static struct usb_serial_driver digi_acceleport_2_device = {
.driver = {
.owner = THIS_MODULE,
.name = "digi_2",
},
.description = "Digi 2 port USB adapter",
.id_table = id_table_2,
.num_ports = 3,
.open = digi_open,
.close = digi_close,
.dtr_rts = digi_dtr_rts,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
.read_bulk_callback = digi_read_bulk_callback,
.chars_in_buffer = digi_chars_in_buffer,
.throttle = digi_rx_throttle,
.unthrottle = digi_rx_unthrottle,
.set_termios = digi_set_termios,
.break_ctl = digi_break_ctl,
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
.port_probe = digi_port_probe,
.port_remove = digi_port_remove,
};
static struct usb_serial_driver digi_acceleport_4_device = {
.driver = {
.owner = THIS_MODULE,
.name = "digi_4",
},
.description = "Digi 4 port USB adapter",
.id_table = id_table_4,
.num_ports = 4,
.open = digi_open,
.close = digi_close,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
.read_bulk_callback = digi_read_bulk_callback,
.chars_in_buffer = digi_chars_in_buffer,
.throttle = digi_rx_throttle,
.unthrottle = digi_rx_unthrottle,
.set_termios = digi_set_termios,
.break_ctl = digi_break_ctl,
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
.port_probe = digi_port_probe,
.port_remove = digi_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&digi_acceleport_2_device, &digi_acceleport_4_device, NULL
};
/* Functions */
/*
* Cond Wait Interruptible Timeout Irqrestore
*
* Do spin_unlock_irqrestore and interruptible_sleep_on_timeout
* so that wake ups are not lost if they occur between the unlock
* and the sleep. In other words, spin_unlock_irqrestore and
* interruptible_sleep_on_timeout are "atomic" with respect to
* wake ups. This is used to implement condition variables.
*
* interruptible_sleep_on_timeout is deprecated and has been replaced
* with the equivalent code.
*/
static long cond_wait_interruptible_timeout_irqrestore(
wait_queue_head_t *q, long timeout,
spinlock_t *lock, unsigned long flags)
__releases(lock)
{
DEFINE_WAIT(wait);
prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(lock, flags);
timeout = schedule_timeout(timeout);
finish_wait(q, &wait);
return timeout;
}
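/*
 * Callers below use it in this shape (a sketch):
 *
 *	spin_lock_irqsave(&priv->dp_port_lock, flags);
 *	while (!condition) {
 *		cond_wait_interruptible_timeout_irqrestore(
 *			&some_wait_queue, DIGI_RETRY_TIMEOUT,
 *			&priv->dp_port_lock, flags);
 *		if (signal_pending(current))
 *			return -EINTR;  (the lock is already dropped)
 *		spin_lock_irqsave(&priv->dp_port_lock, flags);
 *	}
 *	... condition now holds, with the lock held ...
 *	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
 */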
/*
* Digi Wakeup Write
*
* Wake up port, line discipline, and tty processes sleeping
* on writes.
*/
static void digi_wakeup_write_lock(struct work_struct *work)
{
struct digi_port *priv =
container_of(work, struct digi_port, dp_wakeup_work);
struct usb_serial_port *port = priv->dp_port;
unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
tty_port_tty_wakeup(&port->port);
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
}
/*
* Digi Write OOB Command
*
* Write commands on the out of band port. Commands are 4
* bytes each, multiple commands can be sent at once, and
* no command will be split across USB packets. Returns 0
* if successful, -EINTR if interrupted while sleeping and
* the interruptible flag is true, or a negative error
* returned by usb_submit_urb.
*/
static int digi_write_oob_command(struct usb_serial_port *port,
unsigned char *buf, int count, int interruptible)
{
int ret = 0;
int len;
struct usb_serial_port *oob_port = (struct usb_serial_port *)((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
unsigned long flags = 0;
dev_dbg(&port->dev,
"digi_write_oob_command: TOP: port=%d, count=%d\n",
oob_priv->dp_port_num, count);
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
while (count > 0) {
while (oob_priv->dp_write_urb_in_use) {
cond_wait_interruptible_timeout_irqrestore(
&oob_port->write_wait, DIGI_RETRY_TIMEOUT,
&oob_priv->dp_port_lock, flags);
if (interruptible && signal_pending(current))
return -EINTR;
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
}
/* len must be a multiple of 4, so commands are not split */
len = min(count, oob_port->bulk_out_size);
if (len > 4)
len &= ~3;
memcpy(oob_port->write_urb->transfer_buffer, buf, len);
oob_port->write_urb->transfer_buffer_length = len;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
count -= len;
buf += len;
}
}
spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
__func__, ret);
return ret;
}
/*
* Digi Write In Band Command
*
* Write commands on the given port. Commands are 4
* bytes each, multiple commands can be sent at once, and
* no command will be split across USB packets. If timeout
* is non-zero, write in band command will return after
* waiting unsuccessfully for the URB status to clear for
* timeout ticks. Returns 0 if successful, or a negative
* error returned by digi_write.
*/
static int digi_write_inb_command(struct usb_serial_port *port,
unsigned char *buf, int count, unsigned long timeout)
{
int ret = 0;
int len;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *data = port->write_urb->transfer_buffer;
unsigned long flags = 0;
dev_dbg(&port->dev, "digi_write_inb_command: TOP: port=%d, count=%d\n",
priv->dp_port_num, count);
if (timeout)
timeout += jiffies;
else
timeout = ULONG_MAX;
spin_lock_irqsave(&priv->dp_port_lock, flags);
while (count > 0 && ret == 0) {
while (priv->dp_write_urb_in_use &&
time_before(jiffies, timeout)) {
cond_wait_interruptible_timeout_irqrestore(
&port->write_wait, DIGI_RETRY_TIMEOUT,
&priv->dp_port_lock, flags);
if (signal_pending(current))
return -EINTR;
spin_lock_irqsave(&priv->dp_port_lock, flags);
}
/* len must be a multiple of 4 and small enough to */
/* guarantee the write will send buffered data first, */
/* so commands are in order with data and not split */
len = min(count, port->bulk_out_size-2-priv->dp_out_buf_len);
if (len > 4)
len &= ~3;
/* write any buffered data first */
if (priv->dp_out_buf_len > 0) {
data[0] = DIGI_CMD_SEND_DATA;
data[1] = priv->dp_out_buf_len;
memcpy(data + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
memcpy(data + 2 + priv->dp_out_buf_len, buf, len);
port->write_urb->transfer_buffer_length
= priv->dp_out_buf_len + 2 + len;
} else {
memcpy(data, buf, len);
port->write_urb->transfer_buffer_length = len;
}
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
priv->dp_out_buf_len = 0;
count -= len;
buf += len;
}
}
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
return ret;
}
/*
* Digi Set Modem Signals
*
* Sets or clears DTR and RTS on the port, according to the
* modem_signals argument. Use TIOCM_DTR and TIOCM_RTS flags
* for the modem_signals argument. Returns 0 if successful,
* -EINTR if interrupted while sleeping, or a non-zero error
* returned by usb_submit_urb.
*/
static int digi_set_modem_signals(struct usb_serial_port *port,
unsigned int modem_signals, int interruptible)
{
int ret;
struct digi_port *port_priv = usb_get_serial_port_data(port);
struct usb_serial_port *oob_port = (struct usb_serial_port *) ((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
unsigned char *data = oob_port->write_urb->transfer_buffer;
unsigned long flags = 0;
dev_dbg(&port->dev,
"digi_set_modem_signals: TOP: port=%d, modem_signals=0x%x\n",
port_priv->dp_port_num, modem_signals);
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
spin_lock(&port_priv->dp_port_lock);
while (oob_priv->dp_write_urb_in_use) {
spin_unlock(&port_priv->dp_port_lock);
cond_wait_interruptible_timeout_irqrestore(
&oob_port->write_wait, DIGI_RETRY_TIMEOUT,
&oob_priv->dp_port_lock, flags);
if (interruptible && signal_pending(current))
return -EINTR;
spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
spin_lock(&port_priv->dp_port_lock);
}
data[0] = DIGI_CMD_SET_DTR_SIGNAL;
data[1] = port_priv->dp_port_num;
data[2] = (modem_signals & TIOCM_DTR) ?
DIGI_DTR_ACTIVE : DIGI_DTR_INACTIVE;
data[3] = 0;
data[4] = DIGI_CMD_SET_RTS_SIGNAL;
data[5] = port_priv->dp_port_num;
data[6] = (modem_signals & TIOCM_RTS) ?
DIGI_RTS_ACTIVE : DIGI_RTS_INACTIVE;
data[7] = 0;
oob_port->write_urb->transfer_buffer_length = 8;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
port_priv->dp_modem_signals =
(port_priv->dp_modem_signals&~(TIOCM_DTR|TIOCM_RTS))
| (modem_signals&(TIOCM_DTR|TIOCM_RTS));
}
spin_unlock(&port_priv->dp_port_lock);
spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
__func__, ret);
return ret;
}
/*
* Digi Transmit Idle
*
* Digi transmit idle waits, up to timeout ticks, for the transmitter
* to go idle. It returns 0 if successful or a negative error.
*
* There are race conditions here if more than one process is calling
* digi_transmit_idle on the same port at the same time. However, this
* is only called from close, and only one process can be in close on a
* port at a time, so it's OK.
*/
static int digi_transmit_idle(struct usb_serial_port *port,
unsigned long timeout)
{
int ret;
unsigned char buf[2];
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned long flags = 0;
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_transmit_idle = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
buf[0] = DIGI_CMD_TRANSMIT_IDLE;
buf[1] = 0;
timeout += jiffies;
ret = digi_write_inb_command(port, buf, 2, timeout - jiffies);
if (ret != 0)
return ret;
spin_lock_irqsave(&priv->dp_port_lock, flags);
while (time_before(jiffies, timeout) && !priv->dp_transmit_idle) {
cond_wait_interruptible_timeout_irqrestore(
&priv->dp_transmit_idle_wait, DIGI_RETRY_TIMEOUT,
&priv->dp_port_lock, flags);
if (signal_pending(current))
return -EINTR;
spin_lock_irqsave(&priv->dp_port_lock, flags);
}
priv->dp_transmit_idle = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return 0;
}
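/*
 * E.g. digi_close() below calls
 * digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT) to drain the
 * transmitter before shutting the port down.
 */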
static void digi_rx_throttle(struct tty_struct *tty)
{
unsigned long flags;
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
/* stop receiving characters by not resubmitting the read urb */
spin_lock_irqsave(&priv->dp_port_lock, flags);
priv->dp_throttled = 1;
priv->dp_throttle_restart = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
}
static void digi_rx_unthrottle(struct tty_struct *tty)
{
int ret = 0;
unsigned long flags;
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* restart read chain */
if (priv->dp_throttle_restart)
ret = usb_submit_urb(port->read_urb, GFP_ATOMIC);
/* turn throttle off */
priv->dp_throttled = 0;
priv->dp_throttle_restart = 0;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
}
static void digi_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct digi_port *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
unsigned int iflag = tty->termios.c_iflag;
unsigned int cflag = tty->termios.c_cflag;
unsigned int old_iflag = old_termios->c_iflag;
unsigned int old_cflag = old_termios->c_cflag;
unsigned char buf[32];
unsigned int modem_signals;
int arg, ret;
int i = 0;
speed_t baud;
dev_dbg(dev,
"digi_set_termios: TOP: port=%d, iflag=0x%x, old_iflag=0x%x, cflag=0x%x, old_cflag=0x%x\n",
priv->dp_port_num, iflag, old_iflag, cflag, old_cflag);
/* set baud rate */
baud = tty_get_baud_rate(tty);
if (baud != tty_termios_baud_rate(old_termios)) {
arg = -1;
/* reassert DTR and (maybe) RTS on transition from B0 */
if ((old_cflag&CBAUD) == B0) {
/* don't set RTS if using hardware flow control */
/* and throttling input */
modem_signals = TIOCM_DTR;
if (!(tty->termios.c_cflag & CRTSCTS) ||
!test_bit(TTY_THROTTLED, &tty->flags))
modem_signals |= TIOCM_RTS;
digi_set_modem_signals(port, modem_signals, 1);
}
switch (baud) {
/* drop DTR and RTS on transition to B0 */
case 0: digi_set_modem_signals(port, 0, 1); break;
case 50: arg = DIGI_BAUD_50; break;
case 75: arg = DIGI_BAUD_75; break;
case 110: arg = DIGI_BAUD_110; break;
case 150: arg = DIGI_BAUD_150; break;
case 200: arg = DIGI_BAUD_200; break;
case 300: arg = DIGI_BAUD_300; break;
case 600: arg = DIGI_BAUD_600; break;
case 1200: arg = DIGI_BAUD_1200; break;
case 1800: arg = DIGI_BAUD_1800; break;
case 2400: arg = DIGI_BAUD_2400; break;
case 4800: arg = DIGI_BAUD_4800; break;
case 9600: arg = DIGI_BAUD_9600; break;
case 19200: arg = DIGI_BAUD_19200; break;
case 38400: arg = DIGI_BAUD_38400; break;
case 57600: arg = DIGI_BAUD_57600; break;
case 115200: arg = DIGI_BAUD_115200; break;
case 230400: arg = DIGI_BAUD_230400; break;
case 460800: arg = DIGI_BAUD_460800; break;
default:
arg = DIGI_BAUD_9600;
baud = 9600;
break;
}
if (arg != -1) {
buf[i++] = DIGI_CMD_SET_BAUD_RATE;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
}
/* set parity */
tty->termios.c_cflag &= ~CMSPAR;
if ((cflag&(PARENB|PARODD)) != (old_cflag&(PARENB|PARODD))) {
if (cflag&PARENB) {
if (cflag&PARODD)
arg = DIGI_PARITY_ODD;
else
arg = DIGI_PARITY_EVEN;
} else {
arg = DIGI_PARITY_NONE;
}
buf[i++] = DIGI_CMD_SET_PARITY;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set word size */
if ((cflag&CSIZE) != (old_cflag&CSIZE)) {
arg = -1;
switch (cflag&CSIZE) {
case CS5: arg = DIGI_WORD_SIZE_5; break;
case CS6: arg = DIGI_WORD_SIZE_6; break;
case CS7: arg = DIGI_WORD_SIZE_7; break;
case CS8: arg = DIGI_WORD_SIZE_8; break;
default:
dev_dbg(dev,
"digi_set_termios: can't handle word size %d\n",
(cflag&CSIZE));
break;
}
if (arg != -1) {
buf[i++] = DIGI_CMD_SET_WORD_SIZE;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
}
/* set stop bits */
if ((cflag&CSTOPB) != (old_cflag&CSTOPB)) {
if ((cflag&CSTOPB))
arg = DIGI_STOP_BITS_2;
else
arg = DIGI_STOP_BITS_1;
buf[i++] = DIGI_CMD_SET_STOP_BITS;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set input flow control */
if ((iflag&IXOFF) != (old_iflag&IXOFF)
|| (cflag&CRTSCTS) != (old_cflag&CRTSCTS)) {
arg = 0;
if (iflag&IXOFF)
arg |= DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
else
arg &= ~DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
if (cflag&CRTSCTS) {
arg |= DIGI_INPUT_FLOW_CONTROL_RTS;
/* On USB-4 it is necessary to assert RTS prior */
/* to selecting RTS input flow control. */
buf[i++] = DIGI_CMD_SET_RTS_SIGNAL;
buf[i++] = priv->dp_port_num;
buf[i++] = DIGI_RTS_ACTIVE;
buf[i++] = 0;
} else {
arg &= ~DIGI_INPUT_FLOW_CONTROL_RTS;
}
buf[i++] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set output flow control */
if ((iflag & IXON) != (old_iflag & IXON)
|| (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
arg = 0;
if (iflag & IXON)
arg |= DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
else
arg &= ~DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
if (cflag & CRTSCTS) {
arg |= DIGI_OUTPUT_FLOW_CONTROL_CTS;
} else {
arg &= ~DIGI_OUTPUT_FLOW_CONTROL_CTS;
tty->hw_stopped = 0;
}
buf[i++] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
/* set receive enable/disable */
if ((cflag & CREAD) != (old_cflag & CREAD)) {
if (cflag & CREAD)
arg = DIGI_ENABLE;
else
arg = DIGI_DISABLE;
buf[i++] = DIGI_CMD_RECEIVE_ENABLE;
buf[i++] = priv->dp_port_num;
buf[i++] = arg;
buf[i++] = 0;
}
ret = digi_write_oob_command(port, buf, i, 1);
if (ret != 0)
dev_dbg(dev, "digi_set_termios: write oob failed, ret=%d\n", ret);
tty_encode_baud_rate(tty, baud, baud);
}
static void digi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned char buf[4];
buf[0] = DIGI_CMD_BREAK_CONTROL;
buf[1] = 2; /* length */
buf[2] = break_state ? 1 : 0;
buf[3] = 0; /* pad */
digi_write_inb_command(port, buf, 4, 0);
}
static int digi_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
val = priv->dp_modem_signals;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return val;
}
static int digi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&priv->dp_port_lock, flags);
val = (priv->dp_modem_signals & ~clear) | set;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return digi_set_modem_signals(port, val, 1);
}
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int ret, data_len, new_len;
struct digi_port *priv = usb_get_serial_port_data(port);
unsigned char *data = port->write_urb->transfer_buffer;
unsigned long flags = 0;
dev_dbg(&port->dev,
"digi_write: TOP: port=%d, count=%d, in_interrupt=%ld\n",
priv->dp_port_num, count, in_interrupt());
/* copy user data (which can sleep) before getting spin lock */
count = min(count, port->bulk_out_size-2);
count = min(64, count);
/* be sure only one write proceeds at a time */
/* there are races on the port private buffer */
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* wait for urb status clear to submit another urb */
if (priv->dp_write_urb_in_use) {
/* buffer data if count is 1 (probably put_char) if possible */
if (count == 1 && priv->dp_out_buf_len < DIGI_OUT_BUF_SIZE) {
priv->dp_out_buf[priv->dp_out_buf_len++] = *buf;
new_len = 1;
} else {
new_len = 0;
}
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return new_len;
}
/* allow space for any buffered data and for new data, up to */
/* transfer buffer size - 2 (for command and length bytes) */
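/*
 * Resulting transfer buffer layout (a sketch):
 *   data[0]  DIGI_CMD_SEND_DATA
 *   data[1]  data_len
 *   data[2+] any buffered bytes, then the new bytes from buf
 */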
new_len = min(count, port->bulk_out_size-2-priv->dp_out_buf_len);
data_len = new_len + priv->dp_out_buf_len;
if (data_len == 0) {
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
return 0;
}
port->write_urb->transfer_buffer_length = data_len+2;
*data++ = DIGI_CMD_SEND_DATA;
*data++ = data_len;
/* copy in buffered data first */
memcpy(data, priv->dp_out_buf, priv->dp_out_buf_len);
data += priv->dp_out_buf_len;
/* copy in new data */
memcpy(data, buf, new_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
ret = new_len;
priv->dp_out_buf_len = 0;
}
/* return length of new data written, or error */
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
if (ret < 0)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
dev_dbg(&port->dev, "digi_write: returning %d\n", ret);
return ret;
}
static void digi_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial;
struct digi_port *priv;
struct digi_serial *serial_priv;
int ret = 0;
int status = urb->status;
/* port and serial sanity check */
if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
pr_err("%s: port or port->private is NULL, status=%d\n",
__func__, status);
return;
}
serial = port->serial;
if (serial == NULL || (serial_priv = usb_get_serial_data(serial)) == NULL) {
dev_err(&port->dev,
"%s: serial or serial->private is NULL, status=%d\n",
__func__, status);
return;
}
/* handle oob callback */
if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
dev_dbg(&port->dev, "digi_write_bulk_callback: oob callback\n");
spin_lock(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
wake_up_interruptible(&port->write_wait);
spin_unlock(&priv->dp_port_lock);
return;
}
/* try to send any buffered data on this port */
spin_lock(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
if (priv->dp_out_buf_len > 0) {
*((unsigned char *)(port->write_urb->transfer_buffer))
= (unsigned char)DIGI_CMD_SEND_DATA;
*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
= (unsigned char)priv->dp_out_buf_len;
port->write_urb->transfer_buffer_length =
priv->dp_out_buf_len + 2;
memcpy(port->write_urb->transfer_buffer + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
priv->dp_write_urb_in_use = 1;
priv->dp_out_buf_len = 0;
}
}
/* wake up processes sleeping on writes immediately */
tty_port_tty_wakeup(&port->port);
/* also queue up a wakeup at scheduler time, in case we */
/* lost the race in write_chan(). */
schedule_work(&priv->dp_wakeup_work);
spin_unlock(&priv->dp_port_lock);
if (ret && ret != -EPERM)
dev_err_console(port,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
}
static int digi_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
int room;
unsigned long flags = 0;
spin_lock_irqsave(&priv->dp_port_lock, flags);
if (priv->dp_write_urb_in_use)
room = 0;
else
room = port->bulk_out_size - 2 - priv->dp_out_buf_len;
spin_unlock_irqrestore(&priv->dp_port_lock, flags);
dev_dbg(&port->dev, "digi_write_room: port=%d, room=%d\n", priv->dp_port_num, room);
return room;
}
static int digi_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct digi_port *priv = usb_get_serial_port_data(port);
if (priv->dp_write_urb_in_use) {
dev_dbg(&port->dev, "digi_chars_in_buffer: port=%d, chars=%d\n",
priv->dp_port_num, port->bulk_out_size - 2);
/* return(port->bulk_out_size - 2); */
return 256;
} else {
dev_dbg(&port->dev, "digi_chars_in_buffer: port=%d, chars=%d\n",
priv->dp_port_num, priv->dp_out_buf_len);
return priv->dp_out_buf_len;
}
}
static void digi_dtr_rts(struct usb_serial_port *port, int on)
{
/* Adjust DTR and RTS */
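/* on is 0 or 1, so this asserts or drops both signals at once */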
digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
}
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int ret;
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
/* be sure the device is started up */
if (digi_startup_device(port->serial) != 0)
return -ENXIO;
/* read modem signals automatically whenever they change */
buf[0] = DIGI_CMD_READ_INPUT_SIGNALS;
buf[1] = priv->dp_port_num;
buf[2] = DIGI_ENABLE;
buf[3] = 0;
/* flush fifos */
buf[4] = DIGI_CMD_IFLUSH_FIFO;
buf[5] = priv->dp_port_num;
buf[6] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
buf[7] = 0;
ret = digi_write_oob_command(port, buf, 8, 1);
if (ret != 0)
dev_dbg(&port->dev, "digi_open: write oob failed, ret=%d\n", ret);
/* set termios settings */
if (tty) {
not_termios.c_cflag = ~tty->termios.c_cflag;
not_termios.c_iflag = ~tty->termios.c_iflag;
digi_set_termios(tty, port, &not_termios);
}
return 0;
}
static void digi_close(struct usb_serial_port *port)
{
DEFINE_WAIT(wait);
int ret;
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
mutex_lock(&port->serial->disc_mutex);
/* if disconnected, just clear flags */
if (port->serial->disconnected)
goto exit;
/* FIXME: Transmit idle belongs in the wait_until_sent path */
digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
/* disable input flow control */
buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
buf[1] = priv->dp_port_num;
buf[2] = DIGI_DISABLE;
buf[3] = 0;
/* disable output flow control */
buf[4] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
buf[5] = priv->dp_port_num;
buf[6] = DIGI_DISABLE;
buf[7] = 0;
/* disable reading modem signals automatically */
buf[8] = DIGI_CMD_READ_INPUT_SIGNALS;
buf[9] = priv->dp_port_num;
buf[10] = DIGI_DISABLE;
buf[11] = 0;
/* disable receive */
buf[12] = DIGI_CMD_RECEIVE_ENABLE;
buf[13] = priv->dp_port_num;
buf[14] = DIGI_DISABLE;
buf[15] = 0;
/* flush fifos */
buf[16] = DIGI_CMD_IFLUSH_FIFO;
buf[17] = priv->dp_port_num;
buf[18] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
buf[19] = 0;
ret = digi_write_oob_command(port, buf, 20, 0);
if (ret != 0)
dev_dbg(&port->dev, "digi_close: write oob failed, ret=%d\n",
ret);
/* wait for final commands on oob port to complete */
prepare_to_wait(&priv->dp_flush_wait, &wait,
TASK_INTERRUPTIBLE);
schedule_timeout(DIGI_CLOSE_TIMEOUT);
finish_wait(&priv->dp_flush_wait, &wait);
/* shutdown any outstanding bulk writes */
usb_kill_urb(port->write_urb);
exit:
spin_lock_irq(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
wake_up_interruptible(&priv->dp_close_wait);
spin_unlock_irq(&priv->dp_port_lock);
mutex_unlock(&port->serial->disc_mutex);
}
/*
* Digi Startup Device
*
* Starts reads on all ports. Must be called AFTER startup, with
* urbs initialized. Returns 0 if successful, non-zero error otherwise.
*/
static int digi_startup_device(struct usb_serial *serial)
{
int i, ret = 0;
struct digi_serial *serial_priv = usb_get_serial_data(serial);
struct usb_serial_port *port;
/* be sure this happens exactly once */
spin_lock(&serial_priv->ds_serial_lock);
if (serial_priv->ds_device_started) {
spin_unlock(&serial_priv->ds_serial_lock);
return 0;
}
serial_priv->ds_device_started = 1;
spin_unlock(&serial_priv->ds_serial_lock);
/* start reading from each bulk in endpoint for the device */
/* set USB_DISABLE_SPD flag for write bulk urbs */
for (i = 0; i < serial->type->num_ports + 1; i++) {
port = serial->port[i];
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret != 0) {
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, i);
break;
}
}
return ret;
}
static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
{
struct digi_port *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
spin_lock_init(&priv->dp_port_lock);
priv->dp_port_num = port_num;
init_waitqueue_head(&priv->dp_transmit_idle_wait);
init_waitqueue_head(&priv->dp_flush_wait);
init_waitqueue_head(&priv->dp_close_wait);
INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
priv->dp_port = port;
init_waitqueue_head(&port->write_wait);
usb_set_serial_port_data(port, priv);
return 0;
}
static int digi_startup(struct usb_serial *serial)
{
struct digi_serial *serial_priv;
int ret;
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
return -ENOMEM;
spin_lock_init(&serial_priv->ds_serial_lock);
serial_priv->ds_oob_port_num = serial->type->num_ports;
serial_priv->ds_oob_port = serial->port[serial_priv->ds_oob_port_num];
ret = digi_port_init(serial_priv->ds_oob_port,
serial_priv->ds_oob_port_num);
if (ret) {
kfree(serial_priv);
return ret;
}
usb_set_serial_data(serial, serial_priv);
return 0;
}
static void digi_disconnect(struct usb_serial *serial)
{
int i;
/* stop reads and writes on all ports */
for (i = 0; i < serial->type->num_ports + 1; i++) {
usb_kill_urb(serial->port[i]->read_urb);
usb_kill_urb(serial->port[i]->write_urb);
}
}
static void digi_release(struct usb_serial *serial)
{
struct digi_serial *serial_priv;
struct digi_port *priv;
serial_priv = usb_get_serial_data(serial);
priv = usb_get_serial_port_data(serial_priv->ds_oob_port);
kfree(priv);
kfree(serial_priv);
}
static int digi_port_probe(struct usb_serial_port *port)
{
return digi_port_init(port, port->port_number);
}
static int digi_port_remove(struct usb_serial_port *port)
{
struct digi_port *priv;
priv = usb_get_serial_port_data(port);
kfree(priv);
return 0;
}
static void digi_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct digi_port *priv;
struct digi_serial *serial_priv;
int ret;
int status = urb->status;
/* port sanity check, do not resubmit if port is not valid */
if (port == NULL)
return;
priv = usb_get_serial_port_data(port);
if (priv == NULL) {
dev_err(&port->dev, "%s: port->private is NULL, status=%d\n",
__func__, status);
return;
}
if (port->serial == NULL ||
(serial_priv = usb_get_serial_data(port->serial)) == NULL) {
dev_err(&port->dev, "%s: serial is bad or serial->private "
"is NULL, status=%d\n", __func__, status);
return;
}
/* do not resubmit urb if it has any status error */
if (status) {
dev_err(&port->dev,
"%s: nonzero read bulk status: status=%d, port=%d\n",
__func__, status, priv->dp_port_num);
return;
}
/* handle oob or inb callback, do not resubmit if error */
if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
if (digi_read_oob_callback(urb) != 0)
return;
} else {
if (digi_read_inb_callback(urb) != 0)
return;
}
/* continue read */
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
"%s: failed resubmitting urb, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
}
}
/*
* Digi Read INB Callback
*
* Digi Read INB Callback handles reads on the in band ports, sending
* the data on to the tty subsystem. When called we know port and
* port->private are not NULL and port->serial has been validated.
* It returns 0 if successful, 1 if successful but the port is
* throttled, and -1 if the sanity checks failed.
*/
static int digi_read_inb_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct digi_port *priv = usb_get_serial_port_data(port);
int opcode = ((unsigned char *)urb->transfer_buffer)[0];
int len = ((unsigned char *)urb->transfer_buffer)[1];
int port_status = ((unsigned char *)urb->transfer_buffer)[2];
unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
int flag, throttled;
int status = urb->status;
/* do not process callbacks on closed ports */
/* but do continue the read chain */
if (urb->status == -ENOENT)
return 0;
/* short/multiple packet check */
if (urb->actual_length != len + 2) {
dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
"status=%d, port=%d, opcode=%d, len=%d, "
"actual_length=%d, status=%d\n", __func__, status,
priv->dp_port_num, opcode, len, urb->actual_length,
port_status);
return -1;
}
spin_lock(&priv->dp_port_lock);
/* check for throttle; if set, do not resubmit read urb */
/* indicate the read chain needs to be restarted on unthrottle */
throttled = priv->dp_throttled;
if (throttled)
priv->dp_throttle_restart = 1;
/* receive data */
if (opcode == DIGI_CMD_RECEIVE_DATA) {
/* get flag from port_status */
flag = 0;
/* overrun is special, not associated with a char */
if (port_status & DIGI_OVERRUN_ERROR)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
if (port_status & DIGI_BREAK_ERROR)
flag = TTY_BREAK;
else if (port_status & DIGI_PARITY_ERROR)
flag = TTY_PARITY;
else if (port_status & DIGI_FRAMING_ERROR)
flag = TTY_FRAME;
/* data length is len-1 (one byte of len is port_status) */
--len;
if (len > 0) {
tty_insert_flip_string_fixed_flag(&port->port, data,
flag, len);
tty_flip_buffer_push(&port->port);
}
}
spin_unlock(&priv->dp_port_lock);
if (opcode == DIGI_CMD_RECEIVE_DISABLE)
dev_dbg(&port->dev, "%s: got RECEIVE_DISABLE\n", __func__);
else if (opcode != DIGI_CMD_RECEIVE_DATA)
dev_dbg(&port->dev, "%s: unknown opcode: %d\n", __func__, opcode);
return throttled ? 1 : 0;
}
/*
* Digi Read OOB Callback
*
* Digi Read OOB Callback handles reads on the out of band port.
* When called we know port and port->private are not NULL and
* the port->serial is valid. It returns 0 if successful, and
* -1 if the sanity checks failed.
*/
static int digi_read_oob_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = port->serial;
struct tty_struct *tty;
struct digi_port *priv = usb_get_serial_port_data(port);
int opcode, line, status, val;
int i;
unsigned int rts;
/* handle each oob command */
for (i = 0; i < urb->actual_length - 3;) {
opcode = ((unsigned char *)urb->transfer_buffer)[i++];
line = ((unsigned char *)urb->transfer_buffer)[i++];
status = ((unsigned char *)urb->transfer_buffer)[i++];
val = ((unsigned char *)urb->transfer_buffer)[i++];
dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
opcode, line, status, val);
if (status != 0 || line >= serial->type->num_ports)
continue;
port = serial->port[line];
priv = usb_get_serial_port_data(port);
if (priv == NULL)
return -1;
tty = tty_port_tty_get(&port->port);
rts = 0;
if (tty)
rts = tty->termios.c_cflag & CRTSCTS;
if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
spin_lock(&priv->dp_port_lock);
/* convert from digi flags to termiox flags */
if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
priv->dp_modem_signals |= TIOCM_CTS;
/* port must be open to use tty struct */
if (rts) {
tty->hw_stopped = 0;
tty_port_tty_wakeup(&port->port);
}
} else {
priv->dp_modem_signals &= ~TIOCM_CTS;
/* port must be open to use tty struct */
if (rts)
tty->hw_stopped = 1;
}
if (val & DIGI_READ_INPUT_SIGNALS_DSR)
priv->dp_modem_signals |= TIOCM_DSR;
else
priv->dp_modem_signals &= ~TIOCM_DSR;
if (val & DIGI_READ_INPUT_SIGNALS_RI)
priv->dp_modem_signals |= TIOCM_RI;
else
priv->dp_modem_signals &= ~TIOCM_RI;
if (val & DIGI_READ_INPUT_SIGNALS_DCD)
priv->dp_modem_signals |= TIOCM_CD;
else
priv->dp_modem_signals &= ~TIOCM_CD;
spin_unlock(&priv->dp_port_lock);
} else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
spin_lock(&priv->dp_port_lock);
priv->dp_transmit_idle = 1;
wake_up_interruptible(&priv->dp_transmit_idle_wait);
spin_unlock(&priv->dp_port_lock);
} else if (opcode == DIGI_CMD_IFLUSH_FIFO) {
wake_up_interruptible(&priv->dp_flush_wait);
}
tty_kref_put(tty);
}
return 0;
}
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
s0be/android_kernel_letv_msm8994 | drivers/md/dm.c | 428 | 64285 | /*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
#include "dm.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"
#ifdef CONFIG_PRINTK
/*
* ratelimit state to be used in DMXXX_LIMIT().
*/
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
*/
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
static DEFINE_IDR(_minor_idr);
static DEFINE_SPINLOCK(_minor_lock);
/*
* For bio-based dm.
* One of these is allocated per bio.
*/
struct dm_io {
struct mapped_device *md;
int error;
atomic_t io_count;
struct bio *bio;
unsigned long start_time;
spinlock_t endio_lock;
};
/*
* For request-based dm.
* One of these is allocated per request.
*/
struct dm_rq_target_io {
struct mapped_device *md;
struct dm_target *ti;
struct request *orig, clone;
int error;
union map_info info;
};
/*
* For request-based dm - the bio clones we allocate are embedded in these
* structs.
*
* We allocate these with bio_alloc_bioset, using the front_pad parameter when
* the bioset is created - this means the bio has to come at the end of the
* struct.
*/
struct dm_rq_clone_bio_info {
struct bio *orig;
struct dm_rq_target_io *tio;
struct bio clone;
};
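/*
 * Layout sketch: because the embedded clone bio is the final member,
 * container_of(bio, struct dm_rq_clone_bio_info, clone) recovers the
 * containing info struct from a bio pointer without a separate
 * allocation.
 */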
union map_info *dm_get_mapinfo(struct bio *bio)
{
if (bio && bio->bi_private)
return &((struct dm_target_io *)bio->bi_private)->info;
return NULL;
}
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
if (rq && rq->end_io_data)
return &((struct dm_rq_target_io *)rq->end_io_data)->info;
return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define MINOR_ALLOCED ((void *)-1)
/*
* Bits for the md->flags field.
*/
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
/*
* Work processed by per-device workqueue.
*/
struct mapped_device {
struct rw_semaphore io_lock;
struct mutex suspend_lock;
rwlock_t map_lock;
atomic_t holders;
atomic_t open_count;
unsigned long flags;
struct request_queue *queue;
unsigned type;
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
struct target_type *immutable_target_type;
struct gendisk *disk;
char name[16];
void *interface_ptr;
/*
* A list of ios that arrived while we were suspended.
*/
atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
spinlock_t deferred_lock;
/*
* Processing queue (flush)
*/
struct workqueue_struct *wq;
/*
* The current mapping.
*/
struct dm_table *map;
/*
* io objects are allocated from here.
*/
mempool_t *io_pool;
struct bio_set *bs;
/*
* Event handling.
*/
atomic_t event_nr;
wait_queue_head_t eventq;
atomic_t uevent_seq;
struct list_head uevent_list;
spinlock_t uevent_lock; /* Protect access to uevent_list */
/*
* freeze/thaw support requires holding onto a super block
*/
struct super_block *frozen_sb;
struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
};
/*
* For mempool pre-allocation at table loading time.
*/
struct dm_md_mempools {
mempool_t *io_pool;
struct bio_set *bs;
};
#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static int __init local_init(void)
{
int r = -ENOMEM;
/* allocate a slab for the dm_ios */
_io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
return r;
_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
if (!_rq_tio_cache)
goto out_free_io_cache;
r = dm_uevent_init();
if (r)
goto out_free_rq_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
if (r < 0)
goto out_uevent_exit;
if (!_major)
_major = r;
return 0;
out_uevent_exit:
dm_uevent_exit();
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
kmem_cache_destroy(_io_cache);
return r;
}
static void local_exit(void)
{
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
_major = 0;
DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
local_init,
dm_target_init,
dm_linear_init,
dm_stripe_init,
dm_io_init,
dm_kcopyd_init,
dm_interface_init,
};
static void (*_exits[])(void) = {
local_exit,
dm_target_exit,
dm_linear_exit,
dm_stripe_exit,
dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
};
static int __init dm_init(void)
{
const int count = ARRAY_SIZE(_inits);
int r, i;
for (i = 0; i < count; i++) {
r = _inits[i]();
if (r)
goto bad;
}
return 0;
bad:
while (i--)
_exits[i]();
return r;
}
static void __exit dm_exit(void)
{
int i = ARRAY_SIZE(_exits);
while (i--)
_exits[i]();
/*
* Should be empty by this point.
*/
idr_destroy(&_minor_idr);
}
/*
* Block device functions
*/
int dm_deleting_md(struct mapped_device *md)
{
return test_bit(DMF_DELETING, &md->flags);
}
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
spin_lock(&_minor_lock);
md = bdev->bd_disk->private_data;
if (!md)
goto out;
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md)) {
md = NULL;
goto out;
}
dm_get(md);
atomic_inc(&md->open_count);
out:
spin_unlock(&_minor_lock);
return md ? 0 : -ENXIO;
}
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
spin_lock(&_minor_lock);
atomic_dec(&md->open_count);
dm_put(md);
spin_unlock(&_minor_lock);
}
int dm_open_count(struct mapped_device *md)
{
return atomic_read(&md->open_count);
}
/*
* Guarantees nothing is using the device before it's deleted.
*/
int dm_lock_for_deletion(struct mapped_device *md)
{
int r = 0;
spin_lock(&_minor_lock);
if (dm_open_count(md))
r = -EBUSY;
else
set_bit(DMF_DELETING, &md->flags);
spin_unlock(&_minor_lock);
return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
struct dm_table *map;
struct dm_target *tgt;
int r = -ENOTTY;
retry:
map = dm_get_live_table(md);
if (!map || !dm_table_get_size(map))
goto out;
/* We only support devices that have a single target */
if (dm_table_get_num_targets(map) != 1)
goto out;
tgt = dm_table_get_target(map, 0);
if (dm_suspended_md(md)) {
r = -EAGAIN;
goto out;
}
if (tgt->type->ioctl)
r = tgt->type->ioctl(tgt, cmd, arg);
out:
dm_table_put(map);
if (r == -ENOTCONN) {
msleep(10);
goto retry;
}
return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
return mempool_alloc(md->io_pool, GFP_NOIO);
}
static void free_io(struct mapped_device *md, struct dm_io *io)
{
mempool_free(io, md->io_pool);
}
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
bio_put(&tio->clone);
}
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
gfp_t gfp_mask)
{
return mempool_alloc(md->io_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
{
mempool_free(tio, tio->md->io_pool);
}
static int md_in_flight(struct mapped_device *md)
{
return atomic_read(&md->pending[READ]) +
atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
int cpu;
int rw = bio_data_dir(io->bio);
io->start_time = jiffies;
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
}
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
int pending, cpu;
int rw = bio_data_dir(bio);
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
part_stat_unlock();
/*
* After this is decremented the bio must not be touched if it is
* a flush.
*/
pending = atomic_dec_return(&md->pending[rw]);
atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
pending += atomic_read(&md->pending[rw^0x1]);
/* nudge anyone waiting on suspend queue */
if (!pending)
wake_up(&md->wait);
}
/*
* Add the bio to the list of deferred io.
*/
static void queue_io(struct mapped_device *md, struct bio *bio)
{
unsigned long flags;
spin_lock_irqsave(&md->deferred_lock, flags);
bio_list_add(&md->deferred, bio);
spin_unlock_irqrestore(&md->deferred_lock, flags);
queue_work(md->wq, &md->work);
}
/*
* Everyone (including functions in this file), should use this
* function to access the md->map field, and make sure they call
* dm_table_put() when finished.
*/
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
struct dm_table *t;
unsigned long flags;
read_lock_irqsave(&md->map_lock, flags);
t = md->map;
if (t)
dm_table_get(t);
read_unlock_irqrestore(&md->map_lock, flags);
return t;
}
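/*
 * Illustrative sketch (not part of the original file): the expected
 * get/put pattern around the live table is:
 *
 *	struct dm_table *t = dm_get_live_table(md);
 *	if (t) {
 *		// ... inspect or walk the table ...
 *		dm_table_put(t);
 *	}
 *
 * Dropping the reference promptly matters because an old table can only
 * be destroyed once all such references are gone.
 */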
/*
* Get the geometry associated with a dm device
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
*geo = md->geometry;
return 0;
}
/*
* Set the geometry of a device.
*/
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
if (geo->start > sz) {
DMWARN("Start sector is beyond the geometry limits.");
return -EINVAL;
}
md->geometry = *geo;
return 0;
}
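/*
 * Worked example (illustrative): with geo->cylinders = 64, geo->heads = 4
 * and geo->sectors = 32, sz = 64 * 4 * 32 = 8192 sectors, so a geo->start
 * beyond sector 8192 is rejected with -EINVAL.
 */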
/*-----------------------------------------------------------------
* CRUD START:
* A more elegant solution is in the works that uses the queue
* merge fn; unfortunately there are a couple of changes to
* the block layer that I want to make for this. So in the
* interests of getting something for people to use I give
* you this clearly demarcated crap.
*---------------------------------------------------------------*/
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necessary.
*/
static void dec_pending(struct dm_io *io, int error)
{
unsigned long flags;
int io_error;
struct bio *bio;
struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
if (!(io->error > 0 && __noflush_suspending(md)))
io->error = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
if (atomic_dec_and_test(&io->io_count)) {
if (io->error == DM_ENDIO_REQUEUE) {
/*
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
bio_list_add_head(&md->deferred, io->bio);
else
/* noflush suspend was interrupted. */
io->error = -EIO;
spin_unlock_irqrestore(&md->deferred_lock, flags);
}
io_error = io->error;
bio = io->bio;
end_io_acct(io);
free_io(md, io);
if (io_error == DM_ENDIO_REQUEUE)
return;
if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_FLUSH.
*/
bio->bi_rw &= ~REQ_FLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
}
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
if (!bio_flagged(bio, BIO_UPTODATE) && !error)
error = -EIO;
if (endio) {
r = endio(tio->ti, bio, error);
if (r < 0 || r == DM_ENDIO_REQUEUE)
/*
* error and requeue request are handled
* in dec_pending().
*/
error = r;
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the io */
return;
else if (r) {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
}
}
free_tio(md, tio);
dec_pending(io, error);
}
/*
* Partial completion handling for request-based dm
*/
static void end_clone_bio(struct bio *clone, int error)
{
struct dm_rq_clone_bio_info *info = clone->bi_private;
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
unsigned int nr_bytes = info->orig->bi_size;
bio_put(clone);
if (tio->error)
/*
* An error has already been detected on the request.
* Once an error has occurred, just let clone->end_io() handle
* the remainder.
*/
return;
else if (error) {
/*
* Don't report the error to the upper layer yet.
* The error handling decision is made by the target driver
* when the request is completed.
*/
tio->error = error;
return;
}
/*
* I/O for the bio successfully completed.
* Report the data completion to the upper layer.
*/
/*
* bios are processed from the head of the list.
* So the completing bio should always be rq->bio.
* If it's not, something is wrong.
*/
if (tio->orig->bio != bio)
DMERR("bio completion is going in the middle of the request");
/*
* Update the original request.
* Do not use blk_end_request() here, because it may complete
* the original request before the clone, and break the ordering.
*/
blk_update_request(tio->orig, 0, nr_bytes);
}
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
* Or do dm_get() before calling this function and dm_put() later.
*/
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
if (!md_in_flight(md))
wake_up(&md->wait);
/*
* Run this off this callpath, as drivers could invoke end_io while
* inside their request_fn (and holding the queue lock). Calling
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
if (run_queue)
blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above.
*/
dm_put(md);
}
static void free_rq_clone(struct request *clone)
{
struct dm_rq_target_io *tio = clone->end_io_data;
blk_rq_unprep_clone(clone);
free_rq_tio(tio);
}
/*
* Complete the clone and the original request.
* Must be called without queue lock.
*/
void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
rq->errors = clone->errors;
rq->resid_len = clone->resid_len;
if (rq->sense)
/*
* We are using the sense buffer of the original
* request.
* So setting the length of the sense data is enough.
*/
rq->sense_len = clone->sense_len;
}
free_rq_clone(clone);
blk_end_request_all(rq, error);
rq_completed(md, rw, true);
}
static void dm_unprep_request(struct request *rq)
{
struct request *clone = rq->special;
rq->special = NULL;
rq->cmd_flags &= ~REQ_DONTPREP;
free_rq_clone(clone);
}
/*
* Requeue the original request of a clone.
*/
void dm_requeue_unmapped_request(struct request *clone)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request_queue *q = rq->q;
unsigned long flags;
dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)
{
blk_stop_queue(q);
}
static void stop_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
static void __start_queue(struct request_queue *q)
{
if (blk_queue_stopped(q))
blk_start_queue(q);
}
static void start_queue(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_done(struct request *clone, int error, bool mapped)
{
int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
dm_request_endio_fn rq_end_io = NULL;
if (tio->ti) {
rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
if (r <= 0)
/* The target wants to complete the I/O */
dm_end_request(clone, r);
else if (r == DM_ENDIO_INCOMPLETE)
/* The target will handle the I/O */
return;
else if (r == DM_ENDIO_REQUEUE)
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
else {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
}
}
/*
* Request completion handler for request-based dm
*/
static void dm_softirq_done(struct request *rq)
{
bool mapped = true;
struct request *clone = rq->completion_data;
struct dm_rq_target_io *tio = clone->end_io_data;
if (rq->cmd_flags & REQ_FAILED)
mapped = false;
dm_done(clone, tio->error, mapped);
}
/*
* Complete the clone and the original request with the error status
* through softirq context.
*/
static void dm_complete_request(struct request *clone, int error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
tio->error = error;
rq->completion_data = clone;
blk_complete_request(rq);
}
/*
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
* This may be used when the target's map_rq() function fails.
*/
void dm_kill_unmapped_request(struct request *clone, int error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
* Called with the queue lock held
*/
static void end_clone_request(struct request *clone, int error)
{
/*
* Just clean up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* actually freed here, because it was allocated
* from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
*/
__blk_put_request(clone->q, clone);
/*
* Actual request completion is done in a softirq context which doesn't
* hold the queue lock. Otherwise, deadlock could occur because:
* - another request may be submitted by the upper level driver
* of the stacking during the completion
* - the submission which requires queue lock may be done
* against this queue
*/
dm_complete_request(clone, error);
}
/*
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
sector_t target_offset = dm_target_offset(ti, sector);
return ti->len - target_offset;
}
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
sector_t len = max_io_len_target_boundary(sector, ti);
sector_t offset, max_len;
/*
* Does the target need to split even further?
*/
if (ti->max_io_len) {
offset = dm_target_offset(ti, sector);
if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
max_len = sector_div(offset, ti->max_io_len);
else
max_len = offset & (ti->max_io_len - 1);
max_len = ti->max_io_len - max_len;
if (len > max_len)
len = max_len;
}
return len;
}
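/*
 * Worked example (illustrative): with a power-of-two ti->max_io_len of
 * 8 sectors and a target-relative offset of 13, max_len = 13 & 7 = 5 and
 * 8 - 5 = 3 sectors remain before the next boundary, so a longer bio at
 * that offset is capped to 3 sectors here.
 */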
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
if (len > UINT_MAX) {
DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
(unsigned long long)len, UINT_MAX);
ti->error = "Maximum size of target IO is too large";
return -EINVAL;
}
ti->max_io_len = (uint32_t) len;
return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
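/*
 * Illustrative sketch (hypothetical target, not part of the original
 * file): a chunked target would typically call this from its ctr once
 * its chunk size in sectors is known:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_sectors);
 *	if (r)
 *		return r;	// ti->error has already been set
 */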
static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct mapped_device *md;
struct bio *clone = &tio->clone;
struct dm_target *ti = tio->ti;
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
/*
* Map the clone. If r == 0 we don't need to do
* anything, the target has assumed ownership of
* this io.
*/
atomic_inc(&tio->io->io_count);
sector = clone->bi_sector;
r = ti->type->map(ti, clone);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
tio->io->bio->bi_bdev->bd_dev, sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
/* error the io and bail out, or requeue it if needed */
md = tio->io->md;
dec_pending(tio->io, r);
free_tio(md, tio);
} else if (r) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
}
struct clone_info {
struct mapped_device *md;
struct dm_table *map;
struct bio *bio;
struct dm_io *io;
sector_t sector;
sector_t sector_count;
unsigned short idx;
};
static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
bio->bi_sector = sector;
bio->bi_size = to_bytes(len);
}
static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
{
bio->bi_idx = idx;
bio->bi_vcnt = idx + bv_count;
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
}
static void clone_bio_integrity(struct bio *bio, struct bio *clone,
unsigned short idx, unsigned len, unsigned offset,
unsigned trim)
{
if (!bio_integrity(bio))
return;
bio_integrity_clone(clone, bio, GFP_NOIO);
if (trim)
bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
}
/*
* Creates a little bio that just does part of a bvec.
*/
static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned short idx,
unsigned offset, unsigned len)
{
struct bio *clone = &tio->clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
*clone->bi_io_vec = *bv;
bio_setup_sector(clone, sector, len);
clone->bi_bdev = bio->bi_bdev;
clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = 1;
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
clone->bi_flags |= 1 << BIO_CLONED;
clone_bio_integrity(bio, clone, idx, len, offset, 1);
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned short idx,
unsigned short bv_count, unsigned len)
{
struct bio *clone = &tio->clone;
unsigned trim = 0;
__bio_clone(clone, bio);
bio_setup_sector(clone, sector, len);
bio_setup_bv(clone, idx, bv_count);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
trim = 1;
clone_bio_integrity(bio, clone, idx, len, 0, trim);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
struct dm_target *ti, int nr_iovecs,
unsigned target_bio_nr)
{
struct dm_target_io *tio;
struct bio *clone;
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
tio = container_of(clone, struct dm_target_io, clone);
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
tio->target_bio_nr = target_bio_nr;
return tio;
}
static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target *ti,
unsigned target_bio_nr, sector_t len)
{
struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
struct bio *clone = &tio->clone;
/*
* Discard requests require the bio's inline iovecs be initialized.
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
__bio_clone(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, len);
__map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, sector_t len)
{
unsigned target_bio_nr;
for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}
static int __send_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
return 0;
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
sector_t sector, int nr_iovecs,
unsigned short idx, unsigned short bv_count,
unsigned offset, unsigned len,
unsigned split_bvec)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
unsigned target_bio_nr;
unsigned num_target_bios = 1;
/*
* Does the target want to receive duplicate copies of the bio?
*/
if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
num_target_bios = ti->num_write_bios(ti, bio);
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
if (split_bvec)
clone_split_bio(tio, bio, sector, idx, offset, len);
else
clone_bio(tio, bio, sector, idx, bv_count, len);
__map_bio(tio);
}
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
static unsigned get_num_discard_bios(struct dm_target *ti)
{
return ti->num_discard_bios;
}
static unsigned get_num_write_same_bios(struct dm_target *ti)
{
return ti->num_write_same_bios;
}
typedef bool (*is_split_required_fn)(struct dm_target *ti);
static bool is_split_required_for_discard(struct dm_target *ti)
{
return ti->split_discard_bios;
}
static int __send_changing_extent_only(struct clone_info *ci,
get_num_bios_fn get_num_bios,
is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
unsigned num_bios;
do {
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
/*
* Even though the device advertised support for this type of
* request, that does not mean every target supports it, and
* reconfiguration might also have changed that since the
* check was performed.
*/
num_bios = get_num_bios ? get_num_bios(ti) : 0;
if (!num_bios)
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
__send_duplicate_bios(ci, ti, num_bios, len);
ci->sector += len;
} while (ci->sector_count -= len);
return 0;
}
static int __send_discard(struct clone_info *ci)
{
return __send_changing_extent_only(ci, get_num_discard_bios,
is_split_required_for_discard);
}
static int __send_write_same(struct clone_info *ci)
{
return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}
/*
* Find maximum number of sectors / bvecs we can process with a single bio.
*/
static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
{
struct bio *bio = ci->bio;
sector_t bv_len, total_len = 0;
for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
if (bv_len > max)
break;
max -= bv_len;
total_len += bv_len;
}
return total_len;
}
static int __split_bvec_across_targets(struct clone_info *ci,
struct dm_target *ti, sector_t max)
{
struct bio *bio = ci->bio;
struct bio_vec *bv = bio->bi_io_vec + ci->idx;
sector_t remaining = to_sector(bv->bv_len);
unsigned offset = 0;
sector_t len;
do {
if (offset) {
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
max = max_io_len(ci->sector, ti);
}
len = min(remaining, max);
__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
bv->bv_offset + offset, len, 1);
ci->sector += len;
ci->sector_count -= len;
offset += to_bytes(len);
} while (remaining -= len);
ci->idx++;
return 0;
}
/*
* Select the correct strategy for processing a non-flush bio.
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
struct bio *bio = ci->bio;
struct dm_target *ti;
sector_t len, max;
int idx;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
return __send_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
max = max_io_len(ci->sector, ti);
/*
* Optimise for the simple case where we can do all of
* the remaining io with a single clone.
*/
if (ci->sector_count <= max) {
__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
ci->idx, bio->bi_vcnt - ci->idx, 0,
ci->sector_count, 0);
ci->sector_count = 0;
return 0;
}
/*
* There are some bvecs that don't span targets.
* Do as many of these as possible.
*/
if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
len = __len_within_target(ci, max, &idx);
__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
ci->idx, idx - ci->idx, 0, len, 0);
ci->sector += len;
ci->sector_count -= len;
ci->idx = idx;
return 0;
}
/*
* Handle a bvec that must be split between two or more targets.
*/
return __split_bvec_across_targets(ci, ti, max);
}
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
struct clone_info ci;
int error = 0;
ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
bio_io_error(bio);
return;
}
ci.md = md;
ci.io = alloc_io(md);
ci.io->error = 0;
atomic_set(&ci.io->io_count, 1);
ci.io->bio = bio;
ci.io->md = md;
spin_lock_init(&ci.io->endio_lock);
ci.sector = bio->bi_sector;
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
if (bio->bi_rw & REQ_FLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error)
error = __split_and_process_non_flush(&ci);
}
/* drop the extra reference count */
dec_pending(ci.io, error);
dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
* CRUD END
*---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
sector_t max_sectors;
int max_size = 0;
if (unlikely(!map))
goto out;
ti = dm_table_find_target(map, bvm->bi_sector);
if (!dm_target_is_valid(ti))
goto out_table;
/*
* Find maximum amount of I/O that won't need splitting
*/
max_sectors = min(max_io_len(bvm->bi_sector, ti),
(sector_t) BIO_MAX_SECTORS);
max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
if (max_size < 0)
max_size = 0;
/*
* merge_bvec_fn() returns number of bytes
* it can accept at this offset
* max is precomputed maximal io size
*/
if (max_size && ti->type->merge)
max_size = ti->type->merge(ti, bvm, biovec, max_size);
/*
* If the target doesn't support merge method and some of the devices
* provided their merge_bvec method (we know this by looking at
* queue_max_hw_sectors), then we can't allow bios with multiple vector
* entries. So always set max_size to 0, and the code below allows
* just one page.
*/
else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
max_size = 0;
out_table:
dm_table_put(map);
out:
/*
* Always allow an entire first page
*/
if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
max_size = biovec->bv_len;
return max_size;
}
/*
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
static void _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
int cpu;
down_read(&md->io_lock);
cpu = part_stat_lock();
part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
part_stat_unlock();
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
up_read(&md->io_lock);
if (bio_rw(bio) != READA)
queue_io(md, bio);
else
bio_io_error(bio);
return;
}
__split_and_process_bio(md, bio);
up_read(&md->io_lock);
return;
}
static int dm_request_based(struct mapped_device *md)
{
return blk_queue_stackable(md->queue);
}
static void dm_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
if (dm_request_based(md))
blk_queue_bio(q, bio);
else
_dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
{
int r;
if (blk_queue_io_stat(rq->q))
rq->cmd_flags |= REQ_IO_STAT;
rq->start_time = jiffies;
r = blk_insert_cloned_request(rq->q, rq);
if (r)
dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
struct dm_rq_target_io *tio = data;
struct dm_rq_clone_bio_info *info =
container_of(bio, struct dm_rq_clone_bio_info, clone);
info->orig = bio_orig;
info->tio = tio;
bio->bi_end_io = end_clone_bio;
bio->bi_private = info;
return 0;
}
static int setup_clone(struct request *clone, struct request *rq,
struct dm_rq_target_io *tio)
{
int r;
r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
dm_rq_bio_constructor, tio);
if (r)
return r;
clone->cmd = rq->cmd;
clone->cmd_len = rq->cmd_len;
clone->sense = rq->sense;
clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
return 0;
}
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
gfp_t gfp_mask)
{
struct request *clone;
struct dm_rq_target_io *tio;
tio = alloc_rq_tio(md, gfp_mask);
if (!tio)
return NULL;
tio->md = md;
tio->ti = NULL;
tio->orig = rq;
tio->error = 0;
memset(&tio->info, 0, sizeof(tio->info));
clone = &tio->clone;
if (setup_clone(clone, rq, tio)) {
/* -ENOMEM */
free_rq_tio(tio);
return NULL;
}
return clone;
}
/*
* Called with the queue lock held.
*/
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
struct request *clone;
if (unlikely(rq->special)) {
DMWARN("Already has something in rq->special.");
return BLKPREP_KILL;
}
clone = clone_rq(rq, md, GFP_ATOMIC);
if (!clone)
return BLKPREP_DEFER;
rq->special = clone;
rq->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
}
/*
* Returns:
* 0 : the request has been processed (not requeued)
* !0 : the request has been requeued
*/
static int map_request(struct dm_target *ti, struct request *clone,
struct mapped_device *md)
{
int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
tio->ti = ti;
r = ti->type->map_rq(ti, clone, &tio->info);
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
break;
case DM_MAPIO_REMAPPED:
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(tio->orig));
dm_dispatch_request(clone);
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
requeued = 1;
break;
default:
if (r > 0) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
/* The target wants to complete the I/O */
dm_kill_unmapped_request(clone, r);
break;
}
return requeued;
}
static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
{
struct request *clone;
blk_start_request(orig);
clone = orig->special;
atomic_inc(&md->pending[rq_data_dir(clone)]);
/*
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count by device opener,
* because the device may be closed during the request completion
* when all bios are completed.
* See the comment in rq_completed() too.
*/
dm_get(md);
return clone;
}
/*
* q->request_fn for request-based dm.
* Called with the queue lock held.
*/
static void dm_request_fn(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
struct request *rq, *clone;
sector_t pos;
/*
* For suspend, check blk_queue_stopped() and increment
* ->pending within a single queue_lock not to increment the
* number of in-flight I/Os after the queue is stopped in
* dm_suspend().
*/
while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
goto out;
/* always use block 0 to find the target for flushes for now */
pos = 0;
if (!(rq->cmd_flags & REQ_FLUSH))
pos = blk_rq_pos(rq);
ti = dm_table_find_target(map, pos);
if (!dm_target_is_valid(ti)) {
/*
* Must perform the setup that dm_done() requires
* before calling dm_kill_unmapped_request()
*/
DMERR_LIMIT("request attempted access beyond the end of device");
clone = dm_start_request(md, rq);
dm_kill_unmapped_request(clone, -EIO);
continue;
}
if (ti->type->busy && ti->type->busy(ti))
goto delay_and_out;
clone = dm_start_request(md, rq);
spin_unlock(q->queue_lock);
if (map_request(ti, clone, md))
goto requeued;
BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
}
goto out;
requeued:
BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
delay_and_out:
blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
}
int dm_underlying_device_busy(struct request_queue *q)
{
return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
static int dm_lld_busy(struct request_queue *q)
{
int r;
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_live_table(md);
if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
r = 1;
else
r = dm_table_any_busy_target(map);
dm_table_put(map);
return r;
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
struct mapped_device *md = congested_data;
struct dm_table *map;
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
map = dm_get_live_table(md);
if (map) {
/*
* Request-based dm cares only about its own queue when
* queried for the congestion status of the request_queue
*/
if (dm_request_based(md))
r = md->queue->backing_dev_info.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
dm_table_put(map);
}
}
return r;
}
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
idr_remove(&_minor_idr, minor);
spin_unlock(&_minor_lock);
}
/*
* See if the device with a specific minor # is free.
*/
static int specific_minor(int minor)
{
int r;
if (minor >= (1 << MINORBITS))
return -EINVAL;
idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
spin_unlock(&_minor_lock);
idr_preload_end();
if (r < 0)
return r == -ENOSPC ? -EBUSY : r;
return 0;
}
static int next_free_minor(int *minor)
{
int r;
idr_preload(GFP_KERNEL);
spin_lock(&_minor_lock);
r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
spin_unlock(&_minor_lock);
idr_preload_end();
if (r < 0)
return r;
*minor = r;
return 0;
}
static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
static void dm_init_md_queue(struct mapped_device *md)
{
/*
* Request-based dm devices cannot be stacked on top of bio-based dm
* devices. The type of this dm device has not been decided yet.
* The type is decided at the first table loading time.
* To prevent problematic device stacking, clear the queue flag
* for request stacking support until then.
*
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}
/*
* Allocate and initialise a blank device with a given minor.
*/
static struct mapped_device *alloc_dev(int minor)
{
int r;
struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
void *old_md;
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
}
if (!try_module_get(THIS_MODULE))
goto bad_module_get;
/* get a minor number for the dev */
if (minor == DM_ANY_MINOR)
r = next_free_minor(&minor);
else
r = specific_minor(minor);
if (r < 0)
goto bad_minor;
md->type = DM_TYPE_NONE;
init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
spin_lock_init(&md->deferred_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
atomic_set(&md->event_nr, 0);
atomic_set(&md->uevent_seq, 0);
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
goto bad_queue;
dm_init_md_queue(md);
md->disk = alloc_disk(1);
if (!md->disk)
goto bad_disk;
atomic_set(&md->pending[0], 0);
atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->fops = &dm_blk_dops;
md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush",
WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
if (!md->wq)
goto bad_thread;
md->bdev = bdget_disk(md->disk, 0);
if (!md->bdev)
goto bad_bdev;
bio_init(&md->flush_bio);
md->flush_bio.bi_bdev = md->bdev;
md->flush_bio.bi_rw = WRITE_FLUSH;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
spin_unlock(&_minor_lock);
BUG_ON(old_md != MINOR_ALLOCED);
return md;
bad_bdev:
destroy_workqueue(md->wq);
bad_thread:
del_gendisk(md->disk);
put_disk(md->disk);
bad_disk:
blk_cleanup_queue(md->queue);
bad_queue:
free_minor(minor);
bad_minor:
module_put(THIS_MODULE);
bad_module_get:
kfree(md);
return NULL;
}
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
if (md->io_pool)
mempool_destroy(md->io_pool);
if (md->bs)
bioset_free(md->bs);
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
free_minor(minor);
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
module_put(THIS_MODULE);
kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
if (md->io_pool && md->bs) {
/* The md already has necessary mempools. */
if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
/*
* Reload bioset because front_pad may have changed
* because a different table was loaded.
*/
bioset_free(md->bs);
md->bs = p->bs;
p->bs = NULL;
} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
* Note for the future: if you reload the bioset,
* prep-ed requests in the queue may still refer to
* bios from the old bioset, so you must walk
* through the queue to unprep them.
*/
}
goto out;
}
BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
out:
/* mempool bind completed, the table no longer needs any mempools */
dm_table_free_md_mempools(t);
}
/*
* Bind a table to the device.
*/
static void event_callback(void *context)
{
unsigned long flags;
LIST_HEAD(uevents);
struct mapped_device *md = (struct mapped_device *) context;
spin_lock_irqsave(&md->uevent_lock, flags);
list_splice_init(&md->uevent_list, &uevents);
spin_unlock_irqrestore(&md->uevent_lock, flags);
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
}
/*
* Protected by md->suspend_lock obtained by dm_swap_table().
*/
static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
* Return 1 if the queue has a compulsory merge_bvec_fn function.
*
* If this function returns 0, then the device is either a non-dm
* device without a merge_bvec_fn, or it is a dm device that is
* able to split any bios it receives that are too big.
*/
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
struct mapped_device *dev_md;
if (!q->merge_bvec_fn)
return 0;
if (q->make_request_fn == dm_request) {
dev_md = q->queuedata;
if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
return 0;
}
return 1;
}
static int dm_device_merge_is_compulsory(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
return dm_queue_merge_is_compulsory(q);
}
/*
* Return 1 if it is acceptable to ignore merge_bvec_fn based
* on the properties of the underlying devices.
*/
static int dm_table_merge_is_optional(struct dm_table *table)
{
unsigned i = 0;
struct dm_target *ti;
while (i < dm_table_get_num_targets(table)) {
ti = dm_table_get_target(table, i++);
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
return 0;
}
return 1;
}
/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct queue_limits *limits)
{
struct dm_table *old_map;
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
int merge_is_optional;
size = dm_table_get_size(t);
/*
* Wipe any geometry if the size of the table changed.
*/
if (size != get_capacity(md->disk))
memset(&md->geometry, 0, sizeof(md->geometry));
__set_size(md, size);
dm_table_event_callback(t, event_callback, md);
/*
* If the old table type wasn't request-based, the queue hasn't
* been stopped yet during suspension, so stop it now to prevent
* I/O mapping before resume.
* This must be done before setting the queue restrictions,
* because request-based dm may run just after they are set.
*/
if (dm_table_request_based(t) && !blk_queue_stopped(q))
stop_queue(q);
__bind_mempools(md, t);
merge_is_optional = dm_table_merge_is_optional(t);
write_lock_irqsave(&md->map_lock, flags);
old_map = md->map;
md->map = t;
md->immutable_target_type = dm_table_get_immutable_target_type(t);
dm_table_set_restrictions(t, q, limits);
if (merge_is_optional)
set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
else
clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
write_unlock_irqrestore(&md->map_lock, flags);
return old_map;
}
/*
* Returns unbound table for the caller to free.
*/
static struct dm_table *__unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
unsigned long flags;
if (!map)
return NULL;
dm_table_event_callback(map, NULL, NULL);
write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
write_unlock_irqrestore(&md->map_lock, flags);
return map;
}
/*
* Constructor for a new device.
*/
int dm_create(int minor, struct mapped_device **result)
{
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
dm_sysfs_init(md);
*result = md;
return 0;
}
/*
* Functions to manage md->type.
* All are required to hold md->type_lock.
*/
void dm_lock_md_type(struct mapped_device *md)
{
mutex_lock(&md->type_lock);
}
void dm_unlock_md_type(struct mapped_device *md)
{
mutex_unlock(&md->type_lock);
}
void dm_set_md_type(struct mapped_device *md, unsigned type)
{
md->type = type;
}
unsigned dm_get_md_type(struct mapped_device *md)
{
return md->type;
}
struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
return md->immutable_target_type;
}
/*
* The queue_limits are only valid as long as you have a reference
* count on 'md'.
*/
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
BUG_ON(!atomic_read(&md->holders));
return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
/*
* Fully initialize a request-based queue (->elevator, ->request_fn, etc).
*/
static int dm_init_request_based_queue(struct mapped_device *md)
{
struct request_queue *q = NULL;
if (md->queue->elevator)
return 1;
/* Fully initialize the queue */
q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
if (!q)
return 0;
md->queue = q;
dm_init_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
elv_register_queue(md->queue);
return 1;
}
/*
* Setup the DM device's queue based on md's type
*/
int dm_setup_md_queue(struct mapped_device *md)
{
if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
!dm_init_request_based_queue(md)) {
DMWARN("Cannot initialize queue for request-based mapped device");
return -EINVAL;
}
return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
unsigned minor = MINOR(dev);
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
(MINOR(disk_devt(dm_disk(md))) != minor) ||
dm_deleting_md(md) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
}
out:
spin_unlock(&_minor_lock);
return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md = dm_find_md(dev);
if (md)
dm_get(md);
return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
return md->interface_ptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
atomic_inc(&md->holders);
BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
const char *dm_device_name(struct mapped_device *md)
{
return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
struct dm_table *map;
might_sleep();
spin_lock(&_minor_lock);
map = dm_get_live_table(md);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
/*
* Rare, but there may still be I/O requests going to completion,
* for example. Wait for all references to disappear.
* No one should increment the reference count of the mapped_device
* after its state becomes DMF_FREEING.
*/
if (wait)
while (atomic_read(&md->holders))
msleep(1);
else if (atomic_read(&md->holders))
DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
dm_device_name(md), atomic_read(&md->holders));
dm_sysfs_exit(md);
dm_table_put(map);
dm_table_destroy(__unbind(md));
free_dev(md);
}
void dm_destroy(struct mapped_device *md)
{
__dm_destroy(md, true);
}
void dm_destroy_immediate(struct mapped_device *md)
{
__dm_destroy(md, false);
}
void dm_put(struct mapped_device *md)
{
atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&md->wait, &wait);
while (1) {
set_current_state(interruptible);
if (!md_in_flight(md))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
signal_pending(current)) {
r = -EINTR;
break;
}
io_schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&md->wait, &wait);
return r;
}
/*
* Process the deferred bios
*/
static void dm_wq_work(struct work_struct *work)
{
struct mapped_device *md = container_of(work, struct mapped_device,
work);
struct bio *c;
down_read(&md->io_lock);
while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
spin_lock_irq(&md->deferred_lock);
c = bio_list_pop(&md->deferred);
spin_unlock_irq(&md->deferred_lock);
if (!c)
break;
up_read(&md->io_lock);
if (dm_request_based(md))
generic_make_request(c);
else
__split_and_process_bio(md, c);
down_read(&md->io_lock);
}
up_read(&md->io_lock);
}
static void dm_queue_flush(struct mapped_device *md)
{
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
smp_mb__after_atomic();
queue_work(md->wq, &md->work);
}
/*
* Swap in a new table, returning the old one for the caller to destroy.
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
mutex_lock(&md->suspend_lock);
/* device must be suspended */
if (!dm_suspended_md(md))
goto out;
/*
* If the new table has no data devices, retain the existing limits.
* This helps multipath with queue_if_no_path: if all paths disappear,
* new I/O is queued based on these limits, and some paths may later
* reappear.
*/
if (dm_table_has_no_data_devices(table)) {
live_map = dm_get_live_table(md);
if (live_map)
limits = md->queue->limits;
dm_table_put(live_map);
}
if (!live_map) {
r = dm_calculate_queue_limits(table, &limits);
if (r) {
map = ERR_PTR(r);
goto out;
}
}
map = __bind(md, table, &limits);
out:
mutex_unlock(&md->suspend_lock);
return map;
}
/*
* Functions to lock and unlock any filesystem running on the
* device.
*/
static int lock_fs(struct mapped_device *md)
{
int r;
WARN_ON(md->frozen_sb);
md->frozen_sb = freeze_bdev(md->bdev);
if (IS_ERR(md->frozen_sb)) {
r = PTR_ERR(md->frozen_sb);
md->frozen_sb = NULL;
return r;
}
set_bit(DMF_FROZEN, &md->flags);
return 0;
}
static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
thaw_bdev(md->bdev, md->frozen_sb);
md->frozen_sb = NULL;
clear_bit(DMF_FROZEN, &md->flags);
}
/*
* We need to be able to change a mapping table under a mounted
* filesystem. For example we might want to move some data in
* the background. Before the table can be swapped with
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
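/*
 * Illustrative sequence (not part of the original file): the table swap
 * described above, roughly as driven from the ioctl layer:
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */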
/*
* Suspend mechanism in request-based dm.
*
* 1. Flush all I/Os by lock_fs() if needed.
* 2. Stop dispatching any I/O by stopping the request_queue.
* 3. Wait for all in-flight I/Os to be completed or requeued.
*
* To abort suspend, start the request_queue.
*/
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
int r = 0;
int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
mutex_lock(&md->suspend_lock);
if (dm_suspended_md(md)) {
r = -EINVAL;
goto out_unlock;
}
map = dm_get_live_table(md);
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
* This flag is cleared before dm_suspend returns.
*/
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
/* This does not get reverted if there's an error later. */
dm_table_presuspend_targets(map);
/*
* Flush I/O to the device.
* Any I/O submitted after lock_fs() may not be flushed.
* noflush takes precedence over do_lockfs.
* (lock_fs() flushes I/Os and waits for them to complete.)
*/
if (!noflush && do_lockfs) {
r = lock_fs(md);
if (r)
goto out;
}
/*
* Here we must make sure that no processes are submitting requests
* to target drivers i.e. no one may be executing
* __split_and_process_bio. This is called from dm_request and
* dm_wq_work.
*
* To get all processes out of __split_and_process_bio in dm_request,
* we take the write lock. To prevent any process from reentering
* __split_and_process_bio from dm_request and quiesce the thread
* (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
* flush_workqueue(md->wq).
*/
down_write(&md->io_lock);
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
up_write(&md->io_lock);
/*
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
if (dm_request_based(md))
stop_queue(md->queue);
flush_workqueue(md->wq);
/*
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
down_write(&md->io_lock);
if (noflush)
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
up_write(&md->io_lock);
/* were we interrupted ? */
if (r < 0) {
dm_queue_flush(md);
if (dm_request_based(md))
start_queue(md->queue);
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
}
/*
* If dm_wait_for_completion returned 0, the device is completely
* quiescent now. There is no request-processing activity. All new
* requests are being added to md->deferred list.
*/
set_bit(DMF_SUSPENDED, &md->flags);
dm_table_postsuspend_targets(map);
out:
dm_table_put(map);
out_unlock:
mutex_unlock(&md->suspend_lock);
return r;
}
int dm_resume(struct mapped_device *md)
{
int r = -EINVAL;
struct dm_table *map = NULL;
mutex_lock(&md->suspend_lock);
if (!dm_suspended_md(md))
goto out;
map = dm_get_live_table(md);
if (!map || !dm_table_get_size(map))
goto out;
r = dm_table_resume_targets(map);
if (r)
goto out;
dm_queue_flush(md);
/*
* Flushing deferred I/Os must be done after targets are resumed
* so that mapping of targets can work correctly.
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
if (dm_request_based(md))
start_queue(md->queue);
unlock_fs(md);
clear_bit(DMF_SUSPENDED, &md->flags);
r = 0;
out:
dm_table_put(map);
mutex_unlock(&md->suspend_lock);
return r;
}
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
if (!cookie)
return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
action, envp);
}
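/*
 * Example (illustrative): for cookie 42 and action KOBJ_CHANGE this emits
 * a change uevent carrying "DM_COOKIE=42" in its environment, letting
 * udev rules pair the event with the ioctl that triggered it.
 */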
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
return atomic_add_return(1, &md->uevent_seq);
}
uint32_t dm_get_event_nr(struct mapped_device *md)
{
return atomic_read(&md->event_nr);
}
int dm_wait_event(struct mapped_device *md, int event_nr)
{
return wait_event_interruptible(md->eventq,
(event_nr != atomic_read(&md->event_nr)));
}
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
unsigned long flags;
spin_lock_irqsave(&md->uevent_lock, flags);
list_add(elist, &md->uevent_list);
spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
* The gendisk is only valid as long as you have a reference
* count on 'md'.
*/
struct gendisk *dm_disk(struct mapped_device *md)
{
return md->disk;
}
struct kobject *dm_kobject(struct mapped_device *md)
{
return &md->kobj_holder.kobj;
}
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md))
return NULL;
dm_get(md);
return md;
}
int dm_suspended_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_suspended(struct dm_target *ti)
{
return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
struct kmem_cache *cachep;
unsigned int pool_size;
unsigned int front_pad;
if (!pools)
return NULL;
if (type == DM_TYPE_BIO_BASED) {
cachep = _io_cache;
pool_size = 16;
front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
} else if (type == DM_TYPE_REQUEST_BASED) {
cachep = _rq_tio_cache;
pool_size = MIN_IOS;
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_bio_data_size is not used. See __bind_mempools(). */
WARN_ON(per_bio_data_size != 0);
} else
goto out;
pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
if (!pools->io_pool)
goto out;
pools->bs = bioset_create(pool_size, front_pad);
if (!pools->bs)
goto out;
if (integrity && bioset_integrity_create(pools->bs, pool_size))
goto out;
return pools;
out:
dm_free_md_mempools(pools);
return NULL;
}
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
if (!pools)
return;
if (pools->io_pool)
mempool_destroy(pools->io_pool);
if (pools->bs)
bioset_free(pools->bs);
kfree(pools);
}
static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
.owner = THIS_MODULE
};
EXPORT_SYMBOL(dm_get_mapinfo);
/*
* module hooks
*/
module_init(dm_init);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
quadcores/Full_Dedup_Liu | net/core/net_namespace.c | 684 | 23264 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
* Our network namespace constructor/destructor lists
*/
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net = {
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
static struct net_generic *net_alloc_generic(void)
{
struct net_generic *ng;
size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
ng = kzalloc(generic_size, GFP_KERNEL);
if (ng)
ng->len = max_gen_ptrs;
return ng;
}
static int net_assign_generic(struct net *net, int id, void *data)
{
struct net_generic *ng, *old_ng;
BUG_ON(!mutex_is_locked(&net_mutex));
BUG_ON(id == 0);
old_ng = rcu_dereference_protected(net->gen,
lockdep_is_held(&net_mutex));
ng = old_ng;
if (old_ng->len >= id)
goto assign;
ng = net_alloc_generic();
if (ng == NULL)
return -ENOMEM;
/*
* Some synchronisation notes:
*
* net_generic() explores the net->gen array inside an rcu
* read section. Besides, once set, the net->gen->ptr[x]
* pointer never changes (see rules in netns/generic.h).
*
* That said, we simply duplicate this array and schedule
* the old copy for kfree after a grace period.
*/
memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
rcu_assign_pointer(net->gen, ng);
kfree_rcu(old_ng, rcu);
assign:
ng->ptr[id - 1] = data;
return 0;
}
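/*
 * An illustrative sketch (not from the original source): the reader side
 * that the copy-and-replace above pairs with. Modelled on net_generic()
 * in include/net/netns/generic.h, a lookup dereferences net->gen only
 * inside an rcu read section, so it sees either the old array or the
 * new one, never a half-updated copy:
 *
 *	static void *example_net_generic(const struct net *net, int id)
 *	{
 *		struct net_generic *ng;
 *		void *ptr;
 *
 *		rcu_read_lock();
 *		ng = rcu_dereference(net->gen);
 *		ptr = ng->ptr[id - 1];
 *		rcu_read_unlock();
 *		return ptr;
 *	}
 */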
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
int err = -ENOMEM;
void *data = NULL;
if (ops->id && ops->size) {
data = kzalloc(ops->size, GFP_KERNEL);
if (!data)
goto out;
err = net_assign_generic(net, *ops->id, data);
if (err)
goto cleanup;
}
err = 0;
if (ops->init)
err = ops->init(net);
if (!err)
return 0;
cleanup:
kfree(data);
out:
return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
if (ops->id && ops->size) {
int id = *ops->id;
kfree(net_generic(net, id));
}
}
static void ops_exit_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->exit) {
list_for_each_entry(net, net_exit_list, exit_list)
ops->exit(net);
}
if (ops->exit_batch)
ops->exit_batch(net_exit_list);
}
static void ops_free_list(const struct pernet_operations *ops,
struct list_head *net_exit_list)
{
struct net *net;
if (ops->size && ops->id) {
list_for_each_entry(net, net_exit_list, exit_list)
ops_free(ops, net);
}
}
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
int min = 0, max = 0;
if (reqid >= 0) {
min = reqid;
max = reqid + 1;
}
return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}
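/* A note on the idr_alloc() call above: [min, max) is the allocation
 * window and max <= 0 means "no upper bound". With a non-negative reqid
 * the call can only return exactly reqid (or -ENOSPC if it is taken),
 * while reqid < 0 leaves min = max = 0 and hands back the lowest free id.
 */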
/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
if (net_eq(net, peer))
return id ? : NET_ID_ZERO;
return 0;
}
/* Should be called with nsid_lock held. If a new id is assigned, the bool
 * alloc is set to true, so that the caller knows the new id must be notified via
* rtnl.
*/
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
bool alloc_it = *alloc;
*alloc = false;
/* Magic value for id 0. */
if (id == NET_ID_ZERO)
return 0;
if (id > 0)
return id;
if (alloc_it) {
id = alloc_netid(net, peer, -1);
*alloc = true;
return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}
return NETNSA_NSID_NOT_ASSIGNED;
}
/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
bool no = false;
return __peernet2id_alloc(net, peer, &no);
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
* be allocated and returned.
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
unsigned long flags;
bool alloc;
int id;
spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) != 0;
id = __peernet2id_alloc(net, peer, &alloc);
spin_unlock_irqrestore(&net->nsid_lock, flags);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id;
}
EXPORT_SYMBOL(peernet2id_alloc);
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
unsigned long flags;
int id;
spin_lock_irqsave(&net->nsid_lock, flags);
id = __peernet2id(net, peer);
spin_unlock_irqrestore(&net->nsid_lock, flags);
return id;
}
/* This function returns true if the peer netns has an id assigned into the
* current netns.
*/
bool peernet_has_id(struct net *net, struct net *peer)
{
return peernet2id(net, peer) >= 0;
}
struct net *get_net_ns_by_id(struct net *net, int id)
{
unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock();
return peer;
}
/*
* setup_net runs the initializers for the network namespace object.
*/
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
/* Must be called with net_mutex held */
const struct pernet_operations *ops, *saved_ops;
int error = 0;
LIST_HEAD(net_exit_list);
atomic_set(&net->count, 1);
atomic_set(&net->passive, 1);
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);
spin_lock_init(&net->nsid_lock);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
if (error < 0)
goto out_undo;
}
out:
return error;
out_undo:
/* Walk through the list backwards calling the exit functions
* for the pernet modules whose init functions did not fail.
*/
list_add(&net->exit_list, &net_exit_list);
saved_ops = ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
rcu_barrier();
goto out;
}
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
struct net *net = NULL;
struct net_generic *ng;
ng = net_alloc_generic();
if (!ng)
goto out;
net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
if (!net)
goto out_free;
rcu_assign_pointer(net->gen, ng);
out:
return net;
out_free:
kfree(ng);
goto out;
}
static void net_free(struct net *net)
{
kfree(rcu_access_pointer(net->gen));
kmem_cache_free(net_cachep, net);
}
void net_drop_ns(void *p)
{
struct net *ns = p;
if (ns && atomic_dec_and_test(&ns->passive))
net_free(ns);
}
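/* A note on the two reference counts in play here: net->count
 * (get_net()/put_net()) tracks active users and gates teardown via
 * cleanup_net(), while net->passive keeps the structure itself
 * allocated for rcu-protected lookups until the final net_drop_ns()
 * above frees it.
 */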
struct net *copy_net_ns(unsigned long flags,
struct user_namespace *user_ns, struct net *old_net)
{
struct net *net;
int rv;
if (!(flags & CLONE_NEWNET))
return get_net(old_net);
net = net_alloc();
if (!net)
return ERR_PTR(-ENOMEM);
get_user_ns(user_ns);
mutex_lock(&net_mutex);
rv = setup_net(net, user_ns);
if (rv == 0) {
rtnl_lock();
list_add_tail_rcu(&net->list, &net_namespace_list);
rtnl_unlock();
}
mutex_unlock(&net_mutex);
if (rv < 0) {
put_user_ns(user_ns);
net_drop_ns(net);
return ERR_PTR(rv);
}
return net;
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
const struct pernet_operations *ops;
struct net *net, *tmp;
struct list_head net_kill_list;
LIST_HEAD(net_exit_list);
/* Atomically snapshot the list of namespaces to cleanup */
spin_lock_irq(&cleanup_list_lock);
list_replace_init(&cleanup_list, &net_kill_list);
spin_unlock_irq(&cleanup_list_lock);
mutex_lock(&net_mutex);
/* Don't let anyone else find us. */
rtnl_lock();
list_for_each_entry(net, &net_kill_list, cleanup_list) {
list_del_rcu(&net->list);
list_add_tail(&net->exit_list, &net_exit_list);
for_each_net(tmp) {
int id;
spin_lock_irq(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
spin_unlock_irq(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids);
spin_unlock_irq(&net->nsid_lock);
}
rtnl_unlock();
/*
* Another CPU might be rcu-iterating the list, wait for it.
* This needs to be before calling the exit() notifiers, so
* the rcu_barrier() below isn't sufficient alone.
*/
synchronize_rcu();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
/* Free the net generic variables */
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
mutex_unlock(&net_mutex);
/* Ensure there are no outstanding rcu callbacks using this
* network namespace.
*/
rcu_barrier();
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
put_user_ns(net->user_ns);
net_drop_ns(net);
}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
/* Cleanup the network namespace in process context */
unsigned long flags;
spin_lock_irqsave(&cleanup_list_lock, flags);
list_add(&net->cleanup_list, &cleanup_list);
spin_unlock_irqrestore(&cleanup_list_lock, flags);
queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
struct file *file;
struct ns_common *ns;
struct net *net;
file = proc_ns_fget(fd);
if (IS_ERR(file))
return ERR_CAST(file);
ns = get_proc_ns(file_inode(file));
if (ns->ops == &netns_operations)
net = get_net(container_of(ns, struct net, ns));
else
net = ERR_PTR(-EINVAL);
fput(file);
return net;
}
#else
struct net *get_net_ns_by_fd(int fd)
{
return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
struct net *get_net_ns_by_pid(pid_t pid)
{
struct task_struct *tsk;
struct net *net;
/* Lookup the network namespace */
net = ERR_PTR(-ESRCH);
rcu_read_lock();
tsk = find_task_by_vpid(pid);
if (tsk) {
struct nsproxy *nsproxy;
task_lock(tsk);
nsproxy = tsk->nsproxy;
if (nsproxy)
net = get_net(nsproxy->net_ns);
task_unlock(tsk);
}
rcu_read_unlock();
return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
net->ns.ops = &netns_operations;
#endif
return ns_alloc_inum(&net->ns);
}
static __net_exit void net_ns_net_exit(struct net *net)
{
ns_free_inum(&net->ns);
}
static struct pernet_operations __net_initdata net_ns_ops = {
.init = net_ns_net_init,
.exit = net_ns_net_exit,
};
static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
[NETNSA_NONE] = { .type = NLA_UNSPEC },
[NETNSA_NSID] = { .type = NLA_S32 },
[NETNSA_PID] = { .type = NLA_U32 },
[NETNSA_FD] = { .type = NLA_U32 },
};
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
unsigned long flags;
struct net *peer;
int nsid, err;
err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy);
if (err < 0)
return err;
if (!tb[NETNSA_NSID])
return -EINVAL;
nsid = nla_get_s32(tb[NETNSA_NSID]);
if (tb[NETNSA_PID])
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
else if (tb[NETNSA_FD])
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
else
return -EINVAL;
if (IS_ERR(peer))
return PTR_ERR(peer);
spin_lock_irqsave(&net->nsid_lock, flags);
if (__peernet2id(net, peer) >= 0) {
spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
spin_unlock_irqrestore(&net->nsid_lock, flags);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
}
out:
put_net(peer);
return err;
}
static int rtnl_net_get_size(void)
{
return NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ nla_total_size(sizeof(s32)) /* NETNSA_NSID */
;
}
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
int cmd, struct net *net, int nsid)
{
struct nlmsghdr *nlh;
struct rtgenmsg *rth;
nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
if (!nlh)
return -EMSGSIZE;
rth = nlmsg_data(nlh);
rth->rtgen_family = AF_UNSPEC;
if (nla_put_s32(skb, NETNSA_NSID, nsid))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
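/* A note on the pairing above: rtnl_net_get_size() and rtnl_net_fill()
 * must stay in step -- every attribute emitted by the fill function
 * needs a matching nla_total_size() term in the size estimate, or
 * nlmsg_new() may hand back a buffer that is too small. For example, a
 * hypothetical extra u32 attribute would add an nla_put_u32() here and
 * a "+ nla_total_size(sizeof(u32))" term above.
 */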
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
struct sk_buff *msg;
struct net *peer;
int err, id;
err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy);
if (err < 0)
return err;
if (tb[NETNSA_PID])
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
else if (tb[NETNSA_FD])
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
else
return -EINVAL;
if (IS_ERR(peer))
return PTR_ERR(peer);
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
goto out;
}
id = peernet2id(net, peer);
err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
RTM_NEWNSID, net, id);
if (err < 0)
goto err_out;
err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
goto out;
err_out:
nlmsg_free(msg);
out:
put_net(peer);
return err;
}
struct rtnl_net_dump_cb {
struct net *net;
struct sk_buff *skb;
struct netlink_callback *cb;
int idx;
int s_idx;
};
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
int ret;
if (net_cb->idx < net_cb->s_idx)
goto cont;
ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWNSID, net_cb->net, id);
if (ret < 0)
return ret;
cont:
net_cb->idx++;
return 0;
}
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct rtnl_net_dump_cb net_cb = {
.net = net,
.skb = skb,
.cb = cb,
.idx = 0,
.s_idx = cb->args[0],
};
unsigned long flags;
spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx;
return skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
struct sk_buff *msg;
int err = -ENOMEM;
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg)
goto out;
err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
if (err < 0)
goto err_out;
rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
return;
err_out:
nlmsg_free(msg);
out:
rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
static int __init net_ns_init(void)
{
struct net_generic *ng;
#ifdef CONFIG_NET_NS
net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
SMP_CACHE_BYTES,
SLAB_PANIC, NULL);
/* Create workqueue for cleanup */
netns_wq = create_singlethread_workqueue("netns");
if (!netns_wq)
panic("Could not create netns workq");
#endif
ng = net_alloc_generic();
if (!ng)
panic("Could not allocate generic netns");
rcu_assign_pointer(init_net.gen, ng);
mutex_lock(&net_mutex);
if (setup_net(&init_net, &init_user_ns))
panic("Could not setup the initial network namespace");
rtnl_lock();
list_add_tail_rcu(&init_net.list, &net_namespace_list);
rtnl_unlock();
mutex_unlock(&net_mutex);
register_pernet_subsys(&net_ns_ops);
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
NULL);
return 0;
}
pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
struct net *net;
int error;
LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
if (ops->init || (ops->id && ops->size)) {
for_each_net(net) {
error = ops_init(ops, net);
if (error)
goto out_undo;
list_add_tail(&net->exit_list, &net_exit_list);
}
}
return 0;
out_undo:
/* If I have an error cleanup all namespaces I initialized */
list_del(&ops->list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
return error;
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
struct net *net;
LIST_HEAD(net_exit_list);
list_del(&ops->list);
for_each_net(net)
list_add_tail(&net->exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
#else
static int __register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
LIST_HEAD(net_exit_list);
list_add(&init_net.exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
ops_free_list(ops, &net_exit_list);
}
#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
int error;
if (ops->id) {
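/* Reserve a slot in the net_generic array for this subsystem.
 * ida_get_new_above() may need memory; it then returns -EAGAIN,
 * and we preallocate with ida_pre_get() before retrying.
 */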
again:
error = ida_get_new_above(&net_generic_ids, 1, ops->id);
if (error < 0) {
if (error == -EAGAIN) {
ida_pre_get(&net_generic_ids, GFP_KERNEL);
goto again;
}
return error;
}
max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
}
error = __register_pernet_operations(list, ops);
if (error) {
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
}
return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
__unregister_pernet_operations(ops);
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
}
/**
* register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem
*
* Register a subsystem which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered, all network namespace init functions are
* called for every existing network namespace, allowing kernel
* modules to have a race-free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order in which they were
* registered.
*/
int register_pernet_subsys(struct pernet_operations *ops)
{
int error;
mutex_lock(&net_mutex);
error = register_pernet_operations(first_device, ops);
mutex_unlock(&net_mutex);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
/**
* unregister_pernet_subsys - unregister a network namespace subsystem
* @ops: pernet operations structure to manipulate
*
* Remove the pernet operations structure from the list to be
* used when network namespaces are created or destroyed. In
* addition run the exit method for all existing network
* namespaces.
*/
void unregister_pernet_subsys(struct pernet_operations *ops)
{
mutex_lock(&net_mutex);
unregister_pernet_operations(ops);
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
* register_pernet_device - register a network namespace device
* @ops: pernet operations structure for the subsystem
*
* Register a device which has init and exit functions
* that are called when network namespaces are created and
* destroyed respectively.
*
* When registered, all network namespace init functions are
* called for every existing network namespace, allowing kernel
* modules to have a race-free view of the set of network namespaces.
*
* When a new network namespace is created all of the init
* methods are called in the order in which they were registered.
*
* When a network namespace is destroyed all of the exit methods
* are called in the reverse of the order in which they were
* registered.
*/
int register_pernet_device(struct pernet_operations *ops)
{
int error;
mutex_lock(&net_mutex);
error = register_pernet_operations(&pernet_list, ops);
if (!error && (first_device == &pernet_list))
first_device = &ops->list;
mutex_unlock(&net_mutex);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
/**
* unregister_pernet_device - unregister a network namespace netdevice
* @ops: pernet operations structure to manipulate
*
* Remove the pernet operations structure from the list to be
* used when network namespaces are created or destroyed. In
* addition run the exit method for all existing network
* namespaces.
*/
void unregister_pernet_device(struct pernet_operations *ops)
{
mutex_lock(&net_mutex);
if (&ops->list == first_device)
first_device = first_device->next;
unregister_pernet_operations(ops);
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
struct net *net = NULL;
struct nsproxy *nsproxy;
task_lock(task);
nsproxy = task->nsproxy;
if (nsproxy)
net = get_net(nsproxy->net_ns);
task_unlock(task);
return net ? &net->ns : NULL;
}
static inline struct net *to_net_ns(struct ns_common *ns)
{
return container_of(ns, struct net, ns);
}
static void netns_put(struct ns_common *ns)
{
put_net(to_net_ns(ns));
}
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
struct net *net = to_net_ns(ns);
if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
put_net(nsproxy->net_ns);
nsproxy->net_ns = get_net(net);
return 0;
}
const struct proc_ns_operations netns_operations = {
.name = "net",
.type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
};
#endif
| gpl-2.0 |
xiaoleili/linux-mediatek | drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c | 940 | 8594 | /*
* TPD12S015 HDMI ESD protection & level shifter chip driver
*
* Copyright (C) 2013 Texas Instruments
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
int ct_cp_hpd_gpio;
int ls_oe_gpio;
int hpd_gpio;
struct omap_video_timings timings;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tpd_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
r = in->ops.hdmi->connect(in, dssdev);
if (r)
return r;
dst->src = dssdev;
dssdev->dst = dst;
gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
return 0;
}
static void tpd_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
WARN_ON(dst != dssdev->dst);
if (dst != dssdev->dst)
return;
gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
dst->src = NULL;
dssdev->dst = NULL;
in->ops.hdmi->disconnect(in, &ddata->dssdev);
}
static int tpd_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
in->ops.hdmi->set_timings(in, &ddata->timings);
r = in->ops.hdmi->enable(in);
if (r)
return r;
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
return r;
}
static void tpd_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
in->ops.hdmi->disable(in);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static void tpd_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
ddata->timings = *timings;
dssdev->panel.timings = *timings;
in->ops.hdmi->set_timings(in, timings);
}
static void tpd_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
*timings = ddata->timings;
}
static int tpd_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
r = in->ops.hdmi->check_timings(in, timings);
return r;
}
static int tpd_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
int r;
if (!gpio_get_value_cansleep(ddata->hpd_gpio))
return -ENODEV;
if (gpio_is_valid(ddata->ls_oe_gpio))
gpio_set_value_cansleep(ddata->ls_oe_gpio, 1);
r = in->ops.hdmi->read_edid(in, edid, len);
if (gpio_is_valid(ddata->ls_oe_gpio))
gpio_set_value_cansleep(ddata->ls_oe_gpio, 0);
return r;
}
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
return gpio_get_value_cansleep(ddata->hpd_gpio);
}
static int tpd_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
return in->ops.hdmi->set_infoframe(in, avi);
}
static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev,
bool hdmi_mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
}
static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
.enable = tpd_enable,
.disable = tpd_disable,
.check_timings = tpd_check_timings,
.set_timings = tpd_set_timings,
.get_timings = tpd_get_timings,
.read_edid = tpd_read_edid,
.detect = tpd_detect,
.set_infoframe = tpd_set_infoframe,
.set_hdmi_mode = tpd_set_hdmi_mode,
};
static int tpd_probe_pdata(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct encoder_tpd12s015_platform_data *pdata;
struct omap_dss_device *dssdev, *in;
pdata = dev_get_platdata(&pdev->dev);
ddata->ct_cp_hpd_gpio = pdata->ct_cp_hpd_gpio;
ddata->ls_oe_gpio = pdata->ls_oe_gpio;
ddata->hpd_gpio = pdata->hpd_gpio;
in = omap_dss_find_output(pdata->source);
if (in == NULL) {
dev_err(&pdev->dev, "Failed to find video source\n");
return -ENODEV;
}
ddata->in = in;
dssdev = &ddata->dssdev;
dssdev->name = pdata->name;
return 0;
}
static int tpd_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
struct omap_dss_device *in;
int gpio;
/* CT CP HPD GPIO */
gpio = of_get_gpio(node, 0);
if (!gpio_is_valid(gpio)) {
dev_err(&pdev->dev, "failed to parse CT CP HPD gpio\n");
return gpio;
}
ddata->ct_cp_hpd_gpio = gpio;
/* LS OE GPIO */
gpio = of_get_gpio(node, 1);
if (gpio_is_valid(gpio) || gpio == -ENOENT) {
ddata->ls_oe_gpio = gpio;
} else {
dev_err(&pdev->dev, "failed to parse LS OE gpio\n");
return gpio;
}
/* HPD GPIO */
gpio = of_get_gpio(node, 2);
if (!gpio_is_valid(gpio)) {
dev_err(&pdev->dev, "failed to parse HPD gpio\n");
return gpio;
}
ddata->hpd_gpio = gpio;
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
dev_err(&pdev->dev, "failed to find video source\n");
return PTR_ERR(in);
}
ddata->in = in;
return 0;
}
static int tpd_probe(struct platform_device *pdev)
{
struct omap_dss_device *in, *dssdev;
struct panel_drv_data *ddata;
int r;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
if (dev_get_platdata(&pdev->dev)) {
r = tpd_probe_pdata(pdev);
if (r)
return r;
} else if (pdev->dev.of_node) {
r = tpd_probe_of(pdev);
if (r)
return r;
} else {
return -ENODEV;
}
r = devm_gpio_request_one(&pdev->dev, ddata->ct_cp_hpd_gpio,
GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd");
if (r)
goto err_gpio;
if (gpio_is_valid(ddata->ls_oe_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->ls_oe_gpio,
GPIOF_OUT_INIT_LOW, "hdmi_ls_oe");
if (r)
goto err_gpio;
}
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
GPIOF_DIR_IN, "hdmi_hpd");
if (r)
goto err_gpio;
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
dssdev->port_num = 1;
in = ddata->in;
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
goto err_reg;
}
return 0;
err_reg:
err_gpio:
omap_dss_put_device(ddata->in);
return r;
}
static int __exit tpd_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
tpd_disable(dssdev);
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
tpd_disconnect(dssdev, dssdev->dst);
omap_dss_put_device(in);
return 0;
}
static const struct of_device_id tpd_of_match[] = {
{ .compatible = "omapdss,ti,tpd12s015", },
{},
};
MODULE_DEVICE_TABLE(of, tpd_of_match);
static struct platform_driver tpd_driver = {
.probe = tpd_probe,
.remove = __exit_p(tpd_remove),
.driver = {
.name = "tpd12s015",
.of_match_table = tpd_of_match,
.suppress_bind_attrs = true,
},
};
module_platform_driver(tpd_driver);
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("TPD12S015 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
agat63/AGAT_JB_kernel | drivers/tty/serial/samsung.c | 1708 | 34693 | /*
* Driver core for Samsung SoC onboard UARTs.
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Note on 2410 error handling
*
* The s3c2410 manual has a love/hate affair with the contents of the
* UERSTAT register in the UART blocks, and keeps marking some of the
* error bits as reserved. Having checked with the s3c2410x01,
* it copes with BREAKs properly, so I am happy to ignore the RESERVED
* feature from the later versions of the manual.
*
* If it becomes apparent that later versions of the 2410 remove these
* bits, then action will have to be taken to differentiate the versions
* and change the policy on BREAK.
*
* BJD, 04-Nov-2004
*/
#if defined(CONFIG_SERIAL_SAMSUNG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <plat/regs-serial.h>
#include "samsung.h"
/* UART name and device definitions */
#define S3C24XX_SERIAL_NAME "ttySAC"
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
/* macros to change one thing to another */
#define tx_enabled(port) ((port)->unused[0])
#define rx_enabled(port) ((port)->unused[1])
/* flag to ignore all characters coming in */
#define RXSTAT_DUMMY_READ (0x10000000)
static inline struct s3c24xx_uart_port *to_ourport(struct uart_port *port)
{
return container_of(port, struct s3c24xx_uart_port, port);
}
/* translate a port to the device name */
static inline const char *s3c24xx_serial_portname(struct uart_port *port)
{
return to_platform_device(port->dev)->name;
}
static int s3c24xx_serial_txempty_nofifo(struct uart_port *port)
{
return (rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE);
}
static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
unsigned long flags;
unsigned int ucon, ufcon;
int count = 10000;
spin_lock_irqsave(&port->lock, flags);
while (--count && !s3c24xx_serial_txempty_nofifo(port))
udelay(100);
ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
ucon = rd_regl(port, S3C2410_UCON);
ucon |= S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
rx_enabled(port) = 1;
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_rx_disable(struct uart_port *port)
{
unsigned long flags;
unsigned int ucon;
spin_lock_irqsave(&port->lock, flags);
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~S3C2410_UCON_RXIRQMODE;
wr_regl(port, S3C2410_UCON, ucon);
rx_enabled(port) = 0;
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_stop_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (tx_enabled(port)) {
disable_irq_nosync(ourport->tx_irq);
tx_enabled(port) = 0;
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_enable(port);
}
}
static void s3c24xx_serial_start_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (!tx_enabled(port)) {
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
enable_irq(ourport->tx_irq);
tx_enabled(port) = 1;
}
}
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (rx_enabled(port)) {
dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
disable_irq_nosync(ourport->rx_irq);
rx_enabled(port) = 0;
}
}
static void s3c24xx_serial_enable_ms(struct uart_port *port)
{
}
static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *port)
{
return to_ourport(port)->info;
}
static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
{
if (port->dev == NULL)
return NULL;
return (struct s3c2410_uartcfg *)port->dev->platform_data;
}
static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
unsigned long ufstat)
{
struct s3c24xx_uart_info *info = ourport->info;
if (ufstat & info->rx_fifofull)
return info->fifosize;
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
/* ? - where has parity gone?? */
#define S3C2410_UERSTAT_PARITY (0x1000)
static irqreturn_t
s3c24xx_serial_rx_chars(int irq, void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct tty_struct *tty = port->state->port.tty;
unsigned int ufcon, ch, flag, ufstat, uerstat;
int max_count = 64;
while (max_count-- > 0) {
ufcon = rd_regl(port, S3C2410_UFCON);
ufstat = rd_regl(port, S3C2410_UFSTAT);
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
break;
uerstat = rd_regl(port, S3C2410_UERSTAT);
ch = rd_regb(port, S3C2410_URXH);
if (port->flags & UPF_CONS_FLOW) {
int txe = s3c24xx_serial_txempty_nofifo(port);
if (rx_enabled(port)) {
if (!txe) {
rx_enabled(port) = 0;
continue;
}
} else {
if (txe) {
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
rx_enabled(port) = 1;
goto out;
}
continue;
}
}
/* insert the character into the buffer */
flag = TTY_NORMAL;
port->icount.rx++;
if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
ch, uerstat);
/* check for break */
if (uerstat & S3C2410_UERSTAT_BREAK) {
dbg("break!\n");
port->icount.brk++;
if (uart_handle_break(port))
goto ignore_char;
}
if (uerstat & S3C2410_UERSTAT_FRAME)
port->icount.frame++;
if (uerstat & S3C2410_UERSTAT_OVERRUN)
port->icount.overrun++;
uerstat &= port->read_status_mask;
if (uerstat & S3C2410_UERSTAT_BREAK)
flag = TTY_BREAK;
else if (uerstat & S3C2410_UERSTAT_PARITY)
flag = TTY_PARITY;
else if (uerstat & (S3C2410_UERSTAT_FRAME |
S3C2410_UERSTAT_OVERRUN))
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, ch))
goto ignore_char;
uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN,
ch, flag);
ignore_char:
continue;
}
tty_flip_buffer_push(tty);
out:
return IRQ_HANDLED;
}
static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
{
struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
int count = 256;
if (port->x_char) {
wr_regb(port, S3C2410_UTXH, port->x_char);
port->icount.tx++;
port->x_char = 0;
goto out;
}
/* if there isn't anything more to transmit, or the uart is now
* stopped, disable the uart and exit
*/
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
s3c24xx_serial_stop_tx(port);
goto out;
}
/* try and drain the buffer... */
while (!uart_circ_empty(xmit) && count-- > 0) {
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
s3c24xx_serial_stop_tx(port);
out:
return IRQ_HANDLED;
}
static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
if (ufcon & S3C2410_UFCON_FIFOMODE) {
if ((ufstat & info->tx_fifomask) != 0 ||
(ufstat & info->tx_fifofull))
return 0;
return 1;
}
return s3c24xx_serial_txempty_nofifo(port);
}
/* no modem control lines */
static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
{
unsigned int umstat = rd_regb(port, S3C2410_UMSTAT);
if (umstat & S3C2410_UMSTAT_CTS)
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
else
return TIOCM_CAR | TIOCM_DSR;
}
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
/* todo - possibly remove AFC and do manual CTS */
}
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
unsigned int ucon;
spin_lock_irqsave(&port->lock, flags);
ucon = rd_regl(port, S3C2410_UCON);
if (break_state)
ucon |= S3C2410_UCON_SBREAK;
else
ucon &= ~S3C2410_UCON_SBREAK;
wr_regl(port, S3C2410_UCON, ucon);
spin_unlock_irqrestore(&port->lock, flags);
}
static void s3c24xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (ourport->tx_claimed) {
free_irq(ourport->tx_irq, ourport);
tx_enabled(port) = 0;
ourport->tx_claimed = 0;
}
if (ourport->rx_claimed) {
free_irq(ourport->rx_irq, ourport);
ourport->rx_claimed = 0;
rx_enabled(port) = 0;
}
}
static int s3c24xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
int ret;
dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
port->mapbase, port->membase);
rx_enabled(port) = 1;
ret = request_irq(ourport->rx_irq, s3c24xx_serial_rx_chars, 0,
s3c24xx_serial_portname(port), ourport);
if (ret != 0) {
printk(KERN_ERR "cannot get irq %d\n", ourport->rx_irq);
return ret;
}
ourport->rx_claimed = 1;
dbg("requesting tx irq...\n");
tx_enabled(port) = 1;
ret = request_irq(ourport->tx_irq, s3c24xx_serial_tx_chars, 0,
s3c24xx_serial_portname(port), ourport);
if (ret) {
printk(KERN_ERR "cannot get irq %d\n", ourport->tx_irq);
goto err;
}
ourport->tx_claimed = 1;
dbg("s3c24xx_serial_startup ok\n");
/* the port reset code should have done the correct
* register setup for the port controls */
return ret;
err:
s3c24xx_serial_shutdown(port);
return ret;
}
/* power management control */
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
unsigned int old)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
ourport->pm_level = level;
switch (level) {
case 3:
if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
clk_disable(ourport->baudclk);
clk_disable(ourport->clk);
break;
case 0:
clk_enable(ourport->clk);
if (!IS_ERR(ourport->baudclk) && ourport->baudclk != NULL)
clk_enable(ourport->baudclk);
break;
default:
printk(KERN_ERR "s3c24xx_serial: unknown pm %d\n", level);
}
}
/* baud rate calculation
*
* The UARTs on the S3C2410/S3C2440 can take their clocks from a number
* of different sources, including the peripheral clock ("pclk") and an
* external clock ("uclk"). The S3C2440 also adds the core clock ("fclk")
* with a programmable extra divisor.
*
* The following code goes through the clock sources, and calculates the
* baud clocks (and the resultant actual baud rates) and then tries to
* pick the closest one and select that.
*
*/
#define MAX_CLKS (8)
static struct s3c24xx_uart_clksrc tmp_clksrc = {
.name = "pclk",
.min_baud = 0,
.max_baud = 0,
.divisor = 1,
};
static inline int
s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
return (info->get_clksrc)(port, c);
}
static inline int
s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
return (info->set_clksrc)(port, c);
}
struct baud_calc {
struct s3c24xx_uart_clksrc *clksrc;
unsigned int calc;
unsigned int divslot;
unsigned int quot;
struct clk *src;
};
static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
struct uart_port *port,
struct s3c24xx_uart_clksrc *clksrc,
unsigned int baud)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long rate;
calc->src = clk_get(port->dev, clksrc->name);
if (calc->src == NULL || IS_ERR(calc->src))
return 0;
rate = clk_get_rate(calc->src);
rate /= clksrc->divisor;
calc->clksrc = clksrc;
if (ourport->info->has_divslot) {
unsigned long div = rate / baud;
/* The UDIVSLOT register on the newer UARTs allows us to
* get a divisor adjustment of 1/16th on the baud clock.
*
* We don't keep the UDIVSLOT value (the 16ths we calculated
* by not multiplying the baud by 16) as it is easy enough
* to recalculate.
*/
calc->quot = div / 16;
calc->calc = rate / div;
} else {
calc->quot = (rate + (8 * baud)) / (16 * baud);
calc->calc = (rate / (calc->quot * 16));
}
calc->quot--;
return 1;
}
static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
struct s3c24xx_uart_clksrc **clksrc,
struct clk **clk,
unsigned int baud)
{
struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_clksrc *clkp;
struct baud_calc res[MAX_CLKS];
struct baud_calc *resptr, *best, *sptr;
int i;
clkp = cfg->clocks;
best = NULL;
if (cfg->clocks_size < 2) {
if (cfg->clocks_size == 0)
clkp = &tmp_clksrc;
/* check to see if we're sourcing fclk, and if so we're
* going to have to update the clock source
*/
if (strcmp(clkp->name, "fclk") == 0) {
struct s3c24xx_uart_clksrc src;
s3c24xx_serial_getsource(port, &src);
/* check that the port is already using fclk, and if
* not, then re-select fclk
*/
if (strcmp(src.name, clkp->name) == 0) {
s3c24xx_serial_setsource(port, clkp);
s3c24xx_serial_getsource(port, &src);
}
clkp->divisor = src.divisor;
}
s3c24xx_serial_calcbaud(res, port, clkp, baud);
best = res;
resptr = best + 1;
} else {
resptr = res;
for (i = 0; i < cfg->clocks_size; i++, clkp++) {
if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
resptr++;
}
}
/* ok, we now need to select the best clock we found */
if (!best) {
unsigned int deviation = (1<<30)|((1<<30)-1);
int calc_deviation;
for (sptr = res; sptr < resptr; sptr++) {
calc_deviation = baud - sptr->calc;
if (calc_deviation < 0)
calc_deviation = -calc_deviation;
if (calc_deviation < deviation) {
best = sptr;
deviation = calc_deviation;
}
}
}
/* store results to pass back */
*clksrc = best->clksrc;
*clk = best->src;
return best->quot;
}
/* udivslot_table[]
*
* This table takes the fractional value of the baud divisor and gives
* the recommended setting for the UDIVSLOT register.
*/
static u16 udivslot_table[16] = {
[0] = 0x0000,
[1] = 0x0080,
[2] = 0x0808,
[3] = 0x0888,
[4] = 0x2222,
[5] = 0x4924,
[6] = 0x4A52,
[7] = 0x54AA,
[8] = 0x5555,
[9] = 0xD555,
[10] = 0xD5D5,
[11] = 0xDDD5,
[12] = 0xDDDD,
[13] = 0xDFDD,
[14] = 0xDFDF,
[15] = 0xFFDF,
};
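/* A worked example with hypothetical numbers: for a 50MHz baud clock
 * and a requested 115200bps, div = 50000000 / 115200 = 434. The whole
 * part gives UBRDIV = (434 / 16) - 1 = 26, the remaining 1/16ths give
 * udivslot_table[434 & 15] = udivslot_table[2] = 0x0808, and the
 * resulting rate is 50000000 / 434, roughly 115207bps.
 */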
static void s3c24xx_serial_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct s3c24xx_uart_clksrc *clksrc = NULL;
struct clk *clk = NULL;
unsigned long flags;
unsigned int baud, quot;
unsigned int ulcon;
unsigned int umcon;
unsigned int udivslot = 0;
/*
* We don't support modem control lines.
*/
termios->c_cflag &= ~(HUPCL | CMSPAR);
termios->c_cflag |= CLOCAL;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
else
quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
/* check to see if we need to change clock source */
if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
dbg("selecting clock %p\n", clk);
s3c24xx_serial_setsource(port, clksrc);
if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
clk_disable(ourport->baudclk);
ourport->baudclk = NULL;
}
clk_enable(clk);
ourport->clksrc = clksrc;
ourport->baudclk = clk;
ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
}
if (ourport->info->has_divslot) {
unsigned int div = ourport->baudclk_rate / baud;
if (cfg->has_fracval) {
udivslot = (div & 15);
dbg("fracval = %04x\n", udivslot);
} else {
udivslot = udivslot_table[div & 15];
dbg("udivslot = %04x (div %d)\n", udivslot, div & 15);
}
}
switch (termios->c_cflag & CSIZE) {
case CS5:
dbg("config: 5bits/char\n");
ulcon = S3C2410_LCON_CS5;
break;
case CS6:
dbg("config: 6bits/char\n");
ulcon = S3C2410_LCON_CS6;
break;
case CS7:
dbg("config: 7bits/char\n");
ulcon = S3C2410_LCON_CS7;
break;
case CS8:
default:
dbg("config: 8bits/char\n");
ulcon = S3C2410_LCON_CS8;
break;
}
/* preserve original lcon IR settings */
ulcon |= (cfg->ulcon & S3C2410_LCON_IRM);
if (termios->c_cflag & CSTOPB)
ulcon |= S3C2410_LCON_STOPB;
umcon = (termios->c_cflag & CRTSCTS) ? S3C2410_UMCOM_AFC : 0;
if (termios->c_cflag & PARENB) {
if (termios->c_cflag & PARODD)
ulcon |= S3C2410_LCON_PODD;
else
ulcon |= S3C2410_LCON_PEVEN;
} else {
ulcon |= S3C2410_LCON_PNONE;
}
spin_lock_irqsave(&port->lock, flags);
dbg("setting ulcon to %08x, brddiv to %d, udivslot %08x\n",
ulcon, quot, udivslot);
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
wr_regl(port, S3C2410_UMCON, umcon);
if (ourport->info->has_divslot)
wr_regl(port, S3C2443_DIVSLOT, udivslot);
dbg("uart: ulcon = 0x%08x, ucon = 0x%08x, ufcon = 0x%08x\n",
rd_regl(port, S3C2410_ULCON),
rd_regl(port, S3C2410_UCON),
rd_regl(port, S3C2410_UFCON));
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/*
* Which character status flags are we interested in?
*/
port->read_status_mask = S3C2410_UERSTAT_OVERRUN;
if (termios->c_iflag & INPCK)
port->read_status_mask |= S3C2410_UERSTAT_FRAME | S3C2410_UERSTAT_PARITY;
/*
* Which character status flags should we ignore?
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= S3C2410_UERSTAT_OVERRUN;
if (termios->c_iflag & IGNBRK && termios->c_iflag & IGNPAR)
port->ignore_status_mask |= S3C2410_UERSTAT_FRAME;
/*
* Ignore all characters if CREAD is not set.
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= RXSTAT_DUMMY_READ;
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *s3c24xx_serial_type(struct uart_port *port)
{
switch (port->type) {
case PORT_S3C2410:
return "S3C2410";
case PORT_S3C2440:
return "S3C2440";
case PORT_S3C2412:
return "S3C2412";
case PORT_S3C6400:
return "S3C6400/10";
default:
return NULL;
}
}
#define MAP_SIZE (0x100)
static void s3c24xx_serial_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, MAP_SIZE);
}
static int s3c24xx_serial_request_port(struct uart_port *port)
{
const char *name = s3c24xx_serial_portname(port);
return request_mem_region(port->mapbase, MAP_SIZE, name) ? 0 : -EBUSY;
}
static void s3c24xx_serial_config_port(struct uart_port *port, int flags)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
if (flags & UART_CONFIG_TYPE &&
s3c24xx_serial_request_port(port) == 0)
port->type = info->type;
}
/*
* verify the new serial_struct (for TIOCSSERIAL).
*/
static int
s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
if (ser->type != PORT_UNKNOWN && ser->type != info->type)
return -EINVAL;
return 0;
}
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
static struct console s3c24xx_serial_console;
#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
#else
#define S3C24XX_SERIAL_CONSOLE NULL
#endif
static struct uart_ops s3c24xx_serial_ops = {
.pm = s3c24xx_serial_pm,
.tx_empty = s3c24xx_serial_tx_empty,
.get_mctrl = s3c24xx_serial_get_mctrl,
.set_mctrl = s3c24xx_serial_set_mctrl,
.stop_tx = s3c24xx_serial_stop_tx,
.start_tx = s3c24xx_serial_start_tx,
.stop_rx = s3c24xx_serial_stop_rx,
.enable_ms = s3c24xx_serial_enable_ms,
.break_ctl = s3c24xx_serial_break_ctl,
.startup = s3c24xx_serial_startup,
.shutdown = s3c24xx_serial_shutdown,
.set_termios = s3c24xx_serial_set_termios,
.type = s3c24xx_serial_type,
.release_port = s3c24xx_serial_release_port,
.request_port = s3c24xx_serial_request_port,
.config_port = s3c24xx_serial_config_port,
.verify_port = s3c24xx_serial_verify_port,
};
static struct uart_driver s3c24xx_uart_drv = {
.owner = THIS_MODULE,
.driver_name = "s3c2410_serial",
.nr = CONFIG_SERIAL_SAMSUNG_UARTS,
.cons = S3C24XX_SERIAL_CONSOLE,
.dev_name = S3C24XX_SERIAL_NAME,
.major = S3C24XX_SERIAL_MAJOR,
.minor = S3C24XX_SERIAL_MINOR,
};
static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS] = {
[0] = {
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[0].port.lock),
.iotype = UPIO_MEM,
.irq = IRQ_S3CUART_RX0,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
.flags = UPF_BOOT_AUTOCONF,
.line = 0,
}
},
[1] = {
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[1].port.lock),
.iotype = UPIO_MEM,
.irq = IRQ_S3CUART_RX1,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
.flags = UPF_BOOT_AUTOCONF,
.line = 1,
}
},
#if CONFIG_SERIAL_SAMSUNG_UARTS > 2
[2] = {
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[2].port.lock),
.iotype = UPIO_MEM,
.irq = IRQ_S3CUART_RX2,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
.flags = UPF_BOOT_AUTOCONF,
.line = 2,
}
},
#endif
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
[3] = {
.port = {
.lock = __SPIN_LOCK_UNLOCKED(s3c24xx_serial_ports[3].port.lock),
.iotype = UPIO_MEM,
.irq = IRQ_S3CUART_RX3,
.uartclk = 0,
.fifosize = 16,
.ops = &s3c24xx_serial_ops,
.flags = UPF_BOOT_AUTOCONF,
.line = 3,
}
}
#endif
};
/* s3c24xx_serial_resetport
*
* wrapper to call the specific reset for this port (reset the fifos
* and the settings)
*/
static inline int s3c24xx_serial_resetport(struct uart_port *port,
struct s3c2410_uartcfg *cfg)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
return (info->reset_port)(port, cfg);
}
#ifdef CONFIG_CPU_FREQ
static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
{
struct s3c24xx_uart_port *port;
struct uart_port *uport;
port = container_of(nb, struct s3c24xx_uart_port, freq_transition);
uport = &port->port;
/* check to see if port is enabled */
if (port->pm_level != 0)
return 0;
/* try to work out if the baud rate is changing: we can detect
* a change in rate, but we do not have support for detecting
* a disturbance in the clock rate over the change.
*/
if (IS_ERR(port->clk))
goto exit;
if (port->baudclk_rate == clk_get_rate(port->clk))
goto exit;
if (val == CPUFREQ_PRECHANGE) {
/* we should really shut the port down whilst the
* frequency change is in progress. */
} else if (val == CPUFREQ_POSTCHANGE) {
struct ktermios *termios;
struct tty_struct *tty;
if (uport->state == NULL)
goto exit;
tty = uport->state->port.tty;
if (tty == NULL)
goto exit;
termios = tty->termios;
if (termios == NULL) {
printk(KERN_WARNING "%s: no termios?\n", __func__);
goto exit;
}
s3c24xx_serial_set_termios(uport, termios, NULL);
}
exit:
return 0;
}
static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
{
port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition;
return cpufreq_register_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
{
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
{
return 0;
}
static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
{
}
#endif
/* s3c24xx_serial_init_port
*
* initialise a single serial port from the platform device given
*/
static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info,
struct platform_device *platdev)
{
struct uart_port *port = &ourport->port;
struct s3c2410_uartcfg *cfg;
struct resource *res;
int ret;
dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
if (platdev == NULL)
return -ENODEV;
cfg = s3c24xx_dev_to_cfg(&platdev->dev);
if (port->mapbase != 0)
return 0;
if (cfg->hwport > CONFIG_SERIAL_SAMSUNG_UARTS) {
printk(KERN_ERR "%s: port %d bigger than %d\n", __func__,
cfg->hwport, CONFIG_SERIAL_SAMSUNG_UARTS);
return -ERANGE;
}
/* setup info for port */
port->dev = &platdev->dev;
ourport->info = info;
/* copy the info in from provided structure */
ourport->port.fifosize = info->fifosize;
dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
dbg("s3c24xx_serial_init_port: enabling flow control\n");
port->flags |= UPF_CONS_FLOW;
}
/* sort out the physical and virtual addresses for each UART */
res = platform_get_resource(platdev, IORESOURCE_MEM, 0);
if (res == NULL) {
printk(KERN_ERR "failed to find memory resource for uart\n");
return -EINVAL;
}
dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
port->mapbase = res->start;
port->membase = S3C_VA_UART + (res->start & 0xfffff);
ret = platform_get_irq(platdev, 0);
if (ret < 0)
port->irq = 0;
else {
port->irq = ret;
ourport->rx_irq = ret;
ourport->tx_irq = ret + 1;
}
ret = platform_get_irq(platdev, 1);
if (ret > 0)
ourport->tx_irq = ret;
ourport->clk = clk_get(&platdev->dev, "uart");
dbg("port: map=%08x, mem=%08x, irq=%d (%d,%d), clock=%ld\n",
port->mapbase, port->membase, port->irq,
ourport->rx_irq, ourport->tx_irq, port->uartclk);
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
return 0;
}
static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->clksrc->name);
}
static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
/* Device driver serial port probe */
static int probe_index;
int s3c24xx_serial_probe(struct platform_device *dev,
struct s3c24xx_uart_info *info)
{
struct s3c24xx_uart_port *ourport;
int ret;
dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
ourport = &s3c24xx_serial_ports[probe_index];
probe_index++;
dbg("%s: initialising port %p...\n", __func__, ourport);
ret = s3c24xx_serial_init_port(ourport, info, dev);
if (ret < 0)
goto probe_err;
dbg("%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
platform_set_drvdata(dev, &ourport->port);
ret = device_create_file(&dev->dev, &dev_attr_clock_source);
if (ret < 0)
printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
dev_err(&dev->dev, "failed to add cpufreq notifier\n");
return 0;
probe_err:
return ret;
}
EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
if (port) {
s3c24xx_serial_cpufreq_deregister(to_ourport(port));
device_remove_file(&dev->dev, &dev_attr_clock_source);
uart_remove_one_port(&s3c24xx_uart_drv, port);
}
return 0;
}
EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
/* UART power management code */
#ifdef CONFIG_PM
static int s3c24xx_serial_suspend(struct platform_device *dev, pm_message_t state)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
if (port)
uart_suspend_port(&s3c24xx_uart_drv, port);
return 0;
}
static int s3c24xx_serial_resume(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (port) {
clk_enable(ourport->clk);
s3c24xx_serial_resetport(port, s3c24xx_port_to_cfg(port));
clk_disable(ourport->clk);
uart_resume_port(&s3c24xx_uart_drv, port);
}
return 0;
}
#endif
int s3c24xx_serial_init(struct platform_driver *drv,
struct s3c24xx_uart_info *info)
{
dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
#ifdef CONFIG_PM
drv->suspend = s3c24xx_serial_suspend;
drv->resume = s3c24xx_serial_resume;
#endif
return platform_driver_register(drv);
}
EXPORT_SYMBOL_GPL(s3c24xx_serial_init);
/* module initialisation code */
static int __init s3c24xx_serial_modinit(void)
{
int ret;
ret = uart_register_driver(&s3c24xx_uart_drv);
if (ret < 0) {
printk(KERN_ERR "failed to register UART driver\n");
return ret;
}
return 0;
}
static void __exit s3c24xx_serial_modexit(void)
{
uart_unregister_driver(&s3c24xx_uart_drv);
}
module_init(s3c24xx_serial_modinit);
module_exit(s3c24xx_serial_modexit);
/* Console code */
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
static struct uart_port *cons_uart;
static int
s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
unsigned long ufstat, utrstat;
if (ufcon & S3C2410_UFCON_FIFOMODE) {
/* fifo mode - check amount of data in fifo registers... */
ufstat = rd_regl(port, S3C2410_UFSTAT);
return (ufstat & info->tx_fifofull) ? 0 : 1;
}
/* in non-fifo mode, check the tx buffer empty bit instead */
utrstat = rd_regl(port, S3C2410_UTRSTAT);
return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
}
static void
s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
{
unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
while (!s3c24xx_serial_console_txrdy(port, ufcon))
barrier();
wr_regb(cons_uart, S3C2410_UTXH, ch);
}
static void
s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
}
static void __init
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
struct s3c24xx_uart_clksrc clksrc;
struct clk *clk;
unsigned int ulcon;
unsigned int ucon;
unsigned int ubrdiv;
unsigned long rate;
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
ubrdiv = rd_regl(port, S3C2410_UBRDIV);
dbg("s3c24xx_serial_get_options: port=%p\n"
"registers: ulcon=%08x, ucon=%08x, ubdriv=%08x\n",
port, ulcon, ucon, ubrdiv);
if ((ucon & 0xf) != 0) {
/* consider the serial port configured if the tx/rx mode is set */
switch (ulcon & S3C2410_LCON_CSMASK) {
case S3C2410_LCON_CS5:
*bits = 5;
break;
case S3C2410_LCON_CS6:
*bits = 6;
break;
case S3C2410_LCON_CS7:
*bits = 7;
break;
default:
case S3C2410_LCON_CS8:
*bits = 8;
break;
}
switch (ulcon & S3C2410_LCON_PMASK) {
case S3C2410_LCON_PEVEN:
*parity = 'e';
break;
case S3C2410_LCON_PODD:
*parity = 'o';
break;
case S3C2410_LCON_PNONE:
default:
*parity = 'n';
}
/* now calculate the baud rate */
s3c24xx_serial_getsource(port, &clksrc);
clk = clk_get(port->dev, clksrc.name);
if (!IS_ERR(clk) && clk != NULL)
rate = clk_get_rate(clk) / clksrc.divisor;
else
rate = 1;
*baud = rate / (16 * (ubrdiv + 1));
dbg("calculated baud %d\n", *baud);
}
}
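/*
* Worked example for the calculation above, with assumed numbers: a
* 66,500,000 Hz source clock with divisor 1 and UBRDIV 35 yields
* 66500000 / (16 * (35 + 1)) = 115451, i.e. a port that was set up
* for the standard 115200 baud.
*/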
/* s3c24xx_serial_init_ports
*
* initialise the serial ports from the machine provided initialisation
* data.
*/
static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info **info)
{
struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
struct platform_device **platdev_ptr;
int i;
dbg("s3c24xx_serial_init_ports: initialising ports...\n");
platdev_ptr = s3c24xx_uart_devs;
for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
s3c24xx_serial_init_port(ptr, info[i], *platdev_ptr);
}
return 0;
}
static int __init
s3c24xx_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
dbg("s3c24xx_serial_console_setup: co=%p (%d), %s\n",
co, co->index, options);
/*
* Check whether an invalid uart number has been specified, and
* if so, fall back to the first port.
*/
if (co->index == -1 || co->index >= CONFIG_SERIAL_SAMSUNG_UARTS)
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
/* is the port configured? */
if (port->mapbase == 0x0) {
co->index = 0;
port = &s3c24xx_serial_ports[co->index].port;
}
cons_uart = port;
dbg("s3c24xx_serial_console_setup: port=%p (%d)\n", port, co->index);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
s3c24xx_serial_get_options(port, &baud, &parity, &bits);
dbg("s3c24xx_serial_console_setup: baud %d\n", baud);
return uart_set_options(port, co, baud, parity, bits, flow);
}
/* s3c24xx_serial_initconsole
*
* initialise the console from one of the uart drivers
*/
static struct console s3c24xx_serial_console = {
.name = S3C24XX_SERIAL_NAME,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
.write = s3c24xx_serial_console_write,
.setup = s3c24xx_serial_console_setup
};
int s3c24xx_serial_initconsole(struct platform_driver *drv,
struct s3c24xx_uart_info **info)
{
struct platform_device *dev = s3c24xx_uart_devs[0];
dbg("s3c24xx_serial_initconsole\n");
/* select driver based on the cpu */
if (dev == NULL) {
printk(KERN_ERR "s3c24xx: no devices for console init\n");
return 0;
}
if (strcmp(dev->name, drv->driver.name) != 0)
return 0;
s3c24xx_serial_console.data = &s3c24xx_uart_drv;
s3c24xx_serial_init_ports(info);
register_console(&s3c24xx_serial_console);
return 0;
}
#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
KernelWorld/KW-Kenzo | drivers/input/serio/rpckbd.c | 2476 | 4339 | /*
* Copyright (c) 2000-2001 Vojtech Pavlik
* Copyright (c) 2002 Russell King
*/
/*
* Acorn RiscPC PS/2 keyboard controller driver for Linux/ARM
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/serio.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <mach/hardware.h>
#include <asm/hardware/iomd.h>
MODULE_AUTHOR("Vojtech Pavlik, Russell King");
MODULE_DESCRIPTION("Acorn RiscPC PS/2 keyboard controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:kart");
struct rpckbd_data {
int tx_irq;
int rx_irq;
};
static int rpckbd_write(struct serio *port, unsigned char val)
{
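/* busy-wait until the transmitter is ready to accept another byte */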
while (!(iomd_readb(IOMD_KCTRL) & (1 << 7)))
cpu_relax();
iomd_writeb(val, IOMD_KARTTX);
return 0;
}
static irqreturn_t rpckbd_rx(int irq, void *dev_id)
{
struct serio *port = dev_id;
unsigned int byte;
int handled = IRQ_NONE;
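/* drain the receive register while the controller reports data ready */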
while (iomd_readb(IOMD_KCTRL) & (1 << 5)) {
byte = iomd_readb(IOMD_KARTRX);
serio_interrupt(port, byte, 0);
handled = IRQ_HANDLED;
}
return handled;
}
static irqreturn_t rpckbd_tx(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static int rpckbd_open(struct serio *port)
{
struct rpckbd_data *rpckbd = port->port_data;
/* Reset the keyboard state machine. */
iomd_writeb(0, IOMD_KCTRL);
iomd_writeb(8, IOMD_KCTRL);
iomd_readb(IOMD_KARTRX);
if (request_irq(rpckbd->rx_irq, rpckbd_rx, 0, "rpckbd", port) != 0) {
printk(KERN_ERR "rpckbd.c: Could not allocate keyboard receive IRQ\n");
return -EBUSY;
}
if (request_irq(rpckbd->tx_irq, rpckbd_tx, 0, "rpckbd", port) != 0) {
printk(KERN_ERR "rpckbd.c: Could not allocate keyboard transmit IRQ\n");
free_irq(rpckbd->rx_irq, port);
return -EBUSY;
}
return 0;
}
static void rpckbd_close(struct serio *port)
{
struct rpckbd_data *rpckbd = port->port_data;
free_irq(rpckbd->rx_irq, port);
free_irq(rpckbd->tx_irq, port);
}
/*
* Allocate and initialize serio structure for subsequent registration
* with serio core.
*/
static int rpckbd_probe(struct platform_device *dev)
{
struct rpckbd_data *rpckbd;
struct serio *serio;
int tx_irq, rx_irq;
rx_irq = platform_get_irq(dev, 0);
if (rx_irq <= 0)
return rx_irq < 0 ? rx_irq : -ENXIO;
tx_irq = platform_get_irq(dev, 1);
if (tx_irq <= 0)
return tx_irq < 0 ? tx_irq : -ENXIO;
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
rpckbd = kzalloc(sizeof(*rpckbd), GFP_KERNEL);
if (!serio || !rpckbd) {
kfree(rpckbd);
kfree(serio);
return -ENOMEM;
}
rpckbd->rx_irq = rx_irq;
rpckbd->tx_irq = tx_irq;
serio->id.type = SERIO_8042;
serio->write = rpckbd_write;
serio->open = rpckbd_open;
serio->close = rpckbd_close;
serio->dev.parent = &dev->dev;
serio->port_data = rpckbd;
strlcpy(serio->name, "RiscPC PS/2 kbd port", sizeof(serio->name));
strlcpy(serio->phys, "rpckbd/serio0", sizeof(serio->phys));
platform_set_drvdata(dev, serio);
serio_register_port(serio);
return 0;
}
static int rpckbd_remove(struct platform_device *dev)
{
struct serio *serio = platform_get_drvdata(dev);
struct rpckbd_data *rpckbd = serio->port_data;
serio_unregister_port(serio);
kfree(rpckbd);
return 0;
}
static struct platform_driver rpckbd_driver = {
.probe = rpckbd_probe,
.remove = rpckbd_remove,
.driver = {
.name = "kart",
.owner = THIS_MODULE,
},
};
module_platform_driver(rpckbd_driver);
| gpl-2.0 |
HostZero/android_kernel_zuk_msm8996 | arch/arm/common/sharpsl_param.c | 2476 | 1916 | /*
* Hardware parameter area specific to Sharp SL series devices
*
* Copyright (c) 2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mach/sharpsl_param.h>
#include <asm/memory.h>
/*
* Certain hardware parameters, determined at the time of device
* manufacture and typically including the LCD parameters, are loaded by
* the bootloader at the address PARAM_BASE. As the kernel will overwrite
* them, we need to store them early in the boot process, then pass them
* to the appropriate drivers. Not all devices use all parameters, but
* the format is common to all.
*/
#ifdef CONFIG_ARCH_SA1100
#define PARAM_BASE 0xe8ffc000
#define param_start(x) (void *)(x)
#else
#define PARAM_BASE 0xa0000a00
#define param_start(x) __va(x)
#endif
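/* on SA1100 machines PARAM_BASE can be used as a pointer directly,
* while on other machines it is a physical address that must first be
* translated with __va().
*/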
#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
#define COMADJ_MAGIC MAGIC_CHG('C','M','A','D')
#define UUID_MAGIC MAGIC_CHG('U','U','I','D')
#define TOUCH_MAGIC MAGIC_CHG('T','U','C','H')
#define AD_MAGIC MAGIC_CHG('B','V','A','D')
#define PHAD_MAGIC MAGIC_CHG('P','H','A','D')
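/*
* For illustration: COMADJ_MAGIC = MAGIC_CHG('C','M','A','D') expands to
* ('D' << 24) | ('A' << 16) | ('M' << 8) | 'C' = 0x44414d43, i.e. the
* bytes "CMAD" when the keyword is read from little-endian memory.
*/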
struct sharpsl_param_info sharpsl_param;
EXPORT_SYMBOL(sharpsl_param);
void sharpsl_save_param(void)
{
memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
sharpsl_param.comadj = -1;
if (sharpsl_param.phad_keyword != PHAD_MAGIC)
sharpsl_param.phadadj = -1;
if (sharpsl_param.uuid_keyword != UUID_MAGIC)
sharpsl_param.uuid[0] = -1;
if (sharpsl_param.touch_keyword != TOUCH_MAGIC)
sharpsl_param.touch_xp = -1;
if (sharpsl_param.adadj_keyword != AD_MAGIC)
sharpsl_param.adadj = -1;
}
| gpl-2.0 |
faux123/Note_4_SM-N910T | arch/blackfin/mach-bf537/boards/cm_bf537u.c | 2732 | 18359 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Bluetechnix
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/spi/mmc_spi.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "Bluetechnix CM BF537U";
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
.name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p64",
};
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip */
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
static struct platform_device hitachi_fb_device = {
.name = "hitachi-tx09",
};
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.start = 0x20200300,
.end = 0x20200300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF14,
.end = IRQ_PF14,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
static struct resource isp1362_hcd_resources[] = {
{
.start = 0x20308000,
.end = 0x20308000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20308004,
.end = 0x20308004,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG15,
.end = IRQ_PG15,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
static struct isp1362_platform_data isp1362_priv = {
.sel15Kres = 1,
.clknotstop = 0,
.oc_enable = 0,
.int_act_high = 0,
.int_edge_triggered = 0,
.remote_wakeup_connected = 0,
.no_power_switching = 1,
.power_switching_mode = 0,
};
static struct platform_device isp1362_hcd_device = {
.name = "isp1362-hcd",
.id = 0,
.dev = {
.platform_data = &isp1362_priv,
},
.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
.resource = isp1362_hcd_resources,
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20200000,
.end = 0x20200000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PH14,
.end = IRQ_PH14,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
static struct mtd_partition cm_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x100000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct physmap_flash_data cm_flash_data = {
.width = 2,
.parts = cm_partitions,
.nr_parts = ARRAY_SIZE(cm_partitions),
};
static unsigned cm_flash_gpios[] = { GPIO_PH0 };
static struct resource cm_flash_resource[] = {
{
.name = "cfi_probe",
.start = 0x20000000,
.end = 0x201fffff,
.flags = IORESOURCE_MEM,
}, {
.start = (unsigned long)cm_flash_gpios,
.end = ARRAY_SIZE(cm_flash_gpios),
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device cm_flash_device = {
.name = "gpio-addr-flash",
.id = 0,
.dev = {
.platform_data = &cm_flash_data,
},
.num_resources = ARRAY_SIZE(cm_flash_resource),
.resource = cm_flash_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
.dev = {
.platform_data = &bfin_twi0_pins,
},
};
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_MII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
#define PATA_INT IRQ_PF14
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 2,
.irq_type = IRQF_TRIGGER_HIGH,
};
static struct resource bfin_pata_resources[] = {
{
.start = 0x2030C000,
.end = 0x2030C01F,
.flags = IORESOURCE_MEM,
},
{
.start = 0x2030D018,
.end = 0x2030D01B,
.flags = IORESOURCE_MEM,
},
{
.start = PATA_INT,
.end = PATA_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pata_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pata_resources),
.resource = bfin_pata_resources,
.dev = {
.platform_data = &bfin_pata_platform_data,
}
};
#endif
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_085, 250000000),
VRPAIR(VLEV_090, 376000000),
VRPAIR(VLEV_095, 426000000),
VRPAIR(VLEV_100, 426000000),
VRPAIR(VLEV_105, 476000000),
VRPAIR(VLEV_110, 476000000),
VRPAIR(VLEV_115, 476000000),
VRPAIR(VLEV_120, 500000000),
VRPAIR(VLEV_125, 533000000),
VRPAIR(VLEV_130, 600000000),
};
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
static struct platform_device *cm_bf537u_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
&hitachi_fb_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
&isp1362_hcd_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
&bfin_pata_device,
#endif
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
&cm_flash_device,
#endif
};
static int __init net2272_init(void)
{
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
int ret;
ret = gpio_request(GPIO_PH15, "net2272");
if (ret)
return ret;
ret = gpio_request(GPIO_PH13, "net2272");
if (ret) {
gpio_free(GPIO_PH15);
return ret;
}
/* Set PH15 low to make /AMS2 work properly */
gpio_direction_output(GPIO_PH15, 0);
/* enable CLKBUF output */
bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
/* Reset the USB chip */
gpio_direction_output(GPIO_PH13, 0);
mdelay(2);
gpio_set_value(GPIO_PH13, 1);
#endif
return 0;
}
static int __init cm_bf537u_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
platform_add_devices(cm_bf537u_devices, ARRAY_SIZE(cm_bf537u_devices));
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
if (net2272_init())
pr_warning("unable to configure net2272; it probably won't work\n");
return 0;
}
arch_initcall(cm_bf537u_init);
static struct platform_device *cm_bf537u_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(cm_bf537u_early_devices,
ARRAY_SIZE(cm_bf537u_early_devices));
}
int bfin_get_ether_addr(char *addr)
{
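/* this board cannot supply a MAC address; the nonzero return reports that */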
return 1;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
Tasssadar/kernel_nexus | arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 2732 | 5818 | /*
* mpc7448_hpc2.c
*
* Board setup routines for the Freescale mpc7448hpc2(taiga) platform
*
* Author: Jacob Pan
* jacob.pan@freescale.com
* Author: Xianghua Xiao
* x.xiao@freescale.com
* Maintainer: Roy Zang <tie-fei.zang@freescale.com>
* Add Flat Device Tree support for the mpc7448hpc2 board
*
* Copyright 2004-2006 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
#include <asm/reg.h>
#include <mm/mmu_decl.h>
#include <asm/tsi108_pci.h>
#include <asm/tsi108_irq.h>
#include <asm/mpic.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
#define MPC7448HPC2_PCI_CFG_PHYS 0xfb000000
int mpc7448_hpc2_exclude_device(struct pci_controller *hose,
u_char bus, u_char devfn)
{
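/* never probe bus 0, slot 0: that is the Tsi108 host bridge itself */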
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
else
return PCIBIOS_SUCCESSFUL;
}
static void __init mpc7448_hpc2_setup_arch(void)
{
struct device_node *np;
if (ppc_md.progress)
ppc_md.progress("mpc7448_hpc2_setup_arch():set_bridge", 0);
tsi108_csr_vir_base = get_vir_csrbase();
/* setup PCI host bridge */
#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "tsi108-pci")
tsi108_setup_pci(np, MPC7448HPC2_PCI_CFG_PHYS, 0);
ppc_md.pci_exclude_device = mpc7448_hpc2_exclude_device;
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
#endif
printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n");
printk(KERN_INFO
"Jointly ported by Freescale and Tundra Semiconductor\n");
printk(KERN_INFO
"Enabling L2 cache then enabling the HID0 prefetch engine.\n");
}
/*
* Interrupt setup and service. Interrupts on the mpc7448_hpc2 come
* from the four external INT pins; PCI interrupts are routed via the
* PCI interrupt control registers and generate the internal IRQ23.
*
* Interrupt routing on the Taiga Board:
* TSI108:PB_INT[0] -> CPU0:INT#
* TSI108:PB_INT[1] -> CPU0:MCP#
* TSI108:PB_INT[2] -> N/C
* TSI108:PB_INT[3] -> N/C
*/
static void __init mpc7448_hpc2_init_IRQ(void)
{
struct mpic *mpic;
phys_addr_t mpic_paddr = 0;
struct device_node *tsi_pic;
#ifdef CONFIG_PCI
unsigned int cascade_pci_irq;
struct device_node *tsi_pci;
struct device_node *cascade_node = NULL;
#endif
tsi_pic = of_find_node_by_type(NULL, "open-pic");
if (tsi_pic) {
unsigned int size;
const void *prop = of_get_property(tsi_pic, "reg", &size);
mpic_paddr = of_translate_address(tsi_pic, prop);
}
if (mpic_paddr == 0) {
printk("%s: No tsi108 PIC found !\n", __func__);
return;
}
DBG("%s: tsi108 pic phys_addr = 0x%x\n", __func__,
(u32) mpic_paddr);
mpic = mpic_alloc(tsi_pic, mpic_paddr,
MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
24,
NR_IRQS-4, /* num_sources used */
"Tsi108_PIC");
BUG_ON(mpic == NULL);
mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
mpic_init(mpic);
#ifdef CONFIG_PCI
tsi_pci = of_find_node_by_type(NULL, "pci");
if (tsi_pci == NULL) {
printk("%s: No tsi108 pci node found !\n", __func__);
return;
}
cascade_node = of_find_node_by_type(NULL, "pic-router");
if (cascade_node == NULL) {
printk("%s: No tsi108 pci cascade node found !\n", __func__);
return;
}
cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__,
(u32) cascade_pci_irq);
tsi108_pci_int_init(cascade_node);
irq_set_handler_data(cascade_pci_irq, mpic);
irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
of_node_put(tsi_pic);
}
void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
}
void mpc7448_hpc2_restart(char *cmd)
{
local_irq_disable();
/* Set exception prefix high - to the firmware */
_nmask_and_or_msr(0, MSR_IP);
for (;;) ; /* Spin until reset happens */
}
void mpc7448_hpc2_power_off(void)
{
local_irq_disable();
for (;;) ; /* No way to shut power off with software */
}
void mpc7448_hpc2_halt(void)
{
mpc7448_hpc2_power_off();
}
/*
* Called very early, device-tree isn't unflattened
*/
static int __init mpc7448_hpc2_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "mpc74xx"))
return 0;
return 1;
}
static int mpc7448_machine_check_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry;
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs->msr |= MSR_RI;
regs->nip = entry->fixup;
return 1;
}
return 0;
}
define_machine(mpc7448_hpc2){
.name = "MPC7448 HPC2",
.probe = mpc7448_hpc2_probe,
.setup_arch = mpc7448_hpc2_setup_arch,
.init_IRQ = mpc7448_hpc2_init_IRQ,
.show_cpuinfo = mpc7448_hpc2_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = mpc7448_hpc2_restart,
.calibrate_decr = generic_calibrate_decr,
.machine_check_exception= mpc7448_machine_check_exception,
.progress = udbg_progress,
};
| gpl-2.0 |
Rerito/linux-ubi | arch/blackfin/mach-bf537/boards/dnp5370.c | 2732 | 9890 | /*
* This is the configuration for SSV Dil/NetPC DNP/5370 board.
*
* DIL module: http://www.dilnetpc.com/dnp0086.htm
* SK28 (starter kit): http://www.dilnetpc.com/dnp0088.htm
*
* Copyright 2010 3ality Digital Systems
* Copyright 2005 National ICT Australia (NICTA)
* Copyright 2004-2006 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/spi/mmc_spi.h>
#include <linux/phy.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "DNP/5370";
#define FLASH_MAC 0x202f0000
#define CONFIG_MTD_PHYSMAP_LEN 0x300000
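/* FLASH_MAC falls in the last 64 KiB of the 3 MB NOR window
* (0x20000000 + 0x300000 - 0x10000), i.e. the "MAC address(nor)"
* partition defined below.
*/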
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_RMII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = PHY_POLL, /* IRQ_MAC_PHYINT */
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_RMII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition asmb_flash_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x30000,
.offset = 0,
}, {
.name = "linux kernel and rootfs(nor)",
.size = 0x300000 - 0x30000 - 0x10000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "MAC address(nor)",
.size = 0x10000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
}
};
static struct physmap_flash_data asmb_flash_data = {
.width = 1,
.parts = asmb_flash_partitions,
.nr_parts = ARRAY_SIZE(asmb_flash_partitions),
};
static struct resource asmb_flash_resource = {
.start = 0x20000000,
.end = 0x202fffff,
.flags = IORESOURCE_MEM,
};
/* 4 MB NOR flash attached to async memory banks 0-2,
* therefore only 3 MB visible.
*/
static struct platform_device asmb_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &asmb_flash_data,
},
.num_resources = 1,
.resource = &asmb_flash_resource,
};
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip*/
};
#endif
#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
/* This mapping is for the at45db642, which has a 1056-byte page size;
* partition size and offset should be page aligned.
*/
static struct mtd_partition bfin_spi_dataflash_partitions[] = {
{
.name = "JFFS2 dataflash(nor)",
#ifdef CONFIG_MTD_PAGESIZE_1024
.offset = 0x40000,
.size = 0x7C0000,
#else
.offset = 0x0,
.size = 0x840000,
#endif
}
};
static struct flash_platform_data bfin_spi_dataflash_data = {
.name = "mtd_dataflash",
.parts = bfin_spi_dataflash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
.type = "mtd_dataflash",
};
static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip*/
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
/* SD/MMC card reader at SPI bus */
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = 1,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
/* 8 Megabyte Atmel NOR flash chip at SPI bus */
#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
{
.modalias = "mtd_dataflash",
.max_speed_hz = 16700000,
.bus_num = 0,
.chip_select = 2,
.platform_data = &bfin_spi_dataflash_data,
.controller_data = &spi_dataflash_chip_info,
.mode = SPI_MODE_3, /* SPI_CPHA and SPI_CPOL */
},
#endif
};
/* SPI controller data */
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct bfin5xx_spi_master spi_bfin_master_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
static struct platform_device spi_bfin_master_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &spi_bfin_master_info, /* Passed to driver */
},
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE + 0xff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
.dev = {
.platform_data = &bfin_twi0_pins,
},
};
#endif
static struct platform_device *dnp5370_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
&asmb_flash_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&spi_bfin_master_device,
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
};
static int __init dnp5370_init(void)
{
printk(KERN_INFO "DNP/5370: registering device resources\n");
platform_add_devices(dnp5370_devices, ARRAY_SIZE(dnp5370_devices));
printk(KERN_INFO "DNP/5370: registering %zu SPI slave devices\n",
ARRAY_SIZE(bfin_spi_board_info));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
printk(KERN_INFO "DNP/5370: MAC %pM\n", (void *)FLASH_MAC);
return 0;
}
arch_initcall(dnp5370_init);
/*
* Currently the MAC address is saved in Flash by U-Boot
*/
int bfin_get_ether_addr(char *addr)
{
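/* the MAC is stored as a 32-bit word followed by a 16-bit word */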
*(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
*(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
return 0;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |